2 * Copyright (c) 2003 The Libav Project
4 * This file is part of Libav.
6 * Libav is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * Libav is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with Libav; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
43 #include "libavutil/attributes.h"
46 #include "mpegvideo.h"
49 #include "h264data.h" // FIXME FIXME FIXME
51 #include "h264_mvpred.h"
54 #include "rectangle.h"
55 #include "vdpau_internal.h"
79 uint32_t watermark_key;
81 int next_p_frame_damaged;
84 int last_frame_output;
87 #define FULLPEL_MODE 1
88 #define HALFPEL_MODE 2
89 #define THIRDPEL_MODE 3
90 #define PREDICT_MODE 4
92 /* dual scan (from some older h264 draft)
/* SVQ3-specific scan order for 4x4 residual coefficients; each entry is a
 * position encoded as x + y*4 inside the 4x4 block.
 * NOTE(review): the closing "};" is not visible — source lines appear to be
 * missing from this excerpt. */
101 static const uint8_t svq3_scan[16] = {
102 0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
103 2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
104 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
105 0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
/* Pairs of (index_a, index_b) used to look up two 4x4 intra prediction modes
 * per decoded luma-prediction VLC (see svq3_decode_mb, where
 * svq3_pred_0[vlc][0]/[1] index into svq3_pred_1).
 * NOTE(review): only ~19 of the declared 25 rows are visible here; at least
 * the first row and the closing "};" appear to be missing from this excerpt. */
108 static const uint8_t svq3_pred_0[25][2] = {
111 { 0, 2 }, { 1, 1 }, { 2, 0 },
112 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
113 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
114 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
115 { 2, 4 }, { 3, 3 }, { 4, 2 },
/* Intra 4x4 prediction-mode table indexed as
 * svq3_pred_1[top_mode + 1][left_mode + 1][rank]; -1 entries mark invalid
 * combinations (checked as "weird prediction" in svq3_decode_mb).
 * NOTE(review): closing "};" not visible in this excerpt. */
120 static const int8_t svq3_pred_1[6][6][5] = {
121 { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
122 { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
123 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
124 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
125 { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
126 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
127 { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
128 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
129 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
130 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
131 { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
132 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/* (run, level) pairs for short DCT coefficient VLC codes, selected by the
 * "intra" flag in svq3_decode_block ([0] = inter-style, [1] = intra-style).
 * NOTE(review): the struct member declarations (presumably run/level fields)
 * and the closing "};" are not visible in this excerpt. */
135 static const struct {
138 } svq3_dct_tables[2][16] = {
139 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
140 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
141 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
142 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/* Dequantization multipliers indexed by qscale (0..31); used as "qmul" in the
 * IDCT helpers below, with results rounded via (+0x80000 >> 20).
 * NOTE(review): closing "};" not visible in this excerpt. */
145 static const uint32_t svq3_dequant_coeff[32] = {
146 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
147 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
148 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
149 61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
/* Dequantize and inverse-transform the 16 luma DC coefficients of an
 * INTRA16x16 macroblock, scattering the results into `output` at the
 * positions given by x_offset[] and the stride multiples below.
 * Uses the SVQ3 integer transform basis (13, 17, 7) rather than the H.264 one.
 * NOTE(review): declarations of `temp`, `stride`, `i` and the function braces
 * are not visible — intervening source lines appear to be missing. */
152 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
154 const int qmul = svq3_dequant_coeff[qp];
158 static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
/* first pass: 1-D transform over each row of the 4x4 DC array */
160 for (i = 0; i < 4; i++) {
161 const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
162 const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
163 const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
164 const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
166 temp[4 * i + 0] = z0 + z3;
167 temp[4 * i + 1] = z1 + z2;
168 temp[4 * i + 2] = z1 - z2;
169 temp[4 * i + 3] = z0 - z3;
/* second pass: transform columns, dequantize, and round (+0x80000 >> 20) */
172 for (i = 0; i < 4; i++) {
173 const int offset = x_offset[i];
174 const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
175 const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
176 const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
177 const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
179 output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
180 output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
181 output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
182 output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
/* Inverse-transform a 4x4 residual block (SVQ3 13/17/7 basis), dequantize
 * with svq3_dequant_coeff[qp], and add the result to `dst` with clipping.
 * When `dc` is nonzero the DC term is handled specially (dc == 1 selects the
 * fixed 1538 scale path). The block buffer is zeroed on exit.
 * NOTE(review): loop-variable declaration and some braces are not visible —
 * intervening source lines appear to be missing from this excerpt. */
187 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
188 int stride, int qp, int dc)
190 const int qmul = svq3_dequant_coeff[qp];
194 dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
195 : qmul * (block[0] >> 3) / 2);
/* horizontal pass, in place */
199 for (i = 0; i < 4; i++) {
200 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
201 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
202 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
203 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
205 block[0 + 4 * i] = z0 + z3;
206 block[1 + 4 * i] = z1 + z2;
207 block[2 + 4 * i] = z1 - z2;
208 block[3 + 4 * i] = z0 - z3;
/* vertical pass: dequantize, add the DC/rounding bias, clip into dst */
211 for (i = 0; i < 4; i++) {
212 const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
213 const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
214 const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
215 const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
216 const int rr = (dc + 0x80000);
218 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
219 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
220 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
221 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
/* clear the residual so the buffer can be reused for the next block */
224 memset(block, 0, 16 * sizeof(int16_t));
/* Decode one run/level-coded coefficient block from the bitstream into
 * `block`, using the scan pattern selected by `type`
 * (0 = luma DC zigzag, 1 = zigzag, 2 = svq3 scan, 3 = chroma DC).
 * Returns nonzero on error (presumably — the return statements are not
 * visible in this excerpt).
 * NOTE(review): several lines are missing here, including the `vlc`
 * declaration, parts of the short/long VLC branches and the loop closings. */
227 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
228 int index, const int type)
230 static const uint8_t *const scan_patterns[4] =
231 { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
233 int run, level, limit;
/* 3*type >> 2 maps type 0 -> 0 (inter tables), types 2..3 -> 1 (intra) */
235 const int intra = 3 * type >> 2;
236 const uint8_t *const scan = scan_patterns[type];
238 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
/* vlc == 0 terminates the run/level loop for this partition */
239 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
240 int sign = (vlc & 1) ? 0 : -1;
247 } else if (vlc < 4) {
252 level = (vlc + 9 >> 2) - run;
/* longer codes: look the pair up in the static tables */
256 run = svq3_dct_tables[intra][vlc].run;
257 level = svq3_dct_tables[intra][vlc].level;
261 ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
265 ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
/* bounds check before writing the coefficient */
269 if ((index += run) >= limit)
/* apply the sign via xor/subtract trick: (level ^ sign) - sign */
272 block[scan[index]] = (level ^ sign) - sign;
/* Motion-compensate one partition: copy/average pixels from the reference
 * picture (last_pic for dir == 0, next_pic otherwise) into the current
 * picture at (x, y), for luma and — unless CODEC_FLAG_GRAY — both chroma
 * planes. `thirdpel` selects the tpel DSP functions, `avg` selects
 * avg_ vs put_ variants. Out-of-frame references go through
 * emulated_edge_mc.
 * NOTE(review): declarations (src/dest/i), several closing braces and some
 * argument lines are not visible — source lines appear missing. */
283 static inline void svq3_mc_dir_part(SVQ3Context *s,
284 int x, int y, int width, int height,
285 int mx, int my, int dxy,
286 int thirdpel, int dir, int avg)
288 H264Context *h = &s->h;
289 const Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
292 int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
/* detect references outside the padded frame; clip and use edge emulation */
297 if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
298 my < 0 || my >= s->v_edge_pos - height - 1) {
300 mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
301 my = av_clip(my, -16, s->v_edge_pos - height + 15);
304 /* form component predictions */
305 dest = h->cur_pic.f.data[0] + x + y * h->linesize;
306 src = pic->f.data[0] + mx + my * h->linesize;
309 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->linesize,
310 width + 1, height + 1,
311 mx, my, s->h_edge_pos, s->v_edge_pos);
312 src = h->edge_emu_buffer;
/* thirdpel path uses the tpel tables, otherwise plain hpel pixels */
315 (avg ? h->dsp.avg_tpel_pixels_tab
316 : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
319 (avg ? s->hdsp.avg_pixels_tab
320 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
/* chroma: halve coordinates/sizes and repeat for planes 1 and 2 */
323 if (!(h->flags & CODEC_FLAG_GRAY)) {
324 mx = mx + (mx < (int) x) >> 1;
325 my = my + (my < (int) y) >> 1;
327 height = height >> 1;
330 for (i = 1; i < 3; i++) {
331 dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
332 src = pic->f.data[i] + mx + my * h->uvlinesize;
335 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->uvlinesize,
336 width + 1, height + 1,
337 mx, my, (s->h_edge_pos >> 1),
339 src = h->edge_emu_buffer;
342 (avg ? h->dsp.avg_tpel_pixels_tab
343 : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
347 (avg ? s->hdsp.avg_pixels_tab
348 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/* Decode motion vectors for all partitions of a macroblock and run motion
 * compensation for each. `size` encodes the partition geometry, `mode` is
 * FULLPEL/HALFPEL/THIRDPEL or PREDICT_MODE (MVs derived from next_pic
 * instead of being read from the bitstream), `dir` is the prediction
 * direction. Motion vectors are stored in 1/6-pel units internally.
 * NOTE(review): the parameter list tail (dir/avg), several braces and some
 * else-branches are not visible — source lines appear missing. */
355 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
358 int i, j, k, mx, my, dx, dy, x, y;
359 H264Context *h = &s->h;
360 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
361 const int part_height = 16 >> ((unsigned)(size + 1) / 3);
362 const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
363 const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
364 const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
/* iterate over all partitions of the 16x16 macroblock */
366 for (i = 0; i < 16; i += part_height)
367 for (j = 0; j < 16; j += part_width) {
368 const int b_xy = (4 * h->mb_x + (j >> 2)) +
369 (4 * h->mb_y + (i >> 2)) * h->b_stride;
371 x = 16 * h->mb_x + j;
372 y = 16 * h->mb_y + i;
/* k: scan8-style index of this partition's top-left 4x4 block */
373 k = (j >> 2 & 1) + (i >> 1 & 2) +
374 (j >> 1 & 4) + (i & 8);
376 if (mode != PREDICT_MODE) {
377 pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
/* PREDICT_MODE: derive MV from co-located next_pic vector... */
379 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
380 my = s->next_pic->motion_val[0][b_xy][1] << 1;
/* ...temporally scaled by the frame_num offsets (B-frame direct) */
383 mx = mx * h->frame_num_offset /
384 h->prev_frame_num_offset + 1 >> 1;
385 my = my * h->frame_num_offset /
386 h->prev_frame_num_offset + 1 >> 1;
388 mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
389 h->prev_frame_num_offset + 1 >> 1;
390 my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
391 h->prev_frame_num_offset + 1 >> 1;
395 /* clip motion vector prediction to frame border */
396 mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
397 my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
399 /* get (optional) motion vector differential */
400 if (mode == PREDICT_MODE) {
403 dy = svq3_get_se_golomb(&h->gb);
404 dx = svq3_get_se_golomb(&h->gb);
406 if (dx == INVALID_VLC || dy == INVALID_VLC) {
407 av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
412 /* compute motion vector */
413 if (mode == THIRDPEL_MODE) {
415 mx = (mx + 1 >> 1) + dx;
416 my = (my + 1 >> 1) + dy;
/* fx/fy: full-pel part; dxy: thirdpel sub-position index 0..8 */
417 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
418 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
419 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
421 svq3_mc_dir_part(s, x, y, part_width, part_height,
422 fx, fy, dxy, 1, dir, avg);
425 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
426 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
427 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
428 dxy = (mx & 1) + 2 * (my & 1);
430 svq3_mc_dir_part(s, x, y, part_width, part_height,
431 mx >> 1, my >> 1, dxy, 0, dir, avg);
/* FULLPEL_MODE: round 1/6-pel prediction to full-pel */
435 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
436 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
438 svq3_mc_dir_part(s, x, y, part_width, part_height,
439 mx, my, 0, 0, dir, avg);
444 /* update mv_cache */
445 if (mode != PREDICT_MODE) {
446 int32_t mv = pack16to32(mx, my);
448 if (part_height == 8 && i < 8) {
449 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
451 if (part_width == 8 && j < 8)
452 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
454 if (part_width == 8 && j < 8)
455 AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
456 if (part_width == 4 || part_height == 4)
457 AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
460 /* write back motion vectors */
461 fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
462 part_width >> 2, part_height >> 2, h->b_stride,
463 pack16to32(mx, my), 4);
/* Decode one macroblock: dispatch on mb_type (0 = SKIP, <8 = INTER,
 * 8/33 = INTRA4x4, else INTRA16x16), fill the H264Context caches
 * (mv_cache, ref_cache, intra4x4_pred_mode_cache, non_zero_count_cache),
 * read cbp/qscale, and decode DC/AC residual blocks. Returns nonzero on
 * error (presumably — return statements are not visible here).
 * NOTE(review): many lines are missing throughout this function (else
 * branches, braces, some statements); the visible structure matches the
 * section comments below but cannot be fully verified from this excerpt. */
469 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
471 H264Context *h = &s->h;
472 int i, j, k, m, dir, mode;
476 const int mb_xy = h->mb_xy;
477 const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
/* neighbour-availability masks for intra prediction at frame edges */
479 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
480 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
481 h->topright_samples_available = 0xFFFF;
483 if (mb_type == 0) { /* SKIP */
484 if (h->pict_type == AV_PICTURE_TYPE_P ||
485 s->next_pic->mb_type[mb_xy] == -1) {
486 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
489 if (h->pict_type == AV_PICTURE_TYPE_B)
490 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
493 mb_type = MB_TYPE_SKIP;
/* B-frame skip with valid co-located MB: direct prediction both ways */
495 mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
496 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
498 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
501 mb_type = MB_TYPE_16x16;
503 } else if (mb_type < 8) { /* INTER */
/* one bit selects between thirdpel and halfpel when both are enabled */
504 if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
505 mode = THIRDPEL_MODE;
506 else if (s->halfpel_flag &&
507 s->thirdpel_flag == !get_bits1(&h->gb))
513 /* note ref_cache should contain here:
/* fill mv_cache/ref_cache from left/top/topright/topleft neighbours */
521 for (m = 0; m < 2; m++) {
522 if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
523 for (i = 0; i < 4; i++)
524 AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
525 h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
527 for (i = 0; i < 4; i++)
528 AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
531 memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
532 h->cur_pic.motion_val[m][b_xy - h->b_stride],
533 4 * 2 * sizeof(int16_t));
534 memset(&h->ref_cache[m][scan8[0] - 1 * 8],
535 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
537 if (h->mb_x < h->mb_width - 1) {
538 AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
539 h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
540 h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
541 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
542 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
544 h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
546 AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
547 h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
548 h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
549 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
551 h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
553 memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
554 PART_NOT_AVAILABLE, 8);
556 if (h->pict_type != AV_PICTURE_TYPE_B)
560 /* decode motion vector(s) and form prediction(s) */
561 if (h->pict_type == AV_PICTURE_TYPE_P) {
562 if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
564 } else { /* AV_PICTURE_TYPE_B */
566 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
569 for (i = 0; i < 4; i++)
570 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
571 0, 4 * 2 * sizeof(int16_t));
574 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
577 for (i = 0; i < 4; i++)
578 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
579 0, 4 * 2 * sizeof(int16_t));
583 mb_type = MB_TYPE_16x16;
584 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
585 memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* load left/top neighbour 4x4 prediction modes into the cache */
589 for (i = 0; i < 4; i++)
590 h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
591 if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
592 h->left_samples_available = 0x5F5F;
595 h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
596 h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
597 h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
598 h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
600 if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
601 h->top_samples_available = 0x33FF;
604 /* decode prediction codes for luma blocks */
605 for (i = 0; i < 16; i += 2) {
606 vlc = svq3_get_ue_golomb(&h->gb);
609 av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
613 left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
614 top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
/* one VLC yields two modes via the svq3_pred_0/svq3_pred_1 tables */
616 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
617 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
619 if (left[1] == -1 || left[2] == -1) {
620 av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
624 } else { /* mb_type == 33, DC_128_PRED block type */
625 for (i = 0; i < 4; i++)
626 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
629 write_back_intra_pred_mode(h);
632 ff_h264_check_intra4x4_pred_mode(h);
634 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
635 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
637 for (i = 0; i < 4; i++)
638 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
640 h->top_samples_available = 0x33FF;
641 h->left_samples_available = 0x5F5F;
644 mb_type = MB_TYPE_INTRA4x4;
645 } else { /* INTRA16x16 */
646 dir = i_mb_type_info[mb_type - 8].pred_mode;
647 dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
649 if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) == -1) {
650 av_log(h->avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
654 cbp = i_mb_type_info[mb_type - 8].cbp;
655 mb_type = MB_TYPE_INTRA16x16;
/* intra MB in an inter picture: invalidate stored motion vectors */
658 if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
659 for (i = 0; i < 4; i++)
660 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
661 0, 4 * 2 * sizeof(int16_t));
662 if (h->pict_type == AV_PICTURE_TYPE_B) {
663 for (i = 0; i < 4; i++)
664 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
665 0, 4 * 2 * sizeof(int16_t));
668 if (!IS_INTRA4x4(mb_type)) {
669 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
671 if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
672 memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
/* coded block pattern (not transmitted for INTRA16x16) */
675 if (!IS_INTRA16x16(mb_type) &&
676 (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
677 if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48) {
678 av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
682 cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
683 : golomb_to_inter_cbp[vlc];
/* optional per-MB qscale delta; must stay in 0..31 */
685 if (IS_INTRA16x16(mb_type) ||
686 (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
687 h->qscale += svq3_get_se_golomb(&h->gb);
689 if (h->qscale > 31u) {
690 av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
694 if (IS_INTRA16x16(mb_type)) {
695 AV_ZERO128(h->mb_luma_dc[0] + 0);
696 AV_ZERO128(h->mb_luma_dc[0] + 8);
697 if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
698 av_log(h->avctx, AV_LOG_ERROR,
699 "error while decoding intra luma dc\n");
/* luma AC residual blocks for each coded 8x8 quadrant */
705 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
706 const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
708 for (i = 0; i < 4; i++)
709 if ((cbp & (1 << i))) {
710 for (j = 0; j < 4; j++) {
711 k = index ? (1 * (j & 1) + 2 * (i & 1) +
712 2 * (j & 2) + 4 * (i & 2))
714 h->non_zero_count_cache[scan8[k]] = 1;
716 if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
717 av_log(h->avctx, AV_LOG_ERROR,
718 "error while decoding block\n");
/* chroma DC, then chroma AC if signalled by the high cbp bits */
725 for (i = 1; i < 3; ++i)
726 if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
727 av_log(h->avctx, AV_LOG_ERROR,
728 "error while decoding chroma dc block\n");
733 for (i = 1; i < 3; i++) {
734 for (j = 0; j < 4; j++) {
736 h->non_zero_count_cache[scan8[k]] = 1;
738 if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
739 av_log(h->avctx, AV_LOG_ERROR,
740 "error while decoding chroma ac block\n");
750 h->cur_pic.mb_type[mb_xy] = mb_type;
752 if (IS_INTRA(mb_type))
753 h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
/* Parse an SVQ3 slice header: validate the 8-bit header byte, locate the
 * next slice boundary, undo the optional watermark XOR scrambling, read
 * slice type / qscale / flags, and reset intra predictors around the slice
 * start. Returns nonzero on error (presumably — return statements are not
 * visible in this excerpt).
 * NOTE(review): declarations (header, i, slice_id) and several braces are
 * missing from this excerpt. */
758 static int svq3_decode_slice_header(AVCodecContext *avctx)
760 SVQ3Context *s = avctx->priv_data;
761 H264Context *h = &s->h;
762 const int mb_xy = h->mb_xy;
766 header = get_bits(&h->gb, 8);
/* low 5 bits must be 1 or 2; bits 5-6 give the slice-length field size */
768 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
770 av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
773 int length = header >> 5 & 3;
775 s->next_slice_index = get_bits_count(&h->gb) +
776 8 * show_bits(&h->gb, 8 * length) +
779 if (s->next_slice_index > h->gb.size_in_bits) {
780 av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
784 h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
785 skip_bits(&h->gb, 8);
/* descramble the watermarked bitstream in place (see decode_init) */
787 if (s->watermark_key) {
788 uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
789 AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
790 header ^ s->watermark_key);
793 memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
794 &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
796 skip_bits_long(&h->gb, 0);
799 if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
800 av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
804 h->slice_type = golomb_to_pict_type[slice_id];
806 if ((header & 0x9F) == 2) {
/* mid-frame slice: decode the starting MB address as a skip run */
807 i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
808 h->mb_skip_run = get_bits(&h->gb, i) -
809 (h->mb_y * h->mb_width + h->mb_x);
815 h->slice_num = get_bits(&h->gb, 8);
816 h->qscale = get_bits(&h->gb, 5);
817 s->adaptive_quant = get_bits1(&h->gb);
826 skip_bits(&h->gb, 2);
/* skip optional extension bytes flagged by a unary sequence */
828 while (get_bits1(&h->gb))
829 skip_bits(&h->gb, 8);
831 /* reset intra predictors and invalidate motion vector references */
833 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
834 -1, 4 * sizeof(int8_t));
835 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
836 -1, 8 * sizeof(int8_t) * h->mb_x);
839 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
840 -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
843 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/* Decoder init: allocate the three Picture slots, initialise the shared
 * H.264 context and DSP helpers, then parse the "SEQH" extradata atom
 * (frame size, hpel/tpel flags, low_delay, optional zlib-compressed
 * watermark used to derive the bitstream descrambling key) and size the
 * macroblock tables.
 * NOTE(review): many lines are missing in this excerpt — loop/brace
 * closings, most of the frame_size_code switch, some declarations (gb,
 * size, frame_size_code, buf) and the trailing return. */
849 static av_cold int svq3_decode_init(AVCodecContext *avctx)
851 SVQ3Context *s = avctx->priv_data;
852 H264Context *h = &s->h;
854 unsigned char *extradata;
855 unsigned char *extradata_end;
857 int marker_found = 0;
/* all three picture slots allocated up front; all freed on any failure */
859 s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
860 s->last_pic = av_mallocz(sizeof(*s->last_pic));
861 s->next_pic = av_mallocz(sizeof(*s->next_pic));
862 if (!s->next_pic || !s->last_pic || !s->cur_pic) {
863 av_freep(&s->cur_pic);
864 av_freep(&s->last_pic);
865 av_freep(&s->next_pic);
866 return AVERROR(ENOMEM);
869 if (ff_h264_decode_init(avctx) < 0)
872 ff_hpeldsp_init(&s->hdsp, avctx->flags);
873 h->flags = avctx->flags;
875 h->picture_structure = PICT_FRAME;
876 avctx->pix_fmt = avctx->codec->pix_fmts[0];
878 h->chroma_qp[0] = h->chroma_qp[1] = 4;
879 h->chroma_x_shift = h->chroma_y_shift = 1;
882 s->thirdpel_flag = 1;
885 /* prowl for the "SEQH" marker in the extradata */
886 extradata = (unsigned char *)avctx->extradata;
887 extradata_end = avctx->extradata + avctx->extradata_size;
889 for (m = 0; m + 8 < avctx->extradata_size; m++) {
890 if (!memcmp(extradata, "SEQH", 4)) {
898 /* if a match was found, parse the extra data */
903 size = AV_RB32(&extradata[4]);
904 if (size > extradata_end - extradata - 8)
905 return AVERROR_INVALIDDATA;
906 init_get_bits(&gb, extradata + 8, size * 8);
908 /* 'frame size code' and optional 'width, height' */
909 frame_size_code = get_bits(&gb, 3);
910 switch (frame_size_code) {
/* NOTE(review): the fixed-size cases of this switch are not visible;
 * only the explicit width/height case remains */
940 avctx->width = get_bits(&gb, 12);
941 avctx->height = get_bits(&gb, 12);
945 s->halfpel_flag = get_bits1(&gb);
946 s->thirdpel_flag = get_bits1(&gb);
954 h->low_delay = get_bits1(&gb);
959 while (get_bits1(&gb))
962 s->unknown_flag = get_bits1(&gb);
963 avctx->has_b_frames = !h->low_delay;
964 if (s->unknown_flag) {
/* watermark header: dimensions + opaque fields, then zlib data */
966 unsigned watermark_width = svq3_get_ue_golomb(&gb);
967 unsigned watermark_height = svq3_get_ue_golomb(&gb);
968 int u1 = svq3_get_ue_golomb(&gb);
969 int u2 = get_bits(&gb, 8);
970 int u3 = get_bits(&gb, 2);
971 int u4 = svq3_get_ue_golomb(&gb);
972 unsigned long buf_len = watermark_width *
973 watermark_height * 4;
974 int offset = get_bits_count(&gb) + 7 >> 3;
/* guard against width*height*4 overflowing before the allocation */
977 if ((uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
980 buf = av_malloc(buf_len);
981 av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
982 watermark_width, watermark_height);
983 av_log(avctx, AV_LOG_DEBUG,
984 "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
985 u1, u2, u3, u4, offset);
986 if (uncompress(buf, &buf_len, extradata + 8 + offset,
987 size - offset) != Z_OK) {
988 av_log(avctx, AV_LOG_ERROR,
989 "could not uncompress watermark logo\n");
/* key = 16-bit checksum of the decompressed logo, replicated to 32 bits */
993 s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
994 s->watermark_key = s->watermark_key << 16 | s->watermark_key;
995 av_log(avctx, AV_LOG_DEBUG,
996 "watermark key %#x\n", s->watermark_key);
999 av_log(avctx, AV_LOG_ERROR,
1000 "this svq3 file contains watermark which need zlib support compiled in\n");
/* derive macroblock geometry from the (possibly updated) frame size */
1006 h->width = avctx->width;
1007 h->height = avctx->height;
1008 h->mb_width = (h->width + 15) / 16;
1009 h->mb_height = (h->height + 15) / 16;
1010 h->mb_stride = h->mb_width + 1;
1011 h->mb_num = h->mb_width * h->mb_height;
1012 h->b_stride = 4 * h->mb_width;
1013 s->h_edge_pos = h->mb_width * 16;
1014 s->v_edge_pos = h->mb_height * 16;
1016 if (ff_h264_alloc_tables(h) < 0) {
1017 av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1018 return AVERROR(ENOMEM);
/* Release all side-data buffers of a Picture (motion vectors, ref indices,
 * mb types) and unreference its frame. Safe on partially-initialised
 * pictures since av_buffer_unref/av_frame_unref handle NULL/empty inputs.
 * NOTE(review): the declaration of `i` and closing braces are not visible. */
1024 static void free_picture(AVCodecContext *avctx, Picture *pic)
1027 for (i = 0; i < 2; i++) {
1028 av_buffer_unref(&pic->motion_val_buf[i]);
1029 av_buffer_unref(&pic->ref_index_buf[i]);
1031 av_buffer_unref(&pic->mb_type_buf);
1033 av_frame_unref(&pic->f);
/* Allocate (on first use) the per-picture side buffers — mb_type,
 * motion_val, ref_index — then obtain the actual frame buffer via
 * ff_get_buffer and cache linesize/uvlinesize plus the edge-emulation
 * scratch buffer. Returns 0 on success, a negative AVERROR otherwise
 * (presumably via the free_picture cleanup path at the end).
 * NOTE(review): declarations (i, ret) and some braces/labels are not
 * visible in this excerpt. */
1036 static int get_buffer(AVCodecContext *avctx, Picture *pic)
1038 SVQ3Context *s = avctx->priv_data;
1039 H264Context *h = &s->h;
1040 const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1041 const int mb_array_size = h->mb_stride * h->mb_height;
1042 const int b4_stride = h->mb_width * 4 + 1;
1043 const int b4_array_size = b4_stride * h->mb_height * 4;
1046 if (!pic->motion_val_buf[0]) {
1049 pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1050 if (!pic->mb_type_buf)
1051 return AVERROR(ENOMEM);
/* skip the guard rows/column so negative neighbour offsets stay in-bounds */
1052 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1054 for (i = 0; i < 2; i++) {
1055 pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1056 pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1057 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1058 ret = AVERROR(ENOMEM);
1062 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1063 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* B-frames are not used as references */
1066 pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1068 ret = ff_get_buffer(avctx, &pic->f,
1069 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1073 if (!h->edge_emu_buffer) {
1074 h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1075 if (!h->edge_emu_buffer)
1076 return AVERROR(ENOMEM);
1079 h->linesize = pic->f.linesize[0];
1080 h->uvlinesize = pic->f.linesize[1];
/* error path: tear down whatever was allocated above */
1084 free_picture(avctx, pic);
/* Top-level frame decode: flush the delayed picture on EOF, parse the slice
 * header, rotate the cur/last/next Picture slots, synthesise grey reference
 * frames when references are missing after a seek, honour skip_frame, and
 * decode all macroblocks slice by slice before emitting the output frame.
 * NOTE(review): numerous lines are missing throughout (declarations of
 * ret/i/j/m/mb_type, many braces and else branches, return statements);
 * comments below describe only what the visible lines show. */
1088 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1089 int *got_frame, AVPacket *avpkt)
1091 const uint8_t *buf = avpkt->data;
1092 SVQ3Context *s = avctx->priv_data;
1093 H264Context *h = &s->h;
1094 int buf_size = avpkt->size;
1097 /* special case for last picture */
1098 if (buf_size == 0) {
1099 if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1100 ret = av_frame_ref(data, &s->next_pic->f);
1103 s->last_frame_output = 1;
1109 init_get_bits(&h->gb, buf, 8 * buf_size);
1111 h->mb_x = h->mb_y = h->mb_xy = 0;
1113 if (svq3_decode_slice_header(avctx))
1116 h->pict_type = h->slice_type;
/* non-B frames promote the previous "next" picture to reference */
1118 if (h->pict_type != AV_PICTURE_TYPE_B)
1119 FFSWAP(Picture*, s->next_pic, s->last_pic);
1121 av_frame_unref(&s->cur_pic->f);
1123 /* for skipping the frame */
1124 s->cur_pic->f.pict_type = h->pict_type;
1125 s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1127 ret = get_buffer(avctx, s->cur_pic);
1131 h->cur_pic_ptr = s->cur_pic;
1132 av_frame_unref(&h->cur_pic.f);
/* shallow-copy into the embedded H264 picture, then re-ref the frame */
1133 h->cur_pic = *s->cur_pic;
1134 ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
/* per-frame block offsets depend on the (possibly new) linesizes */
1138 for (i = 0; i < 16; i++) {
1139 h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1140 h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1142 for (i = 0; i < 16; i++) {
1143 h->block_offset[16 + i] =
1144 h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1145 h->block_offset[48 + 16 + i] =
1146 h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
/* missing references (e.g. after a seek): fabricate a grey frame */
1149 if (h->pict_type != AV_PICTURE_TYPE_I) {
1150 if (!s->last_pic->f.data[0]) {
1151 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1152 ret = get_buffer(avctx, s->last_pic);
1155 memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1156 memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1157 s->last_pic->f.linesize[1]);
1158 memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1159 s->last_pic->f.linesize[2]);
1162 if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1163 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1164 ret = get_buffer(avctx, s->next_pic);
1167 memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1168 memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1169 s->next_pic->f.linesize[1]);
1170 memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1171 s->next_pic->f.linesize[2]);
1175 if (avctx->debug & FF_DEBUG_PICT_INFO)
1176 av_log(h->avctx, AV_LOG_DEBUG,
1177 "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1178 av_get_picture_type_char(h->pict_type),
1179 s->halfpel_flag, s->thirdpel_flag,
1180 s->adaptive_quant, h->qscale, h->slice_num);
1182 if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1183 avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1184 avctx->skip_frame >= AVDISCARD_ALL)
1187 if (s->next_p_frame_damaged) {
1188 if (h->pict_type == AV_PICTURE_TYPE_B)
1191 s->next_p_frame_damaged = 0;
/* maintain frame_num offsets used for B-frame temporal MV scaling */
1194 if (h->pict_type == AV_PICTURE_TYPE_B) {
1195 h->frame_num_offset = h->slice_num - h->prev_frame_num;
1197 if (h->frame_num_offset < 0)
1198 h->frame_num_offset += 256;
1199 if (h->frame_num_offset == 0 ||
1200 h->frame_num_offset >= h->prev_frame_num_offset) {
1201 av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1205 h->prev_frame_num = h->frame_num;
1206 h->frame_num = h->slice_num;
1207 h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1209 if (h->prev_frame_num_offset < 0)
1210 h->prev_frame_num_offset += 256;
/* pre-seed ref_cache rows; only the border entries are marked unavailable */
1213 for (m = 0; m < 2; m++) {
1215 for (i = 0; i < 4; i++) {
1217 for (j = -1; j < 4; j++)
1218 h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1220 h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
/* main macroblock loop */
1224 for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1225 for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1227 h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
/* detect end of current slice and parse the next slice header */
1229 if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1230 ((get_bits_count(&h->gb) & 7) == 0 ||
1231 show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1232 skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1233 h->gb.size_in_bits = 8 * buf_size;
1235 if (svq3_decode_slice_header(avctx))
1238 /* TODO: support s->mb_skip_run */
1241 mb_type = svq3_get_ue_golomb(&h->gb);
1243 if (h->pict_type == AV_PICTURE_TYPE_I)
1245 else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1247 if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1248 av_log(h->avctx, AV_LOG_ERROR,
1249 "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1254 ff_h264_hl_decode_mb(h);
1256 if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1257 h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1258 (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1261 ff_draw_horiz_band(avctx, NULL, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL,
1262 16 * h->mb_y, 16, h->picture_structure, 0, 0,
1263 h->low_delay, h->mb_height * 16, h->mb_width * 16);
/* choose which picture to output (reordering unless low_delay) */
1266 if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1267 ret = av_frame_ref(data, &s->cur_pic->f);
1268 else if (s->last_pic->f.data[0])
1269 ret = av_frame_ref(data, &s->last_pic->f);
1273 /* Do not output the last pic after seeking. */
1274 if (s->last_pic->f.data[0] || h->low_delay)
1277 if (h->pict_type != AV_PICTURE_TYPE_B) {
1278 FFSWAP(Picture*, s->cur_pic, s->next_pic);
1280 av_frame_unref(&s->cur_pic->f);
/* Decoder teardown: free the side buffers of all three picture slots and
 * the slots themselves, drop the embedded H264 picture frame reference,
 * and release the shared H.264 context.
 * NOTE(review): the trailing return statement and closing brace are not
 * visible in this excerpt. */
1286 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1288 SVQ3Context *s = avctx->priv_data;
1289 H264Context *h = &s->h;
1291 free_picture(avctx, s->cur_pic);
1292 free_picture(avctx, s->next_pic);
1293 free_picture(avctx, s->last_pic);
1294 av_freep(&s->cur_pic);
1295 av_freep(&s->next_pic);
1296 av_freep(&s->last_pic);
1298 av_frame_unref(&h->cur_pic.f);
1300 ff_h264_free_context(h);
1305 AVCodec ff_svq3_decoder = {
1307 .type = AVMEDIA_TYPE_VIDEO,
1308 .id = AV_CODEC_ID_SVQ3,
1309 .priv_data_size = sizeof(SVQ3Context),
1310 .init = svq3_decode_init,
1311 .close = svq3_decode_end,
1312 .decode = svq3_decode_frame,
1313 .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1316 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1317 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,