2 * Copyright (c) 2003 The Libav Project
4 * This file is part of Libav.
6 * Libav is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * Libav is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with Libav; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
44 #include "mpegvideo.h"
47 #include "h264data.h" // FIXME FIXME FIXME
49 #include "h264_mvpred.h"
52 #include "rectangle.h"
53 #include "vdpau_internal.h"
/* Decoder-private state (fields of the SVQ3 context struct; the struct
 * declaration itself is not visible in this excerpt). */
77 uint32_t watermark_key;
79 int next_p_frame_damaged;
82 int last_frame_output;
/* Motion-vector precision / prediction modes consumed by svq3_mc_dir()
 * and svq3_decode_mb() below. */
85 #define FULLPEL_MODE 1
86 #define HALFPEL_MODE 2
87 #define THIRDPEL_MODE 3
88 #define PREDICT_MODE 4
90 /* dual scan (from some older h264 draft)
/* SVQ3-specific 4x4 coefficient scan order; each entry is x + y * 4. */
99 static const uint8_t svq3_scan[16] = {
100 0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
101 2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
102 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
103 0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
/* Anti-diagonal walk of a 5x5 grid: maps a luma-prediction VLC index to a
 * pair of indices into svq3_pred_1[][] (used by the luma prediction loop
 * in svq3_decode_mb()). */
106 static const uint8_t svq3_pred_0[25][2] = {
109 { 0, 2 }, { 1, 1 }, { 2, 0 },
110 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
111 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
112 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
113 { 2, 4 }, { 3, 3 }, { 4, 2 },
/* svq3_pred_1[top + 1][left + 1][] ranks candidate 4x4 intra prediction
 * modes given the decoded top/left neighbour modes; -1 marks an unusable
 * slot ("weird prediction" is reported when one is selected). */
118 static const int8_t svq3_pred_1[6][6][5] = {
119 { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
120 { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
121 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
122 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
123 { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
124 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
125 { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
126 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
127 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
128 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
129 { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
130 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/* (run, level) pairs for the first 16 short VLC codes of the coefficient
 * tables; indexed by 'intra' in svq3_decode_block() — [0] inter, [1] intra.
 * NOTE(review): the struct's field declarations are on lines not shown in
 * this excerpt; field order here is assumed to be { run, level }. */
133 static const struct {
136 } svq3_dct_tables[2][16] = {
137 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
138 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
139 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
140 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/* Dequantizer scale per qp (0..31), applied with a 20-bit fixed-point
 * downshift in the IDCT routines below. */
143 static const uint32_t svq3_dequant_coeff[32] = {
144 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
145 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
146 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
147 61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
/* Dequantize and inverse-transform the 4x4 block of luma DC coefficients.
 * SVQ3 uses a 13/17/7 fixed-point rotation in place of the H.264 Hadamard
 * butterflies; qp selects the scale from svq3_dequant_coeff[]. */
150 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
152 const int qmul = svq3_dequant_coeff[qp];
/* Output row offsets for the vertical pass ('stride' is a constant
 * defined outside this excerpt — TODO confirm its value). */
156 static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
/* Horizontal pass: 1-D transform of each input row into temp[]. */
158 for (i = 0; i < 4; i++) {
159 const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
160 const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
161 const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
162 const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
164 temp[4 * i + 0] = z0 + z3;
165 temp[4 * i + 1] = z1 + z2;
166 temp[4 * i + 2] = z1 - z2;
167 temp[4 * i + 3] = z0 - z3;
/* Vertical pass: transform each column, scale by qmul and round.
 * '... * qmul + 0x80000 >> 20' parses as ((x * qmul + 0x80000) >> 20),
 * i.e. round-to-nearest with a 20-bit fixed-point downshift. */
170 for (i = 0; i < 4; i++) {
171 const int offset = x_offset[i];
172 const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
173 const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
174 const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
175 const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
177 output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
178 output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
179 output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
180 output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
/* Dequantize, inverse-transform and add one 4x4 residual block to dst.
 * 'dc' selects special DC handling: when dc == 1 the DC term uses the
 * fixed 1538 scale, otherwise it is rescaled through qmul (semantics of
 * other dc values come from the caller — see svq3_decode_mb()). */
185 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
186                         int stride, int qp, int dc)
188 const int qmul = svq3_dequant_coeff[qp];
192 dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
193                 : qmul * (block[0] >> 3) / 2);
/* Horizontal pass, in place over block[] (same 13/17/7 rotation as the
 * luma-DC transform above). */
197 for (i = 0; i < 4; i++) {
198 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
199 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
200 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
201 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
203 block[0 + 4 * i] = z0 + z3;
204 block[1 + 4 * i] = z1 + z2;
205 block[2 + 4 * i] = z1 - z2;
206 block[3 + 4 * i] = z0 - z3;
/* Vertical pass: fold in the DC term and rounding bias, shift down by 20
 * bits, and saturate-add onto the destination pixels. */
209 for (i = 0; i < 4; i++) {
210 const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
211 const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
212 const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
213 const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
214 const int rr = (dc + 0x80000);
216 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
217 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
218 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
219 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
/* Residual consumed; clear the block for the next macroblock. */
222 memset(block, 0, 16 * sizeof(int16_t));
/* Decode the coefficients of one block from the bitstream into block[],
 * using exp-Golomb-coded (run, level) pairs.
 * 'type' selects the scan pattern (0 = luma DC zigzag, 1 = zigzag,
 * 2 = svq3 scan, 3 = chroma DC); 'index' is the first coefficient to
 * decode.  Returns nonzero on error (error paths are on lines not shown
 * in this excerpt). */
225 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
226                                     int index, const int type)
228 static const uint8_t *const scan_patterns[4] =
229 { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
231 int run, level, limit;
/* intra is 1 for type >= 2 (integer 3*type >> 2), selecting the intra
 * (run, level) table and the intra run limits below. */
233 const int intra = 3 * type >> 2;
234 const uint8_t *const scan = scan_patterns[type];
/* Outer loop: decode groups of coefficients; limit starts at 16 (or 8 for
 * intra) and advances by 8 per group. */
236 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
237 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
/* Sign is carried in the VLC parity: odd -> positive, even -> negative
 * (applied via (level ^ sign) - sign below). */
238 int sign = (vlc & 1) ? 0 : -1;
245 } else if (vlc < 4) {
250 level = (vlc + 9 >> 2) - run;
/* Short codes: look (run, level) up directly in the table. */
254 run = svq3_dct_tables[intra][vlc].run;
255 level = svq3_dct_tables[intra][vlc].level;
/* Level base depends on the run length; inter vs intra use different
 * thresholds (surrounding expression lines are not in this excerpt). */
259 ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
263 ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
/* Skip 'run' zero coefficients; running past 'limit' is an error. */
267 if ((index += run) >= limit)
270 block[scan[index]] = (level ^ sign) - sign;
/* Motion-compensate one partition (width x height at pixel x,y) from the
 * reference selected by 'dir' (0 = last_pic, 1 = next_pic).
 * (mx, my) is the integer source position; 'dxy' the sub-pel phase;
 * 'thirdpel' selects the tpel interpolation tables; 'avg' selects
 * averaging (bi-directional) instead of plain put. */
281 static inline void svq3_mc_dir_part(SVQ3Context *s,
282                                     int x, int y, int width, int height,
283                                     int mx, int my, int dxy,
284                                     int thirdpel, int dir, int avg)
286 H264Context *h = &s->h;
287 const Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
290 int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
/* Out-of-frame source: clamp the MV and use edge emulation below. */
295 if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
296     my < 0 || my >= s->v_edge_pos - height - 1) {
298 mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
299 my = av_clip(my, -16, s->v_edge_pos - height + 15);
302 /* form component predictions */
303 dest = h->cur_pic.f.data[0] + x + y * h->linesize;
304 src = pic->f.data[0] + mx + my * h->linesize;
/* Replicate border pixels into the scratch buffer when the source block
 * overlaps the frame edge. */
307 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->linesize,
308                          width + 1, height + 1,
309                          mx, my, s->h_edge_pos, s->v_edge_pos);
310 src = h->edge_emu_buffer;
/* Luma: thirdpel path uses the tpel tables, otherwise hpel tables. */
313 (avg ? h->dsp.avg_tpel_pixels_tab
314      : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
317 (avg ? s->hdsp.avg_pixels_tab
318      : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
/* Chroma planes at half resolution, skipped in grayscale-only mode. */
321 if (!(h->flags & CODEC_FLAG_GRAY)) {
/* Halve the MV with rounding toward the block position. */
322 mx = mx + (mx < (int) x) >> 1;
323 my = my + (my < (int) y) >> 1;
325 height = height >> 1;
328 for (i = 1; i < 3; i++) {
329 dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
330 src = pic->f.data[i] + mx + my * h->uvlinesize;
333 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->uvlinesize,
334                          width + 1, height + 1,
335                          mx, my, (s->h_edge_pos >> 1),
337 src = h->edge_emu_buffer;
340 (avg ? h->dsp.avg_tpel_pixels_tab
341      : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
345 (avg ? s->hdsp.avg_pixels_tab
346      : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/* Decode motion vectors for every partition of the current macroblock and
 * run motion compensation for each.  'size' encodes the partition shape,
 * 'mode' the MV precision (FULLPEL/HALFPEL/THIRDPEL) or PREDICT_MODE for
 * B-frame direct prediction from next_pic; 'dir'/'avg' are forwarded to
 * svq3_mc_dir_part().  MVs are kept in units of 1/6 pel internally. */
353 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
356 int i, j, k, mx, my, dx, dy, x, y;
357 H264Context *h = &s->h;
358 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
359 const int part_height = 16 >> ((unsigned)(size + 1) / 3);
360 const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
361 const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
362 const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
364 for (i = 0; i < 16; i += part_height)
365     for (j = 0; j < 16; j += part_width) {
366 const int b_xy = (4 * h->mb_x + (j >> 2)) +
367                  (4 * h->mb_y + (i >> 2)) * h->b_stride;
369 x = 16 * h->mb_x + j;
370 y = 16 * h->mb_y + i;
/* k indexes the scan8 cache position for this partition. */
371 k = (j >> 2 & 1) + (i >> 1 & 2) +
372     (j >> 1 & 4) + (i & 8);
/* MV prediction: spatial median for normal modes; for PREDICT_MODE the
 * co-located MV from next_pic is taken and scaled by the temporal
 * distance (frame_num_offset / prev_frame_num_offset), with rounding
 * via '... + 1 >> 1'. */
374 if (mode != PREDICT_MODE) {
375 pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
377 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
378 my = s->next_pic->motion_val[0][b_xy][1] << 1;
381 mx = mx * h->frame_num_offset /
382      h->prev_frame_num_offset + 1 >> 1;
383 my = my * h->frame_num_offset /
384      h->prev_frame_num_offset + 1 >> 1;
386 mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
387      h->prev_frame_num_offset + 1 >> 1;
388 my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
389      h->prev_frame_num_offset + 1 >> 1;
393 /* clip motion vector prediction to frame border */
394 mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
395 my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
397 /* get (optional) motion vector differential */
398 if (mode == PREDICT_MODE) {
/* Note: dy is read before dx — the bitstream stores them in this order. */
401 dy = svq3_get_se_golomb(&h->gb);
402 dx = svq3_get_se_golomb(&h->gb);
404 if (dx == INVALID_VLC || dy == INVALID_VLC) {
405 av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
410 /* compute motion vector */
411 if (mode == THIRDPEL_MODE) {
/* Convert 1/6-pel prediction to 1/3 pel, add the differential, then
 * split into full-pel position (fx, fy) and thirdpel phase dxy. */
413 mx = (mx + 1 >> 1) + dx;
414 my = (my + 1 >> 1) + dy;
415 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
416 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
417 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
419 svq3_mc_dir_part(s, x, y, part_width, part_height,
420                  fx, fy, dxy, 1, dir, avg);
/* Halfpel: prediction reduced to 1/2-pel units; dxy is the hpel phase. */
423 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
424 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
425 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
426 dxy = (mx & 1) + 2 * (my & 1);
428 svq3_mc_dir_part(s, x, y, part_width, part_height,
429                  mx >> 1, my >> 1, dxy, 0, dir, avg);
/* Fullpel: prediction reduced to integer-pel units, no sub-pel phase. */
433 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
434 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
436 svq3_mc_dir_part(s, x, y, part_width, part_height,
437                  mx, my, 0, 0, dir, avg);
442 /* update mv_cache */
443 if (mode != PREDICT_MODE) {
444 int32_t mv = pack16to32(mx, my);
446 if (part_height == 8 && i < 8) {
447 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
449 if (part_width == 8 && j < 8)
450 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
452 if (part_width == 8 && j < 8)
453 AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
454 if (part_width == 4 || part_height == 4)
455 AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
458 /* write back motion vectors */
459 fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
460                part_width >> 2, part_height >> 2, h->b_stride,
461                pack16to32(mx, my), 4);
/* Decode one macroblock: dispatch on mb_type (SKIP / INTER / INTRA4x4 /
 * INTRA16x16), fill the H.264 context's neighbour caches, decode motion
 * or intra prediction modes, cbp, qscale delta and the residual blocks.
 * Returns nonzero on error (error paths are on lines not shown here). */
467 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
469 H264Context *h = &s->h;
470 int i, j, k, m, dir, mode;
474 const int mb_xy = h->mb_xy;
475 const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
/* Neighbour availability masks for intra prediction at frame borders. */
477 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
478 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
479 h->topright_samples_available = 0xFFFF;
481 if (mb_type == 0) { /* SKIP */
/* P-skip (or B-skip without a usable co-located type): copy the
 * co-located 16x16 block with zero motion from each reference. */
482 if (h->pict_type == AV_PICTURE_TYPE_P ||
483     s->next_pic->mb_type[mb_xy] == -1) {
484 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
487 if (h->pict_type == AV_PICTURE_TYPE_B)
488 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
491 mb_type = MB_TYPE_SKIP;
/* B-skip with temporal direct prediction from next_pic's mb_type. */
493 mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
494 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
496 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
499 mb_type = MB_TYPE_16x16;
501 } else if (mb_type < 8) { /* INTER */
/* MV precision flags: a bit is read only when both flags could apply. */
502 if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
503 mode = THIRDPEL_MODE;
504 else if (s->halfpel_flag &&
505          s->thirdpel_flag == !get_bits1(&h->gb))
511 /* note ref_cache should contain here:
/* Populate mv_cache / ref_cache from the left, top, top-right and
 * top-left neighbours for both directions (m = 0, 1); -1 in
 * intra4x4_pred_mode marks an intra (or invalidated) neighbour whose
 * MVs must not be used. */
519 for (m = 0; m < 2; m++) {
520 if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
521 for (i = 0; i < 4; i++)
522 AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
523           h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
525 for (i = 0; i < 4; i++)
526 AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
529 memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
530        h->cur_pic.motion_val[m][b_xy - h->b_stride],
531        4 * 2 * sizeof(int16_t));
532 memset(&h->ref_cache[m][scan8[0] - 1 * 8],
533        (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
535 if (h->mb_x < h->mb_width - 1) {
536 AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
537           h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
538 h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
539     (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
540      h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
542 h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
544 AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
545           h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
546 h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
547     (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
549 h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
551 memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
552        PART_NOT_AVAILABLE, 8);
/* Only one direction is cached for P frames. */
554 if (h->pict_type != AV_PICTURE_TYPE_B)
558 /* decode motion vector(s) and form prediction(s) */
559 if (h->pict_type == AV_PICTURE_TYPE_P) {
560 if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
562 } else { /* AV_PICTURE_TYPE_B */
564 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
567 for (i = 0; i < 4; i++)
568 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
569        0, 4 * 2 * sizeof(int16_t));
572 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
575 for (i = 0; i < 4; i++)
576 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
577        0, 4 * 2 * sizeof(int16_t));
581 mb_type = MB_TYPE_16x16;
582 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
583 memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* Seed the prediction-mode cache from the left/top neighbours; -1
 * entries downgrade the availability masks. */
587 for (i = 0; i < 4; i++)
588 h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
589 if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
590 h->left_samples_available = 0x5F5F;
593 h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
594 h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
595 h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
596 h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
598 if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
599 h->top_samples_available = 0x33FF;
602 /* decode prediction codes for luma blocks */
/* Each VLC yields two 4x4 modes via svq3_pred_0/svq3_pred_1. */
603 for (i = 0; i < 16; i += 2) {
604 vlc = svq3_get_ue_golomb(&h->gb);
607 av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
611 left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
612 top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
614 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
615 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
617 if (left[1] == -1 || left[2] == -1) {
618 av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
622 } else { /* mb_type == 33, DC_128_PRED block type */
623 for (i = 0; i < 4; i++)
624 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
627 write_back_intra_pred_mode(h);
/* mb_type 8: validate the decoded modes against availability. */
630 ff_h264_check_intra4x4_pred_mode(h);
632 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
633 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
/* mb_type 33: all blocks forced to DC prediction, neighbours ignored. */
635 for (i = 0; i < 4; i++)
636 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
638 h->top_samples_available = 0x33FF;
639 h->left_samples_available = 0x5F5F;
642 mb_type = MB_TYPE_INTRA4x4;
643 } else { /* INTRA16x16 */
/* Remap the H.264 table's mode ordering to SVQ3's. */
644 dir = i_mb_type_info[mb_type - 8].pred_mode;
645 dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
647 if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) == -1) {
648 av_log(h->avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
652 cbp = i_mb_type_info[mb_type - 8].cbp;
653 mb_type = MB_TYPE_INTRA16x16;
/* Intra MB inside an inter picture: invalidate its MVs so neighbours
 * do not use them for prediction. */
656 if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
657 for (i = 0; i < 4; i++)
658 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
659        0, 4 * 2 * sizeof(int16_t));
660 if (h->pict_type == AV_PICTURE_TYPE_B) {
661 for (i = 0; i < 4; i++)
662 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
663        0, 4 * 2 * sizeof(int16_t));
666 if (!IS_INTRA4x4(mb_type)) {
667 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
669 if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
670 memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
/* Coded block pattern (not present for INTRA16x16, which carries it in
 * the mb_type). */
673 if (!IS_INTRA16x16(mb_type) &&
674     (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
675 if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48) {
676 av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
680 cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
681                         : golomb_to_inter_cbp[vlc];
/* Optional qscale delta; valid range is 0..31 (checked as unsigned). */
683 if (IS_INTRA16x16(mb_type) ||
684     (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
685 h->qscale += svq3_get_se_golomb(&h->gb);
687 if (h->qscale > 31u) {
688 av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
692 if (IS_INTRA16x16(mb_type)) {
693 AV_ZERO128(h->mb_luma_dc[0] + 0);
694 AV_ZERO128(h->mb_luma_dc[0] + 8);
695 if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
696 av_log(h->avctx, AV_LOG_ERROR,
697        "error while decoding intra luma dc\n");
/* Luma AC/whole-block residuals for each coded 8x8 group. */
703 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
704 const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
706 for (i = 0; i < 4; i++)
707 if ((cbp & (1 << i))) {
708 for (j = 0; j < 4; j++) {
709 k = index ? (1 * (j & 1) + 2 * (i & 1) +
710              2 * (j & 2) + 4 * (i & 2))
712 h->non_zero_count_cache[scan8[k]] = 1;
714 if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
715 av_log(h->avctx, AV_LOG_ERROR,
716        "error while decoding block\n");
/* Chroma DC, then chroma AC for both planes. */
723 for (i = 1; i < 3; ++i)
724 if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
725 av_log(h->avctx, AV_LOG_ERROR,
726        "error while decoding chroma dc block\n");
731 for (i = 1; i < 3; i++) {
732 for (j = 0; j < 4; j++) {
734 h->non_zero_count_cache[scan8[k]] = 1;
736 if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
737 av_log(h->avctx, AV_LOG_ERROR,
738        "error while decoding chroma ac block\n");
748 h->cur_pic.mb_type[mb_xy] = mb_type;
750 if (IS_INTRA(mb_type))
751 h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
/* Parse one slice header: the 8-bit frame/slice marker, the location of
 * the next slice (for multi-slice frames, including optional watermark
 * de-scrambling), slice type, optional skip-run, slice id and qscale.
 * Also invalidates intra predictors around the slice start so MV/intra
 * prediction cannot cross a slice boundary. */
756 static int svq3_decode_slice_header(AVCodecContext *avctx)
758 SVQ3Context *s = avctx->priv_data;
759 H264Context *h = &s->h;
760 const int mb_xy = h->mb_xy;
764 header = get_bits(&h->gb, 8);
/* Low 5 bits must be 1 or 2 (slice kind); bits 5-6 encode the byte
 * length of the next-slice offset field and must be nonzero. */
766 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
768 av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
771 int length = header >> 5 & 3;
773 s->next_slice_index = get_bits_count(&h->gb) +
774                       8 * show_bits(&h->gb, 8 * length) +
777 if (s->next_slice_index > h->gb.size_in_bits) {
778 av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
/* Restrict the bit reader to this slice. */
782 h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
783 skip_bits(&h->gb, 8);
/* Watermarked files XOR part of the slice data with the key recovered
 * in svq3_decode_init(); undo that in place. */
785 if (s->watermark_key) {
786 uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
787 AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
788         header ^ s->watermark_key);
/* NOTE(review): the slice-length bytes are relocated in the buffer here;
 * this intentionally mutates the input packet data. */
791 memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
792        &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
794 skip_bits_long(&h->gb, 0);
796 if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
798 av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
802 h->slice_type = golomb_to_pict_type[slice_id];
/* Slice kind 2 carries an explicit starting-MB/skip-run field sized by
 * the MB count. */
804 if ((header & 0x9F) == 2) {
805 i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
806 h->mb_skip_run = get_bits(&h->gb, i) -
807                  (h->mb_y * h->mb_width + h->mb_x);
813 h->slice_num = get_bits(&h->gb, 8);
814 h->qscale = get_bits(&h->gb, 5);
815 s->adaptive_quant = get_bits1(&h->gb);
/* Unknown/reserved fields: 2 fixed bits plus a variable-length tail of
 * flag-prefixed bytes. */
824 skip_bits(&h->gb, 2);
826 while (get_bits1(&h->gb))
827 skip_bits(&h->gb, 8);
829 /* reset intra predictors and invalidate motion vector references */
831 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
832        -1, 4 * sizeof(int8_t));
833 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
834        -1, 8 * sizeof(int8_t) * h->mb_x);
837 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
838        -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
841 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/* One-time decoder setup: allocate the three Picture slots, initialize the
 * shared H.264 machinery, locate and parse the "SEQH" sequence header in
 * the stsd extradata (frame size, hpel/tpel flags, low_delay, optional
 * zlib-compressed watermark from which the de-scrambling key is derived),
 * then size the macroblock tables. */
847 static av_cold int svq3_decode_init(AVCodecContext *avctx)
849 SVQ3Context *s = avctx->priv_data;
850 H264Context *h = &s->h;
852 unsigned char *extradata;
853 unsigned char *extradata_end;
855 int marker_found = 0;
857 s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
858 s->last_pic = av_mallocz(sizeof(*s->last_pic));
859 s->next_pic = av_mallocz(sizeof(*s->next_pic));
860 if (!s->next_pic || !s->last_pic || !s->cur_pic) {
861 av_freep(&s->cur_pic);
862 av_freep(&s->last_pic);
863 av_freep(&s->next_pic);
864 return AVERROR(ENOMEM);
867 if (ff_h264_decode_init(avctx) < 0)
870 ff_hpeldsp_init(&s->hdsp, avctx->flags);
871 h->flags = avctx->flags;
873 h->picture_structure = PICT_FRAME;
874 avctx->pix_fmt = avctx->codec->pix_fmts[0];
/* SVQ3 is 4:2:0 with a fixed chroma qp mapping. */
876 h->chroma_qp[0] = h->chroma_qp[1] = 4;
877 h->chroma_x_shift = h->chroma_y_shift = 1;
/* Defaults used when no sequence header is present. */
880 s->thirdpel_flag = 1;
883 /* prowl for the "SEQH" marker in the extradata */
884 extradata = (unsigned char *)avctx->extradata;
885 extradata_end = avctx->extradata + avctx->extradata_size;
887 for (m = 0; m + 8 < avctx->extradata_size; m++) {
888 if (!memcmp(extradata, "SEQH", 4)) {
896 /* if a match was found, parse the extra data */
/* 32-bit big-endian atom size follows the marker; bound it against the
 * remaining extradata before reading. */
901 size = AV_RB32(&extradata[4]);
902 if (size > extradata_end - extradata - 8)
903 return AVERROR_INVALIDDATA;
904 init_get_bits(&gb, extradata + 8, size * 8);
906 /* 'frame size code' and optional 'width, height' */
907 frame_size_code = get_bits(&gb, 3);
908 switch (frame_size_code) {
/* Code 7: explicit 12-bit width/height (other codes select preset
 * sizes on lines not shown in this excerpt). */
938 avctx->width = get_bits(&gb, 12);
939 avctx->height = get_bits(&gb, 12);
943 s->halfpel_flag = get_bits1(&gb);
944 s->thirdpel_flag = get_bits1(&gb);
952 h->low_delay = get_bits1(&gb);
/* Skip a variable-length run of reserved flag bits. */
957 while (get_bits1(&gb))
960 s->unknown_flag = get_bits1(&gb);
961 avctx->has_b_frames = !h->low_delay;
962 if (s->unknown_flag) {
/* Watermark block: dimensions and metadata, then a zlib stream whose
 * decompressed content yields the 16-bit checksum used as the
 * de-scrambling key in svq3_decode_slice_header(). */
964 unsigned watermark_width = svq3_get_ue_golomb(&gb);
965 unsigned watermark_height = svq3_get_ue_golomb(&gb);
966 int u1 = svq3_get_ue_golomb(&gb);
967 int u2 = get_bits(&gb, 8);
968 int u3 = get_bits(&gb, 2);
969 int u4 = svq3_get_ue_golomb(&gb);
970 unsigned long buf_len = watermark_width *
971                         watermark_height * 4;
972 int offset = get_bits_count(&gb) + 7 >> 3;
/* Overflow guard for the width * height * 4 allocation. */
975 if ((uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
978 buf = av_malloc(buf_len);
979 av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
980        watermark_width, watermark_height);
981 av_log(avctx, AV_LOG_DEBUG,
982        "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
983        u1, u2, u3, u4, offset);
984 if (uncompress(buf, &buf_len, extradata + 8 + offset,
985                size - offset) != Z_OK) {
986 av_log(avctx, AV_LOG_ERROR,
987        "could not uncompress watermark logo\n");
991 s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
/* Replicate the 16-bit checksum into both halves of the 32-bit key. */
992 s->watermark_key = s->watermark_key << 16 | s->watermark_key;
993 av_log(avctx, AV_LOG_DEBUG,
994        "watermark key %#x\n", s->watermark_key);
997 av_log(avctx, AV_LOG_ERROR,
998        "this svq3 file contains watermark which need zlib support compiled in\n");
/* Derive macroblock grid geometry from the (possibly updated) size. */
1004 h->width = avctx->width;
1005 h->height = avctx->height;
1006 h->mb_width = (h->width + 15) / 16;
1007 h->mb_height = (h->height + 15) / 16;
1008 h->mb_stride = h->mb_width + 1;
1009 h->mb_num = h->mb_width * h->mb_height;
1010 h->b_stride = 4 * h->mb_width;
1011 s->h_edge_pos = h->mb_width * 16;
1012 s->v_edge_pos = h->mb_height * 16;
1014 if (ff_h264_alloc_tables(h) < 0) {
1015 av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1016 return AVERROR(ENOMEM);
/* Release all per-picture side-data buffers and the frame itself.
 * Safe on a partially-initialized Picture (av_buffer_unref handles NULL). */
1022 static void free_picture(AVCodecContext *avctx, Picture *pic)
1025 for (i = 0; i < 2; i++) {
1026 av_buffer_unref(&pic->motion_val_buf[i]);
1027 av_buffer_unref(&pic->ref_index_buf[i]);
1029 av_buffer_unref(&pic->mb_type_buf);
1031 av_frame_unref(&pic->f);
/* Acquire a frame buffer for 'pic' plus the per-picture mb_type /
 * motion_val / ref_index side buffers (allocated lazily on first use),
 * and the shared edge-emulation scratch buffer.  Returns 0 or a negative
 * AVERROR; on side-buffer failure everything is released via
 * free_picture() (the goto target is on lines not shown here). */
1034 static int get_buffer(AVCodecContext *avctx, Picture *pic)
1036 SVQ3Context *s = avctx->priv_data;
1037 H264Context *h = &s->h;
1038 const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1039 const int mb_array_size = h->mb_stride * h->mb_height;
1040 const int b4_stride = h->mb_width * 4 + 1;
1041 const int b4_array_size = b4_stride * h->mb_height * 4;
1044 if (!pic->motion_val_buf[0]) {
/* mb_type points past a two-row + one-column margin so neighbour
 * lookups at the frame border stay in bounds. */
1047 pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1048 if (!pic->mb_type_buf)
1049 return AVERROR(ENOMEM);
1050 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1052 for (i = 0; i < 2; i++) {
1053 pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1054 pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1055 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1056 ret = AVERROR(ENOMEM);
1060 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1061 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* B-frames are never used as references. */
1064 pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1066 ret = ff_get_buffer(avctx, &pic->f,
1067                     pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* Scratch row for emulated_edge_mc: 17 lines of one luma stride. */
1071 if (!h->edge_emu_buffer) {
1072 h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1073 if (!h->edge_emu_buffer)
1074 return AVERROR(ENOMEM);
1077 h->linesize = pic->f.linesize[0];
1078 h->uvlinesize = pic->f.linesize[1];
1082 free_picture(avctx, pic);
/* Decode one access unit: parse the slice header(s), set up the current /
 * last / next picture rotation, run the macroblock loop, and output either
 * the current frame (B or low-delay) or the delayed previous frame. */
1086 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1087                              int *got_frame, AVPacket *avpkt)
1089 const uint8_t *buf = avpkt->data;
1090 SVQ3Context *s = avctx->priv_data;
1091 H264Context *h = &s->h;
1092 int buf_size = avpkt->size;
1095 /* special case for last picture */
/* Flush: on EOF emit the still-pending delayed frame exactly once. */
1096 if (buf_size == 0) {
1097 if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1098 ret = av_frame_ref(data, &s->next_pic->f);
1101 s->last_frame_output = 1;
1107 init_get_bits(&h->gb, buf, 8 * buf_size);
1109 h->mb_x = h->mb_y = h->mb_xy = 0;
1111 if (svq3_decode_slice_header(avctx))
1114 h->pict_type = h->slice_type;
/* Reference rotation: for non-B frames the previous 'next' becomes the
 * back reference. */
1116 if (h->pict_type != AV_PICTURE_TYPE_B)
1117 FFSWAP(Picture*, s->next_pic, s->last_pic);
1119 av_frame_unref(&s->cur_pic->f);
1121 /* for skipping the frame */
1122 s->cur_pic->f.pict_type = h->pict_type;
1123 s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1125 ret = get_buffer(avctx, s->cur_pic);
/* Mirror cur_pic into the embedded H264Context (shallow copy plus a
 * proper frame reference). */
1129 h->cur_pic_ptr = s->cur_pic;
1130 av_frame_unref(&h->cur_pic.f);
1131 h->cur_pic = *s->cur_pic;
1132 ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
/* Precompute per-block pixel offsets for the H.264 reconstruction code
 * (second set at +48 is for the bottom field / doubled stride case). */
1136 for (i = 0; i < 16; i++) {
1137 h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1138 h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1140 for (i = 0; i < 16; i++) {
1141 h->block_offset[16 + i] =
1142 h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1143 h->block_offset[48 + 16 + i] =
1144 h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
/* Missing references (e.g. after a seek): synthesize mid-gray frames so
 * decoding can proceed instead of failing. */
1147 if (h->pict_type != AV_PICTURE_TYPE_I) {
1148 if (!s->last_pic->f.data[0]) {
1149 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1150 ret = get_buffer(avctx, s->last_pic);
1153 memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1154 memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1155        s->last_pic->f.linesize[1]);
1156 memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1157        s->last_pic->f.linesize[2]);
1160 if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1161 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1162 ret = get_buffer(avctx, s->next_pic);
1165 memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1166 memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1167        s->next_pic->f.linesize[1]);
1168 memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1169        s->next_pic->f.linesize[2]);
1173 if (avctx->debug & FF_DEBUG_PICT_INFO)
1174 av_log(h->avctx, AV_LOG_DEBUG,
1175        "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1176        av_get_picture_type_char(h->pict_type),
1177        s->halfpel_flag, s->thirdpel_flag,
1178        s->adaptive_quant, h->qscale, h->slice_num);
/* Honour the user's frame-skip settings. */
1180 if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1181     avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1182     avctx->skip_frame >= AVDISCARD_ALL)
1185 if (s->next_p_frame_damaged) {
1186 if (h->pict_type == AV_PICTURE_TYPE_B)
1189 s->next_p_frame_damaged = 0;
/* Maintain the frame-number offsets used for temporal MV scaling in
 * svq3_mc_dir(); slice_num doubles as an 8-bit frame counter. */
1192 if (h->pict_type == AV_PICTURE_TYPE_B) {
1193 h->frame_num_offset = h->slice_num - h->prev_frame_num;
1195 if (h->frame_num_offset < 0)
1196 h->frame_num_offset += 256;
1197 if (h->frame_num_offset == 0 ||
1198     h->frame_num_offset >= h->prev_frame_num_offset) {
1199 av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1203 h->prev_frame_num = h->frame_num;
1204 h->frame_num = h->slice_num;
1205 h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1207 if (h->prev_frame_num_offset < 0)
1208 h->prev_frame_num_offset += 256;
/* ref_cache defaults: in-frame positions valid (1), border positions
 * unavailable. */
1211 for (m = 0; m < 2; m++) {
1213 for (i = 0; i < 4; i++) {
1215 for (j = -1; j < 4; j++)
1216 h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1218 h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
/* Main macroblock loop. */
1222 for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1223 for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1225 h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
/* End of this slice's data reached: jump to the next slice header. */
1227 if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1228     ((get_bits_count(&h->gb) & 7) == 0 ||
1229      show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1230 skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1231 h->gb.size_in_bits = 8 * buf_size;
1233 if (svq3_decode_slice_header(avctx))
1236 /* TODO: support s->mb_skip_run */
1239 mb_type = svq3_get_ue_golomb(&h->gb);
/* mb_type offsets differ per picture type (I adds 8, B >= 4 adds 4;
 * the adjustment lines are not in this excerpt). */
1241 if (h->pict_type == AV_PICTURE_TYPE_I)
1243 else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1245 if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1246 av_log(h->avctx, AV_LOG_ERROR,
1247        "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1252 ff_h264_hl_decode_mb(h);
/* Remember P-frame mb_types for B-frame direct prediction. */
1254 if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1255 h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1256     (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1259 ff_draw_horiz_band(avctx, NULL, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL,
1260                    16 * h->mb_y, 16, h->picture_structure, 0, 0,
1261                    h->low_delay, h->mb_height * 16, h->mb_width * 16);
/* Output: B / low-delay frames immediately, otherwise the delayed
 * previous frame. */
1264 if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1265 ret = av_frame_ref(data, &s->cur_pic->f);
1266 else if (s->last_pic->f.data[0])
1267 ret = av_frame_ref(data, &s->last_pic->f);
1271 /* Do not output the last pic after seeking. */
1272 if (s->last_pic->f.data[0] || h->low_delay)
1275 if (h->pict_type != AV_PICTURE_TYPE_B) {
1276 FFSWAP(Picture*, s->cur_pic, s->next_pic);
1278 av_frame_unref(&s->cur_pic->f);
/* Tear down the decoder: release all three picture slots, drop the
 * H264Context's frame reference, and free the shared H.264 state. */
1284 static int svq3_decode_end(AVCodecContext *avctx)
1286 SVQ3Context *s = avctx->priv_data;
1287 H264Context *h = &s->h;
1289 free_picture(avctx, s->cur_pic);
1290 free_picture(avctx, s->next_pic);
1291 free_picture(avctx, s->last_pic);
1292 av_freep(&s->cur_pic);
1293 av_freep(&s->next_pic);
1294 av_freep(&s->last_pic);
1296 av_frame_unref(&h->cur_pic.f);
1298 ff_h264_free_context(h);
/* Codec registration entry for the SVQ3 decoder. */
1303 AVCodec ff_svq3_decoder = {
1305     .type           = AVMEDIA_TYPE_VIDEO,
1306     .id             = AV_CODEC_ID_SVQ3,
1307     .priv_data_size = sizeof(SVQ3Context),
1308     .init           = svq3_decode_init,
1309     .close          = svq3_decode_end,
1310     .decode         = svq3_decode_frame,
1311     .capabilities   = CODEC_CAP_DRAW_HORIZ_BAND |
1314     .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1315     .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,