2 * Copyright (c) 2003 The Libav Project
4 * This file is part of Libav.
6 * Libav is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * Libav is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with Libav; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
44 #include "mpegvideo.h"
47 #include "h264data.h" // FIXME FIXME FIXME
49 #include "h264_mvpred.h"
51 #include "rectangle.h"
52 #include "vdpau_internal.h"
75 uint32_t watermark_key;
77 int next_p_frame_damaged;
80 int last_frame_output;
/* Inter-prediction motion-compensation modes used by the SVQ3 bitstream:
 * full-pel, half-pel and third-pel MC, plus PREDICT, where the motion is
 * derived from the co-located vectors of the next P-frame (B-frame path). */
83 #define FULLPEL_MODE 1
84 #define HALFPEL_MODE 2
85 #define THIRDPEL_MODE 3
86 #define PREDICT_MODE 4
88 /* dual scan (from some older h264 draft)
/* SVQ3-specific coefficient scan order for 4x4 blocks; each entry is a
 * position encoded as x + y * 4 (selected in svq3_decode_block when
 * type == 2). */
97 static const uint8_t svq3_scan[16] = {
98 0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
99 2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
100 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
101 0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
/* Maps a luma intra-prediction VLC code (0..24) to a pair of indices used
 * to look up the two 4x4 prediction modes via svq3_pred_1 (see the
 * "decode prediction codes for luma blocks" loop in svq3_decode_mb).
 * NOTE(review): entries appear to walk anti-diagonals of a 5x5 grid —
 * confirm against the full table (some rows are not visible here). */
104 static const uint8_t svq3_pred_0[25][2] = {
107 { 0, 2 }, { 1, 1 }, { 2, 0 },
108 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
109 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
110 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
111 { 2, 4 }, { 3, 3 }, { 4, 2 },
/* Context-dependent luma mode table: indexed as
 * svq3_pred_1[top_mode + 1][left_mode + 1][rank], where -1 (unavailable
 * neighbour) maps to row/column 0 and -1 entries mark invalid ranks
 * (svq3_decode_mb reports "weird prediction" when one is selected). */
116 static const int8_t svq3_pred_1[6][6][5] = {
117 { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
118 { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
119 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
120 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
121 { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
122 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
123 { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
124 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
125 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
126 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
127 { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
128 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/* Short run/level VLC tables for DCT coefficients; row [0] is the inter
 * table, row [1] the intra table (svq3_decode_block indexes by vlc - 10
 * after the two explicit small-vlc cases). */
131 static const struct {
134 } svq3_dct_tables[2][16] = {
135 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
136 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
137 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
138 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/* Dequantization multipliers indexed by qscale (0..31); applied with a
 * final ">> 20" rounding shift in the IDCT routines below. */
141 static const uint32_t svq3_dequant_coeff[32] = {
142 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
143 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
144 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
145 61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
/* Dequantize and inverse-transform the 4x4 luma DC plane.
 * Applies the SVQ3 integer "IDCT" (coefficients 13/13 and 17/7, an
 * approximation of cos/sin rotation) first on rows into temp[], then on
 * columns, scaling by the qp-dependent multiplier with 0x80000 rounding
 * before the >> 20 normalization. Results are scattered into output at
 * stride positions given by x_offset (DC positions of the 16 luma blocks). */
148 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
150 const int qmul = svq3_dequant_coeff[qp];
154 static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
/* horizontal pass: 4 rows of the butterfly */
156 for (i = 0; i < 4; i++) {
157 const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
158 const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
159 const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
160 const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
162 temp[4 * i + 0] = z0 + z3;
163 temp[4 * i + 1] = z1 + z2;
164 temp[4 * i + 2] = z1 - z2;
165 temp[4 * i + 3] = z0 - z3;
/* vertical pass + dequant + rounding shift */
168 for (i = 0; i < 4; i++) {
169 const int offset = x_offset[i];
170 const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
171 const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
172 const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
173 const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
175 output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
176 output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
177 output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
178 output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
/* Inverse-transform one dequantized 4x4 block and add the result to dst.
 * When dc is nonzero the DC term is computed separately (dc == 1 selects
 * the fixed 1538 scale, otherwise the qmul path) and folded into the
 * rounding constant rr; the AC transform is the same 13/13, 17/7 integer
 * butterfly as the luma-DC routine, with clipping to uint8 on output. */
183 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
184 int stride, int qp, int dc)
186 const int qmul = svq3_dequant_coeff[qp];
190 dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
191 : qmul * (block[0] >> 3) / 2);
/* horizontal pass, in place over block[] */
195 for (i = 0; i < 4; i++) {
196 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
197 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
198 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
199 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
201 block[0 + 4 * i] = z0 + z3;
202 block[1 + 4 * i] = z1 + z2;
203 block[2 + 4 * i] = z1 - z2;
204 block[3 + 4 * i] = z0 - z3;
/* vertical pass, add into the destination picture */
207 for (i = 0; i < 4; i++) {
208 const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
209 const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
210 const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
211 const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
212 const int rr = (dc + 0x80000);
214 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
215 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
216 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
217 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
/* Decode run/level coefficients for one block into block[].
 * type selects the scan pattern (0 luma DC, 1 zigzag, 2 SVQ3 scan,
 * 3 chroma DC); intra = 3 * type >> 2 folds that into an inter(0)/intra(1)
 * table selector. Golomb codes below 10 are decoded with explicit
 * arithmetic, larger ones through svq3_dct_tables; sign is carried in the
 * LSB of the vlc ((level ^ sign) - sign applies it branchlessly). */
221 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
222 int index, const int type)
224 static const uint8_t *const scan_patterns[4] =
225 { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
227 int run, level, limit;
229 const int intra = 3 * type >> 2;
230 const uint8_t *const scan = scan_patterns[type];
232 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
233 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
234 int sign = (vlc & 1) ? 0 : -1;
241 } else if (vlc < 4) {
246 level = (vlc + 9 >> 2) - run;
250 run = svq3_dct_tables[intra][vlc].run;
251 level = svq3_dct_tables[intra][vlc].level;
255 ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
259 ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
/* bounds check: a run past the scan limit is a bitstream error */
263 if ((index += run) >= limit)
266 block[scan[index]] = (level ^ sign) - sign;
/* Motion-compensate one partition (luma, then both chroma planes unless
 * CODEC_FLAG_GRAY). dir selects the reference (0 = last, 1 = next pic);
 * thirdpel chooses between tpel and regular (h)pel DSP functions; avg
 * selects put vs. avg (bidirectional averaging). Out-of-frame references
 * are clamped and fetched through emulated_edge_mc. */
277 static inline void svq3_mc_dir_part(SVQ3Context *s,
278 int x, int y, int width, int height,
279 int mx, int my, int dxy,
280 int thirdpel, int dir, int avg)
282 H264Context *h = &s->h;
283 const Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
286 int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
/* clamp the source position so the edge emulation buffer suffices */
291 if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
292 my < 0 || my >= s->v_edge_pos - height - 1) {
294 mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
295 my = av_clip(my, -16, s->v_edge_pos - height + 15);
298 /* form component predictions */
299 dest = h->cur_pic.f.data[0] + x + y * h->linesize;
300 src = pic->f.data[0] + mx + my * h->linesize;
303 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->linesize,
304 width + 1, height + 1,
305 mx, my, s->h_edge_pos, s->v_edge_pos);
306 src = h->edge_emu_buffer;
309 (avg ? h->dsp.avg_tpel_pixels_tab
310 : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
313 (avg ? h->dsp.avg_pixels_tab
314 : h->dsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
317 if (!(h->flags & CODEC_FLAG_GRAY)) {
/* halve the MV with rounding toward the block position for chroma */
318 mx = mx + (mx < (int) x) >> 1;
319 my = my + (my < (int) y) >> 1;
321 height = height >> 1;
324 for (i = 1; i < 3; i++) {
325 dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
326 src = pic->f.data[i] + mx + my * h->uvlinesize;
329 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->uvlinesize,
330 width + 1, height + 1,
331 mx, my, (s->h_edge_pos >> 1),
333 src = h->edge_emu_buffer;
336 (avg ? h->dsp.avg_tpel_pixels_tab
337 : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
341 (avg ? h->dsp.avg_pixels_tab
342 : h->dsp.put_pixels_tab)[blocksize][dxy](dest, src,
/* Decode motion vectors for all partitions of the current macroblock and
 * run motion compensation for each. size encodes the partition geometry;
 * mode is one of the *PEL/PREDICT modes. In PREDICT_MODE the MV is scaled
 * from the co-located next-picture vector by the frame-distance ratio
 * instead of being predicted from neighbours; otherwise pred_motion()
 * supplies the prediction and a Golomb-coded differential is read.
 * MVs are kept in 1/6-pel units internally (hence the /3 and /6 rescales)
 * and written back to mv_cache and cur_pic.motion_val. */
349 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
352 int i, j, k, mx, my, dx, dy, x, y;
353 H264Context *h = &s->h;
354 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
355 const int part_height = 16 >> ((unsigned)(size + 1) / 3);
356 const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
357 const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
358 const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
360 for (i = 0; i < 16; i += part_height)
361 for (j = 0; j < 16; j += part_width) {
362 const int b_xy = (4 * h->mb_x + (j >> 2)) +
363 (4 * h->mb_y + (i >> 2)) * h->b_stride;
365 x = 16 * h->mb_x + j;
366 y = 16 * h->mb_y + i;
/* k = scan8-style index of this partition inside the MB */
367 k = (j >> 2 & 1) + (i >> 1 & 2) +
368 (j >> 1 & 4) + (i & 8);
370 if (mode != PREDICT_MODE) {
371 pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
373 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
374 my = s->next_pic->motion_val[0][b_xy][1] << 1;
/* temporal scaling by frame-number distance (B-frame direct) */
377 mx = mx * h->frame_num_offset /
378 h->prev_frame_num_offset + 1 >> 1;
379 my = my * h->frame_num_offset /
380 h->prev_frame_num_offset + 1 >> 1;
382 mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
383 h->prev_frame_num_offset + 1 >> 1;
384 my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
385 h->prev_frame_num_offset + 1 >> 1;
389 /* clip motion vector prediction to frame border */
390 mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
391 my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
393 /* get (optional) motion vector differential */
394 if (mode == PREDICT_MODE) {
397 dy = svq3_get_se_golomb(&h->gb);
398 dx = svq3_get_se_golomb(&h->gb);
400 if (dx == INVALID_VLC || dy == INVALID_VLC) {
401 av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
406 /* compute motion vector */
407 if (mode == THIRDPEL_MODE) {
409 mx = (mx + 1 >> 1) + dx;
410 my = (my + 1 >> 1) + dy;
/* split into full-pel base (fx,fy) and third-pel fraction dxy */
411 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
412 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
413 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
415 svq3_mc_dir_part(s, x, y, part_width, part_height,
416 fx, fy, dxy, 1, dir, avg);
419 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
420 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
421 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
422 dxy = (mx & 1) + 2 * (my & 1);
424 svq3_mc_dir_part(s, x, y, part_width, part_height,
425 mx >> 1, my >> 1, dxy, 0, dir, avg);
429 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
430 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
432 svq3_mc_dir_part(s, x, y, part_width, part_height,
433 mx, my, 0, 0, dir, avg);
438 /* update mv_cache */
439 if (mode != PREDICT_MODE) {
440 int32_t mv = pack16to32(mx, my);
442 if (part_height == 8 && i < 8) {
443 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
445 if (part_width == 8 && j < 8)
446 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
448 if (part_width == 8 && j < 8)
449 AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
450 if (part_width == 4 || part_height == 4)
451 AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
454 /* write back motion vectors */
455 fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
456 part_width >> 2, part_height >> 2, h->b_stride,
457 pack16to32(mx, my), 4);
/* Decode a single macroblock of the given bitstream mb_type.
 * Macroblock type ranges: 0 = SKIP (direct in B-frames), 1..7 = INTER with
 * varying partitioning, 8..32 = INTRA16x16, 8/33 special-case INTRA4x4 /
 * DC_128. The function fills prediction caches from neighbours, decodes
 * MVs or intra modes, then decodes the residual (luma DC for 16x16 intra,
 * AC blocks per cbp, chroma DC + AC) via svq3_decode_block. Returns 0 on
 * success, negative on bitstream errors. */
463 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
465 H264Context *h = &s->h;
466 int i, j, k, m, dir, mode;
470 const int mb_xy = h->mb_xy;
471 const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
473 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
474 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
475 h->topright_samples_available = 0xFFFF;
477 if (mb_type == 0) { /* SKIP */
478 if (h->pict_type == AV_PICTURE_TYPE_P ||
479 s->next_pic->mb_type[mb_xy] == -1) {
480 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
483 if (h->pict_type == AV_PICTURE_TYPE_B)
484 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
487 mb_type = MB_TYPE_SKIP;
/* B-frame direct skip: reuse the co-located next-pic type, capped */
489 mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
490 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
492 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
495 mb_type = MB_TYPE_16x16;
497 } else if (mb_type < 8) { /* INTER */
498 if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
499 mode = THIRDPEL_MODE;
500 else if (s->halfpel_flag &&
501 s->thirdpel_flag == !get_bits1(&h->gb))
507 /* note ref_cache should contain here:
/* fill mv_cache / ref_cache from the left, top, top-right and top-left
 * neighbours for both prediction directions */
515 for (m = 0; m < 2; m++) {
516 if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
517 for (i = 0; i < 4; i++)
518 AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
519 h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
521 for (i = 0; i < 4; i++)
522 AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
525 memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
526 h->cur_pic.motion_val[m][b_xy - h->b_stride],
527 4 * 2 * sizeof(int16_t));
528 memset(&h->ref_cache[m][scan8[0] - 1 * 8],
529 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
531 if (h->mb_x < h->mb_width - 1) {
532 AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
533 h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
534 h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
535 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
536 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
538 h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
540 AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
541 h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
542 h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
543 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
545 h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
547 memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
548 PART_NOT_AVAILABLE, 8);
550 if (h->pict_type != AV_PICTURE_TYPE_B)
554 /* decode motion vector(s) and form prediction(s) */
555 if (h->pict_type == AV_PICTURE_TYPE_P) {
556 if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
558 } else { /* AV_PICTURE_TYPE_B */
560 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
563 for (i = 0; i < 4; i++)
564 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
565 0, 4 * 2 * sizeof(int16_t));
568 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
571 for (i = 0; i < 4; i++)
572 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
573 0, 4 * 2 * sizeof(int16_t));
577 mb_type = MB_TYPE_16x16;
578 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
579 memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* import the neighbours' intra 4x4 modes into the cache */
583 for (i = 0; i < 4; i++)
584 h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
585 if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
586 h->left_samples_available = 0x5F5F;
589 h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
590 h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
591 h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
592 h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
594 if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
595 h->top_samples_available = 0x33FF;
598 /* decode prediction codes for luma blocks */
599 for (i = 0; i < 16; i += 2) {
600 vlc = svq3_get_ue_golomb(&h->gb);
603 av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
607 left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
608 top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
/* one vlc yields the modes for two adjacent 4x4 blocks */
610 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
611 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
613 if (left[1] == -1 || left[2] == -1) {
614 av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
618 } else { /* mb_type == 33, DC_128_PRED block type */
619 for (i = 0; i < 4; i++)
620 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
623 write_back_intra_pred_mode(h);
626 ff_h264_check_intra4x4_pred_mode(h);
628 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
629 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
631 for (i = 0; i < 4; i++)
632 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
634 h->top_samples_available = 0x33FF;
635 h->left_samples_available = 0x5F5F;
638 mb_type = MB_TYPE_INTRA4x4;
639 } else { /* INTRA16x16 */
640 dir = i_mb_type_info[mb_type - 8].pred_mode;
641 dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
643 if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) == -1) {
644 av_log(h->avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
648 cbp = i_mb_type_info[mb_type - 8].cbp;
649 mb_type = MB_TYPE_INTRA16x16;
652 if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
653 for (i = 0; i < 4; i++)
654 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
655 0, 4 * 2 * sizeof(int16_t));
656 if (h->pict_type == AV_PICTURE_TYPE_B) {
657 for (i = 0; i < 4; i++)
658 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
659 0, 4 * 2 * sizeof(int16_t));
662 if (!IS_INTRA4x4(mb_type)) {
663 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
665 if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
666 memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
667 h->dsp.clear_blocks(h->mb + 0);
668 h->dsp.clear_blocks(h->mb + 384);
671 if (!IS_INTRA16x16(mb_type) &&
672 (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
673 if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48) {
674 av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
678 cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
679 : golomb_to_inter_cbp[vlc];
681 if (IS_INTRA16x16(mb_type) ||
682 (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
683 h->qscale += svq3_get_se_golomb(&h->gb);
/* unsigned compare also rejects negative qscale values */
685 if (h->qscale > 31u) {
686 av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
690 if (IS_INTRA16x16(mb_type)) {
691 AV_ZERO128(h->mb_luma_dc[0] + 0);
692 AV_ZERO128(h->mb_luma_dc[0] + 8);
693 if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
694 av_log(h->avctx, AV_LOG_ERROR,
695 "error while decoding intra luma dc\n");
701 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
702 const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
704 for (i = 0; i < 4; i++)
705 if ((cbp & (1 << i))) {
706 for (j = 0; j < 4; j++) {
707 k = index ? (1 * (j & 1) + 2 * (i & 1) +
708 2 * (j & 2) + 4 * (i & 2))
710 h->non_zero_count_cache[scan8[k]] = 1;
712 if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
713 av_log(h->avctx, AV_LOG_ERROR,
714 "error while decoding block\n");
721 for (i = 1; i < 3; ++i)
722 if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
723 av_log(h->avctx, AV_LOG_ERROR,
724 "error while decoding chroma dc block\n");
729 for (i = 1; i < 3; i++) {
730 for (j = 0; j < 4; j++) {
732 h->non_zero_count_cache[scan8[k]] = 1;
734 if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
735 av_log(h->avctx, AV_LOG_ERROR,
736 "error while decoding chroma ac block\n");
746 h->cur_pic.mb_type[mb_xy] = mb_type;
748 if (IS_INTRA(mb_type))
749 h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
/* Parse one SVQ3 slice header from h->gb.
 * Validates the 8-bit header byte, computes next_slice_index from the
 * embedded slice length, undoes the watermark XOR scrambling when a
 * watermark key is present, reads slice type / qscale / adaptive-quant
 * flags, and invalidates intra predictors along the slice boundary.
 * Returns 0 on success, negative on an invalid header. */
754 static int svq3_decode_slice_header(AVCodecContext *avctx)
756 SVQ3Context *s = avctx->priv_data;
757 H264Context *h = &s->h;
758 const int mb_xy = h->mb_xy;
762 header = get_bits(&h->gb, 8);
764 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
766 av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
/* bits 5-6 give the byte count of the slice-length field */
769 int length = header >> 5 & 3;
771 s->next_slice_index = get_bits_count(&h->gb) +
772 8 * show_bits(&h->gb, 8 * length) +
775 if (s->next_slice_index > h->gb.size_in_bits) {
776 av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
780 h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
781 skip_bits(&h->gb, 8);
783 if (s->watermark_key) {
/* de-scramble the next 32 bits in place before they are read */
784 uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
785 AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
786 header ^ s->watermark_key);
789 memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
790 &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
792 skip_bits_long(&h->gb, 0);
795 if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
796 av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
800 h->slice_type = golomb_to_pict_type[slice_id];
802 if ((header & 0x9F) == 2) {
/* slice starts mid-frame: read the MB address and derive the skip run */
803 i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
804 h->mb_skip_run = get_bits(&h->gb, i) -
805 (h->mb_y * h->mb_width + h->mb_x);
811 h->slice_num = get_bits(&h->gb, 8);
812 h->qscale = get_bits(&h->gb, 5);
813 s->adaptive_quant = get_bits1(&h->gb);
/* skip unknown/reserved header fields */
822 skip_bits(&h->gb, 2);
824 while (get_bits1(&h->gb))
825 skip_bits(&h->gb, 8);
827 /* reset intra predictors and invalidate motion vector references */
829 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
830 -1, 4 * sizeof(int8_t));
831 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
832 -1, 8 * sizeof(int8_t) * h->mb_x);
835 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
836 -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
839 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/* Decoder init: allocate the three Picture slots, initialize the shared
 * H264 context, then locate and parse the "SEQH" header inside the
 * extradata (frame size code, hpel/tpel flags, low_delay) and, if the
 * stream is watermarked, uncompress the watermark logo with zlib and
 * derive the descrambling key from its checksum. */
845 static av_cold int svq3_decode_init(AVCodecContext *avctx)
847 SVQ3Context *s = avctx->priv_data;
848 H264Context *h = &s->h;
850 unsigned char *extradata;
851 unsigned char *extradata_end;
853 int marker_found = 0;
855 s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
856 s->last_pic = av_mallocz(sizeof(*s->last_pic));
857 s->next_pic = av_mallocz(sizeof(*s->next_pic));
858 if (!s->next_pic || !s->last_pic || !s->cur_pic) {
859 av_freep(&s->cur_pic);
860 av_freep(&s->last_pic);
861 av_freep(&s->next_pic);
862 return AVERROR(ENOMEM);
865 if (ff_h264_decode_init(avctx) < 0)
868 h->flags = avctx->flags;
870 h->picture_structure = PICT_FRAME;
871 avctx->pix_fmt = avctx->codec->pix_fmts[0];
873 h->chroma_qp[0] = h->chroma_qp[1] = 4;
874 h->chroma_x_shift = h->chroma_y_shift = 1;
877 s->thirdpel_flag = 1;
880 /* prowl for the "SEQH" marker in the extradata */
881 extradata = (unsigned char *)avctx->extradata;
882 extradata_end = avctx->extradata + avctx->extradata_size;
884 for (m = 0; m + 8 < avctx->extradata_size; m++) {
885 if (!memcmp(extradata, "SEQH", 4)) {
893 /* if a match was found, parse the extra data */
898 size = AV_RB32(&extradata[4]);
899 if (size > extradata_end - extradata - 8)
900 return AVERROR_INVALIDDATA;
901 init_get_bits(&gb, extradata + 8, size * 8);
903 /* 'frame size code' and optional 'width, height' */
904 frame_size_code = get_bits(&gb, 3);
905 switch (frame_size_code) {
/* code 7: explicit 12-bit width and height follow */
935 avctx->width = get_bits(&gb, 12);
936 avctx->height = get_bits(&gb, 12);
940 s->halfpel_flag = get_bits1(&gb);
941 s->thirdpel_flag = get_bits1(&gb);
949 h->low_delay = get_bits1(&gb);
/* skip variable-length unknown fields */
954 while (get_bits1(&gb))
957 s->unknown_flag = get_bits1(&gb);
958 avctx->has_b_frames = !h->low_delay;
959 if (s->unknown_flag) {
961 unsigned watermark_width = svq3_get_ue_golomb(&gb);
962 unsigned watermark_height = svq3_get_ue_golomb(&gb);
963 int u1 = svq3_get_ue_golomb(&gb);
964 int u2 = get_bits(&gb, 8);
965 int u3 = get_bits(&gb, 2);
966 int u4 = svq3_get_ue_golomb(&gb);
967 unsigned long buf_len = watermark_width *
968 watermark_height * 4;
969 int offset = get_bits_count(&gb) + 7 >> 3;
/* overflow check before the buf_len multiplication is trusted */
972 if ((uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
/* NOTE(review): av_malloc result is passed to uncompress() without a
 * NULL check — verify allocation failure handling upstream. */
975 buf = av_malloc(buf_len);
976 av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
977 watermark_width, watermark_height);
978 av_log(avctx, AV_LOG_DEBUG,
979 "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
980 u1, u2, u3, u4, offset);
981 if (uncompress(buf, &buf_len, extradata + 8 + offset,
982 size - offset) != Z_OK) {
983 av_log(avctx, AV_LOG_ERROR,
984 "could not uncompress watermark logo\n");
988 s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
989 s->watermark_key = s->watermark_key << 16 | s->watermark_key;
990 av_log(avctx, AV_LOG_DEBUG,
991 "watermark key %#x\n", s->watermark_key);
994 av_log(avctx, AV_LOG_ERROR,
995 "this svq3 file contains watermark which need zlib support compiled in\n");
/* derive macroblock geometry from the final frame dimensions */
1001 h->width = avctx->width;
1002 h->height = avctx->height;
1003 h->mb_width = (h->width + 15) / 16;
1004 h->mb_height = (h->height + 15) / 16;
1005 h->mb_stride = h->mb_width + 1;
1006 h->mb_num = h->mb_width * h->mb_height;
1007 h->b_stride = 4 * h->mb_width;
1008 s->h_edge_pos = h->mb_width * 16;
1009 s->v_edge_pos = h->mb_height * 16;
1011 if (ff_h264_alloc_tables(h) < 0) {
1012 av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1013 return AVERROR(ENOMEM);
/* Release all buffers owned by one Picture slot: per-direction motion
 * vector and ref-index buffers, the mb_type buffer, and the frame data. */
1019 static void free_picture(AVCodecContext *avctx, Picture *pic)
1022 for (i = 0; i < 2; i++) {
1023 av_buffer_unref(&pic->motion_val_buf[i]);
1024 av_buffer_unref(&pic->ref_index_buf[i]);
1026 av_buffer_unref(&pic->mb_type_buf);
1028 av_frame_unref(&pic->f);
/* Lazily allocate the side buffers of a Picture (mb_type, motion_val,
 * ref_index) and acquire a frame buffer via ff_get_buffer; B-frames are
 * non-reference. Also (re)derives linesize/uvlinesize and the edge
 * emulation buffer from the first allocated frame. Returns 0 or a
 * negative AVERROR, freeing the picture on failure. */
1031 static int get_buffer(AVCodecContext *avctx, Picture *pic)
1033 SVQ3Context *s = avctx->priv_data;
1034 H264Context *h = &s->h;
1035 const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1036 const int mb_array_size = h->mb_stride * h->mb_height;
1037 const int b4_stride = h->mb_width * 4 + 1;
1038 const int b4_array_size = b4_stride * h->mb_height * 4;
1041 if (!pic->motion_val_buf[0]) {
1044 pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1045 if (!pic->mb_type_buf)
1046 return AVERROR(ENOMEM);
/* skip the guard rows/cols so neighbour accesses stay in bounds */
1047 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1049 for (i = 0; i < 2; i++) {
1050 pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1051 pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1052 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1053 ret = AVERROR(ENOMEM);
1057 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1058 pic->ref_index[i] = pic->ref_index_buf[i]->data;
1061 pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1063 ret = ff_get_buffer(avctx, &pic->f,
1064 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1068 if (!h->edge_emu_buffer) {
/* 17 lines: 16 block rows + 1 for interpolation overread */
1069 h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1070 if (!h->edge_emu_buffer)
1071 return AVERROR(ENOMEM);
1074 h->linesize = pic->f.linesize[0];
1075 h->uvlinesize = pic->f.linesize[1];
/* error path: release everything allocated so far */
1079 free_picture(avctx, pic);
/* Decode one access unit. Handles the flush case (buf_size == 0) by
 * emitting the pending next_pic once; otherwise parses the slice header,
 * rotates the last/next reference pictures for non-B frames, acquires the
 * current picture, synthesizes gray reference frames when a reference is
 * missing, then iterates over all macroblocks, switching slices at the
 * precomputed next_slice_index. Output is the current picture for B/low
 * delay streams, otherwise the delayed last picture. */
1083 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1084 int *got_frame, AVPacket *avpkt)
1086 const uint8_t *buf = avpkt->data;
1087 SVQ3Context *s = avctx->priv_data;
1088 H264Context *h = &s->h;
1089 int buf_size = avpkt->size;
1092 /* special case for last picture */
1093 if (buf_size == 0) {
1094 if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1095 ret = av_frame_ref(data, &s->next_pic->f);
1098 s->last_frame_output = 1;
1104 init_get_bits(&h->gb, buf, 8 * buf_size);
1106 h->mb_x = h->mb_y = h->mb_xy = 0;
1108 if (svq3_decode_slice_header(avctx))
1111 h->pict_type = h->slice_type;
/* P/I frames become the new "next" reference; the old one is reused */
1113 if (h->pict_type != AV_PICTURE_TYPE_B)
1114 FFSWAP(Picture*, s->next_pic, s->last_pic);
1116 av_frame_unref(&s->cur_pic->f);
1118 /* for skipping the frame */
1119 s->cur_pic->f.pict_type = h->pict_type;
1120 s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1122 ret = get_buffer(avctx, s->cur_pic);
1126 h->cur_pic_ptr = s->cur_pic;
1127 av_frame_unref(&h->cur_pic.f);
/* shallow-copy into the H264 context plus a proper frame reference */
1128 h->cur_pic = *s->cur_pic;
1129 ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
1133 for (i = 0; i < 16; i++) {
1134 h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1135 h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1137 for (i = 0; i < 16; i++) {
1138 h->block_offset[16 + i] =
1139 h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1140 h->block_offset[48 + 16 + i] =
1141 h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1144 if (h->pict_type != AV_PICTURE_TYPE_I) {
1145 if (!s->last_pic->f.data[0]) {
/* missing reference: allocate and fill with mid-gray */
1146 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1147 ret = get_buffer(avctx, s->last_pic);
1150 memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1151 memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1152 s->last_pic->f.linesize[1]);
1153 memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1154 s->last_pic->f.linesize[2]);
1157 if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1158 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1159 ret = get_buffer(avctx, s->next_pic);
1162 memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1163 memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1164 s->next_pic->f.linesize[1]);
1165 memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1166 s->next_pic->f.linesize[2]);
1170 if (avctx->debug & FF_DEBUG_PICT_INFO)
1171 av_log(h->avctx, AV_LOG_DEBUG,
1172 "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1173 av_get_picture_type_char(h->pict_type),
1174 s->halfpel_flag, s->thirdpel_flag,
1175 s->adaptive_quant, h->qscale, h->slice_num);
1177 if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1178 avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1179 avctx->skip_frame >= AVDISCARD_ALL)
1182 if (s->next_p_frame_damaged) {
1183 if (h->pict_type == AV_PICTURE_TYPE_B)
1186 s->next_p_frame_damaged = 0;
/* frame-number bookkeeping used for B-frame temporal MV scaling */
1189 if (h->pict_type == AV_PICTURE_TYPE_B) {
1190 h->frame_num_offset = h->slice_num - h->prev_frame_num;
1192 if (h->frame_num_offset < 0)
1193 h->frame_num_offset += 256;
1194 if (h->frame_num_offset == 0 ||
1195 h->frame_num_offset >= h->prev_frame_num_offset) {
1196 av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1200 h->prev_frame_num = h->frame_num;
1201 h->frame_num = h->slice_num;
1202 h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1204 if (h->prev_frame_num_offset < 0)
1205 h->prev_frame_num_offset += 256;
1208 for (m = 0; m < 2; m++) {
1210 for (i = 0; i < 4; i++) {
1212 for (j = -1; j < 4; j++)
1213 h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1215 h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
/* main macroblock loop */
1219 for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1220 for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1222 h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
/* at (or within 7 bits of) the end of the slice: advance to the next */
1224 if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1225 ((get_bits_count(&h->gb) & 7) == 0 ||
1226 show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1227 skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1228 h->gb.size_in_bits = 8 * buf_size;
1230 if (svq3_decode_slice_header(avctx))
1233 /* TODO: support s->mb_skip_run */
1236 mb_type = svq3_get_ue_golomb(&h->gb);
/* remap mb_type: I-frame types start at 8, B-frame inter types at 4 */
1238 if (h->pict_type == AV_PICTURE_TYPE_I)
1240 else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1242 if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1243 av_log(h->avctx, AV_LOG_ERROR,
1244 "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1249 ff_h264_hl_decode_mb(h);
1251 if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1252 h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1253 (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1256 ff_draw_horiz_band(avctx, NULL, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL,
1257 16 * h->mb_y, 16, h->picture_structure, 0, 0,
1258 h->low_delay, h->mb_height * 16, h->mb_width * 16);
1261 if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1262 ret = av_frame_ref(data, &s->cur_pic->f);
1263 else if (s->last_pic->f.data[0])
1264 ret = av_frame_ref(data, &s->last_pic->f);
1268 /* Do not output the last pic after seeking. */
1269 if (s->last_pic->f.data[0] || h->low_delay)
1272 if (h->pict_type != AV_PICTURE_TYPE_B) {
1273 FFSWAP(Picture*, s->cur_pic, s->next_pic);
1275 av_frame_unref(&s->cur_pic->f);
/* Decoder teardown: free all three Picture slots (buffers then the
 * structs themselves), drop the H264 context's current-frame reference,
 * and release the shared H264 context state. */
1281 static int svq3_decode_end(AVCodecContext *avctx)
1283 SVQ3Context *s = avctx->priv_data;
1284 H264Context *h = &s->h;
1286 free_picture(avctx, s->cur_pic);
1287 free_picture(avctx, s->next_pic);
1288 free_picture(avctx, s->last_pic);
1289 av_freep(&s->cur_pic);
1290 av_freep(&s->next_pic);
1291 av_freep(&s->last_pic);
1293 av_frame_unref(&h->cur_pic.f);
1295 ff_h264_free_context(h);
/* Codec registration entry for the SVQ3 decoder. */
1300 AVCodec ff_svq3_decoder = {
1302 .type = AVMEDIA_TYPE_VIDEO,
1303 .id = AV_CODEC_ID_SVQ3,
1304 .priv_data_size = sizeof(SVQ3Context),
1305 .init = svq3_decode_init,
1306 .close = svq3_decode_end,
1307 .decode = svq3_decode_frame,
1308 .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1311 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1312 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,