2 * Copyright (c) 2003 The FFmpeg Project
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
43 #include "libavutil/attributes.h"
46 #include "mpegvideo.h"
49 #include "h264data.h" // FIXME FIXME FIXME
51 #include "h264_mvpred.h"
54 #include "rectangle.h"
55 #include "vdpau_internal.h"
79 uint32_t watermark_key;
83 int next_p_frame_damaged;
86 int last_frame_output;
/* Motion-compensation modes for inter macroblocks.
 * FULLPEL/HALFPEL/THIRDPEL select the motion-vector resolution coded in the
 * bitstream; PREDICT_MODE reuses scaled motion vectors taken from the next
 * P-frame (see svq3_mc_dir(), which reads s->next_pic->motion_val). */
89 #define FULLPEL_MODE 1
90 #define HALFPEL_MODE 2
91 #define THIRDPEL_MODE 3
92 #define PREDICT_MODE 4
94 /* dual scan (from some older h264 draft)
/**
 * SVQ3-specific 4x4 coefficient scan order (index = x + y * 4).
 * Used as scan_patterns[2] in svq3_decode_block().
 * (Reconstructed from the garbled extraction: fused line numbers removed,
 * missing terminator restored.)
 */
static const uint8_t svq3_scan[16] = {
    0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
    2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
    0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
    0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
};
/**
 * Mapping from a 0..24 VLC code to a pair of 4x4 prediction-mode indices
 * (used in svq3_decode_mb() as svq3_pred_0[vlc][0] / svq3_pred_0[vlc][1]).
 * The 25 entries walk the anti-diagonals of a 5x5 grid.
 * (Reconstructed: the first and last anti-diagonal rows were missing from
 * the garbled extraction — TODO confirm against upstream svq3.c.)
 */
static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 3, 4 }, { 4, 3 },
    { 4, 4 }
};
/**
 * Intra 4x4 prediction-mode candidate lists, indexed as
 * svq3_pred_1[top_mode + 1][left_mode + 1][rank]; -1 marks an invalid slot
 * (svq3_decode_mb() reports "weird prediction" when it selects one).
 * All 36 rows were present in the extraction; only the fused line numbers
 * and missing terminator are repaired.
 */
static const int8_t svq3_pred_1[6][6][5] = {
    { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
      { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } }
};
/**
 * Run/level pairs for the first 16 DCT-coefficient VLC codes,
 * [0] for intra, [1] for inter blocks (see svq3_decode_block();
 * codes >= 16 are decoded arithmetically from the VLC value instead).
 * Member declarations were missing from the garbled extraction and are
 * restored here; the data rows are exactly as extracted.
 */
static const struct {
    uint8_t run;
    uint8_t level;
} svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};
/**
 * Dequantization multipliers indexed by qscale (0..31); applied with
 * rounding ">> 20" in the IDCT helpers below. Terminator restored,
 * fused line numbers removed; values exactly as extracted.
 */
static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
     9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
    24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
    61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
};
154 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
156 const int qmul = svq3_dequant_coeff[qp];
160 static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
162 for (i = 0; i < 4; i++) {
163 const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
164 const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
165 const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
166 const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
168 temp[4 * i + 0] = z0 + z3;
169 temp[4 * i + 1] = z1 + z2;
170 temp[4 * i + 2] = z1 - z2;
171 temp[4 * i + 3] = z0 - z3;
174 for (i = 0; i < 4; i++) {
175 const int offset = x_offset[i];
176 const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
177 const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
178 const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
179 const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
181 output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
182 output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
183 output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
184 output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
189 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
190 int stride, int qp, int dc)
192 const int qmul = svq3_dequant_coeff[qp];
196 dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
197 : qmul * (block[0] >> 3) / 2);
201 for (i = 0; i < 4; i++) {
202 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
203 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
204 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
205 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
207 block[0 + 4 * i] = z0 + z3;
208 block[1 + 4 * i] = z1 + z2;
209 block[2 + 4 * i] = z1 - z2;
210 block[3 + 4 * i] = z0 - z3;
213 for (i = 0; i < 4; i++) {
214 const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
215 const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
216 const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
217 const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
218 const int rr = (dc + 0x80000);
220 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
221 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
222 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
223 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
226 memset(block, 0, 16 * sizeof(int16_t));
/* Decode one run/level-coded coefficient block into block[] using the scan
 * pattern selected by `type` (0 = luma DC zigzag, 1 = zigzag, 2 = SVQ3 scan,
 * 3 = chroma DC). `intra = 3 * type >> 2` folds types {0,1,2} -> intra table
 * selection for svq3_dct_tables. Returns nonzero on bitstream error
 * (negative VLC or run overflowing `limit`).
 * NOTE(review): this extraction is missing lines (gaps in the embedded
 * numbering) — e.g. the `vlc` declaration, error returns, type==3 run/level
 * branches and closing braces are not visible here. */
229 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
230 int index, const int type)
232 static const uint8_t *const scan_patterns[4] =
233 { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
235 int run, level, sign, limit;
237 const int intra = 3 * type >> 2;
238 const uint8_t *const scan = scan_patterns[type];
/* outer loop: for type 2 the 16 coefficients are coded in 8-coeff windows */
240 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
241 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
242 if ((int32_t)vlc < 0)
/* LSB of vlc carries the sign; sign is 0 or -1 for (x ^ sign) - sign */
245 sign = (vlc & 1) ? 0 : -1;
252 } else if (vlc < 4) {
257 level = (vlc + 9 >> 2) - run;
/* small codes: direct run/level lookup */
261 run = svq3_dct_tables[intra][vlc].run;
262 level = svq3_dct_tables[intra][vlc].level;
/* escape codes: derive level from vlc high bits plus a run-dependent bias */
265 level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
268 level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
273 if ((index += run) >= limit)
/* apply sign and store at the scan position */
276 block[scan[index]] = (level ^ sign) - sign;
/* Motion-compensate one partition (luma + chroma unless CODEC_FLAG_GRAY)
 * of size width x height at (x,y), from the last (dir == 0) or next
 * (dir != 0) reference picture, using thirdpel or hpel interpolation
 * tables, optionally averaging into the destination (avg).
 * NOTE(review): lines are missing from this extraction (declarations of
 * src/dest/i, the `emu` edge test, call-argument tails and closing braces). */
287 static inline void svq3_mc_dir_part(SVQ3Context *s,
288 int x, int y, int width, int height,
289 int mx, int my, int dxy,
290 int thirdpel, int dir, int avg)
292 H264Context *h = &s->h;
293 const Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
296 int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
/* detect out-of-frame references; clamped below and handled via
 * emulated_edge_mc */
301 if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
302 my < 0 || my >= s->v_edge_pos - height - 1) {
304 mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
305 my = av_clip(my, -16, s->v_edge_pos - height + 15);
308 /* form component predictions */
309 dest = h->cur_pic.f.data[0] + x + y * h->linesize;
310 src = pic->f.data[0] + mx + my * h->linesize;
313 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->linesize,
314 width + 1, height + 1,
315 mx, my, s->h_edge_pos, s->v_edge_pos);
316 src = h->edge_emu_buffer;
/* luma: thirdpel tables vs. hpel tables depending on `thirdpel` */
319 (avg ? h->dsp.avg_tpel_pixels_tab
320 : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
323 (avg ? s->hdsp.avg_pixels_tab
324 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
/* chroma planes at half resolution, skipped in gray-only decoding */
327 if (!(h->flags & CODEC_FLAG_GRAY)) {
328 mx = mx + (mx < (int) x) >> 1;
329 my = my + (my < (int) y) >> 1;
331 height = height >> 1;
334 for (i = 1; i < 3; i++) {
335 dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
336 src = pic->f.data[i] + mx + my * h->uvlinesize;
339 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->uvlinesize,
340 width + 1, height + 1,
341 mx, my, (s->h_edge_pos >> 1),
343 src = h->edge_emu_buffer;
346 (avg ? h->dsp.avg_tpel_pixels_tab
347 : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
351 (avg ? s->hdsp.avg_pixels_tab
352 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/* Decode/predict the motion vectors for all partitions of the current
 * macroblock (partition geometry derived from `size`) and run motion
 * compensation via svq3_mc_dir_part(). In PREDICT_MODE the MVs are taken
 * from s->next_pic and rescaled by the frame-number offsets instead of
 * being read from the bitstream. Returns < 0 on an invalid MV VLC.
 * NOTE(review): extraction gaps — `dir`/`avg` parameters, the fx/fy/dxy
 * declarations, several else branches and closing braces are missing here. */
359 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
362 int i, j, k, mx, my, dx, dy, x, y;
363 H264Context *h = &s->h;
364 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
365 const int part_height = 16 >> ((unsigned)(size + 1) / 3);
366 const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
367 const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
368 const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
/* iterate over the partitions of the 16x16 macroblock */
370 for (i = 0; i < 16; i += part_height)
371 for (j = 0; j < 16; j += part_width) {
372 const int b_xy = (4 * h->mb_x + (j >> 2)) +
373 (4 * h->mb_y + (i >> 2)) * h->b_stride;
375 x = 16 * h->mb_x + j;
376 y = 16 * h->mb_y + i;
377 k = (j >> 2 & 1) + (i >> 1 & 2) +
378 (j >> 1 & 4) + (i & 8);
/* MV prediction: median predictor, or scaled co-located MV from next_pic */
380 if (mode != PREDICT_MODE) {
381 pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
383 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
384 my = s->next_pic->motion_val[0][b_xy][1] << 1;
387 mx = mx * h->frame_num_offset /
388 h->prev_frame_num_offset + 1 >> 1;
389 my = my * h->frame_num_offset /
390 h->prev_frame_num_offset + 1 >> 1;
392 mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
393 h->prev_frame_num_offset + 1 >> 1;
394 my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
395 h->prev_frame_num_offset + 1 >> 1;
399 /* clip motion vector prediction to frame border */
400 mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
401 my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
403 /* get (optional) motion vector differential */
404 if (mode == PREDICT_MODE) {
407 dy = svq3_get_se_golomb(&h->gb);
408 dx = svq3_get_se_golomb(&h->gb);
410 if (dx == INVALID_VLC || dy == INVALID_VLC) {
411 av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
416 /* compute motion vector */
417 if (mode == THIRDPEL_MODE) {
419 mx = (mx + 1 >> 1) + dx;
420 my = (my + 1 >> 1) + dy;
421 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
422 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
423 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
425 svq3_mc_dir_part(s, x, y, part_width, part_height,
426 fx, fy, dxy, 1, dir, avg);
429 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
430 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
431 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
432 dxy = (mx & 1) + 2 * (my & 1);
434 svq3_mc_dir_part(s, x, y, part_width, part_height,
435 mx >> 1, my >> 1, dxy, 0, dir, avg);
/* fullpel fallback: round sixth-pel prediction to integer pixels */
439 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
440 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
442 svq3_mc_dir_part(s, x, y, part_width, part_height,
443 mx, my, 0, 0, dir, avg);
448 /* update mv_cache */
449 if (mode != PREDICT_MODE) {
450 int32_t mv = pack16to32(mx, my);
452 if (part_height == 8 && i < 8) {
453 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
455 if (part_width == 8 && j < 8)
456 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
458 if (part_width == 8 && j < 8)
459 AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
460 if (part_width == 4 || part_height == 4)
461 AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
464 /* write back motion vectors */
465 fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
466 part_width >> 2, part_height >> 2, h->b_stride,
467 pack16to32(mx, my), 4);
/* Decode one macroblock: dispatch on mb_type (0 = SKIP, < 8 = INTER,
 * 8/33 = INTRA4x4/DC_128, else INTRA16x16), then decode CBP, qscale delta
 * and residual coefficient blocks, and record mb_type/pred modes in
 * h->cur_pic. Returns nonzero on bitstream error (error paths mostly in
 * lines missing from this extraction).
 * NOTE(review): large gaps — declarations of cbp/vlc/left/top, many error
 * returns, else branches and closing braces are not visible here. */
473 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
475 H264Context *h = &s->h;
476 int i, j, k, m, dir, mode;
480 const int mb_xy = h->mb_xy;
481 const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
/* availability masks for edge macroblocks */
483 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
484 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
485 h->topright_samples_available = 0xFFFF;
487 if (mb_type == 0) { /* SKIP */
488 if (h->pict_type == AV_PICTURE_TYPE_P ||
489 s->next_pic->mb_type[mb_xy] == -1) {
490 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
493 if (h->pict_type == AV_PICTURE_TYPE_B)
494 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
497 mb_type = MB_TYPE_SKIP;
/* B-frame skip: reuse co-located mb_type from next_pic, predict both dirs */
499 mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
500 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
502 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
505 mb_type = MB_TYPE_16x16;
507 } else if (mb_type < 8) { /* INTER */
/* MV resolution: an extra bit distinguishes thirdpel/halfpel/fullpel */
508 if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
509 mode = THIRDPEL_MODE;
510 else if (s->halfpel_flag &&
511 s->thirdpel_flag == !get_bits1(&h->gb))
517 /* note ref_cache should contain here:
/* fill mv_cache/ref_cache from neighbouring macroblocks for prediction */
525 for (m = 0; m < 2; m++) {
526 if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
527 for (i = 0; i < 4; i++)
528 AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
529 h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
531 for (i = 0; i < 4; i++)
532 AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
535 memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
536 h->cur_pic.motion_val[m][b_xy - h->b_stride],
537 4 * 2 * sizeof(int16_t));
538 memset(&h->ref_cache[m][scan8[0] - 1 * 8],
539 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
541 if (h->mb_x < h->mb_width - 1) {
542 AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
543 h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
544 h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
545 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
546 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
548 h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
550 AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
551 h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
552 h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
553 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
555 h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
557 memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
558 PART_NOT_AVAILABLE, 8);
560 if (h->pict_type != AV_PICTURE_TYPE_B)
564 /* decode motion vector(s) and form prediction(s) */
565 if (h->pict_type == AV_PICTURE_TYPE_P) {
566 if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
568 } else { /* AV_PICTURE_TYPE_B */
570 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
573 for (i = 0; i < 4; i++)
574 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
575 0, 4 * 2 * sizeof(int16_t));
578 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
581 for (i = 0; i < 4; i++)
582 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
583 0, 4 * 2 * sizeof(int16_t));
587 mb_type = MB_TYPE_16x16;
588 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
589 memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* import neighbour intra pred modes into the cache (left and top) */
593 for (i = 0; i < 4; i++)
594 h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
595 if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
596 h->left_samples_available = 0x5F5F;
599 h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
600 h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
601 h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
602 h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
604 if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
605 h->top_samples_available = 0x33FF;
608 /* decode prediction codes for luma blocks */
609 for (i = 0; i < 16; i += 2) {
610 vlc = svq3_get_ue_golomb(&h->gb);
613 av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
617 left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
618 top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
/* one VLC yields modes for two 4x4 blocks via svq3_pred_0/svq3_pred_1 */
620 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
621 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
623 if (left[1] == -1 || left[2] == -1) {
624 av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
628 } else { /* mb_type == 33, DC_128_PRED block type */
629 for (i = 0; i < 4; i++)
630 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
633 write_back_intra_pred_mode(h);
636 ff_h264_check_intra4x4_pred_mode(h);
638 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
639 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
641 for (i = 0; i < 4; i++)
642 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
644 h->top_samples_available = 0x33FF;
645 h->left_samples_available = 0x5F5F;
648 mb_type = MB_TYPE_INTRA4x4;
649 } else { /* INTRA16x16 */
650 dir = i_mb_type_info[mb_type - 8].pred_mode;
651 dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
653 if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) == -1) {
654 av_log(h->avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
658 cbp = i_mb_type_info[mb_type - 8].cbp;
659 mb_type = MB_TYPE_INTRA16x16;
/* intra MB in an inter picture: invalidate MV references */
662 if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
663 for (i = 0; i < 4; i++)
664 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
665 0, 4 * 2 * sizeof(int16_t));
666 if (h->pict_type == AV_PICTURE_TYPE_B) {
667 for (i = 0; i < 4; i++)
668 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
669 0, 4 * 2 * sizeof(int16_t));
672 if (!IS_INTRA4x4(mb_type)) {
673 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
675 if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
676 memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
/* coded block pattern (not coded for INTRA16x16 / P-skip) */
679 if (!IS_INTRA16x16(mb_type) &&
680 (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
681 if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48U){
682 av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
686 cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
687 : golomb_to_inter_cbp[vlc];
689 if (IS_INTRA16x16(mb_type) ||
690 (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
691 h->qscale += svq3_get_se_golomb(&h->gb);
693 if (h->qscale > 31u) {
694 av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
698 if (IS_INTRA16x16(mb_type)) {
699 AV_ZERO128(h->mb_luma_dc[0] + 0);
700 AV_ZERO128(h->mb_luma_dc[0] + 8);
701 if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
702 av_log(h->avctx, AV_LOG_ERROR,
703 "error while decoding intra luma dc\n");
/* residual luma blocks per CBP bit, then chroma DC and AC */
709 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
710 const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
712 for (i = 0; i < 4; i++)
713 if ((cbp & (1 << i))) {
714 for (j = 0; j < 4; j++) {
715 k = index ? (1 * (j & 1) + 2 * (i & 1) +
716 2 * (j & 2) + 4 * (i & 2))
718 h->non_zero_count_cache[scan8[k]] = 1;
720 if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
721 av_log(h->avctx, AV_LOG_ERROR,
722 "error while decoding block\n");
729 for (i = 1; i < 3; ++i)
730 if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
731 av_log(h->avctx, AV_LOG_ERROR,
732 "error while decoding chroma dc block\n");
737 for (i = 1; i < 3; i++) {
738 for (j = 0; j < 4; j++) {
740 h->non_zero_count_cache[scan8[k]] = 1;
742 if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
743 av_log(h->avctx, AV_LOG_ERROR,
744 "error while decoding chroma ac block\n");
754 h->cur_pic.mb_type[mb_xy] = mb_type;
756 if (IS_INTRA(mb_type))
757 h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
/* Parse one slice header: validate the 8-bit header byte, locate the next
 * slice (next_slice_index), undo the optional watermark XOR on the slice
 * payload, read slice type / slice_num / qscale / adaptive_quant, and reset
 * intra predictors along the slice boundary. Returns nonzero on error.
 * NOTE(review): extraction gaps — declarations (header, i, slice_id),
 * several error returns and closing braces are missing here. */
762 static int svq3_decode_slice_header(AVCodecContext *avctx)
764 SVQ3Context *s = avctx->priv_data;
765 H264Context *h = &s->h;
766 const int mb_xy = h->mb_xy;
770 header = get_bits(&h->gb, 8);
/* low 5+2 bits: slice start code must be 1 or 2, length field nonzero */
772 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
774 av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
777 int length = header >> 5 & 3;
779 s->next_slice_index = get_bits_count(&h->gb) +
780 8 * show_bits(&h->gb, 8 * length) +
783 if (s->next_slice_index > h->gb.size_in_bits) {
784 av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
788 h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
789 skip_bits(&h->gb, 8);
/* de-watermark: XOR the next 32 bits of payload with the watermark key */
791 if (s->watermark_key) {
792 uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
793 AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
794 header ^ s->watermark_key);
797 memmove((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
798 &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
800 skip_bits_long(&h->gb, 0);
803 if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
804 av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
808 h->slice_type = golomb_to_pict_type[slice_id];
/* slice start code 2 carries an explicit macroblock skip run */
810 if ((header & 0x9F) == 2) {
811 i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
812 h->mb_skip_run = get_bits(&h->gb, i) -
813 (h->mb_y * h->mb_width + h->mb_x);
819 h->slice_num = get_bits(&h->gb, 8);
820 h->qscale = get_bits(&h->gb, 5);
821 s->adaptive_quant = get_bits1(&h->gb);
/* unknown/reserved header fields are skipped */
830 skip_bits(&h->gb, 2);
832 while (get_bits1(&h->gb))
833 skip_bits(&h->gb, 8);
835 /* reset intra predictors and invalidate motion vector references */
837 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
838 -1, 4 * sizeof(int8_t));
839 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
840 -1, 8 * sizeof(int8_t) * h->mb_x);
843 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
844 -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
847 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/* Decoder init: allocate the three Picture slots, initialize the embedded
 * H.264 context, then parse the SVQ3 "SEQH" extradata atom (frame size,
 * halfpel/thirdpel flags, low_delay, optional zlib-compressed watermark
 * used to derive watermark_key), and size the macroblock geometry.
 * NOTE(review): extraction gaps — declarations (m, gb, size,
 * frame_size_code, buf), the switch cases for fixed frame sizes, several
 * returns/braces and the final `return 0;` are not visible here. */
853 static av_cold int svq3_decode_init(AVCodecContext *avctx)
855 SVQ3Context *s = avctx->priv_data;
856 H264Context *h = &s->h;
858 unsigned char *extradata;
859 unsigned char *extradata_end;
861 int marker_found = 0;
/* the three picture slots are owned by this decoder, freed in decode_end */
863 s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
864 s->last_pic = av_mallocz(sizeof(*s->last_pic));
865 s->next_pic = av_mallocz(sizeof(*s->next_pic));
866 if (!s->next_pic || !s->last_pic || !s->cur_pic) {
867 av_freep(&s->cur_pic);
868 av_freep(&s->last_pic);
869 av_freep(&s->next_pic);
870 return AVERROR(ENOMEM);
873 if (ff_h264_decode_init(avctx) < 0)
876 ff_hpeldsp_init(&s->hdsp, avctx->flags);
877 h->flags = avctx->flags;
/* SVQ3 is always 4:2:0 progressive */
879 h->sps.chroma_format_idc = 1;
880 h->picture_structure = PICT_FRAME;
881 avctx->pix_fmt = avctx->codec->pix_fmts[0];
883 h->chroma_qp[0] = h->chroma_qp[1] = 4;
884 h->chroma_x_shift = h->chroma_y_shift = 1;
887 s->thirdpel_flag = 1;
890 /* prowl for the "SEQH" marker in the extradata */
891 extradata = (unsigned char *)avctx->extradata;
892 extradata_end = avctx->extradata + avctx->extradata_size;
894 for (m = 0; m + 8 < avctx->extradata_size; m++) {
895 if (!memcmp(extradata, "SEQH", 4)) {
903 /* if a match was found, parse the extra data */
908 size = AV_RB32(&extradata[4]);
909 if (size > extradata_end - extradata - 8)
910 return AVERROR_INVALIDDATA;
911 init_get_bits(&gb, extradata + 8, size * 8);
913 /* 'frame size code' and optional 'width, height' */
914 frame_size_code = get_bits(&gb, 3);
915 switch (frame_size_code) {
/* code 7: explicit 12-bit width/height follow */
945 avctx->width = get_bits(&gb, 12);
946 avctx->height = get_bits(&gb, 12);
950 s->halfpel_flag = get_bits1(&gb);
951 s->thirdpel_flag = get_bits1(&gb);
959 h->low_delay = get_bits1(&gb);
964 while (get_bits1(&gb))
967 s->unknown_flag = get_bits1(&gb);
968 avctx->has_b_frames = !h->low_delay;
969 if (s->unknown_flag) {
/* watermarked stream: inflate the logo and derive the XOR key from its
 * checksum (requires zlib; the CONFIG_ZLIB guard is in missing lines) */
971 unsigned watermark_width = svq3_get_ue_golomb(&gb);
972 unsigned watermark_height = svq3_get_ue_golomb(&gb);
973 int u1 = svq3_get_ue_golomb(&gb);
974 int u2 = get_bits(&gb, 8);
975 int u3 = get_bits(&gb, 2);
976 int u4 = svq3_get_ue_golomb(&gb);
977 unsigned long buf_len = watermark_width *
978 watermark_height * 4;
979 int offset = get_bits_count(&gb) + 7 >> 3;
982 if (watermark_height <= 0 || (uint64_t)watermark_width*4 > UINT_MAX/watermark_height)
985 buf = av_malloc(buf_len);
986 av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
987 watermark_width, watermark_height);
988 av_log(avctx, AV_LOG_DEBUG,
989 "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
990 u1, u2, u3, u4, offset);
991 if (uncompress(buf, &buf_len, extradata + 8 + offset,
992 size - offset) != Z_OK) {
993 av_log(avctx, AV_LOG_ERROR,
994 "could not uncompress watermark logo\n");
998 s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
999 s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1000 av_log(avctx, AV_LOG_DEBUG,
1001 "watermark key %#x\n", s->watermark_key);
1004 av_log(avctx, AV_LOG_ERROR,
1005 "this svq3 file contains watermark which need zlib support compiled in\n");
/* derive macroblock geometry from the (possibly extradata-set) size */
1011 h->width = avctx->width;
1012 h->height = avctx->height;
1013 h->mb_width = (h->width + 15) / 16;
1014 h->mb_height = (h->height + 15) / 16;
1015 h->mb_stride = h->mb_width + 1;
1016 h->mb_num = h->mb_width * h->mb_height;
1017 h->b_stride = 4 * h->mb_width;
1018 s->h_edge_pos = h->mb_width * 16;
1019 s->v_edge_pos = h->mb_height * 16;
1021 if (ff_h264_alloc_tables(h) < 0) {
1022 av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1023 return AVERROR(ENOMEM);
1029 static void free_picture(AVCodecContext *avctx, Picture *pic)
1032 for (i = 0; i < 2; i++) {
1033 av_buffer_unref(&pic->motion_val_buf[i]);
1034 av_buffer_unref(&pic->ref_index_buf[i]);
1036 av_buffer_unref(&pic->mb_type_buf);
1038 av_frame_unref(&pic->f);
/* Allocate (on first use) the per-picture side buffers — mb_type, motion
 * vectors, reference indices — then obtain frame data via ff_get_buffer(),
 * lazily allocate the edge-emulation scratch buffer, and record the luma
 * and chroma line sizes. Returns 0 on success, negative AVERROR on failure
 * (allocation errors unwind through free_picture()).
 * NOTE(review): extraction gaps — `int i, ret;` declarations, the success
 * return and the `fail:` label around the final free_picture() call are
 * not visible here. */
1041 static int get_buffer(AVCodecContext *avctx, Picture *pic)
1043 SVQ3Context *s = avctx->priv_data;
1044 H264Context *h = &s->h;
1045 const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1046 const int mb_array_size = h->mb_stride * h->mb_height;
1047 const int b4_stride = h->mb_width * 4 + 1;
1048 const int b4_array_size = b4_stride * h->mb_height * 4;
/* side buffers are allocated once and kept across frames */
1051 if (!pic->motion_val_buf[0]) {
1054 pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1055 if (!pic->mb_type_buf)
1056 return AVERROR(ENOMEM);
1057 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1059 for (i = 0; i < 2; i++) {
1060 pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1061 pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1062 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1063 ret = AVERROR(ENOMEM);
1067 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1068 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* B-frames are never used as references */
1071 pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1073 ret = ff_get_buffer(avctx, &pic->f,
1074 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1078 if (!h->edge_emu_buffer) {
1079 h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1080 if (!h->edge_emu_buffer)
1081 return AVERROR(ENOMEM);
1084 h->linesize = pic->f.linesize[0];
1085 h->uvlinesize = pic->f.linesize[1];
1089 free_picture(avctx, pic);
/* Decode one frame: flush the delayed picture on empty input, optionally
 * de-watermark the packet into s->buf, parse the slice header, manage the
 * cur/last/next picture rotation, decode all macroblocks (re-parsing slice
 * headers at slice boundaries), and output either the current picture
 * (B/low-delay) or the delayed last picture.
 * NOTE(review): extraction gaps — declarations (buf, ret, m, i, j, left,
 * mb_type), several returns/braces and some statements (e.g. the `buf =`
 * assignments after the watermark copy) are not visible here. */
1093 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1094 int *got_frame, AVPacket *avpkt)
1096 SVQ3Context *s = avctx->priv_data;
1097 H264Context *h = &s->h;
1098 int buf_size = avpkt->size;
1103 /* special case for last picture */
1104 if (buf_size == 0) {
1105 if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1106 ret = av_frame_ref(data, &s->next_pic->f);
1109 s->last_frame_output = 1;
1115 h->mb_x = h->mb_y = h->mb_xy = 0;
/* watermarked streams are de-XORed in place, so work on a private copy */
1117 if (s->watermark_key) {
1118 av_fast_malloc(&s->buf, &s->buf_size,
1119 buf_size+FF_INPUT_BUFFER_PADDING_SIZE);
1121 return AVERROR(ENOMEM);
1122 memcpy(s->buf, avpkt->data, buf_size);
1128 init_get_bits(&h->gb, buf, 8 * buf_size);
1130 if (svq3_decode_slice_header(avctx))
1133 h->pict_type = h->slice_type;
/* reference rotation: last <- next for P/I pictures */
1135 if (h->pict_type != AV_PICTURE_TYPE_B)
1136 FFSWAP(Picture*, s->next_pic, s->last_pic);
1138 av_frame_unref(&s->cur_pic->f);
1140 /* for skipping the frame */
1141 s->cur_pic->f.pict_type = h->pict_type;
1142 s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1144 ret = get_buffer(avctx, s->cur_pic);
/* mirror the current picture into the embedded H264Context */
1148 h->cur_pic_ptr = s->cur_pic;
1149 av_frame_unref(&h->cur_pic.f);
1150 h->cur_pic = *s->cur_pic;
1151 ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
1155 for (i = 0; i < 16; i++) {
1156 h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1157 h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1159 for (i = 0; i < 16; i++) {
1160 h->block_offset[16 + i] =
1161 h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1162 h->block_offset[48 + 16 + i] =
1163 h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
/* synthesize grey reference pictures when references are missing (seek) */
1166 if (h->pict_type != AV_PICTURE_TYPE_I) {
1167 if (!s->last_pic->f.data[0]) {
1168 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1169 ret = get_buffer(avctx, s->last_pic);
1172 memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1173 memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1174 s->last_pic->f.linesize[1]);
1175 memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1176 s->last_pic->f.linesize[2]);
1179 if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1180 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1181 ret = get_buffer(avctx, s->next_pic);
1184 memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1185 memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1186 s->next_pic->f.linesize[1]);
1187 memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1188 s->next_pic->f.linesize[2]);
1192 if (avctx->debug & FF_DEBUG_PICT_INFO)
1193 av_log(h->avctx, AV_LOG_DEBUG,
1194 "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1195 av_get_picture_type_char(h->pict_type),
1196 s->halfpel_flag, s->thirdpel_flag,
1197 s->adaptive_quant, h->qscale, h->slice_num);
1199 if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1200 avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1201 avctx->skip_frame >= AVDISCARD_ALL)
1204 if (s->next_p_frame_damaged) {
1205 if (h->pict_type == AV_PICTURE_TYPE_B)
1208 s->next_p_frame_damaged = 0;
/* frame-number bookkeeping used for PREDICT_MODE MV scaling */
1211 if (h->pict_type == AV_PICTURE_TYPE_B) {
1212 h->frame_num_offset = h->slice_num - h->prev_frame_num;
1214 if (h->frame_num_offset < 0)
1215 h->frame_num_offset += 256;
1216 if (h->frame_num_offset == 0 ||
1217 h->frame_num_offset >= h->prev_frame_num_offset) {
1218 av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1222 h->prev_frame_num = h->frame_num;
1223 h->frame_num = h->slice_num;
1224 h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1226 if (h->prev_frame_num_offset < 0)
1227 h->prev_frame_num_offset += 256;
/* initialize the reference cache borders once per frame */
1230 for (m = 0; m < 2; m++) {
1232 for (i = 0; i < 4; i++) {
1234 for (j = -1; j < 4; j++)
1235 h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1237 h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
/* main macroblock loop */
1241 for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1242 for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1244 h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
/* slice boundary: re-sync the bit reader and parse the next header */
1246 if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1247 ((get_bits_count(&h->gb) & 7) == 0 ||
1248 show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1249 skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1250 h->gb.size_in_bits = 8 * buf_size;
1252 if (svq3_decode_slice_header(avctx))
1255 /* TODO: support s->mb_skip_run */
1258 mb_type = svq3_get_ue_golomb(&h->gb);
/* mb_type is biased by picture type: +8 for I, +4 for B */
1260 if (h->pict_type == AV_PICTURE_TYPE_I)
1262 else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1264 if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1265 av_log(h->avctx, AV_LOG_ERROR,
1266 "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1270 if (mb_type != 0 || h->cbp)
1271 ff_h264_hl_decode_mb(h);
1273 if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1274 h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1275 (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1278 ff_draw_horiz_band(avctx, NULL, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL,
1279 16 * h->mb_y, 16, h->picture_structure, 0, 0,
1280 h->low_delay, h->mb_height * 16, h->mb_width * 16);
1283 left = buf_size*8 - get_bits_count(&h->gb);
1285 if (h->mb_y != h->mb_height || h->mb_x != h->mb_width) {
1286 av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, h->mb_y, h->mb_x, left);
1287 //av_hex_dump(stderr, buf+buf_size-8, 8);
1291 av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
/* output: current picture directly, or the delayed last picture */
1295 if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1296 ret = av_frame_ref(data, &s->cur_pic->f);
1297 else if (s->last_pic->f.data[0])
1298 ret = av_frame_ref(data, &s->last_pic->f);
1302 /* Do not output the last pic after seeking. */
1303 if (s->last_pic->f.data[0] || h->low_delay)
1306 if (h->pict_type != AV_PICTURE_TYPE_B) {
1307 FFSWAP(Picture*, s->cur_pic, s->next_pic);
1309 av_frame_unref(&s->cur_pic->f);
1315 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1317 SVQ3Context *s = avctx->priv_data;
1318 H264Context *h = &s->h;
1320 free_picture(avctx, s->cur_pic);
1321 free_picture(avctx, s->next_pic);
1322 free_picture(avctx, s->last_pic);
1323 av_freep(&s->cur_pic);
1324 av_freep(&s->next_pic);
1325 av_freep(&s->last_pic);
1327 av_frame_unref(&h->cur_pic.f);
1329 ff_h264_free_context(h);
1333 av_freep(&h->edge_emu_buffer);
1338 AVCodec ff_svq3_decoder = {
1340 .type = AVMEDIA_TYPE_VIDEO,
1341 .id = AV_CODEC_ID_SVQ3,
1342 .priv_data_size = sizeof(SVQ3Context),
1343 .init = svq3_decode_init,
1344 .close = svq3_decode_end,
1345 .decode = svq3_decode_frame,
1346 .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1349 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1350 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,