2 * Copyright (c) 2003 The FFmpeg Project
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
44 #include "mpegvideo.h"
47 #include "h264data.h" // FIXME FIXME FIXME
49 #include "h264_mvpred.h"
52 #include "rectangle.h"
53 #include "vdpau_internal.h"
/* NOTE(review): the fields below belong to a decoder context struct whose
 * opening declaration is not visible in this extract — confirm against the
 * full file before relying on layout. */
77 uint32_t watermark_key;
/* set when a damaged P-frame must be skipped on the next decode call */
81 int next_p_frame_damaged;
/* flag: the final delayed picture has already been returned after EOF */
84 int last_frame_output;
/* inter-prediction precision modes used by svq3_mc_dir() below */
87 #define FULLPEL_MODE 1
88 #define HALFPEL_MODE 2
89 #define THIRDPEL_MODE 3
90 #define PREDICT_MODE 4
92 /* dual scan (from some older h264 draft)
/* SVQ3-specific coefficient scan order for 4x4 blocks; entries are
 * positions encoded as x + y * 4 (a "dual scan" from an old H.264 draft,
 * per the comment fragment above).
 * NOTE(review): closing line of this array is not visible in this extract. */
101 static const uint8_t svq3_scan[16] = {
102 0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
103 2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
104 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
105 0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
/* Maps a luma-prediction VLC code to a pair of indices into svq3_pred_1
 * (used in svq3_decode_mb for INTRA4x4 prediction-mode decoding).
 * NOTE(review): the first and last rows of this 25-entry table are not
 * visible in this extract. */
108 static const uint8_t svq3_pred_0[25][2] = {
111 { 0, 2 }, { 1, 1 }, { 2, 0 },
112 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
113 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
114 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
115 { 2, 4 }, { 3, 3 }, { 4, 2 },
/* Context-dependent intra 4x4 prediction-mode table, indexed as
 * [top_mode + 1][left_mode + 1][rank]; -1 marks an invalid combination
 * (checked as "weird prediction" in svq3_decode_mb). */
120 static const int8_t svq3_pred_1[6][6][5] = {
121 { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
122 { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
123 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
124 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
125 { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
126 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
127 { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
128 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
129 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
130 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
131 { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
132 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/* Run/level lookup for short DCT coefficient VLCs; indexed
 * [intra][vlc - 4] in svq3_decode_block (table 0 = inter, 1 = intra).
 * NOTE(review): the struct member declarations (run/level fields) are not
 * visible in this extract. */
135 static const struct {
138 } svq3_dct_tables[2][16] = {
139 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
140 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
141 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
142 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/* Dequantization multipliers indexed by qscale (0..31); consumed by the
 * IDCT helpers below, which scale by qmul and round with +0x80000 >> 20. */
145 static const uint32_t svq3_dequant_coeff[32] = {
146 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
147 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
148 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
149 61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
/* Dequantize and inverse-transform the 4x4 luma DC coefficient block.
 * Uses the SVQ3 integer transform (rotation constants 13/7/17), first over
 * rows into temp[], then over columns, scattering results into output[]
 * at the DC positions given by x_offset[] and the stride multiples.
 * NOTE(review): declarations of i, temp[] and stride are in lines missing
 * from this extract.
 *
 * output: destination coefficient array (DC positions written)
 * input:  16 raw DC coefficients
 * qp:     quantizer index into svq3_dequant_coeff (caller must keep 0..31) */
152 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
154     const int qmul = svq3_dequant_coeff[qp];
158     static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
/* horizontal (row) pass */
160     for (i = 0; i < 4; i++) {
161         const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
162         const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
163         const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
164         const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
166         temp[4 * i + 0] = z0 + z3;
167         temp[4 * i + 1] = z1 + z2;
168         temp[4 * i + 2] = z1 - z2;
169         temp[4 * i + 3] = z0 - z3;
/* vertical (column) pass with dequant; "* qmul + 0x80000 >> 20" is
 * round-to-nearest before the 20-bit shift (additive binds tighter than
 * shift in C — intentional, not a precedence bug) */
172     for (i = 0; i < 4; i++) {
173         const int offset = x_offset[i];
174         const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
175         const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
176         const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
177         const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
179         output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
180         output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
181         output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
182         output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
/* Dequantize, inverse-transform a 4x4 block and add the result to dst
 * (clipped to 8 bits). When dc is nonzero the DC term is computed
 * separately: dc == 1 selects a fixed 1538 multiplier, otherwise the
 * quantizer-scaled path is used. block[] is cleared on exit so the caller
 * can reuse it.
 * NOTE(review): the loop-variable declaration and some control lines are
 * missing from this extract. */
187 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
188                         int stride, int qp, int dc)
190     const int qmul = svq3_dequant_coeff[qp];
/* pre-scale the DC coefficient; 13*13 matches the two transform passes */
194         dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
195                                 : qmul * (block[0] >> 3) / 2);
/* horizontal pass, in place */
199     for (i = 0; i < 4; i++) {
200         const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
201         const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
202         const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
203         const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
205         block[0 + 4 * i] = z0 + z3;
206         block[1 + 4 * i] = z1 + z2;
207         block[2 + 4 * i] = z1 - z2;
208         block[3 + 4 * i] = z0 - z3;
/* vertical pass: dequantize, add DC + rounding bias, shift, clip, add */
211     for (i = 0; i < 4; i++) {
212         const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
213         const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
214         const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
215         const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
216         const int rr = (dc + 0x80000);
218         dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
219         dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
220         dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
221         dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
224     memset(block, 0, 16 * sizeof(int16_t));
/* Decode one run/level-coded coefficient block from the bitstream.
 * type selects the scan pattern (0 = luma DC zigzag, 1 = zigzag,
 * 2 = svq3_scan, 3 = chroma DC); "intra" is derived from type and picks
 * the run/level table half. Short VLCs use svq3_dct_tables; longer codes
 * compute run/level arithmetically from the raw value.
 * NOTE(review): several branch/return lines are missing from this
 * extract, so the full error paths cannot be documented here. */
227 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
228                                     int index, const int type)
230     static const uint8_t *const scan_patterns[4] =
231     { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
233     int run, level, sign, limit;
/* 3*type >> 2 maps type {0,1} -> 0 (inter table), {2,3} -> 1 (intra) */
235     const int intra = 3 * type >> 2;
236     const uint8_t *const scan = scan_patterns[type];
/* outer loop widens the coefficient limit (16 >> intra, then +8 steps) */
238     for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
239         for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
/* negative cast means golomb decode overflow / invalid code */
240             if ((int32_t)vlc < 0)
/* even vlc => negative level via the (level ^ sign) - sign trick below */
243                 sign = (vlc & 1) ? 0 : -1;
250             } else if (vlc < 4) {
255                     level = (vlc + 9 >> 2) - run;
/* small codes: direct table lookup */
259                     run = svq3_dct_tables[intra][vlc].run;
260                     level = svq3_dct_tables[intra][vlc].level;
/* large codes: level reconstructed from run-dependent offsets */
263                     level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
266                     level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
/* bail out if the run overshoots the current coefficient limit */
271             if ((index += run) >= limit)
274             block[scan[index]] = (level ^ sign) - sign;
/* Motion-compensate one partition (luma + optionally chroma).
 * (x, y)    : partition position in pixels within the frame
 * (mx, my)  : motion vector in the precision implied by thirdpel
 * dxy       : sub-pel interpolation selector for the hpel/tpel tables
 * thirdpel  : nonzero selects the thirdpel (tpel) filter tables
 * dir       : 0 = predict from last_pic, 1 = from next_pic (B-frames)
 * avg       : nonzero averages into dst instead of overwriting
 * Out-of-frame vectors are clamped and the edge emulation buffer is used
 * when the source block would read outside the picture. */
285 static inline void svq3_mc_dir_part(SVQ3Context *s,
286                                     int x, int y, int width, int height,
287                                     int mx, int my, int dxy,
288                                     int thirdpel, int dir, int avg)
290     H264Context *h = &s->h;
291     const Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
294     int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
/* clamp MVs that reach past the edge-extended picture area */
299     if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
300         my < 0 || my >= s->v_edge_pos - height - 1) {
302         mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
303         my = av_clip(my, -16, s->v_edge_pos - height + 15);
306     /* form component predictions */
307     dest = h->cur_pic.f.data[0] + x + y * h->linesize;
308     src = pic->f.data[0] + mx + my * h->linesize;
/* replicate picture borders into a scratch buffer when needed */
311         h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->linesize,
312                                  width + 1, height + 1,
313                                  mx, my, s->h_edge_pos, s->v_edge_pos);
314         src = h->edge_emu_buffer;
/* thirdpel path uses the tpel DSP tables, halfpel path the hpel ones */
317         (avg ? h->dsp.avg_tpel_pixels_tab
318              : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
321         (avg ? s->hdsp.avg_pixels_tab
322              : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
/* chroma planes at half resolution, skipped in grayscale mode */
325     if (!(h->flags & CODEC_FLAG_GRAY)) {
326         mx = mx + (mx < (int) x) >> 1;
327         my = my + (my < (int) y) >> 1;
329         height = height >> 1;
332         for (i = 1; i < 3; i++) {
333             dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
334             src = pic->f.data[i] + mx + my * h->uvlinesize;
337                 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->uvlinesize,
338                                          width + 1, height + 1,
339                                          mx, my, (s->h_edge_pos >> 1),
341                 src = h->edge_emu_buffer;
344                 (avg ? h->dsp.avg_tpel_pixels_tab
345                      : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
349                 (avg ? s->hdsp.avg_pixels_tab
350                      : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/* Decode motion vectors for all partitions of one macroblock and run
 * motion compensation. size encodes the partition geometry; mode is one
 * of FULLPEL/HALFPEL/THIRDPEL/PREDICT_MODE; dir selects the reference
 * direction; avg is forwarded to svq3_mc_dir_part.
 * In PREDICT_MODE the vectors are derived by temporal scaling of the
 * co-located next_pic vectors instead of reading a differential.
 * Returns 0 on success, negative on an invalid MV VLC.
 * NOTE(review): parameter list tail (dir, avg) is on a line missing from
 * this extract. */
357 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
360     int i, j, k, mx, my, dx, dy, x, y;
361     H264Context *h = &s->h;
362     const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
363     const int part_height = 16 >> ((unsigned)(size + 1) / 3);
364     const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
/* edge positions in sixth-pel units for MV clipping below */
365     const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
366     const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
368     for (i = 0; i < 16; i += part_height)
369         for (j = 0; j < 16; j += part_width) {
370             const int b_xy = (4 * h->mb_x + (j >> 2)) +
371                              (4 * h->mb_y + (i >> 2)) * h->b_stride;
373             x = 16 * h->mb_x + j;
374             y = 16 * h->mb_y + i;
/* k indexes the 4x4 sub-block inside the MB for the mv_cache */
375             k = (j >> 2 & 1) + (i >> 1 & 2) +
376                 (j >> 1 & 4) + (i & 8);
378             if (mode != PREDICT_MODE) {
379                 pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
/* PREDICT_MODE: start from the co-located MV of the next picture */
381                     mx = s->next_pic->motion_val[0][b_xy][0] << 1;
382                     my = s->next_pic->motion_val[0][b_xy][1] << 1;
/* temporal scaling by frame distance; "+ 1 >> 1" rounds to nearest */
385                     mx = mx * h->frame_num_offset /
386                          h->prev_frame_num_offset + 1 >> 1;
387                     my = my * h->frame_num_offset /
388                          h->prev_frame_num_offset + 1 >> 1;
390                     mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
391                          h->prev_frame_num_offset + 1 >> 1;
392                     my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
393                          h->prev_frame_num_offset + 1 >> 1;
397             /* clip motion vector prediction to frame border */
398             mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
399             my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
401             /* get (optional) motion vector differential */
402             if (mode == PREDICT_MODE) {
/* note: dy is read before dx — bitstream order, do not "fix" */
405                 dy = svq3_get_se_golomb(&h->gb);
406                 dx = svq3_get_se_golomb(&h->gb);
408                 if (dx == INVALID_VLC || dy == INVALID_VLC) {
409                     av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
414             /* compute motion vector */
415             if (mode == THIRDPEL_MODE) {
417                 mx = (mx + 1 >> 1) + dx;
418                 my = (my + 1 >> 1) + dy;
/* split into fullpel part (fx, fy) and thirdpel fraction dxy */
419                 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
420                 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
421                 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
423                 svq3_mc_dir_part(s, x, y, part_width, part_height,
424                                  fx, fy, dxy, 1, dir, avg);
427             } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
428                 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
429                 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
430                 dxy = (mx & 1) + 2 * (my & 1);
432                 svq3_mc_dir_part(s, x, y, part_width, part_height,
433                                  mx >> 1, my >> 1, dxy, 0, dir, avg);
/* FULLPEL: round sixth-pel prediction down to integer pixels */
437                 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
438                 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
440                 svq3_mc_dir_part(s, x, y, part_width, part_height,
441                                  mx, my, 0, 0, dir, avg);
446             /* update mv_cache */
447             if (mode != PREDICT_MODE) {
448                 int32_t mv = pack16to32(mx, my);
450                 if (part_height == 8 && i < 8) {
451                     AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
453                     if (part_width == 8 && j < 8)
454                         AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
456                 if (part_width == 8 && j < 8)
457                     AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
458                 if (part_width == 4 || part_height == 4)
459                     AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
462             /* write back motion vectors */
463             fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
464                            part_width >> 2, part_height >> 2, h->b_stride,
465                            pack16to32(mx, my), 4);
/* Decode a single macroblock of the given raw mb_type:
 *   0        -> SKIP (direct prediction, possibly bidirectional)
 *   1..7     -> INTER with one of the precision/partition modes
 *   8, 33    -> INTRA4x4 (33 = DC_128_PRED variant)
 *   others   -> INTRA16x16 (mapped through i_mb_type_info[mb_type - 8])
 * Fills mv/ref/prediction caches, decodes CBP, qscale delta and residual
 * blocks, and records the final H.264-style mb_type in cur_pic.
 * Returns 0 on success, negative on a bitstream error.
 * NOTE(review): several declarations and branch lines are missing from
 * this extract; error-return statements are among them. */
471 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
473     H264Context *h = &s->h;
474     int i, j, k, m, dir, mode;
478     const int mb_xy = h->mb_xy;
479     const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
/* mark unavailable neighbor samples at the picture border */
481     h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
482     h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
483     h->topright_samples_available = 0xFFFF;
485     if (mb_type == 0) { /* SKIP */
486         if (h->pict_type == AV_PICTURE_TYPE_P ||
487             s->next_pic->mb_type[mb_xy] == -1) {
488             svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
491             if (h->pict_type == AV_PICTURE_TYPE_B)
492                 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
495             mb_type = MB_TYPE_SKIP;
/* B-frame direct mode: reuse co-located type, predict both directions */
497             mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
498             if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
500             if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
503             mb_type = MB_TYPE_16x16;
505     } else if (mb_type < 8) { /* INTER */
/* one extra flag bit distinguishes thirdpel/halfpel/fullpel */
506         if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
507             mode = THIRDPEL_MODE;
508         else if (s->halfpel_flag &&
509                  s->thirdpel_flag == !get_bits1(&h->gb))
515         /* note ref_cache should contain here:
/* fill mv_cache / ref_cache from left, top, top-right and top-left
 * neighbors for both prediction directions */
523         for (m = 0; m < 2; m++) {
524             if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
525                 for (i = 0; i < 4; i++)
526                     AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
527                               h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
529                 for (i = 0; i < 4; i++)
530                     AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
533                 memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
534                        h->cur_pic.motion_val[m][b_xy - h->b_stride],
535                        4 * 2 * sizeof(int16_t));
536                 memset(&h->ref_cache[m][scan8[0] - 1 * 8],
537                        (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
539                 if (h->mb_x < h->mb_width - 1) {
540                     AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
541                               h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
542                     h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
543                         (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
544                          h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
546                     h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
548                     AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
549                               h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
550                     h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
551                         (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
553                     h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
555                 memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
556                        PART_NOT_AVAILABLE, 8);
/* P-frames only need direction 0 caches */
558             if (h->pict_type != AV_PICTURE_TYPE_B)
562         /* decode motion vector(s) and form prediction(s) */
563         if (h->pict_type == AV_PICTURE_TYPE_P) {
564             if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
566         } else { /* AV_PICTURE_TYPE_B */
568                 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
571                 for (i = 0; i < 4; i++)
572                     memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
573                            0, 4 * 2 * sizeof(int16_t));
576                 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
579                 for (i = 0; i < 4; i++)
580                     memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
581                            0, 4 * 2 * sizeof(int16_t));
585         mb_type = MB_TYPE_16x16;
586     } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
587         memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* import prediction modes from left / top neighbors */
591             for (i = 0; i < 4; i++)
592                 h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
593             if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
594                 h->left_samples_available = 0x5F5F;
597             h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
598             h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
599             h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
600             h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
602             if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
603                 h->top_samples_available = 0x33FF;
606             /* decode prediction codes for luma blocks */
607             for (i = 0; i < 16; i += 2) {
608                 vlc = svq3_get_ue_golomb(&h->gb);
611                     av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
615                 left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
616                 top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
/* each VLC yields two 4x4 modes via the context tables */
618                 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
619                 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
621                 if (left[1] == -1 || left[2] == -1) {
622                     av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
626         } else { /* mb_type == 33, DC_128_PRED block type */
627             for (i = 0; i < 4; i++)
628                 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
631         write_back_intra_pred_mode(h);
634             ff_h264_check_intra4x4_pred_mode(h);
636             h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
637             h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
639             for (i = 0; i < 4; i++)
640                 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
642             h->top_samples_available = 0x33FF;
643             h->left_samples_available = 0x5F5F;
646         mb_type = MB_TYPE_INTRA4x4;
647     } else { /* INTRA16x16 */
648         dir = i_mb_type_info[mb_type - 8].pred_mode;
/* remap SVQ3 prediction-direction numbering to H.264's */
649         dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
651         if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) == -1) {
652             av_log(h->avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
656         cbp = i_mb_type_info[mb_type - 8].cbp;
657         mb_type = MB_TYPE_INTRA16x16;
/* invalidate motion vectors for intra MBs in inter pictures */
660     if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
661         for (i = 0; i < 4; i++)
662             memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
663                    0, 4 * 2 * sizeof(int16_t));
664         if (h->pict_type == AV_PICTURE_TYPE_B) {
665             for (i = 0; i < 4; i++)
666                 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
667                        0, 4 * 2 * sizeof(int16_t));
670     if (!IS_INTRA4x4(mb_type)) {
671         memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
673     if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
674         memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
/* coded block pattern (not present for INTRA16x16, which carries it in
 * its type) */
677     if (!IS_INTRA16x16(mb_type) &&
678         (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
679         if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48U){
680             av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
684         cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
685                                 : golomb_to_inter_cbp[vlc];
687     if (IS_INTRA16x16(mb_type) ||
688         (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
689         h->qscale += svq3_get_se_golomb(&h->gb);
691         if (h->qscale > 31u) {
692             av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
696     if (IS_INTRA16x16(mb_type)) {
697         AV_ZERO128(h->mb_luma_dc[0] + 0);
698         AV_ZERO128(h->mb_luma_dc[0] + 8);
699         if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
700             av_log(h->avctx, AV_LOG_ERROR,
701                    "error while decoding intra luma dc\n");
/* luma AC residual: one 4x4 block per set bit in cbp */
707         const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
708         const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
710         for (i = 0; i < 4; i++)
711             if ((cbp & (1 << i))) {
712                 for (j = 0; j < 4; j++) {
713                     k = index ? (1 * (j & 1) + 2 * (i & 1) +
714                                  2 * (j & 2) + 4 * (i & 2))
716                     h->non_zero_count_cache[scan8[k]] = 1;
718                     if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
719                         av_log(h->avctx, AV_LOG_ERROR,
720                                "error while decoding block\n");
/* chroma DC then chroma AC blocks */
727             for (i = 1; i < 3; ++i)
728                 if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
729                     av_log(h->avctx, AV_LOG_ERROR,
730                            "error while decoding chroma dc block\n");
735                 for (i = 1; i < 3; i++) {
736                     for (j = 0; j < 4; j++) {
738                         h->non_zero_count_cache[scan8[k]] = 1;
740                         if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
741                             av_log(h->avctx, AV_LOG_ERROR,
742                                    "error while decoding chroma ac block\n");
752     h->cur_pic.mb_type[mb_xy] = mb_type;
754     if (IS_INTRA(mb_type))
755         h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
/* Parse one SVQ3 slice header: validates the header byte, locates the
 * next slice, undoes the optional watermark XOR scrambling, reads slice
 * type / qscale / adaptive-quant flags, and resets intra predictors along
 * the slice boundary. Returns 0 on success, negative on error.
 * NOTE(review): some conditional/return lines are missing from this
 * extract. */
760 static int svq3_decode_slice_header(AVCodecContext *avctx)
762     SVQ3Context *s = avctx->priv_data;
763     H264Context *h = &s->h;
764     const int mb_xy = h->mb_xy;
768     header = get_bits(&h->gb, 8);
/* low 5+2 bits must encode a known slice kind and a nonzero length code */
770     if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
772         av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
775         int length = header >> 5 & 3;
/* slice size prefix gives the bit offset of the next slice */
777         s->next_slice_index = get_bits_count(&h->gb) +
778                               8 * show_bits(&h->gb, 8 * length) +
781         if (s->next_slice_index > h->gb.size_in_bits) {
782             av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
786         h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
787         skip_bits(&h->gb, 8);
/* de-scramble the watermarked stream in place (XOR with derived key) */
789         if (s->watermark_key) {
790             uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
791             AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
792                     header ^ s->watermark_key);
795             memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
796                    &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
798         skip_bits_long(&h->gb, 0);
801     if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
802         av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
806     h->slice_type = golomb_to_pict_type[slice_id];
808     if ((header & 0x9F) == 2) {
/* mb_skip_run is stored as an absolute MB address; convert to a run */
809         i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
810         h->mb_skip_run = get_bits(&h->gb, i) -
811                          (h->mb_y * h->mb_width + h->mb_x);
817     h->slice_num = get_bits(&h->gb, 8);
818     h->qscale = get_bits(&h->gb, 5);
819     s->adaptive_quant = get_bits1(&h->gb);
/* unknown/reserved bits and optional extension bytes */
828     skip_bits(&h->gb, 2);
830     while (get_bits1(&h->gb))
831         skip_bits(&h->gb, 8);
833     /* reset intra predictors and invalidate motion vector references */
835         memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
836                -1, 4 * sizeof(int8_t));
837         memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
838                -1, 8 * sizeof(int8_t) * h->mb_x);
841         memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
842                -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
845             h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/* One-time decoder initialization: allocates the three picture slots,
 * initializes the shared H.264 context and DSP tables, locates the SVQ3
 * "SEQH" atom in the extradata and parses frame size, pel-precision
 * flags and the optional zlib-compressed watermark (whose checksum seeds
 * watermark_key for slice de-scrambling).
 * Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): several declarations and error-path lines are missing
 * from this extract. */
851 static av_cold int svq3_decode_init(AVCodecContext *avctx)
853     SVQ3Context *s = avctx->priv_data;
854     H264Context *h = &s->h;
856     unsigned char *extradata;
857     unsigned char *extradata_end;
859     int marker_found = 0;
861     s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
862     s->last_pic = av_mallocz(sizeof(*s->last_pic));
863     s->next_pic = av_mallocz(sizeof(*s->next_pic));
/* if any slot failed, free them all before bailing out */
864     if (!s->next_pic || !s->last_pic || !s->cur_pic) {
865         av_freep(&s->cur_pic);
866         av_freep(&s->last_pic);
867         av_freep(&s->next_pic);
868         return AVERROR(ENOMEM);
871     if (ff_h264_decode_init(avctx) < 0)
874     ff_hpeldsp_init(&s->hdsp, avctx->flags);
875     h->flags = avctx->flags;
/* SVQ3 is always 4:2:0, frame-coded */
877     h->sps.chroma_format_idc = 1;
878     h->picture_structure = PICT_FRAME;
879     avctx->pix_fmt = avctx->codec->pix_fmts[0];
881     h->chroma_qp[0] = h->chroma_qp[1] = 4;
882     h->chroma_x_shift = h->chroma_y_shift = 1;
885     s->thirdpel_flag = 1;
888     /* prowl for the "SEQH" marker in the extradata */
889     extradata = (unsigned char *)avctx->extradata;
890     extradata_end = avctx->extradata + avctx->extradata_size;
892         for (m = 0; m + 8 < avctx->extradata_size; m++) {
893             if (!memcmp(extradata, "SEQH", 4)) {
901     /* if a match was found, parse the extra data */
906         size = AV_RB32(&extradata[4]);
/* atom size must fit inside the remaining extradata */
907         if (size > extradata_end - extradata - 8)
908             return AVERROR_INVALIDDATA;
909         init_get_bits(&gb, extradata + 8, size * 8);
911         /* 'frame size code' and optional 'width, height' */
912         frame_size_code = get_bits(&gb, 3);
913         switch (frame_size_code) {
943             avctx->width = get_bits(&gb, 12);
944             avctx->height = get_bits(&gb, 12);
948         s->halfpel_flag = get_bits1(&gb);
949         s->thirdpel_flag = get_bits1(&gb);
957         h->low_delay = get_bits1(&gb);
/* skip variable-length extension bytes */
962         while (get_bits1(&gb))
965         s->unknown_flag = get_bits1(&gb);
966         avctx->has_b_frames = !h->low_delay;
967         if (s->unknown_flag) {
969             unsigned watermark_width = svq3_get_ue_golomb(&gb);
970             unsigned watermark_height = svq3_get_ue_golomb(&gb);
971             int u1 = svq3_get_ue_golomb(&gb);
972             int u2 = get_bits(&gb, 8);
973             int u3 = get_bits(&gb, 2);
974             int u4 = svq3_get_ue_golomb(&gb);
975             unsigned long buf_len = watermark_width *
976                                     watermark_height * 4;
977             int offset = get_bits_count(&gb) + 7 >> 3;
/* guard against buf_len overflow before allocating */
980             if (watermark_height <= 0 || (uint64_t)watermark_width*4 > UINT_MAX/watermark_height)
983             buf = av_malloc(buf_len);
984             av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
985                    watermark_width, watermark_height);
986             av_log(avctx, AV_LOG_DEBUG,
987                    "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
988                    u1, u2, u3, u4, offset);
989             if (uncompress(buf, &buf_len, extradata + 8 + offset,
990                            size - offset) != Z_OK) {
991                 av_log(avctx, AV_LOG_ERROR,
992                        "could not uncompress watermark logo\n");
/* key = 16-bit checksum of the logo, replicated into both halves */
996             s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
997             s->watermark_key = s->watermark_key << 16 | s->watermark_key;
998             av_log(avctx, AV_LOG_DEBUG,
999                    "watermark key %#x\n", s->watermark_key);
1002             av_log(avctx, AV_LOG_ERROR,
1003                    "this svq3 file contains watermark which need zlib support compiled in\n");
/* derive macroblock geometry from the (possibly updated) frame size */
1009     h->width = avctx->width;
1010     h->height = avctx->height;
1011     h->mb_width = (h->width + 15) / 16;
1012     h->mb_height = (h->height + 15) / 16;
1013     h->mb_stride = h->mb_width + 1;
1014     h->mb_num = h->mb_width * h->mb_height;
1015     h->b_stride = 4 * h->mb_width;
1016     s->h_edge_pos = h->mb_width * 16;
1017     s->v_edge_pos = h->mb_height * 16;
1019     if (ff_h264_alloc_tables(h) < 0) {
1020         av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1021         return AVERROR(ENOMEM);
/* Release all side-data buffers of a Picture and unref its frame.
 * Safe on partially-initialized pictures: av_buffer_unref tolerates
 * NULL buffer pointers. */
1027 static void free_picture(AVCodecContext *avctx, Picture *pic)
1030     for (i = 0; i < 2; i++) {
1031         av_buffer_unref(&pic->motion_val_buf[i]);
1032         av_buffer_unref(&pic->ref_index_buf[i]);
1034     av_buffer_unref(&pic->mb_type_buf);
1036     av_frame_unref(&pic->f);
/* Allocate (on first use) the per-picture side buffers — mb_type,
 * motion vectors, ref indices — then acquire the actual frame buffer and
 * the edge emulation scratch area. Returns 0 on success, negative
 * AVERROR on failure (cleanup path frees the picture).
 * NOTE(review): the error-label lines are missing from this extract. */
1039 static int get_buffer(AVCodecContext *avctx, Picture *pic)
1041     SVQ3Context *s = avctx->priv_data;
1042     H264Context *h = &s->h;
1043     const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1044     const int mb_array_size = h->mb_stride * h->mb_height;
1045     const int b4_stride = h->mb_width * 4 + 1;
1046     const int b4_array_size = b4_stride * h->mb_height * 4;
/* side buffers are allocated lazily and reused across frames */
1049     if (!pic->motion_val_buf[0]) {
1052         pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1053         if (!pic->mb_type_buf)
1054             return AVERROR(ENOMEM);
/* offset past the guard rows/column so negative neighbor offsets work */
1055         pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1057         for (i = 0; i < 2; i++) {
1058             pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1059             pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1060             if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1061                 ret = AVERROR(ENOMEM);
1065             pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1066             pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* B-frames are never referenced by later frames */
1069     pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1071     ret = ff_get_buffer(avctx, &pic->f,
1072                         pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* 17 lines: 16-pel block plus one extra row for interpolation */
1076     if (!h->edge_emu_buffer) {
1077         h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1078         if (!h->edge_emu_buffer)
1079             return AVERROR(ENOMEM);
1082     h->linesize = pic->f.linesize[0];
1083     h->uvlinesize = pic->f.linesize[1];
1087     free_picture(avctx, pic);
/* Decode one access unit: handles the empty-packet flush case, optional
 * watermark de-scrambling of the input buffer, slice-header parsing,
 * reference-picture bookkeeping, the macroblock decode loop, and output
 * frame selection (cur/last depending on low_delay and picture type).
 * Returns bytes consumed or a negative AVERROR.
 * NOTE(review): many declarations and gotos/returns are missing from
 * this extract; comments below describe only what is visible. */
1091 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1092                              int *got_frame, AVPacket *avpkt)
1094     SVQ3Context *s = avctx->priv_data;
1095     H264Context *h = &s->h;
1096     int buf_size = avpkt->size;
1101     /* special case for last picture */
1102     if (buf_size == 0) {
1103         if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1104             ret = av_frame_ref(data, &s->next_pic->f);
1107             s->last_frame_output = 1;
1113     h->mb_x = h->mb_y = h->mb_xy = 0;
/* work on a private copy so the XOR de-scrambling never touches the
 * caller's packet data */
1115     if (s->watermark_key) {
1116         av_fast_malloc(&s->buf, &s->buf_size,
1117                        buf_size+FF_INPUT_BUFFER_PADDING_SIZE);
1119             return AVERROR(ENOMEM);
1120         memcpy(s->buf, avpkt->data, buf_size);
1126     init_get_bits(&h->gb, buf, 8 * buf_size);
1128     if (svq3_decode_slice_header(avctx))
1131     h->pict_type = h->slice_type;
/* for non-B frames the decoded picture becomes the next reference */
1133     if (h->pict_type != AV_PICTURE_TYPE_B)
1134         FFSWAP(Picture*, s->next_pic, s->last_pic);
1136     av_frame_unref(&s->cur_pic->f);
1138     /* for skipping the frame */
1139     s->cur_pic->f.pict_type = h->pict_type;
1140     s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1142     ret = get_buffer(avctx, s->cur_pic);
1146     h->cur_pic_ptr = s->cur_pic;
1147     av_frame_unref(&h->cur_pic.f);
/* shallow-copy into the embedded H264 Picture, then take a real frame
 * reference so both copies own the buffers */
1148     h->cur_pic = *s->cur_pic;
1149     ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
/* precompute per-4x4-block byte offsets used by the H.264 reconstructor */
1153     for (i = 0; i < 16; i++) {
1154         h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1155         h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1157     for (i = 0; i < 16; i++) {
1158         h->block_offset[16 + i] =
1159         h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1160         h->block_offset[48 + 16 + i] =
1161         h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
/* synthesize gray reference frames when references are missing (e.g.
 * after a seek) so decoding can proceed */
1164     if (h->pict_type != AV_PICTURE_TYPE_I) {
1165         if (!s->last_pic->f.data[0]) {
1166             av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1167             ret = get_buffer(avctx, s->last_pic);
1170             memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1171             memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1172                    s->last_pic->f.linesize[1]);
1173             memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1174                    s->last_pic->f.linesize[2]);
1177         if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1178             av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1179             ret = get_buffer(avctx, s->next_pic);
1182             memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1183             memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1184                    s->next_pic->f.linesize[1]);
1185             memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1186                    s->next_pic->f.linesize[2]);
1190     if (avctx->debug & FF_DEBUG_PICT_INFO)
1191         av_log(h->avctx, AV_LOG_DEBUG,
1192                "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1193                av_get_picture_type_char(h->pict_type),
1194                s->halfpel_flag, s->thirdpel_flag,
1195                s->adaptive_quant, h->qscale, h->slice_num);
/* honor skip_frame discard levels */
1197     if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1198         avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1199         avctx->skip_frame >= AVDISCARD_ALL)
1202     if (s->next_p_frame_damaged) {
1203         if (h->pict_type == AV_PICTURE_TYPE_B)
1206             s->next_p_frame_damaged = 0;
/* frame-number bookkeeping mirrors H.264 POC handling, using slice_num */
1209     if (h->pict_type == AV_PICTURE_TYPE_B) {
1210         h->frame_num_offset = h->slice_num - h->prev_frame_num;
1212         if (h->frame_num_offset < 0)
1213             h->frame_num_offset += 256;
1214         if (h->frame_num_offset == 0 ||
1215             h->frame_num_offset >= h->prev_frame_num_offset) {
1216             av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1220         h->prev_frame_num = h->frame_num;
1221         h->frame_num = h->slice_num;
1222         h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1224         if (h->prev_frame_num_offset < 0)
1225             h->prev_frame_num_offset += 256;
/* pre-seed ref_cache: inside-frame entries available, border not */
1228     for (m = 0; m < 2; m++) {
1230         for (i = 0; i < 4; i++) {
1232             for (j = -1; j < 4; j++)
1233                 h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1235                 h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
/* main macroblock loop */
1239     for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1240         for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1242             h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
/* detect end of slice (byte-aligned or only zero stuffing bits left)
 * and start the next one */
1244             if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1245                 ((get_bits_count(&h->gb) & 7) == 0 ||
1246                  show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1247                 skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1248                 h->gb.size_in_bits = 8 * buf_size;
1250                 if (svq3_decode_slice_header(avctx))
1253                 /* TODO: support s->mb_skip_run */
1256             mb_type = svq3_get_ue_golomb(&h->gb);
/* remap raw type: I-pictures shift by 8, B-pictures by 4 */
1258             if (h->pict_type == AV_PICTURE_TYPE_I)
1260             else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1262             if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1263                 av_log(h->avctx, AV_LOG_ERROR,
1264                        "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1268             if (mb_type != 0 || h->cbp)
1269                 ff_h264_hl_decode_mb(h);
1271             if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1272                 h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1273                     (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1276         ff_draw_horiz_band(avctx, NULL, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL,
1277                            16 * h->mb_y, 16, h->picture_structure, 0, 0,
1278                            h->low_delay, h->mb_height * 16, h->mb_width * 16);
1281     left = buf_size*8 - get_bits_count(&h->gb);
1283     if (h->mb_y != h->mb_height || h->mb_x != h->mb_width) {
1284         av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, h->mb_y, h->mb_x, left);
1285         //av_hex_dump(stderr, buf+buf_size-8, 8);
1289         av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
/* choose which picture to hand back to the caller */
1293     if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1294         ret = av_frame_ref(data, &s->cur_pic->f);
1295     else if (s->last_pic->f.data[0])
1296         ret = av_frame_ref(data, &s->last_pic->f);
1300     /* Do not output the last pic after seeking. */
1301     if (s->last_pic->f.data[0] || h->low_delay)
/* rotate reference pictures for the next call */
1304     if (h->pict_type != AV_PICTURE_TYPE_B) {
1305         FFSWAP(Picture*, s->cur_pic, s->next_pic);
1307         av_frame_unref(&s->cur_pic->f);
/* Tear down the decoder: release all three picture slots (buffers then
 * the Picture structs themselves), drop the embedded H264 frame ref,
 * free the shared H.264 context and the edge emulation scratch buffer. */
1313 static int svq3_decode_end(AVCodecContext *avctx)
1315     SVQ3Context *s = avctx->priv_data;
1316     H264Context *h = &s->h;
1318     free_picture(avctx, s->cur_pic);
1319     free_picture(avctx, s->next_pic);
1320     free_picture(avctx, s->last_pic);
1321     av_freep(&s->cur_pic);
1322     av_freep(&s->next_pic);
1323     av_freep(&s->last_pic);
1325     av_frame_unref(&h->cur_pic.f);
1327     ff_h264_free_context(h);
1331     av_freep(&h->edge_emu_buffer);
/* Codec registration entry for the SVQ3 decoder.
 * NOTE(review): some initializer lines (.name, .capabilities tail,
 * closing brace) are missing from this extract. */
1336 AVCodec ff_svq3_decoder = {
1338     .type           = AVMEDIA_TYPE_VIDEO,
1339     .id             = AV_CODEC_ID_SVQ3,
1340     .priv_data_size = sizeof(SVQ3Context),
1341     .init           = svq3_decode_init,
1342     .close          = svq3_decode_end,
1343     .decode         = svq3_decode_frame,
1344     .capabilities   = CODEC_CAP_DRAW_HORIZ_BAND |
1347     .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1348     .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,