2 * Copyright (c) 2003 The FFmpeg Project
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
43 #include "libavutil/attributes.h"
46 #include "mpegvideo.h"
49 #include "h264data.h" // FIXME FIXME FIXME
51 #include "h264_mvpred.h"
54 #include "rectangle.h"
55 #include "vdpau_internal.h"
79 uint32_t watermark_key;
83 int next_p_frame_damaged;
86 int last_frame_output;
/* Inter-prediction motion-vector resolution modes signalled per macroblock.
 * PREDICT_MODE derives the vector from the co-located MB of the next frame
 * (see its use in svq3_mc_dir / svq3_decode_mb). */
89 #define FULLPEL_MODE 1
90 #define HALFPEL_MODE 2
91 #define THIRDPEL_MODE 3
92 #define PREDICT_MODE 4
94 /* dual scan (from some older h264 draft)
/* SVQ3-specific 4x4 coefficient scan order (entries are x + y*4 positions);
 * selected as scan_patterns[2] in svq3_decode_block. */
103 static const uint8_t svq3_scan[16] = {
104 0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
105 2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
106 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
107 0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
/* Scan order for the 4x4 luma DC block; entries are offsets into the
 * macroblock coefficient buffer (x*16 + y*64). Used as scan_patterns[0]
 * in svq3_decode_block. */
110 static const uint8_t luma_dc_zigzag_scan[16] = {
111 0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
112 3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
113 1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
114 3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
/* Maps the luma-prediction golomb code to a pair of table indices that are
 * fed into svq3_pred_1[][] (see the intra4x4 decode loop in svq3_decode_mb,
 * which reads svq3_pred_0[vlc][0] and svq3_pred_0[vlc][1]). */
117 static const uint8_t svq3_pred_0[25][2] = {
120 { 0, 2 }, { 1, 1 }, { 2, 0 },
121 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
122 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
123 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
124 { 2, 4 }, { 3, 3 }, { 4, 2 },
/* Intra 4x4 mode prediction table, indexed as
 * svq3_pred_1[top_mode + 1][left_mode + 1][code]; a stored value of -1
 * marks an invalid prediction (rejected as "weird prediction" in
 * svq3_decode_mb). */
129 static const int8_t svq3_pred_1[6][6][5] = {
130 { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
131 { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
132 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
133 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
134 { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
135 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
136 { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
137 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
138 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
139 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
140 { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
141 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/* Short-VLC (run, level) pairs for DCT coefficient decoding; the outer
 * index is the intra flag computed in svq3_decode_block, the inner index
 * the decoded vlc value. */
144 static const struct {
147 } svq3_dct_tables[2][16] = {
148 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
149 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
150 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
151 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/* Dequantization multipliers indexed by qscale (0..31); applied with a
 * +0x80000 round and >>20 shift in the IDCT routines below. */
154 static const uint32_t svq3_dequant_coeff[32] = {
155 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
156 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
157 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
158 61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
/* Dequantize and inverse-transform the 4x4 luma DC block.
 * input:  16 DC coefficients (row-major); output: scattered into the MB
 *         coefficient buffer using `stride` and x_offset[] (stride's
 *         definition falls outside this view — presumably 16; verify).
 * qp:     qscale index into svq3_dequant_coeff[]. */
161 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
163 const int qmul = svq3_dequant_coeff[qp];
167 static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
/* horizontal pass: 13/17/7 integer butterfly (SVQ3's DC transform) */
169 for (i = 0; i < 4; i++) {
170 const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
171 const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
172 const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
173 const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
175 temp[4 * i + 0] = z0 + z3;
176 temp[4 * i + 1] = z1 + z2;
177 temp[4 * i + 2] = z1 - z2;
178 temp[4 * i + 3] = z0 - z3;
/* vertical pass; the same butterfly, then dequantize with rounding:
 * (x * qmul + 0x80000) >> 20 */
181 for (i = 0; i < 4; i++) {
182 const int offset = x_offset[i];
183 const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
184 const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
185 const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
186 const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
188 output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
189 output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
190 output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
191 output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
/* Dequantize, inverse-transform a 4x4 block and add the result to dst.
 * dc != 0 selects a special DC path: dc == 1 uses the fixed 1538 scale,
 * otherwise block[0] is dequantized with qmul. The block is cleared on
 * completion so the caller's coefficient buffer is reusable. */
196 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
197 int stride, int qp, int dc)
199 const int qmul = svq3_dequant_coeff[qp];
203 dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
204 : qmul * (block[0] >> 3) / 2);
/* horizontal pass of the 13/17/7 butterfly, in place */
208 for (i = 0; i < 4; i++) {
209 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
210 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
211 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
212 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
214 block[0 + 4 * i] = z0 + z3;
215 block[1 + 4 * i] = z1 + z2;
216 block[2 + 4 * i] = z1 - z2;
217 block[3 + 4 * i] = z0 - z3;
/* vertical pass; rr folds the DC term and the 0x80000 rounding constant
 * into one addend before the >>20 normalization */
220 for (i = 0; i < 4; i++) {
221 const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
222 const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
223 const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
224 const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
225 const int rr = (dc + 0x80000);
227 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
228 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
229 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
230 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
233 memset(block, 0, 16 * sizeof(int16_t));
/* Decode run/level coefficients for one block.
 * type selects the scan: 0 = luma DC zigzag, 1 = zigzag, 2 = svq3 scan,
 * 3 = chroma DC. index is the first coefficient position to fill.
 * Returns nonzero on bitstream error (exact return paths fall in elided
 * lines — NOTE(review): verify against full source). */
236 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
237 int index, const int type)
239 static const uint8_t *const scan_patterns[4] =
240 { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
242 int run, level, sign, limit;
/* intra is 1 for type >= 2 (3*type >> 2), selecting the intra DCT table */
244 const int intra = 3 * type >> 2;
245 const uint8_t *const scan = scan_patterns[type];
247 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
248 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
249 if ((int32_t)vlc < 0)
/* even vlc => positive level, odd => negative (sign is 0 or -1) */
252 sign = (vlc & 1) ? 0 : -1;
259 } else if (vlc < 4) {
264 level = (vlc + 9 >> 2) - run;
/* mid-range vlc: direct table lookup of (run, level) */
268 run = svq3_dct_tables[intra][vlc].run;
269 level = svq3_dct_tables[intra][vlc].level;
272 level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
275 level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
/* advancing past the limit is a bitstream error */
280 if ((index += run) >= limit)
/* apply sign via xor/subtract trick and store at the scan position */
283 block[scan[index]] = (level ^ sign) - sign;
/* Motion-compensate one partition (luma, then chroma unless CODEC_FLAG_GRAY).
 * (x, y): partition position in pixels; (mx, my): motion vector in the
 * resolution implied by thirdpel; dir selects last_pic (0) / next_pic (1);
 * avg selects avg_* vs put_* DSP functions. */
294 static inline void svq3_mc_dir_part(SVQ3Context *s,
295 int x, int y, int width, int height,
296 int mx, int my, int dxy,
297 int thirdpel, int dir, int avg)
299 H264Context *h = &s->h;
300 const Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
303 int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
/* clip MVs that reference outside the padded frame edges */
308 if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
309 my < 0 || my >= s->v_edge_pos - height - 1) {
311 mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
312 my = av_clip(my, -16, s->v_edge_pos - height + 15);
315 /* form component predictions */
316 dest = h->cur_pic.f.data[0] + x + y * h->linesize;
317 src = pic->f.data[0] + mx + my * h->linesize;
/* emulated edge: reads +1 in each dimension for subpel interpolation */
320 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->linesize,
321 width + 1, height + 1,
322 mx, my, s->h_edge_pos, s->v_edge_pos);
323 src = h->edge_emu_buffer;
/* thirdpel uses the tpel DSP table, otherwise hpel put/avg */
326 (avg ? h->dsp.avg_tpel_pixels_tab
327 : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
330 (avg ? s->hdsp.avg_pixels_tab
331 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
/* chroma: halve MV (with rounding toward the block), size and edges */
334 if (!(h->flags & CODEC_FLAG_GRAY)) {
335 mx = mx + (mx < (int) x) >> 1;
336 my = my + (my < (int) y) >> 1;
338 height = height >> 1;
341 for (i = 1; i < 3; i++) {
342 dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
343 src = pic->f.data[i] + mx + my * h->uvlinesize;
346 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->uvlinesize,
347 width + 1, height + 1,
348 mx, my, (s->h_edge_pos >> 1),
350 src = h->edge_emu_buffer;
353 (avg ? h->dsp.avg_tpel_pixels_tab
354 : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
358 (avg ? s->hdsp.avg_pixels_tab
359 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/* Decode and apply motion for all partitions of a macroblock.
 * size encodes the partition geometry; mode is one of the *PEL_MODE /
 * PREDICT_MODE constants; dir selects the reference direction; avg
 * selects averaging MC. MV units inside this function are 1/6 pel
 * (hence the 6* scaling of the edge positions). Returns < 0 on error. */
366 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
369 int i, j, k, mx, my, dx, dy, x, y;
370 H264Context *h = &s->h;
371 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
372 const int part_height = 16 >> ((unsigned)(size + 1) / 3);
373 const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
374 const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
375 const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
377 for (i = 0; i < 16; i += part_height)
378 for (j = 0; j < 16; j += part_width) {
379 const int b_xy = (4 * h->mb_x + (j >> 2)) +
380 (4 * h->mb_y + (i >> 2)) * h->b_stride;
382 x = 16 * h->mb_x + j;
383 y = 16 * h->mb_y + i;
/* k indexes the 4x4 sub-block within the MB for mv_cache/scan8 */
384 k = (j >> 2 & 1) + (i >> 1 & 2) +
385 (j >> 1 & 4) + (i & 8);
/* PREDICT_MODE: derive the MV from the co-located next-frame MV,
 * scaled by the frame-distance ratio; otherwise use median prediction */
387 if (mode != PREDICT_MODE) {
388 pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
390 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
391 my = s->next_pic->motion_val[0][b_xy][1] << 1;
394 mx = mx * h->frame_num_offset /
395 h->prev_frame_num_offset + 1 >> 1;
396 my = my * h->frame_num_offset /
397 h->prev_frame_num_offset + 1 >> 1;
399 mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
400 h->prev_frame_num_offset + 1 >> 1;
401 my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
402 h->prev_frame_num_offset + 1 >> 1;
406 /* clip motion vector prediction to frame border */
407 mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
408 my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
410 /* get (optional) motion vector differential */
411 if (mode == PREDICT_MODE) {
414 dy = svq3_get_se_golomb(&h->gb);
415 dx = svq3_get_se_golomb(&h->gb);
417 if (dx == INVALID_VLC || dy == INVALID_VLC) {
418 av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
423 /* compute motion vector */
424 if (mode == THIRDPEL_MODE) {
426 mx = (mx + 1 >> 1) + dx;
427 my = (my + 1 >> 1) + dy;
/* fx/fy: fullpel part; dxy: 0..8 thirdpel interpolation index */
428 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
429 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
430 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
432 svq3_mc_dir_part(s, x, y, part_width, part_height,
433 fx, fy, dxy, 1, dir, avg);
436 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
437 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
438 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
439 dxy = (mx & 1) + 2 * (my & 1);
441 svq3_mc_dir_part(s, x, y, part_width, part_height,
442 mx >> 1, my >> 1, dxy, 0, dir, avg);
/* fullpel fallback */
446 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
447 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
449 svq3_mc_dir_part(s, x, y, part_width, part_height,
450 mx, my, 0, 0, dir, avg);
455 /* update mv_cache */
456 if (mode != PREDICT_MODE) {
457 int32_t mv = pack16to32(mx, my);
459 if (part_height == 8 && i < 8) {
460 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
462 if (part_width == 8 && j < 8)
463 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
465 if (part_width == 8 && j < 8)
466 AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
467 if (part_width == 4 || part_height == 4)
468 AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
471 /* write back motion vectors */
472 fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
473 part_width >> 2, part_height >> 2, h->b_stride,
474 pack16to32(mx, my), 4);
/* Decode one macroblock. mb_type is the raw bitstream value:
 * 0 = SKIP, 1..7 = INTER, 8/33 = INTRA4x4 (33 = DC_128), 9..32 = INTRA16x16.
 * Fills mv/ref/pred caches, decodes CBP, qscale delta and coefficients.
 * Returns < 0 on bitstream error (error paths fall in elided lines). */
480 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
482 H264Context *h = &s->h;
483 int i, j, k, m, dir, mode;
487 const int mb_xy = h->mb_xy;
488 const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
490 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
491 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
492 h->topright_samples_available = 0xFFFF;
494 if (mb_type == 0) { /* SKIP */
495 if (h->pict_type == AV_PICTURE_TYPE_P ||
496 s->next_pic->mb_type[mb_xy] == -1) {
497 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
500 if (h->pict_type == AV_PICTURE_TYPE_B)
501 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
504 mb_type = MB_TYPE_SKIP;
/* B-frame skip with a valid co-located MB: bidirectional PREDICT_MODE */
506 mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
507 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
509 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
512 mb_type = MB_TYPE_16x16;
514 } else if (mb_type < 8) { /* INTER */
/* one extra bit distinguishes thirdpel/halfpel when both flags allow it */
515 if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
516 mode = THIRDPEL_MODE;
517 else if (s->halfpel_flag &&
518 s->thirdpel_flag == !get_bits1(&h->gb))
524 /* note ref_cache should contain here:
532 for (m = 0; m < 2; m++) {
533 if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
534 for (i = 0; i < 4; i++)
535 AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
536 h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
538 for (i = 0; i < 4; i++)
539 AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
542 memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
543 h->cur_pic.motion_val[m][b_xy - h->b_stride],
544 4 * 2 * sizeof(int16_t));
545 memset(&h->ref_cache[m][scan8[0] - 1 * 8],
546 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
548 if (h->mb_x < h->mb_width - 1) {
549 AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
550 h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
551 h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
552 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
553 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
555 h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
557 AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
558 h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
559 h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
560 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
562 h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
564 memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
565 PART_NOT_AVAILABLE, 8);
/* P-frames only fill direction 0 */
567 if (h->pict_type != AV_PICTURE_TYPE_B)
571 /* decode motion vector(s) and form prediction(s) */
572 if (h->pict_type == AV_PICTURE_TYPE_P) {
573 if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
575 } else { /* AV_PICTURE_TYPE_B */
577 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
580 for (i = 0; i < 4; i++)
581 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
582 0, 4 * 2 * sizeof(int16_t));
585 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
588 for (i = 0; i < 4; i++)
589 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
590 0, 4 * 2 * sizeof(int16_t));
594 mb_type = MB_TYPE_16x16;
595 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
596 memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* populate the left/top prediction-mode cache from neighbouring MBs */
600 for (i = 0; i < 4; i++)
601 h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
602 if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
603 h->left_samples_available = 0x5F5F;
606 h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
607 h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
608 h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
609 h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
611 if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
612 h->top_samples_available = 0x33FF;
615 /* decode prediction codes for luma blocks */
616 for (i = 0; i < 16; i += 2) {
617 vlc = svq3_get_ue_golomb(&h->gb);
620 av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
/* each vlc codes the modes of two adjacent 4x4 blocks via the
 * svq3_pred_0 / svq3_pred_1 lookup tables */
624 left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
625 top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
627 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
628 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
630 if (left[1] == -1 || left[2] == -1) {
631 av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
635 } else { /* mb_type == 33, DC_128_PRED block type */
636 for (i = 0; i < 4; i++)
637 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
640 write_back_intra_pred_mode(h);
643 ff_h264_check_intra4x4_pred_mode(h);
645 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
646 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
648 for (i = 0; i < 4; i++)
649 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
651 h->top_samples_available = 0x33FF;
652 h->left_samples_available = 0x5F5F;
655 mb_type = MB_TYPE_INTRA4x4;
656 } else { /* INTRA16x16 */
657 dir = i_mb_type_info[mb_type - 8].pred_mode;
/* remap H.264 pred-mode numbering to SVQ3's */
658 dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
660 if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) < 0) {
661 av_log(h->avctx, AV_LOG_ERROR, "check_intra_pred_mode < 0\n");
665 cbp = i_mb_type_info[mb_type - 8].cbp;
666 mb_type = MB_TYPE_INTRA16x16;
/* intra MB in an inter picture: clear stored motion vectors */
669 if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
670 for (i = 0; i < 4; i++)
671 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
672 0, 4 * 2 * sizeof(int16_t));
673 if (h->pict_type == AV_PICTURE_TYPE_B) {
674 for (i = 0; i < 4; i++)
675 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
676 0, 4 * 2 * sizeof(int16_t));
679 if (!IS_INTRA4x4(mb_type)) {
680 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
682 if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
683 memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
/* decode CBP for everything except INTRA16x16 (whose CBP is implied) */
686 if (!IS_INTRA16x16(mb_type) &&
687 (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
688 if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48U){
689 av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
693 cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
694 : golomb_to_inter_cbp[vlc];
696 if (IS_INTRA16x16(mb_type) ||
697 (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
698 h->qscale += svq3_get_se_golomb(&h->gb);
700 if (h->qscale > 31u) {
701 av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
705 if (IS_INTRA16x16(mb_type)) {
706 AV_ZERO128(h->mb_luma_dc[0] + 0);
707 AV_ZERO128(h->mb_luma_dc[0] + 8);
708 if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
709 av_log(h->avctx, AV_LOG_ERROR,
710 "error while decoding intra luma dc\n");
/* luma AC blocks for each 8x8 group flagged in cbp */
716 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
717 const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
719 for (i = 0; i < 4; i++)
720 if ((cbp & (1 << i))) {
721 for (j = 0; j < 4; j++) {
722 k = index ? (1 * (j & 1) + 2 * (i & 1) +
723 2 * (j & 2) + 4 * (i & 2))
725 h->non_zero_count_cache[scan8[k]] = 1;
727 if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
728 av_log(h->avctx, AV_LOG_ERROR,
729 "error while decoding block\n");
/* chroma DC and AC */
736 for (i = 1; i < 3; ++i)
737 if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
738 av_log(h->avctx, AV_LOG_ERROR,
739 "error while decoding chroma dc block\n");
744 for (i = 1; i < 3; i++) {
745 for (j = 0; j < 4; j++) {
747 h->non_zero_count_cache[scan8[k]] = 1;
749 if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
750 av_log(h->avctx, AV_LOG_ERROR,
751 "error while decoding chroma ac block\n");
761 h->cur_pic.mb_type[mb_xy] = mb_type;
763 if (IS_INTRA(mb_type))
764 h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
/* Parse an SVQ3 slice header: slice delimiter byte, optional watermark
 * de-scrambling, slice type, skip-run, slice_num, qscale and flags.
 * Also invalidates intra predictors / MV references around the slice
 * start. Returns nonzero on error (error paths fall in elided lines). */
769 static int svq3_decode_slice_header(AVCodecContext *avctx)
771 SVQ3Context *s = avctx->priv_data;
772 H264Context *h = &s->h;
773 const int mb_xy = h->mb_xy;
777 header = get_bits(&h->gb, 8);
/* low 5+2 bits validate the delimiter; bits 5-6 give the length-field size */
779 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
781 av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
784 int length = header >> 5 & 3;
786 s->next_slice_index = get_bits_count(&h->gb) +
787 8 * show_bits(&h->gb, 8 * length) +
790 if (s->next_slice_index > h->gb.size_in_bits) {
791 av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
795 h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
796 skip_bits(&h->gb, 8);
/* watermarked files: XOR-descramble the word following the header */
798 if (s->watermark_key) {
799 uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
800 AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
801 header ^ s->watermark_key);
804 memmove((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
805 &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
807 skip_bits_long(&h->gb, 0);
810 if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
811 av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
815 h->slice_type = golomb_to_pict_type[slice_id];
817 if ((header & 0x9F) == 2) {
818 i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
819 h->mb_skip_run = get_bits(&h->gb, i) -
820 (h->mb_y * h->mb_width + h->mb_x);
826 h->slice_num = get_bits(&h->gb, 8);
827 h->qscale = get_bits(&h->gb, 5);
828 s->adaptive_quant = get_bits1(&h->gb);
/* unknown/reserved fields: 2 bits, then byte-aligned extension bytes */
837 skip_bits(&h->gb, 2);
839 while (get_bits1(&h->gb))
840 skip_bits(&h->gb, 8);
842 /* reset intra predictors and invalidate motion vector references */
844 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
845 -1, 4 * sizeof(int8_t));
846 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
847 -1, 8 * sizeof(int8_t) * h->mb_x);
850 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
851 -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
854 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/* Decoder init: allocate the three Picture slots, set up the embedded
 * H264Context, then parse the "SEQH" extradata atom (frame size, subpel
 * flags, optional zlib-compressed watermark) and size the MB structures. */
860 static av_cold int svq3_decode_init(AVCodecContext *avctx)
862 SVQ3Context *s = avctx->priv_data;
863 H264Context *h = &s->h;
865 unsigned char *extradata;
866 unsigned char *extradata_end;
868 int marker_found = 0;
870 s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
871 s->last_pic = av_mallocz(sizeof(*s->last_pic));
872 s->next_pic = av_mallocz(sizeof(*s->next_pic));
873 if (!s->next_pic || !s->last_pic || !s->cur_pic) {
874 av_freep(&s->cur_pic);
875 av_freep(&s->last_pic);
876 av_freep(&s->next_pic);
877 return AVERROR(ENOMEM);
880 if (ff_h264_decode_init(avctx) < 0)
883 ff_hpeldsp_init(&s->hdsp, avctx->flags);
884 h->flags = avctx->flags;
886 h->sps.chroma_format_idc = 1;
887 h->picture_structure = PICT_FRAME;
888 avctx->pix_fmt = avctx->codec->pix_fmts[0];
890 h->chroma_qp[0] = h->chroma_qp[1] = 4;
891 h->chroma_x_shift = h->chroma_y_shift = 1;
/* defaults if no SEQH atom is found */
894 s->thirdpel_flag = 1;
897 /* prowl for the "SEQH" marker in the extradata */
898 extradata = (unsigned char *)avctx->extradata;
899 extradata_end = avctx->extradata + avctx->extradata_size;
901 for (m = 0; m + 8 < avctx->extradata_size; m++) {
902 if (!memcmp(extradata, "SEQH", 4)) {
910 /* if a match was found, parse the extra data */
915 size = AV_RB32(&extradata[4]);
916 if (size > extradata_end - extradata - 8)
917 return AVERROR_INVALIDDATA;
918 init_get_bits(&gb, extradata + 8, size * 8);
920 /* 'frame size code' and optional 'width, height' */
921 frame_size_code = get_bits(&gb, 3);
922 switch (frame_size_code) {
/* code 7: explicit 12-bit width/height follow */
952 avctx->width = get_bits(&gb, 12);
953 avctx->height = get_bits(&gb, 12);
957 s->halfpel_flag = get_bits1(&gb);
958 s->thirdpel_flag = get_bits1(&gb);
966 h->low_delay = get_bits1(&gb);
/* skip extension bytes while the continuation bit is set */
971 while (get_bits1(&gb))
974 s->unknown_flag = get_bits1(&gb);
975 avctx->has_b_frames = !h->low_delay;
976 if (s->unknown_flag) {
/* watermark: dimensions + zlib-compressed logo; its checksum forms
 * the slice de-scrambling key */
978 unsigned watermark_width = svq3_get_ue_golomb(&gb);
979 unsigned watermark_height = svq3_get_ue_golomb(&gb);
980 int u1 = svq3_get_ue_golomb(&gb);
981 int u2 = get_bits(&gb, 8);
982 int u3 = get_bits(&gb, 2);
983 int u4 = svq3_get_ue_golomb(&gb);
984 unsigned long buf_len = watermark_width *
985 watermark_height * 4;
986 int offset = get_bits_count(&gb) + 7 >> 3;
989 if (watermark_height <= 0 || (uint64_t)watermark_width*4 > UINT_MAX/watermark_height)
992 buf = av_malloc(buf_len);
993 av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
994 watermark_width, watermark_height);
995 av_log(avctx, AV_LOG_DEBUG,
996 "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
997 u1, u2, u3, u4, offset);
998 if (uncompress(buf, &buf_len, extradata + 8 + offset,
999 size - offset) != Z_OK) {
1000 av_log(avctx, AV_LOG_ERROR,
1001 "could not uncompress watermark logo\n");
1005 s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1006 s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1007 av_log(avctx, AV_LOG_DEBUG,
1008 "watermark key %#x\n", s->watermark_key);
1011 av_log(avctx, AV_LOG_ERROR,
1012 "this svq3 file contains watermark which need zlib support compiled in\n");
/* derive macroblock geometry from the final frame dimensions */
1018 h->width = avctx->width;
1019 h->height = avctx->height;
1020 h->mb_width = (h->width + 15) / 16;
1021 h->mb_height = (h->height + 15) / 16;
1022 h->mb_stride = h->mb_width + 1;
1023 h->mb_num = h->mb_width * h->mb_height;
1024 h->b_stride = 4 * h->mb_width;
1025 s->h_edge_pos = h->mb_width * 16;
1026 s->v_edge_pos = h->mb_height * 16;
1028 if (ff_h264_alloc_tables(h) < 0) {
1029 av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1030 return AVERROR(ENOMEM);
/* Release all buffers owned by a Picture (motion vectors, ref indices,
 * mb types) and unreference its frame. Safe on partially-allocated pics
 * since av_buffer_unref / av_frame_unref accept empty references. */
1036 static void free_picture(AVCodecContext *avctx, Picture *pic)
1039 for (i = 0; i < 2; i++) {
1040 av_buffer_unref(&pic->motion_val_buf[i]);
1041 av_buffer_unref(&pic->ref_index_buf[i]);
1043 av_buffer_unref(&pic->mb_type_buf);
1045 av_frame_unref(&pic->f);
/* Allocate (once) the per-picture side buffers and acquire a frame buffer.
 * Side buffers are kept across calls: only allocated when
 * motion_val_buf[0] is still NULL. Returns 0 or a negative AVERROR;
 * frees the picture on the (elided) fail path. */
1048 static int get_buffer(AVCodecContext *avctx, Picture *pic)
1050 SVQ3Context *s = avctx->priv_data;
1051 H264Context *h = &s->h;
1052 const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1053 const int mb_array_size = h->mb_stride * h->mb_height;
1054 const int b4_stride = h->mb_width * 4 + 1;
1055 const int b4_array_size = b4_stride * h->mb_height * 4;
1058 if (!pic->motion_val_buf[0]) {
1061 pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1062 if (!pic->mb_type_buf)
1063 return AVERROR(ENOMEM);
/* offset so that mb_type[-1] style neighbour accesses stay in bounds */
1064 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1066 for (i = 0; i < 2; i++) {
1067 pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1068 pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1069 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1070 ret = AVERROR(ENOMEM);
1074 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1075 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* B pictures are not used as references */
1078 pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1080 ret = ff_get_buffer(avctx, &pic->f,
1081 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1085 if (!h->edge_emu_buffer) {
1086 h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1087 if (!h->edge_emu_buffer)
1088 return AVERROR(ENOMEM);
1091 h->linesize = pic->f.linesize[0];
1092 h->uvlinesize = pic->f.linesize[1];
1096 free_picture(avctx, pic);
/* Top-level frame decode. Handles the end-of-stream flush (buf_size == 0),
 * watermark de-scrambling of the packet, slice parsing, the MB decode
 * loop, and output/reference-picture rotation. */
1100 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1101 int *got_frame, AVPacket *avpkt)
1103 SVQ3Context *s = avctx->priv_data;
1104 H264Context *h = &s->h;
1105 int buf_size = avpkt->size;
1110 /* special case for last picture */
1111 if (buf_size == 0) {
1112 if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1113 ret = av_frame_ref(data, &s->next_pic->f);
1116 s->last_frame_output = 1;
1122 h->mb_x = h->mb_y = h->mb_xy = 0;
/* watermarked streams are de-scrambled in a private copy of the packet */
1124 if (s->watermark_key) {
1125 av_fast_malloc(&s->buf, &s->buf_size,
1126 buf_size+FF_INPUT_BUFFER_PADDING_SIZE);
1128 return AVERROR(ENOMEM);
1129 memcpy(s->buf, avpkt->data, buf_size);
1135 init_get_bits(&h->gb, buf, 8 * buf_size);
1137 if (svq3_decode_slice_header(avctx))
1140 h->pict_type = h->slice_type;
/* non-B frame: rotate next -> last reference */
1142 if (h->pict_type != AV_PICTURE_TYPE_B)
1143 FFSWAP(Picture*, s->next_pic, s->last_pic);
1145 av_frame_unref(&s->cur_pic->f);
1147 /* for skipping the frame */
1148 s->cur_pic->f.pict_type = h->pict_type;
1149 s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1151 ret = get_buffer(avctx, s->cur_pic);
1155 h->cur_pic_ptr = s->cur_pic;
1156 av_frame_unref(&h->cur_pic.f);
1157 h->cur_pic = *s->cur_pic;
1158 ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
/* per-frame block offsets for the 4:2:0 planes */
1162 for (i = 0; i < 16; i++) {
1163 h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1164 h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1166 for (i = 0; i < 16; i++) {
1167 h->block_offset[16 + i] =
1168 h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1169 h->block_offset[48 + 16 + i] =
1170 h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
/* synthesize grey reference frames when references are missing (seek) */
1173 if (h->pict_type != AV_PICTURE_TYPE_I) {
1174 if (!s->last_pic->f.data[0]) {
1175 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1176 ret = get_buffer(avctx, s->last_pic);
1179 memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1180 memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1181 s->last_pic->f.linesize[1]);
1182 memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1183 s->last_pic->f.linesize[2]);
1186 if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1187 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1188 ret = get_buffer(avctx, s->next_pic);
1191 memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1192 memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1193 s->next_pic->f.linesize[1]);
1194 memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1195 s->next_pic->f.linesize[2]);
1199 if (avctx->debug & FF_DEBUG_PICT_INFO)
1200 av_log(h->avctx, AV_LOG_DEBUG,
1201 "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1202 av_get_picture_type_char(h->pict_type),
1203 s->halfpel_flag, s->thirdpel_flag,
1204 s->adaptive_quant, h->qscale, h->slice_num);
1206 if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1207 avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1208 avctx->skip_frame >= AVDISCARD_ALL)
1211 if (s->next_p_frame_damaged) {
1212 if (h->pict_type == AV_PICTURE_TYPE_B)
1215 s->next_p_frame_damaged = 0;
/* SVQ3 uses slice_num as an 8-bit frame counter for B-frame scaling */
1218 if (h->pict_type == AV_PICTURE_TYPE_B) {
1219 h->frame_num_offset = h->slice_num - h->prev_frame_num;
1221 if (h->frame_num_offset < 0)
1222 h->frame_num_offset += 256;
1223 if (h->frame_num_offset == 0 ||
1224 h->frame_num_offset >= h->prev_frame_num_offset) {
1225 av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1229 h->prev_frame_num = h->frame_num;
1230 h->frame_num = h->slice_num;
1231 h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1233 if (h->prev_frame_num_offset < 0)
1234 h->prev_frame_num_offset += 256;
/* pre-fill ref_cache: column -1 unavailable, the rest referenced */
1237 for (m = 0; m < 2; m++) {
1239 for (i = 0; i < 4; i++) {
1241 for (j = -1; j < 4; j++)
1242 h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1244 h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
/* main macroblock loop */
1248 for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1249 for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1251 h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
/* near the slice end and byte-aligned (or only zero padding left):
 * jump to the next slice header */
1253 if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1254 ((get_bits_count(&h->gb) & 7) == 0 ||
1255 show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1256 skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1257 h->gb.size_in_bits = 8 * buf_size;
1259 if (svq3_decode_slice_header(avctx))
1262 /* TODO: support s->mb_skip_run */
1265 mb_type = svq3_get_ue_golomb(&h->gb);
/* remap the raw code into the per-picture-type mb_type ranges */
1267 if (h->pict_type == AV_PICTURE_TYPE_I)
1269 else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1271 if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1272 av_log(h->avctx, AV_LOG_ERROR,
1273 "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1277 if (mb_type != 0 || h->cbp)
1278 ff_h264_hl_decode_mb(h);
1280 if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1281 h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1282 (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1285 ff_draw_horiz_band(avctx, NULL, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL,
1286 16 * h->mb_y, 16, h->picture_structure, 0, 0,
1287 h->low_delay, h->mb_height * 16, h->mb_width * 16);
1290 left = buf_size*8 - get_bits_count(&h->gb);
1292 if (h->mb_y != h->mb_height || h->mb_x != h->mb_width) {
1293 av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, h->mb_y, h->mb_x, left);
1294 //av_hex_dump(stderr, buf+buf_size-8, 8);
1298 av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
/* output: B-frames / low-delay emit cur_pic, else the delayed last_pic */
1302 if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1303 ret = av_frame_ref(data, &s->cur_pic->f);
1304 else if (s->last_pic->f.data[0])
1305 ret = av_frame_ref(data, &s->last_pic->f);
1309 /* Do not output the last pic after seeking. */
1310 if (s->last_pic->f.data[0] || h->low_delay)
1313 if (h->pict_type != AV_PICTURE_TYPE_B) {
1314 FFSWAP(Picture*, s->cur_pic, s->next_pic);
1316 av_frame_unref(&s->cur_pic->f);
/* Decoder teardown: free the three Picture slots, the shared H.264
 * context state and the edge emulation buffer. */
1322 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1324 SVQ3Context *s = avctx->priv_data;
1325 H264Context *h = &s->h;
1327 free_picture(avctx, s->cur_pic);
1328 free_picture(avctx, s->next_pic);
1329 free_picture(avctx, s->last_pic);
1330 av_freep(&s->cur_pic);
1331 av_freep(&s->next_pic);
1332 av_freep(&s->last_pic);
/* h->cur_pic.f holds an extra reference taken in svq3_decode_frame */
1334 av_frame_unref(&h->cur_pic.f);
1336 ff_h264_free_context(h);
1340 av_freep(&h->edge_emu_buffer);
1345 AVCodec ff_svq3_decoder = {
1347 .type = AVMEDIA_TYPE_VIDEO,
1348 .id = AV_CODEC_ID_SVQ3,
1349 .priv_data_size = sizeof(SVQ3Context),
1350 .init = svq3_decode_init,
1351 .close = svq3_decode_end,
1352 .decode = svq3_decode_frame,
1353 .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1356 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1357 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,