2 * Copyright (c) 2003 The FFmpeg Project
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
43 #include "libavutil/attributes.h"
46 #include "mpegvideo.h"
49 #include "h264data.h" // FIXME FIXME FIXME
51 #include "h264_mvpred.h"
54 #include "rectangle.h"
55 #include "vdpau_internal.h"
79 uint32_t watermark_key;
83 int next_p_frame_damaged;
86 int last_frame_output;
/* Motion-vector precision / prediction modes consumed by svq3_mc_dir():
 * full-, half- and third-pel resolution, plus PREDICT_MODE where the MV
 * is derived from the next picture's motion (skip/B-frame prediction). */
89 #define FULLPEL_MODE 1
90 #define HALFPEL_MODE 2
91 #define THIRDPEL_MODE 3
92 #define PREDICT_MODE 4
94 /* dual scan (from some older h264 draft)
/* SVQ3-specific 4x4 coefficient scan order; entries are x + y * 4
 * positions inside the 4x4 block.  Selected as scan_patterns[2] in
 * svq3_decode_block(). */
103 static const uint8_t svq3_scan[16] = {
104 0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
105 2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
106 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
107 0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
/* Scan order for the 16 luma DC coefficients.  Entries are offsets into
 * the macroblock coefficient buffer: 16 per 4x4 block horizontally and
 * 64 per row of blocks.  Selected as scan_patterns[0] (type 0) in
 * svq3_decode_block(). */
110 static const uint8_t luma_dc_zigzag_scan[16] = {
111 0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
112 3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
113 1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
114 3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
/* Maps a luma-prediction VLC code to a pair of indices into svq3_pred_1
 * (used as svq3_pred_0[vlc][0] / [1] in svq3_decode_mb()).  The visible
 * rows walk anti-diagonals of a 5x5 grid.
 * NOTE(review): the array is declared with 25 rows but not all rows are
 * visible in this chunk — confirm against the full source. */
117 static const uint8_t svq3_pred_0[25][2] = {
120 { 0, 2 }, { 1, 1 }, { 2, 0 },
121 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
122 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
123 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
124 { 2, 4 }, { 3, 3 }, { 4, 2 },
/* Intra 4x4 mode-ranking table indexed as
 * svq3_pred_1[top_mode + 1][left_mode + 1][rank] in svq3_decode_mb()
 * (the +1 maps the "unavailable" neighbour value -1 onto index 0).
 * Each 5-entry row lists candidate prediction modes in priority order;
 * -1 marks an invalid/unused slot. */
129 static const int8_t svq3_pred_1[6][6][5] = {
130 { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
131 { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
132 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
133 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
134 { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
135 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
136 { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
137 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
138 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
139 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
140 { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
141 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/* Short DCT VLC lookup, indexed [intra][vlc] in svq3_decode_block().
 * Per that usage the struct members are .run and .level (the member
 * declarations themselves are not visible in this chunk — confirm). */
144 static const struct {
147 } svq3_dct_tables[2][16] = {
148 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
149 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
150 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
151 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/* Dequantization multipliers indexed by qscale (0..31).  Applied in the
 * IDCT helpers below as (coeff * qmul + 0x80000) >> 20, i.e. fixed-point
 * with 20 fractional bits and round-to-nearest. */
154 static const uint32_t svq3_dequant_coeff[32] = {
155 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
156 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
157 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
158 61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
/**
 * Dequantize and inverse-transform the 16 luma DC coefficients.
 * Uses SVQ3's 13/17/7 integer butterfly (rows first, then columns)
 * rather than the plain H.264 Hadamard; each result is scaled by the
 * qp-derived qmul and rounded with +0x80000 >> 20.
 * NOTE(review): output positions combine x_offset[] with strides
 * 0/2/8/10 — presumably matching the macroblock DC coefficient layout;
 * confirm against the caller.  Several declarations (i, temp[], stride)
 * are not visible in this chunk.
 */
161 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
163 const int qmul = svq3_dequant_coeff[qp];
167 static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
/* horizontal (row) pass into temp[] */
169 for (i = 0; i < 4; i++) {
170 const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
171 const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
172 const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
173 const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
175 temp[4 * i + 0] = z0 + z3;
176 temp[4 * i + 1] = z1 + z2;
177 temp[4 * i + 2] = z1 - z2;
178 temp[4 * i + 3] = z0 - z3;
/* vertical (column) pass + dequantization + rounding */
181 for (i = 0; i < 4; i++) {
182 const int offset = x_offset[i];
183 const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
184 const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
185 const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
186 const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
188 output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
189 output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
190 output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
191 output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
/**
 * Dequantize, inverse-transform and add one 4x4 residual block to dst.
 * dc == 1 selects a fixed DC scale (1538); otherwise the DC is derived
 * from block[0] via qmul.  Same 13/17/7 butterfly as the luma DC IDCT,
 * with results clipped into [0,255] and added to the destination.
 * The block buffer is cleared afterwards so it can be reused.
 */
196 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
197 int stride, int qp, int dc)
199 const int qmul = svq3_dequant_coeff[qp];
/* 13*13 compensates for the missing butterfly scaling of the DC path */
203 dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
204 : qmul * (block[0] >> 3) / 2);
/* horizontal (row) pass, in place */
208 for (i = 0; i < 4; i++) {
209 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
210 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
211 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
212 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
214 block[0 + 4 * i] = z0 + z3;
215 block[1 + 4 * i] = z1 + z2;
216 block[2 + 4 * i] = z1 - z2;
217 block[3 + 4 * i] = z0 - z3;
/* vertical pass + DC + rounding, clipped and added to dst */
220 for (i = 0; i < 4; i++) {
221 const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
222 const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
223 const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
224 const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
225 const int rr = (dc + 0x80000);
227 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
228 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
229 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
230 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
233 memset(block, 0, 16 * sizeof(int16_t));
/**
 * Decode run/level coefficient data for one block from interleaved
 * Golomb codes.  type selects the scan pattern: 0 = luma DC zigzag,
 * 1 = zigzag, 2 = svq3 scan, 3 = chroma DC; intra (3*type >> 2) picks
 * the VLC table set.  index is the first coefficient position to fill.
 * NOTE(review): error paths and the return statements are not visible
 * in this chunk; presumably returns nonzero on invalid VLC / overflow.
 */
236 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
237 int index, const int type)
239 static const uint8_t *const scan_patterns[4] =
240 { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
242 int run, level, sign, limit;
244 const int intra = 3 * type >> 2;
245 const uint8_t *const scan = scan_patterns[type];
/* outer loop widens the coefficient window (16 >> intra, then +8) */
247 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
248 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
249 if ((int32_t)vlc < 0)
/* even vlc => negative level (sign = -1 applied via xor below) */
252 sign = (vlc & 1) ? 0 : -1;
259 } else if (vlc < 4) {
264 level = (vlc + 9 >> 2) - run;
/* small codes come from the (run, level) lookup table */
268 run = svq3_dct_tables[intra][vlc].run;
269 level = svq3_dct_tables[intra][vlc].level;
272 level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
275 level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
280 if ((index += run) >= limit)
/* apply sign and store at the scan position */
283 block[scan[index]] = (level ^ sign) - sign;
/**
 * Motion-compensate one partition (width x height at x,y) from the
 * last (dir == 0) or next (dir == 1) reference picture.  mx/my are in
 * thirdpel or halfpel units depending on `thirdpel`; dxy selects the
 * sub-pel interpolation function; avg selects put vs. average.
 * Falls back to emulated_edge_mc when the source area crosses the
 * padded frame border.  Chroma planes are handled at half resolution
 * unless CODEC_FLAG_GRAY is set.
 */
294 static inline void svq3_mc_dir_part(SVQ3Context *s,
295 int x, int y, int width, int height,
296 int mx, int my, int dxy,
297 int thirdpel, int dir, int avg)
299 H264Context *h = &s->h;
300 const Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
303 int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
/* clip MVs that would read outside the padded edges */
308 if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
309 my < 0 || my >= s->v_edge_pos - height - 1) {
311 mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
312 my = av_clip(my, -16, s->v_edge_pos - height + 15);
315 /* form component predictions */
316 dest = h->cur_pic.f.data[0] + x + y * h->linesize;
317 src = pic->f.data[0] + mx + my * h->linesize;
320 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
321 h->linesize, h->linesize,
322 width + 1, height + 1,
323 mx, my, s->h_edge_pos, s->v_edge_pos);
324 src = h->edge_emu_buffer;
/* thirdpel path uses the tpel tables, otherwise hpel put/avg */
327 (avg ? h->dsp.avg_tpel_pixels_tab
328 : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
331 (avg ? s->hdsp.avg_pixels_tab
332 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
/* chroma at half resolution; MV rounded toward the block position */
335 if (!(h->flags & CODEC_FLAG_GRAY)) {
336 mx = mx + (mx < (int) x) >> 1;
337 my = my + (my < (int) y) >> 1;
339 height = height >> 1;
342 for (i = 1; i < 3; i++) {
343 dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
344 src = pic->f.data[i] + mx + my * h->uvlinesize;
347 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
348 h->uvlinesize, h->uvlinesize,
349 width + 1, height + 1,
350 mx, my, (s->h_edge_pos >> 1),
352 src = h->edge_emu_buffer;
355 (avg ? h->dsp.avg_tpel_pixels_tab
356 : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
360 (avg ? s->hdsp.avg_pixels_tab
361 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/**
 * Decode motion vectors for all partitions of the current macroblock
 * and perform motion compensation in direction `dir`.  size encodes
 * the partition geometry; mode is one of the *PEL_MODE / PREDICT_MODE
 * constants.  For PREDICT_MODE the MVs are scaled from the co-located
 * next-picture motion instead of being read from the bitstream.
 * Updates mv_cache and writes the final MVs back to cur_pic.
 */
368 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
371 int i, j, k, mx, my, dx, dy, x, y;
372 H264Context *h = &s->h;
373 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
374 const int part_height = 16 >> ((unsigned)(size + 1) / 3);
375 const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
376 const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
377 const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
379 for (i = 0; i < 16; i += part_height)
380 for (j = 0; j < 16; j += part_width) {
381 const int b_xy = (4 * h->mb_x + (j >> 2)) +
382 (4 * h->mb_y + (i >> 2)) * h->b_stride;
384 x = 16 * h->mb_x + j;
385 y = 16 * h->mb_y + i;
386 k = (j >> 2 & 1) + (i >> 1 & 2) +
387 (j >> 1 & 4) + (i & 8);
/* MV prediction: median from neighbours, or temporal scaling of the
 * co-located next-picture MV for PREDICT_MODE */
389 if (mode != PREDICT_MODE) {
390 pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
392 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
393 my = s->next_pic->motion_val[0][b_xy][1] << 1;
396 mx = mx * h->frame_num_offset /
397 h->prev_frame_num_offset + 1 >> 1;
398 my = my * h->frame_num_offset /
399 h->prev_frame_num_offset + 1 >> 1;
401 mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
402 h->prev_frame_num_offset + 1 >> 1;
403 my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
404 h->prev_frame_num_offset + 1 >> 1;
408 /* clip motion vector prediction to frame border */
409 mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
410 my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
412 /* get (optional) motion vector differential */
413 if (mode == PREDICT_MODE) {
416 dy = svq3_get_se_golomb(&h->gb);
417 dx = svq3_get_se_golomb(&h->gb);
419 if (dx == INVALID_VLC || dy == INVALID_VLC) {
420 av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
425 /* compute motion vector */
426 if (mode == THIRDPEL_MODE) {
428 mx = (mx + 1 >> 1) + dx;
429 my = (my + 1 >> 1) + dy;
430 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
431 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
432 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
434 svq3_mc_dir_part(s, x, y, part_width, part_height,
435 fx, fy, dxy, 1, dir, avg);
438 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
439 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
440 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
441 dxy = (mx & 1) + 2 * (my & 1);
443 svq3_mc_dir_part(s, x, y, part_width, part_height,
444 mx >> 1, my >> 1, dxy, 0, dir, avg);
/* FULLPEL_MODE: round the sixth-pel prediction to integer pels */
448 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
449 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
451 svq3_mc_dir_part(s, x, y, part_width, part_height,
452 mx, my, 0, 0, dir, avg);
457 /* update mv_cache */
458 if (mode != PREDICT_MODE) {
459 int32_t mv = pack16to32(mx, my);
461 if (part_height == 8 && i < 8) {
462 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
464 if (part_width == 8 && j < 8)
465 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
467 if (part_width == 8 && j < 8)
468 AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
469 if (part_width == 4 || part_height == 4)
470 AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
473 /* write back motion vectors */
474 fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
475 part_width >> 2, part_height >> 2, h->b_stride,
476 pack16to32(mx, my), 4);
/**
 * Decode one macroblock: classify the bitstream mb_type (0 = skip,
 * 1..7 = inter, 8/33 = intra4x4, other = intra16x16), reconstruct
 * prediction (motion compensation or intra modes) and decode the
 * residual coefficient blocks.  Returns 0 on success, negative on
 * bitstream errors.  NOTE(review): many error-return and closing-brace
 * lines are not visible in this chunk.
 */
482 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
484 H264Context *h = &s->h;
485 int i, j, k, m, dir, mode;
489 const int mb_xy = h->mb_xy;
490 const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
/* neighbour sample availability based on macroblock position */
492 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
493 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
494 h->topright_samples_available = 0xFFFF;
496 if (mb_type == 0) { /* SKIP */
497 if (h->pict_type == AV_PICTURE_TYPE_P ||
498 s->next_pic->mb_type[mb_xy] == -1) {
499 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
502 if (h->pict_type == AV_PICTURE_TYPE_B)
503 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
506 mb_type = MB_TYPE_SKIP;
/* B-frame skip: reuse the co-located next-picture partitioning */
508 mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
509 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
511 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
514 mb_type = MB_TYPE_16x16;
516 } else if (mb_type < 8) { /* INTER */
/* sub-pel mode signalled by combining the hpel/tpel flags with bits */
517 if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
518 mode = THIRDPEL_MODE;
519 else if (s->halfpel_flag &&
520 s->thirdpel_flag == !get_bits1(&h->gb))
526 /* note ref_cache should contain here:
/* fill mv_cache/ref_cache from left, top, top-right and top-left
 * neighbours (or mark them unavailable at frame edges) */
534 for (m = 0; m < 2; m++) {
535 if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
536 for (i = 0; i < 4; i++)
537 AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
538 h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
540 for (i = 0; i < 4; i++)
541 AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
544 memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
545 h->cur_pic.motion_val[m][b_xy - h->b_stride],
546 4 * 2 * sizeof(int16_t));
547 memset(&h->ref_cache[m][scan8[0] - 1 * 8],
548 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
550 if (h->mb_x < h->mb_width - 1) {
551 AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
552 h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
553 h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
554 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
555 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
557 h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
559 AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
560 h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
561 h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
562 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
564 h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
566 memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
567 PART_NOT_AVAILABLE, 8);
569 if (h->pict_type != AV_PICTURE_TYPE_B)
573 /* decode motion vector(s) and form prediction(s) */
574 if (h->pict_type == AV_PICTURE_TYPE_P) {
575 if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
577 } else { /* AV_PICTURE_TYPE_B */
579 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
582 for (i = 0; i < 4; i++)
583 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
584 0, 4 * 2 * sizeof(int16_t));
587 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
590 for (i = 0; i < 4; i++)
591 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
592 0, 4 * 2 * sizeof(int16_t));
596 mb_type = MB_TYPE_16x16;
597 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
598 memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* import left/top neighbour 4x4 modes into the prediction cache */
602 for (i = 0; i < 4; i++)
603 h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
604 if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
605 h->left_samples_available = 0x5F5F;
608 h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
609 h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
610 h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
611 h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
613 if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
614 h->top_samples_available = 0x33FF;
617 /* decode prediction codes for luma blocks */
618 for (i = 0; i < 16; i += 2) {
619 vlc = svq3_get_ue_golomb(&h->gb);
622 av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
626 left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
627 top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
/* one VLC yields two 4x4 modes via svq3_pred_0/svq3_pred_1 */
629 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
630 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
632 if (left[1] == -1 || left[2] == -1) {
633 av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
637 } else { /* mb_type == 33, DC_128_PRED block type */
638 for (i = 0; i < 4; i++)
639 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
642 write_back_intra_pred_mode(h);
645 ff_h264_check_intra4x4_pred_mode(h);
647 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
648 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
650 for (i = 0; i < 4; i++)
651 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
653 h->top_samples_available = 0x33FF;
654 h->left_samples_available = 0x5F5F;
657 mb_type = MB_TYPE_INTRA4x4;
658 } else { /* INTRA16x16 */
659 dir = i_mb_type_info[mb_type - 8].pred_mode;
660 dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
662 if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) < 0) {
663 av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
664 return h->intra16x16_pred_mode;
667 cbp = i_mb_type_info[mb_type - 8].cbp;
668 mb_type = MB_TYPE_INTRA16x16;
/* invalidate MVs for intra MBs in predicted pictures */
671 if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
672 for (i = 0; i < 4; i++)
673 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
674 0, 4 * 2 * sizeof(int16_t));
675 if (h->pict_type == AV_PICTURE_TYPE_B) {
676 for (i = 0; i < 4; i++)
677 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
678 0, 4 * 2 * sizeof(int16_t));
681 if (!IS_INTRA4x4(mb_type)) {
682 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
684 if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
685 memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
/* coded block pattern (explicit for everything but intra16x16) */
688 if (!IS_INTRA16x16(mb_type) &&
689 (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
690 if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48U){
691 av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
695 cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
696 : golomb_to_inter_cbp[vlc];
698 if (IS_INTRA16x16(mb_type) ||
699 (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
700 h->qscale += svq3_get_se_golomb(&h->gb);
702 if (h->qscale > 31u) {
703 av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
/* intra16x16: luma DC block decoded separately */
707 if (IS_INTRA16x16(mb_type)) {
708 AV_ZERO128(h->mb_luma_dc[0] + 0);
709 AV_ZERO128(h->mb_luma_dc[0] + 8);
710 if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
711 av_log(h->avctx, AV_LOG_ERROR,
712 "error while decoding intra luma dc\n");
/* luma AC (or full) residual blocks per the cbp */
718 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
719 const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
721 for (i = 0; i < 4; i++)
722 if ((cbp & (1 << i))) {
723 for (j = 0; j < 4; j++) {
724 k = index ? (1 * (j & 1) + 2 * (i & 1) +
725 2 * (j & 2) + 4 * (i & 2))
727 h->non_zero_count_cache[scan8[k]] = 1;
729 if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
730 av_log(h->avctx, AV_LOG_ERROR,
731 "error while decoding block\n");
/* chroma DC then chroma AC blocks */
738 for (i = 1; i < 3; ++i)
739 if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
740 av_log(h->avctx, AV_LOG_ERROR,
741 "error while decoding chroma dc block\n");
746 for (i = 1; i < 3; i++) {
747 for (j = 0; j < 4; j++) {
749 h->non_zero_count_cache[scan8[k]] = 1;
751 if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
752 av_log(h->avctx, AV_LOG_ERROR,
753 "error while decoding chroma ac block\n");
763 h->cur_pic.mb_type[mb_xy] = mb_type;
765 if (IS_INTRA(mb_type))
766 h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
/**
 * Parse an SVQ3 slice header: validate the header byte, locate the
 * next slice (multi-slice layout), undo the watermark XOR if present,
 * then read slice type, skip-run, slice number, qscale and the
 * adaptive-quant flag.  Finally resets intra predictors along the
 * slice boundary.  Returns 0 on success, negative on error
 * (NOTE(review): several return statements are not visible here).
 */
771 static int svq3_decode_slice_header(AVCodecContext *avctx)
773 SVQ3Context *s = avctx->priv_data;
774 H264Context *h = &s->h;
775 const int mb_xy = h->mb_xy;
779 header = get_bits(&h->gb, 8);
781 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
783 av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
/* bits 5-6 give the byte-length field size of the slice-size code */
786 int length = header >> 5 & 3;
788 s->next_slice_index = get_bits_count(&h->gb) +
789 8 * show_bits(&h->gb, 8 * length) +
792 if (s->next_slice_index > h->gb.size_in_bits) {
793 av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
797 h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
798 skip_bits(&h->gb, 8);
/* de-obfuscate the watermarked bytes in place before parsing */
800 if (s->watermark_key) {
801 uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
802 AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
803 header ^ s->watermark_key);
806 memmove((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
807 &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
809 skip_bits_long(&h->gb, 0);
812 if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
813 av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
817 h->slice_type = golomb_to_pict_type[slice_id];
819 if ((header & 0x9F) == 2) {
/* slice starts mid-picture: read the MB address / skip run */
820 i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
821 h->mb_skip_run = get_bits(&h->gb, i) -
822 (h->mb_y * h->mb_width + h->mb_x);
828 h->slice_num = get_bits(&h->gb, 8);
829 h->qscale = get_bits(&h->gb, 5);
830 s->adaptive_quant = get_bits1(&h->gb);
839 skip_bits(&h->gb, 2);
841 if (skip_1stop_8data_bits(&h->gb) < 0)
842 return AVERROR_INVALIDDATA;
844 /* reset intra predictors and invalidate motion vector references */
846 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
847 -1, 4 * sizeof(int8_t));
848 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
849 -1, 8 * sizeof(int8_t) * h->mb_x);
852 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
853 -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
856 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/**
 * Decoder init: allocate the three Picture slots, initialize the
 * shared H.264 context and hpel DSP, then parse the "SEQH" section
 * of the stsd extradata (frame size code, hpel/tpel flags, low_delay,
 * and the optional zlib-compressed watermark whose checksum becomes
 * the de-obfuscation key).  Finally derives the macroblock geometry
 * and allocates the H.264 tables.
 */
862 static av_cold int svq3_decode_init(AVCodecContext *avctx)
864 SVQ3Context *s = avctx->priv_data;
865 H264Context *h = &s->h;
867 unsigned char *extradata;
868 unsigned char *extradata_end;
870 int marker_found = 0;
/* three Picture slots: current, last (P-ref) and next (B-ref) */
872 s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
873 s->last_pic = av_mallocz(sizeof(*s->last_pic));
874 s->next_pic = av_mallocz(sizeof(*s->next_pic));
875 if (!s->next_pic || !s->last_pic || !s->cur_pic) {
876 av_freep(&s->cur_pic);
877 av_freep(&s->last_pic);
878 av_freep(&s->next_pic);
879 return AVERROR(ENOMEM);
882 if (ff_h264_decode_init(avctx) < 0)
885 ff_hpeldsp_init(&s->hdsp, avctx->flags);
886 h->flags = avctx->flags;
888 h->sps.chroma_format_idc = 1;
889 h->picture_structure = PICT_FRAME;
890 avctx->pix_fmt = avctx->codec->pix_fmts[0];
892 h->chroma_qp[0] = h->chroma_qp[1] = 4;
893 h->chroma_x_shift = h->chroma_y_shift = 1;
896 s->thirdpel_flag = 1;
899 /* prowl for the "SEQH" marker in the extradata */
900 extradata = (unsigned char *)avctx->extradata;
901 extradata_end = avctx->extradata + avctx->extradata_size;
903 for (m = 0; m + 8 < avctx->extradata_size; m++) {
904 if (!memcmp(extradata, "SEQH", 4)) {
912 /* if a match was found, parse the extra data */
917 size = AV_RB32(&extradata[4]);
918 if (size > extradata_end - extradata - 8)
919 return AVERROR_INVALIDDATA;
920 init_get_bits(&gb, extradata + 8, size * 8);
922 /* 'frame size code' and optional 'width, height' */
923 frame_size_code = get_bits(&gb, 3);
924 switch (frame_size_code) {
954 avctx->width = get_bits(&gb, 12);
955 avctx->height = get_bits(&gb, 12);
959 s->halfpel_flag = get_bits1(&gb);
960 s->thirdpel_flag = get_bits1(&gb);
968 h->low_delay = get_bits1(&gb);
973 if (skip_1stop_8data_bits(&gb) < 0)
974 return AVERROR_INVALIDDATA;
976 s->unknown_flag = get_bits1(&gb);
977 avctx->has_b_frames = !h->low_delay;
/* optional zlib-compressed watermark logo; its checksum is the
 * XOR key used in svq3_decode_slice_header() */
978 if (s->unknown_flag) {
980 unsigned watermark_width = svq3_get_ue_golomb(&gb);
981 unsigned watermark_height = svq3_get_ue_golomb(&gb);
982 int u1 = svq3_get_ue_golomb(&gb);
983 int u2 = get_bits(&gb, 8);
984 int u3 = get_bits(&gb, 2);
985 int u4 = svq3_get_ue_golomb(&gb);
986 unsigned long buf_len = watermark_width *
987 watermark_height * 4;
988 int offset = get_bits_count(&gb) + 7 >> 3;
/* overflow check on watermark_width * watermark_height * 4 */
991 if (watermark_height <= 0 ||
992 (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
995 buf = av_malloc(buf_len);
996 av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
997 watermark_width, watermark_height);
998 av_log(avctx, AV_LOG_DEBUG,
999 "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1000 u1, u2, u3, u4, offset);
1001 if (uncompress(buf, &buf_len, extradata + 8 + offset,
1002 size - offset) != Z_OK) {
1003 av_log(avctx, AV_LOG_ERROR,
1004 "could not uncompress watermark logo\n");
1008 s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1009 s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1010 av_log(avctx, AV_LOG_DEBUG,
1011 "watermark key %#x\n", s->watermark_key);
1014 av_log(avctx, AV_LOG_ERROR,
1015 "this svq3 file contains watermark which need zlib support compiled in\n");
/* derive macroblock geometry from the (possibly parsed) frame size */
1021 h->width = avctx->width;
1022 h->height = avctx->height;
1023 h->mb_width = (h->width + 15) / 16;
1024 h->mb_height = (h->height + 15) / 16;
1025 h->mb_stride = h->mb_width + 1;
1026 h->mb_num = h->mb_width * h->mb_height;
1027 h->b_stride = 4 * h->mb_width;
1028 s->h_edge_pos = h->mb_width * 16;
1029 s->v_edge_pos = h->mb_height * 16;
1031 if (ff_h264_alloc_tables(h) < 0) {
1032 av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1033 return AVERROR(ENOMEM);
/* Release all per-picture buffers (motion vectors, ref indices,
 * mb types) and unreference the frame itself. */
1039 static void free_picture(AVCodecContext *avctx, Picture *pic)
1042 for (i = 0; i < 2; i++) {
1043 av_buffer_unref(&pic->motion_val_buf[i]);
1044 av_buffer_unref(&pic->ref_index_buf[i]);
1046 av_buffer_unref(&pic->mb_type_buf);
1048 av_frame_unref(&pic->f);
/**
 * Allocate (on first use) the side buffers of a Picture — mb_type,
 * motion vectors, reference indices — then obtain the actual frame
 * buffer via ff_get_buffer().  Also lazily allocates the edge
 * emulation scratch buffer and caches luma/chroma linesizes.
 * Returns 0 on success or a negative AVERROR; on failure the
 * partially-allocated picture is freed via free_picture().
 */
1051 static int get_buffer(AVCodecContext *avctx, Picture *pic)
1053 SVQ3Context *s = avctx->priv_data;
1054 H264Context *h = &s->h;
1055 const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1056 const int mb_array_size = h->mb_stride * h->mb_height;
1057 const int b4_stride = h->mb_width * 4 + 1;
1058 const int b4_array_size = b4_stride * h->mb_height * 4;
1061 if (!pic->motion_val_buf[0]) {
1064 pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1065 if (!pic->mb_type_buf)
1066 return AVERROR(ENOMEM);
1067 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1069 for (i = 0; i < 2; i++) {
1070 pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1071 pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1072 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1073 ret = AVERROR(ENOMEM);
1077 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1078 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* B-pictures are never used as references */
1081 pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1083 ret = ff_get_buffer(avctx, &pic->f,
1084 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1088 if (!h->edge_emu_buffer) {
1089 h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1090 if (!h->edge_emu_buffer)
1091 return AVERROR(ENOMEM);
1094 h->linesize = pic->f.linesize[0];
1095 h->uvlinesize = pic->f.linesize[1];
/* error path: release whatever was allocated */
1099 free_picture(avctx, pic);
/**
 * Decode one SVQ3 packet into a frame.  Handles the end-of-stream
 * flush (outputs the buffered next_pic), watermark de-obfuscation of
 * the packet payload, slice-header parsing, per-macroblock decoding,
 * missing-reference synthesis (grey frames) and the P/B reference
 * picture rotation.  NOTE(review): several declarations and error
 * returns are not visible in this chunk.
 */
1103 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1104 int *got_frame, AVPacket *avpkt)
1106 SVQ3Context *s = avctx->priv_data;
1107 H264Context *h = &s->h;
1108 int buf_size = avpkt->size;
1113 /* special case for last picture */
1114 if (buf_size == 0) {
1115 if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1116 ret = av_frame_ref(data, &s->next_pic->f);
1119 s->last_frame_output = 1;
1125 h->mb_x = h->mb_y = h->mb_xy = 0;
/* watermarked streams are decoded from a de-obfuscated copy */
1127 if (s->watermark_key) {
1128 av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
1130 return AVERROR(ENOMEM);
1131 memcpy(s->buf, avpkt->data, buf_size);
1137 init_get_bits(&h->gb, buf, 8 * buf_size);
1139 if (svq3_decode_slice_header(avctx))
1142 h->pict_type = h->slice_type;
/* non-B picture: previous "next" becomes the new "last" reference */
1144 if (h->pict_type != AV_PICTURE_TYPE_B)
1145 FFSWAP(Picture*, s->next_pic, s->last_pic);
1147 av_frame_unref(&s->cur_pic->f);
1149 /* for skipping the frame */
1150 s->cur_pic->f.pict_type = h->pict_type;
1151 s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1153 ret = get_buffer(avctx, s->cur_pic);
1157 h->cur_pic_ptr = s->cur_pic;
1158 av_frame_unref(&h->cur_pic.f);
1159 h->cur_pic = *s->cur_pic;
1160 ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
/* per-frame block offsets derive from scan8 and the linesizes */
1164 for (i = 0; i < 16; i++) {
1165 h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1166 h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1168 for (i = 0; i < 16; i++) {
1169 h->block_offset[16 + i] =
1170 h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1171 h->block_offset[48 + 16 + i] =
1172 h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
/* synthesize grey reference frames when references are missing */
1175 if (h->pict_type != AV_PICTURE_TYPE_I) {
1176 if (!s->last_pic->f.data[0]) {
1177 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1178 ret = get_buffer(avctx, s->last_pic);
1181 memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1182 memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1183 s->last_pic->f.linesize[1]);
1184 memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1185 s->last_pic->f.linesize[2]);
1188 if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1189 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1190 ret = get_buffer(avctx, s->next_pic);
1193 memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1194 memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1195 s->next_pic->f.linesize[1]);
1196 memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1197 s->next_pic->f.linesize[2]);
1201 if (avctx->debug & FF_DEBUG_PICT_INFO)
1202 av_log(h->avctx, AV_LOG_DEBUG,
1203 "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1204 av_get_picture_type_char(h->pict_type),
1205 s->halfpel_flag, s->thirdpel_flag,
1206 s->adaptive_quant, h->qscale, h->slice_num);
1208 if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1209 avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1210 avctx->skip_frame >= AVDISCARD_ALL)
1213 if (s->next_p_frame_damaged) {
1214 if (h->pict_type == AV_PICTURE_TYPE_B)
1217 s->next_p_frame_damaged = 0;
/* frame_num bookkeeping for temporal MV scaling (PREDICT_MODE) */
1220 if (h->pict_type == AV_PICTURE_TYPE_B) {
1221 h->frame_num_offset = h->slice_num - h->prev_frame_num;
1223 if (h->frame_num_offset < 0)
1224 h->frame_num_offset += 256;
1225 if (h->frame_num_offset == 0 ||
1226 h->frame_num_offset >= h->prev_frame_num_offset) {
1227 av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1231 h->prev_frame_num = h->frame_num;
1232 h->frame_num = h->slice_num;
1233 h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1235 if (h->prev_frame_num_offset < 0)
1236 h->prev_frame_num_offset += 256;
/* preset ref_cache: everything available (1) except the pad column */
1239 for (m = 0; m < 2; m++) {
1241 for (i = 0; i < 4; i++) {
1243 for (j = -1; j < 4; j++)
1244 h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1246 h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
/* main macroblock loop */
1250 for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1251 for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1253 h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
1255 if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1256 ((get_bits_count(&h->gb) & 7) == 0 ||
1257 show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1258 skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1259 h->gb.size_in_bits = 8 * buf_size;
1261 if (svq3_decode_slice_header(avctx))
1264 /* TODO: support s->mb_skip_run */
1267 mb_type = svq3_get_ue_golomb(&h->gb);
1269 if (h->pict_type == AV_PICTURE_TYPE_I)
1271 else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1273 if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1274 av_log(h->avctx, AV_LOG_ERROR,
1275 "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1279 if (mb_type != 0 || h->cbp)
1280 ff_h264_hl_decode_mb(h);
1282 if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1283 h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1284 (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1287 ff_draw_horiz_band(avctx, NULL, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL,
1288 16 * h->mb_y, 16, h->picture_structure, 0, 0,
1289 h->low_delay, h->mb_height * 16, h->mb_width * 16);
1292 left = buf_size*8 - get_bits_count(&h->gb);
1294 if (h->mb_y != h->mb_height || h->mb_x != h->mb_width) {
1295 av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, h->mb_y, h->mb_x, left);
1296 //av_hex_dump(stderr, buf+buf_size-8, 8);
1300 av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
/* output: current frame directly (B or low_delay) or delayed last */
1304 if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1305 ret = av_frame_ref(data, &s->cur_pic->f);
1306 else if (s->last_pic->f.data[0])
1307 ret = av_frame_ref(data, &s->last_pic->f);
1311 /* Do not output the last pic after seeking. */
1312 if (s->last_pic->f.data[0] || h->low_delay)
1315 if (h->pict_type != AV_PICTURE_TYPE_B) {
1316 FFSWAP(Picture*, s->cur_pic, s->next_pic);
1318 av_frame_unref(&s->cur_pic->f);
/* Decoder teardown: free the three Picture slots and their side
 * buffers, the shared H.264 context state and the edge-emulation
 * scratch buffer. */
1324 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1326 SVQ3Context *s = avctx->priv_data;
1327 H264Context *h = &s->h;
1329 free_picture(avctx, s->cur_pic);
1330 free_picture(avctx, s->next_pic);
1331 free_picture(avctx, s->last_pic);
1332 av_freep(&s->cur_pic);
1333 av_freep(&s->next_pic);
1334 av_freep(&s->last_pic);
1336 av_frame_unref(&h->cur_pic.f);
1338 ff_h264_free_context(h);
1342 av_freep(&h->edge_emu_buffer);
1347 AVCodec ff_svq3_decoder = {
1349 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1350 .type = AVMEDIA_TYPE_VIDEO,
1351 .id = AV_CODEC_ID_SVQ3,
1352 .priv_data_size = sizeof(SVQ3Context),
1353 .init = svq3_decode_init,
1354 .close = svq3_decode_end,
1355 .decode = svq3_decode_frame,
1356 .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1359 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,