2 * Copyright (c) 2003 The FFmpeg Project
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
43 #include "libavutil/attributes.h"
46 #include "mpegvideo.h"
49 #include "h264data.h" // FIXME FIXME FIXME
51 #include "h264_mvpred.h"
54 #include "rectangle.h"
55 #include "vdpau_internal.h"
/* SVQ3Context fields (partial view of the struct): */
79 uint32_t watermark_key;   /* XORed into slice payload bytes in svq3_decode_slice_header() */
83 int next_p_frame_damaged; /* set when the following P-frame must not be shown — TODO confirm, setter not visible here */
86 int last_frame_output;    /* guards against emitting the delayed frame twice on flush */
/* Motion-compensation modes used by svq3_mc_dir(); PREDICT_MODE derives the
 * vector from the co-located next-picture MV instead of reading a differential. */
89 #define FULLPEL_MODE 1
90 #define HALFPEL_MODE 2
91 #define THIRDPEL_MODE 3
92 #define PREDICT_MODE 4
94 /* dual scan (from some older h264 draft)
/* 4x4 coefficient scan order (entries are x + y*4); selected as
 * scan_patterns[2] in svq3_decode_block(). */
103 static const uint8_t svq3_scan[16] = {
104 0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
105 2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
106 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
107 0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
/* Scan for the 16 luma DC coefficients; entries are block offsets within the
 * macroblock (x * 16 + y * 64). Used as scan_patterns[0] in svq3_decode_block(). */
110 static const uint8_t luma_dc_zigzag_scan[16] = {
111 0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
112 3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
113 1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
114 3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
/* Maps a luma-prediction VLC code to a pair of table indices; both entries are
 * fed into svq3_pred_1[][] when decoding intra 4x4 prediction modes. The pairs
 * walk anti-diagonals of a 5x5 grid. (Only part of the 25 rows is visible here.) */
117 static const uint8_t svq3_pred_0[25][2] = {
120 { 0, 2 }, { 1, 1 }, { 2, 0 },
121 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
122 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
123 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
124 { 2, 4 }, { 3, 3 }, { 4, 2 },
/* Intra 4x4 prediction-mode lookup, indexed as
 * svq3_pred_1[top_mode + 1][left_mode + 1][code]; -1 marks an invalid
 * combination (rejected as "weird prediction" in svq3_decode_mb()). */
129 static const int8_t svq3_pred_1[6][6][5] = {
130 { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
131 { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
132 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
133 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
134 { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
135 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
136 { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
137 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
138 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
139 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
140 { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
141 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/* (run, level) pairs for the short DCT coefficient VLC codes; the first table
 * is used for inter, the second for intra blocks (selected by the `intra`
 * index in svq3_decode_block()). */
144 static const struct {
147 } svq3_dct_tables[2][16] = {
148 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
149 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
150 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
151 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/* Fixed-point dequantization multipliers, indexed by qscale (0..31); results
 * are later rounded with +0x80000 and shifted right by 20 in the IDCT helpers. */
154 static const uint32_t svq3_dequant_coeff[32] = {
155 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
156 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
157 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
158 61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
/* Forward declaration: svq3_decode_init() calls this on its error path. */
161 static int svq3_decode_end(AVCodecContext *avctx);
/**
 * Dequantize and inverse-transform the 4x4 luma DC coefficients.
 * Row/column passes use the fixed-point basis coefficients 13, 7 and 17;
 * the result is scaled by the qp-dependent multiplier and rounded back to
 * integer with (+0x80000 >> 20), i.e. a 20-bit fixed-point round.
 * NOTE: several declarations (i, temp[], stride) fall in lines not visible
 * in this view.
 */
163 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
165 const int qmul = svq3_dequant_coeff[qp];
169 static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
/* horizontal (row) transform pass into temp[] */
171 for (i = 0; i < 4; i++) {
172 const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
173 const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
174 const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
175 const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
177 temp[4 * i + 0] = z0 + z3;
178 temp[4 * i + 1] = z1 + z2;
179 temp[4 * i + 2] = z1 - z2;
180 temp[4 * i + 3] = z0 - z3;
/* vertical (column) pass, dequantize and scatter to the DC positions */
183 for (i = 0; i < 4; i++) {
184 const int offset = x_offset[i];
185 const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
186 const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
187 const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
188 const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
/* note: + binds tighter than >>, so these round-then-shift as intended */
190 output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
191 output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
192 output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
193 output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
/**
 * Dequantize, inverse-transform a 4x4 block and add it to dst.
 * @param dc when non-zero the DC term is handled specially: dc==1 uses the
 *           fixed 1538 multiplier, otherwise qp-based dequant (see line 205f).
 * The transform mirrors ff_svq3_luma_dc_dequant_idct_c() (coeffs 13/7/17),
 * with the same +0x80000 >> 20 fixed-point rounding. block is cleared at the
 * end for reuse.
 */
198 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
199 int stride, int qp, int dc)
201 const int qmul = svq3_dequant_coeff[qp];
205 dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
206 : qmul * (block[0] >> 3) / 2);
/* row pass, in place */
210 for (i = 0; i < 4; i++) {
211 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
212 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
213 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
214 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
216 block[0 + 4 * i] = z0 + z3;
217 block[1 + 4 * i] = z1 + z2;
218 block[2 + 4 * i] = z1 - z2;
219 block[3 + 4 * i] = z0 - z3;
/* column pass; rr folds the (possibly pre-dequantized) DC and rounding bias */
222 for (i = 0; i < 4; i++) {
223 const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
224 const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
225 const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
226 const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
227 const int rr = (dc + 0x80000);
229 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
230 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
231 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
232 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
/* reset the block so the caller can reuse the buffer */
235 memset(block, 0, 16 * sizeof(int16_t));
/**
 * Decode run/level coefficient data for one block into block[].
 * @param type selects the scan pattern: 0 = luma DC zigzag, 1 = zigzag,
 *             2 = svq3 inter scan, 3 = chroma DC scan
 * Returns non-zero on error (callers treat any non-zero result as failure).
 * Short VLC codes index svq3_dct_tables[]; longer codes compute run/level
 * arithmetically. (Several lines of this function are not visible here.)
 */
238 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
239 int index, const int type)
241 static const uint8_t *const scan_patterns[4] =
242 { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
244 int run, level, sign, limit;
/* intra is 1 for types 2 and 3, 0 otherwise (3*type >> 2) */
246 const int intra = 3 * type >> 2;
247 const uint8_t *const scan = scan_patterns[type];
249 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
250 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
251 if ((int32_t)vlc < 0)
/* low bit of the code carries the sign */
254 sign = (vlc & 1) ? 0 : -1;
261 } else if (vlc < 4) {
266 level = (vlc + 9 >> 2) - run;
270 run = svq3_dct_tables[intra][vlc].run;
271 level = svq3_dct_tables[intra][vlc].level;
274 level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
277 level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
/* out-of-range index after applying the run is a bitstream error */
282 if ((index += run) >= limit)
/* apply sign via xor/subtract trick */
285 block[scan[index]] = (level ^ sign) - sign;
296 static inline void svq3_mc_dir_part(SVQ3Context *s,
297 int x, int y, int width, int height,
298 int mx, int my, int dxy,
299 int thirdpel, int dir, int avg)
301 H264Context *h = &s->h;
302 const Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
305 int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
310 if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
311 my < 0 || my >= s->v_edge_pos - height - 1) {
313 mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
314 my = av_clip(my, -16, s->v_edge_pos - height + 15);
317 /* form component predictions */
318 dest = h->cur_pic.f.data[0] + x + y * h->linesize;
319 src = pic->f.data[0] + mx + my * h->linesize;
322 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
323 h->linesize, h->linesize,
324 width + 1, height + 1,
325 mx, my, s->h_edge_pos, s->v_edge_pos);
326 src = h->edge_emu_buffer;
329 (avg ? h->dsp.avg_tpel_pixels_tab
330 : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
333 (avg ? s->hdsp.avg_pixels_tab
334 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
337 if (!(h->flags & CODEC_FLAG_GRAY)) {
338 mx = mx + (mx < (int) x) >> 1;
339 my = my + (my < (int) y) >> 1;
341 height = height >> 1;
344 for (i = 1; i < 3; i++) {
345 dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
346 src = pic->f.data[i] + mx + my * h->uvlinesize;
349 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
350 h->uvlinesize, h->uvlinesize,
351 width + 1, height + 1,
352 mx, my, (s->h_edge_pos >> 1),
354 src = h->edge_emu_buffer;
357 (avg ? h->dsp.avg_tpel_pixels_tab
358 : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
362 (avg ? s->hdsp.avg_pixels_tab
363 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/**
 * Decode motion vectors for every partition of the current macroblock and
 * perform motion compensation via svq3_mc_dir_part().
 * @param size encodes the partition geometry (16x16 / 16x8 / 8x16 / 8x8...)
 * @param mode one of FULLPEL/HALFPEL/THIRDPEL/PREDICT_MODE; PREDICT_MODE
 *             scales the co-located MV of next_pic instead of reading a delta
 * @param dir  prediction direction (0 = forward, 1 = backward)
 * @param avg  forwarded to svq3_mc_dir_part()
 * Returns < 0 on an invalid MV VLC.
 * MVs are kept in 1/6-pel units during prediction/clipping and converted to
 * the target precision per mode.
 */
370 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
373 int i, j, k, mx, my, dx, dy, x, y;
374 H264Context *h = &s->h;
375 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
376 const int part_height = 16 >> ((unsigned)(size + 1) / 3);
377 const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
378 const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
379 const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
381 for (i = 0; i < 16; i += part_height)
382 for (j = 0; j < 16; j += part_width) {
383 const int b_xy = (4 * h->mb_x + (j >> 2)) +
384 (4 * h->mb_y + (i >> 2)) * h->b_stride;
386 x = 16 * h->mb_x + j;
387 y = 16 * h->mb_y + i;
388 k = (j >> 2 & 1) + (i >> 1 & 2) +
389 (j >> 1 & 4) + (i & 8);
/* MV prediction: neighbours for normal modes, scaled co-located MV otherwise */
391 if (mode != PREDICT_MODE) {
392 pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
394 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
395 my = s->next_pic->motion_val[0][b_xy][1] << 1;
/* temporal scaling by frame-distance ratio, with round-to-nearest */
398 mx = mx * h->frame_num_offset /
399 h->prev_frame_num_offset + 1 >> 1;
400 my = my * h->frame_num_offset /
401 h->prev_frame_num_offset + 1 >> 1;
403 mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
404 h->prev_frame_num_offset + 1 >> 1;
405 my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
406 h->prev_frame_num_offset + 1 >> 1;
410 /* clip motion vector prediction to frame border */
411 mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
412 my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
414 /* get (optional) motion vector differential */
415 if (mode == PREDICT_MODE) {
418 dy = svq3_get_se_golomb(&h->gb);
419 dx = svq3_get_se_golomb(&h->gb);
421 if (dx == INVALID_VLC || dy == INVALID_VLC) {
422 av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
427 /* compute motion vector */
428 if (mode == THIRDPEL_MODE) {
430 mx = (mx + 1 >> 1) + dx;
431 my = (my + 1 >> 1) + dy;
/* +0x3000 bias keeps the unsigned division correct for negative MVs */
432 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
433 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
434 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
436 svq3_mc_dir_part(s, x, y, part_width, part_height,
437 fx, fy, dxy, 1, dir, avg);
440 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
441 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
442 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
443 dxy = (mx & 1) + 2 * (my & 1);
445 svq3_mc_dir_part(s, x, y, part_width, part_height,
446 mx >> 1, my >> 1, dxy, 0, dir, avg);
450 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
451 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
453 svq3_mc_dir_part(s, x, y, part_width, part_height,
454 mx, my, 0, 0, dir, avg);
459 /* update mv_cache */
460 if (mode != PREDICT_MODE) {
461 int32_t mv = pack16to32(mx, my);
463 if (part_height == 8 && i < 8) {
464 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
466 if (part_width == 8 && j < 8)
467 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
469 if (part_width == 8 && j < 8)
470 AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
471 if (part_width == 4 || part_height == 4)
472 AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
475 /* write back motion vectors */
476 fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
477 part_width >> 2, part_height >> 2, h->b_stride,
478 pack16to32(mx, my), 4);
/**
 * Decode one macroblock: dispatch on mb_type (0 = SKIP, <8 = INTER,
 * 8/33 = INTRA4x4 variants, else INTRA16x16), decode MVs / intra modes,
 * then the CBP, qscale delta and residual coefficient blocks.
 * Returns 0 on success, negative on error. (This is a partial view; many
 * lines — error returns, else branches, closing braces — are not visible.)
 */
484 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
486 H264Context *h = &s->h;
487 int i, j, k, m, dir, mode;
491 const int mb_xy = h->mb_xy;
492 const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
/* neighbour availability masks for intra prediction at picture borders */
494 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
495 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
496 h->topright_samples_available = 0xFFFF;
498 if (mb_type == 0) { /* SKIP */
499 if (h->pict_type == AV_PICTURE_TYPE_P ||
500 s->next_pic->mb_type[mb_xy] == -1) {
501 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
504 if (h->pict_type == AV_PICTURE_TYPE_B)
505 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
508 mb_type = MB_TYPE_SKIP;
/* B-frame skip: reuse the co-located MB type, both directions predicted */
510 mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
511 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
513 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
516 mb_type = MB_TYPE_16x16;
518 } else if (mb_type < 8) { /* INTER */
/* sub-pel mode selection depends on the stream's hpel/tpel capability flags */
519 if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
520 mode = THIRDPEL_MODE;
521 else if (s->halfpel_flag &&
522 s->thirdpel_flag == !get_bits1(&h->gb))
528 /* note ref_cache should contain here:
/* fill mv_cache/ref_cache from the left / top / top-right neighbours */
536 for (m = 0; m < 2; m++) {
537 if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
538 for (i = 0; i < 4; i++)
539 AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
540 h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
542 for (i = 0; i < 4; i++)
543 AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
546 memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
547 h->cur_pic.motion_val[m][b_xy - h->b_stride],
548 4 * 2 * sizeof(int16_t));
549 memset(&h->ref_cache[m][scan8[0] - 1 * 8],
550 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
552 if (h->mb_x < h->mb_width - 1) {
553 AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
554 h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
555 h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
556 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
557 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
559 h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
561 AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
562 h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
563 h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
564 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
566 h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
568 memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
569 PART_NOT_AVAILABLE, 8);
571 if (h->pict_type != AV_PICTURE_TYPE_B)
575 /* decode motion vector(s) and form prediction(s) */
576 if (h->pict_type == AV_PICTURE_TYPE_P) {
577 if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
579 } else { /* AV_PICTURE_TYPE_B */
581 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
584 for (i = 0; i < 4; i++)
585 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
586 0, 4 * 2 * sizeof(int16_t));
589 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
592 for (i = 0; i < 4; i++)
593 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
594 0, 4 * 2 * sizeof(int16_t));
598 mb_type = MB_TYPE_16x16;
599 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
600 memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* import neighbouring intra 4x4 modes into the prediction cache */
604 for (i = 0; i < 4; i++)
605 h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
606 if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
607 h->left_samples_available = 0x5F5F;
610 h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
611 h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
612 h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
613 h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
615 if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
616 h->top_samples_available = 0x33FF;
619 /* decode prediction codes for luma blocks */
620 for (i = 0; i < 16; i += 2) {
621 vlc = svq3_get_ue_golomb(&h->gb);
624 av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
628 left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
629 top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
/* each VLC yields two 4x4 modes via the svq3_pred_0/svq3_pred_1 tables */
631 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
632 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
634 if (left[1] == -1 || left[2] == -1) {
635 av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
639 } else { /* mb_type == 33, DC_128_PRED block type */
640 for (i = 0; i < 4; i++)
641 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
644 write_back_intra_pred_mode(h);
647 ff_h264_check_intra4x4_pred_mode(h);
649 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
650 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
652 for (i = 0; i < 4; i++)
653 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
655 h->top_samples_available = 0x33FF;
656 h->left_samples_available = 0x5F5F;
659 mb_type = MB_TYPE_INTRA4x4;
660 } else { /* INTRA16x16 */
661 dir = i_mb_type_info[mb_type - 8].pred_mode;
662 dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
664 if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) < 0) {
665 av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
666 return h->intra16x16_pred_mode;
669 cbp = i_mb_type_info[mb_type - 8].cbp;
670 mb_type = MB_TYPE_INTRA16x16;
/* invalidate MV references for intra MBs in inter pictures */
673 if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
674 for (i = 0; i < 4; i++)
675 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
676 0, 4 * 2 * sizeof(int16_t));
677 if (h->pict_type == AV_PICTURE_TYPE_B) {
678 for (i = 0; i < 4; i++)
679 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
680 0, 4 * 2 * sizeof(int16_t));
683 if (!IS_INTRA4x4(mb_type)) {
684 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
686 if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
687 memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
/* coded block pattern (except for INTRA16x16, whose cbp comes from the table) */
690 if (!IS_INTRA16x16(mb_type) &&
691 (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
692 if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48U){
693 av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
697 cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
698 : golomb_to_inter_cbp[vlc];
700 if (IS_INTRA16x16(mb_type) ||
701 (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
702 h->qscale += svq3_get_se_golomb(&h->gb);
704 if (h->qscale > 31u) {
705 av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
709 if (IS_INTRA16x16(mb_type)) {
710 AV_ZERO128(h->mb_luma_dc[0] + 0);
711 AV_ZERO128(h->mb_luma_dc[0] + 8);
712 if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
713 av_log(h->avctx, AV_LOG_ERROR,
714 "error while decoding intra luma dc\n");
/* luma AC / 4x4 residuals for each coded 8x8 group */
720 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
721 const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
723 for (i = 0; i < 4; i++)
724 if ((cbp & (1 << i))) {
725 for (j = 0; j < 4; j++) {
726 k = index ? (1 * (j & 1) + 2 * (i & 1) +
727 2 * (j & 2) + 4 * (i & 2))
729 h->non_zero_count_cache[scan8[k]] = 1;
731 if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
732 av_log(h->avctx, AV_LOG_ERROR,
733 "error while decoding block\n");
/* chroma DC, then chroma AC blocks */
740 for (i = 1; i < 3; ++i)
741 if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
742 av_log(h->avctx, AV_LOG_ERROR,
743 "error while decoding chroma dc block\n");
748 for (i = 1; i < 3; i++) {
749 for (j = 0; j < 4; j++) {
751 h->non_zero_count_cache[scan8[k]] = 1;
753 if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
754 av_log(h->avctx, AV_LOG_ERROR,
755 "error while decoding chroma ac block\n");
765 h->cur_pic.mb_type[mb_xy] = mb_type;
767 if (IS_INTRA(mb_type))
768 h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
/**
 * Parse an SVQ3 slice header: validate the 8-bit header byte, locate the next
 * slice, undo the watermark XOR if present, read slice type / qscale /
 * adaptive-quant flag, and reset intra predictors for error resilience.
 * Returns 0 on success, negative on error. (Partial view — some lines missing.)
 */
773 static int svq3_decode_slice_header(AVCodecContext *avctx)
775 SVQ3Context *s = avctx->priv_data;
776 H264Context *h = &s->h;
777 const int mb_xy = h->mb_xy;
781 header = get_bits(&h->gb, 8);
/* low bits must be a known slice code and length field must be non-zero */
783 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
785 av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
788 int length = header >> 5 & 3;
790 s->next_slice_index = get_bits_count(&h->gb) +
791 8 * show_bits(&h->gb, 8 * length) +
794 if (s->next_slice_index > h->gb.size_in_bits) {
795 av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
799 h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
800 skip_bits(&h->gb, 8);
/* de-obfuscate the slice payload by XORing with the watermark key */
802 if (s->watermark_key) {
803 uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
804 AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
805 header ^ s->watermark_key);
808 memmove((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
809 &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
811 skip_bits_long(&h->gb, 0);
814 if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
815 av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
819 h->slice_type = golomb_to_pict_type[slice_id];
821 if ((header & 0x9F) == 2) {
/* skip-run field width depends on the MB count */
822 i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
823 h->mb_skip_run = get_bits(&h->gb, i) -
824 (h->mb_y * h->mb_width + h->mb_x);
830 h->slice_num = get_bits(&h->gb, 8);
831 h->qscale = get_bits(&h->gb, 5);
832 s->adaptive_quant = get_bits1(&h->gb);
841 skip_bits(&h->gb, 2);
843 if (skip_1stop_8data_bits(&h->gb) < 0)
844 return AVERROR_INVALIDDATA;
846 /* reset intra predictors and invalidate motion vector references */
848 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
849 -1, 4 * sizeof(int8_t));
850 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
851 -1, 8 * sizeof(int8_t) * h->mb_x);
854 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
855 -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
858 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/**
 * Decoder init: allocate the three Picture slots, initialize the embedded
 * H264 context, then parse the "SEQH" header embedded in the extradata
 * (frame size code, hpel/tpel flags, low-delay flag and the optional
 * zlib-compressed watermark from which watermark_key is derived).
 * Returns 0 on success, negative AVERROR on failure; the error path tears
 * down via svq3_decode_end(). (Partial view — some lines missing.)
 */
864 static av_cold int svq3_decode_init(AVCodecContext *avctx)
866 SVQ3Context *s = avctx->priv_data;
867 H264Context *h = &s->h;
869 unsigned char *extradata;
870 unsigned char *extradata_end;
872 int marker_found = 0;
875 s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
876 s->last_pic = av_mallocz(sizeof(*s->last_pic));
877 s->next_pic = av_mallocz(sizeof(*s->next_pic));
878 if (!s->next_pic || !s->last_pic || !s->cur_pic) {
879 ret = AVERROR(ENOMEM);
883 if ((ret = ff_h264_decode_init(avctx)) < 0)
886 ff_hpeldsp_init(&s->hdsp, avctx->flags);
887 h->flags = avctx->flags;
889 h->sps.chroma_format_idc = 1;
890 h->picture_structure = PICT_FRAME;
891 avctx->pix_fmt = avctx->codec->pix_fmts[0];
893 h->chroma_qp[0] = h->chroma_qp[1] = 4;
894 h->chroma_x_shift = h->chroma_y_shift = 1;
897 s->thirdpel_flag = 1;
900 /* prowl for the "SEQH" marker in the extradata */
901 extradata = (unsigned char *)avctx->extradata;
902 extradata_end = avctx->extradata + avctx->extradata_size;
904 for (m = 0; m + 8 < avctx->extradata_size; m++) {
905 if (!memcmp(extradata, "SEQH", 4)) {
913 /* if a match was found, parse the extra data */
918 size = AV_RB32(&extradata[4]);
919 if (size > extradata_end - extradata - 8) {
920 ret = AVERROR_INVALIDDATA;
923 init_get_bits(&gb, extradata + 8, size * 8);
925 /* 'frame size code' and optional 'width, height' */
926 frame_size_code = get_bits(&gb, 3);
927 switch (frame_size_code) {
/* code 7: explicit 12-bit width and height follow */
957 avctx->width = get_bits(&gb, 12);
958 avctx->height = get_bits(&gb, 12);
962 s->halfpel_flag = get_bits1(&gb);
963 s->thirdpel_flag = get_bits1(&gb);
971 h->low_delay = get_bits1(&gb);
976 if (skip_1stop_8data_bits(&gb) < 0) {
977 ret = AVERROR_INVALIDDATA;
981 s->unknown_flag = get_bits1(&gb);
982 avctx->has_b_frames = !h->low_delay;
/* optional zlib-compressed watermark logo; its checksum becomes the XOR key */
983 if (s->unknown_flag) {
985 unsigned watermark_width = svq3_get_ue_golomb(&gb);
986 unsigned watermark_height = svq3_get_ue_golomb(&gb);
987 int u1 = svq3_get_ue_golomb(&gb);
988 int u2 = get_bits(&gb, 8);
989 int u3 = get_bits(&gb, 2);
990 int u4 = svq3_get_ue_golomb(&gb);
991 unsigned long buf_len = watermark_width *
992 watermark_height * 4;
993 int offset = get_bits_count(&gb) + 7 >> 3;
/* overflow check before allocating width*height*4 bytes */
996 if (watermark_height <= 0 ||
997 (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height) {
1002 buf = av_malloc(buf_len);
1003 av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
1004 watermark_width, watermark_height);
1005 av_log(avctx, AV_LOG_DEBUG,
1006 "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1007 u1, u2, u3, u4, offset);
1008 if (uncompress(buf, &buf_len, extradata + 8 + offset,
1009 size - offset) != Z_OK) {
1010 av_log(avctx, AV_LOG_ERROR,
1011 "could not uncompress watermark logo\n");
1016 s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1017 s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1018 av_log(avctx, AV_LOG_DEBUG,
1019 "watermark key %#x\n", s->watermark_key);
1022 av_log(avctx, AV_LOG_ERROR,
1023 "this svq3 file contains watermark which need zlib support compiled in\n");
/* derive macroblock geometry from the final picture dimensions */
1030 h->width = avctx->width;
1031 h->height = avctx->height;
1032 h->mb_width = (h->width + 15) / 16;
1033 h->mb_height = (h->height + 15) / 16;
1034 h->mb_stride = h->mb_width + 1;
1035 h->mb_num = h->mb_width * h->mb_height;
1036 h->b_stride = 4 * h->mb_width;
1037 s->h_edge_pos = h->mb_width * 16;
1038 s->v_edge_pos = h->mb_height * 16;
1040 if ((ret = ff_h264_alloc_tables(h)) < 0) {
1041 av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
/* error path: free everything allocated so far */
1047 svq3_decode_end(avctx);
/* Release all buffers owned by one Picture (MV/ref-index/mb-type side data
 * plus the frame itself). Safe on partially-initialized pictures since the
 * av_buffer/av_frame unref functions accept NULL contents. */
1051 static void free_picture(AVCodecContext *avctx, Picture *pic)
1054 for (i = 0; i < 2; i++) {
1055 av_buffer_unref(&pic->motion_val_buf[i]);
1056 av_buffer_unref(&pic->ref_index_buf[i]);
1058 av_buffer_unref(&pic->mb_type_buf);
1060 av_frame_unref(&pic->f);
/**
 * Lazily allocate per-picture side data (mb_type, motion vectors, ref
 * indices), then acquire the actual frame buffer; also sets up the shared
 * edge-emulation buffer and line sizes on first use.
 * Returns 0 on success, negative AVERROR on failure (frees pic's side data
 * via free_picture on the error path). (Partial view — some lines missing.)
 */
1063 static int get_buffer(AVCodecContext *avctx, Picture *pic)
1065 SVQ3Context *s = avctx->priv_data;
1066 H264Context *h = &s->h;
1067 const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1068 const int mb_array_size = h->mb_stride * h->mb_height;
1069 const int b4_stride = h->mb_width * 4 + 1;
1070 const int b4_array_size = b4_stride * h->mb_height * 4;
/* side data is allocated once and kept across frames */
1073 if (!pic->motion_val_buf[0]) {
1076 pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1077 if (!pic->mb_type_buf)
1078 return AVERROR(ENOMEM);
1079 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1081 for (i = 0; i < 2; i++) {
1082 pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1083 pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1084 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1085 ret = AVERROR(ENOMEM);
1089 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1090 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* B-frames are never used as references */
1093 pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1095 ret = ff_get_buffer(avctx, &pic->f,
1096 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1100 if (!h->edge_emu_buffer) {
1101 h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1102 if (!h->edge_emu_buffer)
1103 return AVERROR(ENOMEM);
1106 h->linesize = pic->f.linesize[0];
1107 h->uvlinesize = pic->f.linesize[1];
1111 free_picture(avctx, pic);
/**
 * Decode one packet into a frame: flush handling on empty packets, optional
 * watermark de-obfuscation (packet copied into s->buf first), slice header
 * parsing, reference management (last/next swap, grey-fill for missing
 * references), the per-MB decode loop, and output/reorder of the decoded
 * picture. Returns buf_size (via the invisible tail) or a negative error.
 * (Partial view — several declarations and returns are in missing lines.)
 */
1115 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1116 int *got_frame, AVPacket *avpkt)
1118 SVQ3Context *s = avctx->priv_data;
1119 H264Context *h = &s->h;
1120 int buf_size = avpkt->size;
1125 /* special case for last picture */
1126 if (buf_size == 0) {
1127 if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1128 ret = av_frame_ref(data, &s->next_pic->f);
1131 s->last_frame_output = 1;
1137 h->mb_x = h->mb_y = h->mb_xy = 0;
/* the watermark XOR mutates the bitstream, so work on a private copy */
1139 if (s->watermark_key) {
1140 av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
1142 return AVERROR(ENOMEM);
1143 memcpy(s->buf, avpkt->data, buf_size);
1149 init_get_bits(&h->gb, buf, 8 * buf_size);
1151 if (svq3_decode_slice_header(avctx))
1154 h->pict_type = h->slice_type;
/* non-B frames advance the reference chain */
1156 if (h->pict_type != AV_PICTURE_TYPE_B)
1157 FFSWAP(Picture*, s->next_pic, s->last_pic);
1159 av_frame_unref(&s->cur_pic->f);
1161 /* for skipping the frame */
1162 s->cur_pic->f.pict_type = h->pict_type;
1163 s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1165 ret = get_buffer(avctx, s->cur_pic);
1169 h->cur_pic_ptr = s->cur_pic;
1170 av_frame_unref(&h->cur_pic.f);
1171 h->cur_pic = *s->cur_pic;
1172 ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
/* block offsets depend on linesize, so recompute per frame */
1176 for (i = 0; i < 16; i++) {
1177 h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1178 h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1180 for (i = 0; i < 16; i++) {
1181 h->block_offset[16 + i] =
1182 h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1183 h->block_offset[48 + 16 + i] =
1184 h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
/* missing references are replaced by grey frames so decoding can continue */
1187 if (h->pict_type != AV_PICTURE_TYPE_I) {
1188 if (!s->last_pic->f.data[0]) {
1189 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1190 av_frame_unref(s->last_pic);
1191 ret = get_buffer(avctx, s->last_pic);
1194 memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1195 memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1196 s->last_pic->f.linesize[1]);
1197 memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1198 s->last_pic->f.linesize[2]);
1201 if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1202 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1203 av_frame_unref(s->next_pic);
1204 ret = get_buffer(avctx, s->next_pic);
1207 memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1208 memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1209 s->next_pic->f.linesize[1]);
1210 memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1211 s->next_pic->f.linesize[2]);
1215 if (avctx->debug & FF_DEBUG_PICT_INFO)
1216 av_log(h->avctx, AV_LOG_DEBUG,
1217 "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1218 av_get_picture_type_char(h->pict_type),
1219 s->halfpel_flag, s->thirdpel_flag,
1220 s->adaptive_quant, h->qscale, h->slice_num);
1222 if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1223 avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1224 avctx->skip_frame >= AVDISCARD_ALL)
1227 if (s->next_p_frame_damaged) {
1228 if (h->pict_type == AV_PICTURE_TYPE_B)
1231 s->next_p_frame_damaged = 0;
/* frame-number bookkeeping used for B-frame temporal MV scaling */
1234 if (h->pict_type == AV_PICTURE_TYPE_B) {
1235 h->frame_num_offset = h->slice_num - h->prev_frame_num;
1237 if (h->frame_num_offset < 0)
1238 h->frame_num_offset += 256;
1239 if (h->frame_num_offset == 0 ||
1240 h->frame_num_offset >= h->prev_frame_num_offset) {
1241 av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1245 h->prev_frame_num = h->frame_num;
1246 h->frame_num = h->slice_num;
1247 h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1249 if (h->prev_frame_num_offset < 0)
1250 h->prev_frame_num_offset += 256;
1253 for (m = 0; m < 2; m++) {
1255 for (i = 0; i < 4; i++) {
1257 for (j = -1; j < 4; j++)
1258 h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1260 h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
/* main macroblock loop */
1264 for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1265 for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1267 h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
1269 if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1270 ((get_bits_count(&h->gb) & 7) == 0 ||
1271 show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1272 skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1273 h->gb.size_in_bits = 8 * buf_size;
1275 if (svq3_decode_slice_header(avctx))
1278 /* TODO: support s->mb_skip_run */
1281 mb_type = svq3_get_ue_golomb(&h->gb);
1283 if (h->pict_type == AV_PICTURE_TYPE_I)
1285 else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1287 if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1288 av_log(h->avctx, AV_LOG_ERROR,
1289 "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1293 if (mb_type != 0 || h->cbp)
1294 ff_h264_hl_decode_mb(h);
1296 if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1297 h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1298 (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1301 ff_draw_horiz_band(avctx, s->cur_pic,
1302 s->last_pic->f.data[0] ? s->last_pic : NULL,
1303 16 * h->mb_y, 16, h->picture_structure, 0,
1307 left = buf_size*8 - get_bits_count(&h->gb);
1309 if (h->mb_y != h->mb_height || h->mb_x != h->mb_width) {
1310 av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, h->mb_y, h->mb_x, left);
1311 //av_hex_dump(stderr, buf+buf_size-8, 8);
1315 av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
/* output: B-frames / low-delay output the current pic, otherwise the delayed one */
1319 if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1320 ret = av_frame_ref(data, &s->cur_pic->f);
1321 else if (s->last_pic->f.data[0])
1322 ret = av_frame_ref(data, &s->last_pic->f);
1326 /* Do not output the last pic after seeking. */
1327 if (s->last_pic->f.data[0] || h->low_delay)
1330 if (h->pict_type != AV_PICTURE_TYPE_B) {
1331 FFSWAP(Picture*, s->cur_pic, s->next_pic);
1333 av_frame_unref(&s->cur_pic->f);
/* Decoder teardown: free the three Picture slots (buffers, then the structs
 * themselves), release the H264 context state and the edge-emulation buffer.
 * Also used by svq3_decode_init() on its error path. */
1339 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1341 SVQ3Context *s = avctx->priv_data;
1342 H264Context *h = &s->h;
1344 free_picture(avctx, s->cur_pic);
1345 free_picture(avctx, s->next_pic);
1346 free_picture(avctx, s->last_pic);
1347 av_freep(&s->cur_pic);
1348 av_freep(&s->next_pic);
1349 av_freep(&s->last_pic);
1351 av_frame_unref(&h->cur_pic.f);
1353 ff_h264_free_context(h);
1357 av_freep(&h->edge_emu_buffer);
/* Public codec registration for the SVQ3 decoder. */
1362 AVCodec ff_svq3_decoder = {
1364 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1365 .type = AVMEDIA_TYPE_VIDEO,
1366 .id = AV_CODEC_ID_SVQ3,
1367 .priv_data_size = sizeof(SVQ3Context),
1368 .init = svq3_decode_init,
1369 .close = svq3_decode_end,
1370 .decode = svq3_decode_frame,
1371 .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1374 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,