/*
2 * Copyright (c) 2003 The FFmpeg Project
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
 */
45 #include "libavutil/attributes.h"
48 #include "mpegutils.h"
51 #include "h264data.h" // FIXME FIXME FIXME
53 #include "h264_mvpred.h"
56 #include "rectangle.h"
58 #include "vdpau_internal.h"
72 typedef struct SVQ3Context {
/* NOTE(review): this chunk shows only part of the struct; several members
 * and the closing "} SVQ3Context;" are not visible here. */
/* reference pictures for forward (last) / backward (next) prediction */
77 H264Picture *next_pic;
78 H264Picture *last_pic;
/* XOR key applied to slice data of watermarked streams in
 * svq3_decode_slice_header(); 0 when the stream has no watermark */
83 uint32_t watermark_key;
87 int next_p_frame_damaged;
/* set once the final delayed frame has been output on flush (buf_size == 0) */
90 int last_frame_output;
/* motion-vector precision / prediction modes consumed by svq3_mc_dir():
 * full-, half- and third-pel, plus PREDICT_MODE (vectors derived from
 * next_pic, used for B-frame direct/skip) */
93 #define FULLPEL_MODE 1
94 #define HALFPEL_MODE 2
95 #define THIRDPEL_MODE 3
96 #define PREDICT_MODE 4
98 /* dual scan (from some older h264 draft) */
/* SVQ3-specific scan order for 4x4 coefficient blocks; each entry is a
 * position x + y * 4 inside the block.
 * (Fix: the table's closing "};" was lost in truncation.) */
static const uint8_t svq3_scan[16] = {
    0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
    2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
    0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
    0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
};
/* Scan order for the 4x4 luma DC coefficients; entries are offsets into
 * the macroblock coefficient buffer (x * 16 + y * 64).
 * (Fix: the table's closing "};" was lost in truncation.) */
static const uint8_t luma_dc_zigzag_scan[16] = {
    0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
    3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
    1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
    3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
};
/* Anti-diagonal walk over a 5x5 grid of (x, y) prediction-mode pairs,
 * indexed by the luma-prediction vlc (see svq3_decode_mb()).
 * (Fix: truncation dropped the first two rows, the last three entries and
 * the closing "};"; restored here — the missing entries complete the
 * anti-diagonal enumeration pattern visible in the surviving rows.) */
static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 3, 4 }, { 4, 3 },
    { 4, 4 }
};
/* Candidate intra 4x4 prediction modes, indexed as
 * [top_mode + 1][left_mode + 1][rank]; -1 terminates a candidate list.
 * (Fix: the table's closing "};" was lost in truncation.) */
static const int8_t svq3_pred_1[6][6][5] = {
    { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
      { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};
/* Run/level pairs for the first 16 residual VLC codes, indexed as
 * svq3_dct_tables[intra][vlc] (see svq3_decode_block()).
 * (Fix: truncation dropped the "run"/"level" member declarations and the
 * closing "};"; restored — the field names are evidenced by the
 * ".run"/".level" accesses in svq3_decode_block().) */
static const struct {
    uint8_t run;
    uint8_t level;
} svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};
/* Dequantization multipliers for qscale 0..31, used with a 20-bit
 * fixed-point rounding shift in the IDCT helpers below.
 * (Fix: the table's closing "};" was lost in truncation.) */
static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
     9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
    24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
    61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533,
};
165 static int svq3_decode_end(AVCodecContext *avctx);
167 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
169 const int qmul = svq3_dequant_coeff[qp];
173 static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
175 for (i = 0; i < 4; i++) {
176 const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
177 const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
178 const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
179 const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
181 temp[4 * i + 0] = z0 + z3;
182 temp[4 * i + 1] = z1 + z2;
183 temp[4 * i + 2] = z1 - z2;
184 temp[4 * i + 3] = z0 - z3;
187 for (i = 0; i < 4; i++) {
188 const int offset = x_offset[i];
189 const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
190 const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
191 const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
192 const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
194 output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
195 output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
196 output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
197 output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
202 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
203 int stride, int qp, int dc)
205 const int qmul = svq3_dequant_coeff[qp];
209 dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
210 : qmul * (block[0] >> 3) / 2);
214 for (i = 0; i < 4; i++) {
215 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
216 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
217 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
218 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
220 block[0 + 4 * i] = z0 + z3;
221 block[1 + 4 * i] = z1 + z2;
222 block[2 + 4 * i] = z1 - z2;
223 block[3 + 4 * i] = z0 - z3;
226 for (i = 0; i < 4; i++) {
227 const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
228 const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
229 const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
230 const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
231 const int rr = (dc + 0x80000);
233 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
234 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
235 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
236 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
239 memset(block, 0, 16 * sizeof(int16_t));
/* Decode run/level-coded residual coefficients into block, starting at
 * coefficient "index". type selects the scan: 0 = luma DC zigzag,
 * 1 = zigzag, 2 = SVQ3 alternate scan, 3 = chroma DC. Nonzero return
 * indicates a bitstream error (see the vlc sanity checks below).
 * NOTE(review): several lines of this function (error returns, the first
 * run/level branch, closing braces) are missing from this chunk; the
 * surviving code is left byte-identical. */
242 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
243 int index, const int type)
245 static const uint8_t *const scan_patterns[4] =
246 { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
248 int run, level, sign, limit;
/* intra = 0 for types 0/1, larger for 2/3 -> shortens the first limit */
250 const int intra = 3 * type >> 2;
251 const uint8_t *const scan = scan_patterns[type];
/* outer loop widens the coefficient limit in steps of 8 */
253 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
254 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
255 if ((int32_t)vlc < 0)
/* even vlc -> negative level, odd -> positive (sign-magnitude trick) */
258 sign = (vlc & 1) ? 0 : -1;
265 } else if (vlc < 4) {
270 level = (vlc + 9 >> 2) - run;
/* table-driven run/level for small codes */
274 run = svq3_dct_tables[intra][vlc].run;
275 level = svq3_dct_tables[intra][vlc].level;
/* escape codes: level derived from vlc high bits and the run length */
278 level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
281 level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
286 if ((index += run) >= limit)
/* apply sign and store at the scan position */
289 block[scan[index]] = (level ^ sign) - sign;
/* Motion-compensate one partition of width x height at pixel (x, y).
 * dir selects the reference (0 = last_pic, 1 = next_pic), thirdpel picks
 * the thirdpel DSP path, avg selects averaging (used for bidirectional
 * prediction). Chroma planes are handled at half resolution unless
 * CODEC_FLAG_GRAY is set.
 * NOTE(review): lines are missing from this chunk (declarations, emulated-
 * edge condition, trailing call arguments, closing braces); the surviving
 * code is left byte-identical. */
300 static inline void svq3_mc_dir_part(SVQ3Context *s,
301 int x, int y, int width, int height,
302 int mx, int my, int dxy,
303 int thirdpel, int dir, int avg)
305 H264Context *h = &s->h;
306 const H264Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
309 int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
/* clamp the source position when it would read outside the frame */
314 if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
315 my < 0 || my >= s->v_edge_pos - height - 1) {
317 mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
318 my = av_clip(my, -16, s->v_edge_pos - height + 15);
321 /* form component predictions */
322 dest = h->cur_pic.f.data[0] + x + y * h->linesize;
323 src = pic->f.data[0] + mx + my * h->linesize;
/* replicate edge pixels into a scratch buffer when sampling off-frame */
326 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
327 h->linesize, h->linesize,
328 width + 1, height + 1,
329 mx, my, s->h_edge_pos, s->v_edge_pos);
330 src = h->edge_emu_buffer;
333 (avg ? s->tdsp.avg_tpel_pixels_tab
334 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
337 (avg ? s->hdsp.avg_pixels_tab
338 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
/* chroma: same procedure at half resolution for planes 1 and 2 */
341 if (!(h->flags & CODEC_FLAG_GRAY)) {
342 mx = mx + (mx < (int) x) >> 1;
343 my = my + (my < (int) y) >> 1;
345 height = height >> 1;
348 for (i = 1; i < 3; i++) {
349 dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
350 src = pic->f.data[i] + mx + my * h->uvlinesize;
353 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
354 h->uvlinesize, h->uvlinesize,
355 width + 1, height + 1,
356 mx, my, (s->h_edge_pos >> 1),
358 src = h->edge_emu_buffer;
361 (avg ? s->tdsp.avg_tpel_pixels_tab
362 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
366 (avg ? s->hdsp.avg_pixels_tab
367 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/* Predict, decode and apply motion vectors for every partition of the
 * current macroblock. size encodes the partition geometry, mode the MV
 * precision (FULLPEL/HALFPEL/THIRDPEL) or PREDICT_MODE (vectors scaled
 * from next_pic for B-frame direct prediction). Returns negative on an
 * invalid MV vlc.
 * NOTE(review): lines are missing from this chunk (declarations, some
 * branch bodies, closing braces); the surviving code is left
 * byte-identical. */
374 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
377 int i, j, k, mx, my, dx, dy, x, y;
378 H264Context *h = &s->h;
379 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
380 const int part_height = 16 >> ((unsigned)(size + 1) / 3);
381 const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
382 const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
383 const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
/* iterate over all partitions of the 16x16 macroblock */
385 for (i = 0; i < 16; i += part_height)
386 for (j = 0; j < 16; j += part_width) {
387 const int b_xy = (4 * h->mb_x + (j >> 2)) +
388 (4 * h->mb_y + (i >> 2)) * h->b_stride;
390 x = 16 * h->mb_x + j;
391 y = 16 * h->mb_y + i;
392 k = (j >> 2 & 1) + (i >> 1 & 2) +
393 (j >> 1 & 4) + (i & 8);
/* either median-predict from neighbours or scale next_pic's vectors
 * by the temporal distance (PREDICT_MODE) */
395 if (mode != PREDICT_MODE) {
396 pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
398 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
399 my = s->next_pic->motion_val[0][b_xy][1] << 1;
402 mx = mx * h->frame_num_offset /
403 h->prev_frame_num_offset + 1 >> 1;
404 my = my * h->frame_num_offset /
405 h->prev_frame_num_offset + 1 >> 1;
407 mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
408 h->prev_frame_num_offset + 1 >> 1;
409 my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
410 h->prev_frame_num_offset + 1 >> 1;
414 /* clip motion vector prediction to frame border */
415 mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
416 my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
418 /* get (optional) motion vector differential */
419 if (mode == PREDICT_MODE) {
422 dy = svq3_get_se_golomb(&h->gb);
423 dx = svq3_get_se_golomb(&h->gb);
425 if (dx == INVALID_VLC || dy == INVALID_VLC) {
426 av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
431 /* compute motion vector */
432 if (mode == THIRDPEL_MODE) {
434 mx = (mx + 1 >> 1) + dx;
435 my = (my + 1 >> 1) + dy;
436 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
437 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
438 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
440 svq3_mc_dir_part(s, x, y, part_width, part_height,
441 fx, fy, dxy, 1, dir, avg);
444 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
445 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
446 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
447 dxy = (mx & 1) + 2 * (my & 1);
449 svq3_mc_dir_part(s, x, y, part_width, part_height,
450 mx >> 1, my >> 1, dxy, 0, dir, avg);
454 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
455 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
457 svq3_mc_dir_part(s, x, y, part_width, part_height,
458 mx, my, 0, 0, dir, avg);
463 /* update mv_cache */
464 if (mode != PREDICT_MODE) {
465 int32_t mv = pack16to32(mx, my);
467 if (part_height == 8 && i < 8) {
468 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
470 if (part_width == 8 && j < 8)
471 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
473 if (part_width == 8 && j < 8)
474 AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
475 if (part_width == 4 || part_height == 4)
476 AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
479 /* write back motion vectors */
480 fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
481 part_width >> 2, part_height >> 2, h->b_stride,
482 pack16to32(mx, my), 4);
/* Decode one macroblock. Per the branches below: mb_type 0 = skip/direct,
 * 1..7 = inter, 8 and 33 = intra 4x4, other values = intra 16x16 variants.
 * Returns negative on a bitstream error.
 * NOTE(review): many lines of this function (error returns, else-branches,
 * closing braces) are missing from this chunk; the surviving code is left
 * byte-identical. */
488 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
490 H264Context *h = &s->h;
491 int i, j, k, m, dir, mode;
495 const int mb_xy = h->mb_xy;
496 const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
498 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
499 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
500 h->topright_samples_available = 0xFFFF;
502 if (mb_type == 0) { /* SKIP */
503 if (h->pict_type == AV_PICTURE_TYPE_P ||
504 s->next_pic->mb_type[mb_xy] == -1) {
505 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
508 if (h->pict_type == AV_PICTURE_TYPE_B)
509 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
512 mb_type = MB_TYPE_SKIP;
/* B-frame direct mode: reuse next_pic's partitioning, both directions */
514 mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
515 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
517 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
520 mb_type = MB_TYPE_16x16;
522 } else if (mb_type < 8) { /* INTER */
/* MV precision is signalled relative to the stream's hpel/tpel flags */
523 if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
524 mode = THIRDPEL_MODE;
525 else if (s->halfpel_flag &&
526 s->thirdpel_flag == !get_bits1(&h->gb))
532 /* note ref_cache should contain here:
/* fill mv_cache/ref_cache from decoded neighbours for MV prediction */
540 for (m = 0; m < 2; m++) {
541 if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
542 for (i = 0; i < 4; i++)
543 AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
544 h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
546 for (i = 0; i < 4; i++)
547 AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
550 memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
551 h->cur_pic.motion_val[m][b_xy - h->b_stride],
552 4 * 2 * sizeof(int16_t));
553 memset(&h->ref_cache[m][scan8[0] - 1 * 8],
554 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
556 if (h->mb_x < h->mb_width - 1) {
557 AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
558 h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
559 h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
560 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
561 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
563 h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
565 AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
566 h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
567 h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
568 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
570 h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
572 memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
573 PART_NOT_AVAILABLE, 8);
575 if (h->pict_type != AV_PICTURE_TYPE_B)
579 /* decode motion vector(s) and form prediction(s) */
580 if (h->pict_type == AV_PICTURE_TYPE_P) {
581 if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
583 } else { /* AV_PICTURE_TYPE_B */
585 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
588 for (i = 0; i < 4; i++)
589 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
590 0, 4 * 2 * sizeof(int16_t));
593 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
596 for (i = 0; i < 4; i++)
597 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
598 0, 4 * 2 * sizeof(int16_t));
602 mb_type = MB_TYPE_16x16;
603 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
604 memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* import neighbour 4x4 prediction modes into the cache */
608 for (i = 0; i < 4; i++)
609 h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
610 if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
611 h->left_samples_available = 0x5F5F;
614 h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
615 h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
616 h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
617 h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
619 if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
620 h->top_samples_available = 0x33FF;
623 /* decode prediction codes for luma blocks */
624 for (i = 0; i < 16; i += 2) {
625 vlc = svq3_get_ue_golomb(&h->gb);
628 av_log(h->avctx, AV_LOG_ERROR,
629 "luma prediction:%"PRIu32"\n", vlc);
633 left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
634 top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
/* map the vlc through svq3_pred_0/svq3_pred_1 to two 4x4 modes */
636 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
637 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
639 if (left[1] == -1 || left[2] == -1) {
640 av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
644 } else { /* mb_type == 33, DC_128_PRED block type */
645 for (i = 0; i < 4; i++)
646 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
649 write_back_intra_pred_mode(h);
652 ff_h264_check_intra4x4_pred_mode(h);
654 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
655 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
657 for (i = 0; i < 4; i++)
658 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
660 h->top_samples_available = 0x33FF;
661 h->left_samples_available = 0x5F5F;
664 mb_type = MB_TYPE_INTRA4x4;
665 } else { /* INTRA16x16 */
666 dir = i_mb_type_info[mb_type - 8].pred_mode;
667 dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
669 if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) < 0) {
670 av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
671 return h->intra16x16_pred_mode;
674 cbp = i_mb_type_info[mb_type - 8].cbp;
675 mb_type = MB_TYPE_INTRA16x16;
/* intra MB inside P/B pictures: zero stored motion vectors */
678 if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
679 for (i = 0; i < 4; i++)
680 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
681 0, 4 * 2 * sizeof(int16_t));
682 if (h->pict_type == AV_PICTURE_TYPE_B) {
683 for (i = 0; i < 4; i++)
684 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
685 0, 4 * 2 * sizeof(int16_t));
688 if (!IS_INTRA4x4(mb_type)) {
689 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
691 if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
692 memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
/* coded block pattern (not transmitted for intra 16x16) */
695 if (!IS_INTRA16x16(mb_type) &&
696 (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
697 if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48U){
698 av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
702 cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
703 : golomb_to_inter_cbp[vlc];
705 if (IS_INTRA16x16(mb_type) ||
706 (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
707 h->qscale += svq3_get_se_golomb(&h->gb);
709 if (h->qscale > 31u) {
710 av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
714 if (IS_INTRA16x16(mb_type)) {
715 AV_ZERO128(h->mb_luma_dc[0] + 0);
716 AV_ZERO128(h->mb_luma_dc[0] + 8);
717 if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
718 av_log(h->avctx, AV_LOG_ERROR,
719 "error while decoding intra luma dc\n");
/* luma AC blocks for each coded 8x8 quadrant */
725 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
726 const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
728 for (i = 0; i < 4; i++)
729 if ((cbp & (1 << i))) {
730 for (j = 0; j < 4; j++) {
731 k = index ? (1 * (j & 1) + 2 * (i & 1) +
732 2 * (j & 2) + 4 * (i & 2))
734 h->non_zero_count_cache[scan8[k]] = 1;
736 if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
737 av_log(h->avctx, AV_LOG_ERROR,
738 "error while decoding block\n");
/* chroma DC, then chroma AC blocks */
745 for (i = 1; i < 3; ++i)
746 if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
747 av_log(h->avctx, AV_LOG_ERROR,
748 "error while decoding chroma dc block\n");
753 for (i = 1; i < 3; i++) {
754 for (j = 0; j < 4; j++) {
756 h->non_zero_count_cache[scan8[k]] = 1;
758 if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
759 av_log(h->avctx, AV_LOG_ERROR,
760 "error while decoding chroma ac block\n");
770 h->cur_pic.mb_type[mb_xy] = mb_type;
772 if (IS_INTRA(mb_type))
773 h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
/* Parse one SVQ3 slice header: an 8-bit header byte (low bits must be
 * 1 or 2; bits 5-6 give the byte length of the next-slice size field),
 * optional watermark de-obfuscation (XOR with watermark_key), the slice
 * type, an optional macroblock skip run, slice_num, qscale and the
 * adaptive-quant flag; finally invalidates neighbouring intra predictors.
 * NOTE(review): lines are missing from this chunk (declarations, error
 * returns, closing braces); the surviving code is left byte-identical. */
778 static int svq3_decode_slice_header(AVCodecContext *avctx)
780 SVQ3Context *s = avctx->priv_data;
781 H264Context *h = &s->h;
782 const int mb_xy = h->mb_xy;
786 header = get_bits(&h->gb, 8);
788 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
790 av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
793 int length = header >> 5 & 3;
795 s->next_slice_index = get_bits_count(&h->gb) +
796 8 * show_bits(&h->gb, 8 * length) +
799 if (s->next_slice_index > h->gb.size_in_bits) {
800 av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
804 h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
805 skip_bits(&h->gb, 8);
/* de-obfuscate watermarked bitstreams by XORing in the key */
807 if (s->watermark_key) {
808 uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
809 AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
810 header ^ s->watermark_key);
813 memmove((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
814 &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
816 skip_bits_long(&h->gb, 0);
819 if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
820 av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
824 h->slice_type = golomb_to_pict_type[slice_id];
/* header type 2: explicit macroblock skip run */
826 if ((header & 0x9F) == 2) {
827 i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
828 h->mb_skip_run = get_bits(&h->gb, i) -
829 (h->mb_y * h->mb_width + h->mb_x);
835 h->slice_num = get_bits(&h->gb, 8);
836 h->qscale = get_bits(&h->gb, 5);
837 s->adaptive_quant = get_bits1(&h->gb);
846 skip_bits(&h->gb, 2);
848 if (skip_1stop_8data_bits(&h->gb) < 0)
849 return AVERROR_INVALIDDATA;
851 /* reset intra predictors and invalidate motion vector references */
853 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
854 -1, 4 * sizeof(int8_t));
855 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
856 -1, 8 * sizeof(int8_t) * h->mb_x);
859 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
860 -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
863 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/* One-time decoder setup: allocate picture structs, initialise the
 * embedded H.264 context and DSP helpers, locate and parse the "SEQH"
 * atom in the extradata (frame size code, hpel/tpel flags, low_delay,
 * optional zlib-compressed watermark whose checksum becomes
 * watermark_key), then derive macroblock geometry and allocate tables.
 * NOTE(review): many lines are missing from this chunk (declarations,
 * switch cases, error paths, closing braces); the surviving code is left
 * byte-identical. */
869 static av_cold int svq3_decode_init(AVCodecContext *avctx)
871 SVQ3Context *s = avctx->priv_data;
872 H264Context *h = &s->h;
874 unsigned char *extradata;
875 unsigned char *extradata_end;
877 int marker_found = 0;
880 s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
881 s->last_pic = av_mallocz(sizeof(*s->last_pic));
882 s->next_pic = av_mallocz(sizeof(*s->next_pic));
883 if (!s->next_pic || !s->last_pic || !s->cur_pic) {
884 ret = AVERROR(ENOMEM);
888 if ((ret = ff_h264_decode_init(avctx)) < 0)
891 ff_hpeldsp_init(&s->hdsp, avctx->flags);
892 ff_tpeldsp_init(&s->tdsp);
894 h->flags = avctx->flags;
896 h->sps.chroma_format_idc = 1;
897 h->picture_structure = PICT_FRAME;
898 avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
899 avctx->color_range = AVCOL_RANGE_JPEG;
901 h->chroma_qp[0] = h->chroma_qp[1] = 4;
902 h->chroma_x_shift = h->chroma_y_shift = 1;
905 s->thirdpel_flag = 1;
908 /* prowl for the "SEQH" marker in the extradata */
909 extradata = (unsigned char *)avctx->extradata;
910 extradata_end = avctx->extradata + avctx->extradata_size;
912 for (m = 0; m + 8 < avctx->extradata_size; m++) {
913 if (!memcmp(extradata, "SEQH", 4)) {
921 /* if a match was found, parse the extra data */
926 size = AV_RB32(&extradata[4]);
927 if (size > extradata_end - extradata - 8) {
928 ret = AVERROR_INVALIDDATA;
931 init_get_bits(&gb, extradata + 8, size * 8);
933 /* 'frame size code' and optional 'width, height' */
934 frame_size_code = get_bits(&gb, 3);
935 switch (frame_size_code) {
965 avctx->width = get_bits(&gb, 12);
966 avctx->height = get_bits(&gb, 12);
970 s->halfpel_flag = get_bits1(&gb);
971 s->thirdpel_flag = get_bits1(&gb);
979 h->low_delay = get_bits1(&gb);
984 if (skip_1stop_8data_bits(&gb) < 0) {
985 ret = AVERROR_INVALIDDATA;
989 s->unknown_flag = get_bits1(&gb);
990 avctx->has_b_frames = !h->low_delay;
/* watermarked streams: decompress the logo and derive the XOR key */
991 if (s->unknown_flag) {
993 unsigned watermark_width = svq3_get_ue_golomb(&gb);
994 unsigned watermark_height = svq3_get_ue_golomb(&gb);
995 int u1 = svq3_get_ue_golomb(&gb);
996 int u2 = get_bits(&gb, 8);
997 int u3 = get_bits(&gb, 2);
998 int u4 = svq3_get_ue_golomb(&gb);
999 unsigned long buf_len = watermark_width *
1000 watermark_height * 4;
1001 int offset = get_bits_count(&gb) + 7 >> 3;
/* guard against integer overflow of buf_len */
1004 if (watermark_height <= 0 ||
1005 (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height) {
1010 buf = av_malloc(buf_len);
1012 ret = AVERROR(ENOMEM);
1015 av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1016 watermark_width, watermark_height);
1017 av_log(avctx, AV_LOG_DEBUG,
1018 "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1019 u1, u2, u3, u4, offset);
1020 if (uncompress(buf, &buf_len, extradata + 8 + offset,
1021 size - offset) != Z_OK) {
1022 av_log(avctx, AV_LOG_ERROR,
1023 "could not uncompress watermark logo\n");
1028 s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1029 s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1030 av_log(avctx, AV_LOG_DEBUG,
1031 "watermark key %#"PRIx32"\n", s->watermark_key);
1034 av_log(avctx, AV_LOG_ERROR,
1035 "this svq3 file contains watermark which need zlib support compiled in\n");
/* derive macroblock geometry from the final frame dimensions */
1042 h->width = avctx->width;
1043 h->height = avctx->height;
1044 h->mb_width = (h->width + 15) / 16;
1045 h->mb_height = (h->height + 15) / 16;
1046 h->mb_stride = h->mb_width + 1;
1047 h->mb_num = h->mb_width * h->mb_height;
1048 h->b_stride = 4 * h->mb_width;
1049 s->h_edge_pos = h->mb_width * 16;
1050 s->v_edge_pos = h->mb_height * 16;
1052 if ((ret = ff_h264_alloc_tables(h)) < 0) {
1053 av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1059 svq3_decode_end(avctx);
1063 static void free_picture(AVCodecContext *avctx, H264Picture *pic)
1066 for (i = 0; i < 2; i++) {
1067 av_buffer_unref(&pic->motion_val_buf[i]);
1068 av_buffer_unref(&pic->ref_index_buf[i]);
1070 av_buffer_unref(&pic->mb_type_buf);
1072 av_frame_unref(&pic->f);
/* Lazily allocate the per-picture side buffers (mb_type, motion vectors,
 * ref indices) on first use, request the frame buffer from the generic
 * path, and set up the edge-emulation scratch buffer and line sizes.
 * NOTE(review): lines are missing from this chunk (declarations, error
 * checks, the fail label, closing braces); the surviving code is left
 * byte-identical. */
1075 static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
1077 SVQ3Context *s = avctx->priv_data;
1078 H264Context *h = &s->h;
1079 const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1080 const int mb_array_size = h->mb_stride * h->mb_height;
1081 const int b4_stride = h->mb_width * 4 + 1;
1082 const int b4_array_size = b4_stride * h->mb_height * 4;
/* side buffers are allocated once and reused across frames */
1085 if (!pic->motion_val_buf[0]) {
1088 pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1089 if (!pic->mb_type_buf)
1090 return AVERROR(ENOMEM);
1091 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1093 for (i = 0; i < 2; i++) {
1094 pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1095 pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1096 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1097 ret = AVERROR(ENOMEM);
1101 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1102 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* B-frames are not used as references */
1105 pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1107 ret = ff_get_buffer(avctx, &pic->f,
1108 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* 17 lines of scratch space for emulated_edge_mc() */
1112 if (!h->edge_emu_buffer) {
1113 h->edge_emu_buffer = av_mallocz_array(pic->f.linesize[0], 17);
1114 if (!h->edge_emu_buffer)
1115 return AVERROR(ENOMEM);
1118 h->linesize = pic->f.linesize[0];
1119 h->uvlinesize = pic->f.linesize[1];
1123 free_picture(avctx, pic);
1127 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1128 int *got_frame, AVPacket *avpkt)
1130 SVQ3Context *s = avctx->priv_data;
1131 H264Context *h = &s->h;
1132 int buf_size = avpkt->size;
1137 /* special case for last picture */
1138 if (buf_size == 0) {
1139 if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1140 ret = av_frame_ref(data, &s->next_pic->f);
1143 s->last_frame_output = 1;
1149 h->mb_x = h->mb_y = h->mb_xy = 0;
1151 if (s->watermark_key) {
1152 av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
1154 return AVERROR(ENOMEM);
1155 memcpy(s->buf, avpkt->data, buf_size);
1161 init_get_bits(&h->gb, buf, 8 * buf_size);
1163 if (svq3_decode_slice_header(avctx))
1166 h->pict_type = h->slice_type;
1168 if (h->pict_type != AV_PICTURE_TYPE_B)
1169 FFSWAP(H264Picture*, s->next_pic, s->last_pic);
1171 av_frame_unref(&s->cur_pic->f);
1173 /* for skipping the frame */
1174 s->cur_pic->f.pict_type = h->pict_type;
1175 s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1177 ret = get_buffer(avctx, s->cur_pic);
1181 h->cur_pic_ptr = s->cur_pic;
1182 av_frame_unref(&h->cur_pic.f);
1183 memcpy(&h->cur_pic.tf, &s->cur_pic->tf, sizeof(h->cur_pic) - offsetof(H264Picture, tf));
1184 ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
1188 for (i = 0; i < 16; i++) {
1189 h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1190 h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1192 for (i = 0; i < 16; i++) {
1193 h->block_offset[16 + i] =
1194 h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1195 h->block_offset[48 + 16 + i] =
1196 h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1199 if (h->pict_type != AV_PICTURE_TYPE_I) {
1200 if (!s->last_pic->f.data[0]) {
1201 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1202 av_frame_unref(&s->last_pic->f);
1203 ret = get_buffer(avctx, s->last_pic);
1206 memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1207 memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1208 s->last_pic->f.linesize[1]);
1209 memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1210 s->last_pic->f.linesize[2]);
1213 if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1214 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1215 av_frame_unref(&s->next_pic->f);
1216 ret = get_buffer(avctx, s->next_pic);
1219 memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1220 memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1221 s->next_pic->f.linesize[1]);
1222 memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1223 s->next_pic->f.linesize[2]);
1227 if (avctx->debug & FF_DEBUG_PICT_INFO)
1228 av_log(h->avctx, AV_LOG_DEBUG,
1229 "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1230 av_get_picture_type_char(h->pict_type),
1231 s->halfpel_flag, s->thirdpel_flag,
1232 s->adaptive_quant, h->qscale, h->slice_num);
1234 if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1235 avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1236 avctx->skip_frame >= AVDISCARD_ALL)
1239 if (s->next_p_frame_damaged) {
1240 if (h->pict_type == AV_PICTURE_TYPE_B)
1243 s->next_p_frame_damaged = 0;
1246 if (h->pict_type == AV_PICTURE_TYPE_B) {
1247 h->frame_num_offset = h->slice_num - h->prev_frame_num;
1249 if (h->frame_num_offset < 0)
1250 h->frame_num_offset += 256;
1251 if (h->frame_num_offset == 0 ||
1252 h->frame_num_offset >= h->prev_frame_num_offset) {
1253 av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1257 h->prev_frame_num = h->frame_num;
1258 h->frame_num = h->slice_num;
1259 h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1261 if (h->prev_frame_num_offset < 0)
1262 h->prev_frame_num_offset += 256;
1265 for (m = 0; m < 2; m++) {
1267 for (i = 0; i < 4; i++) {
1269 for (j = -1; j < 4; j++)
1270 h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1272 h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1276 for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1277 for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1279 h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
1281 if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1282 ((get_bits_count(&h->gb) & 7) == 0 ||
1283 show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1284 skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1285 h->gb.size_in_bits = 8 * buf_size;
1287 if (svq3_decode_slice_header(avctx))
1290 /* TODO: support s->mb_skip_run */
1293 mb_type = svq3_get_ue_golomb(&h->gb);
1295 if (h->pict_type == AV_PICTURE_TYPE_I)
1297 else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1299 if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1300 av_log(h->avctx, AV_LOG_ERROR,
1301 "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1305 if (mb_type != 0 || h->cbp)
1306 ff_h264_hl_decode_mb(h);
1308 if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1309 h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1310 (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1313 ff_draw_horiz_band(avctx, &s->cur_pic->f,
1314 s->last_pic->f.data[0] ? &s->last_pic->f : NULL,
1315 16 * h->mb_y, 16, h->picture_structure, 0,
1319 left = buf_size*8 - get_bits_count(&h->gb);
1321 if (h->mb_y != h->mb_height || h->mb_x != h->mb_width) {
1322 av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, h->mb_y, h->mb_x, left);
1323 //av_hex_dump(stderr, buf+buf_size-8, 8);
1327 av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1331 if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1332 ret = av_frame_ref(data, &s->cur_pic->f);
1333 else if (s->last_pic->f.data[0])
1334 ret = av_frame_ref(data, &s->last_pic->f);
1338 /* Do not output the last pic after seeking. */
1339 if (s->last_pic->f.data[0] || h->low_delay)
1342 if (h->pict_type != AV_PICTURE_TYPE_B) {
1343 FFSWAP(H264Picture*, s->cur_pic, s->next_pic);
1345 av_frame_unref(&s->cur_pic->f);
1351 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1353 SVQ3Context *s = avctx->priv_data;
1354 H264Context *h = &s->h;
1356 free_picture(avctx, s->cur_pic);
1357 free_picture(avctx, s->next_pic);
1358 free_picture(avctx, s->last_pic);
1359 av_freep(&s->cur_pic);
1360 av_freep(&s->next_pic);
1361 av_freep(&s->last_pic);
1363 av_frame_unref(&h->cur_pic.f);
1365 ff_h264_free_context(h);
1369 av_freep(&h->edge_emu_buffer);
1374 AVCodec ff_svq3_decoder = {
1376 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1377 .type = AVMEDIA_TYPE_VIDEO,
1378 .id = AV_CODEC_ID_SVQ3,
1379 .priv_data_size = sizeof(SVQ3Context),
1380 .init = svq3_decode_init,
1381 .close = svq3_decode_end,
1382 .decode = svq3_decode_frame,
1383 .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1386 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,