2 * Copyright (c) 2003 The FFmpeg Project
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
45 #include "libavutil/attributes.h"
48 #include "mpegutils.h"
51 #include "h264data.h" // FIXME FIXME FIXME
53 #include "h264_mvpred.h"
56 #include "rectangle.h"
58 #include "vdpau_internal.h"
72 typedef struct SVQ3Context {
/* Backward/forward reference pictures used for P/B prediction;
 * dir==0 selects last_pic, dir!=0 next_pic (see svq3_mc_dir_part). */
77 H264Picture *next_pic;
78 H264Picture *last_pic;
/* 32-bit XOR key applied to the bitstream when the stream carries a
 * compressed watermark; derived in svq3_decode_init from the
 * uncompressed logo checksum. 0 means "no watermark". */
83 uint32_t watermark_key;
87 int next_p_frame_damaged;
/* Set once the final delayed frame has been emitted on the
 * zero-size-packet flush path in svq3_decode_frame. */
90 int last_frame_output;
/* Per-macroblock motion-compensation precision modes.
 * FULLPEL/HALFPEL/THIRDPEL select the MV precision signalled in the
 * bitstream; PREDICT_MODE instead scales MVs from the co-located
 * block of the next reference picture (direct/skip-style prediction,
 * see svq3_mc_dir). */
93 #define FULLPEL_MODE 1
94 #define HALFPEL_MODE 2
95 #define THIRDPEL_MODE 3
96 #define PREDICT_MODE 4
98 /* dual scan (from some older h264 draft)
/* SVQ3-specific 4x4 coefficient scan: each entry is an x + y*4
 * position inside the block. Selected via scan_patterns[] in
 * svq3_decode_block for type==2 (intra 4x4 at low qscale). */
107 static const uint8_t svq3_scan[16] = {
108 0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
109 2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
110 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
111 0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
/* Scan for the 16 luma DC coefficients of an INTRA16x16 macroblock.
 * Entries are x*16 + y*64 offsets, i.e. positions of the DC sample
 * of each 4x4 sub-block inside the macroblock coefficient layout.
 * Used by svq3_decode_block for type==0. */
114 static const uint8_t luma_dc_zigzag_scan[16] = {
115 0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
116 3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
117 1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
118 3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
/* Pairs of indices walked in anti-diagonal order (0,2)(1,1)(2,0)...;
 * svq3_decode_mb uses svq3_pred_0[vlc][0]/[1] as the two selector
 * indices into svq3_pred_1 when decoding intra 4x4 luma prediction
 * modes two blocks at a time.
 * NOTE(review): several rows of the 25-entry table are elided in
 * this extract. */
121 static const uint8_t svq3_pred_0[25][2] = {
124 { 0, 2 }, { 1, 1 }, { 2, 0 },
125 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
126 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
127 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
128 { 2, 4 }, { 3, 3 }, { 4, 2 },
/* Intra 4x4 prediction-mode lookup: indexed as
 * svq3_pred_1[top + 1][left + 1][selector] in svq3_decode_mb, where
 * top/left are the neighbours' modes (-1 = unavailable, hence the
 * +1 bias). -1 entries mark invalid combinations; the caller treats
 * a -1 result as a bitstream error ("weird prediction"). */
133 static const int8_t svq3_pred_1[6][6][5] = {
134 { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
135 { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
136 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
137 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
138 { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
139 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
140 { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
141 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
142 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
143 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
144 { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
145 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/* Small-VLC run/level tables for coefficient decoding, indexed
 * [intra][vlc] in svq3_decode_block, which reads .run and .level.
 * NOTE(review): the struct field declarations are on lines elided
 * from this extract — presumed { run, level } given the accessors;
 * confirm against the full source. */
148 static const struct {
151 } svq3_dct_tables[2][16] = {
152 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
153 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
154 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
155 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/* Dequantization multipliers, indexed by qp (0..31); consumed as
 * qmul in ff_svq3_luma_dc_dequant_idct_c and ff_svq3_add_idct_c,
 * where products are rounded with +0x80000 and shifted right 20. */
158 static const uint32_t svq3_dequant_coeff[32] = {
159 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
160 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
161 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
162 61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
165 static int svq3_decode_end(AVCodecContext *avctx);
/**
 * Dequantize and inverse-transform the 16 luma DC coefficients.
 * Applies the SVQ3 13/17/7 butterfly over rows, then columns, scales
 * by svq3_dequant_coeff[qp] and rounds with (+0x80000 >> 20),
 * scattering results into 'output' at stride-spaced DC positions.
 * NOTE(review): 'stride', 'temp' and 'i' are declared on lines
 * elided from this extract.
 */
167 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
169 const int qmul = svq3_dequant_coeff[qp];
173 static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
/* horizontal (row) pass of the transform */
175 for (i = 0; i < 4; i++) {
176 const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
177 const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
178 const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
179 const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
181 temp[4 * i + 0] = z0 + z3;
182 temp[4 * i + 1] = z1 + z2;
183 temp[4 * i + 2] = z1 - z2;
184 temp[4 * i + 3] = z0 - z3;
/* vertical (column) pass + dequantization; results land at the DC
 * slot of each 4x4 sub-block via x_offset/stride addressing */
187 for (i = 0; i < 4; i++) {
188 const int offset = x_offset[i];
189 const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
190 const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
191 const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
192 const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
194 output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
195 output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
196 output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
197 output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
/**
 * Dequantize + inverse-transform one 4x4 residual block and add it
 * to the destination pixels (clipped to 0..255 via av_clip_uint8).
 * @param dc  selects DC handling: when nonzero, block[0] is folded
 *            into a per-block DC term (1538 fixed scale when dc==1,
 *            otherwise qmul-scaled) added through the rounding term.
 * The block is zeroed afterwards so it can be reused.
 */
202 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
203 int stride, int qp, int dc)
205 const int qmul = svq3_dequant_coeff[qp];
/* 13*13 compensates for the two missing butterfly passes on the DC */
209 dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
210 : qmul * (block[0] >> 3) / 2);
/* horizontal (row) pass, in place */
214 for (i = 0; i < 4; i++) {
215 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
216 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
217 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
218 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
220 block[0 + 4 * i] = z0 + z3;
221 block[1 + 4 * i] = z1 + z2;
222 block[2 + 4 * i] = z1 - z2;
223 block[3 + 4 * i] = z0 - z3;
/* vertical pass + dequant + add to dst; rr carries DC and rounding */
226 for (i = 0; i < 4; i++) {
227 const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
228 const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
229 const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
230 const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
231 const int rr = (dc + 0x80000);
233 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
234 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
235 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
236 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
/* clear the residual so the caller can reuse the buffer */
239 memset(block, 0, 16 * sizeof(int16_t));
/**
 * Decode run/level coefficient pairs for one block into 'block'.
 * @param index starting coefficient (nonzero for AC-only decoding)
 * @param type  selects the scan: 0 = luma DC zigzag, 1 = zigzag,
 *              2 = svq3_scan, 3 = chroma DC (per scan_patterns[]).
 * Small VLC codes index svq3_dct_tables; larger codes derive
 * run/level arithmetically. Returns nonzero on bitstream error
 * (exact return paths are on lines elided from this extract).
 */
242 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
243 int index, const int type)
245 static const uint8_t *const scan_patterns[4] =
246 { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
248 int run, level, sign, limit;
/* intra is 1 for types 2..3, 0 for types 0..1 (3*type >> 2) */
250 const int intra = 3 * type >> 2;
251 const uint8_t *const scan = scan_patterns[type];
/* outer loop widens the run limit in steps of 8; vlc == 0 ends a pass */
253 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
254 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
255 if ((int32_t)vlc < 0)
/* even codes carry negative levels: sign is 0 or -1 mask */
258 sign = (vlc & 1) ? 0 : -1;
265 } else if (vlc < 4) {
270 level = (vlc + 9 >> 2) - run;
/* table-driven run/level for short codes */
274 run = svq3_dct_tables[intra][vlc].run;
275 level = svq3_dct_tables[intra][vlc].level;
278 level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
281 level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
/* overrunning the limit is a bitstream error */
286 if ((index += run) >= limit)
/* apply sign via XOR/subtract trick and place along the scan */
289 block[scan[index]] = (level ^ sign) - sign;
/**
 * Motion-compensate a single partition (luma + both chroma planes).
 * @param dxy      sub-pel position index into the put/avg tables
 * @param thirdpel nonzero selects the thirdpel (tpel) DSP functions,
 *                 otherwise halfpel (hpel) functions are used
 * @param dir      0 = predict from last_pic, else from next_pic
 * @param avg      nonzero averages into dest instead of overwriting
 * Out-of-frame references go through emulated_edge_mc with clipped
 * MVs (the emulate flag itself is set on a line elided here).
 */
300 static inline void svq3_mc_dir_part(SVQ3Context *s,
301 int x, int y, int width, int height,
302 int mx, int my, int dxy,
303 int thirdpel, int dir, int avg)
305 H264Context *h = &s->h;
306 const H264Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
309 int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
/* detect references that leave the padded frame and clip them */
314 if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
315 my < 0 || my >= s->v_edge_pos - height - 1) {
317 mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
318 my = av_clip(my, -16, s->v_edge_pos - height + 15);
321 /* form component predictions */
322 dest = h->cur_pic.f.data[0] + x + y * h->linesize;
323 src = pic->f.data[0] + mx + my * h->linesize;
/* edge emulation path: copy the (width+1)x(height+1) source patch
 * into a scratch buffer with replicated borders */
326 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
327 h->linesize, h->linesize,
328 width + 1, height + 1,
329 mx, my, s->h_edge_pos, s->v_edge_pos);
330 src = h->edge_emu_buffer;
/* luma: thirdpel vs halfpel DSP dispatch, avg vs put */
333 (avg ? s->tdsp.avg_tpel_pixels_tab
334 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
337 (avg ? s->hdsp.avg_pixels_tab
338 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
/* chroma: skipped entirely in gray-only decoding */
341 if (!(h->flags & CODEC_FLAG_GRAY)) {
/* halve MVs for 4:2:0 chroma, rounding away from the block origin */
342 mx = mx + (mx < (int) x) >> 1;
343 my = my + (my < (int) y) >> 1;
345 height = height >> 1;
348 for (i = 1; i < 3; i++) {
349 dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
350 src = pic->f.data[i] + mx + my * h->uvlinesize;
353 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
354 h->uvlinesize, h->uvlinesize,
355 width + 1, height + 1,
356 mx, my, (s->h_edge_pos >> 1),
358 src = h->edge_emu_buffer;
361 (avg ? s->tdsp.avg_tpel_pixels_tab
362 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
366 (avg ? s->hdsp.avg_pixels_tab
367 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/**
 * Decode and apply motion compensation for all partitions of one
 * macroblock in one prediction direction.
 * 'size' encodes the partition geometry (16x16 down to 4x4 via the
 * part_width/part_height derivations below); 'mode' is one of the
 * *PEL_MODE/PREDICT_MODE constants. MVs are kept in 1/6-pel units
 * internally (hence the 6* edge scaling and /3, /6 conversions).
 * Returns <0 on bitstream error (paths partly elided here).
 */
374 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
377 int i, j, k, mx, my, dx, dy, x, y;
378 H264Context *h = &s->h;
379 H264SliceContext *sl = &h->slice_ctx[0];
380 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
381 const int part_height = 16 >> ((unsigned)(size + 1) / 3);
382 const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
383 const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
384 const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
/* iterate all partitions of the 16x16 macroblock */
386 for (i = 0; i < 16; i += part_height)
387 for (j = 0; j < 16; j += part_width) {
388 const int b_xy = (4 * sl->mb_x + (j >> 2)) +
389 (4 * sl->mb_y + (i >> 2)) * h->b_stride;
391 x = 16 * sl->mb_x + j;
392 y = 16 * sl->mb_y + i;
/* k = scan8-style sub-block index built from (i, j) bits */
393 k = (j >> 2 & 1) + (i >> 1 & 2) +
394 (j >> 1 & 4) + (i & 8);
396 if (mode != PREDICT_MODE) {
/* median MV prediction from neighbours, as in H.264 */
397 pred_motion(h, sl, k, part_width >> 2, dir, 1, &mx, &my);
/* PREDICT_MODE: temporally scale the co-located MV of next_pic by
 * the frame-distance ratio (direct-mode style) */
399 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
400 my = s->next_pic->motion_val[0][b_xy][1] << 1;
403 mx = mx * h->frame_num_offset /
404 h->prev_frame_num_offset + 1 >> 1;
405 my = my * h->frame_num_offset /
406 h->prev_frame_num_offset + 1 >> 1;
408 mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
409 h->prev_frame_num_offset + 1 >> 1;
410 my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
411 h->prev_frame_num_offset + 1 >> 1;
415 /* clip motion vector prediction to frame border */
416 mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
417 my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
419 /* get (optional) motion vector differential */
420 if (mode == PREDICT_MODE) {
/* note: dy is read before dx — bitstream order */
423 dy = svq3_get_se_golomb(&h->gb);
424 dx = svq3_get_se_golomb(&h->gb);
426 if (dx == INVALID_VLC || dy == INVALID_VLC) {
427 av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
432 /* compute motion vector */
433 if (mode == THIRDPEL_MODE) {
/* fx/fy: fullpel part, dxy: 0..8 thirdpel phase */
435 mx = (mx + 1 >> 1) + dx;
436 my = (my + 1 >> 1) + dy;
437 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
438 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
439 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
441 svq3_mc_dir_part(s, x, y, part_width, part_height,
442 fx, fy, dxy, 1, dir, avg);
445 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
/* convert 1/6-pel prediction to halfpel and add the differential */
446 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
447 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
448 dxy = (mx & 1) + 2 * (my & 1);
450 svq3_mc_dir_part(s, x, y, part_width, part_height,
451 mx >> 1, my >> 1, dxy, 0, dir, avg);
/* FULLPEL_MODE: convert to integer-pel */
455 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
456 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
458 svq3_mc_dir_part(s, x, y, part_width, part_height,
459 mx, my, 0, 0, dir, avg);
464 /* update mv_cache */
465 if (mode != PREDICT_MODE) {
466 int32_t mv = pack16to32(mx, my);
/* replicate the MV into neighbouring cache slots so later
 * pred_motion calls see it for sub-16x16 partitions */
468 if (part_height == 8 && i < 8) {
469 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1 * 8], mv);
471 if (part_width == 8 && j < 8)
472 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
474 if (part_width == 8 && j < 8)
475 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1], mv);
476 if (part_width == 4 || part_height == 4)
477 AV_WN32A(sl->mv_cache[dir][scan8[k]], mv);
480 /* write back motion vectors */
481 fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
482 part_width >> 2, part_height >> 2, h->b_stride,
483 pack16to32(mx, my), 4);
/**
 * Decode one macroblock: dispatch on mb_type (SKIP / INTER /
 * INTRA4x4 / INTRA16x16), perform motion compensation or set up
 * intra prediction modes, then decode the coefficient residual.
 * Rewrites mb_type to the corresponding MB_TYPE_* flag and stores it
 * in cur_pic.mb_type[mb_xy]. Returns <0 on bitstream error (several
 * error-return lines are elided in this extract).
 */
489 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
491 H264Context *h = &s->h;
492 H264SliceContext *sl = &h->slice_ctx[0];
493 int i, j, k, m, dir, mode;
497 const int mb_xy = sl->mb_xy;
498 const int b_xy = 4 * sl->mb_x + 4 * sl->mb_y * h->b_stride;
/* neighbour availability masks for intra prediction, per MB position */
500 sl->top_samples_available = (sl->mb_y == 0) ? 0x33FF : 0xFFFF;
501 sl->left_samples_available = (sl->mb_x == 0) ? 0x5F5F : 0xFFFF;
502 sl->topright_samples_available = 0xFFFF;
504 if (mb_type == 0) { /* SKIP */
/* P-skip (or B with unusable co-located MB): zero-MV copy from the
 * reference(s); otherwise B-skip uses temporally predicted MVs */
505 if (h->pict_type == AV_PICTURE_TYPE_P ||
506 s->next_pic->mb_type[mb_xy] == -1) {
507 svq3_mc_dir_part(s, 16 * sl->mb_x, 16 * sl->mb_y, 16, 16,
510 if (h->pict_type == AV_PICTURE_TYPE_B)
511 svq3_mc_dir_part(s, 16 * sl->mb_x, 16 * sl->mb_y, 16, 16,
514 mb_type = MB_TYPE_SKIP;
516 mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
517 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
519 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
522 mb_type = MB_TYPE_16x16;
524 } else if (mb_type < 8) { /* INTER */
/* MV precision: one extra bit disambiguates when both halfpel and
 * thirdpel are enabled in the sequence header */
525 if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
526 mode = THIRDPEL_MODE;
527 else if (s->halfpel_flag &&
528 s->thirdpel_flag == !get_bits1(&h->gb))
534 /* note ref_cache should contain here:
542 for (m = 0; m < 2; m++) {
543 if (sl->mb_x > 0 && sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
544 for (i = 0; i < 4; i++)
545 AV_COPY32(sl->mv_cache[m][scan8[0] - 1 + i * 8],
546 h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
548 for (i = 0; i < 4; i++)
549 AV_ZERO32(sl->mv_cache[m][scan8[0] - 1 + i * 8]);
552 memcpy(sl->mv_cache[m][scan8[0] - 1 * 8],
553 h->cur_pic.motion_val[m][b_xy - h->b_stride],
554 4 * 2 * sizeof(int16_t));
555 memset(&sl->ref_cache[m][scan8[0] - 1 * 8],
556 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
558 if (sl->mb_x < h->mb_width - 1) {
559 AV_COPY32(sl->mv_cache[m][scan8[0] + 4 - 1 * 8],
560 h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
561 sl->ref_cache[m][scan8[0] + 4 - 1 * 8] =
562 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
563 sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
565 sl->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
567 AV_COPY32(sl->mv_cache[m][scan8[0] - 1 - 1 * 8],
568 h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
569 sl->ref_cache[m][scan8[0] - 1 - 1 * 8] =
570 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
572 sl->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
574 memset(&sl->ref_cache[m][scan8[0] - 1 * 8 - 1],
575 PART_NOT_AVAILABLE, 8);
/* only one prediction direction exists outside B-frames */
577 if (h->pict_type != AV_PICTURE_TYPE_B)
581 /* decode motion vector(s) and form prediction(s) */
582 if (h->pict_type == AV_PICTURE_TYPE_P) {
583 if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
585 } else { /* AV_PICTURE_TYPE_B */
587 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
590 for (i = 0; i < 4; i++)
591 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
592 0, 4 * 2 * sizeof(int16_t));
595 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
598 for (i = 0; i < 4; i++)
599 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
600 0, 4 * 2 * sizeof(int16_t));
604 mb_type = MB_TYPE_16x16;
605 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
606 memset(sl->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* seed the prediction-mode cache from left/top neighbours */
610 for (i = 0; i < 4; i++)
611 sl->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
612 if (sl->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
613 sl->left_samples_available = 0x5F5F;
616 sl->intra4x4_pred_mode_cache[4 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
617 sl->intra4x4_pred_mode_cache[5 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
618 sl->intra4x4_pred_mode_cache[6 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
619 sl->intra4x4_pred_mode_cache[7 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
621 if (sl->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
622 sl->top_samples_available = 0x33FF;
625 /* decode prediction codes for luma blocks */
626 for (i = 0; i < 16; i += 2) {
627 vlc = svq3_get_ue_golomb(&h->gb);
630 av_log(h->avctx, AV_LOG_ERROR,
631 "luma prediction:%"PRIu32"\n", vlc);
/* one vlc yields the modes for a pair of 4x4 blocks via the
 * svq3_pred_0 / svq3_pred_1 lookup tables */
635 left = &sl->intra4x4_pred_mode_cache[scan8[i] - 1];
636 top = &sl->intra4x4_pred_mode_cache[scan8[i] - 8];
638 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
639 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
641 if (left[1] == -1 || left[2] == -1) {
642 av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
646 } else { /* mb_type == 33, DC_128_PRED block type */
647 for (i = 0; i < 4; i++)
648 memset(&sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
651 write_back_intra_pred_mode(h, sl);
/* mb_type 8: validate modes against availability; mb_type 33:
 * force DC_128 everywhere (branch structure partly elided) */
654 ff_h264_check_intra4x4_pred_mode(h, sl);
656 sl->top_samples_available = (sl->mb_y == 0) ? 0x33FF : 0xFFFF;
657 sl->left_samples_available = (sl->mb_x == 0) ? 0x5F5F : 0xFFFF;
659 for (i = 0; i < 4; i++)
660 memset(&sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
662 sl->top_samples_available = 0x33FF;
663 sl->left_samples_available = 0x5F5F;
666 mb_type = MB_TYPE_INTRA4x4;
667 } else { /* INTRA16x16 */
668 dir = i_mb_type_info[mb_type - 8].pred_mode;
/* remap the H.264 pred-mode numbering to SVQ3's */
669 dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
671 if ((sl->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, sl, dir, 0)) < 0) {
672 av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
673 return sl->intra16x16_pred_mode;
676 cbp = i_mb_type_info[mb_type - 8].cbp;
677 mb_type = MB_TYPE_INTRA16x16;
/* invalidate MVs for intra MBs in inter pictures */
680 if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
681 for (i = 0; i < 4; i++)
682 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
683 0, 4 * 2 * sizeof(int16_t));
684 if (h->pict_type == AV_PICTURE_TYPE_B) {
685 for (i = 0; i < 4; i++)
686 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
687 0, 4 * 2 * sizeof(int16_t));
690 if (!IS_INTRA4x4(mb_type)) {
691 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
693 if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
694 memset(sl->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
/* coded block pattern (not present for INTRA16x16 / pure skip) */
697 if (!IS_INTRA16x16(mb_type) &&
698 (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
699 if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48U){
700 av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
704 cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
705 : golomb_to_inter_cbp[vlc];
/* optional delta-qp, range-checked to 0..31 */
707 if (IS_INTRA16x16(mb_type) ||
708 (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
709 sl->qscale += svq3_get_se_golomb(&h->gb);
711 if (sl->qscale > 31u) {
712 av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", sl->qscale);
/* INTRA16x16: separate luma DC block, decoded first */
716 if (IS_INTRA16x16(mb_type)) {
717 AV_ZERO128(sl->mb_luma_dc[0] + 0);
718 AV_ZERO128(sl->mb_luma_dc[0] + 8);
719 if (svq3_decode_block(&h->gb, sl->mb_luma_dc[0], 0, 1)) {
720 av_log(h->avctx, AV_LOG_ERROR,
721 "error while decoding intra luma dc\n");
/* luma residual: per-8x8 cbp bit gates four 4x4 blocks each */
727 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
728 const int type = ((sl->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
730 for (i = 0; i < 4; i++)
731 if ((cbp & (1 << i))) {
732 for (j = 0; j < 4; j++) {
733 k = index ? (1 * (j & 1) + 2 * (i & 1) +
734 2 * (j & 2) + 4 * (i & 2))
736 sl->non_zero_count_cache[scan8[k]] = 1;
738 if (svq3_decode_block(&h->gb, &sl->mb[16 * k], index, type)) {
739 av_log(h->avctx, AV_LOG_ERROR,
740 "error while decoding block\n");
/* chroma DC blocks (type 3 = chroma_dc_scan) */
747 for (i = 1; i < 3; ++i)
748 if (svq3_decode_block(&h->gb, &sl->mb[16 * 16 * i], 0, 3)) {
749 av_log(h->avctx, AV_LOG_ERROR,
750 "error while decoding chroma dc block\n");
/* chroma AC blocks */
755 for (i = 1; i < 3; i++) {
756 for (j = 0; j < 4; j++) {
758 sl->non_zero_count_cache[scan8[k]] = 1;
760 if (svq3_decode_block(&h->gb, &sl->mb[16 * k], 1, 1)) {
761 av_log(h->avctx, AV_LOG_ERROR,
762 "error while decoding chroma ac block\n");
772 h->cur_pic.mb_type[mb_xy] = mb_type;
774 if (IS_INTRA(mb_type))
775 sl->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, sl, DC_PRED8x8, 1);
/**
 * Parse an SVQ3 slice header from h->gb: slice framing byte, optional
 * next-slice offset, watermark de-XOR, slice type/number, qscale and
 * adaptive-quant flag; then reset the intra predictors along the new
 * slice boundary. Returns nonzero on error (some return lines are
 * elided in this extract).
 */
780 static int svq3_decode_slice_header(AVCodecContext *avctx)
782 SVQ3Context *s = avctx->priv_data;
783 H264Context *h = &s->h;
784 H264SliceContext *sl = &h->slice_ctx[0];
785 const int mb_xy = sl->mb_xy;
/* framing byte: low bits select header layout (1 or 2), bits 5-6
 * give the byte-length of the next-slice offset field */
789 header = get_bits(&h->gb, 8);
791 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
793 av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
796 int length = header >> 5 & 3;
798 s->next_slice_index = get_bits_count(&h->gb) +
799 8 * show_bits(&h->gb, 8 * length) +
802 if (s->next_slice_index > h->gb.size_in_bits) {
803 av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
/* restrict the bit reader to this slice */
807 h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
808 skip_bits(&h->gb, 8);
/* undo the watermark obfuscation in place (XOR with the key) */
810 if (s->watermark_key) {
811 uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
812 AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
813 header ^ s->watermark_key);
/* NOTE(review): casts away const on gb.buffer to splice the slice
 * length bytes out of the stream — intentional in-place edit */
816 memmove((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
817 &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
819 skip_bits_long(&h->gb, 0);
822 if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
823 av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
827 sl->slice_type = golomb_to_pict_type[slice_id];
/* layout 2 carries an absolute MB index, converted to a skip run */
829 if ((header & 0x9F) == 2) {
830 i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
831 sl->mb_skip_run = get_bits(&h->gb, i) -
832 (sl->mb_y * h->mb_width + sl->mb_x);
838 sl->slice_num = get_bits(&h->gb, 8);
839 sl->qscale = get_bits(&h->gb, 5);
840 s->adaptive_quant = get_bits1(&h->gb);
849 skip_bits(&h->gb, 2);
851 if (skip_1stop_8data_bits(&h->gb) < 0)
852 return AVERROR_INVALIDDATA;
854 /* reset intra predictors and invalidate motion vector references */
856 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
857 -1, 4 * sizeof(int8_t));
858 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - sl->mb_x],
859 -1, 8 * sizeof(int8_t) * sl->mb_x);
862 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
863 -1, 8 * sizeof(int8_t) * (h->mb_width - sl->mb_x));
866 sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/**
 * Decoder init: allocate the three picture slots, initialize the
 * shared H.264 machinery and DSP contexts, parse the SEQH sequence
 * header found in the QuickTime extradata (frame size, halfpel/
 * thirdpel flags, low_delay, optional zlib-compressed watermark),
 * and size the macroblock tables. Errors jump to a cleanup path
 * (label elided in this extract) that calls svq3_decode_end.
 */
872 static av_cold int svq3_decode_init(AVCodecContext *avctx)
874 SVQ3Context *s = avctx->priv_data;
875 H264Context *h = &s->h;
876 H264SliceContext *sl;
878 unsigned char *extradata;
879 unsigned char *extradata_end;
881 int marker_found = 0;
884 s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
885 s->last_pic = av_mallocz(sizeof(*s->last_pic));
886 s->next_pic = av_mallocz(sizeof(*s->next_pic));
887 if (!s->next_pic || !s->last_pic || !s->cur_pic) {
888 ret = AVERROR(ENOMEM);
892 if ((ret = ff_h264_decode_init(avctx)) < 0)
895 ff_hpeldsp_init(&s->hdsp, avctx->flags);
896 ff_tpeldsp_init(&s->tdsp);
900 h->flags = avctx->flags;
/* SVQ3 is always 4:2:0, frame-coded, full-range YUV */
902 h->sps.chroma_format_idc = 1;
903 h->picture_structure = PICT_FRAME;
904 avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
905 avctx->color_range = AVCOL_RANGE_JPEG;
907 h->slice_ctx[0].chroma_qp[0] = h->slice_ctx[0].chroma_qp[1] = 4;
908 h->chroma_x_shift = h->chroma_y_shift = 1;
/* defaults used when no SEQH header is present */
911 s->thirdpel_flag = 1;
914 /* prowl for the "SEQH" marker in the extradata */
915 extradata = (unsigned char *)avctx->extradata;
916 extradata_end = avctx->extradata + avctx->extradata_size;
918 for (m = 0; m + 8 < avctx->extradata_size; m++) {
919 if (!memcmp(extradata, "SEQH", 4)) {
927 /* if a match was found, parse the extra data */
932 size = AV_RB32(&extradata[4]);
933 if (size > extradata_end - extradata - 8) {
934 ret = AVERROR_INVALIDDATA;
937 init_get_bits(&gb, extradata + 8, size * 8);
939 /* 'frame size code' and optional 'width, height' */
940 frame_size_code = get_bits(&gb, 3);
941 switch (frame_size_code) {
/* code 7: explicit 12-bit width/height (other cases elided) */
971 avctx->width = get_bits(&gb, 12);
972 avctx->height = get_bits(&gb, 12);
976 s->halfpel_flag = get_bits1(&gb);
977 s->thirdpel_flag = get_bits1(&gb);
985 h->low_delay = get_bits1(&gb);
990 if (skip_1stop_8data_bits(&gb) < 0) {
991 ret = AVERROR_INVALIDDATA;
995 s->unknown_flag = get_bits1(&gb);
996 avctx->has_b_frames = !h->low_delay;
/* watermark: a zlib-compressed logo follows; its checksum yields
 * the 32-bit key used to de-XOR slice data (requires CONFIG_ZLIB;
 * the #if lines are elided in this extract) */
997 if (s->unknown_flag) {
999 unsigned watermark_width = svq3_get_ue_golomb(&gb);
1000 unsigned watermark_height = svq3_get_ue_golomb(&gb);
1001 int u1 = svq3_get_ue_golomb(&gb);
1002 int u2 = get_bits(&gb, 8);
1003 int u3 = get_bits(&gb, 2);
1004 int u4 = svq3_get_ue_golomb(&gb);
1005 unsigned long buf_len = watermark_width *
1006 watermark_height * 4;
1007 int offset = get_bits_count(&gb) + 7 >> 3;
/* overflow guard for the width*height*4 allocation below */
1010 if (watermark_height <= 0 ||
1011 (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height) {
1016 buf = av_malloc(buf_len);
1018 ret = AVERROR(ENOMEM);
1021 av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1022 watermark_width, watermark_height);
1023 av_log(avctx, AV_LOG_DEBUG,
1024 "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1025 u1, u2, u3, u4, offset);
1026 if (uncompress(buf, &buf_len, extradata + 8 + offset,
1027 size - offset) != Z_OK) {
1028 av_log(avctx, AV_LOG_ERROR,
1029 "could not uncompress watermark logo\n");
1034 s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1035 s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1036 av_log(avctx, AV_LOG_DEBUG,
1037 "watermark key %#"PRIx32"\n", s->watermark_key);
1040 av_log(avctx, AV_LOG_ERROR,
1041 "this svq3 file contains watermark which need zlib support compiled in\n");
/* derive macroblock geometry from the (possibly updated) size */
1048 h->width = avctx->width;
1049 h->height = avctx->height;
1050 h->mb_width = (h->width + 15) / 16;
1051 h->mb_height = (h->height + 15) / 16;
1052 h->mb_stride = h->mb_width + 1;
1053 h->mb_num = h->mb_width * h->mb_height;
1054 h->b_stride = 4 * h->mb_width;
1055 s->h_edge_pos = h->mb_width * 16;
1056 s->v_edge_pos = h->mb_height * 16;
1058 if ((ret = ff_h264_alloc_tables(h)) < 0) {
1059 av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
/* error path cleanup */
1065 svq3_decode_end(avctx);
/* Release all side-data buffers (motion vectors, ref indices,
 * mb types) of one picture slot and unref its frame. */
1069 static void free_picture(AVCodecContext *avctx, H264Picture *pic)
1072 for (i = 0; i < 2; i++) {
1073 av_buffer_unref(&pic->motion_val_buf[i]);
1074 av_buffer_unref(&pic->ref_index_buf[i]);
1076 av_buffer_unref(&pic->mb_type_buf);
1078 av_frame_unref(&pic->f);
/**
 * Allocate (lazily, on first use) the per-picture side-data buffers
 * and acquire the actual frame buffer for 'pic'. Also lazily sizes
 * the shared edge-emulation scratch buffer once linesize is known.
 * Returns <0 on failure after releasing partial allocations via
 * free_picture (the fail label itself is elided in this extract).
 */
1081 static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
1083 SVQ3Context *s = avctx->priv_data;
1084 H264Context *h = &s->h;
1085 const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1086 const int mb_array_size = h->mb_stride * h->mb_height;
1087 const int b4_stride = h->mb_width * 4 + 1;
1088 const int b4_array_size = b4_stride * h->mb_height * 4;
/* first use of this slot: allocate the side-data arrays */
1091 if (!pic->motion_val_buf[0]) {
1094 pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1095 if (!pic->mb_type_buf)
1096 return AVERROR(ENOMEM);
/* skip the padding row/column in front of the mb_type grid */
1097 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1099 for (i = 0; i < 2; i++) {
1100 pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1101 pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1102 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1103 ret = AVERROR(ENOMEM);
1107 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1108 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* B-frames are never referenced by later pictures */
1111 pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1113 ret = ff_get_buffer(avctx, &pic->f,
1114 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* one-time scratch buffer for out-of-frame motion compensation */
1118 if (!h->edge_emu_buffer) {
1119 h->edge_emu_buffer = av_mallocz_array(pic->f.linesize[0], 17);
1120 if (!h->edge_emu_buffer)
1121 return AVERROR(ENOMEM);
1124 h->linesize = pic->f.linesize[0];
1125 h->uvlinesize = pic->f.linesize[1];
/* error path: undo partial allocations */
1129 free_picture(avctx, pic);
1133 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1134 int *got_frame, AVPacket *avpkt)
1136 SVQ3Context *s = avctx->priv_data;
1137 H264Context *h = &s->h;
1138 H264SliceContext *sl = &h->slice_ctx[0];
1139 int buf_size = avpkt->size;
1144 /* special case for last picture */
1145 if (buf_size == 0) {
1146 if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1147 ret = av_frame_ref(data, &s->next_pic->f);
1150 s->last_frame_output = 1;
1156 sl->mb_x = sl->mb_y = sl->mb_xy = 0;
1158 if (s->watermark_key) {
1159 av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
1161 return AVERROR(ENOMEM);
1162 memcpy(s->buf, avpkt->data, buf_size);
1168 init_get_bits(&h->gb, buf, 8 * buf_size);
1170 if (svq3_decode_slice_header(avctx))
1173 h->pict_type = sl->slice_type;
1175 if (h->pict_type != AV_PICTURE_TYPE_B)
1176 FFSWAP(H264Picture*, s->next_pic, s->last_pic);
1178 av_frame_unref(&s->cur_pic->f);
1180 /* for skipping the frame */
1181 s->cur_pic->f.pict_type = h->pict_type;
1182 s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1184 ret = get_buffer(avctx, s->cur_pic);
1188 h->cur_pic_ptr = s->cur_pic;
1189 av_frame_unref(&h->cur_pic.f);
1190 memcpy(&h->cur_pic.tf, &s->cur_pic->tf, sizeof(h->cur_pic) - offsetof(H264Picture, tf));
1191 ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
1195 for (i = 0; i < 16; i++) {
1196 h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1197 h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1199 for (i = 0; i < 16; i++) {
1200 h->block_offset[16 + i] =
1201 h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1202 h->block_offset[48 + 16 + i] =
1203 h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1206 if (h->pict_type != AV_PICTURE_TYPE_I) {
1207 if (!s->last_pic->f.data[0]) {
1208 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1209 av_frame_unref(&s->last_pic->f);
1210 ret = get_buffer(avctx, s->last_pic);
1213 memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1214 memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1215 s->last_pic->f.linesize[1]);
1216 memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1217 s->last_pic->f.linesize[2]);
1220 if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1221 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1222 av_frame_unref(&s->next_pic->f);
1223 ret = get_buffer(avctx, s->next_pic);
1226 memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1227 memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1228 s->next_pic->f.linesize[1]);
1229 memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1230 s->next_pic->f.linesize[2]);
1234 if (avctx->debug & FF_DEBUG_PICT_INFO)
1235 av_log(h->avctx, AV_LOG_DEBUG,
1236 "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1237 av_get_picture_type_char(h->pict_type),
1238 s->halfpel_flag, s->thirdpel_flag,
1239 s->adaptive_quant, h->slice_ctx[0].qscale, sl->slice_num);
1241 if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1242 avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1243 avctx->skip_frame >= AVDISCARD_ALL)
1246 if (s->next_p_frame_damaged) {
1247 if (h->pict_type == AV_PICTURE_TYPE_B)
1250 s->next_p_frame_damaged = 0;
1253 if (h->pict_type == AV_PICTURE_TYPE_B) {
1254 h->frame_num_offset = sl->slice_num - h->prev_frame_num;
1256 if (h->frame_num_offset < 0)
1257 h->frame_num_offset += 256;
1258 if (h->frame_num_offset == 0 ||
1259 h->frame_num_offset >= h->prev_frame_num_offset) {
1260 av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1264 h->prev_frame_num = h->frame_num;
1265 h->frame_num = sl->slice_num;
1266 h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1268 if (h->prev_frame_num_offset < 0)
1269 h->prev_frame_num_offset += 256;
1272 for (m = 0; m < 2; m++) {
1274 for (i = 0; i < 4; i++) {
1276 for (j = -1; j < 4; j++)
1277 sl->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1279 sl->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1283 for (sl->mb_y = 0; sl->mb_y < h->mb_height; sl->mb_y++) {
1284 for (sl->mb_x = 0; sl->mb_x < h->mb_width; sl->mb_x++) {
1286 sl->mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
1288 if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1289 ((get_bits_count(&h->gb) & 7) == 0 ||
1290 show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1291 skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1292 h->gb.size_in_bits = 8 * buf_size;
1294 if (svq3_decode_slice_header(avctx))
1297 /* TODO: support s->mb_skip_run */
1300 mb_type = svq3_get_ue_golomb(&h->gb);
1302 if (h->pict_type == AV_PICTURE_TYPE_I)
1304 else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1306 if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1307 av_log(h->avctx, AV_LOG_ERROR,
1308 "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
1312 if (mb_type != 0 || sl->cbp)
1313 ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
1315 if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1316 h->cur_pic.mb_type[sl->mb_x + sl->mb_y * h->mb_stride] =
1317 (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1320 ff_draw_horiz_band(avctx, &s->cur_pic->f,
1321 s->last_pic->f.data[0] ? &s->last_pic->f : NULL,
1322 16 * sl->mb_y, 16, h->picture_structure, 0,
1326 left = buf_size*8 - get_bits_count(&h->gb);
1328 if (sl->mb_y != h->mb_height || sl->mb_x != h->mb_width) {
1329 av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, sl->mb_y, sl->mb_x, left);
1330 //av_hex_dump(stderr, buf+buf_size-8, 8);
1334 av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1338 if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1339 ret = av_frame_ref(data, &s->cur_pic->f);
1340 else if (s->last_pic->f.data[0])
1341 ret = av_frame_ref(data, &s->last_pic->f);
1345 /* Do not output the last pic after seeking. */
1346 if (s->last_pic->f.data[0] || h->low_delay)
1349 if (h->pict_type != AV_PICTURE_TYPE_B) {
1350 FFSWAP(H264Picture*, s->cur_pic, s->next_pic);
1352 av_frame_unref(&s->cur_pic->f);
1358 static av_cold int svq3_decode_end(AVCodecContext *avctx)
/* Codec close callback: releases everything svq3_decode_init allocated.
 * Frees the three decoder-owned pictures (current / next / last reference),
 * then tears down the embedded H264Context. */
1360 SVQ3Context *s = avctx->priv_data;
1361 H264Context *h = &s->h;
/* Release each picture's internal buffers first, then the H264Picture
 * structs themselves (av_freep also NULLs the pointers, guarding against
 * double-free on repeated close). */
1363 free_picture(avctx, s->cur_pic);
1364 free_picture(avctx, s->next_pic);
1365 free_picture(avctx, s->last_pic);
1366 av_freep(&s->cur_pic);
1367 av_freep(&s->next_pic);
1368 av_freep(&s->last_pic);
/* h->cur_pic.f holds a reference taken (via av_frame_ref) from s->cur_pic
 * during decoding; drop it before freeing the H.264 context. */
1370 av_frame_unref(&h->cur_pic.f);
1372 ff_h264_free_context(h);
/* edge_emu_buffer is not released by ff_h264_free_context here, so free it
 * explicitly. NOTE(review): return statement/closing brace lie outside this
 * excerpt — presumed to return 0; confirm against the full file. */
1376 av_freep(&h->edge_emu_buffer);
1381 AVCodec ff_svq3_decoder = {
1383 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1384 .type = AVMEDIA_TYPE_VIDEO,
1385 .id = AV_CODEC_ID_SVQ3,
1386 .priv_data_size = sizeof(SVQ3Context),
1387 .init = svq3_decode_init,
1388 .close = svq3_decode_end,
1389 .decode = svq3_decode_frame,
1390 .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1393 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,