2 * Copyright (c) 2003 The FFmpeg Project
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
45 #include "libavutil/attributes.h"
48 #include "mpegutils.h"
51 #include "h264data.h" // FIXME FIXME FIXME
53 #include "h264_mvpred.h"
56 #include "rectangle.h"
58 #include "vdpau_internal.h"
/* Private decoder state for SVQ3. Several fields are not visible in this
 * excerpt (the struct continues beyond the lines shown here). */
72 typedef struct SVQ3Context {
/* future/previous reference pictures, used for B/P prediction */
77 H264Picture *next_pic;
78 H264Picture *last_pic;
/* 32-bit key XORed over slice data when the stream carries a watermark
 * (derived from the decompressed watermark logo in svq3_decode_init) */
83 uint32_t watermark_key;
87 int next_p_frame_damaged;
/* set once the delayed last picture has been returned to the caller */
90 int last_frame_output;
/* motion-compensation precision / prediction modes used by svq3_mc_dir() */
93 #define FULLPEL_MODE 1
94 #define HALFPEL_MODE 2
95 #define THIRDPEL_MODE 3
96 #define PREDICT_MODE 4
98 /* dual scan (from some older h264 draft)
 * (comment truncated in this excerpt) */
/* SVQ3-specific 4x4 coefficient scan order; values are x + y*4 offsets
 * into a 4x4 block (differs from the standard H.264 zigzag) */
107 static const uint8_t svq3_scan[16] = {
108 0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
109 2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
110 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
111 0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
/* scan order for the 16 luma DC coefficients; entries are offsets into the
 * macroblock coefficient layout (16 per 4x4 block, 64 per block row) */
114 static const uint8_t luma_dc_zigzag_scan[16] = {
115 0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
116 3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
117 1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
118 3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
/* maps a luma-prediction VLC code to a pair of indices into svq3_pred_1
 * (used in svq3_decode_mb for INTRA4x4 mode prediction) */
121 static const uint8_t svq3_pred_0[25][2] = {
124 { 0, 2 }, { 1, 1 }, { 2, 0 },
125 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
126 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
127 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
128 { 2, 4 }, { 3, 3 }, { 4, 2 },
/* intra 4x4 prediction-mode table indexed by [top mode + 1][left mode + 1];
 * -1 entries mark invalid combinations (rejected as "weird prediction") */
133 static const int8_t svq3_pred_1[6][6][5] = {
134 { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
135 { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
136 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
137 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
138 { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
139 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
140 { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
141 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
142 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
143 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
144 { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
145 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/* (run, level) pairs for small DCT VLC codes, one table for inter ([0])
 * and one for intra ([1]) blocks; indexed by vlc in svq3_decode_block */
148 static const struct {
151 } svq3_dct_tables[2][16] = {
152 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
153 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
154 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
155 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/* dequantization multipliers indexed by qscale (0..31); applied with a
 * Q20 rounding shift in the IDCT functions below */
158 static const uint32_t svq3_dequant_coeff[32] = {
159 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
160 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
161 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
162 61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
/* forward declaration: used from svq3_decode_init's error path */
165 static int svq3_decode_end(AVCodecContext *avctx);
/**
 * Inverse-transform and dequantize the 4x4 luma DC coefficients.
 * Uses SVQ3's integer transform kernel (13/17/7 multipliers) in two
 * separable passes, then scales each result by the dequant coefficient
 * for @p qp and rounds with 0x80000 (0.5 in Q20) before shifting.
 * NOTE(review): 'stride', 'temp' and 'i' are declared on lines not
 * visible in this excerpt.
 */
167 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
169 const int qmul = svq3_dequant_coeff[qp];
173 static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
/* horizontal (row) pass into temp[] */
175 for (i = 0; i < 4; i++) {
176 const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
177 const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
178 const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
179 const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
181 temp[4 * i + 0] = z0 + z3;
182 temp[4 * i + 1] = z1 + z2;
183 temp[4 * i + 2] = z1 - z2;
184 temp[4 * i + 3] = z0 - z3;
/* vertical (column) pass with dequantization and Q20 rounding */
187 for (i = 0; i < 4; i++) {
188 const int offset = x_offset[i];
189 const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
190 const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
191 const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
192 const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
194 output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
195 output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
196 output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
197 output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
/**
 * Dequantize + inverse-transform a 4x4 block and add the result to @p dst,
 * clipping each sample to 0..255. @p dc selects DC handling: when nonzero,
 * block[0] is pre-scaled (1538 for dc==1, otherwise qmul-based) and folded
 * into the rounding term instead of the transform.
 * The coefficient block is zeroed on completion.
 */
202 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
203 int stride, int qp, int dc)
205 const int qmul = svq3_dequant_coeff[qp];
209 dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
210 : qmul * (block[0] >> 3) / 2);
/* row pass of the 13/17/7 integer transform, done in place */
214 for (i = 0; i < 4; i++) {
215 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
216 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
217 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
218 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
220 block[0 + 4 * i] = z0 + z3;
221 block[1 + 4 * i] = z1 + z2;
222 block[2 + 4 * i] = z1 - z2;
223 block[3 + 4 * i] = z0 - z3;
/* column pass; rr carries the DC contribution plus Q20 rounding bias */
226 for (i = 0; i < 4; i++) {
227 const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
228 const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
229 const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
230 const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
231 const int rr = (dc + 0x80000);
233 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
234 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
235 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
236 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
/* consume the coefficients so the block can be reused */
239 memset(block, 0, 16 * sizeof(int16_t));
/**
 * Decode run/level coefficient data for one block from the bitstream.
 * @param type selects the scan pattern: 0 = luma DC zigzag, 1 = zigzag,
 *             2 = SVQ3 scan, 3 = chroma DC; it also selects the
 *             inter/intra VLC table via (3 * type >> 2).
 * Coefficients are written through scan[] into @p block starting at
 * @p index. NOTE(review): several branches (error returns, the small-vlc
 * run assignment) are on lines not visible in this excerpt.
 */
242 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
243 int index, const int type)
245 static const uint8_t *const scan_patterns[4] =
246 { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
248 int run, level, sign, limit;
250 const int intra = 3 * type >> 2;
251 const uint8_t *const scan = scan_patterns[type];
/* outer loop: coefficient groups; vlc == 0 terminates a group */
253 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
254 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
255 if ((int32_t)vlc < 0)
/* even vlc -> negative level, odd -> positive */
258 sign = (vlc & 1) ? 0 : -1;
265 } else if (vlc < 4) {
270 level = (vlc + 9 >> 2) - run;
/* mid-range codes: direct table lookup */
274 run = svq3_dct_tables[intra][vlc].run;
275 level = svq3_dct_tables[intra][vlc].level;
/* escape codes: derive level from vlc magnitude and run class */
278 level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
281 level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
/* bounds check before storing the signed coefficient */
286 if ((index += run) >= limit)
289 block[scan[index]] = (level ^ sign) - sign;
/**
 * Perform motion compensation for one partition in one direction.
 * @param dir      0 = predict from last_pic, 1 = from next_pic
 * @param thirdpel nonzero selects the thirdpel DSP functions
 * @param avg      nonzero averages into dest instead of overwriting
 * Handles out-of-frame references via emulated_edge_mc, then does the
 * luma copy and (unless GRAY) the two chroma planes at half resolution.
 */
300 static inline void svq3_mc_dir_part(SVQ3Context *s,
301 int x, int y, int width, int height,
302 int mx, int my, int dxy,
303 int thirdpel, int dir, int avg)
305 H264Context *h = &s->h;
306 H264SliceContext *sl = &h->slice_ctx[0];
307 const H264Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
310 int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
/* clamp motion vectors that point outside the padded frame */
315 if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
316 my < 0 || my >= s->v_edge_pos - height - 1) {
318 mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
319 my = av_clip(my, -16, s->v_edge_pos - height + 15);
322 /* form component predictions */
323 dest = h->cur_pic.f->data[0] + x + y * sl->linesize;
324 src = pic->f->data[0] + mx + my * sl->linesize;
/* out-of-picture reference: build a padded copy in the edge buffer */
327 h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src,
328 sl->linesize, sl->linesize,
329 width + 1, height + 1,
330 mx, my, s->h_edge_pos, s->v_edge_pos);
331 src = sl->edge_emu_buffer;
/* luma: thirdpel vs. full/half-pel DSP dispatch */
334 (avg ? s->tdsp.avg_tpel_pixels_tab
335 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, sl->linesize,
338 (avg ? s->hdsp.avg_pixels_tab
339 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, sl->linesize,
/* chroma at half resolution, skipped in gray-only mode */
342 if (!(h->flags & AV_CODEC_FLAG_GRAY)) {
343 mx = mx + (mx < (int) x) >> 1;
344 my = my + (my < (int) y) >> 1;
346 height = height >> 1;
349 for (i = 1; i < 3; i++) {
350 dest = h->cur_pic.f->data[i] + (x >> 1) + (y >> 1) * sl->uvlinesize;
351 src = pic->f->data[i] + mx + my * sl->uvlinesize;
354 h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src,
355 sl->uvlinesize, sl->uvlinesize,
356 width + 1, height + 1,
357 mx, my, (s->h_edge_pos >> 1),
359 src = sl->edge_emu_buffer;
362 (avg ? s->tdsp.avg_tpel_pixels_tab
363 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
367 (avg ? s->hdsp.avg_pixels_tab
368 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/**
 * Decode motion vectors for all partitions of the current macroblock and
 * run motion compensation for each (via svq3_mc_dir_part).
 * @param size encodes the partition geometry (see part_width/part_height)
 * @param mode FULLPEL/HALFPEL/THIRDPEL/PREDICT_MODE; PREDICT_MODE derives
 *             vectors from next_pic scaled by frame_num offsets (B skip)
 * Returns 0 on success, negative on an invalid MV VLC.
 */
375 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
378 int i, j, k, mx, my, dx, dy, x, y;
379 H264Context *h = &s->h;
380 H264SliceContext *sl = &h->slice_ctx[0];
381 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
382 const int part_height = 16 >> ((unsigned)(size + 1) / 3);
383 const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
384 const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
385 const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
/* iterate over partitions in raster order within the 16x16 MB */
387 for (i = 0; i < 16; i += part_height)
388 for (j = 0; j < 16; j += part_width) {
389 const int b_xy = (4 * sl->mb_x + (j >> 2)) +
390 (4 * sl->mb_y + (i >> 2)) * h->b_stride;
392 x = 16 * sl->mb_x + j;
393 y = 16 * sl->mb_y + i;
/* k: scan8-style index of this partition's top-left 4x4 block */
394 k = (j >> 2 & 1) + (i >> 1 & 2) +
395 (j >> 1 & 4) + (i & 8);
397 if (mode != PREDICT_MODE) {
398 pred_motion(h, sl, k, part_width >> 2, dir, 1, &mx, &my);
/* PREDICT_MODE: scale co-located MV from next_pic by temporal distance */
400 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
401 my = s->next_pic->motion_val[0][b_xy][1] << 1;
404 mx = mx * h->frame_num_offset /
405 h->prev_frame_num_offset + 1 >> 1;
406 my = my * h->frame_num_offset /
407 h->prev_frame_num_offset + 1 >> 1;
409 mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
410 h->prev_frame_num_offset + 1 >> 1;
411 my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
412 h->prev_frame_num_offset + 1 >> 1;
416 /* clip motion vector prediction to frame border */
417 mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
418 my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
420 /* get (optional) motion vector differential */
421 if (mode == PREDICT_MODE) {
424 dy = svq3_get_se_golomb(&h->gb);
425 dx = svq3_get_se_golomb(&h->gb);
427 if (dx == INVALID_VLC || dy == INVALID_VLC) {
428 av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
433 /* compute motion vector */
434 if (mode == THIRDPEL_MODE) {
436 mx = (mx + 1 >> 1) + dx;
437 my = (my + 1 >> 1) + dy;
438 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
439 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
440 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
442 svq3_mc_dir_part(s, x, y, part_width, part_height,
443 fx, fy, dxy, 1, dir, avg);
446 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
447 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
448 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
449 dxy = (mx & 1) + 2 * (my & 1);
451 svq3_mc_dir_part(s, x, y, part_width, part_height,
452 mx >> 1, my >> 1, dxy, 0, dir, avg);
/* FULLPEL: round sixth-pel prediction down to integer pixels */
456 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
457 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
459 svq3_mc_dir_part(s, x, y, part_width, part_height,
460 mx, my, 0, 0, dir, avg);
465 /* update mv_cache */
466 if (mode != PREDICT_MODE) {
467 int32_t mv = pack16to32(mx, my);
469 if (part_height == 8 && i < 8) {
470 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1 * 8], mv);
472 if (part_width == 8 && j < 8)
473 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
475 if (part_width == 8 && j < 8)
476 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1], mv);
477 if (part_width == 4 || part_height == 4)
478 AV_WN32A(sl->mv_cache[dir][scan8[k]], mv);
481 /* write back motion vectors */
482 fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
483 part_width >> 2, part_height >> 2, h->b_stride,
484 pack16to32(mx, my), 4);
/**
 * Decode one macroblock: determine its type (SKIP / INTER / INTRA4x4 /
 * INTRA16x16), decode motion and prediction information, then read the
 * residual coefficient blocks. Returns 0 on success, negative on error.
 * NOTE(review): this excerpt omits many lines (error returns, else
 * branches, closing braces) from the original function.
 */
490 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
492 H264Context *h = &s->h;
493 H264SliceContext *sl = &h->slice_ctx[0];
494 int i, j, k, m, dir, mode;
498 const int mb_xy = sl->mb_xy;
499 const int b_xy = 4 * sl->mb_x + 4 * sl->mb_y * h->b_stride;
/* neighbour availability masks depend on MB position in the frame */
501 sl->top_samples_available = (sl->mb_y == 0) ? 0x33FF : 0xFFFF;
502 sl->left_samples_available = (sl->mb_x == 0) ? 0x5F5F : 0xFFFF;
503 sl->topright_samples_available = 0xFFFF;
505 if (mb_type == 0) { /* SKIP */
506 if (h->pict_type == AV_PICTURE_TYPE_P ||
507 s->next_pic->mb_type[mb_xy] == -1) {
508 svq3_mc_dir_part(s, 16 * sl->mb_x, 16 * sl->mb_y, 16, 16,
511 if (h->pict_type == AV_PICTURE_TYPE_B)
512 svq3_mc_dir_part(s, 16 * sl->mb_x, 16 * sl->mb_y, 16, 16,
515 mb_type = MB_TYPE_SKIP;
/* B skip: borrow partitioning from the co-located MB in next_pic */
517 mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
518 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
520 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
523 mb_type = MB_TYPE_16x16;
525 } else if (mb_type < 8) { /* INTER */
/* MV precision is signalled relative to the global hpel/tpel flags */
526 if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
527 mode = THIRDPEL_MODE;
528 else if (s->halfpel_flag &&
529 s->thirdpel_flag == !get_bits1(&h->gb))
535 /* note ref_cache should contain here:
 * (comment truncated in this excerpt) */
/* fill mv_cache/ref_cache from decoded neighbours for pred_motion() */
543 for (m = 0; m < 2; m++) {
544 if (sl->mb_x > 0 && sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
545 for (i = 0; i < 4; i++)
546 AV_COPY32(sl->mv_cache[m][scan8[0] - 1 + i * 8],
547 h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
549 for (i = 0; i < 4; i++)
550 AV_ZERO32(sl->mv_cache[m][scan8[0] - 1 + i * 8]);
553 memcpy(sl->mv_cache[m][scan8[0] - 1 * 8],
554 h->cur_pic.motion_val[m][b_xy - h->b_stride],
555 4 * 2 * sizeof(int16_t));
556 memset(&sl->ref_cache[m][scan8[0] - 1 * 8],
557 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
559 if (sl->mb_x < h->mb_width - 1) {
560 AV_COPY32(sl->mv_cache[m][scan8[0] + 4 - 1 * 8],
561 h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
562 sl->ref_cache[m][scan8[0] + 4 - 1 * 8] =
563 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
564 sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
566 sl->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
568 AV_COPY32(sl->mv_cache[m][scan8[0] - 1 - 1 * 8],
569 h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
570 sl->ref_cache[m][scan8[0] - 1 - 1 * 8] =
571 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
573 sl->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
575 memset(&sl->ref_cache[m][scan8[0] - 1 * 8 - 1],
576 PART_NOT_AVAILABLE, 8);
/* P frames only use direction 0; stop after the first iteration */
578 if (h->pict_type != AV_PICTURE_TYPE_B)
582 /* decode motion vector(s) and form prediction(s) */
583 if (h->pict_type == AV_PICTURE_TYPE_P) {
584 if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
586 } else { /* AV_PICTURE_TYPE_B */
588 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
591 for (i = 0; i < 4; i++)
592 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
593 0, 4 * 2 * sizeof(int16_t));
596 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
599 for (i = 0; i < 4; i++)
600 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
601 0, 4 * 2 * sizeof(int16_t));
605 mb_type = MB_TYPE_16x16;
606 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
607 memset(sl->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* import neighbour intra prediction modes into the cache */
611 for (i = 0; i < 4; i++)
612 sl->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
613 if (sl->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
614 sl->left_samples_available = 0x5F5F;
617 sl->intra4x4_pred_mode_cache[4 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
618 sl->intra4x4_pred_mode_cache[5 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
619 sl->intra4x4_pred_mode_cache[6 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
620 sl->intra4x4_pred_mode_cache[7 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
622 if (sl->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
623 sl->top_samples_available = 0x33FF;
626 /* decode prediction codes for luma blocks */
627 for (i = 0; i < 16; i += 2) {
628 vlc = svq3_get_ue_golomb(&h->gb);
631 av_log(h->avctx, AV_LOG_ERROR,
632 "luma prediction:%"PRIu32"\n", vlc);
636 left = &sl->intra4x4_pred_mode_cache[scan8[i] - 1];
637 top = &sl->intra4x4_pred_mode_cache[scan8[i] - 8];
/* one vlc yields modes for a pair of 4x4 blocks via the pred tables */
639 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
640 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
642 if (left[1] == -1 || left[2] == -1) {
643 av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
647 } else { /* mb_type == 33, DC_128_PRED block type */
648 for (i = 0; i < 4; i++)
649 memset(&sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
652 write_back_intra_pred_mode(h, sl);
655 ff_h264_check_intra4x4_pred_mode(h, sl);
657 sl->top_samples_available = (sl->mb_y == 0) ? 0x33FF : 0xFFFF;
658 sl->left_samples_available = (sl->mb_x == 0) ? 0x5F5F : 0xFFFF;
660 for (i = 0; i < 4; i++)
661 memset(&sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
663 sl->top_samples_available = 0x33FF;
664 sl->left_samples_available = 0x5F5F;
667 mb_type = MB_TYPE_INTRA4x4;
668 } else { /* INTRA16x16 */
669 dir = i_mb_type_info[mb_type - 8].pred_mode;
670 dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
672 if ((sl->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, sl, dir, 0)) < 0) {
673 av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
674 return sl->intra16x16_pred_mode;
677 cbp = i_mb_type_info[mb_type - 8].cbp;
678 mb_type = MB_TYPE_INTRA16x16;
/* intra MBs in inter frames must still clear their motion vectors */
681 if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
682 for (i = 0; i < 4; i++)
683 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
684 0, 4 * 2 * sizeof(int16_t));
685 if (h->pict_type == AV_PICTURE_TYPE_B) {
686 for (i = 0; i < 4; i++)
687 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
688 0, 4 * 2 * sizeof(int16_t));
691 if (!IS_INTRA4x4(mb_type)) {
692 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
694 if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
695 memset(sl->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
/* read the coded-block-pattern vlc unless implied by the MB type */
698 if (!IS_INTRA16x16(mb_type) &&
699 (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
700 if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48U){
701 av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
705 cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
706 : golomb_to_inter_cbp[vlc];
708 if (IS_INTRA16x16(mb_type) ||
709 (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
710 sl->qscale += svq3_get_se_golomb(&h->gb);
712 if (sl->qscale > 31u) {
713 av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", sl->qscale);
717 if (IS_INTRA16x16(mb_type)) {
718 AV_ZERO128(sl->mb_luma_dc[0] + 0);
719 AV_ZERO128(sl->mb_luma_dc[0] + 8);
720 if (svq3_decode_block(&h->gb, sl->mb_luma_dc[0], 0, 1)) {
721 av_log(h->avctx, AV_LOG_ERROR,
722 "error while decoding intra luma dc\n");
/* residual luma 4x4 blocks, per 8x8 quadrant flagged in cbp */
728 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
729 const int type = ((sl->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
731 for (i = 0; i < 4; i++)
732 if ((cbp & (1 << i))) {
733 for (j = 0; j < 4; j++) {
734 k = index ? (1 * (j & 1) + 2 * (i & 1) +
735 2 * (j & 2) + 4 * (i & 2))
737 sl->non_zero_count_cache[scan8[k]] = 1;
739 if (svq3_decode_block(&h->gb, &sl->mb[16 * k], index, type)) {
740 av_log(h->avctx, AV_LOG_ERROR,
741 "error while decoding block\n");
/* chroma DC then chroma AC blocks */
748 for (i = 1; i < 3; ++i)
749 if (svq3_decode_block(&h->gb, &sl->mb[16 * 16 * i], 0, 3)) {
750 av_log(h->avctx, AV_LOG_ERROR,
751 "error while decoding chroma dc block\n");
756 for (i = 1; i < 3; i++) {
757 for (j = 0; j < 4; j++) {
759 sl->non_zero_count_cache[scan8[k]] = 1;
761 if (svq3_decode_block(&h->gb, &sl->mb[16 * k], 1, 1)) {
762 av_log(h->avctx, AV_LOG_ERROR,
763 "error while decoding chroma ac block\n");
773 h->cur_pic.mb_type[mb_xy] = mb_type;
775 if (IS_INTRA(mb_type))
776 sl->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, sl, DC_PRED8x8, 1);
/**
 * Parse an SVQ3 slice header: validate the header byte, locate the next
 * slice, undo the watermark XOR if present, read slice type / qscale /
 * adaptive-quant flags, and reset intra predictors along the slice edge.
 * Returns 0 on success, negative AVERROR on malformed headers.
 */
781 static int svq3_decode_slice_header(AVCodecContext *avctx)
783 SVQ3Context *s = avctx->priv_data;
784 H264Context *h = &s->h;
785 H264SliceContext *sl = &h->slice_ctx[0];
786 const int mb_xy = sl->mb_xy;
790 header = get_bits(&h->gb, 8);
/* low 5 bits must be 1 or 2 and a length code must be present */
792 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
794 av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
797 int length = header >> 5 & 3;
/* slice size field is 'length' bytes wide, stored in the stream */
799 s->next_slice_index = get_bits_count(&h->gb) +
800 8 * show_bits(&h->gb, 8 * length) +
803 if (s->next_slice_index > h->gb.size_in_bits) {
804 av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
808 h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
809 skip_bits(&h->gb, 8);
/* de-watermark the upcoming slice bytes in place */
811 if (s->watermark_key) {
812 uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
813 AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
814 header ^ s->watermark_key);
817 memmove((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
818 &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
820 skip_bits_long(&h->gb, 0);
823 if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
824 av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
828 sl->slice_type = golomb_to_pict_type[slice_id];
/* header type 2 carries an explicit starting-MB / skip-run field */
830 if ((header & 0x9F) == 2) {
831 i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
832 sl->mb_skip_run = get_bits(&h->gb, i) -
833 (sl->mb_y * h->mb_width + sl->mb_x);
839 sl->slice_num = get_bits(&h->gb, 8);
840 sl->qscale = get_bits(&h->gb, 5);
841 s->adaptive_quant = get_bits1(&h->gb);
846 if (s->has_watermark)
850 skip_bits(&h->gb, 2);
852 if (skip_1stop_8data_bits(&h->gb) < 0)
853 return AVERROR_INVALIDDATA;
855 /* reset intra predictors and invalidate motion vector references */
857 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
858 -1, 4 * sizeof(int8_t));
859 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - sl->mb_x],
860 -1, 8 * sizeof(int8_t) * sl->mb_x);
863 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
864 -1, 8 * sizeof(int8_t) * (h->mb_width - sl->mb_x));
867 sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/**
 * Decoder init: allocate the three picture slots, set up the H.264 helper
 * context for SVQ3's fixed format (8-bit 4:2:0, full range), then parse
 * the "SEQH" atom from extradata for frame size, MV precision flags and
 * the optional zlib-compressed watermark. Errors unwind via
 * svq3_decode_end().
 */
873 static av_cold int svq3_decode_init(AVCodecContext *avctx)
875 SVQ3Context *s = avctx->priv_data;
876 H264Context *h = &s->h;
877 H264SliceContext *sl;
879 unsigned char *extradata;
880 unsigned char *extradata_end;
882 int marker_found = 0;
885 s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
886 s->last_pic = av_mallocz(sizeof(*s->last_pic));
887 s->next_pic = av_mallocz(sizeof(*s->next_pic));
888 if (!s->next_pic || !s->last_pic || !s->cur_pic) {
889 ret = AVERROR(ENOMEM);
893 s->cur_pic->f = av_frame_alloc();
894 s->last_pic->f = av_frame_alloc();
895 s->next_pic->f = av_frame_alloc();
896 if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
897 return AVERROR(ENOMEM);
899 if ((ret = ff_h264_decode_init(avctx)) < 0)
902 // we will overwrite it later during decoding
903 av_frame_free(&h->cur_pic.f);
905 av_frame_free(&h->last_pic_for_ec.f);
907 ff_h264dsp_init(&h->h264dsp, 8, 1);
908 av_assert0(h->sps.bit_depth_chroma == 0);
909 ff_h264_pred_init(&h->hpc, AV_CODEC_ID_SVQ3, 8, 1);
910 ff_videodsp_init(&h->vdsp, 8);
/* flat 16 scaling matrices: SVQ3 uses no custom quant scaling */
912 memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t));
913 memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t));
915 avctx->bits_per_raw_sample = 8;
916 h->sps.bit_depth_luma = 8;
917 h->chroma_format_idc = 1;
919 ff_hpeldsp_init(&s->hdsp, avctx->flags);
920 ff_tpeldsp_init(&s->tdsp);
924 h->flags = avctx->flags;
926 h->sps.chroma_format_idc = 1;
927 h->picture_structure = PICT_FRAME;
/* SVQ3 is always full-range 4:2:0 */
928 avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
929 avctx->color_range = AVCOL_RANGE_JPEG;
931 h->slice_ctx[0].chroma_qp[0] = h->slice_ctx[0].chroma_qp[1] = 4;
932 h->chroma_x_shift = h->chroma_y_shift = 1;
935 s->thirdpel_flag = 1;
936 s->has_watermark = 0;
938 /* prowl for the "SEQH" marker in the extradata */
939 extradata = (unsigned char *)avctx->extradata;
940 extradata_end = avctx->extradata + avctx->extradata_size;
942 for (m = 0; m + 8 < avctx->extradata_size; m++) {
943 if (!memcmp(extradata, "SEQH", 4)) {
951 /* if a match was found, parse the extra data */
955 int unk0, unk1, unk2, unk3, unk4;
957 size = AV_RB32(&extradata[4]);
958 if (size > extradata_end - extradata - 8) {
959 ret = AVERROR_INVALIDDATA;
962 init_get_bits(&gb, extradata + 8, size * 8);
964 /* 'frame size code' and optional 'width, height' */
965 frame_size_code = get_bits(&gb, 3);
966 switch (frame_size_code) {
/* code 7: explicit 12-bit width/height follow */
996 avctx->width = get_bits(&gb, 12);
997 avctx->height = get_bits(&gb, 12);
1001 s->halfpel_flag = get_bits1(&gb);
1002 s->thirdpel_flag = get_bits1(&gb);
1004 /* unknown fields */
1005 unk0 = get_bits1(&gb);
1006 unk1 = get_bits1(&gb);
1007 unk2 = get_bits1(&gb);
1008 unk3 = get_bits1(&gb);
1010 h->low_delay = get_bits1(&gb);
1013 unk4 = get_bits1(&gb);
1015 av_log(avctx, AV_LOG_DEBUG, "Unknown fields %d %d %d %d %d\n",
1016 unk0, unk1, unk2, unk3, unk4);
1018 if (skip_1stop_8data_bits(&gb) < 0) {
1019 ret = AVERROR_INVALIDDATA;
1023 s->has_watermark = get_bits1(&gb);
1024 avctx->has_b_frames = !h->low_delay;
/* watermark: decompress the logo and derive the slice XOR key from it */
1025 if (s->has_watermark) {
1027 unsigned watermark_width = svq3_get_ue_golomb(&gb);
1028 unsigned watermark_height = svq3_get_ue_golomb(&gb);
1029 int u1 = svq3_get_ue_golomb(&gb);
1030 int u2 = get_bits(&gb, 8);
1031 int u3 = get_bits(&gb, 2);
1032 int u4 = svq3_get_ue_golomb(&gb);
1033 unsigned long buf_len = watermark_width *
1034 watermark_height * 4;
1035 int offset = get_bits_count(&gb) + 7 >> 3;
/* reject dimensions whose 4-byte-per-pixel size would overflow */
1038 if (watermark_height <= 0 ||
1039 (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height) {
1044 buf = av_malloc(buf_len);
1046 ret = AVERROR(ENOMEM);
1049 av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1050 watermark_width, watermark_height);
1051 av_log(avctx, AV_LOG_DEBUG,
1052 "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1053 u1, u2, u3, u4, offset);
1054 if (uncompress(buf, &buf_len, extradata + 8 + offset,
1055 size - offset) != Z_OK) {
1056 av_log(avctx, AV_LOG_ERROR,
1057 "could not uncompress watermark logo\n");
1062 s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1063 s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1064 av_log(avctx, AV_LOG_DEBUG,
1065 "watermark key %#"PRIx32"\n", s->watermark_key);
1068 av_log(avctx, AV_LOG_ERROR,
1069 "this svq3 file contains watermark which need zlib support compiled in\n");
/* derive macroblock geometry from the final frame dimensions */
1076 h->width = avctx->width;
1077 h->height = avctx->height;
1078 h->mb_width = (h->width + 15) / 16;
1079 h->mb_height = (h->height + 15) / 16;
1080 h->mb_stride = h->mb_width + 1;
1081 h->mb_num = h->mb_width * h->mb_height;
1082 h->b_stride = 4 * h->mb_width;
1083 s->h_edge_pos = h->mb_width * 16;
1084 s->v_edge_pos = h->mb_height * 16;
1086 if ((ret = ff_h264_alloc_tables(h)) < 0) {
1087 av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
/* error path: release everything allocated so far */
1093 svq3_decode_end(avctx);
/* Release all side-data buffers of a picture and unref its frame.
 * The H264Picture struct itself is not freed here. */
1097 static void free_picture(AVCodecContext *avctx, H264Picture *pic)
1100 for (i = 0; i < 2; i++) {
1101 av_buffer_unref(&pic->motion_val_buf[i]);
1102 av_buffer_unref(&pic->ref_index_buf[i]);
1104 av_buffer_unref(&pic->mb_type_buf);
1106 av_frame_unref(pic->f);
/**
 * Allocate the frame buffer and per-picture side data (mb_type, motion
 * vectors, ref indices) for @p pic, lazily allocating side data only on
 * first use. Also sets up the shared edge-emulation buffer and caches the
 * line sizes. Returns 0 on success, AVERROR(ENOMEM) on allocation failure
 * (error path frees via free_picture()).
 */
1109 static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
1111 SVQ3Context *s = avctx->priv_data;
1112 H264Context *h = &s->h;
1113 H264SliceContext *sl = &h->slice_ctx[0];
1114 const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1115 const int mb_array_size = h->mb_stride * h->mb_height;
1116 const int b4_stride = h->mb_width * 4 + 1;
1117 const int b4_array_size = b4_stride * h->mb_height * 4;
/* side data is allocated once per picture slot and then reused */
1120 if (!pic->motion_val_buf[0]) {
1123 pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1124 if (!pic->mb_type_buf)
1125 return AVERROR(ENOMEM);
/* offset past the padding row/column used by neighbour accesses */
1126 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1128 for (i = 0; i < 2; i++) {
1129 pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1130 pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1131 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1132 ret = AVERROR(ENOMEM);
1136 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1137 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* B pictures are never used as references */
1140 pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1142 ret = ff_get_buffer(avctx, pic->f,
1143 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1147 if (!sl->edge_emu_buffer) {
1148 sl->edge_emu_buffer = av_mallocz_array(pic->f->linesize[0], 17);
1149 if (!sl->edge_emu_buffer)
1150 return AVERROR(ENOMEM);
1153 sl->linesize = pic->f->linesize[0];
1154 sl->uvlinesize = pic->f->linesize[1];
/* error path */
1158 free_picture(avctx, pic);
/**
 * AVCodec.decode callback: decode one SVQ3 frame from @p avpkt into @p data
 * (an AVFrame).
 *
 * NOTE(review): this chunk of the file is elided — several lines (error
 * checks, closing braces, gotos) between the visible statements are not
 * shown here; comments below describe only what the visible code does.
 *
 * @param avctx     codec context; priv_data is the SVQ3Context
 * @param data      output AVFrame (referenced via av_frame_ref on success)
 * @param got_frame presumably set to 1 when a frame is output — the store is
 *                  in an elided line; TODO confirm
 * @param avpkt     input packet; size 0 signals end-of-stream flush
 */
1162 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1163 int *got_frame, AVPacket *avpkt)
1165 SVQ3Context *s = avctx->priv_data;
1166 H264Context *h = &s->h;
1167 H264SliceContext *sl = &h->slice_ctx[0];
1168 int buf_size = avpkt->size;
1173 /* special case for last picture */
/* Empty packet = flush: emit the buffered next_pic once (delayed-output
 * decoders hold one frame back), then remember we already delivered it. */
1174 if (buf_size == 0) {
1175 if (s->next_pic->f->data[0] && !h->low_delay && !s->last_frame_output) {
1176 ret = av_frame_ref(data, s->next_pic->f);
1179 s->last_frame_output = 1;
/* Start decoding at macroblock (0,0). */
1185 sl->mb_x = sl->mb_y = sl->mb_xy = 0;
/* Watermarked streams are encrypted: work on a padded private copy of the
 * packet (decryption presumably happens in an elided line — TODO confirm). */
1187 if (s->watermark_key) {
1188 av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
1190 return AVERROR(ENOMEM);
1191 memcpy(s->buf, avpkt->data, buf_size);
/* Bind the bitstream reader to the (possibly copied) packet data. */
1197 init_get_bits(&h->gb, buf, 8 * buf_size);
1199 if (svq3_decode_slice_header(avctx))
1202 h->pict_type = sl->slice_type;
/* For I/P frames the just-decoded "next" picture becomes the new backward
 * reference; B-frames keep both references as-is. */
1204 if (h->pict_type != AV_PICTURE_TYPE_B)
1205 FFSWAP(H264Picture*, s->next_pic, s->last_pic);
1207 av_frame_unref(s->cur_pic->f);
1209 /* for skipping the frame */
1210 s->cur_pic->f->pict_type = h->pict_type;
1211 s->cur_pic->f->key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1213 ret = get_buffer(avctx, s->cur_pic);
1217 h->cur_pic_ptr = s->cur_pic;
1218 h->cur_pic = *s->cur_pic;
/* Precompute per-block pixel offsets from the scan8 table for both field
 * parities: [0..15] luma, [16..47] chroma, [48..] uses doubled line stride. */
1220 for (i = 0; i < 16; i++) {
1221 h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * sl->linesize * ((scan8[i] - scan8[0]) >> 3);
1222 h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * sl->linesize * ((scan8[i] - scan8[0]) >> 3);
1224 for (i = 0; i < 16; i++) {
1225 h->block_offset[16 + i] =
1226 h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * sl->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1227 h->block_offset[48 + 16 + i] =
1228 h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * sl->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
/* Inter frame but no backward reference available (e.g. after a seek):
 * synthesize a mid-grey reference (luma 0, chroma 0x80) so decoding can
 * proceed instead of failing outright. */
1231 if (h->pict_type != AV_PICTURE_TYPE_I) {
1232 if (!s->last_pic->f->data[0]) {
1233 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1234 av_frame_unref(s->last_pic->f);
1235 ret = get_buffer(avctx, s->last_pic);
1238 memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
1239 memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
1240 s->last_pic->f->linesize[1]);
1241 memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
1242 s->last_pic->f->linesize[2]);
/* Same recovery for a B-frame's missing forward reference. */
1245 if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
1246 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1247 av_frame_unref(s->next_pic->f);
1248 ret = get_buffer(avctx, s->next_pic);
1251 memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
1252 memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
1253 s->next_pic->f->linesize[1]);
1254 memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
1255 s->next_pic->f->linesize[2]);
1259 if (avctx->debug & FF_DEBUG_PICT_INFO)
1260 av_log(h->avctx, AV_LOG_DEBUG,
1261 "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1262 av_get_picture_type_char(h->pict_type),
1263 s->halfpel_flag, s->thirdpel_flag,
1264 s->adaptive_quant, h->slice_ctx[0].qscale, sl->slice_num);
/* Honor the caller's skip_frame discard policy (action is in elided lines). */
1266 if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1267 avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1268 avctx->skip_frame >= AVDISCARD_ALL)
/* A damaged upcoming P-frame taints dependent pictures; the handling for
 * the B-frame case is elided here. */
1271 if (s->next_p_frame_damaged) {
1272 if (h->pict_type == AV_PICTURE_TYPE_B)
1275 s->next_p_frame_damaged = 0;
/* Frame-number bookkeeping: slice_num is an 8-bit counter, so offsets are
 * wrapped modulo 256. For B-frames a zero or non-decreasing offset means
 * the picture id ordering is broken. */
1278 if (h->pict_type == AV_PICTURE_TYPE_B) {
1279 h->frame_num_offset = sl->slice_num - h->prev_frame_num;
1281 if (h->frame_num_offset < 0)
1282 h->frame_num_offset += 256;
1283 if (h->frame_num_offset == 0 ||
1284 h->frame_num_offset >= h->prev_frame_num_offset) {
1285 av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1289 h->prev_frame_num = h->frame_num;
1290 h->frame_num = sl->slice_num;
1291 h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1293 if (h->prev_frame_num_offset < 0)
1294 h->prev_frame_num_offset += 256;
/* Initialize the reference cache for both prediction lists: interior
 * entries available (1), the remainder marked unavailable. */
1297 for (m = 0; m < 2; m++) {
1299 for (i = 0; i < 4; i++) {
1301 for (j = -1; j < 4; j++)
1302 sl->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1304 sl->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
/* Main macroblock loop, raster order over the whole picture. */
1308 for (sl->mb_y = 0; sl->mb_y < h->mb_height; sl->mb_y++) {
1309 for (sl->mb_x = 0; sl->mb_x < h->mb_width; sl->mb_x++) {
1311 sl->mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
/* Near the end of the current slice (and only padding bits remain):
 * jump to the next slice and re-parse its header. */
1313 if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1314 ((get_bits_count(&h->gb) & 7) == 0 ||
1315 show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1316 skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1317 h->gb.size_in_bits = 8 * buf_size;
1319 if (svq3_decode_slice_header(avctx))
1322 /* TODO: support s->mb_skip_run */
1325 mb_type = svq3_get_ue_golomb(&h->gb);
/* mb_type is rebased per picture type (adjustment lines elided);
 * anything above 33 is out of range. */
1327 if (h->pict_type == AV_PICTURE_TYPE_I)
1329 else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1331 if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1332 av_log(h->avctx, AV_LOG_ERROR,
1333 "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
/* Skipped MBs with no coded residual need no reconstruction pass. */
1337 if (mb_type != 0 || sl->cbp)
1338 ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
/* Record the MB type for reference pictures (used by direct prediction
 * in later B-frames); -1 marks intra/non-predictable blocks. */
1340 if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1341 h->cur_pic.mb_type[sl->mb_x + sl->mb_y * h->mb_stride] =
1342 (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
/* Per-row progress callback for AV_CODEC_CAP_DRAW_HORIZ_BAND users. */
1345 ff_draw_horiz_band(avctx, s->cur_pic->f,
1346 s->last_pic->f->data[0] ? s->last_pic->f : NULL,
1347 16 * sl->mb_y, 16, h->picture_structure, 0,
/* Sanity-check bitstream consumption: warn on truncated pictures. */
1351 left = buf_size*8 - get_bits_count(&h->gb);
1353 if (sl->mb_y != h->mb_height || sl->mb_x != h->mb_width) {
1354 av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, sl->mb_y, sl->mb_x, left);
1355 //av_hex_dump(stderr, buf+buf_size-8, 8);
1359 av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
/* Output: B-frames and low-delay streams emit the current picture
 * immediately; otherwise emit the previous reference (one-frame delay). */
1363 if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1364 ret = av_frame_ref(data, s->cur_pic->f);
1365 else if (s->last_pic->f->data[0])
1366 ret = av_frame_ref(data, s->last_pic->f);
1370 /* Do not output the last pic after seeking. */
1371 if (s->last_pic->f->data[0] || h->low_delay)
/* Rotate: the current picture becomes the pending "next" reference;
 * B-frames are never used as references, so just drop their buffer. */
1374 if (h->pict_type != AV_PICTURE_TYPE_B) {
1375 FFSWAP(H264Picture*, s->cur_pic, s->next_pic);
1377 av_frame_unref(s->cur_pic->f);
/**
 * AVCodec.close callback: release everything owned by the SVQ3Context.
 *
 * Teardown order matters: free_picture() releases per-picture side data
 * first, then the AVFrames themselves, then the H264Picture wrappers.
 * h->cur_pic is a shallow copy of *s->cur_pic (see svq3_decode_frame), so
 * it is zeroed rather than freed to avoid a double free when
 * ff_h264_free_context() runs.
 *
 * NOTE(review): surrounding lines (opening brace, final return) are elided
 * in this view of the file.
 */
1383 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1385 SVQ3Context *s = avctx->priv_data;
1386 H264Context *h = &s->h;
1388 free_picture(avctx, s->cur_pic);
1389 free_picture(avctx, s->next_pic);
1390 free_picture(avctx, s->last_pic);
1391 av_frame_free(&s->cur_pic->f);
1392 av_frame_free(&s->next_pic->f);
1393 av_frame_free(&s->last_pic->f);
1394 av_freep(&s->cur_pic);
1395 av_freep(&s->next_pic);
1396 av_freep(&s->last_pic);
/* cur_pic was a value copy of *s->cur_pic; clear it so the H264 teardown
 * below does not touch already-freed buffers. */
1398 memset(&h->cur_pic, 0, sizeof(h->cur_pic));
1400 ff_h264_free_context(h);
1408 AVCodec ff_svq3_decoder = {
1410 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1411 .type = AVMEDIA_TYPE_VIDEO,
1412 .id = AV_CODEC_ID_SVQ3,
1413 .priv_data_size = sizeof(SVQ3Context),
1414 .init = svq3_decode_init,
1415 .close = svq3_decode_end,
1416 .decode = svq3_decode_frame,
1417 .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
1420 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,