2 * Copyright (c) 2003 The FFmpeg Project
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
45 #include "libavutil/attributes.h"
48 #include "mpegutils.h"
51 #include "h264data.h" // FIXME FIXME FIXME
53 #include "h264_mvpred.h"
56 #include "rectangle.h"
58 #include "vdpau_internal.h"
/* Private state of the SVQ3 decoder.
 * NOTE(review): several members are not visible in this excerpt; only the
 * fields shown below are documented. */
72 typedef struct SVQ3Context {
/* Reference pictures: next_pic is used for B-frame/PREDICT_MODE prediction,
 * last_pic for forward prediction (see svq3_mc_dir_part()). */
77     H264Picture *next_pic;
78     H264Picture *last_pic;
/* XOR key applied to slice data when the stream carries a watermark
 * (derived from the uncompressed watermark logo in svq3_decode_init()). */
85     uint32_t watermark_key;
89     int next_p_frame_damaged;
/* Set once the delayed final frame has been returned on a flush packet. */
92     int last_frame_output;
/* Inter-macroblock motion-compensation modes: full-, half- and third-pel
 * precision, plus PREDICT_MODE (MVs scaled from the next picture). */
95 #define FULLPEL_MODE 1
96 #define HALFPEL_MODE 2
97 #define THIRDPEL_MODE 3
98 #define PREDICT_MODE 4
100 /* dual scan (from some older h264 draft)
/* SVQ3-specific 4x4 coefficient scan order (values are x + y*4 offsets);
 * selected in svq3_decode_block() for type 2 blocks. */
109 static const uint8_t svq3_scan[16] = {
110     0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
111     2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
112     0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
113     0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
/* Zigzag scan for the 16 luma DC coefficients; entries are offsets into the
 * macroblock coefficient buffer (x*16 + y*64). */
116 static const uint8_t luma_dc_zigzag_scan[16] = {
117     0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
118     3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
119     1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
120     3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
/* Maps a luma-prediction VLC code to a pair of intra prediction indices
 * (used as svq3_pred_0[vlc][0..1] in svq3_decode_mb()).
 * NOTE(review): the first rows of the initializer are not visible here. */
123 static const uint8_t svq3_pred_0[25][2] = {
126     { 0, 2 }, { 1, 1 }, { 2, 0 },
127     { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
128     { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
129     { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
130     { 2, 4 }, { 3, 3 }, { 4, 2 },
/* Intra 4x4 prediction-mode table indexed as
 * svq3_pred_1[top_mode + 1][left_mode + 1][code]; -1 marks an invalid
 * combination (rejected as "weird prediction" in svq3_decode_mb()). */
135 static const int8_t svq3_pred_1[6][6][5] = {
136     { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
137       { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
138     { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
139       { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
140     { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
141       { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
142     { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
143       { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
144     { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
145       { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
146     { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
147       { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/* Run/level lookup for short DCT VLC codes: [0] = inter, [1] = intra
 * (indexed by the "intra" flag computed in svq3_decode_block()).
 * NOTE(review): the struct's member declarations are not visible in this
 * excerpt; presumably { run, level } — confirm against the full file. */
150 static const struct {
153 } svq3_dct_tables[2][16] = {
154     { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
155       { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
156     { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
157       { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/* Dequantization multipliers indexed by qscale (0..31); results are scaled
 * down by >> 20 with 0x80000 rounding in the IDCT routines below. */
160 static const uint32_t svq3_dequant_coeff[32] = {
161      3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
162      9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
163     24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
164     61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
167 static int svq3_decode_end(AVCodecContext *avctx);
/* Dequantize and inverse-transform the 4x4 luma DC coefficient block.
 * The 1-D transform uses the fixed 13/17/7 integer basis; the second pass
 * folds in the dequant multiplier with 0x80000 (= 1 << 19) rounding before
 * the >> 20 normalization.
 * NOTE(review): declarations of temp/stride/i and some closing braces are
 * not visible in this excerpt. */
169 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
171     const int qmul = svq3_dequant_coeff[qp];
/* output row offsets for the four transform rows */
175     static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
/* first (horizontal) butterfly pass into temp[] */
177     for (i = 0; i < 4; i++) {
178         const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
179         const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
180         const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
181         const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
183         temp[4 * i + 0] = z0 + z3;
184         temp[4 * i + 1] = z1 + z2;
185         temp[4 * i + 2] = z1 - z2;
186         temp[4 * i + 3] = z0 - z3;
/* second (vertical) pass: transform columns, dequantize, round, store */
189     for (i = 0; i < 4; i++) {
190         const int offset = x_offset[i];
191         const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
192         const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
193         const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
194         const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
196         output[stride *  0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
197         output[stride *  2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
198         output[stride *  8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
199         output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
/* Dequantize, inverse-transform and add a 4x4 residual block to dst.
 * dc selects special DC handling: dc == 1 uses the fixed 1538 multiplier,
 * otherwise the DC coefficient is pre-scaled by qmul.
 * NOTE(review): some declarations and closing braces are missing from this
 * excerpt. */
204 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
205                         int stride, int qp, int dc)
207     const int qmul = svq3_dequant_coeff[qp];
211         dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
212                                 : qmul * (block[0] >> 3) / 2);
/* horizontal butterfly pass, in place */
216     for (i = 0; i < 4; i++) {
217         const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
218         const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
219         const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
220         const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
222         block[0 + 4 * i] = z0 + z3;
223         block[1 + 4 * i] = z1 + z2;
224         block[2 + 4 * i] = z1 - z2;
225         block[3 + 4 * i] = z0 - z3;
/* vertical pass: dequantize, add rounded DC, clip into dst */
228     for (i = 0; i < 4; i++) {
229         const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
230         const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
231         const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
232         const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
233         const int rr = (dc + 0x80000);
235         dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
236         dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
237         dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
238         dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
/* residual is consumed; clear it for the next block */
241     memset(block, 0, 16 * sizeof(int16_t));
/* Decode run/level coefficient pairs for one block.
 * type selects the scan pattern (0: luma DC zigzag, 1: zigzag, 2: svq3,
 * 3: chroma DC); "intra" is derived as 3*type >> 2 and picks the VLC table.
 * Returns nonzero on error (the error paths are not visible in this
 * excerpt). */
244 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
245                                     int index, const int type)
247     static const uint8_t *const scan_patterns[4] =
248     { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
250     int run, level, sign, limit;
252     const int intra           = 3 * type >> 2;
253     const uint8_t *const scan = scan_patterns[type];
/* outer loop widens the run limit: 16 (or 8 for intra) then +8 per pass */
255     for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
256         for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
/* svq3_get_ue_golomb() signals an invalid code via a negative value */
257             if ((int32_t)vlc < 0)
/* even codes are negative levels (sign = -1), odd are positive */
260             sign     = (vlc & 1) ? 0 : -1;
267             } else if (vlc < 4) {
272                 level = (vlc + 9 >> 2) - run;
/* short codes come from the run/level lookup table */
276                 run   = svq3_dct_tables[intra][vlc].run;
277                 level = svq3_dct_tables[intra][vlc].level;
280                 level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
283                 level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
/* reject runs that step past the current coefficient limit */
288             if ((index += run) >= limit)
/* apply sign via XOR/subtract and store at the scan position */
291             block[scan[index]] = (level ^ sign) - sign;
/* Motion-compensate one partition (luma, then chroma unless GRAY).
 * (mx, my) is the motion vector in full-pel (or third-pel grid) units, dxy
 * the sub-pel interpolation selector; dir picks last_pic (0) or next_pic,
 * avg selects averaging vs. plain put.
 * NOTE(review): several declarations and the emulated-edge condition lines
 * are missing from this excerpt. */
302 static inline void svq3_mc_dir_part(SVQ3Context *s,
303                                     int x, int y, int width, int height,
304                                     int mx, int my, int dxy,
305                                     int thirdpel, int dir, int avg)
307     H264Context *h = &s->h;
308     H264SliceContext *sl = &h->slice_ctx[0];
/* dir == 0: forward prediction from last_pic, else backward from next_pic */
309     const H264Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
312     int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
/* clamp MVs that reference pixels outside the padded frame edges */
317     if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
318         my < 0 || my >= s->v_edge_pos - height - 1) {
320         mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
321         my = av_clip(my, -16, s->v_edge_pos - height + 15);
324     /* form component predictions */
325     dest = h->cur_pic.f->data[0] + x + y * sl->linesize;
326     src  = pic->f->data[0] + mx + my * sl->linesize;
/* out-of-frame reference: synthesize edge pixels into the emu buffer */
329         h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src,
330                                  sl->linesize, sl->linesize,
331                                  width + 1, height + 1,
332                                  mx, my, s->h_edge_pos, s->v_edge_pos);
333         src = sl->edge_emu_buffer;
/* third-pel uses the tpel DSP tables, otherwise half-pel hpel tables */
336         (avg ? s->tdsp.avg_tpel_pixels_tab
337              : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, sl->linesize,
340         (avg ? s->hdsp.avg_pixels_tab
341              : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, sl->linesize,
344     if (!(h->flags & AV_CODEC_FLAG_GRAY)) {
/* scale MV and block size to chroma (4:2:0) resolution */
345         mx     = mx + (mx < (int) x) >> 1;
346         my     = my + (my < (int) y) >> 1;
348         height = height >> 1;
/* chroma planes: Cb (i = 1) and Cr (i = 2) */
351         for (i = 1; i < 3; i++) {
352             dest = h->cur_pic.f->data[i] + (x >> 1) + (y >> 1) * sl->uvlinesize;
353             src  = pic->f->data[i] + mx + my * sl->uvlinesize;
356                 h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src,
357                                          sl->uvlinesize, sl->uvlinesize,
358                                          width + 1, height + 1,
359                                          mx, my, (s->h_edge_pos >> 1),
361                 src = sl->edge_emu_buffer;
364                 (avg ? s->tdsp.avg_tpel_pixels_tab
365                      : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
369                 (avg ? s->hdsp.avg_pixels_tab
370                      : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/* Decode motion vectors for all partitions of a macroblock and run motion
 * compensation for prediction direction dir.  size encodes the partition
 * geometry, mode one of FULLPEL/HALFPEL/THIRDPEL/PREDICT.
 * Returns 0 on success, negative on an invalid MV VLC.
 * NOTE(review): some conditional and closing lines are missing from this
 * excerpt; the PREDICT_MODE scaling branches below appear to distinguish
 * forward vs. backward temporal scaling — confirm against the full file. */
377 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
380     int i, j, k, mx, my, dx, dy, x, y;
381     H264Context *h = &s->h;
382     H264SliceContext *sl = &h->slice_ctx[0];
/* derive partition width/height (16, 8 or 4) from the size code */
383     const int part_width    = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
384     const int part_height   = 16 >> ((unsigned)(size + 1) / 3);
385     const int extra_width   = (mode == PREDICT_MODE) ? -16 * 6 : 0;
/* MV clip bounds in sixth-pel units */
386     const int h_edge_pos    = 6 * (s->h_edge_pos - part_width) - extra_width;
387     const int v_edge_pos    = 6 * (s->v_edge_pos - part_height) - extra_width;
389     for (i = 0; i < 16; i += part_height)
390         for (j = 0; j < 16; j += part_width) {
391             const int b_xy = (4 * sl->mb_x + (j >> 2)) +
392                              (4 * sl->mb_y + (i >> 2)) * h->b_stride;
394             x = 16 * sl->mb_x + j;
395             y = 16 * sl->mb_y + i;
/* k: H.264-style 4x4 block index within the macroblock */
396             k = (j >> 2 & 1) + (i >> 1 & 2) +
397                 (j >> 1 & 4) + (i & 8);
399             if (mode != PREDICT_MODE) {
400                 pred_motion(h, sl, k, part_width >> 2, dir, 1, &mx, &my);
/* PREDICT_MODE: scale the co-located MV of the next picture */
402                 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
403                 my = s->next_pic->motion_val[0][b_xy][1] << 1;
406                     mx = mx * h->frame_num_offset /
407                          h->prev_frame_num_offset + 1 >> 1;
408                     my = my * h->frame_num_offset /
409                          h->prev_frame_num_offset + 1 >> 1;
411                     mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
412                          h->prev_frame_num_offset + 1 >> 1;
413                     my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
414                          h->prev_frame_num_offset + 1 >> 1;
418             /* clip motion vector prediction to frame border */
419             mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
420             my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
422             /* get (optional) motion vector differential */
423             if (mode == PREDICT_MODE) {
426                 dy = svq3_get_se_golomb(&h->gb);
427                 dx = svq3_get_se_golomb(&h->gb);
429                 if (dx == INVALID_VLC || dy == INVALID_VLC) {
430                     av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
435             /* compute motion vector */
436             if (mode == THIRDPEL_MODE) {
/* round the sixth-pel prediction to third-pel and add the differential;
 * fx/fy are the full-pel part, dxy the third-pel phase */
438                 mx  = (mx + 1 >> 1) + dx;
439                 my  = (my + 1 >> 1) + dy;
440                 fx  = (unsigned)(mx + 0x3000) / 3 - 0x1000;
441                 fy  = (unsigned)(my + 0x3000) / 3 - 0x1000;
442                 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
444                 svq3_mc_dir_part(s, x, y, part_width, part_height,
445                                  fx, fy, dxy, 1, dir, avg);
448             } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
449                 mx  = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
450                 my  = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
451                 dxy = (mx & 1) + 2 * (my & 1);
453                 svq3_mc_dir_part(s, x, y, part_width, part_height,
454                                  mx >> 1, my >> 1, dxy, 0, dir, avg);
/* FULLPEL_MODE: round sixth-pel prediction to full-pel */
458                 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
459                 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
461                 svq3_mc_dir_part(s, x, y, part_width, part_height,
462                                  mx, my, 0, 0, dir, avg);
467             /* update mv_cache */
468             if (mode != PREDICT_MODE) {
469                 int32_t mv = pack16to32(mx, my);
471                 if (part_height == 8 && i < 8) {
472                     AV_WN32A(sl->mv_cache[dir][scan8[k] + 1 * 8], mv);
474                     if (part_width == 8 && j < 8)
475                         AV_WN32A(sl->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
477                 if (part_width == 8 && j < 8)
478                     AV_WN32A(sl->mv_cache[dir][scan8[k] + 1], mv);
479                 if (part_width == 4 || part_height == 4)
480                     AV_WN32A(sl->mv_cache[dir][scan8[k]], mv);
483             /* write back motion vectors */
484             fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
485                            part_width >> 2, part_height >> 2, h->b_stride,
486                            pack16to32(mx, my), 4);
/* Decode one macroblock: classify mb_type (SKIP / INTER / INTRA4x4 /
 * INTRA16x16), fill prediction caches, decode MVs or intra modes, then
 * decode the coefficient blocks.  Returns 0 on success, negative on error.
 * NOTE(review): many interior lines (else branches, closing braces, error
 * returns) are missing from this excerpt. */
492 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
494     H264Context *h = &s->h;
495     H264SliceContext *sl = &h->slice_ctx[0];
496     int i, j, k, m, dir, mode;
500     const int mb_xy = sl->mb_xy;
501     const int b_xy  = 4 * sl->mb_x + 4 * sl->mb_y * h->b_stride;
/* edge macroblocks have fewer available neighbor samples */
503     sl->top_samples_available      = (sl->mb_y == 0) ? 0x33FF : 0xFFFF;
504     sl->left_samples_available     = (sl->mb_x == 0) ? 0x5F5F : 0xFFFF;
505     sl->topright_samples_available = 0xFFFF;
507     if (mb_type == 0) {           /* SKIP */
508         if (h->pict_type == AV_PICTURE_TYPE_P ||
509             s->next_pic->mb_type[mb_xy] == -1) {
510             svq3_mc_dir_part(s, 16 * sl->mb_x, 16 * sl->mb_y, 16, 16,
513             if (h->pict_type == AV_PICTURE_TYPE_B)
514                 svq3_mc_dir_part(s, 16 * sl->mb_x, 16 * sl->mb_y, 16, 16,
517             mb_type = MB_TYPE_SKIP;
/* B-frame skip: re-use the co-located macroblock's partitioning */
519             mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
520             if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
522             if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
525             mb_type = MB_TYPE_16x16;
527     } else if (mb_type < 8) {     /* INTER */
/* one flag bit distinguishes third-pel vs. half-pel when both are allowed */
528         if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
529             mode = THIRDPEL_MODE;
530         else if (s->halfpel_flag &&
531                  s->thirdpel_flag == !get_bits1(&h->gb))
537         /* note ref_cache should contain here:
/* fill mv_cache/ref_cache from left, top, top-right and top-left
 * neighbors for both prediction directions */
545         for (m = 0; m < 2; m++) {
546             if (sl->mb_x > 0 && sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
547                 for (i = 0; i < 4; i++)
548                     AV_COPY32(sl->mv_cache[m][scan8[0] - 1 + i * 8],
549                               h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
551                 for (i = 0; i < 4; i++)
552                     AV_ZERO32(sl->mv_cache[m][scan8[0] - 1 + i * 8]);
555                 memcpy(sl->mv_cache[m][scan8[0] - 1 * 8],
556                        h->cur_pic.motion_val[m][b_xy - h->b_stride],
557                        4 * 2 * sizeof(int16_t));
558                 memset(&sl->ref_cache[m][scan8[0] - 1 * 8],
559                        (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
561                 if (sl->mb_x < h->mb_width - 1) {
562                     AV_COPY32(sl->mv_cache[m][scan8[0] + 4 - 1 * 8],
563                               h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
564                     sl->ref_cache[m][scan8[0] + 4 - 1 * 8] =
565                         (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
566                          sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
568                     sl->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
570                     AV_COPY32(sl->mv_cache[m][scan8[0] - 1 - 1 * 8],
571                               h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
572                     sl->ref_cache[m][scan8[0] - 1 - 1 * 8] =
573                         (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
575                     sl->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
577                 memset(&sl->ref_cache[m][scan8[0] - 1 * 8 - 1],
578                        PART_NOT_AVAILABLE, 8);
/* only B pictures need the second (backward) direction */
580             if (h->pict_type != AV_PICTURE_TYPE_B)
584         /* decode motion vector(s) and form prediction(s) */
585         if (h->pict_type == AV_PICTURE_TYPE_P) {
586             if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
588         } else {        /* AV_PICTURE_TYPE_B */
590                 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
593                 for (i = 0; i < 4; i++)
594                     memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
595                            0, 4 * 2 * sizeof(int16_t));
598                 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
601                 for (i = 0; i < 4; i++)
602                     memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
603                            0, 4 * 2 * sizeof(int16_t));
607         mb_type = MB_TYPE_16x16;
608     } else if (mb_type == 8 || mb_type == 33) {   /* INTRA4x4 */
609         memset(sl->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* load left/top neighbor intra modes into the prediction cache */
613                 for (i = 0; i < 4; i++)
614                     sl->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
615                 if (sl->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
616                     sl->left_samples_available = 0x5F5F;
619                 sl->intra4x4_pred_mode_cache[4 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
620                 sl->intra4x4_pred_mode_cache[5 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
621                 sl->intra4x4_pred_mode_cache[6 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
622                 sl->intra4x4_pred_mode_cache[7 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
624                 if (sl->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
625                     sl->top_samples_available = 0x33FF;
628             /* decode prediction codes for luma blocks */
629             for (i = 0; i < 16; i += 2) {
630                 vlc = svq3_get_ue_golomb(&h->gb);
633                     av_log(h->avctx, AV_LOG_ERROR,
634                            "luma prediction:%"PRIu32"\n", vlc);
638                 left = &sl->intra4x4_pred_mode_cache[scan8[i] - 1];
639                 top  = &sl->intra4x4_pred_mode_cache[scan8[i] - 8];
/* one VLC yields two 4x4 modes via the svq3_pred_0/1 tables */
641                 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
642                 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
644                 if (left[1] == -1 || left[2] == -1) {
645                     av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
649         } else {    /* mb_type == 33, DC_128_PRED block type */
650             for (i = 0; i < 4; i++)
651                 memset(&sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
654         write_back_intra_pred_mode(h, sl);
657             ff_h264_check_intra4x4_pred_mode(h, sl);
659             sl->top_samples_available  = (sl->mb_y == 0) ? 0x33FF : 0xFFFF;
660             sl->left_samples_available = (sl->mb_x == 0) ? 0x5F5F : 0xFFFF;
662             for (i = 0; i < 4; i++)
663                 memset(&sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
665             sl->top_samples_available  = 0x33FF;
666             sl->left_samples_available = 0x5F5F;
669         mb_type = MB_TYPE_INTRA4x4;
670     } else {                      /* INTRA16x16 */
671         dir = i_mb_type_info[mb_type - 8].pred_mode;
672         dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
674         if ((sl->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, sl, dir, 0)) < 0) {
675             av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
676             return sl->intra16x16_pred_mode;
679         cbp     = i_mb_type_info[mb_type - 8].cbp;
680         mb_type = MB_TYPE_INTRA16x16;
/* intra MB in a P/B picture invalidates its motion vectors */
683     if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
684         for (i = 0; i < 4; i++)
685             memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
686                    0, 4 * 2 * sizeof(int16_t));
687         if (h->pict_type == AV_PICTURE_TYPE_B) {
688             for (i = 0; i < 4; i++)
689                 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
690                        0, 4 * 2 * sizeof(int16_t));
693     if (!IS_INTRA4x4(mb_type)) {
694         memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
696     if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
697         memset(sl->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
/* coded block pattern (not present for INTRA16x16, folded into mb_type) */
700     if (!IS_INTRA16x16(mb_type) &&
701         (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
702         if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48U){
703             av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
707         cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
708                                 : golomb_to_inter_cbp[vlc];
710     if (IS_INTRA16x16(mb_type) ||
711         (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
712         sl->qscale += svq3_get_se_golomb(&h->gb);
714         if (sl->qscale > 31u) {
715             av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", sl->qscale);
719     if (IS_INTRA16x16(mb_type)) {
720         AV_ZERO128(sl->mb_luma_dc[0] + 0);
721         AV_ZERO128(sl->mb_luma_dc[0] + 8);
722         if (svq3_decode_block(&h->gb, sl->mb_luma_dc[0], 0, 1)) {
723             av_log(h->avctx, AV_LOG_ERROR,
724                    "error while decoding intra luma dc\n");
/* decode residual 4x4 blocks for each coded 8x8 group */
730         const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
731         const int type  = ((sl->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
733         for (i = 0; i < 4; i++)
734             if ((cbp & (1 << i))) {
735                 for (j = 0; j < 4; j++) {
736                     k = index ? (1 * (j & 1) + 2 * (i & 1) +
737                                  2 * (j & 2) + 4 * (i & 2))
739                     sl->non_zero_count_cache[scan8[k]] = 1;
741                     if (svq3_decode_block(&h->gb, &sl->mb[16 * k], index, type)) {
742                         av_log(h->avctx, AV_LOG_ERROR,
743                                "error while decoding block\n");
/* chroma DC blocks (Cb, Cr) */
750             for (i = 1; i < 3; ++i)
751                 if (svq3_decode_block(&h->gb, &sl->mb[16 * 16 * i], 0, 3)) {
752                     av_log(h->avctx, AV_LOG_ERROR,
753                            "error while decoding chroma dc block\n");
/* chroma AC blocks */
758                 for (i = 1; i < 3; i++) {
759                     for (j = 0; j < 4; j++) {
761                         sl->non_zero_count_cache[scan8[k]] = 1;
763                         if (svq3_decode_block(&h->gb, &sl->mb[16 * k], 1, 1)) {
764                             av_log(h->avctx, AV_LOG_ERROR,
765                                    "error while decoding chroma ac block\n");
775     h->cur_pic.mb_type[mb_xy] = mb_type;
777     if (IS_INTRA(mb_type))
778         sl->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, sl, DC_PRED8x8, 1);
/* Parse an SVQ3 slice header: validate the 8-bit header byte, extract and
 * (optionally) de-watermark the slice payload into s->slice_buf, then read
 * slice type, qscale and flags.  Also resets the intra-prediction state of
 * neighboring macroblocks.  Returns 0 on success, negative AVERROR on error.
 * NOTE(review): several interior lines (declarations, some conditions and
 * returns) are missing from this excerpt. */
783 static int svq3_decode_slice_header(AVCodecContext *avctx)
785     SVQ3Context *s = avctx->priv_data;
786     H264Context *h = &s->h;
787     H264SliceContext *sl = &h->slice_ctx[0];
788     const int mb_xy = sl->mb_xy;
792     header = get_bits(&s->gb, 8);
/* low 5 bits must be 1 or 2 (slice start codes); bits 5-6 give the
 * length-field size and must be nonzero */
794     if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
796         av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
799         int slice_bits, slice_bytes, slice_length;
800         int length = header >> 5 & 3;
802         slice_length = show_bits(&s->gb, 8 * length);
803         slice_bits   = slice_length * 8;
804         slice_bytes  = slice_length + length - 1;
806         if (slice_bytes > get_bits_left(&s->gb)) {
807             av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
811         skip_bits(&s->gb, 8);
/* copy the slice into a private buffer so it can be de-watermarked */
813         av_fast_malloc(&s->slice_buf, &s->slice_size, slice_bytes + AV_INPUT_BUFFER_PADDING_SIZE);
815             return AVERROR(ENOMEM);
817         memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);
819         init_get_bits(&h->gb, s->slice_buf, slice_bits);
/* undo the watermark obfuscation by XORing the 32-bit word at offset 1 */
821         if (s->watermark_key) {
822             uint32_t header = AV_RL32(&h->gb.buffer[1]);
823             AV_WL32(&h->gb.buffer[1], header ^ s->watermark_key);
826             memmove(s->slice_buf, &s->slice_buf[slice_length], length - 1);
828         skip_bits_long(&s->gb, slice_bytes * 8);
831     if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
832         av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
836     sl->slice_type = golomb_to_pict_type[slice_id];
838     if ((header & 0x9F) == 2) {
/* mb_skip_run field width depends on the total macroblock count */
839         i              = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
840         sl->mb_skip_run = get_bits(&h->gb, i) -
841                          (sl->mb_y * h->mb_width + sl->mb_x);
847     sl->slice_num     = get_bits(&h->gb, 8);
848     sl->qscale        = get_bits(&h->gb, 5);
849     s->adaptive_quant = get_bits1(&h->gb);
854     if (s->has_watermark)
858     skip_bits(&h->gb, 2);
860     if (skip_1stop_8data_bits(&h->gb) < 0)
861         return AVERROR_INVALIDDATA;
863     /* reset intra predictors and invalidate motion vector references */
865         memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
866                -1, 4 * sizeof(int8_t));
867         memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - sl->mb_x],
868                -1, 8 * sizeof(int8_t) * sl->mb_x);
871         memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
872                -1, 8 * sizeof(int8_t) * (h->mb_width - sl->mb_x));
875             sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/* Initialize the SVQ3 decoder: allocate the three picture slots, set up the
 * shared H.264 machinery for 8-bit 4:2:0, then parse the "SEQH" block in
 * the extradata for frame size, pel flags and the optional zlib-compressed
 * watermark (from which watermark_key is derived).
 * Returns 0 on success, negative AVERROR on failure (error paths jump to a
 * cleanup label that calls svq3_decode_end()).
 * NOTE(review): several interior lines (declarations, some switch cases and
 * gotos) are missing from this excerpt. */
881 static av_cold int svq3_decode_init(AVCodecContext *avctx)
883     SVQ3Context *s = avctx->priv_data;
884     H264Context *h = &s->h;
885     H264SliceContext *sl;
887     unsigned char *extradata;
888     unsigned char *extradata_end;
890     int marker_found = 0;
/* the three picture slots: current, previous and next reference */
893     s->cur_pic  = av_mallocz(sizeof(*s->cur_pic));
894     s->last_pic = av_mallocz(sizeof(*s->last_pic));
895     s->next_pic = av_mallocz(sizeof(*s->next_pic));
896     if (!s->next_pic || !s->last_pic || !s->cur_pic) {
897         ret = AVERROR(ENOMEM);
901     s->cur_pic->f  = av_frame_alloc();
902     s->last_pic->f = av_frame_alloc();
903     s->next_pic->f = av_frame_alloc();
904     if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
905         return AVERROR(ENOMEM);
907     if ((ret = ff_h264_decode_init(avctx)) < 0)
910     // we will overwrite it later during decoding
911     av_frame_free(&h->cur_pic.f);
913     av_frame_free(&h->last_pic_for_ec.f);
/* force 8-bit, 4:2:0 H.264 helper state */
915     ff_h264dsp_init(&h->h264dsp, 8, 1);
916     av_assert0(h->sps.bit_depth_chroma == 0);
917     ff_h264_pred_init(&h->hpc, AV_CODEC_ID_SVQ3, 8, 1);
918     ff_videodsp_init(&h->vdsp, 8);
/* flat (identity) scaling matrices */
920     memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t));
921     memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t));
923     avctx->bits_per_raw_sample = 8;
924     h->sps.bit_depth_luma = 8;
925     h->chroma_format_idc = 1;
927     ff_hpeldsp_init(&s->hdsp, avctx->flags);
928     ff_tpeldsp_init(&s->tdsp);
932     h->flags = avctx->flags;
934     h->sps.chroma_format_idc = 1;
935     h->picture_structure = PICT_FRAME;
936     avctx->pix_fmt     = AV_PIX_FMT_YUVJ420P;
937     avctx->color_range = AVCOL_RANGE_JPEG;
939     h->slice_ctx[0].chroma_qp[0] = h->slice_ctx[0].chroma_qp[1] = 4;
940     h->chroma_x_shift = h->chroma_y_shift = 1;
943     s->thirdpel_flag = 1;
944     s->has_watermark = 0;
946     /* prowl for the "SEQH" marker in the extradata */
947     extradata     = (unsigned char *)avctx->extradata;
948     extradata_end = avctx->extradata + avctx->extradata_size;
950         for (m = 0; m + 8 < avctx->extradata_size; m++) {
951             if (!memcmp(extradata, "SEQH", 4)) {
959     /* if a match was found, parse the extra data */
963         int unk0, unk1, unk2, unk3, unk4;
965         size = AV_RB32(&extradata[4]);
966         if (size > extradata_end - extradata - 8) {
967             ret = AVERROR_INVALIDDATA;
970         init_get_bits(&gb, extradata + 8, size * 8);
972         /* 'frame size code' and optional 'width, height' */
973         frame_size_code = get_bits(&gb, 3);
974         switch (frame_size_code) {
1001             avctx->height = 240;
/* explicit dimensions follow only for the escape code */
1004             avctx->width  = get_bits(&gb, 12);
1005             avctx->height = get_bits(&gb, 12);
1009         s->halfpel_flag  = get_bits1(&gb);
1010         s->thirdpel_flag = get_bits1(&gb);
1012         /* unknown fields */
1013         unk0 = get_bits1(&gb);
1014         unk1 = get_bits1(&gb);
1015         unk2 = get_bits1(&gb);
1016         unk3 = get_bits1(&gb);
1018         h->low_delay = get_bits1(&gb);
1021         unk4 = get_bits1(&gb);
1023         av_log(avctx, AV_LOG_DEBUG, "Unknown fields %d %d %d %d %d\n",
1024                unk0, unk1, unk2, unk3, unk4);
1026         if (skip_1stop_8data_bits(&gb) < 0) {
1027             ret = AVERROR_INVALIDDATA;
1031         s->has_watermark  = get_bits1(&gb);
1032         avctx->has_b_frames = !h->low_delay;
1033         if (s->has_watermark) {
/* the watermark logo is zlib-compressed; its checksum becomes the
 * 32-bit XOR key used on slice data */
1035             unsigned watermark_width  = svq3_get_ue_golomb(&gb);
1036             unsigned watermark_height = svq3_get_ue_golomb(&gb);
1037             int u1                    = svq3_get_ue_golomb(&gb);
1038             int u2                    = get_bits(&gb, 8);
1039             int u3                    = get_bits(&gb, 2);
1040             int u4                    = svq3_get_ue_golomb(&gb);
1041             unsigned long buf_len     = watermark_width *
1042                                         watermark_height * 4;
1043             int offset                = get_bits_count(&gb) + 7 >> 3;
/* guard the buf_len multiplication against overflow */
1046             if (watermark_height <= 0 ||
1047                 (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height) {
1052             buf = av_malloc(buf_len);
1054                 ret = AVERROR(ENOMEM);
1057             av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1058                    watermark_width, watermark_height);
1059             av_log(avctx, AV_LOG_DEBUG,
1060                    "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1061                    u1, u2, u3, u4, offset);
1062             if (uncompress(buf, &buf_len, extradata + 8 + offset,
1063                            size - offset) != Z_OK) {
1064                 av_log(avctx, AV_LOG_ERROR,
1065                        "could not uncompress watermark logo\n");
1070             s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1071             s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1072             av_log(avctx, AV_LOG_DEBUG,
1073                    "watermark key %#"PRIx32"\n", s->watermark_key);
1076             av_log(avctx, AV_LOG_ERROR,
1077                    "this svq3 file contains watermark which need zlib support compiled in\n");
/* derive macroblock geometry from the (possibly extradata-set) size */
1084     h->width  = avctx->width;
1085     h->height = avctx->height;
1086     h->mb_width  = (h->width + 15) / 16;
1087     h->mb_height = (h->height + 15) / 16;
1088     h->mb_stride = h->mb_width + 1;
1089     h->mb_num    = h->mb_width * h->mb_height;
1090     h->b_stride = 4 * h->mb_width;
1091     s->h_edge_pos = h->mb_width * 16;
1092     s->v_edge_pos = h->mb_height * 16;
1094     if ((ret = ff_h264_alloc_tables(h)) < 0) {
1095         av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1101     svq3_decode_end(avctx);
/* Release all buffers attached to a picture (motion vectors, ref indices,
 * mb types) and unref its frame; the H264Picture struct itself is not
 * freed. */
1105 static void free_picture(AVCodecContext *avctx, H264Picture *pic)
1108     for (i = 0; i < 2; i++) {
1109         av_buffer_unref(&pic->motion_val_buf[i]);
1110         av_buffer_unref(&pic->ref_index_buf[i]);
1112     av_buffer_unref(&pic->mb_type_buf);
1114     av_frame_unref(pic->f);
/* Allocate the per-picture side buffers (lazily, on first use) and a frame
 * buffer for pic; also sets up the slice context's line sizes and the edge
 * emulation buffer.  Returns 0 on success, negative AVERROR on failure
 * (error path frees the picture via free_picture()).
 * NOTE(review): a few lines (error gotos, closing braces) are missing from
 * this excerpt. */
1117 static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
1119     SVQ3Context *s = avctx->priv_data;
1120     H264Context *h = &s->h;
1121     H264SliceContext *sl = &h->slice_ctx[0];
1122     const int big_mb_num    = h->mb_stride * (h->mb_height + 1) + 1;
1123     const int mb_array_size = h->mb_stride * h->mb_height;
1124     const int b4_stride     = h->mb_width * 4 + 1;
1125     const int b4_array_size = b4_stride * h->mb_height * 4;
/* side buffers are allocated once and reused across frames */
1128     if (!pic->motion_val_buf[0]) {
1131         pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1132         if (!pic->mb_type_buf)
1133             return AVERROR(ENOMEM);
/* offset so that mb_type[-stride-1] accesses stay in bounds */
1134         pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1136         for (i = 0; i < 2; i++) {
1137             pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1138             pic->ref_index_buf[i]  = av_buffer_allocz(4 * mb_array_size);
1139             if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1140                 ret = AVERROR(ENOMEM);
1144             pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1145             pic->ref_index[i]  = pic->ref_index_buf[i]->data;
/* B pictures are never used as references */
1148     pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1150     ret = ff_get_buffer(avctx, pic->f,
1151                         pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1155     if (!sl->edge_emu_buffer) {
1156         sl->edge_emu_buffer = av_mallocz_array(pic->f->linesize[0], 17);
1157         if (!sl->edge_emu_buffer)
1158             return AVERROR(ENOMEM);
1161     sl->linesize   = pic->f->linesize[0];
1162     sl->uvlinesize = pic->f->linesize[1];
1166     free_picture(avctx, pic);
/*
 * Decode one SVQ3 packet into an output frame.
 *
 * data      : output AVFrame (filled via av_frame_ref on the picture chosen
 *             for display)
 * got_frame : set when a frame is returned (assignment is in elided lines)
 * avpkt     : input packet; avpkt->size == 0 means "flush": emit the delayed
 *             next_pic once, then stop.
 *
 * NOTE(review): this extract has elided lines (braces, declarations, most
 * `if (ret < 0)` checks and goto/return paths). Comments below describe only
 * what the visible statements show; anything depending on elided code is
 * flagged as an assumption.
 */
1170 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1171 int *got_frame, AVPacket *avpkt)
1173 SVQ3Context *s = avctx->priv_data;
1174 H264Context *h = &s->h;
1175 H264SliceContext *sl = &h->slice_ctx[0];
1176 int buf_size = avpkt->size;
/* (declarations of ret, i, j, m, mb_type, left, buf are in elided lines) */
1181 /* special case for last picture */
/* Flush path: with B-frames (no low_delay) one decoded picture is still
 * buffered in next_pic; output it exactly once (guarded by last_frame_output). */
1182 if (buf_size == 0) {
1183 if (s->next_pic->f->data[0] && !h->low_delay && !s->last_frame_output) {
1184 ret = av_frame_ref(data, s->next_pic->f);
1187 s->last_frame_output = 1;
/* Reset macroblock position for the new frame. */
1193 sl->mb_x = sl->mb_y = sl->mb_xy = 0;
/* Watermarked streams: work on a private padded copy of the packet
 * (presumably descrambled in elided lines — confirm against full source). */
1195 if (s->watermark_key) {
1196 av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
1198 return AVERROR(ENOMEM);
1199 memcpy(s->buf, avpkt->data, buf_size);
/* Bit reader over the (possibly copied) payload; selection of `buf`
 * between avpkt->data and s->buf happens in elided lines. */
1205 ret = init_get_bits(&s->gb, buf, 8 * buf_size);
1209 if (svq3_decode_slice_header(avctx))
1212 h->pict_type = sl->slice_type;
/* Non-B picture: rotate references so the previous "next" becomes "last". */
1214 if (h->pict_type != AV_PICTURE_TYPE_B)
1215 FFSWAP(H264Picture*, s->next_pic, s->last_pic);
1217 av_frame_unref(s->cur_pic->f);
1219 /* for skipping the frame */
1220 s->cur_pic->f->pict_type = h->pict_type;
1221 s->cur_pic->f->key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1223 ret = get_buffer(avctx, s->cur_pic);
1227 h->cur_pic_ptr = s->cur_pic;
1228 h->cur_pic = *s->cur_pic;
/* Per-4x4-block pixel offsets in scan8 order: entries 0-15 are luma,
 * 16-47 chroma (uvlinesize); the 48+ mirrors use a doubled line stride
 * (presumably for a field/alternate layout — confirm in H.264 code). */
1230 for (i = 0; i < 16; i++) {
1231 h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * sl->linesize * ((scan8[i] - scan8[0]) >> 3);
1232 h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * sl->linesize * ((scan8[i] - scan8[0]) >> 3);
1234 for (i = 0; i < 16; i++) {
1235 h->block_offset[16 + i] =
1236 h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * sl->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1237 h->block_offset[48 + 16 + i] =
1238 h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * sl->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
/* Inter picture but no reference available (e.g. after seek): synthesize a
 * mid-gray reference (luma 0, chroma 0x80) so decoding can proceed. */
1241 if (h->pict_type != AV_PICTURE_TYPE_I) {
1242 if (!s->last_pic->f->data[0]) {
1243 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1244 av_frame_unref(s->last_pic->f);
1245 ret = get_buffer(avctx, s->last_pic);
1248 memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
1249 memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
1250 s->last_pic->f->linesize[1]);
1251 memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
1252 s->last_pic->f->linesize[2]);
/* Same fallback for the forward reference of a B picture. */
1255 if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
1256 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1257 av_frame_unref(s->next_pic->f);
1258 ret = get_buffer(avctx, s->next_pic);
1261 memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
1262 memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
1263 s->next_pic->f->linesize[1]);
1264 memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
1265 s->next_pic->f->linesize[2]);
1269 if (avctx->debug & FF_DEBUG_PICT_INFO)
1270 av_log(h->avctx, AV_LOG_DEBUG,
1271 "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1272 av_get_picture_type_char(h->pict_type),
1273 s->halfpel_flag, s->thirdpel_flag,
1274 s->adaptive_quant, h->slice_ctx[0].qscale, sl->slice_num);
/* Honor the caller's frame-skipping policy (skip target is elided). */
1276 if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1277 avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1278 avctx->skip_frame >= AVDISCARD_ALL)
/* A damaged P frame taints any B frame that references it; the action taken
 * for the B case is in an elided line. */
1281 if (s->next_p_frame_damaged) {
1282 if (h->pict_type == AV_PICTURE_TYPE_B)
1285 s->next_p_frame_damaged = 0;
/* B frame: frame numbers are slice_num values modulo 256. The offset of this
 * B frame from the previous reference must be non-zero and smaller than the
 * gap between the two surrounding references, else the picture id is bogus. */
1288 if (h->pict_type == AV_PICTURE_TYPE_B) {
1289 h->frame_num_offset = sl->slice_num - h->prev_frame_num;
1291 if (h->frame_num_offset < 0)
1292 h->frame_num_offset += 256;
1293 if (h->frame_num_offset == 0 ||
1294 h->frame_num_offset >= h->prev_frame_num_offset) {
1295 av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
/* Non-B: advance the frame-number history (mod 256). */
1299 h->prev_frame_num = h->frame_num;
1300 h->frame_num = sl->slice_num;
1301 h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1303 if (h->prev_frame_num_offset < 0)
1304 h->prev_frame_num_offset += 256;
/* Initialize the per-list reference cache: interior positions available (1);
 * the j == -1 / elided-bound positions marked PART_NOT_AVAILABLE. */
1307 for (m = 0; m < 2; m++) {
1309 for (i = 0; i < 4; i++) {
1311 for (j = -1; j < 4; j++)
1312 sl->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1314 sl->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
/* Main macroblock loop over the frame. */
1318 for (sl->mb_y = 0; sl->mb_y < h->mb_height; sl->mb_y++) {
1319 for (sl->mb_x = 0; sl->mb_x < h->mb_width; sl->mb_x++) {
1321 sl->mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
/* Fewer than 8 bits left and byte-aligned (or only zero padding remains):
 * the current slice is exhausted; parse the next slice header. */
1323 if ((get_bits_left(&h->gb)) <= 7) {
1324 if (((get_bits_count(&h->gb) & 7) == 0 ||
1325 show_bits(&h->gb, get_bits_left(&h->gb) & 7) == 0)) {
1327 if (svq3_decode_slice_header(avctx))
1330 /* TODO: support s->mb_skip_run */
1333 mb_type = svq3_get_ue_golomb(&h->gb);
/* mb_type adjustments for I/B pictures are in elided lines; > 33 is invalid. */
1335 if (h->pict_type == AV_PICTURE_TYPE_I)
1337 else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1339 if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1340 av_log(h->avctx, AV_LOG_ERROR,
1341 "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
/* Render the MB unless it is a skip (type 0) with no coded residual. */
1345 if (mb_type != 0 || sl->cbp)
1346 ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
/* Record mb_type for reference pictures (used when decoding later B frames;
 * -1 presumably means "not a P-coded MB" — confirm against svq3_decode_mb). */
1348 if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1349 h->cur_pic.mb_type[sl->mb_x + sl->mb_y * h->mb_stride] =
1350 (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
/* Notify the user app of each completed 16-pixel band (draw_horiz_band). */
1353 ff_draw_horiz_band(avctx, s->cur_pic->f,
1354 s->last_pic->f->data[0] ? s->last_pic->f : NULL,
1355 16 * sl->mb_y, 16, h->picture_structure, 0,
/* Consistency accounting: report incomplete pictures / leftover bits. */
1359 left = buf_size*8 - get_bits_count(&h->gb);
1361 if (sl->mb_y != h->mb_height || sl->mb_x != h->mb_width) {
1362 av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, sl->mb_y, sl->mb_x, left);
1363 //av_hex_dump(stderr, buf+buf_size-8, 8);
1367 av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
/* Output selection: B frames and low-delay streams display the picture just
 * decoded; otherwise display the reordered previous reference. */
1371 if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1372 ret = av_frame_ref(data, s->cur_pic->f);
1373 else if (s->last_pic->f->data[0])
1374 ret = av_frame_ref(data, s->last_pic->f);
1378 /* Do not output the last pic after seeking. */
1379 if (s->last_pic->f->data[0] || h->low_delay)
/* Reference bookkeeping: a non-B picture becomes the new "next" reference
 * (swap keeps cur_pic reusable); a B picture is never referenced, so drop it. */
1382 if (h->pict_type != AV_PICTURE_TYPE_B) {
1383 FFSWAP(H264Picture*, s->cur_pic, s->next_pic);
1385 av_frame_unref(s->cur_pic->f);
/*
 * Decoder teardown: release everything svq3_decode_init / get_buffer
 * allocated.
 *
 * Order matters: free_picture() releases each picture's side buffers
 * (motion_val_buf / ref_index_buf, per the visible get_buffer code), then the
 * AVFrames themselves are freed, then the H264Picture containers. h->cur_pic
 * is a shallow copy of *s->cur_pic (see svq3_decode_frame), so it is cleared
 * with memset rather than unreferenced — presumably to stop
 * ff_h264_free_context() from double-releasing buffers already freed above
 * (confirm against ff_h264_free_context).
 * NOTE(review): the trailing `return`/closing brace are elided in this extract.
 */
1391 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1393 SVQ3Context *s = avctx->priv_data;
1394 H264Context *h = &s->h;
/* Release per-picture side data for all three rotation slots. */
1396 free_picture(avctx, s->cur_pic);
1397 free_picture(avctx, s->next_pic);
1398 free_picture(avctx, s->last_pic);
/* Free the AVFrames, then the H264Picture structs that owned them. */
1399 av_frame_free(&s->cur_pic->f);
1400 av_frame_free(&s->next_pic->f);
1401 av_frame_free(&s->last_pic->f);
1402 av_freep(&s->cur_pic);
1403 av_freep(&s->next_pic);
1404 av_freep(&s->last_pic);
/* Scratch buffer used by svq3_decode_slice_header (allocated elsewhere). */
1405 av_freep(&s->slice_buf);
/* Wipe the shallow copy so the H264 teardown below sees no stale buffers. */
1407 memset(&h->cur_pic, 0, sizeof(h->cur_pic));
1409 ff_h264_free_context(h);
1417 AVCodec ff_svq3_decoder = {
1419 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1420 .type = AVMEDIA_TYPE_VIDEO,
1421 .id = AV_CODEC_ID_SVQ3,
1422 .priv_data_size = sizeof(SVQ3Context),
1423 .init = svq3_decode_init,
1424 .close = svq3_decode_end,
1425 .decode = svq3_decode_frame,
1426 .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
1429 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,