2 * Copyright (c) 2003 The Libav Project
4 * This file is part of Libav.
6 * Libav is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * Libav is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with Libav; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
45 #include "libavutil/attributes.h"
48 #include "mpegutils.h"
51 #include "h264data.h" // FIXME FIXME FIXME
53 #include "h264_mvpred.h"
56 #include "rectangle.h"
/* Decoder private context (only partially visible in this excerpt). */
71 typedef struct SVQ3Context {
/* Reference frames: last_pic is the previous reference, next_pic the
 * following one (selected by 'dir' in svq3_mc_dir_part()). */
76 H264Picture *next_pic;
77 H264Picture *last_pic;
/* 32-bit key XORed over slice data; derived from the decompressed
 * watermark logo in svq3_decode_init(). */
84 uint32_t watermark_key;
86 int next_p_frame_damaged;
/* Set once the delayed final frame has been emitted on the flush
 * (empty-packet) call of svq3_decode_frame(). */
89 int last_frame_output;
/* Per-macroblock motion-compensation modes (see svq3_mc_dir()). */
92 #define FULLPEL_MODE 1
93 #define HALFPEL_MODE 2
94 #define THIRDPEL_MODE 3
/* PREDICT_MODE: motion vectors are derived from the co-located block of
 * s->next_pic (scaled by frame_num offsets) instead of being coded. */
95 #define PREDICT_MODE 4
97 /* dual scan (from some older h264 draft)
/* SVQ3 4x4 coefficient scan order; entries encode x + 4 * y positions
 * (selected as scan_patterns[2] in svq3_decode_block()). */
106 static const uint8_t svq3_scan[16] = {
107 0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
108 2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
109 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
110 0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
/* Scan for the 16 luma DC coefficients; entries encode x * 16 + y * 64
 * offsets into the macroblock coefficient buffer
 * (scan_patterns[0] in svq3_decode_block()). */
113 static const uint8_t luma_dc_zigzag_scan[16] = {
114 0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
115 3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
116 1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
117 3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
/* Maps the luma-prediction VLC code to a pair of selector indices into
 * svq3_pred_1[][][] — see the INTRA4x4 path of svq3_decode_mb(), where
 * svq3_pred_0[vlc][0] / [1] pick candidate modes for two 4x4 blocks. */
120 static const uint8_t svq3_pred_0[25][2] = {
123 { 0, 2 }, { 1, 1 }, { 2, 0 },
124 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
125 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
126 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
127 { 2, 4 }, { 3, 3 }, { 4, 2 },
/* Candidate intra 4x4 prediction modes, indexed in svq3_decode_mb() as
 * [top mode + 1][left mode + 1][choice]; -1 marks an unavailable slot
 * (the caller rejects a resolved -1 as "weird prediction"). */
132 static const int8_t svq3_pred_1[6][6][5] = {
133 { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
134 { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
135 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
136 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
137 { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
138 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
139 { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
140 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
141 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
142 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
143 { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
144 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/* (run, level) pairs for the short DCT coefficient VLC codes; the outer
 * index is the 'intra' selector computed in svq3_decode_block(), the
 * inner index is the VLC code itself. */
147 static const struct {
150 } svq3_dct_tables[2][16] = {
151 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
152 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
153 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
154 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/* Dequantization multiplier per qscale value (0..31).  Products with
 * these values are scaled by 2^20: the IDCT routines below round with
 * "+ 0x80000 >> 20". */
157 static const uint32_t svq3_dequant_coeff[32] = {
158 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
159 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
160 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
161 61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
/*
 * Dequantize and inverse-transform the 4x4 luma DC coefficients and
 * scatter the results to the DC positions of the 16 luma sub-blocks.
 *
 * @param output destination coefficient buffer; rows are addressed via
 *               stride multiples plus x_offset[] (layout assumed to be
 *               the h264 macroblock coefficient arrangement — confirm)
 * @param input  the 16 luma DC coefficients, row-major 4x4
 * @param qp     quantizer index into svq3_dequant_coeff[]
 */
164 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
166 const int qmul = svq3_dequant_coeff[qp];
170 static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
/* horizontal butterfly pass (13/17/7 integer transform factors) */
172 for (i = 0; i < 4; i++) {
173 const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
174 const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
175 const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
176 const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
178 temp[4 * i + 0] = z0 + z3;
179 temp[4 * i + 1] = z1 + z2;
180 temp[4 * i + 2] = z1 - z2;
181 temp[4 * i + 3] = z0 - z3;
/* vertical pass: dequantize with qmul (2^20 fixed point, hence the
 * 0x80000 rounding term) and scatter to the sub-block DC slots */
184 for (i = 0; i < 4; i++) {
185 const int offset = x_offset[i];
186 const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
187 const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
188 const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
189 const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
191 output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
192 output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
193 output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
194 output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
/*
 * Dequantize, inverse-transform a 4x4 coefficient block and add the
 * result onto the destination pixels (with clipping to 0..255).
 *
 * @param dc non-zero selects special DC handling: dc == 1 uses the
 *           fixed 1538 scale, otherwise block[0] is dequantized via
 *           qmul (NOTE(review): exact dc calling convention inferred
 *           from the two visible branches — confirm against callers)
 */
199 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
200 int stride, int qp, int dc)
202 const int qmul = svq3_dequant_coeff[qp];
206 dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
207 : qmul * (block[0] >> 3) / 2);
/* horizontal butterfly pass (same 13/17/7 factors as the DC IDCT) */
211 for (i = 0; i < 4; i++) {
212 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
213 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
214 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
215 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
217 block[0 + 4 * i] = z0 + z3;
218 block[1 + 4 * i] = z1 + z2;
219 block[2 + 4 * i] = z1 - z2;
220 block[3 + 4 * i] = z0 - z3;
/* vertical pass; the DC contribution is folded into the rounding term
 * rr, and each result is added to dst with saturation */
223 for (i = 0; i < 4; i++) {
224 const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
225 const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
226 const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
227 const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
228 const int rr = (dc + 0x80000);
230 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
231 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
232 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
233 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
/* coefficients are consumed in place: clear the block for reuse */
236 memset(block, 0, 16 * sizeof(int16_t));
/*
 * Decode run/level-coded residual coefficients from the bitstream into
 * block[], using the scan pattern selected by 'type'.
 *
 * @param index first coefficient position to decode (e.g. 1 to skip the
 *              DC coefficient of an INTRA16x16 AC block)
 * @param type  scan selector: 0 = luma DC zigzag, 1 = zigzag,
 *              2 = svq3 scan, 3 = chroma DC scan
 * @return 0 on success; error paths are not visible in this excerpt
 */
239 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
240 int index, const int type)
242 static const uint8_t *const scan_patterns[4] =
243 { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
245 int run, level, limit;
/* table selector derived from the scan type (0 for types 0 and 1) */
247 const int intra = 3 * type >> 2;
248 const uint8_t *const scan = scan_patterns[type];
/* vlc == 0 terminates a run segment; segments grow by 8 coefficients */
250 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
251 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
/* even VLC codes carry negated levels, applied below via
 * (level ^ sign) - sign */
252 int sign = (vlc & 1) ? 0 : -1;
259 } else if (vlc < 4) {
264 level = (vlc + 9 >> 2) - run;
/* longer codes: (run, level) comes straight from the table */
268 run = svq3_dct_tables[intra][vlc].run;
269 level = svq3_dct_tables[intra][vlc].level;
273 ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
277 ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
281 if ((index += run) >= limit)
284 block[scan[index]] = (level ^ sign) - sign;
/*
 * Motion-compensate one partition: luma plus, unless gray-only decoding
 * is requested, both chroma planes at half resolution.
 *
 * @param mx, my   motion position in the reference (units depend on
 *                 'thirdpel'; derived by the caller svq3_mc_dir())
 * @param dxy      sub-pel interpolation selector for the DSP tables
 * @param thirdpel non-zero selects the thirdpel (tpel) DSP functions,
 *                 zero the halfpel (hpel) ones
 * @param dir      0 = predict from s->last_pic, else from s->next_pic
 * @param avg      non-zero averages into dest instead of overwriting
 */
295 static inline void svq3_mc_dir_part(SVQ3Context *s,
296 int x, int y, int width, int height,
297 int mx, int my, int dxy,
298 int thirdpel, int dir, int avg)
300 H264Context *h = &s->h;
301 H264SliceContext *sl = &h->slice_ctx[0];
302 const H264Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
305 int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
/* reference area (partly) outside the frame: clamp the position and
 * fall back to the edge-emulation buffer below */
310 if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
311 my < 0 || my >= s->v_edge_pos - height - 1) {
313 mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
314 my = av_clip(my, -16, s->v_edge_pos - height + 15);
317 /* form component predictions */
318 dest = h->cur_pic.f->data[0] + x + y * sl->linesize;
319 src = pic->f->data[0] + mx + my * sl->linesize;
322 h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src,
323 sl->linesize, sl->linesize,
324 width + 1, height + 1,
325 mx, my, s->h_edge_pos, s->v_edge_pos);
326 src = sl->edge_emu_buffer;
/* luma: thirdpel or halfpel interpolation, put or avg per 'avg' */
329 (avg ? s->tdsp.avg_tpel_pixels_tab
330 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, sl->linesize,
333 (avg ? s->hdsp.avg_pixels_tab
334 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, sl->linesize,
/* chroma at half resolution; skipped entirely for gray output */
337 if (!(h->flags & AV_CODEC_FLAG_GRAY)) {
338 mx = mx + (mx < (int) x) >> 1;
339 my = my + (my < (int) y) >> 1;
341 height = height >> 1;
344 for (i = 1; i < 3; i++) {
345 dest = h->cur_pic.f->data[i] + (x >> 1) + (y >> 1) * sl->uvlinesize;
346 src = pic->f->data[i] + mx + my * sl->uvlinesize;
349 h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src,
350 sl->uvlinesize, sl->uvlinesize,
351 width + 1, height + 1,
352 mx, my, (s->h_edge_pos >> 1),
354 src = sl->edge_emu_buffer;
357 (avg ? s->tdsp.avg_tpel_pixels_tab
358 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
362 (avg ? s->hdsp.avg_pixels_tab
363 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/*
 * Decode/derive motion vectors for every partition of the current
 * macroblock and perform motion compensation via svq3_mc_dir_part().
 *
 * @param size partition size code: part_width/part_height are derived
 *             from it below
 * @param mode FULLPEL/HALFPEL/THIRDPEL or PREDICT_MODE (co-located MVs
 *             from next_pic, no MV deltas read from the bitstream)
 * @return 0 on success, negative on an invalid MV VLC (error return
 *         lines are not visible in this excerpt)
 */
370 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
373 int i, j, k, mx, my, dx, dy, x, y;
374 H264Context *h = &s->h;
375 H264SliceContext *sl = &h->slice_ctx[0];
376 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
377 const int part_height = 16 >> ((unsigned)(size + 1) / 3);
378 const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
/* clip limits in 1/6-pel units (hence the factor 6) */
379 const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
380 const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
382 for (i = 0; i < 16; i += part_height)
383 for (j = 0; j < 16; j += part_width) {
384 const int b_xy = (4 * sl->mb_x + (j >> 2)) +
385 (4 * sl->mb_y + (i >> 2)) * h->b_stride;
387 x = 16 * sl->mb_x + j;
388 y = 16 * sl->mb_y + i;
/* k: scan8-compatible index of this partition's top-left 4x4 block */
389 k = (j >> 2 & 1) + (i >> 1 & 2) +
390 (j >> 1 & 4) + (i & 8);
/* MV prediction: from neighbours normally, or from the co-located
 * next_pic block scaled by the frame_num offsets in PREDICT_MODE */
392 if (mode != PREDICT_MODE) {
393 pred_motion(h, sl, k, part_width >> 2, dir, 1, &mx, &my);
395 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
396 my = s->next_pic->motion_val[0][b_xy][1] << 1;
399 mx = mx * h->frame_num_offset /
400 h->prev_frame_num_offset + 1 >> 1;
401 my = my * h->frame_num_offset /
402 h->prev_frame_num_offset + 1 >> 1;
404 mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
405 h->prev_frame_num_offset + 1 >> 1;
406 my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
407 h->prev_frame_num_offset + 1 >> 1;
411 /* clip motion vector prediction to frame border */
412 mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
413 my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
415 /* get (optional) motion vector differential */
416 if (mode == PREDICT_MODE) {
/* note: dy is read before dx */
419 dy = svq3_get_se_golomb(&h->gb);
420 dx = svq3_get_se_golomb(&h->gb);
422 if (dx == INVALID_VLC || dy == INVALID_VLC) {
423 av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
428 /* compute motion vector */
429 if (mode == THIRDPEL_MODE) {
/* fx/fy: fullpel part; dxy: thirdpel fraction for the DSP table */
431 mx = (mx + 1 >> 1) + dx;
432 my = (my + 1 >> 1) + dy;
433 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
434 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
435 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
437 svq3_mc_dir_part(s, x, y, part_width, part_height,
438 fx, fy, dxy, 1, dir, avg);
441 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
442 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
443 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
444 dxy = (mx & 1) + 2 * (my & 1);
446 svq3_mc_dir_part(s, x, y, part_width, part_height,
447 mx >> 1, my >> 1, dxy, 0, dir, avg);
/* remaining case: fullpel */
451 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
452 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
454 svq3_mc_dir_part(s, x, y, part_width, part_height,
455 mx, my, 0, 0, dir, avg);
460 /* update mv_cache */
461 if (mode != PREDICT_MODE) {
462 int32_t mv = pack16to32(mx, my);
464 if (part_height == 8 && i < 8) {
465 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1 * 8], mv);
467 if (part_width == 8 && j < 8)
468 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
470 if (part_width == 8 && j < 8)
471 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1], mv);
472 if (part_width == 4 || part_height == 4)
473 AV_WN32A(sl->mv_cache[dir][scan8[k]], mv);
476 /* write back motion vectors */
477 fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
478 part_width >> 2, part_height >> 2, h->b_stride,
479 pack16to32(mx, my), 4);
/*
 * Decode one macroblock: dispatch on mb_type (SKIP / INTER / INTRA4x4 /
 * INTRA16x16), perform prediction and motion compensation, then decode
 * the residual coefficient blocks.
 *
 * @param mb_type raw macroblock type from the bitstream; remapped to
 *                MB_TYPE_* constants before write-back
 * @return 0 on success, negative on bitstream errors (error return
 *         lines are not visible in this excerpt)
 */
485 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
487 H264Context *h = &s->h;
488 H264SliceContext *sl = &h->slice_ctx[0];
489 int i, j, k, m, dir, mode;
493 const int mb_xy = sl->mb_xy;
494 const int b_xy = 4 * sl->mb_x + 4 * sl->mb_y * h->b_stride;
/* neighbour availability masks for intra prediction; border
 * macroblocks get restricted masks */
496 sl->top_samples_available = (sl->mb_y == 0) ? 0x33FF : 0xFFFF;
497 sl->left_samples_available = (sl->mb_x == 0) ? 0x5F5F : 0xFFFF;
498 sl->topright_samples_available = 0xFFFF;
500 if (mb_type == 0) { /* SKIP */
/* P skip, or B skip with an intra co-located block: copy 16x16 */
501 if (h->pict_type == AV_PICTURE_TYPE_P ||
502 s->next_pic->mb_type[mb_xy] == -1) {
503 svq3_mc_dir_part(s, 16 * sl->mb_x, 16 * sl->mb_y, 16, 16,
506 if (h->pict_type == AV_PICTURE_TYPE_B)
507 svq3_mc_dir_part(s, 16 * sl->mb_x, 16 * sl->mb_y, 16, 16,
510 mb_type = MB_TYPE_SKIP;
/* B direct skip: reuse the co-located mb_type and PREDICT_MODE MC
 * in both directions */
512 mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
513 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
515 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
518 mb_type = MB_TYPE_16x16;
520 } else if (mb_type < 8) { /* INTER */
/* sub-pel mode: one flag bit selects between the enabled modes */
521 if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
522 mode = THIRDPEL_MODE;
523 else if (s->halfpel_flag &&
524 s->thirdpel_flag == !get_bits1(&h->gb))
530 /* note ref_cache should contain here:
/* fill mv_cache/ref_cache from the left, top, top-right and top-left
 * neighbours for both prediction directions; intra4x4_pred_mode == -1
 * marks an unavailable (intra/out-of-slice) neighbour */
538 for (m = 0; m < 2; m++) {
539 if (sl->mb_x > 0 && sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
540 for (i = 0; i < 4; i++)
541 AV_COPY32(sl->mv_cache[m][scan8[0] - 1 + i * 8],
542 h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
544 for (i = 0; i < 4; i++)
545 AV_ZERO32(sl->mv_cache[m][scan8[0] - 1 + i * 8]);
548 memcpy(sl->mv_cache[m][scan8[0] - 1 * 8],
549 h->cur_pic.motion_val[m][b_xy - h->b_stride],
550 4 * 2 * sizeof(int16_t));
551 memset(&sl->ref_cache[m][scan8[0] - 1 * 8],
552 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
554 if (sl->mb_x < h->mb_width - 1) {
555 AV_COPY32(sl->mv_cache[m][scan8[0] + 4 - 1 * 8],
556 h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
557 sl->ref_cache[m][scan8[0] + 4 - 1 * 8] =
558 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
559 sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
561 sl->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
563 AV_COPY32(sl->mv_cache[m][scan8[0] - 1 - 1 * 8],
564 h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
565 sl->ref_cache[m][scan8[0] - 1 - 1 * 8] =
566 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
568 sl->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
570 memset(&sl->ref_cache[m][scan8[0] - 1 * 8 - 1],
571 PART_NOT_AVAILABLE, 8);
/* P frames only use direction 0 */
573 if (h->pict_type != AV_PICTURE_TYPE_B)
577 /* decode motion vector(s) and form prediction(s) */
578 if (h->pict_type == AV_PICTURE_TYPE_P) {
579 if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
581 } else { /* AV_PICTURE_TYPE_B */
583 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
586 for (i = 0; i < 4; i++)
587 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
588 0, 4 * 2 * sizeof(int16_t));
591 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
594 for (i = 0; i < 4; i++)
595 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
596 0, 4 * 2 * sizeof(int16_t));
600 mb_type = MB_TYPE_16x16;
601 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
602 memset(sl->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* import neighbour 4x4 prediction modes into the cache; -1 entries
 * downgrade the availability masks */
606 for (i = 0; i < 4; i++)
607 sl->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
608 if (sl->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
609 sl->left_samples_available = 0x5F5F;
612 sl->intra4x4_pred_mode_cache[4 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
613 sl->intra4x4_pred_mode_cache[5 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
614 sl->intra4x4_pred_mode_cache[6 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
615 sl->intra4x4_pred_mode_cache[7 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
617 if (sl->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
618 sl->top_samples_available = 0x33FF;
621 /* decode prediction codes for luma blocks */
622 for (i = 0; i < 16; i += 2) {
623 vlc = svq3_get_ue_golomb(&h->gb);
626 av_log(h->avctx, AV_LOG_ERROR,
627 "luma prediction:%"PRIu32"\n", vlc);
/* two 4x4 modes are resolved per VLC via svq3_pred_0/svq3_pred_1 */
631 left = &sl->intra4x4_pred_mode_cache[scan8[i] - 1];
632 top = &sl->intra4x4_pred_mode_cache[scan8[i] - 8];
634 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
635 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
637 if (left[1] == -1 || left[2] == -1) {
638 av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
642 } else { /* mb_type == 33, DC_128_PRED block type */
643 for (i = 0; i < 4; i++)
644 memset(&sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
647 write_back_intra_pred_mode(h, sl);
650 ff_h264_check_intra4x4_pred_mode(h, sl);
652 sl->top_samples_available = (sl->mb_y == 0) ? 0x33FF : 0xFFFF;
653 sl->left_samples_available = (sl->mb_x == 0) ? 0x5F5F : 0xFFFF;
655 for (i = 0; i < 4; i++)
656 memset(&sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
658 sl->top_samples_available = 0x33FF;
659 sl->left_samples_available = 0x5F5F;
662 mb_type = MB_TYPE_INTRA4x4;
663 } else { /* INTRA16x16 */
/* remap the h264 pred_mode ordering to svq3's */
664 dir = i_mb_type_info[mb_type - 8].pred_mode;
665 dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
667 if ((sl->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, sl, dir, 0)) < 0) {
668 av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
669 return sl->intra16x16_pred_mode;
672 cbp = i_mb_type_info[mb_type - 8].cbp;
673 mb_type = MB_TYPE_INTRA16x16;
/* intra blocks in inter frames invalidate their motion vectors so
 * later prediction treats them as unavailable */
676 if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
677 for (i = 0; i < 4; i++)
678 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
679 0, 4 * 2 * sizeof(int16_t));
680 if (h->pict_type == AV_PICTURE_TYPE_B) {
681 for (i = 0; i < 4; i++)
682 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
683 0, 4 * 2 * sizeof(int16_t));
686 if (!IS_INTRA4x4(mb_type)) {
687 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
689 if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
690 memset(sl->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
/* coded block pattern (absent for INTRA16x16, which carries it in
 * mb_type, and for P skips) */
693 if (!IS_INTRA16x16(mb_type) &&
694 (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
695 if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48) {
696 av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
700 cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
701 : golomb_to_inter_cbp[vlc];
/* optional per-macroblock qscale delta */
703 if (IS_INTRA16x16(mb_type) ||
704 (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
705 sl->qscale += svq3_get_se_golomb(&h->gb);
707 if (sl->qscale > 31u) {
708 av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", sl->qscale);
/* INTRA16x16: the 4x4 luma DC block is coded separately */
712 if (IS_INTRA16x16(mb_type)) {
713 AV_ZERO128(sl->mb_luma_dc[0] + 0);
714 AV_ZERO128(sl->mb_luma_dc[0] + 8);
715 if (svq3_decode_block(&h->gb, sl->mb_luma_dc[0], 0, 1)) {
716 av_log(h->avctx, AV_LOG_ERROR,
717 "error while decoding intra luma dc\n");
/* luma residual: one 8x8 quadrant per cbp bit, four 4x4 blocks each;
 * index==1 skips the DC coefficient for INTRA16x16 blocks */
723 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
724 const int type = ((sl->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
726 for (i = 0; i < 4; i++)
727 if ((cbp & (1 << i))) {
728 for (j = 0; j < 4; j++) {
729 k = index ? (1 * (j & 1) + 2 * (i & 1) +
730 2 * (j & 2) + 4 * (i & 2))
732 sl->non_zero_count_cache[scan8[k]] = 1;
734 if (svq3_decode_block(&h->gb, &sl->mb[16 * k], index, type)) {
735 av_log(h->avctx, AV_LOG_ERROR,
736 "error while decoding block\n");
/* chroma: DC blocks, then AC blocks per plane */
743 for (i = 1; i < 3; ++i)
744 if (svq3_decode_block(&h->gb, &sl->mb[16 * 16 * i], 0, 3)) {
745 av_log(h->avctx, AV_LOG_ERROR,
746 "error while decoding chroma dc block\n");
751 for (i = 1; i < 3; i++) {
752 for (j = 0; j < 4; j++) {
754 sl->non_zero_count_cache[scan8[k]] = 1;
756 if (svq3_decode_block(&h->gb, &sl->mb[16 * k], 1, 1)) {
757 av_log(h->avctx, AV_LOG_ERROR,
758 "error while decoding chroma ac block\n");
/* write back the final macroblock type and chroma pred mode */
768 h->cur_pic.mb_type[mb_xy] = mb_type;
770 if (IS_INTRA(mb_type))
771 sl->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, sl, DC_PRED8x8, 1);
/*
 * Parse an SVQ3 slice header from s->gb, copy the (optionally
 * watermark-descrambled) slice payload into s->slice_buf and point
 * h->gb at it, then reset the per-slice prediction state.
 *
 * @return 0 on success, negative AVERROR on malformed headers (error
 *         return lines are not visible in this excerpt)
 */
776 static int svq3_decode_slice_header(AVCodecContext *avctx)
778 SVQ3Context *s = avctx->priv_data;
779 H264Context *h = &s->h;
780 H264SliceContext *sl = &h->slice_ctx[0];
781 const int mb_xy = sl->mb_xy;
/* header byte: low 5 bits = start code kind (1 or 2), bits 5..6 =
 * byte count of the following slice-length field */
785 header = get_bits(&s->gb, 8);
787 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
789 av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
792 int slice_bits, slice_bytes, slice_length;
793 int length = header >> 5 & 3;
795 slice_length = show_bits(&s->gb, 8 * length);
796 slice_bits = slice_length * 8;
797 slice_bytes = slice_length + length - 1;
799 if (slice_bytes > get_bits_left(&s->gb)) {
800 av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
804 skip_bits(&s->gb, 8);
/* copy the slice into a private, padded buffer so it can be
 * descrambled in place below */
806 av_fast_malloc(&s->slice_buf, &s->slice_size, slice_bytes + AV_INPUT_BUFFER_PADDING_SIZE);
808 return AVERROR(ENOMEM);
810 memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);
812 init_get_bits(&h->gb, s->slice_buf, slice_bits);
/* watermarked files: XOR-descramble 4 bytes of the slice data */
814 if (s->watermark_key) {
815 uint32_t header = AV_RL32(&h->gb.buffer[1]);
816 AV_WL32(&h->gb.buffer[1], header ^ s->watermark_key);
819 memcpy(s->slice_buf, &s->slice_buf[slice_length], length - 1);
821 skip_bits_long(&s->gb, slice_bytes * 8);
824 if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
825 av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
829 sl->slice_type = golomb_to_pict_type[slice_id];
/* header kind 2 carries an explicit macroblock start offset */
831 if ((header & 0x9F) == 2) {
832 i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
833 sl->mb_skip_run = get_bits(&h->gb, i) -
834 (sl->mb_y * h->mb_width + sl->mb_x);
840 sl->slice_num = get_bits(&h->gb, 8);
841 sl->qscale = get_bits(&h->gb, 5);
842 s->adaptive_quant = get_bits1(&h->gb);
/* unknown/reserved fields: 2 fixed bits plus a flag-terminated list
 * of bytes that are skipped */
851 skip_bits(&h->gb, 2);
853 while (get_bits1(&h->gb))
854 skip_bits(&h->gb, 8);
856 /* reset intra predictors and invalidate motion vector references */
858 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
859 -1, 4 * sizeof(int8_t));
860 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - sl->mb_x],
861 -1, 8 * sizeof(int8_t) * sl->mb_x);
864 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
865 -1, 8 * sizeof(int8_t) * (h->mb_width - sl->mb_x));
868 sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/*
 * One-time decoder initialization: allocate the three picture slots,
 * initialize the shared h264 machinery and DSP contexts, then parse the
 * SVQ3 "SEQH" header from the QuickTime ImageDescription extradata
 * (frame size, pel flags, optional zlib-compressed watermark).
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
874 static av_cold int svq3_decode_init(AVCodecContext *avctx)
876 SVQ3Context *s = avctx->priv_data;
877 H264Context *h = &s->h;
878 H264SliceContext *sl;
880 unsigned char *extradata;
881 unsigned char *extradata_end;
883 int marker_found = 0;
885 s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
886 s->last_pic = av_mallocz(sizeof(*s->last_pic));
887 s->next_pic = av_mallocz(sizeof(*s->next_pic));
888 if (!s->next_pic || !s->last_pic || !s->cur_pic) {
889 av_freep(&s->cur_pic);
890 av_freep(&s->last_pic);
891 av_freep(&s->next_pic);
892 return AVERROR(ENOMEM);
895 s->cur_pic->f = av_frame_alloc();
896 s->last_pic->f = av_frame_alloc();
897 s->next_pic->f = av_frame_alloc();
898 if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
899 return AVERROR(ENOMEM);
901 if (ff_h264_decode_init(avctx) < 0)
904 // we will overwrite it later during decoding
905 av_frame_free(&h->cur_pic.f);
/* 8-bit, 4:2:0 DSP/prediction contexts */
907 ff_h264dsp_init(&h->h264dsp, 8, 1);
908 ff_h264chroma_init(&h->h264chroma, 8);
909 ff_h264qpel_init(&h->h264qpel, 8);
910 ff_h264_pred_init(&h->hpc, AV_CODEC_ID_SVQ3, 8, 1);
911 ff_videodsp_init(&h->vdsp, 8);
/* flat (identity) scaling matrices: SVQ3 has no custom scaling lists */
913 memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t));
914 memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t));
916 h->sps.bit_depth_luma = 8;
917 h->chroma_format_idc = 1;
919 ff_hpeldsp_init(&s->hdsp, avctx->flags);
920 ff_tpeldsp_init(&s->tdsp);
924 h->flags = avctx->flags;
926 h->picture_structure = PICT_FRAME;
927 avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
928 avctx->color_range = AVCOL_RANGE_JPEG;
930 h->slice_ctx[0].chroma_qp[0] = h->slice_ctx[0].chroma_qp[1] = 4;
931 h->chroma_x_shift = h->chroma_y_shift = 1;
/* defaults in case no SEQH header is found */
934 s->thirdpel_flag = 1;
937 /* prowl for the "SEQH" marker in the extradata */
938 extradata = (unsigned char *)avctx->extradata;
939 extradata_end = avctx->extradata + avctx->extradata_size;
941 for (m = 0; m + 8 < avctx->extradata_size; m++) {
942 if (!memcmp(extradata, "SEQH", 4)) {
950 /* if a match was found, parse the extra data */
/* big-endian payload size follows the SEQH fourcc */
955 size = AV_RB32(&extradata[4]);
956 if (size > extradata_end - extradata - 8)
957 return AVERROR_INVALIDDATA;
958 init_get_bits(&gb, extradata + 8, size * 8);
960 /* 'frame size code' and optional 'width, height' */
961 frame_size_code = get_bits(&gb, 3);
962 switch (frame_size_code) {
/* code 7 (visible here): explicit 12-bit width and height */
992 avctx->width = get_bits(&gb, 12);
993 avctx->height = get_bits(&gb, 12);
997 s->halfpel_flag = get_bits1(&gb);
998 s->thirdpel_flag = get_bits1(&gb);
1000 /* unknown fields */
1006 h->low_delay = get_bits1(&gb);
/* flag-terminated run of skipped bits */
1011 while (get_bits1(&gb))
1014 s->unknown_flag = get_bits1(&gb);
1015 avctx->has_b_frames = !h->low_delay;
/* watermarked stream: decompress the logo with zlib and derive the
 * slice descrambling key from its checksum */
1016 if (s->unknown_flag) {
1018 unsigned watermark_width = svq3_get_ue_golomb(&gb);
1019 unsigned watermark_height = svq3_get_ue_golomb(&gb);
1020 int u1 = svq3_get_ue_golomb(&gb);
1021 int u2 = get_bits(&gb, 8);
1022 int u3 = get_bits(&gb, 2);
1023 int u4 = svq3_get_ue_golomb(&gb);
1024 unsigned long buf_len = watermark_width *
1025 watermark_height * 4;
1026 int offset = get_bits_count(&gb) + 7 >> 3;
/* overflow check before the width*height*4 allocation */
1029 if (watermark_height > 0 &&
1030 (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
1033 buf = av_malloc(buf_len);
1034 av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1035 watermark_width, watermark_height);
1036 av_log(avctx, AV_LOG_DEBUG,
1037 "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1038 u1, u2, u3, u4, offset);
1039 if (uncompress(buf, &buf_len, extradata + 8 + offset,
1040 size - offset) != Z_OK) {
1041 av_log(avctx, AV_LOG_ERROR,
1042 "could not uncompress watermark logo\n");
1046 s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1047 s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1048 av_log(avctx, AV_LOG_DEBUG,
1049 "watermark key %#"PRIx32"\n", s->watermark_key);
/* built without zlib: watermarked files cannot be decoded */
1052 av_log(avctx, AV_LOG_ERROR,
1053 "this svq3 file contains watermark which need zlib support compiled in\n");
/* derive macroblock grid geometry from the final dimensions */
1059 h->width = avctx->width;
1060 h->height = avctx->height;
1061 h->mb_width = (h->width + 15) / 16;
1062 h->mb_height = (h->height + 15) / 16;
1063 h->mb_stride = h->mb_width + 1;
1064 h->mb_num = h->mb_width * h->mb_height;
1065 h->b_stride = 4 * h->mb_width;
1066 s->h_edge_pos = h->mb_width * 16;
1067 s->v_edge_pos = h->mb_height * 16;
1069 if (ff_h264_alloc_tables(h) < 0) {
1070 av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1071 return AVERROR(ENOMEM);
/* Release all buffers owned by a picture (motion vectors, reference
 * indices, macroblock types) and unreference its frame. */
1077 static void free_picture(AVCodecContext *avctx, H264Picture *pic)
1080 for (i = 0; i < 2; i++) {
1081 av_buffer_unref(&pic->motion_val_buf[i]);
1082 av_buffer_unref(&pic->ref_index_buf[i]);
1084 av_buffer_unref(&pic->mb_type_buf);
1086 av_frame_unref(pic->f);
/*
 * Lazily allocate the per-picture side buffers (mb types, motion
 * vectors, ref indices), acquire the frame buffer itself, and cache
 * the line sizes (plus the edge-emulation buffer) in the slice context.
 *
 * @return 0 on success, a negative AVERROR code on allocation failure
 */
1089 static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
1091 SVQ3Context *s = avctx->priv_data;
1092 H264Context *h = &s->h;
1093 H264SliceContext *sl = &h->slice_ctx[0];
1094 const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1095 const int mb_array_size = h->mb_stride * h->mb_height;
1096 const int b4_stride = h->mb_width * 4 + 1;
1097 const int b4_array_size = b4_stride * h->mb_height * 4;
/* side buffers are allocated once and reused across frames */
1100 if (!pic->motion_val_buf[0]) {
1103 pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1104 if (!pic->mb_type_buf)
1105 return AVERROR(ENOMEM);
/* offset past the top/left guard rows so negative neighbour
 * indexing stays in bounds */
1106 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1108 for (i = 0; i < 2; i++) {
1109 pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1110 pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1111 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1112 ret = AVERROR(ENOMEM);
1116 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1117 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* B frames are never used as references */
1120 pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1122 ret = ff_get_buffer(avctx, pic->f,
1123 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1127 if (!sl->edge_emu_buffer) {
1128 sl->edge_emu_buffer = av_mallocz(pic->f->linesize[0] * 17);
1129 if (!sl->edge_emu_buffer)
1130 return AVERROR(ENOMEM);
1133 sl->linesize = pic->f->linesize[0];
1134 sl->uvlinesize = pic->f->linesize[1];
/* on the allocation error path everything acquired so far is freed */
1138 free_picture(avctx, pic);
1142 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1143 int *got_frame, AVPacket *avpkt)
/* NOTE(review): this is the interior of svq3_decode_frame(); the signature
 * and several interior lines (error checks, returns, closing braces) are
 * elided from this excerpt — do not assume the control flow is complete. */
1145 const uint8_t *buf = avpkt->data;
1146 SVQ3Context *s = avctx->priv_data;
1147 H264Context *h = &s->h;
1148 H264SliceContext *sl = &h->slice_ctx[0];
1149 int buf_size = avpkt->size;
1152 /* special case for last picture */
/* Flush path: an empty packet after the stream ends emits the buffered
 * next_pic exactly once (guarded by last_frame_output). */
1153 if (buf_size == 0) {
1154 if (s->next_pic->f->data[0] && !h->low_delay && !s->last_frame_output) {
1155 ret = av_frame_ref(data, s->next_pic->f);
1158 s->last_frame_output = 1;
/* Bit reader over the whole packet; the slice header parser below
 * consumes from it. */
1164 ret = init_get_bits(&s->gb, buf, 8 * buf_size);
1168 sl->mb_x = sl->mb_y = sl->mb_xy = 0;
1170 if (svq3_decode_slice_header(avctx))
1173 h->pict_type = sl->slice_type;
/* Reference management: for I/P frames the just-decoded picture becomes
 * the new "last" reference; B-frames keep both references as-is. */
1175 if (h->pict_type != AV_PICTURE_TYPE_B)
1176 FFSWAP(H264Picture*, s->next_pic, s->last_pic);
1178 av_frame_unref(s->cur_pic->f);
1180 /* for skipping the frame */
1181 s->cur_pic->f->pict_type = h->pict_type;
1182 s->cur_pic->f->key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1184 ret = get_buffer(avctx, s->cur_pic);
/* Mirror the SVQ3-private picture into the shared H264Context so the
 * common h264 macroblock code operates on it. */
1188 h->cur_pic_ptr = s->cur_pic;
1189 h->cur_pic = *s->cur_pic;
/* Precompute per-block pixel offsets for the 4x4 luma blocks (scan8
 * order); entries 48+ are the field/alternate variants using a doubled
 * line stride. */
1191 for (i = 0; i < 16; i++) {
1192 h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * sl->linesize * ((scan8[i] - scan8[0]) >> 3);
1193 h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * sl->linesize * ((scan8[i] - scan8[0]) >> 3);
/* Same table for the two chroma planes, using the chroma stride. */
1195 for (i = 0; i < 16; i++) {
1196 h->block_offset[16 + i] =
1197 h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * sl->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1198 h->block_offset[48 + 16 + i] =
1199 h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * sl->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
/* Missing-reference recovery: if a P/B frame arrives with no decoded
 * reference (e.g. after a seek), synthesize one filled with luma 0 and
 * neutral chroma 0x80 so decoding can proceed instead of failing. */
1202 if (h->pict_type != AV_PICTURE_TYPE_I) {
1203 if (!s->last_pic->f->data[0]) {
1204 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1205 ret = get_buffer(avctx, s->last_pic);
1208 memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
1209 memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
1210 s->last_pic->f->linesize[1]);
1211 memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
1212 s->last_pic->f->linesize[2]);
/* B-frames additionally need a forward reference (next_pic). */
1215 if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
1216 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1217 ret = get_buffer(avctx, s->next_pic);
1220 memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
1221 memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
1222 s->next_pic->f->linesize[1]);
1223 memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
1224 s->next_pic->f->linesize[2]);
1228 if (avctx->debug & FF_DEBUG_PICT_INFO)
1229 av_log(h->avctx, AV_LOG_DEBUG,
1230 "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1231 av_get_picture_type_char(h->pict_type),
1232 s->halfpel_flag, s->thirdpel_flag,
1233 s->adaptive_quant, h->slice_ctx[0].qscale, sl->slice_num);
/* Honor the caller's frame-skipping policy (skip B / non-key / all). */
1235 if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1236 avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1237 avctx->skip_frame >= AVDISCARD_ALL)
1240 if (s->next_p_frame_damaged) {
1241 if (h->pict_type == AV_PICTURE_TYPE_B)
1244 s->next_p_frame_damaged = 0;
/* Picture-number bookkeeping: slice_num is an 8-bit rolling counter, so
 * offsets are wrapped into [0,256). A B-frame must lie strictly between
 * its two references, otherwise the picture id is corrupt. */
1247 if (h->pict_type == AV_PICTURE_TYPE_B) {
1248 h->frame_num_offset = sl->slice_num - h->prev_frame_num;
1250 if (h->frame_num_offset < 0)
1251 h->frame_num_offset += 256;
1252 if (h->frame_num_offset == 0 ||
1253 h->frame_num_offset >= h->prev_frame_num_offset) {
1254 av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1258 h->prev_frame_num = h->frame_num;
1259 h->frame_num = sl->slice_num;
1260 h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1262 if (h->prev_frame_num_offset < 0)
1263 h->prev_frame_num_offset += 256;
/* Mark the column left of the macroblock (j == -1) unavailable in the
 * reference cache; interior entries default to 1. */
1266 for (m = 0; m < 2; m++) {
1268 for (i = 0; i < 4; i++) {
1270 for (j = -1; j < 4; j++)
1271 sl->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1273 sl->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
/* Main macroblock loop over the whole picture. */
1277 for (sl->mb_y = 0; sl->mb_y < h->mb_height; sl->mb_y++) {
1278 for (sl->mb_x = 0; sl->mb_x < h->mb_width; sl->mb_x++) {
1280 sl->mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
/* Fewer than 8 bits left and the remainder is zero padding: the
 * current slice is exhausted, so parse the next slice header. */
1282 if ((get_bits_left(&h->gb)) <= 7) {
1283 if (((get_bits_count(&h->gb) & 7) == 0 ||
1284 show_bits(&h->gb, get_bits_left(&h->gb) & 7) == 0)) {
1286 if (svq3_decode_slice_header(avctx))
1289 /* TODO: support s->mb_skip_run */
1292 mb_type = svq3_get_ue_golomb(&h->gb);
/* Remap the coded mb_type: the adjustments for I and B pictures are
 * elided from this excerpt (lines 1295/1297 missing). */
1294 if (h->pict_type == AV_PICTURE_TYPE_I)
1296 else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1298 if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1299 av_log(h->avctx, AV_LOG_ERROR,
1300 "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
1305 ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
/* Record mb_type for P frames (used when frames are reordered);
 * -1 marks intra / unsupported types. */
1307 if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1308 h->cur_pic.mb_type[sl->mb_x + sl->mb_y * h->mb_stride] =
1309 (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
/* Per-row progressive display callback (CAP_DRAW_HORIZ_BAND). */
1312 ff_draw_horiz_band(avctx, s->cur_pic->f,
1313 s->last_pic->f->data[0] ? s->last_pic->f : NULL,
1314 16 * sl->mb_y, 16, h->picture_structure, 0,
/* Output selection: B frames and low-delay streams emit the current
 * picture; otherwise the previous reference is emitted (one-frame
 * reordering delay). */
1318 if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1319 ret = av_frame_ref(data, s->cur_pic->f);
1320 else if (s->last_pic->f->data[0])
1321 ret = av_frame_ref(data, s->last_pic->f);
1325 /* Do not output the last pic after seeking. */
1326 if (s->last_pic->f->data[0] || h->low_delay)
/* Non-B pictures become the new forward reference for later B frames. */
1329 if (h->pict_type != AV_PICTURE_TYPE_B) {
1330 FFSWAP(H264Picture*, s->cur_pic, s->next_pic);
1332 av_frame_unref(s->cur_pic->f);
/**
 * Free all decoder state on codec close.
 *
 * Releases the three picture slots (current/next/last reference), their
 * AVFrame wrappers and the SVQ3 slice buffer, then tears down the shared
 * H264 context. (The opening brace and trailing "return 0;" are elided
 * from this excerpt.)
 */
1338 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1340 SVQ3Context *s = avctx->priv_data;
1341 H264Context *h = &s->h;
/* free_picture() releases the per-picture side buffers; the AVFrame and
 * the H264Picture struct itself are freed separately below. */
1343 free_picture(avctx, s->cur_pic);
1344 free_picture(avctx, s->next_pic);
1345 free_picture(avctx, s->last_pic);
1346 av_frame_free(&s->cur_pic->f);
1347 av_frame_free(&s->next_pic->f);
1348 av_frame_free(&s->last_pic->f);
1349 av_freep(&s->cur_pic);
1350 av_freep(&s->next_pic);
1351 av_freep(&s->last_pic);
1352 av_freep(&s->slice_buf);
/* h->cur_pic is a shallow copy of *s->cur_pic (see the decode loop), so
 * its buffers were already released above; only the struct is cleared to
 * keep ff_h264_free_context() from touching stale pointers.
 * NOTE(review): presumption based on the "h->cur_pic = *s->cur_pic"
 * assignment in the decode path — confirm against the full file. */
1354 memset(&h->cur_pic, 0, sizeof(h->cur_pic));
1356 ff_h264_free_context(h);
/* Codec registration entry for the SVQ3 decoder. The initializer is
 * truncated in this excerpt: .name (line 1362), the remaining capability
 * flags and the pix_fmts terminator are not visible. */
1361 AVCodec ff_svq3_decoder = {
1363 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1364 .type = AVMEDIA_TYPE_VIDEO,
1365 .id = AV_CODEC_ID_SVQ3,
1366 .priv_data_size = sizeof(SVQ3Context),
1367 .init = svq3_decode_init,
1368 .close = svq3_decode_end,
1369 .decode = svq3_decode_frame,
/* Draw-horiz-band matches the per-row ff_draw_horiz_band() call in the
 * macroblock loop of svq3_decode_frame(). */
1370 .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
1373 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,