2 * Copyright (c) 2003 The Libav Project
4 * This file is part of Libav.
6 * Libav is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * Libav is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with Libav; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
45 #include "libavutil/attributes.h"
48 #include "mpegutils.h"
51 #include "h264data.h" // FIXME FIXME FIXME
53 #include "h264_mvpred.h"
56 #include "rectangle.h"
/* Private state of the SVQ3 decoder.
 * NOTE(review): this extract is elided -- several members and the closing
 * "} SVQ3Context;" line are outside the visible lines; do not take this as
 * the complete struct. */
71 typedef struct SVQ3Context {
76 H264Picture *next_pic; /* reference picture used as dir==1 in motion comp. */
77 H264Picture *last_pic; /* reference picture used as dir==0 in motion comp. */
82 uint32_t watermark_key; /* XOR key applied to the slice bitstream when a watermark is present */
84 int next_p_frame_damaged; /* set when the following P-frame must be discarded */
87 int last_frame_output; /* the delayed final frame has already been returned */
/* Motion-compensation precision modes used by svq3_mc_dir() and
 * svq3_decode_mb(). */
#define FULLPEL_MODE  1   /* integer-pel motion vectors */
#define HALFPEL_MODE  2   /* half-pel motion vectors */
#define THIRDPEL_MODE 3   /* third-pel motion vectors (SVQ3-specific) */
#define PREDICT_MODE  4   /* vectors scaled from the co-located next picture */
/* Dual scan pattern (from an older H.264 draft): coefficient order inside a
 * 4x4 block, each entry encoded as x + y * 4. */
static const uint8_t svq3_scan[16] = {
    0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
    2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
    0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
    0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
};
/* Scan order for the 4x4 array of luma DC coefficients; each entry is an
 * offset into the macroblock coefficient buffer (16 per 4x4 block, 64 per
 * block row). */
static const uint8_t luma_dc_zigzag_scan[16] = {
    0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
    3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
    1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
    3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
};
/* Anti-diagonal traversal of a 5x5 grid: entry k is the { x, y } pair visited
 * k-th, walking the diagonals with alternating direction. Used to map a luma
 * prediction VLC to a pair of 4x4 intra prediction mode indices.
 * NOTE(review): the first and last diagonals were elided in this extract and
 * have been restored from the visible traversal pattern -- verify against
 * upstream before relying on them. */
static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 4, 3 }, { 3, 4 },
    { 4, 4 }
};
/* Candidate intra 4x4 prediction modes, indexed as
 * svq3_pred_1[top + 1][left + 1][rank], where top/left are the neighbours'
 * modes (-1 when unavailable). Unused trailing slots hold -1.
 * Consumed in svq3_decode_mb() via svq3_pred_0[]. */
static const int8_t svq3_pred_1[6][6][5] = {
    { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
      { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};
/* (run, level) pairs for the 16 shortest DCT coefficient VLC codes,
 * indexed as svq3_dct_tables[intra][vlc] in svq3_decode_block().
 * NOTE(review): member type restored as uint8_t (declaration lines were
 * elided); all stored values are non-negative either way. */
static const struct {
    uint8_t run;
    uint8_t level;
} svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};
/* Dequantization multiplier for each of the 32 qscale values. */
static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
     9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
    24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
    61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
};

/**
 * Dequantize and inverse-transform the 4x4 array of luma DC coefficients.
 *
 * @param output destination coefficient buffer; results are scattered to the
 *               DC position of each 4x4 block (stride 16 per block, rows
 *               0/2/8/10 and column offsets 0/16/64/80)
 * @param input  the 16 luma DC coefficients, row-major
 * @param qp     quantizer scale, 0..31 (indexes svq3_dequant_coeff)
 *
 * Fix: the final (z) * qmul + 0x80000 step previously overflowed signed int
 * for large coefficients (qmul reaches 141533), which is undefined behavior
 * in C. The multiply/add is now done in unsigned arithmetic (well-defined
 * wraparound) and the rounding shift on the value converted back to int, so
 * the produced bits are unchanged on two's-complement targets.
 */
void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
{
    const int qmul = svq3_dequant_coeff[qp];
#define stride 16
    int i;
    int temp[16];
    /* DC positions of the four 4x4 blocks within one MB row of blocks:
     * two 8x8 halves, hence the 0/1/4/5 block-row pattern. */
    static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };

    /* horizontal pass of the 13/17/7 integer transform */
    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
        const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
        const int z2 =  7 *  input[4 * i + 1] - 17 * input[4 * i + 3];
        const int z3 = 17 *  input[4 * i + 1] +  7 * input[4 * i + 3];

        temp[4 * i + 0] = z0 + z3;
        temp[4 * i + 1] = z1 + z2;
        temp[4 * i + 2] = z1 - z2;
        temp[4 * i + 3] = z0 - z3;
    }

    /* vertical pass, dequantization and 0x80000 rounding; unsigned multiply
     * avoids signed-overflow UB, the shift stays arithmetic via the int cast */
    for (i = 0; i < 4; i++) {
        const int offset = x_offset[i];
        const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
        const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
        const int z2 =  7 *  temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
        const int z3 = 17 *  temp[4 * 1 + i] +  7 * temp[4 * 3 + i];

        output[stride *  0 + offset] = (int)((z0 + z3) * (unsigned)qmul + 0x80000U) >> 20;
        output[stride *  2 + offset] = (int)((z1 + z2) * (unsigned)qmul + 0x80000U) >> 20;
        output[stride *  8 + offset] = (int)((z1 - z2) * (unsigned)qmul + 0x80000U) >> 20;
        output[stride * 10 + offset] = (int)((z0 - z3) * (unsigned)qmul + 0x80000U) >> 20;
    }
#undef stride
}
197 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
198 int stride, int qp, int dc)
200 const int qmul = svq3_dequant_coeff[qp];
204 dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
205 : qmul * (block[0] >> 3) / 2);
209 for (i = 0; i < 4; i++) {
210 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
211 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
212 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
213 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
215 block[0 + 4 * i] = z0 + z3;
216 block[1 + 4 * i] = z1 + z2;
217 block[2 + 4 * i] = z1 - z2;
218 block[3 + 4 * i] = z0 - z3;
221 for (i = 0; i < 4; i++) {
222 const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
223 const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
224 const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
225 const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
226 const int rr = (dc + 0x80000);
228 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
229 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
230 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
231 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
234 memset(block, 0, 16 * sizeof(int16_t));
/**
 * Decode the run/level exp-golomb-coded DCT coefficients of one block.
 *
 * @param gb    bitstream reader positioned at the coefficient data
 * @param block output coefficients, written at positions given by the scan
 * @param index first coefficient index to decode (e.g. 1 skips the DC)
 * @param type  scan selector: 0 luma DC, 1 zigzag, 2 dual scan, 3 chroma DC
 * @return 0 on success, negative when a run overshoots the block end
 *
 * NOTE(review): this extract is elided -- the declaration of vlc, the
 * type==3 run/level branch and the loop tail are not visible here.
 */
237 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
238 int index, const int type)
240 static const uint8_t *const scan_patterns[4] =
241 { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
243 int run, level, limit;
/* intra selector: 0 for types 0/1, 1 for type 2, 2 for type 3 */
245 const int intra = 3 * type >> 2;
246 const uint8_t *const scan = scan_patterns[type];
/* dual-scan blocks (limit starts at 8) take two passes; others only one */
248 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
249 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
/* the sign is folded into the code: odd -> positive, even -> negative */
250 int sign = (vlc & 1) ? 0 : -1;
257 } else if (vlc < 4) {
262 level = (vlc + 9 >> 2) - run;
/* short codes map straight through the per-mode run/level table */
266 run = svq3_dct_tables[intra][vlc].run;
267 level = svq3_dct_tables[intra][vlc].level;
271 ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
275 ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
/* overshooting the scan limit means a corrupt stream */
279 if ((index += run) >= limit)
/* (level ^ sign) - sign == sign ? -level : level */
282 block[scan[index]] = (level ^ sign) - sign;
/**
 * Motion-compensate one partition (luma, then both chroma planes at half
 * resolution) from either the past (dir==0, last_pic) or the future
 * (dir==1, next_pic) reference into the current picture.
 *
 * @param x,y            top-left of the partition in luma pixels
 * @param width,height   partition size (16, 8 or 4)
 * @param mx,my          motion offset relative to x/y, in full pels
 * @param dxy            sub-pel index into the tpel/hpel function tables
 * @param thirdpel       non-zero selects the third-pel (tpel) DSP functions
 * @param avg            non-zero averages into dst instead of overwriting
 *
 * NOTE(review): elided lines include the local declarations (src/dest, emu)
 * and several closing braces; the emulated-edge branch condition is also
 * not fully visible.
 */
293 static inline void svq3_mc_dir_part(SVQ3Context *s,
294 int x, int y, int width, int height,
295 int mx, int my, int dxy,
296 int thirdpel, int dir, int avg)
298 H264Context *h = &s->h;
299 H264SliceContext *sl = &h->slice_ctx[0];
300 const H264Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
303 int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
/* out-of-frame references fall back to clipped coordinates + edge emulation */
308 if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
309 my < 0 || my >= s->v_edge_pos - height - 1) {
311 mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
312 my = av_clip(my, -16, s->v_edge_pos - height + 15);
315 /* form component predictions */
316 dest = h->cur_pic.f->data[0] + x + y * sl->linesize;
317 src = pic->f->data[0] + mx + my * sl->linesize;
320 h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src,
321 sl->linesize, sl->linesize,
322 width + 1, height + 1,
323 mx, my, s->h_edge_pos, s->v_edge_pos);
324 src = sl->edge_emu_buffer;
325 (avg ? s->tdsp.avg_tpel_pixels_tab
328 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, sl->linesize,
331 (avg ? s->hdsp.avg_pixels_tab
332 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, sl->linesize,
/* chroma: same prediction at half resolution, unless decoding gray-only */
335 if (!(h->flags & AV_CODEC_FLAG_GRAY)) {
336 mx = mx + (mx < (int) x) >> 1;
337 my = my + (my < (int) y) >> 1;
339 height = height >> 1;
342 for (i = 1; i < 3; i++) {
343 dest = h->cur_pic.f->data[i] + (x >> 1) + (y >> 1) * sl->uvlinesize;
344 src = pic->f->data[i] + mx + my * sl->uvlinesize;
347 h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src,
348 sl->uvlinesize, sl->uvlinesize,
349 width + 1, height + 1,
350 mx, my, (s->h_edge_pos >> 1),
352 src = sl->edge_emu_buffer;
355 (avg ? s->tdsp.avg_tpel_pixels_tab
356 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
360 (avg ? s->hdsp.avg_pixels_tab
361 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/**
 * Predict, decode and apply the motion vectors of every partition of the
 * current macroblock for one prediction direction, then write them back to
 * the motion-vector cache and the current picture.
 *
 * @param size  encodes the partition geometry (16x16/16x8/8x8/... via the
 *              part_width/part_height expressions below)
 * @param mode  FULLPEL/HALFPEL/THIRDPEL or PREDICT_MODE (vectors scaled from
 *              the co-located next picture instead of coded differentials)
 * @return 0 on success, negative on an invalid MV VLC (lines elided here)
 *
 * NOTE(review): elided lines include the dir/avg parameters, several braces
 * and the PREDICT_MODE B-frame scaling branch headers.
 */
368 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
371 int i, j, k, mx, my, dx, dy, x, y;
372 H264Context *h = &s->h;
373 H264SliceContext *sl = &h->slice_ctx[0];
374 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
375 const int part_height = 16 >> ((unsigned)(size + 1) / 3);
376 const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
/* clipping bounds are kept in 1/6-pel units, hence the factor 6 */
377 const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
378 const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
380 for (i = 0; i < 16; i += part_height)
381 for (j = 0; j < 16; j += part_width) {
382 const int b_xy = (4 * sl->mb_x + (j >> 2)) +
383 (4 * sl->mb_y + (i >> 2)) * h->b_stride;
385 x = 16 * sl->mb_x + j;
386 y = 16 * sl->mb_y + i;
/* k indexes the scan8[] 4x4-block numbering inside the MB */
387 k = (j >> 2 & 1) + (i >> 1 & 2) +
388 (j >> 1 & 4) + (i & 8);
389 if (mode != PREDICT_MODE) {
390 if (mode != PREDICT_MODE) {
391 pred_motion(h, sl, k, part_width >> 2, dir, 1, &mx, &my);
/* NOTE(review): << 1 on a possibly negative motion component is formally
 * UB in C; upstream later replaced it with "* 2" -- worth mirroring. */
393 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
394 my = s->next_pic->motion_val[0][b_xy][1] << 1;
/* scale co-located vectors by the temporal distance ratio */
397 mx = mx * h->frame_num_offset /
398 h->prev_frame_num_offset + 1 >> 1;
399 my = my * h->frame_num_offset /
400 h->prev_frame_num_offset + 1 >> 1;
402 mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
403 h->prev_frame_num_offset + 1 >> 1;
404 my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
405 h->prev_frame_num_offset + 1 >> 1;
409 /* clip motion vector prediction to frame border */
410 mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
411 my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
413 /* get (optional) motion vector differential */
414 if (mode == PREDICT_MODE) {
417 dy = svq3_get_se_golomb(&h->gb);
418 dx = svq3_get_se_golomb(&h->gb);
420 if (dx == INVALID_VLC || dy == INVALID_VLC) {
421 av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
426 /* compute motion vector */
427 if (mode == THIRDPEL_MODE) {
429 mx = (mx + 1 >> 1) + dx;
430 my = (my + 1 >> 1) + dy;
/* fx/fy: full-pel part; dxy: third-pel fraction index (0..8) */
431 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
432 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
433 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
435 svq3_mc_dir_part(s, x, y, part_width, part_height,
436 fx, fy, dxy, 1, dir, avg);
439 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
440 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
441 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
442 dxy = (mx & 1) + 2 * (my & 1);
444 svq3_mc_dir_part(s, x, y, part_width, part_height,
445 mx >> 1, my >> 1, dxy, 0, dir, avg);
449 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
450 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
452 svq3_mc_dir_part(s, x, y, part_width, part_height,
453 mx, my, 0, 0, dir, avg);
458 /* update mv_cache */
459 if (mode != PREDICT_MODE) {
460 int32_t mv = pack16to32(mx, my);
462 if (part_height == 8 && i < 8) {
463 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1 * 8], mv);
465 if (part_width == 8 && j < 8)
466 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
468 if (part_width == 8 && j < 8)
469 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1], mv);
470 if (part_width == 4 || part_height == 4)
471 AV_WN32A(sl->mv_cache[dir][scan8[k]], mv);
474 /* write back motion vectors */
475 fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
476 part_width >> 2, part_height >> 2, h->b_stride,
477 pack16to32(mx, my), 4);
/**
 * Decode one macroblock: derive the MB type (SKIP / INTER / INTRA4x4 /
 * INTRA16x16), perform motion compensation or set up intra prediction,
 * then decode the coded-block pattern and all coefficient blocks.
 *
 * @param mb_type raw macroblock type from the bitstream (0 SKIP, 1..7 INTER,
 *                8/33 INTRA4x4, others INTRA16x16); remapped to the shared
 *                H.264 MB_TYPE_* constants before being stored
 * @return 0 on success, a negative value on bitstream errors
 *
 * NOTE(review): this extract is heavily elided (many braces, error returns
 * and a few statements are missing between the visible lines).
 */
483 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
485 H264Context *h = &s->h;
486 H264SliceContext *sl = &h->slice_ctx[0];
487 int i, j, k, m, dir, mode;
491 const int mb_xy = sl->mb_xy;
492 const int b_xy = 4 * sl->mb_x + 4 * sl->mb_y * h->b_stride;
/* neighbour-availability masks for intra prediction at picture borders */
494 sl->top_samples_available = (sl->mb_y == 0) ? 0x33FF : 0xFFFF;
495 sl->left_samples_available = (sl->mb_x == 0) ? 0x5F5F : 0xFFFF;
496 sl->topright_samples_available = 0xFFFF;
498 if (mb_type == 0) { /* SKIP */
499 if (h->pict_type == AV_PICTURE_TYPE_P ||
500 s->next_pic->mb_type[mb_xy] == -1) {
/* zero-MV copy from the reference(s) */
501 svq3_mc_dir_part(s, 16 * sl->mb_x, 16 * sl->mb_y, 16, 16,
504 if (h->pict_type == AV_PICTURE_TYPE_B)
505 svq3_mc_dir_part(s, 16 * sl->mb_x, 16 * sl->mb_y, 16, 16,
508 mb_type = MB_TYPE_SKIP;
/* B-frame skip: reuse the co-located next-picture partitioning */
510 mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
511 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
513 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
516 mb_type = MB_TYPE_16x16;
518 } else if (mb_type < 8) { /* INTER */
/* one flag selects between the two enabled sub-pel precisions */
519 if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
520 mode = THIRDPEL_MODE;
521 else if (s->halfpel_flag &&
522 s->thirdpel_flag == !get_bits1(&h->gb))
528 /* note ref_cache should contain here:
/* fill the MV cache from the left / top / top-right neighbours */
536 for (m = 0; m < 2; m++) {
537 if (sl->mb_x > 0 && sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
538 for (i = 0; i < 4; i++)
539 AV_COPY32(sl->mv_cache[m][scan8[0] - 1 + i * 8],
540 h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
542 for (i = 0; i < 4; i++)
543 AV_ZERO32(sl->mv_cache[m][scan8[0] - 1 + i * 8]);
546 memcpy(sl->mv_cache[m][scan8[0] - 1 * 8],
547 h->cur_pic.motion_val[m][b_xy - h->b_stride],
548 4 * 2 * sizeof(int16_t));
549 memset(&sl->ref_cache[m][scan8[0] - 1 * 8],
550 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
552 if (sl->mb_x < h->mb_width - 1) {
553 AV_COPY32(sl->mv_cache[m][scan8[0] + 4 - 1 * 8],
554 h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
555 sl->ref_cache[m][scan8[0] + 4 - 1 * 8] =
556 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
557 sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
559 sl->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
561 AV_COPY32(sl->mv_cache[m][scan8[0] - 1 - 1 * 8],
562 h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
563 sl->ref_cache[m][scan8[0] - 1 - 1 * 8] =
564 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
566 sl->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
568 memset(&sl->ref_cache[m][scan8[0] - 1 * 8 - 1],
569 PART_NOT_AVAILABLE, 8);
/* P-frames only need the forward (m == 0) caches */
571 if (h->pict_type != AV_PICTURE_TYPE_B)
575 /* decode motion vector(s) and form prediction(s) */
576 if (h->pict_type == AV_PICTURE_TYPE_P) {
577 if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
579 } else { /* AV_PICTURE_TYPE_B */
581 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
584 for (i = 0; i < 4; i++)
585 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
586 0, 4 * 2 * sizeof(int16_t));
589 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
592 for (i = 0; i < 4; i++)
593 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
594 0, 4 * 2 * sizeof(int16_t));
598 mb_type = MB_TYPE_16x16;
599 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
600 memset(sl->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* import the neighbours' 4x4 prediction modes into the cache */
604 for (i = 0; i < 4; i++)
605 sl->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
606 if (sl->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
607 sl->left_samples_available = 0x5F5F;
610 sl->intra4x4_pred_mode_cache[4 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
611 sl->intra4x4_pred_mode_cache[5 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
612 sl->intra4x4_pred_mode_cache[6 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
613 sl->intra4x4_pred_mode_cache[7 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
615 if (sl->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
616 sl->top_samples_available = 0x33FF;
619 /* decode prediction codes for luma blocks */
620 for (i = 0; i < 16; i += 2) {
621 vlc = svq3_get_ue_golomb(&h->gb);
624 av_log(h->avctx, AV_LOG_ERROR,
625 "luma prediction:%"PRIu32"\n", vlc);
/* one VLC yields the modes of two adjacent 4x4 blocks via svq3_pred_0/1 */
629 left = &sl->intra4x4_pred_mode_cache[scan8[i] - 1];
630 top = &sl->intra4x4_pred_mode_cache[scan8[i] - 8];
632 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
633 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
635 if (left[1] == -1 || left[2] == -1) {
636 av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
640 } else { /* mb_type == 33, DC_128_PRED block type */
641 for (i = 0; i < 4; i++)
642 memset(&sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
645 write_back_intra_pred_mode(h, sl);
648 ff_h264_check_intra4x4_pred_mode(h, sl);
650 sl->top_samples_available = (sl->mb_y == 0) ? 0x33FF : 0xFFFF;
651 sl->left_samples_available = (sl->mb_x == 0) ? 0x5F5F : 0xFFFF;
653 for (i = 0; i < 4; i++)
654 memset(&sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
656 sl->top_samples_available = 0x33FF;
657 sl->left_samples_available = 0x5F5F;
660 mb_type = MB_TYPE_INTRA4x4;
661 } else { /* INTRA16x16 */
662 dir = i_mb_type_info[mb_type - 8].pred_mode;
/* remap the H.264 pred-mode numbering to SVQ3's */
663 dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
665 if ((sl->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, sl, dir, 0)) < 0) {
666 av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
667 return sl->intra16x16_pred_mode;
670 cbp = i_mb_type_info[mb_type - 8].cbp;
671 mb_type = MB_TYPE_INTRA16x16;
/* intra MBs in P/B frames invalidate their motion vectors */
674 if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
675 for (i = 0; i < 4; i++)
676 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
677 0, 4 * 2 * sizeof(int16_t));
678 if (h->pict_type == AV_PICTURE_TYPE_B) {
679 for (i = 0; i < 4; i++)
680 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
681 0, 4 * 2 * sizeof(int16_t));
684 if (!IS_INTRA4x4(mb_type)) {
685 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
687 if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
688 memset(sl->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
/* decode the coded-block pattern unless it is implied by the MB type */
691 if (!IS_INTRA16x16(mb_type) &&
692 (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
693 if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48) {
694 av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
698 cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
699 : golomb_to_inter_cbp[vlc];
701 if (IS_INTRA16x16(mb_type) ||
702 (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
703 sl->qscale += svq3_get_se_golomb(&h->gb);
705 if (sl->qscale > 31u) {
706 av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", sl->qscale);
710 if (IS_INTRA16x16(mb_type)) {
711 AV_ZERO128(sl->mb_luma_dc[0] + 0);
712 AV_ZERO128(sl->mb_luma_dc[0] + 8);
713 if (svq3_decode_block(&h->gb, sl->mb_luma_dc[0], 0, 1)) {
714 av_log(h->avctx, AV_LOG_ERROR,
715 "error while decoding intra luma dc\n");
/* luma AC blocks: dual scan below qscale 24 for intra4x4, zigzag otherwise */
721 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
722 const int type = ((sl->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
724 for (i = 0; i < 4; i++)
725 if ((cbp & (1 << i))) {
726 for (j = 0; j < 4; j++) {
727 k = index ? (1 * (j & 1) + 2 * (i & 1) +
728 2 * (j & 2) + 4 * (i & 2))
730 sl->non_zero_count_cache[scan8[k]] = 1;
732 if (svq3_decode_block(&h->gb, &sl->mb[16 * k], index, type)) {
733 av_log(h->avctx, AV_LOG_ERROR,
734 "error while decoding block\n");
/* chroma DC blocks (type 3), then chroma AC when signalled by the cbp */
741 for (i = 1; i < 3; ++i)
742 if (svq3_decode_block(&h->gb, &sl->mb[16 * 16 * i], 0, 3)) {
743 av_log(h->avctx, AV_LOG_ERROR,
744 "error while decoding chroma dc block\n");
749 for (i = 1; i < 3; i++) {
750 for (j = 0; j < 4; j++) {
752 sl->non_zero_count_cache[scan8[k]] = 1;
754 if (svq3_decode_block(&h->gb, &sl->mb[16 * k], 1, 1)) {
755 av_log(h->avctx, AV_LOG_ERROR,
756 "error while decoding chroma ac block\n");
766 h->cur_pic.mb_type[mb_xy] = mb_type;
768 if (IS_INTRA(mb_type))
769 sl->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, sl, DC_PRED8x8, 1);
/**
 * Parse an SVQ3 slice header: validate the 8-bit header byte, locate the
 * next slice, optionally de-watermark the payload in place, then read the
 * slice type, qscale and flags, and invalidate prediction state along the
 * slice boundary.
 *
 * @return 0 on success, negative on an unsupported/invalid header
 *
 * NOTE(review): elided lines include local declarations (header, slice_id,
 * i), several error returns and closing braces.
 */
774 static int svq3_decode_slice_header(AVCodecContext *avctx)
776 SVQ3Context *s = avctx->priv_data;
777 H264Context *h = &s->h;
778 H264SliceContext *sl = &h->slice_ctx[0];
779 const int mb_xy = sl->mb_xy;
783 header = get_bits(&h->gb, 8);
/* low bits: slice start code (1 or 2); bits 5-6: slice length field size */
785 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
787 av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
790 int length = header >> 5 & 3;
792 s->next_slice_index = get_bits_count(&h->gb) +
793 8 * show_bits(&h->gb, 8 * length) +
796 if (s->next_slice_index > h->gb.size_in_bits) {
797 av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
801 h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
802 skip_bits(&h->gb, 8);
/* XOR-decrypt the watermarked bytes in place before parsing them */
804 if (s->watermark_key) {
805 uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
806 AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
807 header ^ s->watermark_key);
/* NOTE(review): writes through gb.buffer, casting away const -- the input
 * packet is modified in place. */
810 memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
811 &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
813 skip_bits_long(&h->gb, 0);
816 if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
817 av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
821 sl->slice_type = golomb_to_pict_type[slice_id];
/* header type 2 carries an explicit macroblock skip run */
823 if ((header & 0x9F) == 2) {
824 i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
825 sl->mb_skip_run = get_bits(&h->gb, i) -
826 (sl->mb_y * h->mb_width + sl->mb_x);
832 sl->slice_num = get_bits(&h->gb, 8);
833 sl->qscale = get_bits(&h->gb, 5);
834 s->adaptive_quant = get_bits1(&h->gb);
843 skip_bits(&h->gb, 2);
/* skip variable-length padding/extension bytes */
845 while (get_bits1(&h->gb))
846 skip_bits(&h->gb, 8);
848 /* reset intra predictors and invalidate motion vector references */
850 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
851 -1, 4 * sizeof(int8_t));
852 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - sl->mb_x],
853 -1, 8 * sizeof(int8_t) * sl->mb_x);
856 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
857 -1, 8 * sizeof(int8_t) * (h->mb_width - sl->mb_x));
860 sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/**
 * One-time decoder initialisation: allocate the three picture slots, set up
 * the shared H.264 machinery and DSP contexts, then parse the SVQ3 "SEQH"
 * atom from the extradata (frame size, sub-pel flags, low-delay flag and the
 * optional zlib-compressed watermark used to derive watermark_key).
 *
 * @return 0 on success, a negative AVERROR on failure
 *
 * NOTE(review): this extract is elided -- the frame_size_code switch cases,
 * the zlib #if guards and several declarations/braces are not visible.
 */
866 static av_cold int svq3_decode_init(AVCodecContext *avctx)
868 SVQ3Context *s = avctx->priv_data;
869 H264Context *h = &s->h;
870 H264SliceContext *sl;
872 unsigned char *extradata;
873 unsigned char *extradata_end;
875 int marker_found = 0;
877 s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
878 s->last_pic = av_mallocz(sizeof(*s->last_pic));
879 s->next_pic = av_mallocz(sizeof(*s->next_pic));
880 if (!s->next_pic || !s->last_pic || !s->cur_pic) {
881 av_freep(&s->cur_pic);
882 av_freep(&s->last_pic);
883 av_freep(&s->next_pic);
884 return AVERROR(ENOMEM);
887 s->cur_pic->f = av_frame_alloc();
888 s->last_pic->f = av_frame_alloc();
889 s->next_pic->f = av_frame_alloc();
890 if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
891 return AVERROR(ENOMEM);
893 if (ff_h264_decode_init(avctx) < 0)
896 // we will overwrite it later during decoding
897 av_frame_free(&h->cur_pic.f);
899 ff_h264dsp_init(&h->h264dsp, 8, 1);
900 ff_h264chroma_init(&h->h264chroma, 8);
901 ff_h264qpel_init(&h->h264qpel, 8);
902 ff_h264_pred_init(&h->hpc, AV_CODEC_ID_SVQ3, 8, 1);
903 ff_videodsp_init(&h->vdsp, 8);
/* flat scaling matrices: SVQ3 has no scaling lists */
905 memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t));
906 memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t));
908 h->sps.bit_depth_luma = 8;
909 h->chroma_format_idc = 1;
911 ff_hpeldsp_init(&s->hdsp, avctx->flags);
912 ff_tpeldsp_init(&s->tdsp);
916 h->flags = avctx->flags;
918 h->picture_structure = PICT_FRAME;
919 avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
920 avctx->color_range = AVCOL_RANGE_JPEG;
922 h->slice_ctx[0].chroma_qp[0] = h->slice_ctx[0].chroma_qp[1] = 4;
923 h->chroma_x_shift = h->chroma_y_shift = 1;
926 s->thirdpel_flag = 1;
929 /* prowl for the "SEQH" marker in the extradata */
930 extradata = (unsigned char *)avctx->extradata;
931 extradata_end = avctx->extradata + avctx->extradata_size;
933 for (m = 0; m + 8 < avctx->extradata_size; m++) {
934 if (!memcmp(extradata, "SEQH", 4)) {
942 /* if a match was found, parse the extra data */
947 size = AV_RB32(&extradata[4]);
948 if (size > extradata_end - extradata - 8)
949 return AVERROR_INVALIDDATA;
950 init_get_bits(&gb, extradata + 8, size * 8);
952 /* 'frame size code' and optional 'width, height' */
953 frame_size_code = get_bits(&gb, 3);
954 switch (frame_size_code) {
/* code 7: explicit 12-bit width/height follow (other cases elided) */
984 avctx->width = get_bits(&gb, 12);
985 avctx->height = get_bits(&gb, 12);
989 s->halfpel_flag = get_bits1(&gb);
990 s->thirdpel_flag = get_bits1(&gb);
998 h->low_delay = get_bits1(&gb);
1003 while (get_bits1(&gb))
1006 s->unknown_flag = get_bits1(&gb);
1007 avctx->has_b_frames = !h->low_delay;
1008 if (s->unknown_flag) {
/* watermark header: dimensions, three opaque fields, then zlib data */
1010 unsigned watermark_width = svq3_get_ue_golomb(&gb);
1011 unsigned watermark_height = svq3_get_ue_golomb(&gb);
1012 int u1 = svq3_get_ue_golomb(&gb);
1013 int u2 = get_bits(&gb, 8);
1014 int u3 = get_bits(&gb, 2);
1015 int u4 = svq3_get_ue_golomb(&gb);
1016 unsigned long buf_len = watermark_width *
1017 watermark_height * 4;
1018 int offset = get_bits_count(&gb) + 7 >> 3;
/* reject dimensions whose 4-byte-per-pixel buffer would overflow */
1021 if (watermark_height > 0 &&
1022 (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
1025 buf = av_malloc(buf_len);
1026 av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1027 watermark_width, watermark_height);
1028 av_log(avctx, AV_LOG_DEBUG,
1029 "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1030 u1, u2, u3, u4, offset);
1031 if (uncompress(buf, &buf_len, extradata + 8 + offset,
1032 size - offset) != Z_OK) {
1033 av_log(avctx, AV_LOG_ERROR,
1034 "could not uncompress watermark logo\n");
/* the slice XOR key is the 16-bit packet checksum replicated twice */
1038 s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1039 s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1040 av_log(avctx, AV_LOG_DEBUG,
1041 "watermark key %#"PRIx32"\n", s->watermark_key);
1044 av_log(avctx, AV_LOG_ERROR,
1045 "this svq3 file contains watermark which need zlib support compiled in\n");
1051 h->width = avctx->width;
1052 h->height = avctx->height;
1053 h->mb_width = (h->width + 15) / 16;
1054 h->mb_height = (h->height + 15) / 16;
1055 h->mb_stride = h->mb_width + 1;
1056 h->mb_num = h->mb_width * h->mb_height;
1057 h->b_stride = 4 * h->mb_width;
1058 s->h_edge_pos = h->mb_width * 16;
1059 s->v_edge_pos = h->mb_height * 16;
1061 if (ff_h264_alloc_tables(h) < 0) {
1062 av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1063 return AVERROR(ENOMEM);
1069 static void free_picture(AVCodecContext *avctx, H264Picture *pic)
1072 for (i = 0; i < 2; i++) {
1073 av_buffer_unref(&pic->motion_val_buf[i]);
1074 av_buffer_unref(&pic->ref_index_buf[i]);
1076 av_buffer_unref(&pic->mb_type_buf);
1078 av_frame_unref(pic->f);
/**
 * Lazily allocate the side buffers of a picture slot (mb types, motion
 * vectors, reference indices), then get a frame buffer from the generic
 * allocator and record the resulting line sizes in the slice context.
 *
 * @return 0 on success, a negative AVERROR on allocation failure
 *
 * NOTE(review): elided lines include the declarations of i/ret, the
 * "return 0;" success path and the "fail:" label before free_picture().
 */
1081 static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
1083 SVQ3Context *s = avctx->priv_data;
1084 H264Context *h = &s->h;
1085 H264SliceContext *sl = &h->slice_ctx[0];
1086 const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1087 const int mb_array_size = h->mb_stride * h->mb_height;
1088 const int b4_stride = h->mb_width * 4 + 1;
1089 const int b4_array_size = b4_stride * h->mb_height * 4;
/* side buffers are allocated once and reused across frames */
1092 if (!pic->motion_val_buf[0]) {
1095 pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1096 if (!pic->mb_type_buf)
1097 return AVERROR(ENOMEM);
1098 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1100 for (i = 0; i < 2; i++) {
1101 pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1102 pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1103 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1104 ret = AVERROR(ENOMEM);
1108 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1109 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* B-frames are never used as references */
1112 pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1114 ret = ff_get_buffer(avctx, pic->f,
1115 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* 17 lines is enough for a 16x16 partition plus the +1 emulation margin */
1119 if (!sl->edge_emu_buffer) {
1120 sl->edge_emu_buffer = av_mallocz(pic->f->linesize[0] * 17);
1121 if (!sl->edge_emu_buffer)
1122 return AVERROR(ENOMEM);
1125 sl->linesize = pic->f->linesize[0];
1126 sl->uvlinesize = pic->f->linesize[1];
1130 free_picture(avctx, pic);
/**
 * Decode one SVQ3 packet into a frame: parse the slice header, rotate the
 * picture slots, allocate the current picture, synthesise grey references
 * when they are missing, set up frame-number bookkeeping and then decode
 * the macroblocks.
 *
 * NOTE(review): this function is truncated at the end of the extract -- the
 * macroblock decode loop body and everything after it are not visible, so
 * comments here cover only the visible setup path.
 */
1134 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1135 int *got_frame, AVPacket *avpkt)
1137 const uint8_t *buf = avpkt->data;
1138 SVQ3Context *s = avctx->priv_data;
1139 H264Context *h = &s->h;
1140 H264SliceContext *sl = &h->slice_ctx[0];
1141 int buf_size = avpkt->size;
1144 /* special case for last picture */
/* drain call: emit the buffered future frame exactly once */
1145 if (buf_size == 0) {
1146 if (s->next_pic->f->data[0] && !h->low_delay && !s->last_frame_output) {
1147 ret = av_frame_ref(data, s->next_pic->f);
1150 s->last_frame_output = 1;
1156 init_get_bits(&h->gb, buf, 8 * buf_size);
1158 sl->mb_x = sl->mb_y = sl->mb_xy = 0;
1160 if (svq3_decode_slice_header(avctx))
1163 h->pict_type = sl->slice_type;
/* I/P frames rotate the reference slots; B-frames decode between them */
1165 if (h->pict_type != AV_PICTURE_TYPE_B)
1166 FFSWAP(H264Picture*, s->next_pic, s->last_pic);
1168 av_frame_unref(s->cur_pic->f);
1170 /* for skipping the frame */
1171 s->cur_pic->f->pict_type = h->pict_type;
1172 s->cur_pic->f->key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1174 ret = get_buffer(avctx, s->cur_pic);
1178 h->cur_pic_ptr = s->cur_pic;
1179 h->cur_pic = *s->cur_pic;
/* per-block destination offsets for the 8-bit (and doubled-stride) layouts */
1181 for (i = 0; i < 16; i++) {
1182 h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * sl->linesize * ((scan8[i] - scan8[0]) >> 3);
1183 h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * sl->linesize * ((scan8[i] - scan8[0]) >> 3);
1185 for (i = 0; i < 16; i++) {
1186 h->block_offset[16 + i] =
1187 h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * sl->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1188 h->block_offset[48 + 16 + i] =
1189 h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * sl->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
/* missing references are replaced by mid-grey frames so decoding continues */
1192 if (h->pict_type != AV_PICTURE_TYPE_I) {
1193 if (!s->last_pic->f->data[0]) {
1194 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1195 ret = get_buffer(avctx, s->last_pic);
1198 memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
1199 memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
1200 s->last_pic->f->linesize[1]);
1201 memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
1202 s->last_pic->f->linesize[2]);
1205 if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
1206 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1207 ret = get_buffer(avctx, s->next_pic);
1210 memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
1211 memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
1212 s->next_pic->f->linesize[1]);
1213 memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
1214 s->next_pic->f->linesize[2]);
1218 if (avctx->debug & FF_DEBUG_PICT_INFO)
1219 av_log(h->avctx, AV_LOG_DEBUG,
1220 "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1221 av_get_picture_type_char(h->pict_type),
1222 s->halfpel_flag, s->thirdpel_flag,
1223 s->adaptive_quant, h->slice_ctx[0].qscale, sl->slice_num);
1225 if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1226 avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1227 avctx->skip_frame >= AVDISCARD_ALL)
1230 if (s->next_p_frame_damaged) {
1231 if (h->pict_type == AV_PICTURE_TYPE_B)
1234 s->next_p_frame_damaged = 0;
/* slice_num doubles as the frame number for temporal MV scaling */
1237 if (h->pict_type == AV_PICTURE_TYPE_B) {
1238 h->frame_num_offset = sl->slice_num - h->prev_frame_num;
1240 if (h->frame_num_offset < 0)
1241 h->frame_num_offset += 256;
1242 if (h->frame_num_offset == 0 ||
1243 h->frame_num_offset >= h->prev_frame_num_offset) {
1244 av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1248 h->prev_frame_num = h->frame_num;
1249 h->frame_num = sl->slice_num;
1250 h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1252 if (h->prev_frame_num_offset < 0)
1253 h->prev_frame_num_offset += 256;
/* pre-fill the reference cache: inner positions valid, borders unavailable */
1256 for (m = 0; m < 2; m++) {
1258 for (i = 0; i < 4; i++) {
1260 for (j = -1; j < 4; j++)
1261 sl->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1263 sl->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1267 for (sl->mb_y = 0; sl->mb_y < h->mb_height; sl->mb_y++) {
1268 for (sl->mb_x = 0; sl->mb_x < h->mb_width; sl->mb_x++) {
1270 sl->mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
/* detect the end of the current slice (byte-aligned or zero-padded) */
1272 if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1273 ((get_bits_count(&h->gb) & 7) == 0 ||
1274 show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1275 skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1276 h->gb.size_in_bits = 8 * buf_size;
1278 if (svq3_decode_slice_header(avctx))
1281 /* TODO: support s->mb_skip_run */
1284 mb_type = svq3_get_ue_golomb(&h->gb);
1286 if (h->pict_type == AV_PICTURE_TYPE_I)
1288 else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1290 if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1291 av_log(h->avctx, AV_LOG_ERROR,
1292 "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
1297 ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
1299 if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1300 h->cur_pic.mb_type[sl->mb_x + sl->mb_y * h->mb_stride] =
1301 (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1304 ff_draw_horiz_band(avctx, s->cur_pic->f,
1305 s->last_pic->f->data[0] ? s->last_pic->f : NULL,
1306 16 * sl->mb_y, 16, h->picture_structure, 0,
1310 if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1311 ret = av_frame_ref(data, s->cur_pic->f);
1312 else if (s->last_pic->f->data[0])
1313 ret = av_frame_ref(data, s->last_pic->f);
1317 /* Do not output the last pic after seeking. */
1318 if (s->last_pic->f->data[0] || h->low_delay)
1321 if (h->pict_type != AV_PICTURE_TYPE_B) {
1322 FFSWAP(H264Picture*, s->cur_pic, s->next_pic);
1324 av_frame_unref(s->cur_pic->f);
/* Decoder teardown: release the three picture slots and the shared
 * H.264 context. (NOTE(review): the function braces and final return
 * are outside the visible excerpt.) */
1330 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1332 SVQ3Context *s = avctx->priv_data;
1333 H264Context *h = &s->h;
/* Free each picture in three steps, innermost resources first:
 * per-picture side buffers, then the AVFrame, then the struct itself. */
1335 free_picture(avctx, s->cur_pic);
1336 free_picture(avctx, s->next_pic);
1337 free_picture(avctx, s->last_pic);
1338 av_frame_free(&s->cur_pic->f);
1339 av_frame_free(&s->next_pic->f);
1340 av_frame_free(&s->last_pic->f);
1341 av_freep(&s->cur_pic);
1342 av_freep(&s->next_pic);
1343 av_freep(&s->last_pic);
/* h->cur_pic holds a shallow copy of *s->cur_pic (assigned during
 * decode); clear it so ff_h264_free_context() does not double-free
 * the buffers released above. */
1345 memset(&h->cur_pic, 0, sizeof(h->cur_pic));
1347 ff_h264_free_context(h);
/* Codec registration entry for the Sorenson SVQ3 decoder.
 * (NOTE(review): the initializer continues past the visible excerpt —
 * the capabilities expression and pix_fmts array are truncated here.) */
1352 AVCodec ff_svq3_decoder = {
1354 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1355 .type = AVMEDIA_TYPE_VIDEO,
1356 .id = AV_CODEC_ID_SVQ3,
1357 .priv_data_size = sizeof(SVQ3Context),
1358 .init = svq3_decode_init,
1359 .close = svq3_decode_end,
1360 .decode = svq3_decode_frame,
/* Slice-row callbacks are supported: decode_frame calls
 * ff_draw_horiz_band() after each macroblock row. */
1361 .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
1364 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,