2 * Copyright (c) 2003 The Libav Project
4 * This file is part of Libav.
6 * Libav is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * Libav is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with Libav; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
45 #include "libavutil/attributes.h"
48 #include "mpegutils.h"
51 #include "h264data.h" // FIXME FIXME FIXME
53 #include "h264_mvpred.h"
56 #include "rectangle.h"
/*
 * Decoder-private state. NOTE(review): this excerpt elides several members
 * (the embedded line numbers jump 71->76, 77->82, ...) and the closing
 * "} SVQ3Context;" is not visible here.
 */
71 typedef struct SVQ3Context {
76 H264Picture *next_pic;          /* reference picture after the current one (B-frame forward ref) */
77 H264Picture *last_pic;          /* most recent reference picture (backward ref) */
82 uint32_t watermark_key;         /* XOR key derived from the watermark logo; 0 if no watermark */
84 int next_p_frame_damaged;       /* set when the following P-frame must be treated as damaged */
87 int last_frame_output;          /* flag: the delayed last picture has already been returned */
/* Motion-compensation precision modes used by svq3_mc_dir()/svq3_decode_mb(). */
90 #define FULLPEL_MODE 1
91 #define HALFPEL_MODE 2
92 #define THIRDPEL_MODE 3
/* PREDICT_MODE: motion is derived from the co-located next-picture MVs (no MV delta read). */
93 #define PREDICT_MODE 4
95 /* dual scan (from some older h264 draft)
/*
 * SVQ3-specific 4x4 coefficient scan order (raster offsets i + j*4).
 * Selected as scan_patterns[2] in svq3_decode_block().
 * NOTE(review): the closing "};" is not visible in this excerpt.
 */
104 static const uint8_t svq3_scan[16] = {
105 0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
106 2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
107 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
108 0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
/*
 * Zigzag scan for the 16 luma DC coefficients; offsets are in units of the
 * 16x16 block layout (column * 16 + row * 64). Used as scan_patterns[0]
 * (type 0 = luma DC) in svq3_decode_block().
 * NOTE(review): the closing "};" is not visible in this excerpt.
 */
111 static const uint8_t luma_dc_zigzag_scan[16] = {
112 0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
113 3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
114 1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
115 3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
/*
 * Diagonal (anti-diagonal) traversal pairs used to map a luma-prediction
 * VLC code to two 4x4 block positions (see the luma prediction loop in
 * svq3_decode_mb(): svq3_pred_0[vlc][0] / [1]).
 * NOTE(review): the first row(s) and the closing "};" are elided in this
 * excerpt (embedded numbering jumps 118->121).
 */
118 static const uint8_t svq3_pred_0[25][2] = {
121 { 0, 2 }, { 1, 1 }, { 2, 0 },
122 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
123 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
124 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
125 { 2, 4 }, { 3, 3 }, { 4, 2 },
/*
 * Intra 4x4 prediction-mode remap table, indexed as
 * svq3_pred_1[top_mode + 1][left_mode + 1][code]; -1 marks an invalid
 * combination (checked as "weird prediction" in svq3_decode_mb()).
 * NOTE(review): the closing "};" is not visible in this excerpt.
 */
130 static const int8_t svq3_pred_1[6][6][5] = {
131 { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
132 { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
133 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
134 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
135 { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
136 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
137 { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
138 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
139 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
140 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
141 { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
142 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/*
 * (run, level) lookup for DCT coefficient VLC codes >= 4, indexed by
 * [intra][vlc] in svq3_decode_block(). [0] = inter table, [1] = intra/DC.
 * NOTE(review): the struct's member declarations (between lines 145 and 148
 * of the original) and the closing "};" are elided in this excerpt.
 */
145 static const struct {
148 } svq3_dct_tables[2][16] = {
149 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
150 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
151 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
152 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/*
 * Dequantization multipliers indexed by qscale (0..31); applied with
 * 0x80000 rounding and >> 20 in the IDCT routines below.
 * NOTE(review): the closing "};" is not visible in this excerpt.
 */
155 static const uint32_t svq3_dequant_coeff[32] = {
156 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
157 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
158 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
159 61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
/*
 * Dequantize and inverse-transform the 4x4 block of luma DC coefficients,
 * scattering the results into `output` at the DC positions of the 16 luma
 * 4x4 blocks. The SVQ3 integer transform uses a 13/13 even butterfly and a
 * 7/17 odd butterfly; results are scaled by qmul with 0x80000 rounding and
 * a 20-bit right shift.
 * NOTE(review): this excerpt elides the declarations of `temp`, `stride`
 * and `i`, plus the opening/closing braces (embedded numbering skips
 * 162->164, 189 uses `stride`, etc.) — code lines kept byte-identical.
 */
162 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
164 const int qmul = svq3_dequant_coeff[qp];
168 static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
/* Horizontal pass: transform each row of the 4x4 DC block into temp[]. */
170 for (i = 0; i < 4; i++) {
171 const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
172 const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
173 const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
174 const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
176 temp[4 * i + 0] = z0 + z3;
177 temp[4 * i + 1] = z1 + z2;
178 temp[4 * i + 2] = z1 - z2;
179 temp[4 * i + 3] = z0 - z3;
/* Vertical pass: transform columns, dequantize, and write to the scattered
 * DC positions (rows 0/2/8/10 of the output stride, offset by x_offset). */
182 for (i = 0; i < 4; i++) {
183 const int offset = x_offset[i];
184 const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
185 const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
186 const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
187 const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
/* (x * qmul + 0x80000) >> 20 — rounding relies on +/>> precedence. */
189 output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
190 output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
191 output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
192 output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
/*
 * Dequantize, inverse-transform a 4x4 residual block and add it to `dst`
 * (clipped to uint8). When `dc` is nonzero the DC coefficient is handled
 * separately (dc == 1 selects the fixed 1538 scale path). The block is
 * cleared afterwards so it is ready for the next macroblock.
 * NOTE(review): some lines are elided in this excerpt (embedded numbering
 * skips 205->209, 231->234; the `if (dc)` guard and braces are not visible).
 */
197 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
198 int stride, int qp, int dc)
200 const int qmul = svq3_dequant_coeff[qp];
/* Pre-scale the DC term; 13*13 compensates for the two transform passes. */
204 dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
205 : qmul * (block[0] >> 3) / 2);
/* Horizontal pass (in place). */
209 for (i = 0; i < 4; i++) {
210 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
211 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
212 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
213 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
215 block[0 + 4 * i] = z0 + z3;
216 block[1 + 4 * i] = z1 + z2;
217 block[2 + 4 * i] = z1 - z2;
218 block[3 + 4 * i] = z0 - z3;
/* Vertical pass: dequantize, add DC + rounding, shift, clip, accumulate. */
221 for (i = 0; i < 4; i++) {
222 const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
223 const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
224 const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
225 const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
226 const int rr = (dc + 0x80000);
228 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
229 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
230 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
231 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
/* Reset the residual block for reuse. */
234 memset(block, 0, 16 * sizeof(int16_t));
/*
 * Decode a run-level coded 4x4 (or DC) coefficient block from the
 * bitstream into `block`, using the scan pattern selected by `type`
 * (0 = luma DC, 1 = regular, 2 = SVQ3 scan, 3 = chroma DC).
 * Presumably returns nonzero on error (checked by callers) — the return
 * statements are not visible in this excerpt.
 * NOTE(review): several lines are elided here (embedded numbering skips
 * 250->257, 267->271, 282->end); code lines kept byte-identical.
 */
237 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
238 int index, const int type)
240 static const uint8_t *const scan_patterns[4] =
241 { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
243 int run, level, limit;
/* intra = 1 for types 2/3 (per 3*type >> 2), selecting the intra DCT table. */
245 const int intra = 3 * type >> 2;
246 const uint8_t *const scan = scan_patterns[type];
/* Outer loop: first 16 (or 8 for intra) coefficients, then 8 at a time. */
248 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
249 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
/* Odd VLC => positive level, even => negative (applied via XOR below). */
250 int sign = (vlc & 1) ? 0 : -1;
257 } else if (vlc < 4) {
262 level = (vlc + 9 >> 2) - run;
/* Larger codes: look up run/level in the static tables. */
266 run = svq3_dct_tables[intra][vlc].run;
267 level = svq3_dct_tables[intra][vlc].level;
271 ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
275 ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
/* Bounds check: a run past the current limit terminates/fails decode. */
279 if ((index += run) >= limit)
/* Apply sign: (level ^ 0) - 0 = level, (level ^ -1) - (-1) = -level. */
282 block[scan[index]] = (level ^ sign) - sign;
/*
 * Motion-compensate one partition (luma, then both chroma planes unless
 * CODEC_FLAG_GRAY) from the reference picture selected by `dir`
 * (0 = last_pic, 1 = next_pic). `thirdpel` selects the thirdpel DSP
 * functions, `avg` selects averaging vs. put. Out-of-frame references go
 * through emulated_edge_mc.
 * NOTE(review): lines are elided in this excerpt (embedded numbering skips
 * 296->298, 322->326, 336->338, 355->359); code kept byte-identical.
 */
293 static inline void svq3_mc_dir_part(SVQ3Context *s,
294 int x, int y, int width, int height,
295 int mx, int my, int dxy,
296 int thirdpel, int dir, int avg)
298 H264Context *h = &s->h;
299 const H264Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
302 int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
/* Detect reference area extending beyond the padded frame edge. */
307 if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
308 my < 0 || my >= s->v_edge_pos - height - 1) {
310 mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
311 my = av_clip(my, -16, s->v_edge_pos - height + 15);
314 /* form component predictions */
315 dest = h->cur_pic.f.data[0] + x + y * h->linesize;
316 src = pic->f.data[0] + mx + my * h->linesize;
/* Edge emulation: copy the (possibly out-of-frame) source into a buffer. */
319 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
320 h->linesize, h->linesize,
321 width + 1, height + 1,
322 mx, my, s->h_edge_pos, s->v_edge_pos);
323 src = h->edge_emu_buffer;
326 (avg ? s->tdsp.avg_tpel_pixels_tab
327 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
330 (avg ? s->hdsp.avg_pixels_tab
331 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
334 if (!(h->flags & CODEC_FLAG_GRAY)) {
/* Halve MV/size for 4:2:0 chroma; rounds toward the block position.
 * Relies on +/>> precedence: (mx + (mx < x)) >> 1. */
335 mx = mx + (mx < (int) x) >> 1;
336 my = my + (my < (int) y) >> 1;
338 height = height >> 1;
/* Planes 1 and 2 = Cb, Cr. */
341 for (i = 1; i < 3; i++) {
342 dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
343 src = pic->f.data[i] + mx + my * h->uvlinesize;
346 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
347 h->uvlinesize, h->uvlinesize,
348 width + 1, height + 1,
349 mx, my, (s->h_edge_pos >> 1),
351 src = h->edge_emu_buffer;
354 (avg ? s->tdsp.avg_tpel_pixels_tab
355 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
359 (avg ? s->hdsp.avg_pixels_tab
360 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/*
 * Decode the motion vectors for all partitions of the current macroblock
 * (partition geometry derived from `size`), run motion compensation via
 * svq3_mc_dir_part(), update the mv_cache and write the MVs back to the
 * current picture. `mode` is one of the *PEL_MODE/PREDICT_MODE constants;
 * `dir` selects forward/backward reference; `avg` selects averaging MC.
 * Presumably returns < 0 on error (callers check that) — not all return
 * paths are visible in this excerpt.
 * NOTE(review): lines are elided (embedded numbering skips 377->379,
 * 390->392, 399->401, 413->416, 432->434, ...); code kept byte-identical.
 */
367 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
370 int i, j, k, mx, my, dx, dy, x, y;
371 H264Context *h = &s->h;
372 H264SliceContext *sl = &h->slice_ctx[0];
/* Partition geometry from the size code: width 4/8/16, height 16/8/4. */
373 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
374 const int part_height = 16 >> ((unsigned)(size + 1) / 3);
375 const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
/* Edge positions in sixth-pel units (hence the factor 6). */
376 const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
377 const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
379 for (i = 0; i < 16; i += part_height)
380 for (j = 0; j < 16; j += part_width) {
381 const int b_xy = (4 * sl->mb_x + (j >> 2)) +
382 (4 * sl->mb_y + (i >> 2)) * h->b_stride;
384 x = 16 * sl->mb_x + j;
385 y = 16 * sl->mb_y + i;
/* k indexes the 4x4 sub-block position inside the MB for scan8[]. */
386 k = (j >> 2 & 1) + (i >> 1 & 2) +
387 (j >> 1 & 4) + (i & 8);
389 if (mode != PREDICT_MODE) {
390 pred_motion(h, sl, k, part_width >> 2, dir, 1, &mx, &my);
/* PREDICT_MODE: derive MVs from the co-located next-picture MVs,
 * scaled by the frame-distance ratio (direct-mode style). */
392 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
393 my = s->next_pic->motion_val[0][b_xy][1] << 1;
396 mx = mx * h->frame_num_offset /
397 h->prev_frame_num_offset + 1 >> 1;
398 my = my * h->frame_num_offset /
399 h->prev_frame_num_offset + 1 >> 1;
401 mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
402 h->prev_frame_num_offset + 1 >> 1;
403 my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
404 h->prev_frame_num_offset + 1 >> 1;
408 /* clip motion vector prediction to frame border */
409 mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
410 my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
412 /* get (optional) motion vector differential */
413 if (mode == PREDICT_MODE) {
/* Note: dy is read before dx (bitstream order). */
416 dy = svq3_get_se_golomb(&h->gb);
417 dx = svq3_get_se_golomb(&h->gb);
419 if (dx == INVALID_VLC || dy == INVALID_VLC) {
420 av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
425 /* compute motion vector */
426 if (mode == THIRDPEL_MODE) {
428 mx = (mx + 1 >> 1) + dx;
429 my = (my + 1 >> 1) + dy;
/* Split into fullpel part (fx, fy) and thirdpel fraction (dxy). */
430 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
431 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
432 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
434 svq3_mc_dir_part(s, x, y, part_width, part_height,
435 fx, fy, dxy, 1, dir, avg);
438 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
439 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
440 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
441 dxy = (mx & 1) + 2 * (my & 1);
443 svq3_mc_dir_part(s, x, y, part_width, part_height,
444 mx >> 1, my >> 1, dxy, 0, dir, avg);
/* FULLPEL_MODE: round sixth-pel prediction down to full pixels. */
448 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
449 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
451 svq3_mc_dir_part(s, x, y, part_width, part_height,
452 mx, my, 0, 0, dir, avg);
457 /* update mv_cache */
458 if (mode != PREDICT_MODE) {
459 int32_t mv = pack16to32(mx, my);
/* Propagate the MV to neighboring cache cells so pred_motion() for
 * later partitions sees it. */
461 if (part_height == 8 && i < 8) {
462 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1 * 8], mv);
464 if (part_width == 8 && j < 8)
465 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
467 if (part_width == 8 && j < 8)
468 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1], mv);
469 if (part_width == 4 || part_height == 4)
470 AV_WN32A(sl->mv_cache[dir][scan8[k]], mv);
473 /* write back motion vectors */
474 fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
475 part_width >> 2, part_height >> 2, h->b_stride,
476 pack16to32(mx, my), 4);
/*
 * Decode one macroblock: dispatch on mb_type (0 = SKIP, 1..7 = INTER,
 * 8/33 = INTRA4x4, others = INTRA16x16), decode motion/prediction info,
 * then the CBP, qscale delta and residual coefficient blocks.
 * Presumably returns 0 on success and < 0 on error — not every return
 * path is visible in this excerpt.
 * NOTE(review): many lines are elided throughout (embedded numbering
 * skips frequently, e.g. 486->490, 521->527, 604->605, 727->729);
 * all code lines kept byte-identical.
 */
482 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
484 H264Context *h = &s->h;
485 H264SliceContext *sl = &h->slice_ctx[0];
486 int i, j, k, m, dir, mode;
490 const int mb_xy = sl->mb_xy;
491 const int b_xy = 4 * sl->mb_x + 4 * sl->mb_y * h->b_stride;
/* Mark which neighbor samples exist for intra prediction at frame edges. */
493 sl->top_samples_available = (sl->mb_y == 0) ? 0x33FF : 0xFFFF;
494 sl->left_samples_available = (sl->mb_x == 0) ? 0x5F5F : 0xFFFF;
495 sl->topright_samples_available = 0xFFFF;
497 if (mb_type == 0) { /* SKIP */
498 if (h->pict_type == AV_PICTURE_TYPE_P ||
499 s->next_pic->mb_type[mb_xy] == -1) {
500 svq3_mc_dir_part(s, 16 * sl->mb_x, 16 * sl->mb_y, 16, 16,
503 if (h->pict_type == AV_PICTURE_TYPE_B)
504 svq3_mc_dir_part(s, 16 * sl->mb_x, 16 * sl->mb_y, 16, 16,
507 mb_type = MB_TYPE_SKIP;
/* B-frame skip with a valid co-located MB: temporal direct prediction. */
509 mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
510 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
512 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
515 mb_type = MB_TYPE_16x16;
517 } else if (mb_type < 8) { /* INTER */
/* One flag bit selects among thirdpel/halfpel/fullpel, depending on
 * which precisions the sequence header enabled. */
518 if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
519 mode = THIRDPEL_MODE;
520 else if (s->halfpel_flag &&
521 s->thirdpel_flag == !get_bits1(&h->gb))
527 /* note ref_cache should contain here:
/* Seed mv_cache with left/top/top-right neighbor MVs for pred_motion(). */
535 for (m = 0; m < 2; m++) {
536 if (sl->mb_x > 0 && sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
537 for (i = 0; i < 4; i++)
538 AV_COPY32(sl->mv_cache[m][scan8[0] - 1 + i * 8],
539 h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
541 for (i = 0; i < 4; i++)
542 AV_ZERO32(sl->mv_cache[m][scan8[0] - 1 + i * 8]);
545 memcpy(sl->mv_cache[m][scan8[0] - 1 * 8],
546 h->cur_pic.motion_val[m][b_xy - h->b_stride],
547 4 * 2 * sizeof(int16_t));
548 memset(&sl->ref_cache[m][scan8[0] - 1 * 8],
549 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
551 if (sl->mb_x < h->mb_width - 1) {
552 AV_COPY32(sl->mv_cache[m][scan8[0] + 4 - 1 * 8],
553 h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
554 sl->ref_cache[m][scan8[0] + 4 - 1 * 8] =
555 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
556 sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
558 sl->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
560 AV_COPY32(sl->mv_cache[m][scan8[0] - 1 - 1 * 8],
561 h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
562 sl->ref_cache[m][scan8[0] - 1 - 1 * 8] =
563 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
565 sl->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
567 memset(&sl->ref_cache[m][scan8[0] - 1 * 8 - 1],
568 PART_NOT_AVAILABLE, 8);
/* P-frames only need direction 0; stop after the first pass. */
570 if (h->pict_type != AV_PICTURE_TYPE_B)
574 /* decode motion vector(s) and form prediction(s) */
575 if (h->pict_type == AV_PICTURE_TYPE_P) {
576 if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
578 } else { /* AV_PICTURE_TYPE_B */
580 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
583 for (i = 0; i < 4; i++)
584 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
585 0, 4 * 2 * sizeof(int16_t));
588 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
591 for (i = 0; i < 4; i++)
592 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
593 0, 4 * 2 * sizeof(int16_t));
597 mb_type = MB_TYPE_16x16;
598 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
599 memset(sl->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* Fill the prediction-mode cache from the left and top neighbor MBs. */
603 for (i = 0; i < 4; i++)
604 sl->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
605 if (sl->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
606 sl->left_samples_available = 0x5F5F;
609 sl->intra4x4_pred_mode_cache[4 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
610 sl->intra4x4_pred_mode_cache[5 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
611 sl->intra4x4_pred_mode_cache[6 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
612 sl->intra4x4_pred_mode_cache[7 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
614 if (sl->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
615 sl->top_samples_available = 0x33FF;
618 /* decode prediction codes for luma blocks */
619 for (i = 0; i < 16; i += 2) {
620 vlc = svq3_get_ue_golomb(&h->gb);
623 av_log(h->avctx, AV_LOG_ERROR,
624 "luma prediction:%"PRIu32"\n", vlc);
628 left = &sl->intra4x4_pred_mode_cache[scan8[i] - 1];
629 top = &sl->intra4x4_pred_mode_cache[scan8[i] - 8];
/* Two modes per VLC code, remapped through svq3_pred_1 by the
 * neighbors' modes; -1 means an illegal combination. */
631 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
632 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
634 if (left[1] == -1 || left[2] == -1) {
635 av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
639 } else { /* mb_type == 33, DC_128_PRED block type */
640 for (i = 0; i < 4; i++)
641 memset(&sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
644 write_back_intra_pred_mode(h, sl);
647 ff_h264_check_intra4x4_pred_mode(h, sl);
649 sl->top_samples_available = (sl->mb_y == 0) ? 0x33FF : 0xFFFF;
650 sl->left_samples_available = (sl->mb_x == 0) ? 0x5F5F : 0xFFFF;
652 for (i = 0; i < 4; i++)
653 memset(&sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
655 sl->top_samples_available = 0x33FF;
656 sl->left_samples_available = 0x5F5F;
659 mb_type = MB_TYPE_INTRA4x4;
660 } else { /* INTRA16x16 */
661 dir = i_mb_type_info[mb_type - 8].pred_mode;
/* Remap H.264 prediction-mode numbering to SVQ3's ordering. */
662 dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
664 if ((sl->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, sl, dir, 0)) < 0) {
665 av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
666 return sl->intra16x16_pred_mode;
669 cbp = i_mb_type_info[mb_type - 8].cbp;
670 mb_type = MB_TYPE_INTRA16x16;
/* Clear stale MVs for intra MBs in inter pictures. */
673 if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
674 for (i = 0; i < 4; i++)
675 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
676 0, 4 * 2 * sizeof(int16_t));
677 if (h->pict_type == AV_PICTURE_TYPE_B) {
678 for (i = 0; i < 4; i++)
679 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
680 0, 4 * 2 * sizeof(int16_t));
683 if (!IS_INTRA4x4(mb_type)) {
684 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
686 if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
687 memset(sl->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
/* Coded block pattern (not present for INTRA16x16, which encodes it in
 * the mb_type). */
690 if (!IS_INTRA16x16(mb_type) &&
691 (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
692 if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48) {
693 av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
697 cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
698 : golomb_to_inter_cbp[vlc];
700 if (IS_INTRA16x16(mb_type) ||
701 (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
702 sl->qscale += svq3_get_se_golomb(&h->gb);
/* Unsigned compare also rejects negative qscale values. */
704 if (sl->qscale > 31u) {
705 av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", sl->qscale);
709 if (IS_INTRA16x16(mb_type)) {
710 AV_ZERO128(sl->mb_luma_dc[0] + 0);
711 AV_ZERO128(sl->mb_luma_dc[0] + 8);
712 if (svq3_decode_block(&h->gb, sl->mb_luma_dc[0], 0, 1)) {
713 av_log(h->avctx, AV_LOG_ERROR,
714 "error while decoding intra luma dc\n");
/* Residual luma 4x4 blocks, per 8x8 group flagged in cbp. */
720 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
721 const int type = ((sl->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
723 for (i = 0; i < 4; i++)
724 if ((cbp & (1 << i))) {
725 for (j = 0; j < 4; j++) {
726 k = index ? (1 * (j & 1) + 2 * (i & 1) +
727 2 * (j & 2) + 4 * (i & 2))
729 sl->non_zero_count_cache[scan8[k]] = 1;
731 if (svq3_decode_block(&h->gb, &sl->mb[16 * k], index, type)) {
732 av_log(h->avctx, AV_LOG_ERROR,
733 "error while decoding block\n");
/* Chroma DC (i = 1: Cb, i = 2: Cr), then chroma AC blocks. */
740 for (i = 1; i < 3; ++i)
741 if (svq3_decode_block(&h->gb, &sl->mb[16 * 16 * i], 0, 3)) {
742 av_log(h->avctx, AV_LOG_ERROR,
743 "error while decoding chroma dc block\n");
748 for (i = 1; i < 3; i++) {
749 for (j = 0; j < 4; j++) {
751 sl->non_zero_count_cache[scan8[k]] = 1;
753 if (svq3_decode_block(&h->gb, &sl->mb[16 * k], 1, 1)) {
754 av_log(h->avctx, AV_LOG_ERROR,
755 "error while decoding chroma ac block\n");
765 h->cur_pic.mb_type[mb_xy] = mb_type;
767 if (IS_INTRA(mb_type))
768 sl->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, sl, DC_PRED8x8, 1);
/*
 * Parse an SVQ3 slice header: validate the 8-bit header byte, locate the
 * start of the next slice, undo the optional watermark XOR scrambling,
 * then read slice id/type, skip-run (header type 2), slice_num, qscale
 * and adaptive-quant flag. Finally resets the intra predictors along the
 * slice boundary. Presumably returns nonzero on error (callers treat a
 * nonzero return as failure) — return statements are elided here.
 * NOTE(review): lines are elided in this excerpt (embedded numbering
 * skips 778->782, 786->789, 833->842, 845->849, ...).
 */
773 static int svq3_decode_slice_header(AVCodecContext *avctx)
775 SVQ3Context *s = avctx->priv_data;
776 H264Context *h = &s->h;
777 H264SliceContext *sl = &h->slice_ctx[0];
778 const int mb_xy = sl->mb_xy;
782 header = get_bits(&h->gb, 8);
/* Low 5 bits must be 1 or 2 (slice header type); bits 5-6 give the
 * byte length of the next-slice-offset field and must be nonzero. */
784 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
786 av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
789 int length = header >> 5 & 3;
791 s->next_slice_index = get_bits_count(&h->gb) +
792 8 * show_bits(&h->gb, 8 * length) +
795 if (s->next_slice_index > h->gb.size_in_bits) {
796 av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
800 h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
801 skip_bits(&h->gb, 8);
/* De-scramble: the stream is XORed with the watermark-derived key. */
803 if (s->watermark_key) {
804 uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
805 AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
806 header ^ s->watermark_key);
809 memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
810 &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
/* Re-synchronize the bit reader cache after mutating the buffer. */
812 skip_bits_long(&h->gb, 0);
815 if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
816 av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
820 sl->slice_type = golomb_to_pict_type[slice_id];
822 if ((header & 0x9F) == 2) {
/* Header type 2 carries an absolute MB index; convert to a skip run. */
823 i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
824 sl->mb_skip_run = get_bits(&h->gb, i) -
825 (sl->mb_y * h->mb_width + sl->mb_x);
831 sl->slice_num = get_bits(&h->gb, 8);
832 sl->qscale = get_bits(&h->gb, 5);
833 s->adaptive_quant = get_bits1(&h->gb);
842 skip_bits(&h->gb, 2);
/* Skip optional extension bytes, each preceded by a continuation bit. */
844 while (get_bits1(&h->gb))
845 skip_bits(&h->gb, 8);
847 /* reset intra predictors and invalidate motion vector references */
849 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
850 -1, 4 * sizeof(int8_t));
851 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - sl->mb_x],
852 -1, 8 * sizeof(int8_t) * sl->mb_x);
855 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
856 -1, 8 * sizeof(int8_t) * (h->mb_width - sl->mb_x));
859 sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/*
 * Decoder init: allocate the three picture structs, initialize the H.264
 * context and DSP helpers, then parse the "SEQH" atom from extradata
 * (frame size, half/thirdpel flags, low-delay, optional zlib-compressed
 * watermark from which the descrambling key is derived) and finally size
 * the macroblock arrays. Returns 0 on success or a negative AVERROR.
 * NOTE(review): lines are elided throughout this excerpt (embedded
 * numbering skips 883->886, 912->920, 932->962, 996->999, ...); in
 * particular the switch body for frame_size_code and several error
 * returns are not visible.
 */
865 static av_cold int svq3_decode_init(AVCodecContext *avctx)
867 SVQ3Context *s = avctx->priv_data;
868 H264Context *h = &s->h;
869 H264SliceContext *sl;
871 unsigned char *extradata;
872 unsigned char *extradata_end;
874 int marker_found = 0;
876 s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
877 s->last_pic = av_mallocz(sizeof(*s->last_pic));
878 s->next_pic = av_mallocz(sizeof(*s->next_pic));
/* av_freep(NULL-holding pointers) is safe, so free all three on any failure. */
879 if (!s->next_pic || !s->last_pic || !s->cur_pic) {
880 av_freep(&s->cur_pic);
881 av_freep(&s->last_pic);
882 av_freep(&s->next_pic);
883 return AVERROR(ENOMEM);
886 if (ff_h264_decode_init(avctx) < 0)
889 ff_hpeldsp_init(&s->hdsp, avctx->flags);
890 ff_tpeldsp_init(&s->tdsp);
894 h->flags = avctx->flags;
896 h->picture_structure = PICT_FRAME;
897 avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
898 avctx->color_range = AVCOL_RANGE_JPEG;
900 h->slice_ctx[0].chroma_qp[0] = h->slice_ctx[0].chroma_qp[1] = 4;
901 h->chroma_x_shift = h->chroma_y_shift = 1;
/* Defaults used when no SEQH atom is found in extradata. */
904 s->thirdpel_flag = 1;
907 /* prowl for the "SEQH" marker in the extradata */
908 extradata = (unsigned char *)avctx->extradata;
909 extradata_end = avctx->extradata + avctx->extradata_size;
911 for (m = 0; m + 8 < avctx->extradata_size; m++) {
912 if (!memcmp(extradata, "SEQH", 4)) {
920 /* if a match was found, parse the extra data */
925 size = AV_RB32(&extradata[4]);
926 if (size > extradata_end - extradata - 8)
927 return AVERROR_INVALIDDATA;
928 init_get_bits(&gb, extradata + 8, size * 8);
930 /* 'frame size code' and optional 'width, height' */
931 frame_size_code = get_bits(&gb, 3);
932 switch (frame_size_code) {
/* Code 7 (presumably — the other cases are elided): explicit 12-bit
 * width and height follow. */
962 avctx->width = get_bits(&gb, 12);
963 avctx->height = get_bits(&gb, 12);
967 s->halfpel_flag = get_bits1(&gb);
968 s->thirdpel_flag = get_bits1(&gb);
976 h->low_delay = get_bits1(&gb);
/* Skip variable-length unknown fields (continuation-bit coded). */
981 while (get_bits1(&gb))
984 s->unknown_flag = get_bits1(&gb);
985 avctx->has_b_frames = !h->low_delay;
986 if (s->unknown_flag) {
/* Watermark block: zlib-compressed logo whose checksum becomes the
 * slice descrambling key (CONFIG_ZLIB path). */
988 unsigned watermark_width = svq3_get_ue_golomb(&gb);
989 unsigned watermark_height = svq3_get_ue_golomb(&gb);
990 int u1 = svq3_get_ue_golomb(&gb);
991 int u2 = get_bits(&gb, 8);
992 int u3 = get_bits(&gb, 2);
993 int u4 = svq3_get_ue_golomb(&gb);
994 unsigned long buf_len = watermark_width *
995 watermark_height * 4;
996 int offset = get_bits_count(&gb) + 7 >> 3;
/* Guard the width*height*4 multiplication against overflow. */
999 if (watermark_height > 0 &&
1000 (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
1003 buf = av_malloc(buf_len);
1004 av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1005 watermark_width, watermark_height);
1006 av_log(avctx, AV_LOG_DEBUG,
1007 "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1008 u1, u2, u3, u4, offset);
1009 if (uncompress(buf, &buf_len, extradata + 8 + offset,
1010 size - offset) != Z_OK) {
1011 av_log(avctx, AV_LOG_ERROR,
1012 "could not uncompress watermark logo\n");
1016 s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
/* Replicate the 16-bit checksum into both halves of the 32-bit key. */
1017 s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1018 av_log(avctx, AV_LOG_DEBUG,
1019 "watermark key %#"PRIx32"\n", s->watermark_key);
1022 av_log(avctx, AV_LOG_ERROR,
1023 "this svq3 file contains watermark which need zlib support compiled in\n");
/* Derive macroblock geometry from the (possibly SEQH-provided) size. */
1029 h->width = avctx->width;
1030 h->height = avctx->height;
1031 h->mb_width = (h->width + 15) / 16;
1032 h->mb_height = (h->height + 15) / 16;
1033 h->mb_stride = h->mb_width + 1;
1034 h->mb_num = h->mb_width * h->mb_height;
1035 h->b_stride = 4 * h->mb_width;
1036 s->h_edge_pos = h->mb_width * 16;
1037 s->v_edge_pos = h->mb_height * 16;
1039 if (ff_h264_alloc_tables(h) < 0) {
1040 av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1041 return AVERROR(ENOMEM);
/*
 * Release everything owned by a picture: per-direction motion-value and
 * ref-index buffers, the mb_type buffer, and the frame itself.
 * Safe on partially-initialized pictures (av_buffer_unref handles NULL).
 */
1047 static void free_picture(AVCodecContext *avctx, H264Picture *pic)
1050 for (i = 0; i < 2; i++) {
1051 av_buffer_unref(&pic->motion_val_buf[i]);
1052 av_buffer_unref(&pic->ref_index_buf[i]);
1054 av_buffer_unref(&pic->mb_type_buf);
1056 av_frame_unref(&pic->f);
/*
 * Lazily allocate a picture's side buffers (mb_type, motion values,
 * ref indices) on first use, then get a frame buffer from the generic
 * allocator and (first time only) the edge-emulation scratch buffer.
 * Returns 0 on success or a negative AVERROR; on failure the partially
 * allocated picture is torn down via free_picture().
 * NOTE(review): some lines are elided in this excerpt (embedded numbering
 * skips 1066->1069, 1092->1096, 1103->1107).
 */
1059 static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
1061 SVQ3Context *s = avctx->priv_data;
1062 H264Context *h = &s->h;
1063 const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1064 const int mb_array_size = h->mb_stride * h->mb_height;
1065 const int b4_stride = h->mb_width * 4 + 1;
1066 const int b4_array_size = b4_stride * h->mb_height * 4;
/* Side buffers are allocated once and kept for the picture's lifetime. */
1069 if (!pic->motion_val_buf[0]) {
1072 pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1073 if (!pic->mb_type_buf)
1074 return AVERROR(ENOMEM);
/* Offset so that negative neighbor indexing stays inside the buffer. */
1075 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1077 for (i = 0; i < 2; i++) {
1078 pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1079 pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1080 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1081 ret = AVERROR(ENOMEM);
1085 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1086 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* B-frames are never used as references. */
1089 pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1091 ret = ff_get_buffer(avctx, &pic->f,
1092 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1096 if (!h->edge_emu_buffer) {
/* 17 rows: enough for a 16-line block plus one extra interpolation row. */
1097 h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1098 if (!h->edge_emu_buffer)
1099 return AVERROR(ENOMEM);
1102 h->linesize = pic->f.linesize[0];
1103 h->uvlinesize = pic->f.linesize[1];
/* Error path (label elided in this excerpt): undo partial allocation. */
1107 free_picture(avctx, pic);
/*
 * Decode one access unit: handle the end-of-stream flush (returning the
 * delayed next_pic once), parse the slice header, rotate/allocate the
 * picture buffers, decode every macroblock of every slice, and output
 * either the current picture (B-frame / low delay) or the previous
 * reference. Returns buf_size-style success or a negative AVERROR.
 * NOTE(review): lines are elided throughout this excerpt (embedded
 * numbering skips 1127->1133, 1152->1155, 1208->1211, 1294->1298, ...);
 * all code lines kept byte-identical.
 */
1111 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1112 int *got_frame, AVPacket *avpkt)
1114 const uint8_t *buf = avpkt->data;
1115 SVQ3Context *s = avctx->priv_data;
1116 H264Context *h = &s->h;
1117 H264SliceContext *sl = &h->slice_ctx[0];
1118 int buf_size = avpkt->size;
1121 /* special case for last picture */
1122 if (buf_size == 0) {
/* Flush: emit the buffered next picture exactly once. */
1123 if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1124 ret = av_frame_ref(data, &s->next_pic->f);
1127 s->last_frame_output = 1;
1133 init_get_bits(&h->gb, buf, 8 * buf_size);
1135 sl->mb_x = sl->mb_y = sl->mb_xy = 0;
1137 if (svq3_decode_slice_header(avctx))
1140 h->pict_type = sl->slice_type;
/* Non-B picture: the previous "next" becomes the new "last" reference. */
1142 if (h->pict_type != AV_PICTURE_TYPE_B)
1143 FFSWAP(H264Picture*, s->next_pic, s->last_pic);
1145 av_frame_unref(&s->cur_pic->f);
1147 /* for skipping the frame */
1148 s->cur_pic->f.pict_type = h->pict_type;
1149 s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1151 ret = get_buffer(avctx, s->cur_pic);
/* Mirror cur_pic into the H264Context so shared h264 code sees it. */
1155 h->cur_pic_ptr = s->cur_pic;
1156 av_frame_unref(&h->cur_pic.f);
1157 h->cur_pic = *s->cur_pic;
1158 ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
/* Precompute per-4x4-block pixel offsets for luma and chroma. */
1162 for (i = 0; i < 16; i++) {
1163 h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1164 h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1166 for (i = 0; i < 16; i++) {
1167 h->block_offset[16 + i] =
1168 h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1169 h->block_offset[48 + 16 + i] =
1170 h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
/* Missing references (e.g. after a seek): substitute mid-grey frames. */
1173 if (h->pict_type != AV_PICTURE_TYPE_I) {
1174 if (!s->last_pic->f.data[0]) {
1175 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1176 ret = get_buffer(avctx, s->last_pic);
1179 memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1180 memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1181 s->last_pic->f.linesize[1]);
1182 memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1183 s->last_pic->f.linesize[2]);
1186 if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1187 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1188 ret = get_buffer(avctx, s->next_pic);
1191 memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1192 memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1193 s->next_pic->f.linesize[1]);
1194 memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1195 s->next_pic->f.linesize[2]);
1199 if (avctx->debug & FF_DEBUG_PICT_INFO)
1200 av_log(h->avctx, AV_LOG_DEBUG,
1201 "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1202 av_get_picture_type_char(h->pict_type),
1203 s->halfpel_flag, s->thirdpel_flag,
1204 s->adaptive_quant, h->slice_ctx[0].qscale, sl->slice_num);
/* Honor the caller's frame-skip discard level. */
1206 if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1207 avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1208 avctx->skip_frame >= AVDISCARD_ALL)
1211 if (s->next_p_frame_damaged) {
1212 if (h->pict_type == AV_PICTURE_TYPE_B)
1215 s->next_p_frame_damaged = 0;
/* Maintain frame_num offsets used for B-frame temporal MV scaling. */
1218 if (h->pict_type == AV_PICTURE_TYPE_B) {
1219 h->frame_num_offset = sl->slice_num - h->prev_frame_num;
1221 if (h->frame_num_offset < 0)
1222 h->frame_num_offset += 256;
1223 if (h->frame_num_offset == 0 ||
1224 h->frame_num_offset >= h->prev_frame_num_offset) {
1225 av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1229 h->prev_frame_num = h->frame_num;
1230 h->frame_num = sl->slice_num;
1231 h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1233 if (h->prev_frame_num_offset < 0)
1234 h->prev_frame_num_offset += 256;
/* Pre-fill ref_cache: column -1 is the unavailable left border. */
1237 for (m = 0; m < 2; m++) {
1239 for (i = 0; i < 4; i++) {
1241 for (j = -1; j < 4; j++)
1242 sl->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1244 sl->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
/* Main macroblock loop over the whole frame. */
1248 for (sl->mb_y = 0; sl->mb_y < h->mb_height; sl->mb_y++) {
1249 for (sl->mb_x = 0; sl->mb_x < h->mb_width; sl->mb_x++) {
1251 sl->mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
/* At (byte-aligned or zero-padded) slice end: jump to the next
 * slice and parse its header. */
1253 if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1254 ((get_bits_count(&h->gb) & 7) == 0 ||
1255 show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1256 skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1257 h->gb.size_in_bits = 8 * buf_size;
1259 if (svq3_decode_slice_header(avctx))
1262 /* TODO: support s->mb_skip_run */
1265 mb_type = svq3_get_ue_golomb(&h->gb);
/* I-frames: all types are intra (offset 8); B-frames shift types >= 4. */
1267 if (h->pict_type == AV_PICTURE_TYPE_I)
1269 else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1271 if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1272 av_log(h->avctx, AV_LOG_ERROR,
1273 "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
1278 ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
1280 if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1281 h->cur_pic.mb_type[sl->mb_x + sl->mb_y * h->mb_stride] =
1282 (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1285 ff_draw_horiz_band(avctx, &s->cur_pic->f,
1286 s->last_pic->f.data[0] ? &s->last_pic->f : NULL,
1287 16 * sl->mb_y, 16, h->picture_structure, 0,
/* Output: current picture for B/low-delay, else the delayed reference. */
1291 if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1292 ret = av_frame_ref(data, &s->cur_pic->f);
1293 else if (s->last_pic->f.data[0])
1294 ret = av_frame_ref(data, &s->last_pic->f);
1298 /* Do not output the last pic after seeking. */
1299 if (s->last_pic->f.data[0] || h->low_delay)
1302 if (h->pict_type != AV_PICTURE_TYPE_B) {
1303 FFSWAP(H264Picture*, s->cur_pic, s->next_pic);
1305 av_frame_unref(&s->cur_pic->f);
/*
 * Decoder teardown: free the three pictures and their containers,
 * release the H264Context's frame reference, and free the shared
 * h264 context state.
 */
1311 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1313 SVQ3Context *s = avctx->priv_data;
1314 H264Context *h = &s->h;
1316 free_picture(avctx, s->cur_pic);
1317 free_picture(avctx, s->next_pic);
1318 free_picture(avctx, s->last_pic);
1319 av_freep(&s->cur_pic);
1320 av_freep(&s->next_pic);
1321 av_freep(&s->last_pic);
/* h->cur_pic.f holds an extra reference taken in svq3_decode_frame(). */
1323 av_frame_unref(&h->cur_pic.f);
1325 ff_h264_free_context(h);
/*
 * Codec registration entry for the SVQ3 decoder.
 * NOTE(review): some initializers (.name, remaining capabilities, the
 * pix_fmts terminator and closing "};") are elided in this excerpt.
 */
1330 AVCodec ff_svq3_decoder = {
1332 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1333 .type = AVMEDIA_TYPE_VIDEO,
1334 .id = AV_CODEC_ID_SVQ3,
1335 .priv_data_size = sizeof(SVQ3Context),
1336 .init = svq3_decode_init,
1337 .close = svq3_decode_end,
1338 .decode = svq3_decode_frame,
1339 .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1342 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,