2 * Copyright (c) 2003 The Libav Project
4 * This file is part of Libav.
6 * Libav is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * Libav is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with Libav; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
45 #include "libavutil/attributes.h"
48 #include "mpegutils.h"
51 #include "h264data.h" // FIXME FIXME FIXME
53 #include "h264_mvpred.h"
56 #include "rectangle.h"
typedef struct SVQ3Context {
    /* reference pictures for inter prediction: next_pic is the future
     * reference (B-frames / SKIP prediction), last_pic the past one.
     * NOTE(review): several fields of this struct are elided in this view. */
    H264Picture *next_pic;
    H264Picture *last_pic;
    /* 32-bit key derived from the decompressed watermark logo; XORed
     * over slice data in svq3_decode_slice_header() when non-zero */
    uint32_t watermark_key;
    int next_p_frame_damaged;
    /* set once the final delayed picture has been output
     * (buf_size == 0 path of svq3_decode_frame()) */
    int last_frame_output;
/* motion-compensation modes as signalled per macroblock in the bitstream;
 * PREDICT_MODE derives vectors from the next picture instead of reading them */
#define FULLPEL_MODE 1
#define HALFPEL_MODE 2
#define THIRDPEL_MODE 3
#define PREDICT_MODE 4
/* dual scan (from some older h264 draft
 * NOTE(review): the rest of this comment, and the table it introduced,
 * are not visible in this chunk */

/* SVQ3-specific 4x4 coefficient scan order; selected as scan_patterns[2]
 * in svq3_decode_block() */
static const uint8_t svq3_scan[16] = {
    0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
    2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
    0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
    0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
/* scan for the 16 luma DC coefficients (scan_patterns[0], type 0 in
 * svq3_decode_block); offsets are in units of 16 within a 16x16 block
 * laid out with a stride of 4 * 16 = 64 */
static const uint8_t luma_dc_zigzag_scan[16] = {
    0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
    3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
    1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
    3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
/* (top, left) index pairs for the luma intra-prediction vlc; indexed as
 * svq3_pred_0[vlc][0|1] in svq3_decode_mb(). The entries walk the
 * anti-diagonals of a 5x5 grid.
 * NOTE(review): the first and last rows of this 25-entry table are
 * elided in this view. */
static const uint8_t svq3_pred_0[25][2] = {
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
/* intra 4x4 prediction-mode candidates, indexed as
 * svq3_pred_1[top + 1][left + 1][rank] in svq3_decode_mb();
 * -1 marks an unavailable/terminating entry */
static const int8_t svq3_pred_1[6][6][5] = {
    { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
      { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/* (run, level) pairs for short DCT coefficient vlcs, split into
 * [inter][vlc] and [intra][vlc]; read via .run / .level in
 * svq3_decode_block().
 * NOTE(review): the struct member declarations are elided in this view. */
static const struct {
} svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/* fixed-point dequantization multipliers indexed by qp (0..31);
 * results are scaled back down with a >> 20 after the IDCT */
static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
     9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
    24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
    61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
/* Dequantize and inverse-transform the 4x4 block of luma DC coefficients,
 * scattering the results into `output` at the DC positions of the 16
 * luma sub-blocks. 13/17/7 are the SVQ3 fixed-point butterfly constants.
 * NOTE(review): the function's opening brace, `temp`/`i` declarations and
 * the `stride` definition are elided in this view. */
void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
    const int qmul = svq3_dequant_coeff[qp];
    /* vertical offsets of the four DC rows within the macroblock */
    static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };

    /* horizontal (row) pass */
    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
        const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
        const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
        const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];

        temp[4 * i + 0] = z0 + z3;
        temp[4 * i + 1] = z1 + z2;
        temp[4 * i + 2] = z1 - z2;
        temp[4 * i + 3] = z0 - z3;

    /* vertical (column) pass with dequantization; `+ 0x80000 >> 20`
     * rounds the 20-bit fixed-point result to nearest */
    for (i = 0; i < 4; i++) {
        const int offset = x_offset[i];
        const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
        const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
        const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
        const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];

        output[stride *  0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
        output[stride *  2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
        output[stride *  8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
        output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
/* Dequantize and inverse-transform one 4x4 coefficient block and add the
 * result onto `dst`. When `dc` is non-zero the DC coefficient is handled
 * separately (pre-scaled and folded into the rounding term `rr`).
 * The coefficient block is cleared afterwards for reuse. */
void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
                        int stride, int qp, int dc)
    const int qmul = svq3_dequant_coeff[qp];

    /* dc == 1: standalone DC with fixed 1538 scale; otherwise dequantize
     * block[0] with qmul (13*13 compensates the two missing passes) */
        dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
                                : qmul * (block[0] >> 3) / 2);

    /* horizontal pass */
    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
        const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
        const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
        const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];

        block[0 + 4 * i] = z0 + z3;
        block[1 + 4 * i] = z1 + z2;
        block[2 + 4 * i] = z1 - z2;
        block[3 + 4 * i] = z0 - z3;

    /* vertical pass, dequantize, round (rr), shift down and saturate */
    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
        const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
        const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
        const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
        const int rr = (dc + 0x80000);

        dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
        dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
        dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
        dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));

    memset(block, 0, 16 * sizeof(int16_t));
/* Decode the run/level coefficients of one block from the bitstream into
 * `block` using the scan pattern selected by `type`:
 *   0 = luma DC, 1 = regular zigzag, 2 = svq3 scan, 3 = chroma DC.
 * NOTE(review): several branches of the vlc decode loop (run/level limits,
 * error returns) are elided in this view. */
static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
                                    int index, const int type)
    static const uint8_t *const scan_patterns[4] =
        { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };

    int run, level, limit;
    /* intra is 1 for types 2 and 3, 0 for types 0 and 1 */
    const int intra           = 3 * type >> 2;
    const uint8_t *const scan = scan_patterns[type];

    for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
        for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
            /* even vlc -> negative level */
            int sign = (vlc & 1) ? 0 : -1;

            } else if (vlc < 4) {
                level = (vlc + 9 >> 2) - run;
                /* long vlcs: look up run/level in the static tables */
                run   = svq3_dct_tables[intra][vlc].run;
                level = svq3_dct_tables[intra][vlc].level;
                ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
                ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));

            if ((index += run) >= limit)

            block[scan[index]] = (level ^ sign) - sign;
/* Motion-compensate one partition (luma, then both chroma planes at half
 * resolution) from the past (dir == 0) or future (dir != 0) reference,
 * using thirdpel or halfpel interpolation and optional averaging. */
static inline void svq3_mc_dir_part(SVQ3Context *s,
                                    int x, int y, int width, int height,
                                    int mx, int my, int dxy,
                                    int thirdpel, int dir, int avg)
    H264Context *h = &s->h;
    const H264Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
    int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2

    /* clamp out-of-frame vectors; the emulated-edge path below then
     * fabricates the missing border pixels */
    if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
        my < 0 || my >= s->v_edge_pos - height - 1) {
        mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
        my = av_clip(my, -16, s->v_edge_pos - height + 15);

    /* form component predictions */
    dest = h->cur_pic.f.data[0] + x + y * h->linesize;
    src  = pic->f.data[0] + mx + my * h->linesize;

        h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
                                 h->linesize, h->linesize,
                                 width + 1, height + 1,
                                 mx, my, s->h_edge_pos, s->v_edge_pos);
        src = h->edge_emu_buffer;

        /* thirdpel path uses the tpel dsp tables, otherwise hpel */
        (avg ? s->tdsp.avg_tpel_pixels_tab
             : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
        (avg ? s->hdsp.avg_pixels_tab
             : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,

    if (!(h->flags & CODEC_FLAG_GRAY)) {
        /* halve coordinates/vectors for 4:2:0 chroma (rounding toward
         * the block position) */
        mx     = mx + (mx < (int) x) >> 1;
        my     = my + (my < (int) y) >> 1;
        height = height >> 1;

        for (i = 1; i < 3; i++) {
            dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
            src  = pic->f.data[i] + mx + my * h->uvlinesize;

                h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
                                         h->uvlinesize, h->uvlinesize,
                                         width + 1, height + 1,
                                         mx, my, (s->h_edge_pos >> 1),
                src = h->edge_emu_buffer;

                (avg ? s->tdsp.avg_tpel_pixels_tab
                     : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
                (avg ? s->hdsp.avg_pixels_tab
                     : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/* Decode/derive motion vectors for all partitions of the current
 * macroblock and run motion compensation for each. `size` encodes the
 * partition geometry; vectors are either predicted + a coded differential,
 * or (PREDICT_MODE) scaled from the co-located vectors of the next picture.
 * NOTE(review): the tail of the signature (dir/avg params) and some
 * branches are elided in this view. */
static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
    int i, j, k, mx, my, dx, dy, x, y;
    H264Context *h          = &s->h;
    const int part_width    = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height   = 16 >> ((unsigned)(size + 1) / 3);
    const int extra_width   = (mode == PREDICT_MODE) ? -16 * 6 : 0;
    /* edge positions in sixth-pel units (6 subpositions per pixel) */
    const int h_edge_pos    = 6 * (s->h_edge_pos - part_width) - extra_width;
    const int v_edge_pos    = 6 * (s->v_edge_pos - part_height) - extra_width;

    for (i = 0; i < 16; i += part_height)
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4 * h->mb_x + (j >> 2)) +
                             (4 * h->mb_y + (i >> 2)) * h->b_stride;
            x = 16 * h->mb_x + j;
            y = 16 * h->mb_y + i;
            /* k: scan8-compatible index of this 4x4 sub-block */
            k = (j >> 2 & 1) + (i >> 1 & 2) +
                (j >> 1 & 4) + (i & 8);

            if (mode != PREDICT_MODE) {
                pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
                /* NOTE(review): `<< 1` on a possibly negative int16 MV is
                 * formally UB in C; upstream later switched to `* 2` */
                mx = s->next_pic->motion_val[0][b_xy][0] << 1;
                my = s->next_pic->motion_val[0][b_xy][1] << 1;

                    /* temporal scaling of the co-located vector, with
                     * round-to-nearest via `+ 1 >> 1` */
                    mx = mx * h->frame_num_offset /
                         h->prev_frame_num_offset + 1 >> 1;
                    my = my * h->frame_num_offset /
                         h->prev_frame_num_offset + 1 >> 1;
                    mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
                         h->prev_frame_num_offset + 1 >> 1;
                    my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
                         h->prev_frame_num_offset + 1 >> 1;

            /* clip motion vector prediction to frame border */
            mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
            my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {
                dy = svq3_get_se_golomb(&h->gb);
                dx = svq3_get_se_golomb(&h->gb);

                if (dx == INVALID_VLC || dy == INVALID_VLC) {
                    av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");

            /* compute motion vector */
            if (mode == THIRDPEL_MODE) {
                mx  = (mx + 1 >> 1) + dx;
                my  = (my + 1 >> 1) + dy;
                /* unsigned bias trick gives floor division toward -inf */
                fx  = (unsigned)(mx + 0x3000) / 3 - 0x1000;
                fy  = (unsigned)(my + 0x3000) / 3 - 0x1000;
                dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 fx, fy, dxy, 1, dir, avg);
            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                mx  = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
                my  = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
                dxy = (mx & 1) + 2 * (my & 1);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx >> 1, my >> 1, dxy, 0, dir, avg);
                /* FULLPEL: round sixth-pel prediction to integer pel */
                mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
                my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx, my, 0, 0, dir, avg);

            /* update mv_cache */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx, my);

                if (part_height == 8 && i < 8) {
                    AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);

                    if (part_width == 8 && j < 8)
                        AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
                if (part_width == 8 && j < 8)
                    AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
                if (part_width == 4 || part_height == 4)
                    AV_WN32A(h->mv_cache[dir][scan8[k]], mv);

            /* write back motion vectors */
            fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
                           part_width >> 2, part_height >> 2, h->b_stride,
                           pack16to32(mx, my), 4);
/* Decode one macroblock: classify mb_type (SKIP / INTER / INTRA4x4 /
 * INTRA16x16), decode prediction modes and motion, then the residual
 * coefficient blocks. Returns 0 on success, negative on bitstream error.
 * NOTE(review): many statements of this large function are elided in this
 * view; comments below only describe what is visible. */
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
    H264Context *h = &s->h;
    H264SliceContext *sl = &h->slice_ctx[0];
    int i, j, k, m, dir, mode;
    const int mb_xy = h->mb_xy;
    const int b_xy  = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;

    /* neighbour availability masks for intra prediction */
    h->top_samples_available      = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
    h->left_samples_available     = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
    h->topright_samples_available = 0xFFFF;

    if (mb_type == 0) { /* SKIP */
        if (h->pict_type == AV_PICTURE_TYPE_P ||
            s->next_pic->mb_type[mb_xy] == -1) {
            /* zero-MV copy from the reference(s) */
            svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,

            if (h->pict_type == AV_PICTURE_TYPE_B)
                svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,

            mb_type = MB_TYPE_SKIP;
            /* B-SKIP: direct prediction from the co-located next-pic MB */
            mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)

            mb_type = MB_TYPE_16x16;
    } else if (mb_type < 8) { /* INTER */
        /* interpolation mode is signalled relative to the stream flags */
        if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
            mode = THIRDPEL_MODE;
        else if (s->halfpel_flag &&
                 s->thirdpel_flag == !get_bits1(&h->gb))

        /* note ref_cache should contain here:

        /* fill mv_cache/ref_cache from decoded neighbours */
        for (m = 0; m < 2; m++) {
            if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
                for (i = 0; i < 4; i++)
                    AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
                              h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
                for (i = 0; i < 4; i++)
                    AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);

                memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
                       h->cur_pic.motion_val[m][b_xy - h->b_stride],
                       4 * 2 * sizeof(int16_t));
                memset(&h->ref_cache[m][scan8[0] - 1 * 8],
                       (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);

                if (h->mb_x < h->mb_width - 1) {
                    AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
                              h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
                    h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
                        (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
                         h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
                    h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;

                    AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
                              h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
                    h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
                        (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
                    h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;

                memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
                       PART_NOT_AVAILABLE, 8);

            if (h->pict_type != AV_PICTURE_TYPE_B)

        /* decode motion vector(s) and form prediction(s) */
        if (h->pict_type == AV_PICTURE_TYPE_P) {
            if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
        } else { /* AV_PICTURE_TYPE_B */
            if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)

                for (i = 0; i < 4; i++)
                    memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
                           0, 4 * 2 * sizeof(int16_t));

            if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)

                for (i = 0; i < 4; i++)
                    memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
                           0, 4 * 2 * sizeof(int16_t));

        mb_type = MB_TYPE_16x16;
    } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
        memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));

            /* load left/top neighbour prediction modes into the cache */
            for (i = 0; i < 4; i++)
                h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
            if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
                h->left_samples_available = 0x5F5F;

            h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
            h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
            h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
            h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];

            if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
                h->top_samples_available = 0x33FF;

        if (mb_type == 8) {
            /* decode prediction codes for luma blocks */
            for (i = 0; i < 16; i += 2) {
                vlc = svq3_get_ue_golomb(&h->gb);

                    av_log(h->avctx, AV_LOG_ERROR,
                           "luma prediction:%"PRIu32"\n", vlc);

                left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
                top  = &h->intra4x4_pred_mode_cache[scan8[i] - 8];

                /* one vlc codes two 4x4 modes via the context tables */
                left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
                left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];

                if (left[1] == -1 || left[2] == -1) {
                    av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
        } else { /* mb_type == 33, DC_128_PRED block type */
            for (i = 0; i < 4; i++)
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);

        write_back_intra_pred_mode(h);

        if (mb_type == 8) {
            ff_h264_check_intra4x4_pred_mode(h);

            h->top_samples_available  = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
            h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
            for (i = 0; i < 4; i++)
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);

            h->top_samples_available  = 0x33FF;
            h->left_samples_available = 0x5F5F;

        mb_type = MB_TYPE_INTRA4x4;
    } else { /* INTRA16x16 */
        dir = i_mb_type_info[mb_type - 8].pred_mode;
        dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;

        if ((sl->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) < 0) {
            av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
            return sl->intra16x16_pred_mode;

        cbp     = i_mb_type_info[mb_type - 8].cbp;
        mb_type = MB_TYPE_INTRA16x16;

    /* invalidate motion vectors for intra MBs in inter pictures */
    if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
        for (i = 0; i < 4; i++)
            memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
                   0, 4 * 2 * sizeof(int16_t));
        if (h->pict_type == AV_PICTURE_TYPE_B) {
            for (i = 0; i < 4; i++)
                memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
                       0, 4 * 2 * sizeof(int16_t));

    if (!IS_INTRA4x4(mb_type)) {
        memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);

    if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
        memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));

    /* coded block pattern */
    if (!IS_INTRA16x16(mb_type) &&
        (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
        if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48) {
            av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);

        cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
                                : golomb_to_inter_cbp[vlc];

    if (IS_INTRA16x16(mb_type) ||
        (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
        sl->qscale += svq3_get_se_golomb(&h->gb);

        if (sl->qscale > 31u) {
            av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", sl->qscale);

    if (IS_INTRA16x16(mb_type)) {
        AV_ZERO128(h->mb_luma_dc[0] + 0);
        AV_ZERO128(h->mb_luma_dc[0] + 8);
        if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
            av_log(h->avctx, AV_LOG_ERROR,
                   "error while decoding intra luma dc\n");

    /* residual luma blocks */
        const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
        const int type  = ((sl->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);

        for (i = 0; i < 4; i++)
            if ((cbp & (1 << i))) {
                for (j = 0; j < 4; j++) {
                    k = index ? (1 * (j & 1) + 2 * (i & 1) +
                                 2 * (j & 2) + 4 * (i & 2))
                    h->non_zero_count_cache[scan8[k]] = 1;

                    if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
                        av_log(h->avctx, AV_LOG_ERROR,
                               "error while decoding block\n");

            /* chroma DC then chroma AC */
            for (i = 1; i < 3; ++i)
                if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
                    av_log(h->avctx, AV_LOG_ERROR,
                           "error while decoding chroma dc block\n");

                for (i = 1; i < 3; i++) {
                    for (j = 0; j < 4; j++) {
                        h->non_zero_count_cache[scan8[k]] = 1;

                        if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
                            av_log(h->avctx, AV_LOG_ERROR,
                                   "error while decoding chroma ac block\n");

    h->cur_pic.mb_type[mb_xy] = mb_type;

    if (IS_INTRA(mb_type))
        sl->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
/* Parse an SVQ3 slice header: slice start code, next-slice offset,
 * optional watermark de-scrambling, slice type, qscale and flags.
 * Also resets intra predictors along the slice boundary. */
static int svq3_decode_slice_header(AVCodecContext *avctx)
    SVQ3Context *s = avctx->priv_data;
    H264Context *h = &s->h;
    H264SliceContext *sl = &h->slice_ctx[0];
    const int mb_xy = h->mb_xy;

    header = get_bits(&h->gb, 8);

    /* low 5 bits must be 1 or 2 and a length code must be present */
    if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
        av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
        int length = header >> 5 & 3;

        s->next_slice_index = get_bits_count(&h->gb) +
                              8 * show_bits(&h->gb, 8 * length) +

        if (s->next_slice_index > h->gb.size_in_bits) {
            av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");

        h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
        skip_bits(&h->gb, 8);

        if (s->watermark_key) {
            /* undo the watermark scrambling in place */
            uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
            AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
                    header ^ s->watermark_key);
            memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
                   &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);

        skip_bits_long(&h->gb, 0);

    if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
        av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);

    h->slice_type = golomb_to_pict_type[slice_id];

    if ((header & 0x9F) == 2) {
        /* skip-run field, sized by the mb count */
        i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
        h->mb_skip_run = get_bits(&h->gb, i) -
                         (h->mb_y * h->mb_width + h->mb_x);

    h->slice_num      = get_bits(&h->gb, 8);
    sl->qscale        = get_bits(&h->gb, 5);
    s->adaptive_quant = get_bits1(&h->gb);

    /* unknown/unused fields */
    skip_bits(&h->gb, 2);

    while (get_bits1(&h->gb))
        skip_bits(&h->gb, 8);

    /* reset intra predictors and invalidate motion vector references */
        memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
               -1, 4 * sizeof(int8_t));
        memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
               -1, 8 * sizeof(int8_t) * h->mb_x);
        memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
               -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));

        h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/* Decoder init: allocate the three picture slots, set up DSP contexts,
 * then parse the "SEQH" extradata block (frame size, pel flags, optional
 * zlib-compressed watermark from which watermark_key is derived) and
 * size the macroblock tables. */
static av_cold int svq3_decode_init(AVCodecContext *avctx)
    SVQ3Context *s = avctx->priv_data;
    H264Context *h = &s->h;
    unsigned char *extradata;
    unsigned char *extradata_end;
    int marker_found = 0;

    s->cur_pic  = av_mallocz(sizeof(*s->cur_pic));
    s->last_pic = av_mallocz(sizeof(*s->last_pic));
    s->next_pic = av_mallocz(sizeof(*s->next_pic));
    if (!s->next_pic || !s->last_pic || !s->cur_pic) {
        /* free(NULL) is fine, so no per-pointer checks needed */
        av_freep(&s->cur_pic);
        av_freep(&s->last_pic);
        av_freep(&s->next_pic);
        return AVERROR(ENOMEM);

    if (ff_h264_decode_init(avctx) < 0)

    ff_hpeldsp_init(&s->hdsp, avctx->flags);
    ff_tpeldsp_init(&s->tdsp);

    h->flags = avctx->flags;
    h->picture_structure = PICT_FRAME;
    avctx->pix_fmt     = AV_PIX_FMT_YUVJ420P;
    avctx->color_range = AVCOL_RANGE_JPEG;

    h->slice_ctx[0].chroma_qp[0] = h->slice_ctx[0].chroma_qp[1] = 4;
    h->chroma_x_shift = h->chroma_y_shift = 1;

    s->thirdpel_flag = 1;

    /* prowl for the "SEQH" marker in the extradata */
    extradata     = (unsigned char *)avctx->extradata;
    extradata_end = avctx->extradata + avctx->extradata_size;
    for (m = 0; m + 8 < avctx->extradata_size; m++) {
        if (!memcmp(extradata, "SEQH", 4)) {

    /* if a match was found, parse the extra data */
        size = AV_RB32(&extradata[4]);
        if (size > extradata_end - extradata - 8)
            return AVERROR_INVALIDDATA;
        init_get_bits(&gb, extradata + 8, size * 8);

        /* 'frame size code' and optional 'width, height' */
        frame_size_code = get_bits(&gb, 3);
        switch (frame_size_code) {
            /* code 7: explicit 12-bit dimensions follow */
            avctx->width  = get_bits(&gb, 12);
            avctx->height = get_bits(&gb, 12);

        s->halfpel_flag  = get_bits1(&gb);
        s->thirdpel_flag = get_bits1(&gb);

        h->low_delay = get_bits1(&gb);

        while (get_bits1(&gb))

        s->unknown_flag       = get_bits1(&gb);
        avctx->has_b_frames   = !h->low_delay;
        if (s->unknown_flag) {
            /* watermark: dimensions + misc fields, then zlib data */
            unsigned watermark_width  = svq3_get_ue_golomb(&gb);
            unsigned watermark_height = svq3_get_ue_golomb(&gb);
            int u1                    = svq3_get_ue_golomb(&gb);
            int u2                    = get_bits(&gb, 8);
            int u3                    = get_bits(&gb, 2);
            int u4                    = svq3_get_ue_golomb(&gb);
            unsigned long buf_len     = watermark_width *
                                        watermark_height * 4;
            int offset                = get_bits_count(&gb) + 7 >> 3;

            /* overflow check on width * height * 4 */
            if (watermark_height > 0 &&
                (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)

            buf = av_malloc(buf_len);
            av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
                   watermark_width, watermark_height);
            av_log(avctx, AV_LOG_DEBUG,
                   "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
                   u1, u2, u3, u4, offset);
            if (uncompress(buf, &buf_len, extradata + 8 + offset,
                           size - offset) != Z_OK) {
                av_log(avctx, AV_LOG_ERROR,
                       "could not uncompress watermark logo\n");

            s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
            s->watermark_key = s->watermark_key << 16 | s->watermark_key;
            av_log(avctx, AV_LOG_DEBUG,
                   "watermark key %#"PRIx32"\n", s->watermark_key);
            av_log(avctx, AV_LOG_ERROR,
                   "this svq3 file contains watermark which need zlib support compiled in\n");

    h->width  = avctx->width;
    h->height = avctx->height;
    h->mb_width  = (h->width + 15) / 16;
    h->mb_height = (h->height + 15) / 16;
    h->mb_stride = h->mb_width + 1;
    h->mb_num    = h->mb_width * h->mb_height;
    h->b_stride  = 4 * h->mb_width;
    s->h_edge_pos = h->mb_width * 16;
    s->v_edge_pos = h->mb_height * 16;

    if (ff_h264_alloc_tables(h) < 0) {
        av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
        return AVERROR(ENOMEM);
/* Release all buffers owned by one picture slot (motion vectors,
 * reference indices, mb types) and unref its frame. */
static void free_picture(AVCodecContext *avctx, H264Picture *pic)
    for (i = 0; i < 2; i++) {
        av_buffer_unref(&pic->motion_val_buf[i]);
        av_buffer_unref(&pic->ref_index_buf[i]);
    av_buffer_unref(&pic->mb_type_buf);

    av_frame_unref(&pic->f);
/* Lazily allocate the per-picture side tables, then obtain frame data
 * via ff_get_buffer. Also sets up the edge emulation buffer and the
 * cached line sizes on first use. Returns 0 or a negative AVERROR. */
static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
    SVQ3Context *s = avctx->priv_data;
    H264Context *h = &s->h;
    const int big_mb_num    = h->mb_stride * (h->mb_height + 1) + 1;
    const int mb_array_size = h->mb_stride * h->mb_height;
    const int b4_stride     = h->mb_width * 4 + 1;
    const int b4_array_size = b4_stride * h->mb_height * 4;

    if (!pic->motion_val_buf[0]) {
        pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
        if (!pic->mb_type_buf)
            return AVERROR(ENOMEM);
        /* offset so that mb_type[-1] rows are addressable */
        pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;

        for (i = 0; i < 2; i++) {
            pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
            pic->ref_index_buf[i]  = av_buffer_allocz(4 * mb_array_size);
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
                ret = AVERROR(ENOMEM);

            pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
            pic->ref_index[i]  = pic->ref_index_buf[i]->data;

    /* B-frames are never used as references */
    pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);

    ret = ff_get_buffer(avctx, &pic->f,
                        pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);

    if (!h->edge_emu_buffer) {
        h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
        if (!h->edge_emu_buffer)
            return AVERROR(ENOMEM);

    h->linesize   = pic->f.linesize[0];
    h->uvlinesize = pic->f.linesize[1];

    /* error path: release whatever was allocated for this picture */
    free_picture(avctx, pic);
/* Decode one access unit: parse the slice header, set up current/
 * reference pictures, iterate over all macroblocks (handling mid-frame
 * slice headers), then output either the current picture (B / low delay)
 * or the delayed previous one.
 * NOTE(review): several statements are elided in this view; comments
 * describe only the visible flow. */
static int svq3_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
    const uint8_t *buf = avpkt->data;
    SVQ3Context *s     = avctx->priv_data;
    H264Context *h     = &s->h;
    int buf_size       = avpkt->size;

    /* special case for last picture: flush the delayed frame once */
    if (buf_size == 0) {
        if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
            ret = av_frame_ref(data, &s->next_pic->f);
            s->last_frame_output = 1;

    init_get_bits(&h->gb, buf, 8 * buf_size);

    h->mb_x = h->mb_y = h->mb_xy = 0;

    if (svq3_decode_slice_header(avctx))

    h->pict_type = h->slice_type;

    /* non-B frames rotate the reference pair */
    if (h->pict_type != AV_PICTURE_TYPE_B)
        FFSWAP(H264Picture*, s->next_pic, s->last_pic);

    av_frame_unref(&s->cur_pic->f);

    /* for skipping the frame */
    s->cur_pic->f.pict_type = h->pict_type;
    s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);

    ret = get_buffer(avctx, s->cur_pic);

    h->cur_pic_ptr = s->cur_pic;
    av_frame_unref(&h->cur_pic.f);
    /* shallow copy plus an explicit frame ref for the embedded copy */
    h->cur_pic     = *s->cur_pic;
    ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);

    for (i = 0; i < 16; i++) {
        h->block_offset[i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
        h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
    for (i = 0; i < 16; i++) {
        h->block_offset[16 + i]      =
        h->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
        h->block_offset[48 + 16 + i] =
        h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);

    if (h->pict_type != AV_PICTURE_TYPE_I) {
        if (!s->last_pic->f.data[0]) {
            /* missing reference: synthesize a mid-grey frame */
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            ret = get_buffer(avctx, s->last_pic);
            memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
            memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
                   s->last_pic->f.linesize[1]);
            memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
                   s->last_pic->f.linesize[2]);

        if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            ret = get_buffer(avctx, s->next_pic);
            memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
            memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
                   s->next_pic->f.linesize[1]);
            memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
                   s->next_pic->f.linesize[2]);

    if (avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(h->avctx, AV_LOG_DEBUG,
               "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
               av_get_picture_type_char(h->pict_type),
               s->halfpel_flag, s->thirdpel_flag,
               s->adaptive_quant, h->slice_ctx[0].qscale, h->slice_num);

    if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
        avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
        avctx->skip_frame >= AVDISCARD_ALL)

    if (s->next_p_frame_damaged) {
        if (h->pict_type == AV_PICTURE_TYPE_B)
        s->next_p_frame_damaged = 0;

    /* frame_num bookkeeping uses slice_num as an 8-bit picture id */
    if (h->pict_type == AV_PICTURE_TYPE_B) {
        h->frame_num_offset = h->slice_num - h->prev_frame_num;

        if (h->frame_num_offset < 0)
            h->frame_num_offset += 256;
        if (h->frame_num_offset == 0 ||
            h->frame_num_offset >= h->prev_frame_num_offset) {
            av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
        h->prev_frame_num        = h->frame_num;
        h->frame_num             = h->slice_num;
        h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;

        if (h->prev_frame_num_offset < 0)
            h->prev_frame_num_offset += 256;

    for (m = 0; m < 2; m++) {
        for (i = 0; i < 4; i++) {
            for (j = -1; j < 4; j++)
                h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
                h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;

    for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
        for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
            h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;

            /* detect a new slice header at a byte-aligned position */
            if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
                ((get_bits_count(&h->gb) & 7) == 0 ||
                 show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
                skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
                h->gb.size_in_bits = 8 * buf_size;

                if (svq3_decode_slice_header(avctx))

                /* TODO: support s->mb_skip_run */

            mb_type = svq3_get_ue_golomb(&h->gb);

            /* remap coded mb_type per picture type */
            if (h->pict_type == AV_PICTURE_TYPE_I)
            else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
            if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "error while decoding MB %d %d\n", h->mb_x, h->mb_y);

                ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);

            if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
                h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
                    (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;

        ff_draw_horiz_band(avctx, &s->cur_pic->f,
                           s->last_pic->f.data[0] ? &s->last_pic->f : NULL,
                           16 * h->mb_y, 16, h->picture_structure, 0,

    /* output: current picture for B/low-delay, else the delayed one */
    if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
        ret = av_frame_ref(data, &s->cur_pic->f);
    else if (s->last_pic->f.data[0])
        ret = av_frame_ref(data, &s->last_pic->f);

    /* Do not output the last pic after seeking. */
    if (s->last_pic->f.data[0] || h->low_delay)

    if (h->pict_type != AV_PICTURE_TYPE_B) {
        FFSWAP(H264Picture*, s->cur_pic, s->next_pic);
        av_frame_unref(&s->cur_pic->f);
/* Decoder teardown: release the three picture slots allocated in
 * svq3_decode_init(), the embedded frame copy and the h264 context. */
static av_cold int svq3_decode_end(AVCodecContext *avctx)
    SVQ3Context *s = avctx->priv_data;
    H264Context *h = &s->h;

    free_picture(avctx, s->cur_pic);
    free_picture(avctx, s->next_pic);
    free_picture(avctx, s->last_pic);
    av_freep(&s->cur_pic);
    av_freep(&s->next_pic);
    av_freep(&s->last_pic);

    av_frame_unref(&h->cur_pic.f);

    ff_h264_free_context(h);
/* Codec registration entry for the SVQ3 decoder.
 * NOTE(review): the .name field and the tail of the initializer list
 * are elided in this view. */
AVCodec ff_svq3_decoder = {
    .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SVQ3,
    .priv_data_size = sizeof(SVQ3Context),
    .init           = svq3_decode_init,
    .close          = svq3_decode_end,
    .decode         = svq3_decode_frame,
    .capabilities   = CODEC_CAP_DRAW_HORIZ_BAND |
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,