2 * Copyright (c) 2003 The Libav Project
4 * This file is part of Libav.
6 * Libav is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * Libav is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with Libav; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
43 #include "libavutil/attributes.h"
46 #include "mpegutils.h"
49 #include "h264data.h" // FIXME FIXME FIXME
51 #include "h264_mvpred.h"
54 #include "rectangle.h"
72 H264Picture *next_pic;
73 H264Picture *last_pic;
78 uint32_t watermark_key;
80 int next_p_frame_damaged;
83 int last_frame_output;
/* Motion-compensation modes signalled per macroblock/partition.
 * PREDICT_MODE derives MVs from the co-located next-picture block
 * (used for B-frame direct/skip prediction). */
86 #define FULLPEL_MODE 1
87 #define HALFPEL_MODE 2
88 #define THIRDPEL_MODE 3
89 #define PREDICT_MODE 4
91 /* dual scan (from some older h264 draft)
/* SVQ3-specific 4x4 coefficient scan order; entries are x + y*4
 * positions inside the 4x4 block (a dual/alternate scan, per the
 * comment above referencing an older H.264 draft). */
100 static const uint8_t svq3_scan[16] = {
101     0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
102     2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
103     0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
104     0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
/* Zigzag scan for the 16 luma DC coefficients; entries are offsets into
 * the macroblock coefficient array (x * 16 + y * 64 addressing). */
107 static const uint8_t luma_dc_zigzag_scan[16] = {
108     0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
109     3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
110     1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
111     3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
/* Maps a decoded luma-prediction VLC value to a pair of indices into
 * svq3_pred_1; the pairs walk anti-diagonals of a 5x5 grid
 * (used in svq3_decode_mb as svq3_pred_0[vlc][0..1]). */
114 static const uint8_t svq3_pred_0[25][2] = {
117     { 0, 2 }, { 1, 1 }, { 2, 0 },
118     { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
119     { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
120     { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
121     { 2, 4 }, { 3, 3 }, { 4, 2 },
/* Intra 4x4 prediction-mode lookup: indexed by [top mode + 1][left mode + 1]
 * [rank from svq3_pred_0]; -1 entries are invalid combinations (rejected as
 * "weird prediction" in svq3_decode_mb). */
126 static const int8_t svq3_pred_1[6][6][5] = {
127     { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
128       { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
129     { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
130       { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
131     { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
132       { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
133     { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
134       { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
135     { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
136       { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
137     { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
138       { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/* Run/level tables for DCT coefficient VLCs; [0] is used for inter
 * blocks, [1] for intra (selected by the `intra` flag in
 * svq3_decode_block). Each entry is { run, level }. */
141 static const struct {
144 } svq3_dct_tables[2][16] = {
145     { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
146       { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
147     { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
148       { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/* Dequantization multipliers indexed by qscale (0..31); applied with a
 * +0x80000 rounding term and >>20 in the IDCT routines below. */
151 static const uint32_t svq3_dequant_coeff[32] = {
152      3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
153      9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
154     24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
155     61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
/*
 * Dequantize and inverse-transform the 16 luma DC coefficients of a
 * macroblock. SVQ3 uses a 13/17/7-multiplier integer transform instead
 * of the H.264 Hadamard. Results are scattered into `output` at the
 * row offsets from x_offset and column strides 0/2/8/10.
 * NOTE(review): this dump omits some interior lines (declarations of
 * i/temp/stride and closing braces) — comments describe visible code only.
 */
158 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
160     const int qmul = svq3_dequant_coeff[qp];
164     static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
    /* first (horizontal) butterfly pass over each row of 4 coefficients */
166     for (i = 0; i < 4; i++) {
167         const int z0 = 13 * (input[4 * i + 0] +      input[4 * i + 2]);
168         const int z1 = 13 * (input[4 * i + 0] -      input[4 * i + 2]);
169         const int z2 =  7 *  input[4 * i + 1] - 17 * input[4 * i + 3];
170         const int z3 = 17 *  input[4 * i + 1] +  7 * input[4 * i + 3];
172         temp[4 * i + 0] = z0 + z3;
173         temp[4 * i + 1] = z1 + z2;
174         temp[4 * i + 2] = z1 - z2;
175         temp[4 * i + 3] = z0 - z3;
    /* second (vertical) pass; dequantize, round with 0x80000 and shift */
178     for (i = 0; i < 4; i++) {
179         const int offset = x_offset[i];
180         const int z0     = 13 * (temp[4 * 0 + i] +      temp[4 * 2 + i]);
181         const int z1     = 13 * (temp[4 * 0 + i] -      temp[4 * 2 + i]);
182         const int z2     =  7 *  temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
183         const int z3     = 17 *  temp[4 * 1 + i] +  7 * temp[4 * 3 + i];
185         output[stride *  0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
186         output[stride *  2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
187         output[stride *  8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
188         output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
/*
 * Dequantize, inverse-transform one 4x4 block and add the result to
 * `dst` (clipped to 8 bits). `dc` selects special DC handling: when
 * nonzero the DC term is precomputed (dc == 1 uses the fixed 1538
 * multiplier, otherwise qmul on block[0] >> 3). Clears `block` when done.
 * NOTE(review): some interior lines (declarations, braces, the dc
 * conditionals) are missing from this dump.
 */
193 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
194                         int stride, int qp, int dc)
196     const int qmul = svq3_dequant_coeff[qp];
    /* precompute the DC contribution folded into the rounding term rr */
200         dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
201                                 : qmul * (block[0] >> 3) / 2);
    /* horizontal pass */
205     for (i = 0; i < 4; i++) {
206         const int z0 = 13 * (block[0 + 4 * i] +      block[2 + 4 * i]);
207         const int z1 = 13 * (block[0 + 4 * i] -      block[2 + 4 * i]);
208         const int z2 =  7 *  block[1 + 4 * i] - 17 * block[3 + 4 * i];
209         const int z3 = 17 *  block[1 + 4 * i] +  7 * block[3 + 4 * i];
211         block[0 + 4 * i] = z0 + z3;
212         block[1 + 4 * i] = z1 + z2;
213         block[2 + 4 * i] = z1 - z2;
214         block[3 + 4 * i] = z0 - z3;
    /* vertical pass: dequantize, add DC+rounding, shift, add to dst */
217     for (i = 0; i < 4; i++) {
218         const int z0 = 13 * (block[i + 4 * 0] +      block[i + 4 * 2]);
219         const int z1 = 13 * (block[i + 4 * 0] -      block[i + 4 * 2]);
220         const int z2 =  7 *  block[i + 4 * 1] - 17 * block[i + 4 * 3];
221         const int z3 = 17 *  block[i + 4 * 1] +  7 * block[i + 4 * 3];
222         const int rr = (dc + 0x80000);
224         dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
225         dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
226         dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
227         dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
    /* coefficients consumed; reset for the next block */
230     memset(block, 0, 16 * sizeof(int16_t));
/*
 * Decode run/level coefficient VLCs for one block into `block`, using
 * the scan pattern chosen by `type` (0 = luma DC zigzag, 1 = zigzag,
 * 2 = svq3 dual scan, 3 = chroma DC). Returns nonzero on invalid data
 * (per callers' error handling). `index` is the first coefficient to
 * decode; limit advances in steps of 8 after the first 16>>intra coeffs.
 * NOTE(review): many interior lines (vlc declaration, small-vlc branches,
 * range checks, return statements) are missing from this dump.
 */
233 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
234                                     int index, const int type)
236     static const uint8_t *const scan_patterns[4] =
237     { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
239     int run, level, limit;
    /* type 2/3 => intra tables (3*type>>2 gives 0,0,1,1 for type 0..3) */
241     const int intra           = 3 * type >> 2;
242     const uint8_t *const scan = scan_patterns[type];
244     for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
245         for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
            /* even vlc => negative coefficient (sign-magnitude folding) */
246             int sign = (vlc & 1) ? 0 : -1;
253             } else if (vlc < 4) {
258                     level = (vlc + 9 >> 2) - run;
            /* larger vlc values: table lookup */
262                     run   = svq3_dct_tables[intra][vlc].run;
263                     level = svq3_dct_tables[intra][vlc].level;
267                         ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
271                         ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
275             if ((index += run) >= limit)
            /* apply sign via xor/subtract trick and place via scan order */
278             block[scan[index]] = (level ^ sign) - sign;
/*
 * Motion-compensate one partition (luma, then both chroma planes unless
 * CODEC_FLAG_GRAY) at (x,y) of size width x height.
 * dir == 0 references s->last_pic, otherwise s->next_pic.
 * thirdpel selects the tpel DSP functions, avg selects avg vs put.
 * Out-of-frame references are clamped and routed through the
 * edge-emulation buffer.
 * NOTE(review): interior lines (declarations, emulated_edge condition,
 * chroma width halving, some call arguments) are missing from this dump.
 */
289 static inline void svq3_mc_dir_part(SVQ3Context *s,
290                                     int x, int y, int width, int height,
291                                     int mx, int my, int dxy,
292                                     int thirdpel, int dir, int avg)
294     H264Context *h = &s->h;
295     const H264Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
298     int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
    /* clamp MVs that point (partially) outside the frame */
303     if (mx < 0 || mx >= s->h_edge_pos - width  - 1 ||
304         my < 0 || my >= s->v_edge_pos - height - 1) {
306         mx = av_clip(mx, -16, s->h_edge_pos - width  + 15);
307         my = av_clip(my, -16, s->v_edge_pos - height + 15);
310     /* form component predictions */
311     dest = h->cur_pic.f.data[0] + x + y * h->linesize;
312     src  = pic->f.data[0] + mx + my * h->linesize;
    /* reference overlaps the picture edge: copy through emu buffer */
315         h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
316                                  h->linesize, h->linesize,
317                                  width + 1, height + 1,
318                                  mx, my, s->h_edge_pos, s->v_edge_pos);
319         src = h->edge_emu_buffer;
322         (avg ? h->dsp.avg_tpel_pixels_tab
323              : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
326         (avg ? s->hdsp.avg_pixels_tab
327              : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
    /* chroma planes at half resolution */
330     if (!(h->flags & CODEC_FLAG_GRAY)) {
331         mx     = mx + (mx < (int) x) >> 1;
332         my     = my + (my < (int) y) >> 1;
334         height = height >> 1;
337         for (i = 1; i < 3; i++) {
338             dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
339             src  = pic->f.data[i] + mx + my * h->uvlinesize;
342                 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
343                                          h->uvlinesize, h->uvlinesize,
344                                          width + 1, height + 1,
345                                          mx, my, (s->h_edge_pos >> 1),
347                 src = h->edge_emu_buffer;
350                 (avg ? h->dsp.avg_tpel_pixels_tab
351                      : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
355                 (avg ? s->hdsp.avg_pixels_tab
356                      : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/*
 * Decode (or predict) motion vectors for every partition of the current
 * macroblock and perform motion compensation via svq3_mc_dir_part().
 * `size` encodes the partition geometry, `mode` is one of the *_MODE
 * macros, `dir` selects forward/backward reference, `avg` averaging.
 * MVs are stored in 1/6-pel units internally (hence the *6 edge math
 * and the /3, /6 conversions below). Returns <0 on invalid MV VLCs.
 * NOTE(review): interior lines (declarations, some branch headers and
 * returns) are missing from this dump.
 */
363 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
366     int i, j, k, mx, my, dx, dy, x, y;
367     H264Context *h          = &s->h;
368     const int part_width    = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
369     const int part_height   = 16 >> ((unsigned)(size + 1) / 3);
370     const int extra_width   = (mode == PREDICT_MODE) ? -16 * 6 : 0;
371     const int h_edge_pos    = 6 * (s->h_edge_pos - part_width) - extra_width;
372     const int v_edge_pos    = 6 * (s->v_edge_pos - part_height) - extra_width;
374     for (i = 0; i < 16; i += part_height)
375         for (j = 0; j < 16; j += part_width) {
376             const int b_xy = (4 * h->mb_x + (j >> 2)) +
377                              (4 * h->mb_y + (i >> 2)) * h->b_stride;
379             x = 16 * h->mb_x + j;
380             y = 16 * h->mb_y + i;
            /* k indexes the 4x4 sub-block within the MB for mv_cache */
381             k = (j >> 2 & 1) + (i >> 1 & 2) +
382                 (j >> 1 & 4) + (i & 8);
384             if (mode != PREDICT_MODE) {
385                 pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
            /* PREDICT_MODE: scale the co-located MV of the next picture
             * by the temporal distance ratio */
387                 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
388                 my = s->next_pic->motion_val[0][b_xy][1] << 1;
391                     mx = mx * h->frame_num_offset /
392                          h->prev_frame_num_offset + 1 >> 1;
393                     my = my * h->frame_num_offset /
394                          h->prev_frame_num_offset + 1 >> 1;
396                     mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
397                          h->prev_frame_num_offset + 1 >> 1;
398                     my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
399                          h->prev_frame_num_offset + 1 >> 1;
403             /* clip motion vector prediction to frame border */
404             mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
405             my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
407             /* get (optional) motion vector differential */
408             if (mode == PREDICT_MODE) {
            /* note: dy is read before dx (bitstream order) */
411                 dy = svq3_get_se_golomb(&h->gb);
412                 dx = svq3_get_se_golomb(&h->gb);
414                 if (dx == INVALID_VLC || dy == INVALID_VLC) {
415                     av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
420             /* compute motion vector */
421             if (mode == THIRDPEL_MODE) {
423                 mx  = (mx + 1 >> 1) + dx;
424                 my  = (my + 1 >> 1) + dy;
            /* unsigned division avoids implementation-defined rounding
             * toward zero for negative values after the +0x3000 bias */
425                 fx  = (unsigned)(mx + 0x3000) / 3 - 0x1000;
426                 fy  = (unsigned)(my + 0x3000) / 3 - 0x1000;
427                 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
429                 svq3_mc_dir_part(s, x, y, part_width, part_height,
430                                  fx, fy, dxy, 1, dir, avg);
433             } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
434                 mx  = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
435                 my  = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
436                 dxy = (mx & 1) + 2 * (my & 1);
438                 svq3_mc_dir_part(s, x, y, part_width, part_height,
439                                  mx >> 1, my >> 1, dxy, 0, dir, avg);
            /* FULLPEL_MODE */
443                 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
444                 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
446                 svq3_mc_dir_part(s, x, y, part_width, part_height,
447                                  mx, my, 0, 0, dir, avg);
452             /* update mv_cache */
453             if (mode != PREDICT_MODE) {
454                 int32_t mv = pack16to32(mx, my);
456                 if (part_height == 8 && i < 8) {
457                     AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
459                     if (part_width == 8 && j < 8)
460                         AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
462                 if (part_width == 8 && j < 8)
463                     AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
464                 if (part_width == 4 || part_height == 4)
465                     AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
468             /* write back motion vectors */
469             fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
470                            part_width >> 2, part_height >> 2, h->b_stride,
471                            pack16to32(mx, my), 4);
/*
 * Decode one macroblock. `mb_type` from the bitstream selects:
 *   0        -> SKIP (direct prediction in B-frames),
 *   1..7     -> INTER (partition geometry = mb_type - 1),
 *   8 / 33   -> INTRA4x4 (33 = DC_128_PRED variant),
 *   9..32    -> INTRA16x16.
 * Fills the neighbour caches, decodes MVs/prediction modes, then reads
 * cbp, qscale delta and residual coefficient blocks. Returns <0 on error.
 * NOTE(review): this dump is missing many interior lines (declarations,
 * else-branches, error returns, closing braces); comments describe only
 * the visible code.
 */
477 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
479     H264Context *h = &s->h;
480     int i, j, k, m, dir, mode;
484     const int mb_xy = h->mb_xy;
485     const int b_xy  = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
    /* availability masks depend on MB position at the frame border */
487     h->top_samples_available      = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
488     h->left_samples_available     = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
489     h->topright_samples_available = 0xFFFF;
491     if (mb_type == 0) {           /* SKIP */
492         if (h->pict_type == AV_PICTURE_TYPE_P ||
493             s->next_pic->mb_type[mb_xy] == -1) {
494             svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
497             if (h->pict_type == AV_PICTURE_TYPE_B)
498                 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
501             mb_type = MB_TYPE_SKIP;
        /* B-frame direct skip: re-use next picture's MB type for both dirs */
503             mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
504             if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
506             if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
509             mb_type = MB_TYPE_16x16;
511     } else if (mb_type < 8) {     /* INTER */
        /* single flag disambiguates thirdpel vs halfpel vs fullpel */
512         if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
513             mode = THIRDPEL_MODE;
514         else if (s->halfpel_flag &&
515                  s->thirdpel_flag == !get_bits1(&h->gb))
521         /* note ref_cache should contain here:
        /* fill mv_cache / ref_cache from left, top, top-right, top-left
         * neighbours for both prediction directions */
529         for (m = 0; m < 2; m++) {
530             if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
531                 for (i = 0; i < 4; i++)
532                     AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
533                               h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
535                 for (i = 0; i < 4; i++)
536                     AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
539                 memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
540                        h->cur_pic.motion_val[m][b_xy - h->b_stride],
541                        4 * 2 * sizeof(int16_t));
542                 memset(&h->ref_cache[m][scan8[0] - 1 * 8],
543                        (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
545                 if (h->mb_x < h->mb_width - 1) {
546                     AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
547                               h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
548                     h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
549                         (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
550                          h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
552                     h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
554                     AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
555                               h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
556                     h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
557                         (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
559                     h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
            /* top row entirely unavailable */
561                 memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
562                        PART_NOT_AVAILABLE, 8);
564             if (h->pict_type != AV_PICTURE_TYPE_B)
568         /* decode motion vector(s) and form prediction(s) */
569         if (h->pict_type == AV_PICTURE_TYPE_P) {
570             if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
572         } else {        /* AV_PICTURE_TYPE_B */
574                 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
            /* no forward prediction: zero the forward MVs */
577                 for (i = 0; i < 4; i++)
578                     memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
579                            0, 4 * 2 * sizeof(int16_t));
582                 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
585                 for (i = 0; i < 4; i++)
586                     memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
587                            0, 4 * 2 * sizeof(int16_t));
591         mb_type = MB_TYPE_16x16;
592     } else if (mb_type == 8 || mb_type == 33) {   /* INTRA4x4 */
593         memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
        /* import neighbouring 4x4 intra prediction modes into the cache */
597                 for (i = 0; i < 4; i++)
598                     h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
599                 if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
600                     h->left_samples_available = 0x5F5F;
603                 h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
604                 h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
605                 h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
606                 h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
608                 if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
609                     h->top_samples_available = 0x33FF;
612             /* decode prediction codes for luma blocks */
613             for (i = 0; i < 16; i += 2) {
614                 vlc = svq3_get_ue_golomb(&h->gb);
617                     av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
621                 left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
622                 top  = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
            /* two modes per VLC, derived from top/left context */
624                 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
625                 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
627                 if (left[1] == -1 || left[2] == -1) {
628                     av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
632         } else {    /* mb_type == 33, DC_128_PRED block type */
633             for (i = 0; i < 4; i++)
634                 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
637         write_back_intra_pred_mode(h);
640             ff_h264_check_intra4x4_pred_mode(h);
642             h->top_samples_available  = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
643             h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
645             for (i = 0; i < 4; i++)
646                 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
648             h->top_samples_available  = 0x33FF;
649             h->left_samples_available = 0x5F5F;
652         mb_type = MB_TYPE_INTRA4x4;
653     } else {                      /* INTRA16x16 */
654         dir = i_mb_type_info[mb_type - 8].pred_mode;
        /* remap prediction-mode numbering to the H.264 convention */
655         dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
657         if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) < 0) {
658             av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
659             return h->intra16x16_pred_mode;
662         cbp     = i_mb_type_info[mb_type - 8].cbp;
663         mb_type = MB_TYPE_INTRA16x16;
    /* intra MBs in inter pictures invalidate their motion vectors */
666     if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
667         for (i = 0; i < 4; i++)
668             memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
669                    0, 4 * 2 * sizeof(int16_t));
670         if (h->pict_type == AV_PICTURE_TYPE_B) {
671             for (i = 0; i < 4; i++)
672                 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
673                        0, 4 * 2 * sizeof(int16_t));
676     if (!IS_INTRA4x4(mb_type)) {
677         memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
679     if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
680         memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
    /* read coded-block-pattern unless implied by INTRA16x16/SKIP */
683     if (!IS_INTRA16x16(mb_type) &&
684         (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
685         if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48) {
686             av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
690         cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
691                                 : golomb_to_inter_cbp[vlc];
693     if (IS_INTRA16x16(mb_type) ||
694         (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
695         h->qscale += svq3_get_se_golomb(&h->gb);
697         if (h->qscale > 31u) {
698             av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
702     if (IS_INTRA16x16(mb_type)) {
703         AV_ZERO128(h->mb_luma_dc[0] + 0);
704         AV_ZERO128(h->mb_luma_dc[0] + 8);
705         if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
706             av_log(h->avctx, AV_LOG_ERROR,
707                    "error while decoding intra luma dc\n");
    /* decode luma AC / residual 4x4 blocks flagged in cbp */
713         const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
714         const int type  = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
716         for (i = 0; i < 4; i++)
717             if ((cbp & (1 << i))) {
718                 for (j = 0; j < 4; j++) {
719                     k = index ? (1 * (j & 1) + 2 * (i & 1) +
720                                  2 * (j & 2) + 4 * (i & 2))
722                     h->non_zero_count_cache[scan8[k]] = 1;
724                     if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
725                         av_log(h->avctx, AV_LOG_ERROR,
726                                "error while decoding block\n");
        /* chroma DC blocks */
733             for (i = 1; i < 3; ++i)
734                 if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
735                     av_log(h->avctx, AV_LOG_ERROR,
736                            "error while decoding chroma dc block\n");
        /* chroma AC blocks */
741                 for (i = 1; i < 3; i++) {
742                     for (j = 0; j < 4; j++) {
744                         h->non_zero_count_cache[scan8[k]] = 1;
746                         if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
747                             av_log(h->avctx, AV_LOG_ERROR,
748                                    "error while decoding chroma ac block\n");
758     h->cur_pic.mb_type[mb_xy] = mb_type;
760     if (IS_INTRA(mb_type))
761         h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
/*
 * Parse an SVQ3 slice header: validates the 8-bit header byte, locates
 * the next slice (next_slice_index), optionally de-watermarks the
 * payload, then reads slice type, skip run, slice_num, qscale and
 * adaptive-quant flag. Also invalidates intra predictors / MV
 * references for the MBs preceding the slice start. Returns nonzero on
 * error (per callers). NOTE(review): interior lines (declarations,
 * error returns, some branch headers) are missing from this dump.
 */
766 static int svq3_decode_slice_header(AVCodecContext *avctx)
768     SVQ3Context *s = avctx->priv_data;
769     H264Context *h = &s->h;
770     const int mb_xy = h->mb_xy;
774     header = get_bits(&h->gb, 8);
    /* low 5 bits must be 1 or 2 and the length-code bits must be set */
776     if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
778         av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
781         int length = header >> 5 & 3;
    /* slice size is stored little-endianly in the next `length` bytes */
783         s->next_slice_index = get_bits_count(&h->gb) +
784                               8 * show_bits(&h->gb, 8 * length) +
787         if (s->next_slice_index > h->gb.size_in_bits) {
788             av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
792         h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
793         skip_bits(&h->gb, 8);
    /* XOR out the watermark key from the slice payload, if present */
795         if (s->watermark_key) {
796             uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
797             AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
798                     header ^ s->watermark_key);
    /* NOTE(review): writes into gb.buffer (the packet data) in place */
801             memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
802                    &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
804         skip_bits_long(&h->gb, 0);
807     if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
808         av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
812     h->slice_type = golomb_to_pict_type[slice_id];
814     if ((header & 0x9F) == 2) {
    /* slice starts mid-frame: read the MB skip run */
815         i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
816         h->mb_skip_run = get_bits(&h->gb, i) -
817                          (h->mb_y * h->mb_width + h->mb_x);
823     h->slice_num      = get_bits(&h->gb, 8);
824     h->qscale         = get_bits(&h->gb, 5);
825     s->adaptive_quant = get_bits1(&h->gb);
    /* unknown/reserved fields */
834     skip_bits(&h->gb, 2);
836     while (get_bits1(&h->gb))
837         skip_bits(&h->gb, 8);
839     /* reset intra predictors and invalidate motion vector references */
841         memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
842                -1, 4 * sizeof(int8_t));
843         memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
844                -1, 8 * sizeof(int8_t) * h->mb_x);
847         memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
848                -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
851             h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/*
 * Decoder init: allocates the three picture slots (cur/last/next),
 * initializes the embedded H.264 context and DSP tables, then parses
 * the "SEQH" atom from extradata for frame size, hpel/tpel flags,
 * low-delay flag and the optional zlib-compressed watermark (whose
 * checksum becomes the de-watermarking XOR key). Finally derives MB
 * geometry and allocates the H.264 tables.
 * NOTE(review): interior lines (declarations, some returns/branches,
 * the frame_size_code switch cases) are missing from this dump.
 */
857 static av_cold int svq3_decode_init(AVCodecContext *avctx)
859     SVQ3Context *s = avctx->priv_data;
860     H264Context *h = &s->h;
862     unsigned char *extradata;
863     unsigned char *extradata_end;
865     int marker_found = 0;
867     s->cur_pic  = av_mallocz(sizeof(*s->cur_pic));
868     s->last_pic = av_mallocz(sizeof(*s->last_pic));
869     s->next_pic = av_mallocz(sizeof(*s->next_pic));
870     if (!s->next_pic || !s->last_pic || !s->cur_pic) {
871         av_freep(&s->cur_pic);
872         av_freep(&s->last_pic);
873         av_freep(&s->next_pic);
874         return AVERROR(ENOMEM);
877     if (ff_h264_decode_init(avctx) < 0)
880     ff_hpeldsp_init(&s->hdsp, avctx->flags);
881     h->flags           = avctx->flags;
883     h->picture_structure = PICT_FRAME;
884     avctx->pix_fmt     = AV_PIX_FMT_YUVJ420P;
885     avctx->color_range = AVCOL_RANGE_JPEG;
887     h->chroma_qp[0] = h->chroma_qp[1] = 4;
888     h->chroma_x_shift = h->chroma_y_shift = 1;
    /* defaults when no SEQH atom is present */
891     s->thirdpel_flag = 1;
894     /* prowl for the "SEQH" marker in the extradata */
895     extradata     = (unsigned char *)avctx->extradata;
896     extradata_end = avctx->extradata + avctx->extradata_size;
898         for (m = 0; m + 8 < avctx->extradata_size; m++) {
899             if (!memcmp(extradata, "SEQH", 4)) {
907     /* if a match was found, parse the extra data */
    /* SEQH payload size is big-endian, bounds-checked against extradata */
912         size = AV_RB32(&extradata[4]);
913         if (size > extradata_end - extradata - 8)
914             return AVERROR_INVALIDDATA;
915         init_get_bits(&gb, extradata + 8, size * 8);
917         /* 'frame size code' and optional 'width, height' */
918         frame_size_code = get_bits(&gb, 3);
919         switch (frame_size_code) {
        /* code 7: explicit 12-bit width/height follow */
949             avctx->width  = get_bits(&gb, 12);
950             avctx->height = get_bits(&gb, 12);
954         s->halfpel_flag  = get_bits1(&gb);
955         s->thirdpel_flag = get_bits1(&gb);
963         h->low_delay = get_bits1(&gb);
        /* skip variable-length unknown fields */
968         while (get_bits1(&gb))
971         s->unknown_flag        = get_bits1(&gb);
972         avctx->has_b_frames    = !h->low_delay;
973         if (s->unknown_flag) {
            /* watermark present (requires zlib) */
975             unsigned watermark_width  = svq3_get_ue_golomb(&gb);
976             unsigned watermark_height = svq3_get_ue_golomb(&gb);
977             int u1                    = svq3_get_ue_golomb(&gb);
978             int u2                    = get_bits(&gb, 8);
979             int u3                    = get_bits(&gb, 2);
980             int u4                    = svq3_get_ue_golomb(&gb);
981             unsigned long buf_len     = watermark_width *
982                                         watermark_height * 4;
983             int offset                = get_bits_count(&gb) + 7 >> 3;
            /* reject dimensions whose 4*w*h product would overflow */
986             if (watermark_height > 0 &&
987                 (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
990             buf = av_malloc(buf_len);
991             av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
992                    watermark_width, watermark_height);
993             av_log(avctx, AV_LOG_DEBUG,
994                    "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
995                    u1, u2, u3, u4, offset);
996             if (uncompress(buf, &buf_len, extradata + 8 + offset,
997                            size - offset) != Z_OK) {
998                 av_log(avctx, AV_LOG_ERROR,
999                        "could not uncompress watermark logo\n");
            /* XOR key = 32-bit replication of the 16-bit logo checksum */
1003             s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1004             s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1005             av_log(avctx, AV_LOG_DEBUG,
1006                    "watermark key %#x\n", s->watermark_key);
1009             av_log(avctx, AV_LOG_ERROR,
1010                    "this svq3 file contains watermark which need zlib support compiled in\n");
    /* derive macroblock geometry from the (possibly updated) frame size */
1016     h->width  = avctx->width;
1017     h->height = avctx->height;
1018     h->mb_width  = (h->width + 15) / 16;
1019     h->mb_height = (h->height + 15) / 16;
1020     h->mb_stride = h->mb_width + 1;
1021     h->mb_num    = h->mb_width * h->mb_height;
1022     h->b_stride = 4 * h->mb_width;
1023     s->h_edge_pos = h->mb_width * 16;
1024     s->v_edge_pos = h->mb_height * 16;
1026     if (ff_h264_alloc_tables(h) < 0) {
1027         av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1028         return AVERROR(ENOMEM);
/* Release all buffers owned by one picture slot: per-direction motion
 * vectors and reference indices, MB-type array, and the frame itself. */
1034 static void free_picture(AVCodecContext *avctx, H264Picture *pic)
1037     for (i = 0; i < 2; i++) {
1038         av_buffer_unref(&pic->motion_val_buf[i]);
1039         av_buffer_unref(&pic->ref_index_buf[i]);
1041     av_buffer_unref(&pic->mb_type_buf);
1043     av_frame_unref(&pic->f);
/*
 * Allocate (on first use) the side buffers of a picture slot — MB types,
 * motion vectors, reference indices — then get a frame buffer from the
 * generic allocator. Also lazily allocates the edge-emulation buffer and
 * records the luma/chroma linesizes in the H.264 context. Returns 0 or a
 * negative AVERROR; frees the picture on the error path (via the `fail`
 * label, partially outside this dump's visible lines).
 */
1046 static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
1048     SVQ3Context *s = avctx->priv_data;
1049     H264Context *h = &s->h;
1050     const int big_mb_num    = h->mb_stride * (h->mb_height + 1) + 1;
1051     const int mb_array_size = h->mb_stride * h->mb_height;
1052     const int b4_stride     = h->mb_width * 4 + 1;
1053     const int b4_array_size = b4_stride * h->mb_height * 4;
    /* side buffers are allocated once and kept across frames */
1056     if (!pic->motion_val_buf[0]) {
1059         pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1060         if (!pic->mb_type_buf)
1061             return AVERROR(ENOMEM);
1062         pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1064         for (i = 0; i < 2; i++) {
1065             pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1066             pic->ref_index_buf[i]  = av_buffer_allocz(4 * mb_array_size);
1067             if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1068                 ret = AVERROR(ENOMEM);
1072             pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1073             pic->ref_index[i]  = pic->ref_index_buf[i]->data;
    /* B-frames are not used as references */
1076     pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1078     ret = ff_get_buffer(avctx, &pic->f,
1079                         pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1083     if (!h->edge_emu_buffer) {
1084         h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1085         if (!h->edge_emu_buffer)
1086             return AVERROR(ENOMEM);
1089     h->linesize   = pic->f.linesize[0];
1090     h->uvlinesize = pic->f.linesize[1];
1094     free_picture(avctx, pic);
/*
 * Decode one packet into a frame. Handles the end-of-stream flush
 * (buf_size == 0 outputs the buffered next_pic once), parses the slice
 * header, manages the cur/last/next picture rotation, synthesizes grey
 * reference frames when references are missing, then decodes all MBs
 * slice by slice. Outputs cur_pic for B/low-delay, otherwise last_pic
 * (one-frame delay). NOTE(review): interior lines (declarations, error
 * returns, some branch headers and closing braces) are missing from
 * this dump; comments describe only the visible code.
 */
1098 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1099                              int *got_frame, AVPacket *avpkt)
1101     const uint8_t *buf = avpkt->data;
1102     SVQ3Context *s     = avctx->priv_data;
1103     H264Context *h     = &s->h;
1104     int buf_size       = avpkt->size;
1107     /* special case for last picture */
1108     if (buf_size == 0) {
1109         if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1110             ret = av_frame_ref(data, &s->next_pic->f);
1113             s->last_frame_output = 1;
1119     init_get_bits(&h->gb, buf, 8 * buf_size);
1121     h->mb_x = h->mb_y = h->mb_xy = 0;
1123     if (svq3_decode_slice_header(avctx))
1126     h->pict_type = h->slice_type;
    /* non-B frame: previous "next" becomes the new backward reference */
1128     if (h->pict_type != AV_PICTURE_TYPE_B)
1129         FFSWAP(H264Picture*, s->next_pic, s->last_pic);
1131     av_frame_unref(&s->cur_pic->f);
1133     /* for skipping the frame */
1134     s->cur_pic->f.pict_type = h->pict_type;
1135     s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1137     ret = get_buffer(avctx, s->cur_pic);
1141     h->cur_pic_ptr = s->cur_pic;
1142     av_frame_unref(&h->cur_pic.f);
    /* shallow-copy into the H.264 context plus an owned frame reference */
1143     h->cur_pic     = *s->cur_pic;
1144     ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
    /* block offsets for frame (4*linesize) and field (8*linesize) layout */
1148     for (i = 0; i < 16; i++) {
1149         h->block_offset[i]           = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1150         h->block_offset[48 + i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1152     for (i = 0; i < 16; i++) {
1153         h->block_offset[16 + i]      =
1154         h->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1155         h->block_offset[48 + 16 + i] =
1156         h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
    /* missing references (e.g. after a seek) are replaced by grey frames */
1159     if (h->pict_type != AV_PICTURE_TYPE_I) {
1160         if (!s->last_pic->f.data[0]) {
1161             av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1162             ret = get_buffer(avctx, s->last_pic);
1165             memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1166             memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1167                    s->last_pic->f.linesize[1]);
1168             memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1169                    s->last_pic->f.linesize[2]);
1172         if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1173             av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1174             ret = get_buffer(avctx, s->next_pic);
1177             memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1178             memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1179                    s->next_pic->f.linesize[1]);
1180             memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1181                    s->next_pic->f.linesize[2]);
1185     if (avctx->debug & FF_DEBUG_PICT_INFO)
1186         av_log(h->avctx, AV_LOG_DEBUG,
1187                "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1188                av_get_picture_type_char(h->pict_type),
1189                s->halfpel_flag, s->thirdpel_flag,
1190                s->adaptive_quant, h->qscale, h->slice_num);
1192     if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1193         avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1194         avctx->skip_frame >= AVDISCARD_ALL)
1197     if (s->next_p_frame_damaged) {
1198         if (h->pict_type == AV_PICTURE_TYPE_B)
1201             s->next_p_frame_damaged = 0;
    /* derive frame_num bookkeeping used for B-frame MV scaling */
1204     if (h->pict_type == AV_PICTURE_TYPE_B) {
1205         h->frame_num_offset = h->slice_num - h->prev_frame_num;
1207         if (h->frame_num_offset < 0)
1208             h->frame_num_offset += 256;
1209         if (h->frame_num_offset == 0 ||
1210             h->frame_num_offset >= h->prev_frame_num_offset) {
1211             av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1215         h->prev_frame_num        = h->frame_num;
1216         h->frame_num             = h->slice_num;
1217         h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1219         if (h->prev_frame_num_offset < 0)
1220             h->prev_frame_num_offset += 256;
    /* SVQ3 has a single reference per direction; prime the ref cache */
1223     for (m = 0; m < 2; m++) {
1225         for (i = 0; i < 4; i++) {
1227             for (j = -1; j < 4; j++)
1228                 h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1230                 h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
    /* main MB decode loop */
1234     for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1235         for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1237             h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
            /* end of current slice reached: jump to the next slice header */
1239             if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1240                 ((get_bits_count(&h->gb) & 7) == 0 ||
1241                  show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1242                 skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1243                 h->gb.size_in_bits = 8 * buf_size;
1245                 if (svq3_decode_slice_header(avctx))
1248                 /* TODO: support s->mb_skip_run */
1251             mb_type = svq3_get_ue_golomb(&h->gb);
1253             if (h->pict_type == AV_PICTURE_TYPE_I)
1255             else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1257             if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1258                 av_log(h->avctx, AV_LOG_ERROR,
1259                        "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1264                 ff_h264_hl_decode_mb(h);
1266             if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1267                 h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1268                     (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1271         ff_draw_horiz_band(avctx, &s->cur_pic->f,
1272                            s->last_pic->f.data[0] ? &s->last_pic->f : NULL,
1273                            16 * h->mb_y, 16, h->picture_structure, 0,
    /* output selection: cur_pic directly, or the delayed last_pic */
1277     if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1278         ret = av_frame_ref(data, &s->cur_pic->f);
1279     else if (s->last_pic->f.data[0])
1280         ret = av_frame_ref(data, &s->last_pic->f);
1284     /* Do not output the last pic after seeking. */
1285     if (s->last_pic->f.data[0] || h->low_delay)
1288     if (h->pict_type != AV_PICTURE_TYPE_B) {
1289         FFSWAP(H264Picture*, s->cur_pic, s->next_pic);
1291         av_frame_unref(&s->cur_pic->f);
/* Decoder teardown: free the three picture slots and their containers,
 * drop the H.264 context's frame reference, then free the H.264 tables. */
1297 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1299     SVQ3Context *s = avctx->priv_data;
1300     H264Context *h = &s->h;
1302     free_picture(avctx, s->cur_pic);
1303     free_picture(avctx, s->next_pic);
1304     free_picture(avctx, s->last_pic);
1305     av_freep(&s->cur_pic);
1306     av_freep(&s->next_pic);
1307     av_freep(&s->last_pic);
1309     av_frame_unref(&h->cur_pic.f);
1311     ff_h264_free_context(h);
1316 AVCodec ff_svq3_decoder = {
1318 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1319 .type = AVMEDIA_TYPE_VIDEO,
1320 .id = AV_CODEC_ID_SVQ3,
1321 .priv_data_size = sizeof(SVQ3Context),
1322 .init = svq3_decode_init,
1323 .close = svq3_decode_end,
1324 .decode = svq3_decode_frame,
1325 .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1328 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,