2 * Copyright (c) 2003 The Libav Project
4 * This file is part of Libav.
6 * Libav is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * Libav is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with Libav; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
43 #include "libavutil/attributes.h"
46 #include "mpegutils.h"
47 #include "mpegvideo.h"
50 #include "h264data.h" // FIXME FIXME FIXME
52 #include "h264_mvpred.h"
55 #include "rectangle.h"
73 H264Picture *next_pic;
74 H264Picture *last_pic;
79 uint32_t watermark_key;
81 int next_p_frame_damaged;
84 int last_frame_output;
/* Motion-compensation mode codes decoded from the bitstream; PREDICT_MODE
 * derives the vector from the co-located next-picture MV (B/skip cases). */
87 #define FULLPEL_MODE 1
88 #define HALFPEL_MODE 2
89 #define THIRDPEL_MODE 3
90 #define PREDICT_MODE 4
92 /* dual scan (from some older h264 draft)
/* 4x4 coefficient scan order used by SVQ3 (entries are x + y * 4).
 * Selected in svq3_decode_block() via scan_patterns[]. */
101 static const uint8_t svq3_scan[16] = {
102     0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
103     2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
104     0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
105     0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
/* Scan for the 4x4 luma DC coefficients; each entry is an offset
 * (x * 16 + y * 64) into the macroblock coefficient buffer. */
108 static const uint8_t luma_dc_zigzag_scan[16] = {
109     0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
110     3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
111     1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
112     3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
/* Maps the luma-prediction vlc code to a pair of rank indices; the two
 * entries are used as the last index into svq3_pred_1[][][] when decoding
 * intra 4x4 prediction modes (see svq3_decode_mb). */
115 static const uint8_t svq3_pred_0[25][2] = {
118     { 0, 2 }, { 1, 1 }, { 2, 0 },
119     { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
120     { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
121     { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
122     { 2, 4 }, { 3, 3 }, { 4, 2 },
/* Candidate intra 4x4 prediction modes indexed by [top mode + 1][left
 * mode + 1][rank]; -1 marks unavailable slots.  The +1 in the lookup
 * accounts for the "unavailable neighbour" value -1. */
127 static const int8_t svq3_pred_1[6][6][5] = {
128     { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
129       { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
130     { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
131       { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
132     { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
133       { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
134     { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
135       { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
136     { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
137       { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
138     { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
139       { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/* (run, level) pairs for short DCT-coefficient vlc codes, indexed as
 * svq3_dct_tables[intra][vlc]; consumed in svq3_decode_block(). */
142 static const struct {
145 } svq3_dct_tables[2][16] = {
146     { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
147       { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
148     { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
149       { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/* Dequantization multipliers indexed by qscale (0..31); applied with a
 * +0x80000 rounding bias and >> 20 in the idct routines below. */
152 static const uint32_t svq3_dequant_coeff[32] = {
153      3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
154      9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
155     24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
156     61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
/**
 * Dequantize and inverse-transform the 4x4 block of luma DC coefficients.
 * Row pass works on input[], column pass writes the dequantized results
 * into output[] at strided positions (the 13/7/17 multipliers are SVQ3's
 * integer transform kernel).
 * NOTE(review): this chunk is an elided view — 'stride', 'temp' and 'i'
 * are declared on lines not shown here.
 */
159 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
161     const int qmul = svq3_dequant_coeff[qp];
165     static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
167     for (i = 0; i < 4; i++) {
        /* horizontal (row) butterfly */
168         const int z0 = 13 * (input[4 * i + 0] +      input[4 * i + 2]);
169         const int z1 = 13 * (input[4 * i + 0] -      input[4 * i + 2]);
170         const int z2 =  7 *  input[4 * i + 1] - 17 * input[4 * i + 3];
171         const int z3 = 17 *  input[4 * i + 1] +  7 * input[4 * i + 3];
173         temp[4 * i + 0] = z0 + z3;
174         temp[4 * i + 1] = z1 + z2;
175         temp[4 * i + 2] = z1 - z2;
176         temp[4 * i + 3] = z0 - z3;
179     for (i = 0; i < 4; i++) {
        /* vertical (column) butterfly + dequant; +0x80000 >> 20 rounds */
180         const int offset = x_offset[i];
181         const int z0     = 13 * (temp[4 * 0 + i] +      temp[4 * 2 + i]);
182         const int z1     = 13 * (temp[4 * 0 + i] -      temp[4 * 2 + i]);
183         const int z2     =  7 *  temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
184         const int z3     = 17 *  temp[4 * 1 + i] +  7 * temp[4 * 3 + i];
186         output[stride *  0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
187         output[stride *  2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
188         output[stride *  8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
189         output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
/**
 * Dequantize, inverse-transform and add a 4x4 residual block to dst.
 * @param dc  0 for normal AC blocks; non-zero selects DC handling
 *            (dc == 1 uses the fixed 1538 scale, otherwise block[0] is
 *            dequantized with qmul).  block[] is cleared on exit.
 * NOTE(review): elided view — some statements between the visible lines
 * (e.g. block[0] reset for the dc path) are not shown here.
 */
194 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
195                         int stride, int qp, int dc)
197     const int qmul = svq3_dequant_coeff[qp];
201         dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
202                                 : qmul * (block[0] >> 3) / 2);
206     for (i = 0; i < 4; i++) {
        /* horizontal pass, in place */
207         const int z0 = 13 * (block[0 + 4 * i] +      block[2 + 4 * i]);
208         const int z1 = 13 * (block[0 + 4 * i] -      block[2 + 4 * i]);
209         const int z2 =  7 *  block[1 + 4 * i] - 17 * block[3 + 4 * i];
210         const int z3 = 17 *  block[1 + 4 * i] +  7 * block[3 + 4 * i];
212         block[0 + 4 * i] = z0 + z3;
213         block[1 + 4 * i] = z1 + z2;
214         block[2 + 4 * i] = z1 - z2;
215         block[3 + 4 * i] = z0 - z3;
218     for (i = 0; i < 4; i++) {
        /* vertical pass; rr folds the DC term and the rounding bias */
219         const int z0 = 13 * (block[i + 4 * 0] +      block[i + 4 * 2]);
220         const int z1 = 13 * (block[i + 4 * 0] -      block[i + 4 * 2]);
221         const int z2 =  7 *  block[i + 4 * 1] - 17 * block[i + 4 * 3];
222         const int z3 = 17 *  block[i + 4 * 1] +  7 * block[i + 4 * 3];
223         const int rr = (dc + 0x80000);
225         dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
226         dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
227         dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
228         dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
231     memset(block, 0, 16 * sizeof(int16_t));
/**
 * Decode run/level coefficient pairs for one block into block[] using the
 * scan selected by 'type' (0 = luma DC, 1 = zigzag, 2 = svq3, 3 = chroma DC).
 * Returns non-zero on vlc/overflow errors (error paths are on elided lines).
 * NOTE(review): elided view — the declarations of 'vlc' and several
 * branches of the vlc-size handling are not shown here.
 */
234 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
235                                     int index, const int type)
237     static const uint8_t *const scan_patterns[4] =
238     { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
240     int run, level, limit;
    /* intra is 1 for types 2 and 3 (3*type >> 2), selecting the second
     * run/level table and the tighter run limits below */
242     const int intra   = 3 * type >> 2;
243     const uint8_t *const scan = scan_patterns[type];
245     for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
246         for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
            /* even vlc => negative level (sign-magnitude folding below) */
247             int sign = (vlc & 1) ? 0 : -1;
254             } else if (vlc < 4) {
259                     level = (vlc + 9 >> 2) - run;
263                 run   = svq3_dct_tables[intra][vlc].run;
264                 level = svq3_dct_tables[intra][vlc].level;
268                           ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
272                           ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
276             if ((index += run) >= limit)
            /* apply sign via xor/subtract trick */
279             block[scan[index]] = (level ^ sign) - sign;
/**
 * Perform motion compensation for one partition (luma, then chroma unless
 * CODEC_FLAG_GRAY) from the last or next reference picture.
 * @param dxy      sub-pel position selector for the pixel ops
 * @param thirdpel non-zero selects the tpel (1/3-pel) pixel functions
 * @param dir      0 = forward (last_pic), 1 = backward (next_pic)
 * @param avg      non-zero averages into dst instead of overwriting
 * NOTE(review): elided view — the 'emu' edge condition and several
 * declarations sit on lines not shown here.
 */
290 static inline void svq3_mc_dir_part(SVQ3Context *s,
291                                     int x, int y, int width, int height,
292                                     int mx, int my, int dxy,
293                                     int thirdpel, int dir, int avg)
295     H264Context *h = &s->h;
296     const H264Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
299     int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
    /* if the reference block crosses the picture edge, clip and use the
     * emulated-edge buffer below */
304     if (mx < 0 || mx >= s->h_edge_pos - width  - 1 ||
305         my < 0 || my >= s->v_edge_pos - height - 1) {
307         mx = av_clip(mx, -16, s->h_edge_pos - width  + 15);
308         my = av_clip(my, -16, s->v_edge_pos - height + 15);
311     /* form component predictions */
312     dest = h->cur_pic.f.data[0] + x + y * h->linesize;
313     src  = pic->f.data[0] + mx + my * h->linesize;
316         h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
317                                  h->linesize, h->linesize,
318                                  width + 1, height + 1,
319                                  mx, my, s->h_edge_pos, s->v_edge_pos);
320         src = h->edge_emu_buffer;
323         (avg ? h->dsp.avg_tpel_pixels_tab
324              : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
327         (avg ? s->hdsp.avg_pixels_tab
328              : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
331     if (!(h->flags & CODEC_FLAG_GRAY)) {
        /* halve the MV for chroma (4:2:0), with rounding toward the block */
332         mx     = mx + (mx < (int) x) >> 1;
333         my     = my + (my < (int) y) >> 1;
335         height = height >> 1;
338         for (i = 1; i < 3; i++) {
339             dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
340             src  = pic->f.data[i] + mx + my * h->uvlinesize;
343                 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
344                                          h->uvlinesize, h->uvlinesize,
345                                          width + 1, height + 1,
346                                          mx, my, (s->h_edge_pos >> 1),
348                 src = h->edge_emu_buffer;
351                 (avg ? h->dsp.avg_tpel_pixels_tab
352                      : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
356                 (avg ? s->hdsp.avg_pixels_tab
357                      : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/**
 * Decode motion vectors for all partitions of the current macroblock and
 * run motion compensation for each, then fill mv_cache and write the MVs
 * back into cur_pic.motion_val.
 * @param size  encodes the partition geometry (width/height derivation below)
 * @param mode  FULLPEL/HALFPEL/THIRDPEL/PREDICT_MODE
 * @param dir   prediction direction (0 = forward, 1 = backward)
 * @param avg   average into the destination (second direction of B blocks)
 * Returns < 0 on invalid MV vlc (error return is on an elided line).
 */
364 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
367     int i, j, k, mx, my, dx, dy, x, y;
368     H264Context *h          = &s->h;
369     const int part_width    = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
370     const int part_height   = 16 >> ((unsigned)(size + 1) / 3);
371     const int extra_width   = (mode == PREDICT_MODE) ? -16 * 6 : 0;
372     const int h_edge_pos    = 6 * (s->h_edge_pos - part_width) - extra_width;
373     const int v_edge_pos    = 6 * (s->v_edge_pos - part_height) - extra_width;
375     for (i = 0; i < 16; i += part_height)
376         for (j = 0; j < 16; j += part_width) {
377             const int b_xy = (4 * h->mb_x + (j >> 2)) +
378                              (4 * h->mb_y + (i >> 2)) * h->b_stride;
380             x = 16 * h->mb_x + j;
381             y = 16 * h->mb_y + i;
            /* k indexes the 4x4 sub-block within the MB for mv_cache/scan8 */
382             k = (j >> 2 & 1) + (i >> 1 & 2) +
383                 (j >> 1 & 4) + (i & 8);
385             if (mode != PREDICT_MODE) {
386                 pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
            /* PREDICT_MODE: scale the co-located MV of the next picture by
             * the temporal distance ratio (direct-mode style) */
388                 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
389                 my = s->next_pic->motion_val[0][b_xy][1] << 1;
392                     mx = mx * h->frame_num_offset /
393                          h->prev_frame_num_offset + 1 >> 1;
394                     my = my * h->frame_num_offset /
395                          h->prev_frame_num_offset + 1 >> 1;
397                     mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
398                          h->prev_frame_num_offset + 1 >> 1;
399                     my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
400                          h->prev_frame_num_offset + 1 >> 1;
404             /* clip motion vector prediction to frame border */
405             mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
406             my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
408             /* get (optional) motion vector differential */
409             if (mode == PREDICT_MODE) {
412                 dy = svq3_get_se_golomb(&h->gb);
413                 dx = svq3_get_se_golomb(&h->gb);
415                 if (dx == INVALID_VLC || dy == INVALID_VLC) {
416                     av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
421             /* compute motion vector */
422             if (mode == THIRDPEL_MODE) {
            /* split MV into full-pel part (fx,fy) and 1/3-pel phase dxy */
424                 mx  = (mx + 1 >> 1) + dx;
425                 my  = (my + 1 >> 1) + dy;
426                 fx  = (unsigned)(mx + 0x3000) / 3 - 0x1000;
427                 fy  = (unsigned)(my + 0x3000) / 3 - 0x1000;
428                 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
430                 svq3_mc_dir_part(s, x, y, part_width, part_height,
431                                  fx, fy, dxy, 1, dir, avg);
434             } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
435                 mx  = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
436                 my  = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
437                 dxy = (mx & 1) + 2 * (my & 1);
439                 svq3_mc_dir_part(s, x, y, part_width, part_height,
440                                  mx >> 1, my >> 1, dxy, 0, dir, avg);
            /* FULLPEL_MODE: round the 1/6-pel prediction to full pel */
444                 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
445                 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
447                 svq3_mc_dir_part(s, x, y, part_width, part_height,
448                                  mx, my, 0, 0, dir, avg);
453             /* update mv_cache */
454             if (mode != PREDICT_MODE) {
455                 int32_t mv = pack16to32(mx, my);
457                 if (part_height == 8 && i < 8) {
458                     AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
460                     if (part_width == 8 && j < 8)
461                         AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
463                 if (part_width == 8 && j < 8)
464                     AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
465                 if (part_width == 4 || part_height == 4)
466                     AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
469             /* write back motion vectors */
470             fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
471                            part_width >> 2, part_height >> 2, h->b_stride,
472                            pack16to32(mx, my), 4);
/**
 * Decode one macroblock: dispatch on mb_type (SKIP / INTER / INTRA4x4 /
 * INTRA16x16), decode MVs or intra prediction modes, then decode the
 * residual coefficient blocks.  Returns < 0 on bitstream errors.
 * NOTE(review): elided view — many lines (declarations of vlc/cbp/left/top,
 * error returns, closing braces) are not visible here; comments below only
 * describe the visible statements.
 */
478 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
480     H264Context *h = &s->h;
481     int i, j, k, m, dir, mode;
485     const int mb_xy = h->mb_xy;
486     const int b_xy  = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
    /* neighbour availability masks for intra prediction at picture edges */
488     h->top_samples_available      = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
489     h->left_samples_available     = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
490     h->topright_samples_available = 0xFFFF;
492     if (mb_type == 0) {           /* SKIP */
493         if (h->pict_type == AV_PICTURE_TYPE_P ||
494             s->next_pic->mb_type[mb_xy] == -1) {
        /* plain copy (zero-MV) from the reference(s) */
495             svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
498             if (h->pict_type == AV_PICTURE_TYPE_B)
499                 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
502             mb_type = MB_TYPE_SKIP;
        /* B-frame direct skip: reuse the co-located next-pic mb_type */
504             mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
505             if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
507             if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
510             mb_type = MB_TYPE_16x16;
512     } else if (mb_type < 8) {     /* INTER */
        /* one flag bit selects third-pel vs half-pel when both are allowed */
513         if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
514             mode = THIRDPEL_MODE;
515         else if (s->halfpel_flag &&
516                  s->thirdpel_flag == !get_bits1(&h->gb))
522         /* note ref_cache should contain here: */
        /* fill mv_cache/ref_cache from the left, top, top-right and
         * top-left neighbours for MV prediction */
530         for (m = 0; m < 2; m++) {
531             if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
532                 for (i = 0; i < 4; i++)
533                     AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
534                               h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
536                 for (i = 0; i < 4; i++)
537                     AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
540                 memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
541                        h->cur_pic.motion_val[m][b_xy - h->b_stride],
542                        4 * 2 * sizeof(int16_t));
543                 memset(&h->ref_cache[m][scan8[0] - 1 * 8],
544                        (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
546                 if (h->mb_x < h->mb_width - 1) {
547                     AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
548                               h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
549                     h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
550                         (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
551                          h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
553                     h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
555                     AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
556                               h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
557                     h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
558                         (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
560                     h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
562                 memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
563                        PART_NOT_AVAILABLE, 8);
565             if (h->pict_type != AV_PICTURE_TYPE_B)
569         /* decode motion vector(s) and form prediction(s) */
570         if (h->pict_type == AV_PICTURE_TYPE_P) {
571             if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
573         } else {        /* AV_PICTURE_TYPE_B */
575                 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
578                 for (i = 0; i < 4; i++)
579                     memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
580                            0, 4 * 2 * sizeof(int16_t));
583                 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
586                 for (i = 0; i < 4; i++)
587                     memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
588                            0, 4 * 2 * sizeof(int16_t));
592         mb_type = MB_TYPE_16x16;
593     } else if (mb_type == 8 || mb_type == 33) {   /* INTRA4x4 */
594         memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
        /* load left/top neighbour intra modes into the prediction cache */
598                 for (i = 0; i < 4; i++)
599                     h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
600                 if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
601                     h->left_samples_available = 0x5F5F;
604                 h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
605                 h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
606                 h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
607                 h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
609                 if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
610                     h->top_samples_available = 0x33FF;
613             /* decode prediction codes for luma blocks */
614             for (i = 0; i < 16; i += 2) {
615                 vlc = svq3_get_ue_golomb(&h->gb);
618                     av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
622                 left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
623                 top  = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
            /* each vlc yields two 4x4 modes via the pred_0/pred_1 tables */
625                 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
626                 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
628                 if (left[1] == -1 || left[2] == -1) {
629                     av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
633         } else {    /* mb_type == 33, DC_128_PRED block type */
634             for (i = 0; i < 4; i++)
635                 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
638         write_back_intra_pred_mode(h);
641             ff_h264_check_intra4x4_pred_mode(h);
643             h->top_samples_available  = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
644             h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
646             for (i = 0; i < 4; i++)
647                 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
649             h->top_samples_available  = 0x33FF;
650             h->left_samples_available = 0x5F5F;
653         mb_type = MB_TYPE_INTRA4x4;
654     } else {                      /* INTRA16x16 */
655         dir = i_mb_type_info[mb_type - 8].pred_mode;
        /* remap SVQ3 pred-mode numbering to the H.264 one */
656         dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
658         if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) < 0) {
659             av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
660             return h->intra16x16_pred_mode;
663         cbp     = i_mb_type_info[mb_type - 8].cbp;
664         mb_type = MB_TYPE_INTRA16x16;
    /* invalidate MVs for intra MBs in inter pictures */
667     if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
668         for (i = 0; i < 4; i++)
669             memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
670                    0, 4 * 2 * sizeof(int16_t));
671         if (h->pict_type == AV_PICTURE_TYPE_B) {
672             for (i = 0; i < 4; i++)
673                 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
674                        0, 4 * 2 * sizeof(int16_t));
677     if (!IS_INTRA4x4(mb_type)) {
678         memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
680     if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
681         memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
    /* coded block pattern (not transmitted for INTRA16x16, where it is
     * implied by the mb_type table entry above) */
684     if (!IS_INTRA16x16(mb_type) &&
685         (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
686         if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48) {
687             av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
691         cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
692                                 : golomb_to_inter_cbp[vlc];
694     if (IS_INTRA16x16(mb_type) ||
695         (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
696         h->qscale += svq3_get_se_golomb(&h->gb);
        /* qscale is delta-coded; unsigned compare also rejects negatives */
698         if (h->qscale > 31u) {
699             av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
703     if (IS_INTRA16x16(mb_type)) {
704         AV_ZERO128(h->mb_luma_dc[0] + 0);
705         AV_ZERO128(h->mb_luma_dc[0] + 8);
706         if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
707             av_log(h->avctx, AV_LOG_ERROR,
708                    "error while decoding intra luma dc\n");
    /* luma AC (or full 4x4) residual blocks, 8x8 group at a time per cbp bit */
714         const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
715         const int type  = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
717         for (i = 0; i < 4; i++)
718             if ((cbp & (1 << i))) {
719                 for (j = 0; j < 4; j++) {
720                     k = index ? (1 * (j & 1) + 2 * (i & 1) +
721                                  2 * (j & 2) + 4 * (i & 2))
723                     h->non_zero_count_cache[scan8[k]] = 1;
725                     if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
726                         av_log(h->avctx, AV_LOG_ERROR,
727                                "error while decoding block\n");
    /* chroma DC, then chroma AC if signalled by the high cbp bits */
734             for (i = 1; i < 3; ++i)
735                 if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
736                     av_log(h->avctx, AV_LOG_ERROR,
737                            "error while decoding chroma dc block\n");
742                 for (i = 1; i < 3; i++) {
743                     for (j = 0; j < 4; j++) {
745                         h->non_zero_count_cache[scan8[k]] = 1;
747                         if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
748                             av_log(h->avctx, AV_LOG_ERROR,
749                                    "error while decoding chroma ac block\n");
759     h->cur_pic.mb_type[mb_xy] = mb_type;
761     if (IS_INTRA(mb_type))
762         h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
/**
 * Parse an SVQ3 slice header: slice start code, optional multi-slice
 * bookkeeping (next_slice_index), watermark de-scrambling, slice type,
 * qscale/adaptive-quant flags; then reset intra predictors along the
 * slice boundary.  Returns non-zero on error (returns are on elided lines).
 */
767 static int svq3_decode_slice_header(AVCodecContext *avctx)
769     SVQ3Context *s = avctx->priv_data;
770     H264Context *h = &s->h;
771     const int mb_xy = h->mb_xy;
775     header = get_bits(&h->gb, 8);
    /* low 5 bits: 1 = first slice, 2 = continuation; bits 5-6 encode the
     * byte length of the next-slice offset field and must be non-zero */
777     if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
779         av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
782         int length = header >> 5 & 3;
784         s->next_slice_index = get_bits_count(&h->gb) +
785                               8 * show_bits(&h->gb, 8 * length) +
788         if (s->next_slice_index > h->gb.size_in_bits) {
789             av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
793         h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
794         skip_bits(&h->gb, 8);
796         if (s->watermark_key) {
        /* XOR-descramble the 32 bits following the current position;
         * note this writes into the (nominally const) bitstream buffer */
797             uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
798             AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
799                     header ^ s->watermark_key);
802             memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
803                    &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
805         skip_bits_long(&h->gb, 0);
808     if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
809         av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
813     h->slice_type = golomb_to_pict_type[slice_id];
815     if ((header & 0x9F) == 2) {
        /* continuation slice carries the absolute starting MB address */
816         i              = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
817         h->mb_skip_run = get_bits(&h->gb, i) -
818                          (h->mb_y * h->mb_width + h->mb_x);
824     h->slice_num      = get_bits(&h->gb, 8);
825     h->qscale         = get_bits(&h->gb, 5);
826     s->adaptive_quant = get_bits1(&h->gb);
835     skip_bits(&h->gb, 2);
    /* skip variable-length extension bytes while the flag bit is set */
837     while (get_bits1(&h->gb))
838         skip_bits(&h->gb, 8);
840     /* reset intra predictors and invalidate motion vector references */
842         memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
843                -1, 4 * sizeof(int8_t));
844         memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
845                -1, 8 * sizeof(int8_t) * h->mb_x);
848         memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
849                -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
852             h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/**
 * Decoder init: allocate the three picture slots, initialize the embedded
 * H.264 context, then parse the "SEQH" extradata header (frame size,
 * sub-pel flags, low-delay, optional zlib-compressed watermark) and size
 * the macroblock geometry.  Returns 0 on success, negative AVERROR code
 * on failure (some error returns are on elided lines).
 */
858 static av_cold int svq3_decode_init(AVCodecContext *avctx)
860     SVQ3Context *s = avctx->priv_data;
861     H264Context *h = &s->h;
863     unsigned char *extradata;
864     unsigned char *extradata_end;
866     int marker_found = 0;
868     s->cur_pic  = av_mallocz(sizeof(*s->cur_pic));
869     s->last_pic = av_mallocz(sizeof(*s->last_pic));
870     s->next_pic = av_mallocz(sizeof(*s->next_pic));
871     if (!s->next_pic || !s->last_pic || !s->cur_pic) {
        /* av_freep() on the successful ones; freeing NULL is a no-op */
872         av_freep(&s->cur_pic);
873         av_freep(&s->last_pic);
874         av_freep(&s->next_pic);
875         return AVERROR(ENOMEM);
878     if (ff_h264_decode_init(avctx) < 0)
881     ff_hpeldsp_init(&s->hdsp, avctx->flags);
882     h->flags           = avctx->flags;
884     h->picture_structure = PICT_FRAME;
885     avctx->pix_fmt     = avctx->codec->pix_fmts[0];
887     h->chroma_qp[0] = h->chroma_qp[1] = 4;
888     h->chroma_x_shift = h->chroma_y_shift = 1;
891     s->thirdpel_flag = 1;
894     /* prowl for the "SEQH" marker in the extradata */
895     extradata     = (unsigned char *)avctx->extradata;
896     extradata_end = avctx->extradata + avctx->extradata_size;
898         for (m = 0; m + 8 < avctx->extradata_size; m++) {
899             if (!memcmp(extradata, "SEQH", 4)) {
907     /* if a match was found, parse the extra data */
912         size = AV_RB32(&extradata[4]);
913         if (size > extradata_end - extradata - 8)
914             return AVERROR_INVALIDDATA;
915         init_get_bits(&gb, extradata + 8, size * 8);
917         /* 'frame size code' and optional 'width, height' */
918         frame_size_code = get_bits(&gb, 3);
919         switch (frame_size_code) {
        /* code 7: explicit 12-bit width/height follow (other codes map to
         * fixed sizes on elided lines) */
949             avctx->width  = get_bits(&gb, 12);
950             avctx->height = get_bits(&gb, 12);
954         s->halfpel_flag  = get_bits1(&gb);
955         s->thirdpel_flag = get_bits1(&gb);
963         h->low_delay = get_bits1(&gb);
968         while (get_bits1(&gb))
971         s->unknown_flag         = get_bits1(&gb);
972         avctx->has_b_frames     = !h->low_delay;
973         if (s->unknown_flag) {
        /* watermarked stream: inflate the logo and derive the XOR key
         * used later in svq3_decode_slice_header() (zlib-only path) */
975             unsigned watermark_width  = svq3_get_ue_golomb(&gb);
976             unsigned watermark_height = svq3_get_ue_golomb(&gb);
977             int u1                    = svq3_get_ue_golomb(&gb);
978             int u2                    = get_bits(&gb, 8);
979             int u3                    = get_bits(&gb, 2);
980             int u4                    = svq3_get_ue_golomb(&gb);
981             unsigned long buf_len     = watermark_width *
982                                         watermark_height * 4;
983             int offset                = get_bits_count(&gb) + 7 >> 3;
            /* overflow check before computing width * height * 4 */
986             if (watermark_height > 0 &&
987                 (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
990             buf = av_malloc(buf_len);
991             av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
992                    watermark_width, watermark_height);
993             av_log(avctx, AV_LOG_DEBUG,
994                    "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
995                    u1, u2, u3, u4, offset);
996             if (uncompress(buf, &buf_len, extradata + 8 + offset,
997                            size - offset) != Z_OK) {
998                 av_log(avctx, AV_LOG_ERROR,
999                        "could not uncompress watermark logo\n");
1003             s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1004             s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1005             av_log(avctx, AV_LOG_DEBUG,
1006                    "watermark key %#x\n", s->watermark_key);
1009             av_log(avctx, AV_LOG_ERROR,
1010                    "this svq3 file contains watermark which need zlib support compiled in\n");
    /* derive macroblock geometry from the (possibly updated) frame size */
1016     h->width  = avctx->width;
1017     h->height = avctx->height;
1018     h->mb_width  = (h->width + 15) / 16;
1019     h->mb_height = (h->height + 15) / 16;
1020     h->mb_stride = h->mb_width + 1;
1021     h->mb_num    = h->mb_width * h->mb_height;
1022     h->b_stride = 4 * h->mb_width;
1023     s->h_edge_pos = h->mb_width * 16;
1024     s->v_edge_pos = h->mb_height * 16;
1026     if (ff_h264_alloc_tables(h) < 0) {
1027         av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1028         return AVERROR(ENOMEM);
/* Release all buffers owned by one picture slot (MV/ref-index/mb-type
 * side data and the frame itself); the H264Picture struct stays allocated. */
1034 static void free_picture(AVCodecContext *avctx, H264Picture *pic)
1037     for (i = 0; i < 2; i++) {
1038         av_buffer_unref(&pic->motion_val_buf[i]);
1039         av_buffer_unref(&pic->ref_index_buf[i]);
1041     av_buffer_unref(&pic->mb_type_buf);
1043     av_frame_unref(&pic->f);
/**
 * Allocate the side-data buffers (mb_type, motion vectors, ref indices)
 * for a picture slot on first use, then obtain the actual frame buffer.
 * Also lazily allocates the edge-emulation buffer and records the line
 * sizes.  Returns 0 on success, negative AVERROR on failure (cleanup via
 * free_picture() at the 'fail'-style label on line 1094).
 */
1046 static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
1048     SVQ3Context *s = avctx->priv_data;
1049     H264Context *h = &s->h;
1050     const int big_mb_num    = h->mb_stride * (h->mb_height + 1) + 1;
1051     const int mb_array_size = h->mb_stride * h->mb_height;
1052     const int b4_stride     = h->mb_width * 4 + 1;
1053     const int b4_array_size = b4_stride * h->mb_height * 4;
    /* side buffers persist across frames; only allocate once per slot */
1056     if (!pic->motion_val_buf[0]) {
1059         pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1060         if (!pic->mb_type_buf)
1061             return AVERROR(ENOMEM);
        /* offset so that mb_type[-1] style neighbour accesses stay in bounds */
1062         pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1064         for (i = 0; i < 2; i++) {
1065             pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1066             pic->ref_index_buf[i]  = av_buffer_allocz(4 * mb_array_size);
1067             if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1068                 ret = AVERROR(ENOMEM);
1072             pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1073             pic->ref_index[i]  = pic->ref_index_buf[i]->data;
1076     pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1078     ret = ff_get_buffer(avctx, &pic->f,
1079                         pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1083     if (!h->edge_emu_buffer) {
        /* 17 lines: 16 + 1 for the (width+1, height+1) emulated fetches */
1084         h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1085         if (!h->edge_emu_buffer)
1086             return AVERROR(ENOMEM);
1089     h->linesize   = pic->f.linesize[0];
1090     h->uvlinesize = pic->f.linesize[1];
1094     free_picture(avctx, pic);
/**
 * Decode one SVQ3 packet into a frame.  Handles the end-of-stream flush
 * (empty packet outputs the buffered next_pic), slice-header parsing,
 * reference-picture rotation, missing-reference concealment, the MB
 * decode loop and final frame output/reordering.
 * NOTE(review): elided view — error returns and several closing braces
 * are on lines not shown here.
 */
1098 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1099                              int *got_frame, AVPacket *avpkt)
1101     const uint8_t *buf = avpkt->data;
1102     SVQ3Context *s     = avctx->priv_data;
1103     H264Context *h     = &s->h;
1104     int buf_size       = avpkt->size;
1107     /* special case for last picture */
1108     if (buf_size == 0) {
1109         if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1110             ret = av_frame_ref(data, &s->next_pic->f);
1113             s->last_frame_output = 1;
1119     init_get_bits(&h->gb, buf, 8 * buf_size);
1121     h->mb_x = h->mb_y = h->mb_xy = 0;
1123     if (svq3_decode_slice_header(avctx))
1126     h->pict_type = h->slice_type;
    /* non-B frame becomes the new forward reference; rotate next<->last */
1128     if (h->pict_type != AV_PICTURE_TYPE_B)
1129         FFSWAP(H264Picture*, s->next_pic, s->last_pic);
1131     av_frame_unref(&s->cur_pic->f);
1133     /* for skipping the frame */
1134     s->cur_pic->f.pict_type = h->pict_type;
1135     s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1137     ret = get_buffer(avctx, s->cur_pic);
1141     h->cur_pic_ptr = s->cur_pic;
1142     av_frame_unref(&h->cur_pic.f);
    /* shallow-copy into the H264 context, then re-reference the frame so
     * both copies own it */
1143     h->cur_pic     = *s->cur_pic;
1144     ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
1148     for (i = 0; i < 16; i++) {
1149         h->block_offset[i]           = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1150         h->block_offset[48 + i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1152     for (i = 0; i < 16; i++) {
1153         h->block_offset[16 + i]      =
1154         h->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1155         h->block_offset[48 + 16 + i] =
1156         h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1159     if (h->pict_type != AV_PICTURE_TYPE_I) {
1160         if (!s->last_pic->f.data[0]) {
        /* conceal a missing reference with mid-grey */
1161             av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1162             ret = get_buffer(avctx, s->last_pic);
1165             memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1166             memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1167                    s->last_pic->f.linesize[1]);
1168             memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1169                    s->last_pic->f.linesize[2]);
1172         if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1173             av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1174             ret = get_buffer(avctx, s->next_pic);
1177             memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1178             memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1179                    s->next_pic->f.linesize[1]);
1180             memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1181                    s->next_pic->f.linesize[2]);
1185     if (avctx->debug & FF_DEBUG_PICT_INFO)
1186         av_log(h->avctx, AV_LOG_DEBUG,
1187                "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1188                av_get_picture_type_char(h->pict_type),
1189                s->halfpel_flag, s->thirdpel_flag,
1190                s->adaptive_quant, h->qscale, h->slice_num);
1192     if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1193         avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1194         avctx->skip_frame >= AVDISCARD_ALL)
1197     if (s->next_p_frame_damaged) {
1198         if (h->pict_type == AV_PICTURE_TYPE_B)
1201             s->next_p_frame_damaged = 0;
    /* temporal-distance bookkeeping used for PREDICT_MODE MV scaling;
     * frame numbers wrap at 256, hence the +256 corrections */
1204     if (h->pict_type == AV_PICTURE_TYPE_B) {
1205         h->frame_num_offset = h->slice_num - h->prev_frame_num;
1207         if (h->frame_num_offset < 0)
1208             h->frame_num_offset += 256;
1209         if (h->frame_num_offset == 0 ||
1210             h->frame_num_offset >= h->prev_frame_num_offset) {
1211             av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1215         h->prev_frame_num        = h->frame_num;
1216         h->frame_num             = h->slice_num;
1217         h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1219         if (h->prev_frame_num_offset < 0)
1220             h->prev_frame_num_offset += 256;
    /* pre-mark the left border of the ref_cache as unavailable */
1223     for (m = 0; m < 2; m++) {
1225         for (i = 0; i < 4; i++) {
1227             for (j = -1; j < 4; j++)
1228                 h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1230                 h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1234     for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1235         for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1237             h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
        /* at the end of the slice data, realign and parse the next
         * slice header (multi-slice frames) */
1239             if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1240                 ((get_bits_count(&h->gb) & 7) == 0 ||
1241                  show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1242                 skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1243                 h->gb.size_in_bits = 8 * buf_size;
1245                 if (svq3_decode_slice_header(avctx))
1248                 /* TODO: support s->mb_skip_run */
1251             mb_type = svq3_get_ue_golomb(&h->gb);
        /* remap: I-frames only carry intra types; B-frames shift >= 4 */
1253             if (h->pict_type == AV_PICTURE_TYPE_I)
1255             else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1257             if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1258                 av_log(h->avctx, AV_LOG_ERROR,
1259                        "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1264                 ff_h264_hl_decode_mb(h);
1266             if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1267                 h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1268                     (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1271         ff_draw_horiz_band(avctx, &s->cur_pic->f,
1272                            s->last_pic->f.data[0] ? &s->last_pic->f : NULL,
1273                            16 * h->mb_y, 16, h->picture_structure, 0,
    /* output: B frames and low-delay streams output cur_pic directly,
     * otherwise the delayed last_pic is returned */
1277     if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1278         ret = av_frame_ref(data, &s->cur_pic->f);
1279     else if (s->last_pic->f.data[0])
1280         ret = av_frame_ref(data, &s->last_pic->f);
1284     /* Do not output the last pic after seeking. */
1285     if (s->last_pic->f.data[0] || h->low_delay)
1288     if (h->pict_type != AV_PICTURE_TYPE_B) {
1289         FFSWAP(H264Picture*, s->cur_pic, s->next_pic);
1291         av_frame_unref(&s->cur_pic->f);
/* Decoder teardown: free the three picture slots (buffers, then the
 * structs), drop the H264 context's frame reference and free it. */
1297 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1299     SVQ3Context *s = avctx->priv_data;
1300     H264Context *h = &s->h;
1302     free_picture(avctx, s->cur_pic);
1303     free_picture(avctx, s->next_pic);
1304     free_picture(avctx, s->last_pic);
1305     av_freep(&s->cur_pic);
1306     av_freep(&s->next_pic);
1307     av_freep(&s->last_pic);
1309     av_frame_unref(&h->cur_pic.f);
1311     ff_h264_free_context(h);
1316 AVCodec ff_svq3_decoder = {
1318 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1319 .type = AVMEDIA_TYPE_VIDEO,
1320 .id = AV_CODEC_ID_SVQ3,
1321 .priv_data_size = sizeof(SVQ3Context),
1322 .init = svq3_decode_init,
1323 .close = svq3_decode_end,
1324 .decode = svq3_decode_frame,
1325 .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1328 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,