2 * Copyright (c) 2003 The Libav Project
4 * This file is part of Libav.
6 * Libav is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * Libav is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with Libav; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
43 #include "libavutil/attributes.h"
46 #include "mpegvideo.h"
49 #include "h264data.h" // FIXME FIXME FIXME
51 #include "h264_mvpred.h"
54 #include "rectangle.h"
/* NOTE(review): fragment of the SVQ3Context decoder state — this listing is
 * non-contiguous, so the enclosing struct and neighbouring fields are not
 * visible here. */
78 uint32_t watermark_key;    /* XOR key derived from the watermark checksum in svq3_decode_init() */
80 int next_p_frame_damaged;  /* nonzero: the upcoming P-frame must be dropped (see svq3_decode_frame) */
83 int last_frame_output;     /* set once the delayed final frame has been emitted on flush (buf_size == 0) */
/* Motion-compensation modes for inter macroblocks. */
86 #define FULLPEL_MODE 1   /* integer-pel motion vectors */
87 #define HALFPEL_MODE 2   /* half-pel motion vectors */
88 #define THIRDPEL_MODE 3  /* third-pel motion vectors (SVQ3-specific) */
89 #define PREDICT_MODE 4   /* MVs derived from the next picture's vectors (B/skip) */
91 /* dual scan (from some older h264 draft
100 static const uint8_t svq3_scan[16] = {
/* 4x4 coefficient scan order; each entry is an x + 4*y offset.
 * NOTE(review): the closing brace of this table is among the lines
 * missing from this non-contiguous listing. */
101 0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
102 2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
103 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
104 0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
107 static const uint8_t luma_dc_zigzag_scan[16] = {
/* Scan order for the 16 luma DC coefficients; entries are 16*x + 64*y
 * offsets into the macroblock coefficient array (used for block type 0
 * in svq3_decode_block). */
108 0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
109 3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
110 1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
111 3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
114 static const uint8_t svq3_pred_0[25][2] = {
/* Pairs of indices selected by the luma-prediction VLC; each pair is fed
 * into svq3_pred_1[][][...] in svq3_decode_mb to derive two 4x4 intra
 * prediction modes.
 * NOTE(review): the first rows and the closing brace of this 25-entry
 * table are missing from this non-contiguous listing. */
117 { 0, 2 }, { 1, 1 }, { 2, 0 },
118 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
119 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
120 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
121 { 2, 4 }, { 3, 3 }, { 4, 2 },
126 static const int8_t svq3_pred_1[6][6][5] = {
/* Candidate 4x4 intra prediction modes, indexed as
 * [top_mode + 1][left_mode + 1][rank]; -1 marks an unavailable slot
 * (the +1 maps the "unavailable neighbour" value -1 to index 0).
 * Used by svq3_decode_mb when decoding luma prediction codes. */
127 { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
128 { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
129 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
130 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
131 { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
132 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
133 { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
134 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
135 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
136 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
137 { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
138 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
141 static const struct {
/* Short DCT VLC codes mapped to (run, level) pairs; row [0] is used for
 * inter, row [1] for intra (selected via the `intra` flag in
 * svq3_decode_block).
 * NOTE(review): the struct member declarations (run/level, per their use
 * as .run and .level in svq3_decode_block) are among the lines missing
 * from this non-contiguous listing. */
144 } svq3_dct_tables[2][16] = {
145 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
146 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
147 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
148 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
151 static const uint32_t svq3_dequant_coeff[32] = {
/* Dequantization multipliers indexed by qscale (0..31); applied with a
 * +0x80000 rounding term and >> 20 in the IDCT routines below. */
152 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
153 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
154 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
155 61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
158 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
/* Dequantize and inverse-transform the 4x4 block of luma DC coefficients.
 * Performs a row pass and a column pass of the SVQ3 integer transform
 * (13/17/7 butterfly), then scales by the qscale-dependent multiplier
 * with round-to-nearest (+0x80000 >> 20), scattering the results to
 * stride-spaced positions in `output`.
 * NOTE(review): this listing is non-contiguous — the declarations of
 * `stride`, `temp[]` and `i` are among the missing lines. */
160     const int qmul = svq3_dequant_coeff[qp];
164     static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
/* horizontal (row) transform pass */
166     for (i = 0; i < 4; i++) {
167         const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
168         const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
169         const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
170         const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
172         temp[4 * i + 0] = z0 + z3;
173         temp[4 * i + 1] = z1 + z2;
174         temp[4 * i + 2] = z1 - z2;
175         temp[4 * i + 3] = z0 - z3;
/* vertical (column) pass with dequantization and rounding */
178     for (i = 0; i < 4; i++) {
179         const int offset = x_offset[i];
180         const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
181         const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
182         const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
183         const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
185         output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
186         output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
187         output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
188         output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
193 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
194                         int stride, int qp, int dc)
/* Inverse-transform one 4x4 coefficient block and add the result to the
 * destination pixels (clipped to 0..255). When `dc` is nonzero the DC
 * coefficient is pre-scaled separately (dc == 1 uses the fixed 1538
 * factor, otherwise the qscale multiplier) and folded into the rounding
 * term. The coefficient block is cleared at the end.
 * NOTE(review): non-contiguous listing — some lines of this function
 * (including the loop variable declaration and brace lines) are missing
 * from this view. */
196     const int qmul = svq3_dequant_coeff[qp];
200         dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
201                                 : qmul * (block[0] >> 3) / 2);
/* row transform pass, in place */
205     for (i = 0; i < 4; i++) {
206         const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
207         const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
208         const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
209         const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
211         block[0 + 4 * i] = z0 + z3;
212         block[1 + 4 * i] = z1 + z2;
213         block[2 + 4 * i] = z1 - z2;
214         block[3 + 4 * i] = z0 - z3;
/* column pass: dequantize, round (DC folded into rr), add to dst */
217     for (i = 0; i < 4; i++) {
218         const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
219         const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
220         const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
221         const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
222         const int rr = (dc + 0x80000);
224         dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
225         dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
226         dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
227         dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
/* coefficients are consumed; clear for the next block */
230     memset(block, 0, 16 * sizeof(int16_t));
233 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
234                                     int index, const int type)
/* Decode run/level coefficient pairs for one block into `block[]`,
 * following the scan pattern selected by `type`:
 *   0 = luma DC (luma_dc_zigzag_scan), 1 = zigzag, 2 = svq3 scan,
 *   3 = chroma DC.
 * `intra` (3 * type >> 2) selects which svq3_dct_tables row / escape
 * coding variant applies. Returns nonzero on error (per its callers).
 * NOTE(review): non-contiguous listing — several branches of the VLC
 * decoding (escape handling, error returns, loop closing braces) are
 * missing from this view. */
236     static const uint8_t *const scan_patterns[4] =
237         { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
239     int run, level, limit;
241     const int intra = 3 * type >> 2;
242     const uint8_t *const scan = scan_patterns[type];
244     for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
245         for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
/* even vlc => positive level, odd => negative (sign applied at store) */
246             int sign = (vlc & 1) ? 0 : -1;
253             } else if (vlc < 4) {
258                 level = (vlc + 9 >> 2) - run;
/* small vlc values index directly into the run/level table */
262                 run = svq3_dct_tables[intra][vlc].run;
263                 level = svq3_dct_tables[intra][vlc].level;
267                         ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
271                         ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
/* advance by the run; exceeding the limit is a bitstream error */
275             if ((index += run) >= limit)
278             block[scan[index]] = (level ^ sign) - sign;
289 static inline void svq3_mc_dir_part(SVQ3Context *s,
290                                     int x, int y, int width, int height,
291                                     int mx, int my, int dxy,
292                                     int thirdpel, int dir, int avg)
/* Motion-compensate one partition: copy/average a width x height area
 * from the reference picture (dir == 0 -> last_pic, else next_pic) into
 * the current picture at (x, y), using third-pel or half-pel
 * interpolation, for luma and (unless CODEC_FLAG_GRAY) both chroma
 * planes. Out-of-frame references go through emulated_edge_mc.
 * NOTE(review): non-contiguous listing — emulation-flag setup, the
 * tpel/hpel call arguments and several brace lines are missing from
 * this view. */
294     H264Context *h = &s->h;
295     const Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
298     int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
/* clip MVs that reference outside the padded frame */
303     if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
304         my < 0 || my >= s->v_edge_pos - height - 1) {
306         mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
307         my = av_clip(my, -16, s->v_edge_pos - height + 15);
310     /* form component predictions */
311     dest = h->cur_pic.f.data[0] + x + y * h->linesize;
312     src = pic->f.data[0] + mx + my * h->linesize;
315         h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->linesize,
316                                  width + 1, height + 1,
317                                  mx, my, s->h_edge_pos, s->v_edge_pos);
318         src = h->edge_emu_buffer;
/* third-pel uses the tpel tables, otherwise hpel put/avg */
321         (avg ? h->dsp.avg_tpel_pixels_tab
322              : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
325         (avg ? s->hdsp.avg_pixels_tab
326              : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
/* chroma planes at half resolution, with rounded-down MVs */
329     if (!(h->flags & CODEC_FLAG_GRAY)) {
330         mx = mx + (mx < (int) x) >> 1;
331         my = my + (my < (int) y) >> 1;
333         height = height >> 1;
336         for (i = 1; i < 3; i++) {
337             dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
338             src = pic->f.data[i] + mx + my * h->uvlinesize;
341                 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->uvlinesize,
342                                          width + 1, height + 1,
343                                          mx, my, (s->h_edge_pos >> 1),
345                 src = h->edge_emu_buffer;
348                 (avg ? h->dsp.avg_tpel_pixels_tab
349                      : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
353                 (avg ? s->hdsp.avg_pixels_tab
354                      : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
361 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
/* Decode and apply motion for every partition of the current macroblock
 * in one direction:
 *   - predict the MV (pred_motion), or for PREDICT_MODE derive it from
 *     the co-located next-picture vectors scaled by the frame distance;
 *   - read the optional MV differential from the bitstream;
 *   - clip to the frame border and call svq3_mc_dir_part with the mode's
 *     sub-pel precision (thirdpel / halfpel / fullpel);
 *   - update mv_cache and write the vectors back to motion_val.
 * Returns < 0 on an invalid MV VLC (per its callers).
 * NOTE(review): non-contiguous listing — the `dir`/`avg` parameters on
 * the signature's continuation line, several conditionals and closing
 * braces are missing from this view. */
364     int i, j, k, mx, my, dx, dy, x, y;
365     H264Context *h = &s->h;
366     const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
367     const int part_height = 16 >> ((unsigned)(size + 1) / 3);
368     const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
369     const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
370     const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
372     for (i = 0; i < 16; i += part_height)
373         for (j = 0; j < 16; j += part_width) {
374             const int b_xy = (4 * h->mb_x + (j >> 2)) +
375                              (4 * h->mb_y + (i >> 2)) * h->b_stride;
377             x = 16 * h->mb_x + j;
378             y = 16 * h->mb_y + i;
379             k = (j >> 2 & 1) + (i >> 1 & 2) +
380                 (j >> 1 & 4) + (i & 8);
382             if (mode != PREDICT_MODE) {
383                 pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
/* PREDICT_MODE: start from the co-located MV of the next picture */
385                 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
386                 my = s->next_pic->motion_val[0][b_xy][1] << 1;
/* scale by temporal distance (forward vs. backward branch) */
389                     mx = mx * h->frame_num_offset /
390                          h->prev_frame_num_offset + 1 >> 1;
391                     my = my * h->frame_num_offset /
392                          h->prev_frame_num_offset + 1 >> 1;
394                     mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
395                          h->prev_frame_num_offset + 1 >> 1;
396                     my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
397                          h->prev_frame_num_offset + 1 >> 1;
401             /* clip motion vector prediction to frame border */
402             mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
403             my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
405             /* get (optional) motion vector differential */
406             if (mode == PREDICT_MODE) {
409                 dy = svq3_get_se_golomb(&h->gb);
410                 dx = svq3_get_se_golomb(&h->gb);
412                 if (dx == INVALID_VLC || dy == INVALID_VLC) {
413                     av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
418             /* compute motion vector */
419             if (mode == THIRDPEL_MODE) {
421                 mx = (mx + 1 >> 1) + dx;
422                 my = (my + 1 >> 1) + dy;
/* fx/fy: full-pel part; dxy encodes the third-pel fraction */
423                 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
424                 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
425                 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
427                 svq3_mc_dir_part(s, x, y, part_width, part_height,
428                                  fx, fy, dxy, 1, dir, avg);
431             } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
432                 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
433                 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
434                 dxy = (mx & 1) + 2 * (my & 1);
436                 svq3_mc_dir_part(s, x, y, part_width, part_height,
437                                  mx >> 1, my >> 1, dxy, 0, dir, avg);
/* FULLPEL_MODE: round the sixth-pel prediction to integer pels */
441                 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
442                 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
444                 svq3_mc_dir_part(s, x, y, part_width, part_height,
445                                  mx, my, 0, 0, dir, avg);
450             /* update mv_cache */
451             if (mode != PREDICT_MODE) {
452                 int32_t mv = pack16to32(mx, my);
454                 if (part_height == 8 && i < 8) {
455                     AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
457                     if (part_width == 8 && j < 8)
458                         AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
460                 if (part_width == 8 && j < 8)
461                     AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
462                 if (part_width == 4 || part_height == 4)
463                     AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
466             /* write back motion vectors */
467             fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
468                            part_width >> 2, part_height >> 2, h->b_stride,
469                            pack16to32(mx, my), 4);
475 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
/* Decode one macroblock of the given raw type:
 *   0        -> SKIP (direct / co-located prediction for B-frames),
 *   1..7     -> INTER (fullpel/halfpel/thirdpel MC in one or two dirs),
 *   8, 33    -> INTRA4x4 (8 = coded pred modes, 33 = DC_128),
 *   9..32    -> INTRA16x16.
 * Then decodes the CBP, optional qscale delta, and the residual
 * coefficient blocks (luma DC, luma AC, chroma DC, chroma AC).
 * Returns < 0 on bitstream errors.
 * NOTE(review): non-contiguous listing — numerous lines (variable
 * declarations, some else-branches, closing braces) are missing from
 * this view; comments below describe only the visible code. */
477     H264Context *h = &s->h;
478     int i, j, k, m, dir, mode;
482     const int mb_xy = h->mb_xy;
483     const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
/* neighbour availability masks for intra prediction */
485     h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
486     h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
487     h->topright_samples_available = 0xFFFF;
489     if (mb_type == 0) { /* SKIP */
490         if (h->pict_type == AV_PICTURE_TYPE_P ||
491             s->next_pic->mb_type[mb_xy] == -1) {
492             svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
495             if (h->pict_type == AV_PICTURE_TYPE_B)
496                 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
499             mb_type = MB_TYPE_SKIP;
/* B-frame skip: reuse the co-located macroblock's partitioning */
501             mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
502             if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
504             if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
507             mb_type = MB_TYPE_16x16;
509     } else if (mb_type < 8) { /* INTER */
/* sub-pel mode is signalled relative to the sequence-level flags */
510         if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
511             mode = THIRDPEL_MODE;
512         else if (s->halfpel_flag &&
513                  s->thirdpel_flag == !get_bits1(&h->gb))
519         /* note ref_cache should contain here:
/* fill mv_cache / ref_cache from the left, top, top-right and top-left
 * neighbours for MV prediction (both directions) */
527         for (m = 0; m < 2; m++) {
528             if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
529                 for (i = 0; i < 4; i++)
530                     AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
531                               h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
533                 for (i = 0; i < 4; i++)
534                     AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
537                 memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
538                        h->cur_pic.motion_val[m][b_xy - h->b_stride],
539                        4 * 2 * sizeof(int16_t));
540                 memset(&h->ref_cache[m][scan8[0] - 1 * 8],
541                        (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
543                 if (h->mb_x < h->mb_width - 1) {
544                     AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
545                               h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
546                     h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
547                         (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
548                          h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
550                     h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
552                     AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
553                               h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
554                     h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
555                         (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
557                     h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
559                 memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
560                        PART_NOT_AVAILABLE, 8);
/* P-frames only fill direction 0 */
562             if (h->pict_type != AV_PICTURE_TYPE_B)
566         /* decode motion vector(s) and form prediction(s) */
567         if (h->pict_type == AV_PICTURE_TYPE_P) {
568             if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
570         } else { /* AV_PICTURE_TYPE_B */
572             if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
/* direction not used: zero its motion vectors */
575                 for (i = 0; i < 4; i++)
576                     memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
577                            0, 4 * 2 * sizeof(int16_t));
580                 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
583                 for (i = 0; i < 4; i++)
584                     memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
585                            0, 4 * 2 * sizeof(int16_t));
589         mb_type = MB_TYPE_16x16;
590     } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
591         memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* import neighbour pred modes into the cache */
595             for (i = 0; i < 4; i++)
596                 h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
597             if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
598                 h->left_samples_available = 0x5F5F;
601             h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
602             h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
603             h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
604             h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
606             if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
607                 h->top_samples_available = 0x33FF;
610         /* decode prediction codes for luma blocks */
611         for (i = 0; i < 16; i += 2) {
612             vlc = svq3_get_ue_golomb(&h->gb);
615                 av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
619             left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
620             top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
/* one vlc yields the modes for a pair of 4x4 blocks via the
 * svq3_pred_0/svq3_pred_1 tables */
622             left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
623             left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
625             if (left[1] == -1 || left[2] == -1) {
626                 av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
630         } else { /* mb_type == 33, DC_128_PRED block type */
631             for (i = 0; i < 4; i++)
632                 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
635         write_back_intra_pred_mode(h);
638             ff_h264_check_intra4x4_pred_mode(h);
640             h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
641             h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
643             for (i = 0; i < 4; i++)
644                 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
646             h->top_samples_available = 0x33FF;
647             h->left_samples_available = 0x5F5F;
650         mb_type = MB_TYPE_INTRA4x4;
651     } else { /* INTRA16x16 */
652         dir = i_mb_type_info[mb_type - 8].pred_mode;
/* remap H.264 pred-mode numbering to SVQ3's */
653         dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
655         if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) < 0) {
656             av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
657             return h->intra16x16_pred_mode;
660         cbp = i_mb_type_info[mb_type - 8].cbp;
661         mb_type = MB_TYPE_INTRA16x16;
/* invalidate MVs for intra blocks in inter pictures */
664     if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
665         for (i = 0; i < 4; i++)
666             memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
667                    0, 4 * 2 * sizeof(int16_t));
668         if (h->pict_type == AV_PICTURE_TYPE_B) {
669             for (i = 0; i < 4; i++)
670                 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
671                        0, 4 * 2 * sizeof(int16_t));
674     if (!IS_INTRA4x4(mb_type)) {
675         memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
677     if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
678         memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
/* coded block pattern (not transmitted for INTRA16x16 / P-skip) */
681     if (!IS_INTRA16x16(mb_type) &&
682         (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
683         if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48) {
684             av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
688         cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
689                                 : golomb_to_inter_cbp[vlc];
/* optional qscale delta */
691     if (IS_INTRA16x16(mb_type) ||
692         (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
693         h->qscale += svq3_get_se_golomb(&h->gb);
695         if (h->qscale > 31u) {
696             av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
/* luma DC block for INTRA16x16 */
700     if (IS_INTRA16x16(mb_type)) {
701         AV_ZERO128(h->mb_luma_dc[0] + 0);
702         AV_ZERO128(h->mb_luma_dc[0] + 8);
703         if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
704             av_log(h->avctx, AV_LOG_ERROR,
705                    "error while decoding intra luma dc\n");
/* luma AC blocks, per 8x8 group flagged in cbp */
711         const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
712         const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
714         for (i = 0; i < 4; i++)
715             if ((cbp & (1 << i))) {
716                 for (j = 0; j < 4; j++) {
717                     k = index ? (1 * (j & 1) + 2 * (i & 1) +
718                                  2 * (j & 2) + 4 * (i & 2))
720                     h->non_zero_count_cache[scan8[k]] = 1;
722                     if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
723                         av_log(h->avctx, AV_LOG_ERROR,
724                                "error while decoding block\n");
/* chroma DC, then chroma AC blocks */
731             for (i = 1; i < 3; ++i)
732                 if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
733                     av_log(h->avctx, AV_LOG_ERROR,
734                            "error while decoding chroma dc block\n");
739                 for (i = 1; i < 3; i++) {
740                     for (j = 0; j < 4; j++) {
742                         h->non_zero_count_cache[scan8[k]] = 1;
744                         if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
745                             av_log(h->avctx, AV_LOG_ERROR,
746                                    "error while decoding chroma ac block\n");
756     h->cur_pic.mb_type[mb_xy] = mb_type;
758     if (IS_INTRA(mb_type))
759         h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
764 static int svq3_decode_slice_header(AVCodecContext *avctx)
/* Parse one SVQ3 slice header: validate the 8-bit header byte, locate
 * the next slice (next_slice_index), undo the optional watermark XOR
 * scrambling, read slice type / slice number / qscale / adaptive-quant
 * flag, skip reserved bits, and reset intra predictors for the rows the
 * slice invalidates. Returns nonzero on error (per its callers).
 * NOTE(review): non-contiguous listing — variable declarations, some
 * closing braces and a few statements are missing from this view. */
766     SVQ3Context *s = avctx->priv_data;
767     H264Context *h = &s->h;
768     const int mb_xy = h->mb_xy;
772     header = get_bits(&h->gb, 8);
774     if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
776         av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
/* bits 5-6 give the byte length of the next-slice offset field */
779         int length = header >> 5 & 3;
781         s->next_slice_index = get_bits_count(&h->gb) +
782                               8 * show_bits(&h->gb, 8 * length) +
785         if (s->next_slice_index > h->gb.size_in_bits) {
786             av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
790         h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
791         skip_bits(&h->gb, 8);
/* descramble the watermarked bytes ahead of the read position */
793         if (s->watermark_key) {
794             uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
795             AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
796                     header ^ s->watermark_key);
799             memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
800                    &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
802         skip_bits_long(&h->gb, 0);
805     if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
806         av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
810     h->slice_type = golomb_to_pict_type[slice_id];
/* header type 2 carries an explicit macroblock skip run */
812     if ((header & 0x9F) == 2) {
813         i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
814         h->mb_skip_run = get_bits(&h->gb, i) -
815                          (h->mb_y * h->mb_width + h->mb_x);
821     h->slice_num = get_bits(&h->gb, 8);
822     h->qscale = get_bits(&h->gb, 5);
823     s->adaptive_quant = get_bits1(&h->gb);
/* reserved/unknown bits, then a variable-length extension loop */
832     skip_bits(&h->gb, 2);
834     while (get_bits1(&h->gb))
835         skip_bits(&h->gb, 8);
837     /* reset intra predictors and invalidate motion vector references */
839         memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
840                -1, 4 * sizeof(int8_t));
841         memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
842                -1, 8 * sizeof(int8_t) * h->mb_x);
845         memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
846                -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
849             h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
855 static av_cold int svq3_decode_init(AVCodecContext *avctx)
/* Decoder init: allocate the three Picture slots, initialize the shared
 * H.264 context and hpel DSP, then parse the "SEQH" header found in the
 * stsd/ImageDescription extradata (frame size code, half/third-pel
 * flags, low-delay, optional zlib-compressed watermark whose checksum
 * becomes the descrambling key), and finally size the macroblock
 * geometry and allocate the H.264 tables.
 * NOTE(review): non-contiguous listing — several declarations, the
 * frame_size_code switch cases, and various closing braces are missing
 * from this view. */
857     SVQ3Context *s = avctx->priv_data;
858     H264Context *h = &s->h;
860     unsigned char *extradata;
861     unsigned char *extradata_end;
863     int marker_found = 0;
865     s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
866     s->last_pic = av_mallocz(sizeof(*s->last_pic));
867     s->next_pic = av_mallocz(sizeof(*s->next_pic));
868     if (!s->next_pic || !s->last_pic || !s->cur_pic) {
869         av_freep(&s->cur_pic);
870         av_freep(&s->last_pic);
871         av_freep(&s->next_pic);
872         return AVERROR(ENOMEM);
875     if (ff_h264_decode_init(avctx) < 0)
878     ff_hpeldsp_init(&s->hdsp, avctx->flags);
879     h->flags = avctx->flags;
881     h->picture_structure = PICT_FRAME;
882     avctx->pix_fmt = avctx->codec->pix_fmts[0];
884     h->chroma_qp[0] = h->chroma_qp[1] = 4;
885     h->chroma_x_shift = h->chroma_y_shift = 1;
888     s->thirdpel_flag = 1;
891     /* prowl for the "SEQH" marker in the extradata */
892     extradata = (unsigned char *)avctx->extradata;
893     extradata_end = avctx->extradata + avctx->extradata_size;
895     for (m = 0; m + 8 < avctx->extradata_size; m++) {
896         if (!memcmp(extradata, "SEQH", 4)) {
904     /* if a match was found, parse the extra data */
/* bounds-check the declared SEQH payload size against the extradata */
909         size = AV_RB32(&extradata[4]);
910         if (size > extradata_end - extradata - 8)
911             return AVERROR_INVALIDDATA;
912         init_get_bits(&gb, extradata + 8, size * 8);
914         /* 'frame size code' and optional 'width, height' */
915         frame_size_code = get_bits(&gb, 3);
916         switch (frame_size_code) {
/* code 7: explicit 12-bit width and height follow */
946             avctx->width = get_bits(&gb, 12);
947             avctx->height = get_bits(&gb, 12);
951         s->halfpel_flag = get_bits1(&gb);
952         s->thirdpel_flag = get_bits1(&gb);
960         h->low_delay = get_bits1(&gb);
/* variable-length extension loop, as in the slice header */
965         while (get_bits1(&gb))
968         s->unknown_flag = get_bits1(&gb);
969         avctx->has_b_frames = !h->low_delay;
970         if (s->unknown_flag) {
/* watermark present: decompress it and derive the XOR key */
972             unsigned watermark_width = svq3_get_ue_golomb(&gb);
973             unsigned watermark_height = svq3_get_ue_golomb(&gb);
974             int u1 = svq3_get_ue_golomb(&gb);
975             int u2 = get_bits(&gb, 8);
976             int u3 = get_bits(&gb, 2);
977             int u4 = svq3_get_ue_golomb(&gb);
978             unsigned long buf_len = watermark_width *
979                                     watermark_height * 4;
980             int offset = get_bits_count(&gb) + 7 >> 3;
/* overflow check on watermark_width * watermark_height * 4 */
983             if (watermark_height > 0 &&
984                 (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
987             buf = av_malloc(buf_len);
988             av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
989                    watermark_width, watermark_height);
990             av_log(avctx, AV_LOG_DEBUG,
991                    "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
992                    u1, u2, u3, u4, offset);
993             if (uncompress(buf, &buf_len, extradata + 8 + offset,
994                            size - offset) != Z_OK) {
995                 av_log(avctx, AV_LOG_ERROR,
996                        "could not uncompress watermark logo\n");
1000             s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1001             s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1002             av_log(avctx, AV_LOG_DEBUG,
1003                    "watermark key %#x\n", s->watermark_key);
1006             av_log(avctx, AV_LOG_ERROR,
1007                    "this svq3 file contains watermark which need zlib support compiled in\n");
/* macroblock geometry derived from the final frame dimensions */
1013     h->width = avctx->width;
1014     h->height = avctx->height;
1015     h->mb_width = (h->width + 15) / 16;
1016     h->mb_height = (h->height + 15) / 16;
1017     h->mb_stride = h->mb_width + 1;
1018     h->mb_num = h->mb_width * h->mb_height;
1019     h->b_stride = 4 * h->mb_width;
1020     s->h_edge_pos = h->mb_width * 16;
1021     s->v_edge_pos = h->mb_height * 16;
1023     if (ff_h264_alloc_tables(h) < 0) {
1024         av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1025         return AVERROR(ENOMEM);
1031 static void free_picture(AVCodecContext *avctx, Picture *pic)
/* Release all buffers owned by one Picture: per-direction motion vectors
 * and reference indices, the mb_type array, and the frame itself.
 * Safe to call on a partially-allocated picture (unref of NULL buffers
 * is a no-op). */
1034     for (i = 0; i < 2; i++) {
1035         av_buffer_unref(&pic->motion_val_buf[i]);
1036         av_buffer_unref(&pic->ref_index_buf[i]);
1038     av_buffer_unref(&pic->mb_type_buf);
1040     av_frame_unref(&pic->f);
1043 static int get_buffer(AVCodecContext *avctx, Picture *pic)
/* Prepare a Picture for decoding into: lazily allocate its mb_type,
 * motion_val and ref_index side-data buffers (sized from the current
 * macroblock geometry), acquire a frame buffer via ff_get_buffer, and
 * lazily allocate the shared edge-emulation buffer. Also records the
 * luma/chroma linesizes in the H264Context. Returns 0 on success or a
 * negative AVERROR; frees the picture via free_picture on the error
 * path (fail label not visible in this non-contiguous listing). */
1045     SVQ3Context *s = avctx->priv_data;
1046     H264Context *h = &s->h;
1047     const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1048     const int mb_array_size = h->mb_stride * h->mb_height;
1049     const int b4_stride = h->mb_width * 4 + 1;
1050     const int b4_array_size = b4_stride * h->mb_height * 4;
/* side data is allocated once and reused across frames */
1053     if (!pic->motion_val_buf[0]) {
1056         pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1057         if (!pic->mb_type_buf)
1058             return AVERROR(ENOMEM);
1059         pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1061         for (i = 0; i < 2; i++) {
1062             pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1063             pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1064             if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1065                 ret = AVERROR(ENOMEM);
1069             pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1070             pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* B-frames are not used as references */
1073     pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1075     ret = ff_get_buffer(avctx, &pic->f,
1076                         pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1080     if (!h->edge_emu_buffer) {
1081         h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1082         if (!h->edge_emu_buffer)
1083             return AVERROR(ENOMEM);
1086     h->linesize = pic->f.linesize[0];
1087     h->uvlinesize = pic->f.linesize[1];
1091     free_picture(avctx, pic);
1095 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1096                              int *got_frame, AVPacket *avpkt)
/* Decode one packet into a frame: handle the end-of-stream flush case,
 * parse the slice header, rotate the last/next reference pictures,
 * acquire the current picture's buffers, synthesize gray reference
 * frames if references are missing, then decode every macroblock
 * (switching slices at slice boundaries), emit a draw_horiz_band per
 * row, and finally output either the current or the delayed picture
 * depending on picture type and low_delay.
 * NOTE(review): non-contiguous listing — declarations (ret, i, m, j,
 * mb_type, ...), several error-path lines and closing braces are
 * missing from this view. */
1098     const uint8_t *buf = avpkt->data;
1099     SVQ3Context *s = avctx->priv_data;
1100     H264Context *h = &s->h;
1101     int buf_size = avpkt->size;
1104     /* special case for last picture */
1105     if (buf_size == 0) {
1106         if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1107             ret = av_frame_ref(data, &s->next_pic->f);
1110             s->last_frame_output = 1;
1116     init_get_bits(&h->gb, buf, 8 * buf_size);
1118     h->mb_x = h->mb_y = h->mb_xy = 0;
1120     if (svq3_decode_slice_header(avctx))
1123     h->pict_type = h->slice_type;
/* non-B pictures become the new forward reference */
1125     if (h->pict_type != AV_PICTURE_TYPE_B)
1126         FFSWAP(Picture*, s->next_pic, s->last_pic);
1128     av_frame_unref(&s->cur_pic->f);
1130     /* for skipping the frame */
1131     s->cur_pic->f.pict_type = h->pict_type;
1132     s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1134     ret = get_buffer(avctx, s->cur_pic);
1138     h->cur_pic_ptr = s->cur_pic;
1139     av_frame_unref(&h->cur_pic.f);
1140     h->cur_pic = *s->cur_pic;
1141     ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
/* per-frame block offsets for luma (0, 48+) and chroma (16/32) planes */
1145     for (i = 0; i < 16; i++) {
1146         h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1147         h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1149     for (i = 0; i < 16; i++) {
1150         h->block_offset[16 + i] =
1151         h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1152         h->block_offset[48 + 16 + i] =
1153         h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
/* conceal missing references with mid-gray frames */
1156     if (h->pict_type != AV_PICTURE_TYPE_I) {
1157         if (!s->last_pic->f.data[0]) {
1158             av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1159             ret = get_buffer(avctx, s->last_pic);
1162             memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1163             memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1164                    s->last_pic->f.linesize[1]);
1165             memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1166                    s->last_pic->f.linesize[2]);
1169         if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1170             av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1171             ret = get_buffer(avctx, s->next_pic);
1174             memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1175             memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1176                    s->next_pic->f.linesize[1]);
1177             memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1178                    s->next_pic->f.linesize[2]);
1182     if (avctx->debug & FF_DEBUG_PICT_INFO)
1183         av_log(h->avctx, AV_LOG_DEBUG,
1184                "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1185                av_get_picture_type_char(h->pict_type),
1186                s->halfpel_flag, s->thirdpel_flag,
1187                s->adaptive_quant, h->qscale, h->slice_num);
/* honor the skip_frame discard levels */
1189     if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1190         avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1191         avctx->skip_frame >= AVDISCARD_ALL)
1194     if (s->next_p_frame_damaged) {
1195         if (h->pict_type == AV_PICTURE_TYPE_B)
1198             s->next_p_frame_damaged = 0;
/* maintain frame_num bookkeeping used for B-frame MV scaling */
1201     if (h->pict_type == AV_PICTURE_TYPE_B) {
1202         h->frame_num_offset = h->slice_num - h->prev_frame_num;
1204         if (h->frame_num_offset < 0)
1205             h->frame_num_offset += 256;
1206         if (h->frame_num_offset == 0 ||
1207             h->frame_num_offset >= h->prev_frame_num_offset) {
1208             av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1212         h->prev_frame_num = h->frame_num;
1213         h->frame_num = h->slice_num;
1214         h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1216         if (h->prev_frame_num_offset < 0)
1217             h->prev_frame_num_offset += 256;
/* pre-fill ref_cache: inside-frame positions valid, borders unavailable */
1220     for (m = 0; m < 2; m++) {
1222         for (i = 0; i < 4; i++) {
1224             for (j = -1; j < 4; j++)
1225                 h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1227                 h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
/* main macroblock decode loop */
1231     for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1232         for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1234             h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
/* crossed into the next slice: re-parse a slice header */
1236             if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1237                 ((get_bits_count(&h->gb) & 7) == 0 ||
1238                  show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1239                 skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1240                 h->gb.size_in_bits = 8 * buf_size;
1242                 if (svq3_decode_slice_header(avctx))
1245             /* TODO: support s->mb_skip_run */
1248             mb_type = svq3_get_ue_golomb(&h->gb);
1250             if (h->pict_type == AV_PICTURE_TYPE_I)
1252             else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1254             if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1255                 av_log(h->avctx, AV_LOG_ERROR,
1256                        "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1261                 ff_h264_hl_decode_mb(h);
1263             if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1264                 h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1265                     (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1268         ff_draw_horiz_band(avctx, NULL, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL,
1269                            16 * h->mb_y, 16, h->picture_structure, 0, 0,
1270                            h->low_delay, h->mb_height * 16, h->mb_width * 16);
/* output: B-frames/low-delay emit cur_pic, otherwise the delayed last_pic */
1273     if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1274         ret = av_frame_ref(data, &s->cur_pic->f);
1275     else if (s->last_pic->f.data[0])
1276         ret = av_frame_ref(data, &s->last_pic->f);
1280     /* Do not output the last pic after seeking. */
1281     if (s->last_pic->f.data[0] || h->low_delay)
1284     if (h->pict_type != AV_PICTURE_TYPE_B) {
1285         FFSWAP(Picture*, s->cur_pic, s->next_pic);
1287         av_frame_unref(&s->cur_pic->f);
1293 static av_cold int svq3_decode_end(AVCodecContext *avctx)
/* Decoder teardown: free the three Picture slots (buffers then the
 * structs themselves), drop the H264Context's frame reference and free
 * the shared H.264 context state. */
1295     SVQ3Context *s = avctx->priv_data;
1296     H264Context *h = &s->h;
1298     free_picture(avctx, s->cur_pic);
1299     free_picture(avctx, s->next_pic);
1300     free_picture(avctx, s->last_pic);
1301     av_freep(&s->cur_pic);
1302     av_freep(&s->next_pic);
1303     av_freep(&s->last_pic);
1305     av_frame_unref(&h->cur_pic.f);
1307     ff_h264_free_context(h);
1312 AVCodec ff_svq3_decoder = {
/* Codec registration entry for the SVQ3 decoder.
 * NOTE(review): non-contiguous listing — the .name field, the remainder
 * of .capabilities and the closing lines are missing from this view. */
1314     .type           = AVMEDIA_TYPE_VIDEO,
1315     .id             = AV_CODEC_ID_SVQ3,
1316     .priv_data_size = sizeof(SVQ3Context),
1317     .init           = svq3_decode_init,
1318     .close          = svq3_decode_end,
1319     .decode         = svq3_decode_frame,
1320     .capabilities   = CODEC_CAP_DRAW_HORIZ_BAND |
1323     .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1324     .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,