2 * Copyright (c) 2003 The Libav Project
4 * This file is part of Libav.
6 * Libav is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * Libav is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with Libav; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
45 #include "libavutil/attributes.h"
48 #include "mpegutils.h"
51 #include "h264data.h" // FIXME FIXME FIXME
53 #include "h264_mvpred.h"
56 #include "rectangle.h"
/* NOTE(review): fragment of the SVQ3Context state; the struct header and some
 * fields fall outside this chunk. Fields documented from their uses below. */
/* future ("next") and past ("last") reference pictures for P/B prediction */
76 H264Picture *next_pic;
77 H264Picture *last_pic;
/* 32-bit key XORed over slice payload to undo the bitstream watermark
 * (derived from the decompressed logo in svq3_decode_init()) */
82 uint32_t watermark_key;
/* flags that the upcoming P frame lost its reference; cleared in
 * svq3_decode_frame() — where it is set lies outside this chunk */
84 int next_p_frame_damaged;
/* guards against emitting the delayed last picture more than once */
87 int last_frame_output;
/* Motion-compensation modes consumed by svq3_mc_dir(): full-, half- and
 * third-pel MV precision, plus PREDICT_MODE where the MV is derived by
 * scaling the co-located MV of next_pic (B-frame direct prediction). */
90 #define FULLPEL_MODE 1
91 #define HALFPEL_MODE 2
92 #define THIRDPEL_MODE 3
93 #define PREDICT_MODE 4
95 /* dual scan (from some older h264 draft)
/* SVQ3-specific 4x4 coefficient scan order, entries encoded as x + y*4.
 * Selected as scan_patterns[2] in svq3_decode_block(). */
104 static const uint8_t svq3_scan[16] = {
105 0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
106 2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
107 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
108 0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
/* Scan order for the 4x4 luma DC block (scan_patterns[0] in
 * svq3_decode_block()). Entries are offsets into the coefficient buffer in
 * units of 16 (column step) and 64 (row step) — presumably matching the
 * 16-coefficient sub-block layout of h->mb; TODO confirm against H264Context. */
111 static const uint8_t luma_dc_zigzag_scan[16] = {
112 0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
113 3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
114 1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
115 3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
/* (x, y) block-coordinate pairs visited in anti-diagonal order; indexed by
 * the luma-prediction VLC in svq3_decode_mb() (svq3_pred_0[vlc][0..1]).
 * NOTE(review): declared [25][2] but some rows were elided from this chunk. */
118 static const uint8_t svq3_pred_0[25][2] = {
121 { 0, 2 }, { 1, 1 }, { 2, 0 },
122 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
123 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
124 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
125 { 2, 4 }, { 3, 3 }, { 4, 2 },
/* Candidate intra 4x4 prediction modes, indexed [top_mode + 1][left_mode + 1]
 * (the +1 maps the "unavailable" marker -1 to row/column 0); the innermost
 * index is the decoded rank from svq3_pred_0. -1 entries are invalid and are
 * rejected as "weird prediction" in svq3_decode_mb(). */
130 static const int8_t svq3_pred_1[6][6][5] = {
131 { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
132 { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
133 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
134 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
135 { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
136 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
137 { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
138 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
139 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
140 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
141 { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
142 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/* (run, level) lookup for the short DCT VLC codes; first index selects the
 * intra/inter table (see svq3_decode_block(), which reads .run and .level).
 * NOTE(review): the struct member declarations were elided from this chunk. */
145 static const struct {
148 } svq3_dct_tables[2][16] = {
149 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
150 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
151 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
152 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/* Dequantization multipliers indexed by qp (0..31). The table doubles every
 * 6 steps (3881 -> 7761 -> 15523 ...), i.e. roughly 2^(qp/6) scaling; values
 * are applied with 20 fractional bits (see the +0x80000 >> 20 rounding in the
 * IDCT helpers below). */
155 static const uint32_t svq3_dequant_coeff[32] = {
156 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
157 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
158 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
159 61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
/**
 * Dequantize and inverse-transform the 16 luma DC coefficients of a
 * macroblock. Both passes use the SVQ3 butterfly (13,13 / 17,7) rather than
 * the plain H.264 one; results are scaled by qmul and rounded with
 * (+0x80000 >> 20), i.e. 20 fractional bits.
 *
 * @param output destination coefficient array, written at stride-spaced rows
 *               and the column offsets in x_offset
 * @param input  16 quantized DC coefficients (row-major, 4x4)
 * @param qp     quantizer index into svq3_dequant_coeff (caller must keep it
 *               in 0..31)
 */
162 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
164 const int qmul = svq3_dequant_coeff[qp];
/* column base offsets of the four DC positions inside the output layout */
168 static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
/* horizontal (row) pass into temp[] */
170 for (i = 0; i < 4; i++) {
171 const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
172 const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
173 const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
174 const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
176 temp[4 * i + 0] = z0 + z3;
177 temp[4 * i + 1] = z1 + z2;
178 temp[4 * i + 2] = z1 - z2;
179 temp[4 * i + 3] = z0 - z3;
/* vertical (column) pass with dequantization and rounding to integer */
182 for (i = 0; i < 4; i++) {
183 const int offset = x_offset[i];
184 const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
185 const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
186 const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
187 const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
189 output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
190 output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
191 output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
192 output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
/**
 * Dequantize, inverse-transform a 4x4 residual block and add it to dst.
 * Same 13/17/7 butterfly as the DC helper above; the (optional) DC term is
 * pre-scaled and folded into the rounding constant rr. The block is cleared
 * on exit so the coefficient buffer can be reused.
 *
 * @param dst    destination pixels (clipped to 8-bit with av_clip_uint8)
 * @param block  16 coefficients; zeroed before returning
 * @param stride dst line size in bytes
 * @param qp     quantizer index into svq3_dequant_coeff
 * @param dc     DC handling selector: 1 uses the fixed 1538 scale, any other
 *               non-zero value rescales block[0] by qmul (see below)
 */
197 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
198 int stride, int qp, int dc)
200 const int qmul = svq3_dequant_coeff[qp];
/* fold the DC coefficient into a single pre-scaled term */
204 dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
205 : qmul * (block[0] >> 3) / 2);
/* horizontal (row) pass, in place */
209 for (i = 0; i < 4; i++) {
210 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
211 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
212 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
213 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
215 block[0 + 4 * i] = z0 + z3;
216 block[1 + 4 * i] = z1 + z2;
217 block[2 + 4 * i] = z1 - z2;
218 block[3 + 4 * i] = z0 - z3;
/* vertical (column) pass; add to dst with DC + 20-bit rounding */
221 for (i = 0; i < 4; i++) {
222 const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
223 const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
224 const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
225 const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
226 const int rr = (dc + 0x80000);
228 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
229 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
230 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
231 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
/* reset the coefficient block for the next use */
234 memset(block, 0, 16 * sizeof(int16_t));
/**
 * Decode run/level coefficient pairs for one block.
 *
 * @param gb    bit reader positioned at the block's VLC data
 * @param block destination coefficients, written through the selected scan
 * @param index first coefficient position (e.g. 1 when a separate DC exists)
 * @param type  scan selector: 0 = luma DC zigzag, 1 = zigzag, 2 = svq3 scan,
 *              3 = chroma DC
 * @return 0 on success; error paths lie in lines elided from this chunk
 */
237 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
238 int index, const int type)
240 static const uint8_t *const scan_patterns[4] =
241 { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
243 int run, level, limit;
/* 3*type >> 2 is 0 only for type 0 (luma DC); it doubles as the index into
 * the intra/inter svq3_dct_tables below */
245 const int intra = 3 * type >> 2;
246 const uint8_t *const scan = scan_patterns[type];
/* outer loop widens the limit in steps of 8 once the first 16 (or 8 for
 * luma DC) coefficients are done */
248 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
249 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
/* odd VLC -> positive, even -> negative (applied via XOR/subtract below) */
250 int sign = (vlc & 1) ? 0 : -1;
257 } else if (vlc < 4) {
262 level = (vlc + 9 >> 2) - run;
/* long codes: direct table lookup */
266 run = svq3_dct_tables[intra][vlc].run;
267 level = svq3_dct_tables[intra][vlc].level;
271 ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
275 ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
/* bounds check before writing through the scan pattern */
279 if ((index += run) >= limit)
282 block[scan[index]] = (level ^ sign) - sign;
/**
 * Motion-compensate one partition: luma plane first, then (unless the GRAY
 * flag is set) both chroma planes at halved coordinates/dimensions.
 *
 * @param x,y            partition position in luma pixels
 * @param width,height   partition size (16, 8 or 4)
 * @param mx,my          motion vector in full-pel units (already rounded)
 * @param dxy            sub-pel interpolation selector for the DSP tables
 * @param thirdpel       non-zero selects the thirdpel (tpel) DSP functions
 * @param dir            0 = last_pic (forward), 1 = next_pic (backward)
 * @param avg            non-zero averages into dest (bidirectional blend)
 */
293 static inline void svq3_mc_dir_part(SVQ3Context *s,
294 int x, int y, int width, int height,
295 int mx, int my, int dxy,
296 int thirdpel, int dir, int avg)
298 H264Context *h = &s->h;
299 const H264Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
302 int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
/* detect MVs reaching outside the padded frame; emulated_edge_mc below
 * handles the out-of-frame samples (emu flag set in an elided line) */
307 if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
308 my < 0 || my >= s->v_edge_pos - height - 1) {
310 mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
311 my = av_clip(my, -16, s->v_edge_pos - height + 15);
314 /* form component predictions */
315 dest = h->cur_pic.f.data[0] + x + y * h->linesize;
316 src = pic->f.data[0] + mx + my * h->linesize;
319 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
320 h->linesize, h->linesize,
321 width + 1, height + 1,
322 mx, my, s->h_edge_pos, s->v_edge_pos);
323 src = h->edge_emu_buffer;
/* luma: thirdpel or halfpel DSP, put or avg depending on direction blend */
326 (avg ? s->tdsp.avg_tpel_pixels_tab
327 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
330 (avg ? s->hdsp.avg_pixels_tab
331 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
334 if (!(h->flags & CODEC_FLAG_GRAY)) {
/* halve MV and size for 4:2:0 chroma; the +(mx < x) term rounds toward
 * the block position */
335 mx = mx + (mx < (int) x) >> 1;
336 my = my + (my < (int) y) >> 1;
338 height = height >> 1;
/* planes 1 and 2 = Cb, Cr */
341 for (i = 1; i < 3; i++) {
342 dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
343 src = pic->f.data[i] + mx + my * h->uvlinesize;
346 h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
347 h->uvlinesize, h->uvlinesize,
348 width + 1, height + 1,
349 mx, my, (s->h_edge_pos >> 1),
351 src = h->edge_emu_buffer;
354 (avg ? s->tdsp.avg_tpel_pixels_tab
355 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
359 (avg ? s->hdsp.avg_pixels_tab
360 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/**
 * Decode and apply motion for all partitions of a macroblock in one
 * direction. For each partition: predict the MV (or scale the co-located MV
 * of next_pic in PREDICT_MODE), read the optional MV differential, dispatch
 * to svq3_mc_dir_part() at the precision implied by mode, then refresh
 * mv_cache and write the MVs back to cur_pic.
 *
 * @param size partition-size code; decides part_width/part_height below
 * @param mode FULLPEL/HALFPEL/THIRDPEL/PREDICT (see defines above)
 * @param dir  0 = forward (last_pic), 1 = backward (next_pic)
 * @param avg  passed through to svq3_mc_dir_part() for bi-directional blend
 * @return 0 on success, negative on VLC error (return paths partly elided)
 */
367 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
370 int i, j, k, mx, my, dx, dy, x, y;
371 H264Context *h = &s->h;
372 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
373 const int part_height = 16 >> ((unsigned)(size + 1) / 3);
374 const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
/* MV clip limits in 1/6-pel units */
375 const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
376 const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
378 for (i = 0; i < 16; i += part_height)
379 for (j = 0; j < 16; j += part_width) {
380 const int b_xy = (4 * h->mb_x + (j >> 2)) +
381 (4 * h->mb_y + (i >> 2)) * h->b_stride;
383 x = 16 * h->mb_x + j;
384 y = 16 * h->mb_y + i;
/* k = scan8-style index of this 4x4 position within the MB */
385 k = (j >> 2 & 1) + (i >> 1 & 2) +
386 (j >> 1 & 4) + (i & 8);
388 if (mode != PREDICT_MODE) {
389 pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
/* PREDICT_MODE: start from the co-located MV of the next picture ... */
391 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
392 my = s->next_pic->motion_val[0][b_xy][1] << 1;
/* ... scaled by temporal distance, forward branch */
395 mx = mx * h->frame_num_offset /
396 h->prev_frame_num_offset + 1 >> 1;
397 my = my * h->frame_num_offset /
398 h->prev_frame_num_offset + 1 >> 1;
/* backward branch uses the remaining distance */
400 mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
401 h->prev_frame_num_offset + 1 >> 1;
402 my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
403 h->prev_frame_num_offset + 1 >> 1;
407 /* clip motion vector prediction to frame border */
408 mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
409 my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
411 /* get (optional) motion vector differential */
412 if (mode == PREDICT_MODE) {
/* note: dy is read before dx in the bitstream */
415 dy = svq3_get_se_golomb(&h->gb);
416 dx = svq3_get_se_golomb(&h->gb);
418 if (dx == INVALID_VLC || dy == INVALID_VLC) {
419 av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
424 /* compute motion vector */
425 if (mode == THIRDPEL_MODE) {
427 mx = (mx + 1 >> 1) + dx;
428 my = (my + 1 >> 1) + dy;
/* split into full-pel base (fx,fy) and thirdpel remainder dxy */
429 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
430 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
431 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
433 svq3_mc_dir_part(s, x, y, part_width, part_height,
434 fx, fy, dxy, 1, dir, avg);
437 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
438 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
439 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
440 dxy = (mx & 1) + 2 * (my & 1);
442 svq3_mc_dir_part(s, x, y, part_width, part_height,
443 mx >> 1, my >> 1, dxy, 0, dir, avg);
/* FULLPEL_MODE: round 1/6-pel prediction to full pel */
447 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
448 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
450 svq3_mc_dir_part(s, x, y, part_width, part_height,
451 mx, my, 0, 0, dir, avg);
456 /* update mv_cache */
457 if (mode != PREDICT_MODE) {
458 int32_t mv = pack16to32(mx, my);
460 if (part_height == 8 && i < 8) {
461 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
463 if (part_width == 8 && j < 8)
464 AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
466 if (part_width == 8 && j < 8)
467 AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
468 if (part_width == 4 || part_height == 4)
469 AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
472 /* write back motion vectors */
473 fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
474 part_width >> 2, part_height >> 2, h->b_stride,
475 pack16to32(mx, my), 4);
/**
 * Decode one macroblock: classify mb_type (SKIP / INTER / INTRA4x4 /
 * INTRA16x16), reconstruct prediction (motion or intra modes), then decode
 * the residual coefficients into h->mb for ff_h264_hl_decode_mb().
 *
 * @param mb_type raw bitstream MB type: 0 = skip, 1..7 = inter,
 *                8 and 33 = intra4x4, others = intra16x16
 * @return 0 on success, negative on bitstream error (paths partly elided)
 */
481 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
483 H264Context *h = &s->h;
484 int i, j, k, m, dir, mode;
488 const int mb_xy = h->mb_xy;
489 const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
/* neighbour-availability masks for intra prediction at frame borders */
491 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
492 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
493 h->topright_samples_available = 0xFFFF;
495 if (mb_type == 0) { /* SKIP */
/* P skip (or B skip without a usable co-located type): zero-MV copy */
496 if (h->pict_type == AV_PICTURE_TYPE_P ||
497 s->next_pic->mb_type[mb_xy] == -1) {
498 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
501 if (h->pict_type == AV_PICTURE_TYPE_B)
502 svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
505 mb_type = MB_TYPE_SKIP;
/* B direct skip: reuse next_pic's partitioning, both directions */
507 mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
508 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
510 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
513 mb_type = MB_TYPE_16x16;
515 } else if (mb_type < 8) { /* INTER */
/* one extra bit picks between the enabled sub-pel precisions */
516 if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
517 mode = THIRDPEL_MODE;
518 else if (s->halfpel_flag &&
519 s->thirdpel_flag == !get_bits1(&h->gb))
525 /* note ref_cache should contain here:
/* fill mv_cache/ref_cache from the left, top, top-right and top-left
 * neighbours; intra4x4_pred_mode == -1 marks an intra (unusable) MB */
533 for (m = 0; m < 2; m++) {
534 if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
535 for (i = 0; i < 4; i++)
536 AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
537 h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
539 for (i = 0; i < 4; i++)
540 AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
543 memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
544 h->cur_pic.motion_val[m][b_xy - h->b_stride],
545 4 * 2 * sizeof(int16_t));
546 memset(&h->ref_cache[m][scan8[0] - 1 * 8],
547 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
549 if (h->mb_x < h->mb_width - 1) {
550 AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
551 h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
552 h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
553 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
554 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
556 h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
558 AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
559 h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
560 h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
561 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
563 h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
565 memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
566 PART_NOT_AVAILABLE, 8);
/* P frames only fill direction 0 */
568 if (h->pict_type != AV_PICTURE_TYPE_B)
572 /* decode motion vector(s) and form prediction(s) */
573 if (h->pict_type == AV_PICTURE_TYPE_P) {
574 if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
576 } else { /* AV_PICTURE_TYPE_B */
578 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
581 for (i = 0; i < 4; i++)
582 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
583 0, 4 * 2 * sizeof(int16_t));
586 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
589 for (i = 0; i < 4; i++)
590 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
591 0, 4 * 2 * sizeof(int16_t));
595 mb_type = MB_TYPE_16x16;
596 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
597 memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* import neighbour prediction modes; -1 means neighbour unavailable */
601 for (i = 0; i < 4; i++)
602 h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
603 if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
604 h->left_samples_available = 0x5F5F;
607 h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
608 h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
609 h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
610 h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
612 if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
613 h->top_samples_available = 0x33FF;
616 /* decode prediction codes for luma blocks */
617 for (i = 0; i < 16; i += 2) {
618 vlc = svq3_get_ue_golomb(&h->gb);
621 av_log(h->avctx, AV_LOG_ERROR,
622 "luma prediction:%"PRIu32"\n", vlc);
/* one VLC yields two block modes via svq3_pred_0/svq3_pred_1 */
626 left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
627 top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
629 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
630 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
632 if (left[1] == -1 || left[2] == -1) {
633 av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
637 } else { /* mb_type == 33, DC_128_PRED block type */
638 for (i = 0; i < 4; i++)
639 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
642 write_back_intra_pred_mode(h);
645 ff_h264_check_intra4x4_pred_mode(h);
647 h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
648 h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
650 for (i = 0; i < 4; i++)
651 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
653 h->top_samples_available = 0x33FF;
654 h->left_samples_available = 0x5F5F;
657 mb_type = MB_TYPE_INTRA4x4;
658 } else { /* INTRA16x16 */
659 dir = i_mb_type_info[mb_type - 8].pred_mode;
/* remap H.264 pred-mode numbering to the SVQ3 one */
660 dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
662 if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) < 0) {
663 av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
664 return h->intra16x16_pred_mode;
667 cbp = i_mb_type_info[mb_type - 8].cbp;
668 mb_type = MB_TYPE_INTRA16x16;
/* per-MB bookkeeping: clear MVs of intra MBs in P/B frames */
671 if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
672 for (i = 0; i < 4; i++)
673 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
674 0, 4 * 2 * sizeof(int16_t));
675 if (h->pict_type == AV_PICTURE_TYPE_B) {
676 for (i = 0; i < 4; i++)
677 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
678 0, 4 * 2 * sizeof(int16_t));
681 if (!IS_INTRA4x4(mb_type)) {
682 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
684 if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
685 memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
/* coded block pattern (not transmitted for intra16x16) */
688 if (!IS_INTRA16x16(mb_type) &&
689 (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
690 if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48) {
691 av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
695 cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
696 : golomb_to_inter_cbp[vlc];
/* optional per-MB qscale delta */
698 if (IS_INTRA16x16(mb_type) ||
699 (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
700 h->qscale += svq3_get_se_golomb(&h->gb);
702 if (h->qscale > 31u) {
703 av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
/* residual: separate luma DC block for intra16x16 ... */
707 if (IS_INTRA16x16(mb_type)) {
708 AV_ZERO128(h->mb_luma_dc[0] + 0);
709 AV_ZERO128(h->mb_luma_dc[0] + 8);
710 if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
711 av_log(h->avctx, AV_LOG_ERROR,
712 "error while decoding intra luma dc\n");
/* ... then the 4x4 luma blocks selected by cbp */
718 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
719 const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
721 for (i = 0; i < 4; i++)
722 if ((cbp & (1 << i))) {
723 for (j = 0; j < 4; j++) {
724 k = index ? (1 * (j & 1) + 2 * (i & 1) +
725 2 * (j & 2) + 4 * (i & 2))
727 h->non_zero_count_cache[scan8[k]] = 1;
729 if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
730 av_log(h->avctx, AV_LOG_ERROR,
731 "error while decoding block\n");
/* chroma DC (type 3 scan) ... */
738 for (i = 1; i < 3; ++i)
739 if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
740 av_log(h->avctx, AV_LOG_ERROR,
741 "error while decoding chroma dc block\n");
/* ... and chroma AC blocks */
746 for (i = 1; i < 3; i++) {
747 for (j = 0; j < 4; j++) {
749 h->non_zero_count_cache[scan8[k]] = 1;
751 if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
752 av_log(h->avctx, AV_LOG_ERROR,
753 "error while decoding chroma ac block\n");
763 h->cur_pic.mb_type[mb_xy] = mb_type;
765 if (IS_INTRA(mb_type))
766 h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
/**
 * Parse a slice header. Validates the 8-bit header byte, computes the bit
 * position of the next slice, optionally de-watermarks the payload, reads
 * slice type / id / qscale / flags, and resets the intra predictors of the
 * MBs above/left of the slice start so stale modes are never referenced.
 *
 * @return 0 on success, negative on error (return paths partly elided)
 */
771 static int svq3_decode_slice_header(AVCodecContext *avctx)
773 SVQ3Context *s = avctx->priv_data;
774 H264Context *h = &s->h;
775 const int mb_xy = h->mb_xy;
779 header = get_bits(&h->gb, 8);
/* low bits = slice kind (1 or 2), bits 5-6 = length-field size, must be set */
781 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
783 av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
786 int length = header >> 5 & 3;
/* next_slice_index is a bit offset into the packet */
788 s->next_slice_index = get_bits_count(&h->gb) +
789 8 * show_bits(&h->gb, 8 * length) +
792 if (s->next_slice_index > h->gb.size_in_bits) {
793 av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
/* restrict the reader to this slice only */
797 h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
798 skip_bits(&h->gb, 8);
/* undo the watermark XOR on the next 4 payload bytes */
800 if (s->watermark_key) {
801 uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
802 AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
803 header ^ s->watermark_key);
/* NOTE(review): writes into the reader's buffer — relies on the caller
 * providing a writable packet; confirm against svq3_decode_frame() */
806 memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
807 &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
809 skip_bits_long(&h->gb, 0);
812 if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
813 av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
817 h->slice_type = golomb_to_pict_type[slice_id];
/* slice kind 2 carries an explicit MB start offset */
819 if ((header & 0x9F) == 2) {
820 i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
821 h->mb_skip_run = get_bits(&h->gb, i) -
822 (h->mb_y * h->mb_width + h->mb_x);
828 h->slice_num = get_bits(&h->gb, 8);
829 h->qscale = get_bits(&h->gb, 5);
830 s->adaptive_quant = get_bits1(&h->gb);
/* unknown/reserved fields */
839 skip_bits(&h->gb, 2);
841 while (get_bits1(&h->gb))
842 skip_bits(&h->gb, 8);
844 /* reset intra predictors and invalidate motion vector references */
846 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
847 -1, 4 * sizeof(int8_t));
848 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
849 -1, 8 * sizeof(int8_t) * h->mb_x);
852 memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
853 -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
856 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/**
 * Decoder init: allocates the three picture slots, initializes the embedded
 * H264 context and DSP tables, then parses the "SEQH" extradata atom for
 * frame size, sub-pel flags, low_delay and the optional zlib-compressed
 * watermark (from which watermark_key is derived). Finally derives the MB
 * geometry and allocates the H264 tables.
 */
862 static av_cold int svq3_decode_init(AVCodecContext *avctx)
864 SVQ3Context *s = avctx->priv_data;
865 H264Context *h = &s->h;
867 unsigned char *extradata;
868 unsigned char *extradata_end;
870 int marker_found = 0;
872 s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
873 s->last_pic = av_mallocz(sizeof(*s->last_pic));
874 s->next_pic = av_mallocz(sizeof(*s->next_pic));
875 if (!s->next_pic || !s->last_pic || !s->cur_pic) {
876 av_freep(&s->cur_pic);
877 av_freep(&s->last_pic);
878 av_freep(&s->next_pic);
879 return AVERROR(ENOMEM);
882 if (ff_h264_decode_init(avctx) < 0)
885 ff_hpeldsp_init(&s->hdsp, avctx->flags);
886 ff_tpeldsp_init(&s->tdsp);
888 h->flags = avctx->flags;
890 h->picture_structure = PICT_FRAME;
891 avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
892 avctx->color_range = AVCOL_RANGE_JPEG;
894 h->chroma_qp[0] = h->chroma_qp[1] = 4;
895 h->chroma_x_shift = h->chroma_y_shift = 1;
/* defaults if no SEQH atom is present */
898 s->thirdpel_flag = 1;
901 /* prowl for the "SEQH" marker in the extradata */
902 extradata = (unsigned char *)avctx->extradata;
903 extradata_end = avctx->extradata + avctx->extradata_size;
/* NOTE(review): the pointer advance inside this loop was elided from this
 * chunk; as shown the comparison target never moves — confirm upstream */
905 for (m = 0; m + 8 < avctx->extradata_size; m++) {
906 if (!memcmp(extradata, "SEQH", 4)) {
914 /* if a match was found, parse the extra data */
919 size = AV_RB32(&extradata[4]);
920 if (size > extradata_end - extradata - 8)
921 return AVERROR_INVALIDDATA;
922 init_get_bits(&gb, extradata + 8, size * 8);
924 /* 'frame size code' and optional 'width, height' */
925 frame_size_code = get_bits(&gb, 3);
926 switch (frame_size_code) {
/* code 7: explicit 12-bit dimensions */
956 avctx->width = get_bits(&gb, 12);
957 avctx->height = get_bits(&gb, 12);
961 s->halfpel_flag = get_bits1(&gb);
962 s->thirdpel_flag = get_bits1(&gb);
970 h->low_delay = get_bits1(&gb);
/* skip variable-length unknown fields */
975 while (get_bits1(&gb))
978 s->unknown_flag = get_bits1(&gb);
979 avctx->has_b_frames = !h->low_delay;
980 if (s->unknown_flag) {
/* watermark block: dimensions, three unknown fields, zlib payload */
982 unsigned watermark_width = svq3_get_ue_golomb(&gb);
983 unsigned watermark_height = svq3_get_ue_golomb(&gb);
984 int u1 = svq3_get_ue_golomb(&gb);
985 int u2 = get_bits(&gb, 8);
986 int u3 = get_bits(&gb, 2);
987 int u4 = svq3_get_ue_golomb(&gb);
988 unsigned long buf_len = watermark_width *
989 watermark_height * 4;
990 int offset = get_bits_count(&gb) + 7 >> 3;
/* overflow check for width*height*4 */
993 if (watermark_height > 0 &&
994 (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
/* NOTE(review): av_malloc() result is not NULL-checked before being
 * passed to uncompress() below */
997 buf = av_malloc(buf_len);
998 av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
999 watermark_width, watermark_height);
1000 av_log(avctx, AV_LOG_DEBUG,
1001 "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1002 u1, u2, u3, u4, offset);
1003 if (uncompress(buf, &buf_len, extradata + 8 + offset,
1004 size - offset) != Z_OK) {
1005 av_log(avctx, AV_LOG_ERROR,
1006 "could not uncompress watermark logo\n");
/* key = checksum of the decompressed logo, replicated into both halves */
1010 s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1011 s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1012 av_log(avctx, AV_LOG_DEBUG,
1013 "watermark key %#"PRIx32"\n", s->watermark_key);
/* compiled without zlib: watermarked files cannot be decoded */
1016 av_log(avctx, AV_LOG_ERROR,
1017 "this svq3 file contains watermark which need zlib support compiled in\n");
/* derive macroblock geometry from the (possibly just parsed) dimensions */
1023 h->width = avctx->width;
1024 h->height = avctx->height;
1025 h->mb_width = (h->width + 15) / 16;
1026 h->mb_height = (h->height + 15) / 16;
1027 h->mb_stride = h->mb_width + 1;
1028 h->mb_num = h->mb_width * h->mb_height;
1029 h->b_stride = 4 * h->mb_width;
1030 s->h_edge_pos = h->mb_width * 16;
1031 s->v_edge_pos = h->mb_height * 16;
1033 if (ff_h264_alloc_tables(h) < 0) {
1034 av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1035 return AVERROR(ENOMEM);
/* Release all buffers owned by one picture slot: per-direction motion
 * vectors and reference indices, the MB-type array, and the frame itself. */
1041 static void free_picture(AVCodecContext *avctx, H264Picture *pic)
1044 for (i = 0; i < 2; i++) {
1045 av_buffer_unref(&pic->motion_val_buf[i]);
1046 av_buffer_unref(&pic->ref_index_buf[i]);
1048 av_buffer_unref(&pic->mb_type_buf);
1050 av_frame_unref(&pic->f);
/**
 * (Lazily) allocate the side buffers of a picture slot and obtain a frame
 * buffer for it; also sizes the shared edge-emulation buffer and caches the
 * luma/chroma line sizes on the first successful allocation.
 *
 * @return 0 on success, a negative AVERROR otherwise (frees partially
 *         allocated state via free_picture() on the error path)
 */
1053 static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
1055 SVQ3Context *s = avctx->priv_data;
1056 H264Context *h = &s->h;
1057 const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1058 const int mb_array_size = h->mb_stride * h->mb_height;
1059 const int b4_stride = h->mb_width * 4 + 1;
1060 const int b4_array_size = b4_stride * h->mb_height * 4;
/* side data is allocated once and kept across frames */
1063 if (!pic->motion_val_buf[0]) {
1066 pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1067 if (!pic->mb_type_buf)
1068 return AVERROR(ENOMEM);
/* skip the padding rows/column so negative neighbour offsets are valid */
1069 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1071 for (i = 0; i < 2; i++) {
1072 pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1073 pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1074 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1075 ret = AVERROR(ENOMEM);
1079 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1080 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* B pictures are never used as references */
1083 pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1085 ret = ff_get_buffer(avctx, &pic->f,
1086 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* one shared scratch line buffer for emulated_edge_mc (17 luma rows) */
1090 if (!h->edge_emu_buffer) {
1091 h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1092 if (!h->edge_emu_buffer)
1093 return AVERROR(ENOMEM);
1096 h->linesize = pic->f.linesize[0];
1097 h->uvlinesize = pic->f.linesize[1];
/* error path: tear down whatever was allocated above */
1101 free_picture(avctx, pic);
/**
 * Decode one packet into one picture. Handles the end-of-stream flush of the
 * delayed picture, slice-header parsing, reference management (last/next
 * swap for non-B frames), missing-reference synthesis, frame_num bookkeeping
 * for B-frame MV scaling, and the MB decode loop.
 *
 * @param data      output AVFrame (via av_frame_ref)
 * @param got_frame set by elided code when a frame is returned
 */
1105 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1106 int *got_frame, AVPacket *avpkt)
1108 const uint8_t *buf = avpkt->data;
1109 SVQ3Context *s = avctx->priv_data;
1110 H264Context *h = &s->h;
1111 int buf_size = avpkt->size;
1114 /* special case for last picture */
1115 if (buf_size == 0) {
1116 if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1117 ret = av_frame_ref(data, &s->next_pic->f);
1120 s->last_frame_output = 1;
1126 init_get_bits(&h->gb, buf, 8 * buf_size);
1128 h->mb_x = h->mb_y = h->mb_xy = 0;
1130 if (svq3_decode_slice_header(avctx))
1133 h->pict_type = h->slice_type;
/* non-B frame: the previous "next" becomes the new "last" reference */
1135 if (h->pict_type != AV_PICTURE_TYPE_B)
1136 FFSWAP(H264Picture*, s->next_pic, s->last_pic);
1138 av_frame_unref(&s->cur_pic->f);
1140 /* for skipping the frame */
1141 s->cur_pic->f.pict_type = h->pict_type;
1142 s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1144 ret = get_buffer(avctx, s->cur_pic);
/* mirror the current picture into the embedded H264 context */
1148 h->cur_pic_ptr = s->cur_pic;
1149 av_frame_unref(&h->cur_pic.f);
1150 h->cur_pic = *s->cur_pic;
1151 ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
/* per-frame block offsets (depend on the just-obtained linesizes) */
1155 for (i = 0; i < 16; i++) {
1156 h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1157 h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1159 for (i = 0; i < 16; i++) {
1160 h->block_offset[16 + i] =
1161 h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1162 h->block_offset[48 + 16 + i] =
1163 h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
/* missing reference: synthesize a mid-grey frame instead of failing hard */
1166 if (h->pict_type != AV_PICTURE_TYPE_I) {
1167 if (!s->last_pic->f.data[0]) {
1168 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1169 ret = get_buffer(avctx, s->last_pic);
1172 memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1173 memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1174 s->last_pic->f.linesize[1]);
1175 memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1176 s->last_pic->f.linesize[2]);
1179 if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1180 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1181 ret = get_buffer(avctx, s->next_pic);
1184 memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1185 memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1186 s->next_pic->f.linesize[1]);
1187 memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1188 s->next_pic->f.linesize[2]);
1192 if (avctx->debug & FF_DEBUG_PICT_INFO)
1193 av_log(h->avctx, AV_LOG_DEBUG,
1194 "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1195 av_get_picture_type_char(h->pict_type),
1196 s->halfpel_flag, s->thirdpel_flag,
1197 s->adaptive_quant, h->qscale, h->slice_num);
/* honor the user's skip_frame discard level */
1199 if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1200 avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1201 avctx->skip_frame >= AVDISCARD_ALL)
1204 if (s->next_p_frame_damaged) {
1205 if (h->pict_type == AV_PICTURE_TYPE_B)
1208 s->next_p_frame_damaged = 0;
/* frame_num bookkeeping used by PREDICT_MODE MV scaling (svq3_mc_dir) */
1211 if (h->pict_type == AV_PICTURE_TYPE_B) {
1212 h->frame_num_offset = h->slice_num - h->prev_frame_num;
1214 if (h->frame_num_offset < 0)
1215 h->frame_num_offset += 256;
1216 if (h->frame_num_offset == 0 ||
1217 h->frame_num_offset >= h->prev_frame_num_offset) {
1218 av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1222 h->prev_frame_num = h->frame_num;
1223 h->frame_num = h->slice_num;
1224 h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1226 if (h->prev_frame_num_offset < 0)
1227 h->prev_frame_num_offset += 256;
/* pre-seed the ref_cache rows (interior available, borders not) */
1230 for (m = 0; m < 2; m++) {
1232 for (i = 0; i < 4; i++) {
1234 for (j = -1; j < 4; j++)
1235 h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1237 h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
/* main macroblock loop */
1241 for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1242 for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1244 h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
/* at the end of the current slice (byte-aligned or zero padding),
 * jump to the next slice and parse its header */
1246 if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1247 ((get_bits_count(&h->gb) & 7) == 0 ||
1248 show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1249 skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1250 h->gb.size_in_bits = 8 * buf_size;
1252 if (svq3_decode_slice_header(avctx))
1255 /* TODO: support s->mb_skip_run */
1258 mb_type = svq3_get_ue_golomb(&h->gb);
/* bias the raw type: I frames use the intra range, B frames shift
 * types >= 4 (details elided from this chunk) */
1260 if (h->pict_type == AV_PICTURE_TYPE_I)
1262 else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1264 if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1265 av_log(h->avctx, AV_LOG_ERROR,
1266 "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1271 ff_h264_hl_decode_mb(h);
1273 if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1274 h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1275 (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1278 ff_draw_horiz_band(avctx, &s->cur_pic->f,
1279 s->last_pic->f.data[0] ? &s->last_pic->f : NULL,
1280 16 * h->mb_y, 16, h->picture_structure, 0,
/* output: B frames / low-delay return the current picture, otherwise
 * the (reordered) previous reference */
1284 if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1285 ret = av_frame_ref(data, &s->cur_pic->f);
1286 else if (s->last_pic->f.data[0])
1287 ret = av_frame_ref(data, &s->last_pic->f);
1291 /* Do not output the last pic after seeking. */
1292 if (s->last_pic->f.data[0] || h->low_delay)
/* non-B: current picture becomes the next reference */
1295 if (h->pict_type != AV_PICTURE_TYPE_B) {
1296 FFSWAP(H264Picture*, s->cur_pic, s->next_pic);
1298 av_frame_unref(&s->cur_pic->f);
/* Decoder teardown: free the three picture slots (buffers + the slot structs
 * themselves), drop the embedded H264 context's frame ref, then release the
 * H264 context. */
1304 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1306 SVQ3Context *s = avctx->priv_data;
1307 H264Context *h = &s->h;
1309 free_picture(avctx, s->cur_pic);
1310 free_picture(avctx, s->next_pic);
1311 free_picture(avctx, s->last_pic);
1312 av_freep(&s->cur_pic);
1313 av_freep(&s->next_pic);
1314 av_freep(&s->last_pic);
1316 av_frame_unref(&h->cur_pic.f);
1318 ff_h264_free_context(h);
/* Public codec registration for the SVQ3 decoder. */
1323 AVCodec ff_svq3_decoder = {
1325 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1326 .type = AVMEDIA_TYPE_VIDEO,
1327 .id = AV_CODEC_ID_SVQ3,
1328 .priv_data_size = sizeof(SVQ3Context),
1329 .init = svq3_decode_init,
1330 .close = svq3_decode_end,
1331 .decode = svq3_decode_frame,
1332 .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1335 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,