2 * Copyright (c) 2003 The FFmpeg Project
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
45 #include "libavutil/attributes.h"
48 #include "mpegutils.h"
50 #include "h264_mvpred.h"
55 #include "rectangle.h"
57 #include "vdpau_internal.h"
/* Decoder-private state. NOTE(review): the struct is truncated in this view;
 * additional fields exist in the elided lines. */
71 typedef struct SVQ3Context {
/* reference pictures used for inter prediction (dir 0 = last, dir 1 = next) */
76 H264Picture *next_pic;
77 H264Picture *last_pic;
/* XOR key derived from the checksummed, decompressed watermark logo
 * (computed in svq3_decode_init, applied in svq3_decode_slice_header) */
84 uint32_t watermark_key;
88 int next_p_frame_damaged;
91 int last_frame_output;
/* motion-compensation modes signalled per macroblock; PREDICT_MODE derives
 * MVs from the next picture's motion vectors instead of a coded differential */
94 #define FULLPEL_MODE 1
95 #define HALFPEL_MODE 2
96 #define THIRDPEL_MODE 3
97 #define PREDICT_MODE 4
99 /* dual scan (from some older h264 draft)
/* coefficient scan order for the 4x4 luma/chroma AC blocks
 * (selected via scan_patterns[] in svq3_decode_block) */
108 static const uint8_t svq3_scan[16] = {
109 0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
110 2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
111 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
112 0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
/* scan order for the 16 luma DC coefficients of an INTRA16x16 macroblock */
115 static const uint8_t luma_dc_zigzag_scan[16] = {
116 0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
117 3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
118 1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
119 3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
/* maps a luma-prediction VLC code to a pair of per-4x4-block indices,
 * consumed as svq3_pred_0[vlc][0..1] in svq3_decode_mb */
122 static const uint8_t svq3_pred_0[25][2] = {
125 { 0, 2 }, { 1, 1 }, { 2, 0 },
126 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
127 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
128 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
129 { 2, 4 }, { 3, 3 }, { 4, 2 },
/* intra 4x4 prediction mode table indexed by [top mode + 1][left mode + 1];
 * -1 entries mark invalid combinations (checked as "weird prediction") */
134 static const int8_t svq3_pred_1[6][6][5] = {
135 { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
136 { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
137 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
138 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
139 { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
140 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
141 { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
142 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
143 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
144 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
145 { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
146 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/* (run, level) pairs for small DCT VLC codes; [0] = inter, [1] = intra
 * (indexed by the "intra" flag computed in svq3_decode_block) */
149 static const struct {
152 } svq3_dct_tables[2][16] = {
153 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
154 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
155 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
156 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/* dequantizer multiplier per qscale (0..31); used as qmul in the IDCT helpers */
159 static const uint32_t svq3_dequant_coeff[32] = {
160 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
161 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
162 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
163 61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
/* forward declaration: used for cleanup on init failure */
166 static int svq3_decode_end(AVCodecContext *avctx);
/**
 * Dequantize and inverse-transform the 4x4 luma DC coefficients of an
 * INTRA16x16 macroblock.
 *
 * Applies the SVQ3 4-point transform (13/17/7 integer multipliers) on rows,
 * then columns, scaling by svq3_dequant_coeff[qp] with rounding
 * (+0x80000 >> 20), and scatters the results into output[] at DC positions.
 *
 * @param output destination coefficient array (DC slots of the 16 blocks)
 * @param input  16 luma DC coefficients in raster order
 * @param qp     quantizer index into svq3_dequant_coeff (0..31)
 */
168 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
170 const int qmul = svq3_dequant_coeff[qp];
174 static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
/* horizontal pass: 4-point transform on each row of input */
176 for (i = 0; i < 4; i++) {
177 const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
178 const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
179 const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
180 const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
182 temp[4 * i + 0] = z0 + z3;
183 temp[4 * i + 1] = z1 + z2;
184 temp[4 * i + 2] = z1 - z2;
185 temp[4 * i + 3] = z0 - z3;
/* vertical pass + dequant; rounding via +0x80000 before >> 20 */
188 for (i = 0; i < 4; i++) {
189 const int offset = x_offset[i];
190 const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
191 const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
192 const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
193 const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
195 output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
196 output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
197 output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
198 output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
/**
 * Dequantize, inverse-transform a 4x4 block and add the residual to dst.
 *
 * @param dst    destination pixels (written with av_clip_uint8)
 * @param block  4x4 coefficients; cleared to zero on exit
 * @param stride destination line stride in bytes
 * @param qp     quantizer index into svq3_dequant_coeff
 * @param dc     nonzero if block[0] is a separately-transformed DC value;
 *               dc == 1 selects the fixed 1538 scale, otherwise qmul is used
 */
203 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
204 int stride, int qp, int dc)
206 const int qmul = svq3_dequant_coeff[qp];
210 dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
211 : qmul * (block[0] >> 3) / 2);
/* horizontal pass of the SVQ3 4-point transform (13/17/7 multipliers) */
215 for (i = 0; i < 4; i++) {
216 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
217 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
218 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
219 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
221 block[0 + 4 * i] = z0 + z3;
222 block[1 + 4 * i] = z1 + z2;
223 block[2 + 4 * i] = z1 - z2;
224 block[3 + 4 * i] = z0 - z3;
/* vertical pass; the DC contribution is folded into the rounding term rr */
227 for (i = 0; i < 4; i++) {
228 const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
229 const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
230 const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
231 const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
232 const int rr = (dc + 0x80000);
234 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
235 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
236 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
237 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
/* coefficients are consumed; reset for the next block */
240 memset(block, 0, 16 * sizeof(int16_t));
/**
 * Decode run/level coefficient pairs for one block from the bitstream.
 *
 * @param gb    bitreader positioned at the block's coefficient data
 * @param block destination coefficient array, written via scan[index]
 * @param index first coefficient position (nonzero to skip the DC slot)
 * @param type  scan selector: 0 = luma DC, 1 = zigzag, 2 = svq3 scan,
 *              3 = chroma DC (see scan_patterns below)
 * @return 0 on success, nonzero on an invalid VLC / out-of-range index
 *         (NOTE(review): error paths are in elided lines — confirm)
 */
243 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
244 int index, const int type)
246 static const uint8_t *const scan_patterns[4] = {
247 luma_dc_zigzag_scan, ff_zigzag_scan, svq3_scan, ff_h264_chroma_dc_scan
250 int run, level, sign, limit;
/* intra flag: 1 for types 2 and 3, 0 for types 0 and 1 */
252 const int intra = 3 * type >> 2;
253 const uint8_t *const scan = scan_patterns[type];
255 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
256 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
257 if ((int32_t)vlc < 0)
/* even vlc => negative level (sign = -1), odd => positive */
260 sign = (vlc & 1) ? 0 : -1;
267 } else if (vlc < 4) {
272 level = (vlc + 9 >> 2) - run;
/* small codes: (run, level) looked up from the per-mode table */
276 run = svq3_dct_tables[intra][vlc].run;
277 level = svq3_dct_tables[intra][vlc].level;
/* escape codes: level reconstructed from vlc with a run-dependent offset */
280 level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
283 level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
288 if ((index += run) >= limit)
/* apply sign via xor/subtract trick and store in scan order */
291 block[scan[index]] = (level ^ sign) - sign;
/**
 * Motion-compensate one partition (luma + chroma) from a reference picture.
 *
 * @param x,y      top-left luma position of the partition in the frame
 * @param width,height partition size in luma pixels
 * @param mx,my    motion vector in full-pel (or thirdpel, see below) units
 * @param dxy      sub-pel interpolation selector for the DSP function
 * @param thirdpel nonzero to use the thirdpel DSP tables instead of halfpel
 * @param dir      0 = predict from last_pic, 1 = from next_pic
 * @param avg      nonzero to average into dst (B-frame second direction)
 */
302 static inline void svq3_mc_dir_part(SVQ3Context *s,
303 int x, int y, int width, int height,
304 int mx, int my, int dxy,
305 int thirdpel, int dir, int avg)
307 H264Context *h = &s->h;
308 H264SliceContext *sl = &h->slice_ctx[0];
309 const H264Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
312 int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
/* clamp MVs that reach outside the padded frame; the emulated-edge path
 * below handles the remaining out-of-picture pixels */
317 if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
318 my < 0 || my >= s->v_edge_pos - height - 1) {
320 mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
321 my = av_clip(my, -16, s->v_edge_pos - height + 15);
324 /* form component predictions */
325 dest = h->cur_pic.f->data[0] + x + y * sl->linesize;
326 src = pic->f->data[0] + mx + my * sl->linesize;
329 h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src,
330 sl->linesize, sl->linesize,
331 width + 1, height + 1,
332 mx, my, s->h_edge_pos, s->v_edge_pos);
333 src = sl->edge_emu_buffer;
336 (avg ? s->tdsp.avg_tpel_pixels_tab
337 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, sl->linesize,
340 (avg ? s->hdsp.avg_pixels_tab
341 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, sl->linesize,
/* chroma planes (skipped entirely in grayscale mode) */
344 if (!(h->flags & AV_CODEC_FLAG_GRAY)) {
/* halve MV and size for 4:2:0 chroma, rounding toward the block */
345 mx = mx + (mx < (int) x) >> 1;
346 my = my + (my < (int) y) >> 1;
348 height = height >> 1;
351 for (i = 1; i < 3; i++) {
352 dest = h->cur_pic.f->data[i] + (x >> 1) + (y >> 1) * sl->uvlinesize;
353 src = pic->f->data[i] + mx + my * sl->uvlinesize;
356 h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src,
357 sl->uvlinesize, sl->uvlinesize,
358 width + 1, height + 1,
359 mx, my, (s->h_edge_pos >> 1),
361 src = sl->edge_emu_buffer;
364 (avg ? s->tdsp.avg_tpel_pixels_tab
365 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
369 (avg ? s->hdsp.avg_pixels_tab
370 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
/**
 * Decode motion vectors for every partition of a macroblock and perform
 * motion compensation in the given direction.
 *
 * @param size partition-size code; decoded into part_width/part_height below
 * @param mode FULLPEL/HALFPEL/THIRDPEL or PREDICT_MODE (MVs derived from
 *             next_pic instead of coded differentials)
 * @param dir  prediction direction (0 = forward, 1 = backward)
 * @param avg  passed through to svq3_mc_dir_part (average vs. put)
 * @return 0 on success, negative on invalid MV VLC
 */
377 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
380 int i, j, k, mx, my, dx, dy, x, y;
381 H264Context *h = &s->h;
382 H264SliceContext *sl = &h->slice_ctx[0];
383 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
384 const int part_height = 16 >> ((unsigned)(size + 1) / 3);
385 const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
/* clipping bounds in 1/6-pel units (MVs below are scaled by 6) */
386 const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
387 const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
389 for (i = 0; i < 16; i += part_height)
390 for (j = 0; j < 16; j += part_width) {
391 const int b_xy = (4 * sl->mb_x + (j >> 2)) +
392 (4 * sl->mb_y + (i >> 2)) * h->b_stride;
394 x = 16 * sl->mb_x + j;
395 y = 16 * sl->mb_y + i;
/* k: scan8-compatible index of this partition inside the MB */
396 k = (j >> 2 & 1) + (i >> 1 & 2) +
397 (j >> 1 & 4) + (i & 8);
398 if (mode != PREDICT_MODE) {
400 pred_motion(h, sl, k, part_width >> 2, dir, 1, &mx, &my);
/* PREDICT_MODE: start from next_pic's MV, scaled by the temporal
 * distance ratio (frame_num_offset / prev_frame_num_offset) */
402 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
403 my = s->next_pic->motion_val[0][b_xy][1] << 1;
406 mx = mx * h->frame_num_offset /
407 h->prev_frame_num_offset + 1 >> 1;
408 my = my * h->frame_num_offset /
409 h->prev_frame_num_offset + 1 >> 1;
411 mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
412 h->prev_frame_num_offset + 1 >> 1;
413 my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
414 h->prev_frame_num_offset + 1 >> 1;
418 /* clip motion vector prediction to frame border */
419 mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
420 my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
422 /* get (optional) motion vector differential */
423 if (mode == PREDICT_MODE) {
426 dy = svq3_get_se_golomb(&h->gb);
427 dx = svq3_get_se_golomb(&h->gb);
429 if (dx == INVALID_VLC || dy == INVALID_VLC) {
430 av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
435 /* compute motion vector */
436 if (mode == THIRDPEL_MODE) {
438 mx = (mx + 1 >> 1) + dx;
439 my = (my + 1 >> 1) + dy;
/* split MV into full-pel part (fx, fy) and thirdpel remainder (dxy) */
440 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
441 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
442 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
444 svq3_mc_dir_part(s, x, y, part_width, part_height,
445 fx, fy, dxy, 1, dir, avg);
448 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
449 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
450 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
451 dxy = (mx & 1) + 2 * (my & 1);
453 svq3_mc_dir_part(s, x, y, part_width, part_height,
454 mx >> 1, my >> 1, dxy, 0, dir, avg);
/* FULLPEL_MODE: round to integer-pel, no interpolation */
458 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
459 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
461 svq3_mc_dir_part(s, x, y, part_width, part_height,
462 mx, my, 0, 0, dir, avg);
467 /* update mv_cache */
468 if (mode != PREDICT_MODE) {
469 int32_t mv = pack16to32(mx, my);
471 if (part_height == 8 && i < 8) {
472 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1 * 8], mv);
474 if (part_width == 8 && j < 8)
475 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
477 if (part_width == 8 && j < 8)
478 AV_WN32A(sl->mv_cache[dir][scan8[k] + 1], mv);
479 if (part_width == 4 || part_height == 4)
480 AV_WN32A(sl->mv_cache[dir][scan8[k]], mv);
483 /* write back motion vectors */
484 fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
485 part_width >> 2, part_height >> 2, h->b_stride,
486 pack16to32(mx, my), 4);
/**
 * Add the inverse-transformed luma residual for a non-INTRA4x4 macroblock.
 * For each of the 16 4x4 blocks, the IDCT-add is applied only when either
 * the non-zero-count cache or the DC coefficient says the block is coded.
 */
492 static av_always_inline void hl_decode_mb_idct_luma(const H264Context *h, H264SliceContext *sl,
493 int mb_type, const int *block_offset,
494 int linesize, uint8_t *dest_y)
497 if (!IS_INTRA4x4(mb_type)) {
498 for (i = 0; i < 16; i++)
499 if (sl->non_zero_count_cache[scan8[i]] || sl->mb[i * 16]) {
500 uint8_t *const ptr = dest_y + block_offset[i];
/* last arg: 1 marks a separate DC (intra), 0 otherwise */
501 ff_svq3_add_idct_c(ptr, sl->mb + i * 16, linesize,
502 sl->qscale, IS_INTRA(mb_type) ? 1 : 0);
/* Fetch one 16-bit coefficient from the block via an aligned 16-bit read. */
507 static av_always_inline int dctcoef_get(int16_t *mb, int index)
509 return AV_RN16A(mb + index);
/**
 * Perform luma intra prediction and residual reconstruction for one MB.
 * INTRA4x4: predict and IDCT-add each 4x4 block in scan order;
 * otherwise: 16x16 prediction followed by the luma DC dequant/IDCT.
 */
512 static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h,
513 H264SliceContext *sl,
515 const int *block_offset,
520 int qscale = sl->qscale;
522 if (IS_INTRA4x4(mb_type)) {
523 for (i = 0; i < 16; i++) {
524 uint8_t *const ptr = dest_y + block_offset[i];
525 const int dir = sl->intra4x4_pred_mode_cache[scan8[i]];
/* these modes read top-right samples; synthesize them when unavailable
 * by replicating the last available top pixel */
529 if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
530 const int topright_avail = (sl->topright_samples_available << i) & 0x8000;
531 av_assert2(sl->mb_y || linesize <= block_offset[i]);
532 if (!topright_avail) {
533 tr = ptr[3 - linesize] * 0x01010101u;
534 topright = (uint8_t *)&tr;
536 topright = ptr + 4 - linesize;
540 h->hpc.pred4x4[dir](ptr, topright, linesize);
541 nnz = sl->non_zero_count_cache[scan8[i]];
543 ff_svq3_add_idct_c(ptr, sl->mb + i * 16, linesize, qscale, 0);
/* INTRA16x16 path: whole-MB prediction, then DC transform */
547 h->hpc.pred16x16[sl->intra16x16_pred_mode](dest_y, linesize);
548 ff_svq3_luma_dc_dequant_idct_c(sl->mb,
549 sl->mb_luma_dc[0], qscale);
/**
 * Reconstruct one macroblock into the current picture: luma intra
 * prediction / IDCT, then chroma prediction, chroma DC dequant and
 * chroma AC IDCT-add.
 */
553 static void hl_decode_mb(const H264Context *h, H264SliceContext *sl)
555 const int mb_x = sl->mb_x;
556 const int mb_y = sl->mb_y;
557 const int mb_xy = sl->mb_xy;
558 const int mb_type = h->cur_pic.mb_type[mb_xy];
559 uint8_t *dest_y, *dest_cb, *dest_cr;
560 int linesize, uvlinesize;
562 const int *block_offset = &h->block_offset[0];
563 const int block_h = 16 >> h->chroma_y_shift;
/* destination pointers for this MB inside the current frame */
565 dest_y = h->cur_pic.f->data[0] + (mb_x + mb_y * sl->linesize) * 16;
566 dest_cb = h->cur_pic.f->data[1] + mb_x * 8 + mb_y * sl->uvlinesize * block_h;
567 dest_cr = h->cur_pic.f->data[2] + mb_x * 8 + mb_y * sl->uvlinesize * block_h;
569 h->vdsp.prefetch(dest_y + (sl->mb_x & 3) * 4 * sl->linesize + 64, sl->linesize, 4);
570 h->vdsp.prefetch(dest_cb + (sl->mb_x & 7) * sl->uvlinesize + 64, dest_cr - dest_cb, 2);
572 h->list_counts[mb_xy] = sl->list_count;
574 linesize = sl->mb_linesize = sl->linesize;
575 uvlinesize = sl->mb_uvlinesize = sl->uvlinesize;
577 if (IS_INTRA(mb_type)) {
578 h->hpc.pred8x8[sl->chroma_pred_mode](dest_cb, uvlinesize);
579 h->hpc.pred8x8[sl->chroma_pred_mode](dest_cr, uvlinesize);
581 hl_decode_mb_predict_luma(h, sl, mb_type, block_offset, linesize, dest_y);
584 hl_decode_mb_idct_luma(h, sl, mb_type, block_offset, linesize, dest_y);
/* cbp bits 4-5 signal coded chroma */
586 if (sl->cbp & 0x30) {
587 uint8_t *dest[2] = { dest_cb, dest_cr };
588 h->h264dsp.h264_chroma_dc_dequant_idct(sl->mb + 16 * 16 * 1,
589 h->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][sl->chroma_qp[0]][0]);
590 h->h264dsp.h264_chroma_dc_dequant_idct(sl->mb + 16 * 16 * 2,
591 h->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][sl->chroma_qp[1]][0]);
592 for (j = 1; j < 3; j++) {
593 for (i = j * 16; i < j * 16 + 4; i++)
594 if (sl->non_zero_count_cache[scan8[i]] || sl->mb[i * 16]) {
595 uint8_t *const ptr = dest[j - 1] + block_offset[i];
/* chroma qscale is mapped through the H.264 chroma QP table */
596 ff_svq3_add_idct_c(ptr, sl->mb + i * 16,
598 ff_h264_chroma_qp[0][sl->qscale + 12] - 12, 2);
/**
 * Decode one macroblock: classify its coded type (SKIP / INTER / INTRA4x4 /
 * INTRA16x16), decode motion vectors or intra prediction modes, the coded
 * block pattern, qscale delta, and all coefficient blocks.
 *
 * @param mb_type raw macroblock type code from the bitstream:
 *                0 = skip, 1..7 = inter, 8 or 33 = intra4x4,
 *                others = intra16x16 (see branches below)
 * @return 0 on success, negative on bitstream error
 */
604 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
606 H264Context *h = &s->h;
607 H264SliceContext *sl = &h->slice_ctx[0];
608 int i, j, k, m, dir, mode;
612 const int mb_xy = sl->mb_xy;
613 const int b_xy = 4 * sl->mb_x + 4 * sl->mb_y * h->b_stride;
/* availability masks for intra prediction at picture borders */
615 sl->top_samples_available = (sl->mb_y == 0) ? 0x33FF : 0xFFFF;
616 sl->left_samples_available = (sl->mb_x == 0) ? 0x5F5F : 0xFFFF;
617 sl->topright_samples_available = 0xFFFF;
619 if (mb_type == 0) { /* SKIP */
620 if (h->pict_type == AV_PICTURE_TYPE_P ||
621 s->next_pic->mb_type[mb_xy] == -1) {
/* P skip (or B skip without co-located type): zero-MV 16x16 copy */
622 svq3_mc_dir_part(s, 16 * sl->mb_x, 16 * sl->mb_y, 16, 16,
625 if (h->pict_type == AV_PICTURE_TYPE_B)
626 svq3_mc_dir_part(s, 16 * sl->mb_x, 16 * sl->mb_y, 16, 16,
629 mb_type = MB_TYPE_SKIP;
/* B direct-style skip: reuse the co-located MB's partitioning */
631 mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
632 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
634 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
637 mb_type = MB_TYPE_16x16;
639 } else if (mb_type < 8) { /* INTER */
/* MC precision is signalled relative to the stream's halfpel/thirdpel caps */
640 if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
641 mode = THIRDPEL_MODE;
642 else if (s->halfpel_flag &&
643 s->thirdpel_flag == !get_bits1(&h->gb))
649 /* note ref_cache should contain here:
/* fill mv_cache/ref_cache from neighbours for MV prediction; -1 in
 * intra4x4_pred_mode marks an unavailable (intra or border) neighbour */
657 for (m = 0; m < 2; m++) {
658 if (sl->mb_x > 0 && sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
659 for (i = 0; i < 4; i++)
660 AV_COPY32(sl->mv_cache[m][scan8[0] - 1 + i * 8],
661 h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
663 for (i = 0; i < 4; i++)
664 AV_ZERO32(sl->mv_cache[m][scan8[0] - 1 + i * 8]);
667 memcpy(sl->mv_cache[m][scan8[0] - 1 * 8],
668 h->cur_pic.motion_val[m][b_xy - h->b_stride],
669 4 * 2 * sizeof(int16_t));
670 memset(&sl->ref_cache[m][scan8[0] - 1 * 8],
671 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
673 if (sl->mb_x < h->mb_width - 1) {
674 AV_COPY32(sl->mv_cache[m][scan8[0] + 4 - 1 * 8],
675 h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
676 sl->ref_cache[m][scan8[0] + 4 - 1 * 8] =
677 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
678 sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
680 sl->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
682 AV_COPY32(sl->mv_cache[m][scan8[0] - 1 - 1 * 8],
683 h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
684 sl->ref_cache[m][scan8[0] - 1 - 1 * 8] =
685 (sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
687 sl->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
689 memset(&sl->ref_cache[m][scan8[0] - 1 * 8 - 1],
690 PART_NOT_AVAILABLE, 8);
/* only B frames need the second (backward) direction prepared */
692 if (h->pict_type != AV_PICTURE_TYPE_B)
696 /* decode motion vector(s) and form prediction(s) */
697 if (h->pict_type == AV_PICTURE_TYPE_P) {
698 if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
700 } else { /* AV_PICTURE_TYPE_B */
702 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
705 for (i = 0; i < 4; i++)
706 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
707 0, 4 * 2 * sizeof(int16_t));
710 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
713 for (i = 0; i < 4; i++)
714 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
715 0, 4 * 2 * sizeof(int16_t));
719 mb_type = MB_TYPE_16x16;
720 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
721 memset(sl->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
/* seed the prediction-mode cache from left and top neighbours */
725 for (i = 0; i < 4; i++)
726 sl->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
727 if (sl->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
728 sl->left_samples_available = 0x5F5F;
731 sl->intra4x4_pred_mode_cache[4 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
732 sl->intra4x4_pred_mode_cache[5 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
733 sl->intra4x4_pred_mode_cache[6 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
734 sl->intra4x4_pred_mode_cache[7 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
736 if (sl->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
737 sl->top_samples_available = 0x33FF;
740 /* decode prediction codes for luma blocks */
741 for (i = 0; i < 16; i += 2) {
742 vlc = svq3_get_ue_golomb(&h->gb);
745 av_log(h->avctx, AV_LOG_ERROR,
746 "luma prediction:%"PRIu32"\n", vlc);
750 left = &sl->intra4x4_pred_mode_cache[scan8[i] - 1];
751 top = &sl->intra4x4_pred_mode_cache[scan8[i] - 8];
/* two modes per VLC, derived from neighbour modes via the tables */
753 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
754 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
756 if (left[1] == -1 || left[2] == -1) {
757 av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
761 } else { /* mb_type == 33, DC_128_PRED block type */
762 for (i = 0; i < 4; i++)
763 memset(&sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
766 write_back_intra_pred_mode(h, sl);
769 ff_h264_check_intra4x4_pred_mode(h, sl);
771 sl->top_samples_available = (sl->mb_y == 0) ? 0x33FF : 0xFFFF;
772 sl->left_samples_available = (sl->mb_x == 0) ? 0x5F5F : 0xFFFF;
774 for (i = 0; i < 4; i++)
775 memset(&sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
777 sl->top_samples_available = 0x33FF;
778 sl->left_samples_available = 0x5F5F;
781 mb_type = MB_TYPE_INTRA4x4;
782 } else { /* INTRA16x16 */
783 dir = ff_h264_i_mb_type_info[mb_type - 8].pred_mode;
/* remap H.264 prediction-mode numbering to SVQ3's */
784 dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
786 if ((sl->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, sl, dir, 0)) < 0) {
787 av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
788 return sl->intra16x16_pred_mode;
791 cbp = ff_h264_i_mb_type_info[mb_type - 8].cbp;
792 mb_type = MB_TYPE_INTRA16x16;
/* non-inter MBs in P/B frames still need their MVs cleared */
795 if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
796 for (i = 0; i < 4; i++)
797 memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
798 0, 4 * 2 * sizeof(int16_t));
799 if (h->pict_type == AV_PICTURE_TYPE_B) {
800 for (i = 0; i < 4; i++)
801 memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
802 0, 4 * 2 * sizeof(int16_t));
805 if (!IS_INTRA4x4(mb_type)) {
806 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
808 if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
809 memset(sl->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
/* coded block pattern (not transmitted for intra16x16 / skipped MBs) */
812 if (!IS_INTRA16x16(mb_type) &&
813 (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
814 if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48U){
815 av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
819 cbp = IS_INTRA(mb_type) ? ff_h264_golomb_to_intra4x4_cbp[vlc]
820 : ff_h264_golomb_to_inter_cbp[vlc];
/* optional qscale delta, validated to stay within 0..31 */
822 if (IS_INTRA16x16(mb_type) ||
823 (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
824 sl->qscale += svq3_get_se_golomb(&h->gb);
826 if (sl->qscale > 31u) {
827 av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", sl->qscale);
831 if (IS_INTRA16x16(mb_type)) {
832 AV_ZERO128(sl->mb_luma_dc[0] + 0);
833 AV_ZERO128(sl->mb_luma_dc[0] + 8);
834 if (svq3_decode_block(&h->gb, sl->mb_luma_dc[0], 0, 1)) {
835 av_log(h->avctx, AV_LOG_ERROR,
836 "error while decoding intra luma dc\n");
/* luma AC blocks for each coded 8x8 quadrant of the cbp */
842 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
843 const int type = ((sl->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
845 for (i = 0; i < 4; i++)
846 if ((cbp & (1 << i))) {
847 for (j = 0; j < 4; j++) {
848 k = index ? (1 * (j & 1) + 2 * (i & 1) +
849 2 * (j & 2) + 4 * (i & 2))
851 sl->non_zero_count_cache[scan8[k]] = 1;
853 if (svq3_decode_block(&h->gb, &sl->mb[16 * k], index, type)) {
854 av_log(h->avctx, AV_LOG_ERROR,
855 "error while decoding block\n");
/* chroma DC then chroma AC blocks (if signalled in cbp bits 4-5) */
862 for (i = 1; i < 3; ++i)
863 if (svq3_decode_block(&h->gb, &sl->mb[16 * 16 * i], 0, 3)) {
864 av_log(h->avctx, AV_LOG_ERROR,
865 "error while decoding chroma dc block\n");
870 for (i = 1; i < 3; i++) {
871 for (j = 0; j < 4; j++) {
873 sl->non_zero_count_cache[scan8[k]] = 1;
875 if (svq3_decode_block(&h->gb, &sl->mb[16 * k], 1, 1)) {
876 av_log(h->avctx, AV_LOG_ERROR,
877 "error while decoding chroma ac block\n");
887 h->cur_pic.mb_type[mb_xy] = mb_type;
889 if (IS_INTRA(mb_type))
890 sl->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, sl, DC_PRED8x8, 1);
/**
 * Parse an SVQ3 slice header: validate the header byte, extract the
 * embedded slice into a private buffer (optionally de-watermarking it),
 * then read slice type, qscale, and flags, and reset intra predictors.
 *
 * @return 0 on success, negative AVERROR on malformed/truncated headers
 */
895 static int svq3_decode_slice_header(AVCodecContext *avctx)
897 SVQ3Context *s = avctx->priv_data;
898 H264Context *h = &s->h;
899 H264SliceContext *sl = &h->slice_ctx[0];
900 const int mb_xy = sl->mb_xy;
904 header = get_bits(&s->gb, 8);
/* low 5 bits must be 1 or 2; bits 5-6 give the slice-length field size */
906 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
908 av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
911 int slice_bits, slice_bytes, slice_length;
912 int length = header >> 5 & 3;
914 slice_length = show_bits(&s->gb, 8 * length);
915 slice_bits = slice_length * 8;
916 slice_bytes = slice_length + length - 1;
918 if (slice_bytes > get_bits_left(&s->gb)) {
919 av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
923 skip_bits(&s->gb, 8);
/* copy slice payload out so it can be de-watermarked in place */
925 av_fast_malloc(&s->slice_buf, &s->slice_size, slice_bytes + AV_INPUT_BUFFER_PADDING_SIZE);
927 return AVERROR(ENOMEM);
929 memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);
931 init_get_bits(&h->gb, s->slice_buf, slice_bits);
/* undo the watermark obfuscation by XORing with the derived key */
933 if (s->watermark_key) {
934 uint32_t header = AV_RL32(&h->gb.buffer[1]);
935 AV_WL32(&h->gb.buffer[1], header ^ s->watermark_key);
938 memmove(s->slice_buf, &s->slice_buf[slice_length], length - 1);
940 skip_bits_long(&s->gb, slice_bytes * 8);
943 if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
944 av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
948 sl->slice_type = ff_h264_golomb_to_pict_type[slice_id];
/* header type 2 carries an explicit macroblock skip run */
950 if ((header & 0x9F) == 2) {
951 i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
952 sl->mb_skip_run = get_bits(&h->gb, i) -
953 (sl->mb_y * h->mb_width + sl->mb_x);
959 sl->slice_num = get_bits(&h->gb, 8);
960 sl->qscale = get_bits(&h->gb, 5);
961 s->adaptive_quant = get_bits1(&h->gb);
966 if (s->has_watermark)
970 skip_bits(&h->gb, 2);
972 if (skip_1stop_8data_bits(&h->gb) < 0)
973 return AVERROR_INVALIDDATA;
975 /* reset intra predictors and invalidate motion vector references */
977 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
978 -1, 4 * sizeof(int8_t));
979 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - sl->mb_x],
980 -1, 8 * sizeof(int8_t) * sl->mb_x);
983 memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
984 -1, 8 * sizeof(int8_t) * (h->mb_width - sl->mb_x));
987 sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
/**
 * Initialize the SVQ3 decoder: allocate picture state, set up the shared
 * H.264 machinery for SVQ3 use, and parse the "SEQH" extradata block
 * (frame size, halfpel/thirdpel capabilities, optional watermark whose
 * checksum becomes the slice de-obfuscation key).
 *
 * @return 0 on success, negative AVERROR on allocation or extradata errors
 *         (failure paths go through svq3_decode_end for cleanup)
 */
993 static av_cold int svq3_decode_init(AVCodecContext *avctx)
995 SVQ3Context *s = avctx->priv_data;
996 H264Context *h = &s->h;
997 H264SliceContext *sl;
999 unsigned char *extradata;
1000 unsigned char *extradata_end;
1002 int marker_found = 0;
1005 s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
1006 s->last_pic = av_mallocz(sizeof(*s->last_pic));
1007 s->next_pic = av_mallocz(sizeof(*s->next_pic));
1008 if (!s->next_pic || !s->last_pic || !s->cur_pic) {
1009 ret = AVERROR(ENOMEM);
1013 s->cur_pic->f = av_frame_alloc();
1014 s->last_pic->f = av_frame_alloc();
1015 s->next_pic->f = av_frame_alloc();
1016 if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
1017 return AVERROR(ENOMEM);
1019 if ((ret = ff_h264_decode_init(avctx)) < 0)
1022 // we will overwrite it later during decoding
1023 av_frame_free(&h->cur_pic.f);
1025 av_frame_free(&h->last_pic_for_ec.f);
/* SVQ3 is always 8-bit 4:2:0; initialize DSP with fixed parameters */
1027 ff_h264dsp_init(&h->h264dsp, 8, 1);
1028 av_assert0(h->sps.bit_depth_chroma == 0);
1029 ff_h264_pred_init(&h->hpc, AV_CODEC_ID_SVQ3, 8, 1);
1030 ff_videodsp_init(&h->vdsp, 8);
/* flat scaling matrices (no per-stream quant matrices in SVQ3) */
1032 memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t));
1033 memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t));
1035 avctx->bits_per_raw_sample = 8;
1036 h->sps.bit_depth_luma = 8;
1037 h->chroma_format_idc = 1;
1039 ff_hpeldsp_init(&s->hdsp, avctx->flags);
1040 ff_tpeldsp_init(&s->tdsp);
1044 h->flags = avctx->flags;
1046 h->sps.chroma_format_idc = 1;
1047 h->picture_structure = PICT_FRAME;
1048 avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
1049 avctx->color_range = AVCOL_RANGE_JPEG;
1051 h->slice_ctx[0].chroma_qp[0] = h->slice_ctx[0].chroma_qp[1] = 4;
1052 h->chroma_x_shift = h->chroma_y_shift = 1;
/* defaults if no SEQH extradata is present */
1054 s->halfpel_flag = 1;
1055 s->thirdpel_flag = 1;
1056 s->has_watermark = 0;
1058 /* prowl for the "SEQH" marker in the extradata */
1059 extradata = (unsigned char *)avctx->extradata;
1060 extradata_end = avctx->extradata + avctx->extradata_size;
1062 for (m = 0; m + 8 < avctx->extradata_size; m++) {
1063 if (!memcmp(extradata, "SEQH", 4)) {
1071 /* if a match was found, parse the extra data */
1074 int frame_size_code;
1075 int unk0, unk1, unk2, unk3, unk4;
1077 size = AV_RB32(&extradata[4]);
1078 if (size > extradata_end - extradata - 8) {
1079 ret = AVERROR_INVALIDDATA;
1082 init_get_bits(&gb, extradata + 8, size * 8);
1084 /* 'frame size code' and optional 'width, height' */
1085 frame_size_code = get_bits(&gb, 3);
1086 switch (frame_size_code) {
1089 avctx->height = 120;
1097 avctx->height = 144;
1101 avctx->height = 288;
1105 avctx->height = 576;
1109 avctx->height = 180;
1113 avctx->height = 240;
/* code 7: explicit 12-bit width and height */
1116 avctx->width = get_bits(&gb, 12);
1117 avctx->height = get_bits(&gb, 12);
1121 s->halfpel_flag = get_bits1(&gb);
1122 s->thirdpel_flag = get_bits1(&gb);
1124 /* unknown fields */
1125 unk0 = get_bits1(&gb);
1126 unk1 = get_bits1(&gb);
1127 unk2 = get_bits1(&gb);
1128 unk3 = get_bits1(&gb);
1130 h->low_delay = get_bits1(&gb);
1133 unk4 = get_bits1(&gb);
1135 av_log(avctx, AV_LOG_DEBUG, "Unknown fields %d %d %d %d %d\n",
1136 unk0, unk1, unk2, unk3, unk4);
1138 if (skip_1stop_8data_bits(&gb) < 0) {
1139 ret = AVERROR_INVALIDDATA;
1143 s->has_watermark = get_bits1(&gb);
1144 avctx->has_b_frames = !h->low_delay;
1145 if (s->has_watermark) {
/* watermark: a zlib-compressed logo; its checksum forms the key used
 * to de-obfuscate slice data (requires zlib at build time) */
1147 unsigned watermark_width = svq3_get_ue_golomb(&gb);
1148 unsigned watermark_height = svq3_get_ue_golomb(&gb);
1149 int u1 = svq3_get_ue_golomb(&gb);
1150 int u2 = get_bits(&gb, 8);
1151 int u3 = get_bits(&gb, 2);
1152 int u4 = svq3_get_ue_golomb(&gb);
1153 unsigned long buf_len = watermark_width *
1154 watermark_height * 4;
1155 int offset = get_bits_count(&gb) + 7 >> 3;
/* reject dimensions that would overflow the buffer-size computation */
1158 if (watermark_height <= 0 ||
1159 (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height) {
1164 buf = av_malloc(buf_len);
1166 ret = AVERROR(ENOMEM);
1169 av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1170 watermark_width, watermark_height);
1171 av_log(avctx, AV_LOG_DEBUG,
1172 "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1173 u1, u2, u3, u4, offset);
1174 if (uncompress(buf, &buf_len, extradata + 8 + offset,
1175 size - offset) != Z_OK) {
1176 av_log(avctx, AV_LOG_ERROR,
1177 "could not uncompress watermark logo\n");
1182 s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1183 s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1184 av_log(avctx, AV_LOG_DEBUG,
1185 "watermark key %#"PRIx32"\n", s->watermark_key);
1188 av_log(avctx, AV_LOG_ERROR,
1189 "this svq3 file contains watermark which need zlib support compiled in\n");
/* derive macroblock geometry from the (possibly extradata-set) size */
1196 h->width = avctx->width;
1197 h->height = avctx->height;
1198 h->mb_width = (h->width + 15) / 16;
1199 h->mb_height = (h->height + 15) / 16;
1200 h->mb_stride = h->mb_width + 1;
1201 h->mb_num = h->mb_width * h->mb_height;
1202 h->b_stride = 4 * h->mb_width;
1203 s->h_edge_pos = h->mb_width * 16;
1204 s->v_edge_pos = h->mb_height * 16;
1206 if ((ret = ff_h264_alloc_tables(h)) < 0) {
1207 av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1213 svq3_decode_end(avctx);
1217 static void free_picture(AVCodecContext *avctx, H264Picture *pic)
1220 for (i = 0; i < 2; i++) {
1221 av_buffer_unref(&pic->motion_val_buf[i]);
1222 av_buffer_unref(&pic->ref_index_buf[i]);
1224 av_buffer_unref(&pic->mb_type_buf);
1226 av_frame_unref(pic->f);
/**
 * Obtain frame data for @p pic and (lazily) its decoding side buffers.
 *
 * The motion vector, reference index and mb_type buffers are allocated only
 * on the first call for a given picture; later calls reuse them and only
 * re-acquire the AVFrame data planes.
 *
 * @return 0 on success, a negative AVERROR code on failure. On failure all
 *         partially acquired buffers of @p pic are released again.
 */
static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
{
    SVQ3Context *s = avctx->priv_data;
    H264Context *h = &s->h;
    H264SliceContext *sl = &h->slice_ctx[0];
    /* sizes include one extra stride of padding so neighbour lookups at the
     * picture border stay inside the allocation */
    const int big_mb_num    = h->mb_stride * (h->mb_height + 1) + 1;
    const int mb_array_size = h->mb_stride * h->mb_height;
    const int b4_stride     = h->mb_width * 4 + 1;
    const int b4_array_size = b4_stride * h->mb_height * 4;
    int ret;

    if (!pic->motion_val_buf[0]) {
        int i;

        pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
        if (!pic->mb_type_buf)
            return AVERROR(ENOMEM);
        /* skip two strides plus one entry so that top/left neighbour
         * accesses for MB (0,0) land in allocated (zeroed) memory */
        pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;

        for (i = 0; i < 2; i++) {
            pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
            pic->ref_index_buf[i]  = av_buffer_allocz(4 * mb_array_size);
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            /* offset by 4 motion vectors of padding, mirroring the
             * b4_array_size + 4 over-allocation above */
            pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
            pic->ref_index[i]  = pic->ref_index_buf[i]->data;
        }
    }
    /* everything except B-frames may serve as a reference later on */
    pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);

    ret = ff_get_buffer(avctx, pic->f,
                        pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
    if (ret < 0)
        goto fail;

    if (!sl->edge_emu_buffer) {
        /* 17 lines of scratch space for motion compensation that reaches
         * outside the picture edges; sized from the luma linesize */
        sl->edge_emu_buffer = av_mallocz_array(pic->f->linesize[0], 17);
        if (!sl->edge_emu_buffer)
            return AVERROR(ENOMEM);
    }

    sl->linesize   = pic->f->linesize[0];
    sl->uvlinesize = pic->f->linesize[1];

    return 0;
fail:
    free_picture(avctx, pic);
    return ret;
}
/**
 * Decode one SVQ3 packet into a picture.
 *
 * Handles the end-of-stream flush (empty packet), per-frame reference
 * management (cur/last/next picture rotation), the macroblock decode loop,
 * and output selection (B-frames and low-delay streams output the current
 * picture, otherwise the previous one is output, one frame delayed).
 *
 * @return number of consumed bytes, 0 for skipped/flushed frames, or a
 *         negative error code.
 */
static int svq3_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    SVQ3Context *s = avctx->priv_data;
    H264Context *h = &s->h;
    H264SliceContext *sl = &h->slice_ctx[0];
    int buf_size = avpkt->size;
    int left;
    uint8_t *buf;
    int ret, m, i;

    /* special case for last picture */
    if (buf_size == 0) {
        /* with B-frames the decoder runs one frame behind; emit the
         * buffered next_pic exactly once on the final (empty) packet */
        if (s->next_pic->f->data[0] && !h->low_delay && !s->last_frame_output) {
            ret = av_frame_ref(data, s->next_pic->f);
            if (ret < 0)
                return ret;
            s->last_frame_output = 1;
            *got_frame          = 1;
        }
        return 0;
    }

    sl->mb_x = sl->mb_y = sl->mb_xy = 0;

    if (s->watermark_key) {
        /* work on a padded private copy of the packet — presumably because
         * watermark descrambling rewrites the bitstream in place; confirm in
         * svq3_decode_slice_header() */
        av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
        if (!s->buf)
            return AVERROR(ENOMEM);
        memcpy(s->buf, avpkt->data, buf_size);
        buf = s->buf;
    } else {
        buf = avpkt->data;
    }

    ret = init_get_bits(&s->gb, buf, 8 * buf_size);
    if (ret < 0)
        return ret;

    if (svq3_decode_slice_header(avctx))
        return -1;

    h->pict_type = sl->slice_type;

    /* rotate references: the picture decoded last becomes the backward
     * reference for this one (B-frames keep the existing pair) */
    if (h->pict_type != AV_PICTURE_TYPE_B)
        FFSWAP(H264Picture*, s->next_pic, s->last_pic);

    av_frame_unref(s->cur_pic->f);

    /* for skipping the frame */
    s->cur_pic->f->pict_type = h->pict_type;
    s->cur_pic->f->key_frame = (h->pict_type == AV_PICTURE_TYPE_I);

    ret = get_buffer(avctx, s->cur_pic);
    if (ret < 0)
        return ret;

    h->cur_pic_ptr = s->cur_pic;
    h->cur_pic     = *s->cur_pic;

    /* precompute pixel offsets of the 16 luma 4x4 blocks (scan8 order);
     * entries 48+ are the same offsets with doubled row stride */
    for (i = 0; i < 16; i++) {
        h->block_offset[i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * sl->linesize * ((scan8[i] - scan8[0]) >> 3);
        h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * sl->linesize * ((scan8[i] - scan8[0]) >> 3);
    }
    /* same table for the two chroma planes, using the chroma linesize */
    for (i = 0; i < 16; i++) {
        h->block_offset[16 + i]      =
        h->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * sl->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
        h->block_offset[48 + 16 + i] =
        h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * sl->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
    }

    if (h->pict_type != AV_PICTURE_TYPE_I) {
        /* missing reference (e.g. after a seek): synthesize a grey frame
         * (zero luma, 0x80 chroma) so decoding can continue */
        if (!s->last_pic->f->data[0]) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            av_frame_unref(s->last_pic->f);
            ret = get_buffer(avctx, s->last_pic);
            if (ret < 0)
                return ret;
            memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
            memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
                   s->last_pic->f->linesize[1]);
            memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
                   s->last_pic->f->linesize[2]);
        }

        if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            av_frame_unref(s->next_pic->f);
            ret = get_buffer(avctx, s->next_pic);
            if (ret < 0)
                return ret;
            memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
            memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
                   s->next_pic->f->linesize[1]);
            memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
                   s->next_pic->f->linesize[2]);
        }
    }

    if (avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(h->avctx, AV_LOG_DEBUG,
               "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
               av_get_picture_type_char(h->pict_type),
               s->halfpel_flag, s->thirdpel_flag,
               s->adaptive_quant, h->slice_ctx[0].qscale, sl->slice_num);

    /* honour the user's skip_frame setting */
    if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
        avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
        avctx->skip_frame >= AVDISCARD_ALL)
        return 0;

    if (s->next_p_frame_damaged) {
        /* the B-frame depends on the damaged P-frame; drop it */
        if (h->pict_type == AV_PICTURE_TYPE_B)
            return 0;
        else
            s->next_p_frame_damaged = 0;
    }

    /* slice_num acts as the (mod-256) frame number in SVQ3; maintain the
     * offsets used to validate B-frame temporal ordering */
    if (h->pict_type == AV_PICTURE_TYPE_B) {
        h->frame_num_offset = sl->slice_num - h->prev_frame_num;

        if (h->frame_num_offset < 0)
            h->frame_num_offset += 256;
        /* a B-frame must lie strictly between its two references */
        if (h->frame_num_offset == 0 ||
            h->frame_num_offset >= h->prev_frame_num_offset) {
            av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
            return -1;
        }
    } else {
        h->prev_frame_num        = h->frame_num;
        h->frame_num             = sl->slice_num;
        h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;

        if (h->prev_frame_num_offset < 0)
            h->prev_frame_num_offset += 256;
    }

    /* initialize the reference cache: in-row entries (j = -1..3) marked
     * usable (1), the cell past each of the first three rows marked
     * PART_NOT_AVAILABLE */
    for (m = 0; m < 2; m++) {
        int i;
        for (i = 0; i < 4; i++) {
            int j;
            for (j = -1; j < 4; j++)
                sl->ref_cache[m][scan8[0] + 8 * i + j] = 1;
            if (i < 3)
                sl->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
        }
    }

    /* main macroblock decode loop, raster order */
    for (sl->mb_y = 0; sl->mb_y < h->mb_height; sl->mb_y++) {
        for (sl->mb_x = 0; sl->mb_x < h->mb_width; sl->mb_x++) {
            unsigned mb_type;
            sl->mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;

            /* fewer than 8 usable bits left: if the remainder is only byte
             * padding, a new slice header follows */
            if ((get_bits_left(&h->gb)) <= 7) {
                if (((get_bits_count(&h->gb) & 7) == 0 ||
                     show_bits(&h->gb, get_bits_left(&h->gb) & 7) == 0)) {

                    if (svq3_decode_slice_header(avctx))
                        return -1;
                }
                /* TODO: support s->mb_skip_run */
            }

            mb_type = svq3_get_ue_golomb(&h->gb);

            /* fold the picture type into the mb_type code space */
            if (h->pict_type == AV_PICTURE_TYPE_I)
                mb_type += 8;
            else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
                mb_type += 4;
            if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
                return -1;
            }

            /* skipped MBs without coded residual need no reconstruction */
            if (mb_type != 0 || sl->cbp)
                hl_decode_mb(h, &h->slice_ctx[0]);

            if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
                h->cur_pic.mb_type[sl->mb_x + sl->mb_y * h->mb_stride] =
                    (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
        }

        ff_draw_horiz_band(avctx, s->cur_pic->f,
                           s->last_pic->f->data[0] ? s->last_pic->f : NULL,
                           16 * sl->mb_y, 16, h->picture_structure, 0,
                           h->low_delay);
    }

    left = buf_size*8 - get_bits_count(&h->gb);

    if (sl->mb_y != h->mb_height || sl->mb_x != h->mb_width) {
        av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, sl->mb_y, sl->mb_x, left);
        //av_hex_dump(stderr, buf+buf_size-8, 8);
    }

    /* bitstream over-read */
    if (left < 0) {
        av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
        return -1;
    }

    /* output: B-frames and low-delay streams emit the just-decoded picture,
     * otherwise emit the previous one (one frame of reordering delay) */
    if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
        ret = av_frame_ref(data, s->cur_pic->f);
    else if (s->last_pic->f->data[0])
        ret = av_frame_ref(data, s->last_pic->f);
    if (ret < 0)
        return ret;

    /* Do not output the last pic after seeking. */
    if (s->last_pic->f->data[0] || h->low_delay)
        *got_frame = 1;

    if (h->pict_type != AV_PICTURE_TYPE_B) {
        /* keep the decoded picture around as the future reference */
        FFSWAP(H264Picture*, s->cur_pic, s->next_pic);
    } else {
        av_frame_unref(s->cur_pic->f);
    }

    return buf_size;
}
1503 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1505 SVQ3Context *s = avctx->priv_data;
1506 H264Context *h = &s->h;
1508 free_picture(avctx, s->cur_pic);
1509 free_picture(avctx, s->next_pic);
1510 free_picture(avctx, s->last_pic);
1511 av_frame_free(&s->cur_pic->f);
1512 av_frame_free(&s->next_pic->f);
1513 av_frame_free(&s->last_pic->f);
1514 av_freep(&s->cur_pic);
1515 av_freep(&s->next_pic);
1516 av_freep(&s->last_pic);
1517 av_freep(&s->slice_buf);
1519 memset(&h->cur_pic, 0, sizeof(h->cur_pic));
1521 ff_h264_free_context(h);
1529 AVCodec ff_svq3_decoder = {
1531 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1532 .type = AVMEDIA_TYPE_VIDEO,
1533 .id = AV_CODEC_ID_SVQ3,
1534 .priv_data_size = sizeof(SVQ3Context),
1535 .init = svq3_decode_init,
1536 .close = svq3_decode_end,
1537 .decode = svq3_decode_frame,
1538 .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
1541 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,