 * Copyright (c) 2003 The FFmpeg Project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 * How to use this decoder:
 * SVQ3 data is transported within Apple QuickTime files. QuickTime files
 * have stsd atoms to describe media trak properties. A stsd atom for a
 * video trak contains one or more ImageDescription atoms. These atoms begin
 * with the 4-byte length of the atom followed by the codec fourcc. Some
 * decoders need information from this atom in order to operate correctly;
 * SVQ3 is one of them. To get the best use out of this decoder, the calling
 * app must make the SVQ3 ImageDescription atom available via the
 * AVCodecContext's extradata[_size] fields:
 *
 * AVCodecContext.extradata = pointer to the ImageDescription, whose first
 * bytes are expected to be 'S', 'V', 'Q' and '3', NOT the 4-byte atom length
 * AVCodecContext.extradata_size = size of the ImageDescription atom memory
 * buffer (which will be the same as the ImageDescription atom size field
 * from the QT file, minus 4 bytes since the length is missing)
 *
 * You will know these parameters are being passed correctly when the decoder
 * correctly decodes this file:
 *  http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
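 *
 * A minimal sketch of that setup, as a hypothetical caller might write it
 * (error checking omitted; 'image_description' and 'image_description_size'
 * are assumed to describe the atom payload starting at the 'SVQ3' fourcc):
 *
 *   const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_SVQ3);
 *   AVCodecContext *avctx = avcodec_alloc_context3(codec);
 *   avctx->extradata = av_mallocz(image_description_size +
 *                                 AV_INPUT_BUFFER_PADDING_SIZE);
 *   memcpy(avctx->extradata, image_description, image_description_size);
 *   avctx->extradata_size = image_description_size;
 *   avcodec_open2(avctx, codec, NULL);
 *
 * Note that extradata must be allocated with the av_malloc() family and be
 * padded with AV_INPUT_BUFFER_PADDING_SIZE zeroed bytes, as sketched above.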
#include "libavutil/attributes.h"
#include "mpegutils.h"
#include "rectangle.h"
typedef struct SVQ3Frame {
    AVBufferRef *motion_val_buf[2];
    int16_t (*motion_val[2])[2];
    AVBufferRef *mb_type_buf;
    AVBufferRef *ref_index_buf[2];
typedef struct SVQ3Context {
    AVCodecContext *avctx;
    H264DSPContext h264dsp;
    GetBitContext gb_slice;
    uint32_t watermark_key;
    int next_p_frame_damaged;
    int last_frame_output;
    int frame_num_offset;
    int prev_frame_num_offset;
    enum AVPictureType pict_type;
    enum AVPictureType slice_type;
    int mb_width, mb_height;
    int mb_stride, mb_num;
    int chroma_pred_mode;
    int intra16x16_pred_mode;
    int8_t intra4x4_pred_mode_cache[5 * 8];
    int8_t (*intra4x4_pred_mode);
    unsigned int top_samples_available;
    unsigned int topright_samples_available;
    unsigned int left_samples_available;
    uint8_t *edge_emu_buffer;
    DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
    DECLARE_ALIGNED(8, int8_t, ref_cache)[2][5 * 8];
    DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];
    DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];
    DECLARE_ALIGNED(8, uint8_t, non_zero_count_cache)[15 * 8];
    uint32_t dequant4_coeff[QP_MAX_NUM + 1][16];
    int block_offset[2 * (16 * 3)];
#define FULLPEL_MODE  1
#define HALFPEL_MODE  2
#define THIRDPEL_MODE 3
#define PREDICT_MODE  4

/* dual scan (from some older H.264 draft)
static const uint8_t svq3_scan[16] = {
    0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
    2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
    0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
    0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
};

static const uint8_t luma_dc_zigzag_scan[16] = {
    0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
    3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
    1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
    3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
};

static const uint8_t svq3_pred_0[25][2] = {
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },

static const int8_t svq3_pred_1[6][6][5] = {
    { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
      { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};

static const struct {
} svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};

static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
     9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
    24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
    61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
};

static int svq3_decode_end(AVCodecContext *avctx);
static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
    const unsigned qmul = svq3_dequant_coeff[qp];
    static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };

    /* horizontal (row) pass of the 4x4 DC transform */
    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
        const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
        const int z2 =  7 * input[4 * i + 1] - 17 * input[4 * i + 3];
        const int z3 = 17 * input[4 * i + 1] +  7 * input[4 * i + 3];

        temp[4 * i + 0] = z0 + z3;
        temp[4 * i + 1] = z1 + z2;
        temp[4 * i + 2] = z1 - z2;
        temp[4 * i + 3] = z0 - z3;
    }

    /* vertical (column) pass, with dequantization and rounding */
    for (i = 0; i < 4; i++) {
        const int offset = x_offset[i];
        const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
        const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
        const int z2 =  7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
        const int z3 = 17 * temp[4 * 1 + i] +  7 * temp[4 * 3 + i];

        output[stride *  0 + offset] = (int)((z0 + z3) * qmul + 0x80000) >> 20;
        output[stride *  2 + offset] = (int)((z1 + z2) * qmul + 0x80000) >> 20;
        output[stride *  8 + offset] = (int)((z1 - z2) * qmul + 0x80000) >> 20;
        output[stride * 10 + offset] = (int)((z0 - z3) * qmul + 0x80000) >> 20;
static void svq3_add_idct_c(uint8_t *dst, int16_t *block,
                            int stride, int qp, int dc)
    const int qmul = svq3_dequant_coeff[qp];

    dc = 13 * 13 * (dc == 1 ? 1538U * block[0]
                            : qmul * (block[0] >> 3) / 2);

    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
        const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
        const int z2 =  7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
        const int z3 = 17 * block[1 + 4 * i] +  7 * block[3 + 4 * i];

        block[0 + 4 * i] = z0 + z3;
        block[1 + 4 * i] = z1 + z2;
        block[2 + 4 * i] = z1 - z2;
        block[3 + 4 * i] = z0 - z3;
    }

    for (i = 0; i < 4; i++) {
        const unsigned z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
        const unsigned z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
        const unsigned z2 =  7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
        const unsigned z3 = 17 * block[i + 4 * 1] +  7 * block[i + 4 * 3];
        const int rr = (dc + 0x80000u);

        dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((int)((z0 + z3) * qmul + rr) >> 20));
        dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((int)((z1 + z2) * qmul + rr) >> 20));
        dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((int)((z1 - z2) * qmul + rr) >> 20));
        dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((int)((z0 - z3) * qmul + rr) >> 20));
    }

    memset(block, 0, 16 * sizeof(int16_t));
static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
                                    int index, const int type)
    static const uint8_t *const scan_patterns[4] = {
        luma_dc_zigzag_scan, ff_zigzag_scan, svq3_scan, ff_h264_chroma_dc_scan
    };

    int run, level, sign, limit;
    const int intra = 3 * type >> 2;
    const uint8_t *const scan = scan_patterns[type];

    for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
        for (; (vlc = get_interleaved_ue_golomb(gb)) != 0; index++) {
            if ((int32_t)vlc < 0)

            sign = (vlc & 1) ? 0 : -1;

                } else if (vlc < 4) {

                    level = (vlc + 9 >> 2) - run;

                    run = svq3_dct_tables[intra][vlc].run;
                    level = svq3_dct_tables[intra][vlc].level;

                    level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));

                    level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));

            if ((index += run) >= limit)

            block[scan[index]] = (level ^ sign) - sign;
static av_always_inline int
svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C,
                       int i, int list, int part_width)
    const int topright_ref = s->ref_cache[list][i - 8 + part_width];

    if (topright_ref != PART_NOT_AVAILABLE) {
        *C = s->mv_cache[list][i - 8 + part_width];

        *C = s->mv_cache[list][i - 8 - 1];
        return s->ref_cache[list][i - 8 - 1];
 * Get the predicted MV.
 * @param n the block index
 * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n,
                                              int part_width, int list,
                                              int ref, int *const mx, int *const my)
    const int index8 = scan8[n];
    const int top_ref = s->ref_cache[list][index8 - 8];
    const int left_ref = s->ref_cache[list][index8 - 1];
    const int16_t *const A = s->mv_cache[list][index8 - 1];
    const int16_t *const B = s->mv_cache[list][index8 - 8];

    int diagonal_ref, match_count;

    diagonal_ref = svq3_fetch_diagonal_mv(s, &C, index8, list, part_width);
    match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
    if (match_count > 1) { // most common
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
    } else if (match_count == 1) {
        if (left_ref == ref) {

        } else if (top_ref == ref) {

        if (top_ref == PART_NOT_AVAILABLE &&
            diagonal_ref == PART_NOT_AVAILABLE &&
            left_ref != PART_NOT_AVAILABLE) {

            *mx = mid_pred(A[0], B[0], C[0]);
            *my = mid_pred(A[1], B[1], C[1]);
static inline void svq3_mc_dir_part(SVQ3Context *s,
                                    int x, int y, int width, int height,
                                    int mx, int my, int dxy,
                                    int thirdpel, int dir, int avg)
    const SVQ3Frame *pic = (dir == 0) ? s->last_pic : s->next_pic;
    int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
    int linesize = s->cur_pic->f->linesize[0];
    int uvlinesize = s->cur_pic->f->linesize[1];

    if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
        my < 0 || my >= s->v_edge_pos - height - 1) {
        mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
        my = av_clip(my, -16, s->v_edge_pos - height + 15);
    }

    /* form component predictions */
    dest = s->cur_pic->f->data[0] + x + y * linesize;
    src = pic->f->data[0] + mx + my * linesize;

        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                 width + 1, height + 1,
                                 mx, my, s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;

        (avg ? s->tdsp.avg_tpel_pixels_tab
             : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, linesize,

        (avg ? s->hdsp.avg_pixels_tab
             : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, linesize,

    if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        mx = mx + (mx < (int) x) >> 1;
        my = my + (my < (int) y) >> 1;
        height = height >> 1;

        for (i = 1; i < 3; i++) {
            dest = s->cur_pic->f->data[i] + (x >> 1) + (y >> 1) * uvlinesize;
            src = pic->f->data[i] + mx + my * uvlinesize;

                s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                         uvlinesize, uvlinesize,
                                         width + 1, height + 1,
                                         mx, my, (s->h_edge_pos >> 1),
                src = s->edge_emu_buffer;

                (avg ? s->tdsp.avg_tpel_pixels_tab
                     : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,

                (avg ? s->hdsp.avg_pixels_tab
                     : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
    int i, j, k, mx, my, dx, dy, x, y;
    const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height = 16 >> ((unsigned)(size + 1) / 3);
    const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
    const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
    const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;

    for (i = 0; i < 16; i += part_height)
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4 * s->mb_x + (j >> 2)) +
                             (4 * s->mb_y + (i >> 2)) * s->b_stride;

            x = 16 * s->mb_x + j;
            y = 16 * s->mb_y + i;
            k = (j >> 2 & 1) + (i >> 1 & 2) +
                (j >> 1 & 4) + (i & 8);

            if (mode != PREDICT_MODE) {
                svq3_pred_motion(s, k, part_width >> 2, dir, 1, &mx, &my);

                mx = s->next_pic->motion_val[0][b_xy][0] * 2;
                my = s->next_pic->motion_val[0][b_xy][1] * 2;

                    mx = mx * s->frame_num_offset /
                         s->prev_frame_num_offset + 1 >> 1;
                    my = my * s->frame_num_offset /
                         s->prev_frame_num_offset + 1 >> 1;

                    mx = mx * (s->frame_num_offset - s->prev_frame_num_offset) /
                         s->prev_frame_num_offset + 1 >> 1;
                    my = my * (s->frame_num_offset - s->prev_frame_num_offset) /
                         s->prev_frame_num_offset + 1 >> 1;

            /* clip motion vector prediction to frame border */
            mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
            my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {

                dy = get_interleaved_se_golomb(&s->gb_slice);
                dx = get_interleaved_se_golomb(&s->gb_slice);

                if (dx != (int16_t)dx || dy != (int16_t)dy) {
                    av_log(s->avctx, AV_LOG_ERROR, "invalid MV vlc\n");

            /* compute motion vector */
            if (mode == THIRDPEL_MODE) {
                mx = (mx + 1 >> 1) + dx;
                my = (my + 1 >> 1) + dy;
                fx = (unsigned)(mx + 0x30000) / 3 - 0x10000;
                fy = (unsigned)(my + 0x30000) / 3 - 0x10000;
                dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 fx, fy, dxy, 1, dir, avg);

            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                mx = (unsigned)(mx + 1 + 0x30000) / 3 + dx - 0x10000;
                my = (unsigned)(my + 1 + 0x30000) / 3 + dy - 0x10000;
                dxy = (mx & 1) + 2 * (my & 1);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx >> 1, my >> 1, dxy, 0, dir, avg);

                mx = (unsigned)(mx + 3 + 0x60000) / 6 + dx - 0x10000;
                my = (unsigned)(my + 3 + 0x60000) / 6 + dy - 0x10000;

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx, my, 0, 0, dir, avg);

            /* update mv_cache */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx, my);

                if (part_height == 8 && i < 8) {
                    AV_WN32A(s->mv_cache[dir][scan8[k] + 1 * 8], mv);

                    if (part_width == 8 && j < 8)
                        AV_WN32A(s->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);

                if (part_width == 8 && j < 8)
                    AV_WN32A(s->mv_cache[dir][scan8[k] + 1], mv);
                if (part_width == 4 || part_height == 4)
                    AV_WN32A(s->mv_cache[dir][scan8[k]], mv);

            /* write back motion vectors */
            fill_rectangle(s->cur_pic->motion_val[dir][b_xy],
                           part_width >> 2, part_height >> 2, s->b_stride,
                           pack16to32(mx, my), 4);
static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s,
                                                    int mb_type, const int *block_offset,
                                                    int linesize, uint8_t *dest_y)
    if (!IS_INTRA4x4(mb_type)) {
        for (i = 0; i < 16; i++)
            if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
                uint8_t *const ptr = dest_y + block_offset[i];
                svq3_add_idct_c(ptr, s->mb + i * 16, linesize,
                                s->qscale, IS_INTRA(mb_type) ? 1 : 0);
static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s,
                                                        const int *block_offset,
    int qscale = s->qscale;

    if (IS_INTRA4x4(mb_type)) {
        for (i = 0; i < 16; i++) {
            uint8_t *const ptr = dest_y + block_offset[i];
            const int dir = s->intra4x4_pred_mode_cache[scan8[i]];

            if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
                const int topright_avail = (s->topright_samples_available << i) & 0x8000;
                av_assert2(s->mb_y || linesize <= block_offset[i]);
                if (!topright_avail) {
                    tr = ptr[3 - linesize] * 0x01010101u;
                    topright = (uint8_t *)&tr;

                    topright = ptr + 4 - linesize;

            s->hpc.pred4x4[dir](ptr, topright, linesize);
            nnz = s->non_zero_count_cache[scan8[i]];

                svq3_add_idct_c(ptr, s->mb + i * 16, linesize, qscale, 0);

        s->hpc.pred16x16[s->intra16x16_pred_mode](dest_y, linesize);
        svq3_luma_dc_dequant_idct_c(s->mb, s->mb_luma_dc[0], qscale);
static void hl_decode_mb(SVQ3Context *s)
    const int mb_x = s->mb_x;
    const int mb_y = s->mb_y;
    const int mb_xy = s->mb_xy;
    const int mb_type = s->cur_pic->mb_type[mb_xy];
    uint8_t *dest_y, *dest_cb, *dest_cr;
    int linesize, uvlinesize;
    const int *block_offset = &s->block_offset[0];
    const int block_h = 16 >> 1;

    linesize = s->cur_pic->f->linesize[0];
    uvlinesize = s->cur_pic->f->linesize[1];

    dest_y = s->cur_pic->f->data[0] + (mb_x + mb_y * linesize) * 16;
    dest_cb = s->cur_pic->f->data[1] + mb_x * 8 + mb_y * uvlinesize * block_h;
    dest_cr = s->cur_pic->f->data[2] + mb_x * 8 + mb_y * uvlinesize * block_h;

    s->vdsp.prefetch(dest_y + (s->mb_x & 3) * 4 * linesize + 64, linesize, 4);
    s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * uvlinesize + 64, dest_cr - dest_cb, 2);

    if (IS_INTRA(mb_type)) {
        s->hpc.pred8x8[s->chroma_pred_mode](dest_cb, uvlinesize);
        s->hpc.pred8x8[s->chroma_pred_mode](dest_cr, uvlinesize);

        hl_decode_mb_predict_luma(s, mb_type, block_offset, linesize, dest_y);
    }

    hl_decode_mb_idct_luma(s, mb_type, block_offset, linesize, dest_y);

        uint8_t *dest[2] = { dest_cb, dest_cr };
        s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 1,
                                               s->dequant4_coeff[4][0]);
        s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 2,
                                               s->dequant4_coeff[4][0]);
        for (j = 1; j < 3; j++) {
            for (i = j * 16; i < j * 16 + 4; i++)
                if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
                    uint8_t *const ptr = dest[j - 1] + block_offset[i];
                    svq3_add_idct_c(ptr, s->mb + i * 16,
                                    uvlinesize, ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
    int i, j, k, m, dir, mode;
    const int mb_xy = s->mb_xy;
    const int b_xy = 4 * s->mb_x + 4 * s->mb_y * s->b_stride;

    s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
    s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
    s->topright_samples_available = 0xFFFF;

    if (mb_type == 0) { /* SKIP */
        if (s->pict_type == AV_PICTURE_TYPE_P ||
            s->next_pic->mb_type[mb_xy] == -1) {
            svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,

            if (s->pict_type == AV_PICTURE_TYPE_B)
                svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,

            mb_type = MB_TYPE_SKIP;

            mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)

            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)

            mb_type = MB_TYPE_16x16;

    } else if (mb_type < 8) { /* INTER */
        if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&s->gb_slice))
            mode = THIRDPEL_MODE;
        else if (s->halfpel_flag &&
                 s->thirdpel_flag == !get_bits1(&s->gb_slice))

        /* note ref_cache should contain here:

        for (m = 0; m < 2; m++) {
            if (s->mb_x > 0 && s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6] != -1) {
                for (i = 0; i < 4; i++)
                    AV_COPY32(s->mv_cache[m][scan8[0] - 1 + i * 8],
                              s->cur_pic->motion_val[m][b_xy - 1 + i * s->b_stride]);

                for (i = 0; i < 4; i++)
                    AV_ZERO32(s->mv_cache[m][scan8[0] - 1 + i * 8]);

                memcpy(s->mv_cache[m][scan8[0] - 1 * 8],
                       s->cur_pic->motion_val[m][b_xy - s->b_stride],
                       4 * 2 * sizeof(int16_t));
                memset(&s->ref_cache[m][scan8[0] - 1 * 8],
                       (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);

                if (s->mb_x < s->mb_width - 1) {
                    AV_COPY32(s->mv_cache[m][scan8[0] + 4 - 1 * 8],
                              s->cur_pic->motion_val[m][b_xy - s->b_stride + 4]);
                    s->ref_cache[m][scan8[0] + 4 - 1 * 8] =
                        (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||
                         s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;

                    s->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;

                    AV_COPY32(s->mv_cache[m][scan8[0] - 1 - 1 * 8],
                              s->cur_pic->motion_val[m][b_xy - s->b_stride - 1]);
                    s->ref_cache[m][scan8[0] - 1 - 1 * 8] =
                        (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;

                    s->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;

                memset(&s->ref_cache[m][scan8[0] - 1 * 8 - 1],
                       PART_NOT_AVAILABLE, 8);

            if (s->pict_type != AV_PICTURE_TYPE_B)

        /* decode motion vector(s) and form prediction(s) */
        if (s->pict_type == AV_PICTURE_TYPE_P) {
            if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)

        } else { /* AV_PICTURE_TYPE_B */
                if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)

                for (i = 0; i < 4; i++)
                    memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
                           0, 4 * 2 * sizeof(int16_t));

                if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)

                for (i = 0; i < 4; i++)
                    memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
                           0, 4 * 2 * sizeof(int16_t));

        mb_type = MB_TYPE_16x16;
    } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
        int8_t *i4x4 = s->intra4x4_pred_mode + s->mb2br_xy[s->mb_xy];
        int8_t *i4x4_cache = s->intra4x4_pred_mode_cache;

        memset(s->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));

                for (i = 0; i < 4; i++)
                    s->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6 - i];
                if (s->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
                    s->left_samples_available = 0x5F5F;

                s->intra4x4_pred_mode_cache[4 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 0];
                s->intra4x4_pred_mode_cache[5 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 1];
                s->intra4x4_pred_mode_cache[6 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 2];
                s->intra4x4_pred_mode_cache[7 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 3];

                if (s->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
                    s->top_samples_available = 0x33FF;

            /* decode prediction codes for luma blocks */
            for (i = 0; i < 16; i += 2) {
                vlc = get_interleaved_ue_golomb(&s->gb_slice);

                    av_log(s->avctx, AV_LOG_ERROR,
                           "luma prediction:%"PRIu32"\n", vlc);

                left = &s->intra4x4_pred_mode_cache[scan8[i] - 1];
                top = &s->intra4x4_pred_mode_cache[scan8[i] - 8];

                left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
                left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];

                if (left[1] == -1 || left[2] == -1) {
                    av_log(s->avctx, AV_LOG_ERROR, "weird prediction\n");

        } else { /* mb_type == 33, DC_128_PRED block type */
            for (i = 0; i < 4; i++)
                memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);

        AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
        i4x4[4] = i4x4_cache[7 + 8 * 3];
        i4x4[5] = i4x4_cache[7 + 8 * 2];
        i4x4[6] = i4x4_cache[7 + 8 * 1];

            ff_h264_check_intra4x4_pred_mode(s->intra4x4_pred_mode_cache,
                                             s->avctx, s->top_samples_available,
                                             s->left_samples_available);

            s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
            s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;

            for (i = 0; i < 4; i++)
                memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);

            s->top_samples_available = 0x33FF;
            s->left_samples_available = 0x5F5F;

        mb_type = MB_TYPE_INTRA4x4;
    } else { /* INTRA16x16 */
        dir = ff_h264_i_mb_type_info[mb_type - 8].pred_mode;
        dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;

        if ((s->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
                                                                     s->left_samples_available, dir, 0)) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
            return s->intra16x16_pred_mode;

        cbp = ff_h264_i_mb_type_info[mb_type - 8].cbp;
        mb_type = MB_TYPE_INTRA16x16;

    if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
        for (i = 0; i < 4; i++)
            memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
                   0, 4 * 2 * sizeof(int16_t));
        if (s->pict_type == AV_PICTURE_TYPE_B) {
            for (i = 0; i < 4; i++)
                memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
                       0, 4 * 2 * sizeof(int16_t));

    if (!IS_INTRA4x4(mb_type)) {
        memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy], DC_PRED, 8);

    if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
        memset(s->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));

    if (!IS_INTRA16x16(mb_type) &&
        (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
        if ((vlc = get_interleaved_ue_golomb(&s->gb_slice)) >= 48U) {
            av_log(s->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);

        cbp = IS_INTRA(mb_type) ? ff_h264_golomb_to_intra4x4_cbp[vlc]
                                : ff_h264_golomb_to_inter_cbp[vlc];

    if (IS_INTRA16x16(mb_type) ||
        (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
        s->qscale += get_interleaved_se_golomb(&s->gb_slice);

        if (s->qscale > 31u) {
            av_log(s->avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);

    if (IS_INTRA16x16(mb_type)) {
        AV_ZERO128(s->mb_luma_dc[0] + 0);
        AV_ZERO128(s->mb_luma_dc[0] + 8);
        if (svq3_decode_block(&s->gb_slice, s->mb_luma_dc[0], 0, 1)) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "error while decoding intra luma dc\n");

        const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
        const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);

        for (i = 0; i < 4; i++)
            if ((cbp & (1 << i))) {
                for (j = 0; j < 4; j++) {
                    k = index ? (1 * (j & 1) + 2 * (i & 1) +
                                 2 * (j & 2) + 4 * (i & 2))
                    s->non_zero_count_cache[scan8[k]] = 1;

                    if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], index, type)) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "error while decoding block\n");

            for (i = 1; i < 3; ++i)
                if (svq3_decode_block(&s->gb_slice, &s->mb[16 * 16 * i], 0, 3)) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "error while decoding chroma dc block\n");

                for (i = 1; i < 3; i++) {
                    for (j = 0; j < 4; j++) {

                        s->non_zero_count_cache[scan8[k]] = 1;

                        if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], 1, 1)) {
                            av_log(s->avctx, AV_LOG_ERROR,
                                   "error while decoding chroma ac block\n");

    s->cur_pic->mb_type[mb_xy] = mb_type;

    if (IS_INTRA(mb_type))
        s->chroma_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
                                                            s->left_samples_available, DC_PRED8x8, 1);
static int svq3_decode_slice_header(AVCodecContext *avctx)
    SVQ3Context *s = avctx->priv_data;
    const int mb_xy = s->mb_xy;

    header = get_bits(&s->gb, 8);

    if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
        av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);

        int slice_bits, slice_bytes, slice_length;
        int length = header >> 5 & 3;

        slice_length = show_bits(&s->gb, 8 * length);
        slice_bits = slice_length * 8;
        slice_bytes = slice_length + length - 1;

        skip_bits(&s->gb, 8);

        av_fast_malloc(&s->slice_buf, &s->slice_size, slice_bytes + AV_INPUT_BUFFER_PADDING_SIZE);
            return AVERROR(ENOMEM);

        if (slice_bytes * 8LL > get_bits_left(&s->gb)) {
            av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
            return AVERROR_INVALIDDATA;

        memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);

        if (s->watermark_key) {
            uint32_t header = AV_RL32(&s->slice_buf[1]);
            AV_WL32(&s->slice_buf[1], header ^ s->watermark_key);
        }
        init_get_bits(&s->gb_slice, s->slice_buf, slice_bits);

            memmove(s->slice_buf, &s->slice_buf[slice_length], length - 1);

        skip_bits_long(&s->gb, slice_bytes * 8);

    if ((slice_id = get_interleaved_ue_golomb(&s->gb_slice)) >= 3) {
        av_log(s->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);

    if (get_bits1(&s->gb_slice)) {
        avpriv_report_missing_feature(s->avctx, "Media key encryption");
        return AVERROR_PATCHWELCOME;

    s->slice_type = ff_h264_golomb_to_pict_type[slice_id];

    if ((header & 0x9F) == 2) {
        i = (s->mb_num < 64) ? 5 : av_log2(s->mb_num - 1);
        get_bits(&s->gb_slice, i);
    }

    s->slice_num = get_bits(&s->gb_slice, 8);
    s->qscale = get_bits(&s->gb_slice, 5);
    s->adaptive_quant = get_bits1(&s->gb_slice);

    /* unknown fields */
    skip_bits1(&s->gb_slice);

    if (s->has_watermark)
        skip_bits1(&s->gb_slice);

    skip_bits1(&s->gb_slice);
    skip_bits(&s->gb_slice, 2);

    if (skip_1stop_8data_bits(&s->gb_slice) < 0)
        return AVERROR_INVALIDDATA;

    /* reset intra predictors and invalidate motion vector references */
        memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - 1] + 3,
               -1, 4 * sizeof(int8_t));
        memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_x],
               -1, 8 * sizeof(int8_t) * s->mb_x);

        memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_stride],
               -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));

            s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
static void init_dequant4_coeff_table(SVQ3Context *s)
    const int max_qp = 51;

    for (q = 0; q < max_qp + 1; q++) {
        int shift = ff_h264_quant_div6[q] + 2;
        int idx = ff_h264_quant_rem6[q];
        for (x = 0; x < 16; x++)
            s->dequant4_coeff[q][(x >> 2) | ((x << 2) & 0xF)] =
                ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * 16) << shift;
static av_cold int svq3_decode_init(AVCodecContext *avctx)
    SVQ3Context *s = avctx->priv_data;
    unsigned char *extradata;
    unsigned char *extradata_end;
    int marker_found = 0;

    s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
    s->last_pic = av_mallocz(sizeof(*s->last_pic));
    s->next_pic = av_mallocz(sizeof(*s->next_pic));
    if (!s->next_pic || !s->last_pic || !s->cur_pic) {
        ret = AVERROR(ENOMEM);

    s->cur_pic->f = av_frame_alloc();
    s->last_pic->f = av_frame_alloc();
    s->next_pic->f = av_frame_alloc();
    if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
        return AVERROR(ENOMEM);

    ff_h264dsp_init(&s->h264dsp, 8, 1);
    ff_h264_pred_init(&s->hpc, AV_CODEC_ID_SVQ3, 8, 1);
    ff_videodsp_init(&s->vdsp, 8);

    avctx->bits_per_raw_sample = 8;

    ff_hpeldsp_init(&s->hdsp, avctx->flags);
    ff_tpeldsp_init(&s->tdsp);

    avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
    avctx->color_range = AVCOL_RANGE_JPEG;

    s->halfpel_flag = 1;
    s->thirdpel_flag = 1;
    s->has_watermark = 0;

    /* prowl for the "SEQH" marker in the extradata */
    extradata = (unsigned char *)avctx->extradata;
    extradata_end = avctx->extradata + avctx->extradata_size;

    for (m = 0; m + 8 < avctx->extradata_size; m++) {
        if (!memcmp(extradata, "SEQH", 4)) {

    /* if a match was found, parse the extra data */
        int frame_size_code;
        int unk0, unk1, unk2, unk3, unk4;

        size = AV_RB32(&extradata[4]);
        if (size > extradata_end - extradata - 8) {
            ret = AVERROR_INVALIDDATA;

        init_get_bits(&gb, extradata + 8, size * 8);

        /* 'frame size code' and optional 'width, height' */
        frame_size_code = get_bits(&gb, 3);
        switch (frame_size_code) {
            avctx->height = 120;
            avctx->height = 144;
            avctx->height = 288;
            avctx->height = 576;
            avctx->height = 180;
            avctx->height = 240;
            avctx->width = get_bits(&gb, 12);
            avctx->height = get_bits(&gb, 12);

        s->halfpel_flag = get_bits1(&gb);
        s->thirdpel_flag = get_bits1(&gb);

        /* unknown fields */
        unk0 = get_bits1(&gb);
        unk1 = get_bits1(&gb);
        unk2 = get_bits1(&gb);
        unk3 = get_bits1(&gb);

        s->low_delay = get_bits1(&gb);

        unk4 = get_bits1(&gb);

        av_log(avctx, AV_LOG_DEBUG, "Unknown fields %d %d %d %d %d\n",
               unk0, unk1, unk2, unk3, unk4);

        if (skip_1stop_8data_bits(&gb) < 0) {
            ret = AVERROR_INVALIDDATA;

        s->has_watermark = get_bits1(&gb);
        avctx->has_b_frames = !s->low_delay;
        if (s->has_watermark) {
            unsigned watermark_width = get_interleaved_ue_golomb(&gb);
            unsigned watermark_height = get_interleaved_ue_golomb(&gb);
            int u1 = get_interleaved_ue_golomb(&gb);
            int u2 = get_bits(&gb, 8);
            int u3 = get_bits(&gb, 2);
            int u4 = get_interleaved_ue_golomb(&gb);
            unsigned long buf_len = watermark_width *
                                    watermark_height * 4;
            int offset = get_bits_count(&gb) + 7 >> 3;

            if (watermark_height <= 0 ||
                (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height) {

            buf = av_malloc(buf_len);
                ret = AVERROR(ENOMEM);

            av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
                   watermark_width, watermark_height);
            av_log(avctx, AV_LOG_DEBUG,
                   "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
                   u1, u2, u3, u4, offset);
            if (uncompress(buf, &buf_len, extradata + 8 + offset,
                           size - offset) != Z_OK) {
                av_log(avctx, AV_LOG_ERROR,
                       "could not uncompress watermark logo\n");

            s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
            s->watermark_key = s->watermark_key << 16 | s->watermark_key;
            av_log(avctx, AV_LOG_DEBUG,
                   "watermark key %#"PRIx32"\n", s->watermark_key);
            av_log(avctx, AV_LOG_ERROR,
                   "this SVQ3 file contains a watermark which requires zlib support compiled in\n");
    s->mb_width = (avctx->width + 15) / 16;
    s->mb_height = (avctx->height + 15) / 16;
    s->mb_stride = s->mb_width + 1;
    s->mb_num = s->mb_width * s->mb_height;
    s->b_stride = 4 * s->mb_width;
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->intra4x4_pred_mode = av_mallocz(s->mb_stride * 2 * 8);
    if (!s->intra4x4_pred_mode)
        return AVERROR(ENOMEM);

    s->mb2br_xy = av_mallocz(s->mb_stride * (s->mb_height + 1) *
                             sizeof(*s->mb2br_xy));
        return AVERROR(ENOMEM);

    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++) {
            const int mb_xy = x + y * s->mb_stride;

            s->mb2br_xy[mb_xy] = 8 * (mb_xy % (2 * s->mb_stride));
        }

    init_dequant4_coeff_table(s);

    svq3_decode_end(avctx);
static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
    for (i = 0; i < 2; i++) {
        av_buffer_unref(&pic->motion_val_buf[i]);
        av_buffer_unref(&pic->ref_index_buf[i]);
    }
    av_buffer_unref(&pic->mb_type_buf);

    av_frame_unref(pic->f);
static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
    SVQ3Context *s = avctx->priv_data;
    const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b4_stride = s->mb_width * 4 + 1;
    const int b4_array_size = b4_stride * s->mb_height * 4;

    if (!pic->motion_val_buf[0]) {
        pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) * sizeof(uint32_t));
        if (!pic->mb_type_buf)
            return AVERROR(ENOMEM);
        pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;

        for (i = 0; i < 2; i++) {
            pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
            pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
                ret = AVERROR(ENOMEM);

            pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
            pic->ref_index[i] = pic->ref_index_buf[i]->data;

    ret = ff_get_buffer(avctx, pic->f,
                        (s->pict_type != AV_PICTURE_TYPE_B) ?
                        AV_GET_BUFFER_FLAG_REF : 0);

    if (!s->edge_emu_buffer) {
        s->edge_emu_buffer = av_mallocz_array(pic->f->linesize[0], 17);
        if (!s->edge_emu_buffer)
            return AVERROR(ENOMEM);

    free_picture(avctx, pic);
static int svq3_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
    SVQ3Context *s = avctx->priv_data;
    int buf_size = avpkt->size;

    /* special case for last picture */
    if (buf_size == 0) {
        if (s->next_pic->f->data[0] && !s->low_delay && !s->last_frame_output) {
            ret = av_frame_ref(data, s->next_pic->f);
            s->last_frame_output = 1;

    s->mb_x = s->mb_y = s->mb_xy = 0;

    if (s->watermark_key) {
        av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
            return AVERROR(ENOMEM);
        memcpy(s->buf, avpkt->data, buf_size);

    ret = init_get_bits(&s->gb, buf, 8 * buf_size);

    if (svq3_decode_slice_header(avctx))

    s->pict_type = s->slice_type;

    if (s->pict_type != AV_PICTURE_TYPE_B)
        FFSWAP(SVQ3Frame*, s->next_pic, s->last_pic);

    av_frame_unref(s->cur_pic->f);

    /* for skipping the frame */
    s->cur_pic->f->pict_type = s->pict_type;
    s->cur_pic->f->key_frame = (s->pict_type == AV_PICTURE_TYPE_I);

    ret = get_buffer(avctx, s->cur_pic);

    for (i = 0; i < 16; i++) {
        s->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
        s->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
    }
    for (i = 0; i < 16; i++) {
        s->block_offset[16 + i] =
        s->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
        s->block_offset[48 + 16 + i] =
        s->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
    }

    if (s->pict_type != AV_PICTURE_TYPE_I) {
        if (!s->last_pic->f->data[0]) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            av_frame_unref(s->last_pic->f);
            ret = get_buffer(avctx, s->last_pic);
            memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
            memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
                   s->last_pic->f->linesize[1]);
            memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
                   s->last_pic->f->linesize[2]);
        }

        if (s->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            av_frame_unref(s->next_pic->f);
            ret = get_buffer(avctx, s->next_pic);
            memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
            memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
                   s->next_pic->f->linesize[1]);
            memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
                   s->next_pic->f->linesize[2]);
        }
    }

    if (avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_DEBUG,
               "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
               av_get_picture_type_char(s->pict_type),
               s->halfpel_flag, s->thirdpel_flag,
               s->adaptive_quant, s->qscale, s->slice_num);

    if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B ||
        avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I ||
        avctx->skip_frame >= AVDISCARD_ALL)
    if (s->next_p_frame_damaged) {
        if (s->pict_type == AV_PICTURE_TYPE_B)
            s->next_p_frame_damaged = 0;

    if (s->pict_type == AV_PICTURE_TYPE_B) {
        s->frame_num_offset = s->slice_num - s->prev_frame_num;

        if (s->frame_num_offset < 0)
            s->frame_num_offset += 256;
        if (s->frame_num_offset == 0 ||
            s->frame_num_offset >= s->prev_frame_num_offset) {
            av_log(s->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");

        s->prev_frame_num = s->frame_num;
        s->frame_num = s->slice_num;
        s->prev_frame_num_offset = s->frame_num - s->prev_frame_num;

        if (s->prev_frame_num_offset < 0)
            s->prev_frame_num_offset += 256;
    }

    for (m = 0; m < 2; m++) {
        for (i = 0; i < 4; i++) {
            for (j = -1; j < 4; j++)
                s->ref_cache[m][scan8[0] + 8 * i + j] = 1;
                s->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
    for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
            s->mb_xy = s->mb_x + s->mb_y * s->mb_stride;

            if ((get_bits_left(&s->gb_slice)) <= 7) {
                if (((get_bits_count(&s->gb_slice) & 7) == 0 ||
                     show_bits(&s->gb_slice, get_bits_left(&s->gb_slice) & 7) == 0)) {

                    if (svq3_decode_slice_header(avctx))

                if (s->slice_type != s->pict_type) {
                    avpriv_request_sample(avctx, "non-constant slice type");
                /* TODO: support s->mb_skip_run */
            }

            mb_type = get_interleaved_ue_golomb(&s->gb_slice);

            if (s->pict_type == AV_PICTURE_TYPE_I)
            else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
            if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "error while decoding MB %d %d\n", s->mb_x, s->mb_y);

            if (mb_type != 0 || s->cbp)

            if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay)
                s->cur_pic->mb_type[s->mb_x + s->mb_y * s->mb_stride] =
                    (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
        }

        ff_draw_horiz_band(avctx, s->cur_pic->f,
                           s->last_pic->f->data[0] ? s->last_pic->f : NULL,
                           16 * s->mb_y, 16, PICT_FRAME, 0,

    left = buf_size * 8 - get_bits_count(&s->gb_slice);

    if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {
        av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, s->mb_y, s->mb_x, left);
        //av_hex_dump(stderr, buf+buf_size-8, 8);

            av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);

    if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
        ret = av_frame_ref(data, s->cur_pic->f);
    else if (s->last_pic->f->data[0])
        ret = av_frame_ref(data, s->last_pic->f);
    /* Do not output the last pic after seeking. */
    if (s->last_pic->f->data[0] || s->low_delay)

    if (s->pict_type != AV_PICTURE_TYPE_B) {
        FFSWAP(SVQ3Frame*, s->cur_pic, s->next_pic);

        av_frame_unref(s->cur_pic->f);

static av_cold int svq3_decode_end(AVCodecContext *avctx)
    SVQ3Context *s = avctx->priv_data;

    free_picture(avctx, s->cur_pic);
    free_picture(avctx, s->next_pic);
    free_picture(avctx, s->last_pic);
    av_frame_free(&s->cur_pic->f);
    av_frame_free(&s->next_pic->f);
    av_frame_free(&s->last_pic->f);
    av_freep(&s->cur_pic);
    av_freep(&s->next_pic);
    av_freep(&s->last_pic);
    av_freep(&s->slice_buf);
    av_freep(&s->intra4x4_pred_mode);
    av_freep(&s->edge_emu_buffer);
    av_freep(&s->mb2br_xy);
AVCodec ff_svq3_decoder = {
    .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SVQ3,
    .priv_data_size = sizeof(SVQ3Context),
    .init           = svq3_decode_init,
    .close          = svq3_decode_end,
    .decode         = svq3_decode_frame,
    .capabilities   = AV_CODEC_CAP_DRAW_HORIZ_BAND |
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,