2 * Copyright (c) 2003 The FFmpeg Project
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
45 #include "mpegvideo.h"
48 #include "h264data.h" //FIXME FIXME FIXME
50 #include "h264_mvpred.h"
52 #include "rectangle.h"
53 #include "vdpau_internal.h"
72 uint32_t watermark_key;
/* Motion-vector precision modes signaled per inter macroblock.
 * PREDICT_MODE reuses motion from the co-located next-picture MB
 * (used for B-frame direct-style prediction below). */
77 #define FULLPEL_MODE 1
78 #define HALFPEL_MODE 2
79 #define THIRDPEL_MODE 3
80 #define PREDICT_MODE 4
82 /* dual scan (from some older h264 draft)
/* SVQ3-specific scan order for 4x4 blocks; each entry is an
 * x + 4*y offset into the 16-coefficient block. Selected in
 * svq3_decode_block() as scan_patterns[2]. */
91 static const uint8_t svq3_scan[16] = {
92 0+0*4, 1+0*4, 2+0*4, 2+1*4,
93 2+2*4, 3+0*4, 3+1*4, 3+2*4,
94 0+1*4, 0+2*4, 1+1*4, 1+2*4,
95 0+3*4, 1+3*4, 2+3*4, 3+3*4,
/* Maps a decoded luma-prediction VLC value to a pair of selector
 * indices; each pair is looked up in svq3_pred_1[][] to produce the
 * two intra 4x4 prediction modes for a block pair (see the luma
 * prediction loop in svq3_decode_mb()). The rows appear to walk the
 * (0..4, 0..4) index space in anti-diagonal order — first/last rows
 * are outside this excerpt, so the full 25-entry layout is assumed. */
98 static const uint8_t svq3_pred_0[25][2] = {
101 { 0, 2 }, { 1, 1 }, { 2, 0 },
102 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
103 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
104 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
105 { 2, 4 }, { 3, 3 }, { 4, 2 },
/* Candidate intra 4x4 prediction modes, indexed as
 * [top_mode + 1][left_mode + 1][selector], where top/left are the
 * neighbouring blocks' modes (-1 = unavailable, hence the +1 bias
 * applied at the lookup site in svq3_decode_mb()). A stored value of
 * -1 marks an invalid combination, which the decoder treats as a
 * "weird prediction" error. */
110 static const int8_t svq3_pred_1[6][6][5] = {
111 { { 2,-1,-1,-1,-1 }, { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 },
112 { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 }, { 1, 2,-1,-1,-1 } },
113 { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
114 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
115 { { 2, 0,-1,-1,-1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
116 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
117 { { 2, 0,-1,-1,-1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
118 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
119 { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
120 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
121 { { 0, 2,-1,-1,-1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
122 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
/* Run/level pairs for the short DCT-coefficient VLCs (vlc < 16),
 * indexed by [intra][vlc] in svq3_decode_block(). Entry 0 is a
 * placeholder; longer codes are computed arithmetically instead. */
125 static const struct { uint8_t run; uint8_t level; } svq3_dct_tables[2][16] = {
126 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
127 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
128 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
129 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
/* Dequantization multipliers for qscale 0..31; applied with a
 * +0x80000 rounding bias and >>20 shift in the IDCT routines below. */
132 static const uint32_t svq3_dequant_coeff[32] = {
133 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
134 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
135 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
136 61694, 68745, 77615, 89113,100253,109366,126635,141533
/* Dequantize and inverse-transform the 4x4 luma DC block.
 * Applies the SVQ3 13/17/7 butterfly (a scaled variant of the H.264
 * 4x4 transform) first across rows, then across columns, scattering
 * the results to the DC positions of the 16 luma blocks via x_offset
 * and the stride multiples below.
 * NOTE(review): several interior lines (temp[] declaration, loop
 * headers, first-pass stores) are outside this excerpt. */
139 void ff_svq3_luma_dc_dequant_idct_c(DCTELEM *output, DCTELEM *input, int qp){
140 const int qmul = svq3_dequant_coeff[qp];
144 static const uint8_t x_offset[4]={0, 1*stride, 4*stride, 5*stride};
/* horizontal pass over input rows */
147 const int z0 = 13*(input[4*i+0] + input[4*i+2]);
148 const int z1 = 13*(input[4*i+0] - input[4*i+2]);
149 const int z2 = 7* input[4*i+1] - 17*input[4*i+3];
150 const int z3 = 17* input[4*i+1] + 7*input[4*i+3];
/* vertical pass over temp columns; dequantize with rounding (>>20) */
159 const int offset= x_offset[i];
160 const int z0= 13*(temp[4*0+i] + temp[4*2+i]);
161 const int z1= 13*(temp[4*0+i] - temp[4*2+i]);
162 const int z2= 7* temp[4*1+i] - 17*temp[4*3+i];
163 const int z3= 17* temp[4*1+i] + 7*temp[4*3+i];
165 output[stride* 0+offset] = ((z0 + z3)*qmul + 0x80000) >> 20;
166 output[stride* 2+offset] = ((z1 + z2)*qmul + 0x80000) >> 20;
167 output[stride* 8+offset] = ((z1 - z2)*qmul + 0x80000) >> 20;
168 output[stride*10+offset] = ((z0 - z3)*qmul + 0x80000) >> 20;
/* Dequantize, inverse-transform a 4x4 block and add the residual to
 * dst, clamping through the ff_cropTbl lookup. The dc parameter (not
 * fully visible here) selects special DC handling: dc == 1 uses a
 * fixed 1538 scale, otherwise the quantized DC is rescaled — TODO
 * confirm against the full function, the signature line is truncated. */
173 void ff_svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp,
176 const int qmul = svq3_dequant_coeff[qp];
178 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
181 dc = 13*13*((dc == 1) ? 1538*block[0] : ((qmul*(block[0] >> 3)) / 2));
/* horizontal butterfly pass, in place */
185 for (i = 0; i < 4; i++) {
186 const int z0 = 13*(block[0 + 4*i] + block[2 + 4*i]);
187 const int z1 = 13*(block[0 + 4*i] - block[2 + 4*i]);
188 const int z2 = 7* block[1 + 4*i] - 17*block[3 + 4*i];
189 const int z3 = 17* block[1 + 4*i] + 7*block[3 + 4*i];
191 block[0 + 4*i] = z0 + z3;
192 block[1 + 4*i] = z1 + z2;
193 block[2 + 4*i] = z1 - z2;
194 block[3 + 4*i] = z0 - z3;
/* vertical pass: dequantize, fold in DC + rounding, add to dst */
197 for (i = 0; i < 4; i++) {
198 const int z0 = 13*(block[i + 4*0] + block[i + 4*2]);
199 const int z1 = 13*(block[i + 4*0] - block[i + 4*2]);
200 const int z2 = 7* block[i + 4*1] - 17*block[i + 4*3];
201 const int z3 = 17* block[i + 4*1] + 7*block[i + 4*3];
202 const int rr = (dc + 0x80000);
204 dst[i + stride*0] = cm[ dst[i + stride*0] + (((z0 + z3)*qmul + rr) >> 20) ];
205 dst[i + stride*1] = cm[ dst[i + stride*1] + (((z1 + z2)*qmul + rr) >> 20) ];
206 dst[i + stride*2] = cm[ dst[i + stride*2] + (((z1 - z2)*qmul + rr) >> 20) ];
207 dst[i + stride*3] = cm[ dst[i + stride*3] + (((z0 - z3)*qmul + rr) >> 20) ];
/* Decode run/level DCT coefficients for one block from exp-golomb
 * codes and scatter them through the scan table chosen by type
 * (0 = luma DC, 1 = zigzag AC, 2 = SVQ3 scan, 3 = chroma DC).
 * Small VLC values use the svq3_dct_tables lookup; larger ones are
 * derived arithmetically. Returns nonzero on an invalid code or a
 * run overflowing the index limit (error paths are outside this
 * excerpt). */
211 static inline int svq3_decode_block(GetBitContext *gb, DCTELEM *block,
212 int index, const int type)
214 static const uint8_t *const scan_patterns[4] =
215 { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
217 int run, level, sign, vlc, limit;
/* (3*type)>>2 is 0 only for type==0 (luma DC); picks the table row */
218 const int intra = (3 * type) >> 2;
219 const uint8_t *const scan = scan_patterns[type];
221 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
222 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
224 if (vlc == INVALID_VLC)
/* low bit of vlc carries the sign; magnitude is (vlc+1)>>1 */
227 sign = (vlc & 0x1) - 1;
228 vlc = (vlc + 1) >> 1;
234 } else if (vlc < 4) {
239 level = ((vlc + 9) >> 2) - run;
243 run = svq3_dct_tables[intra][vlc].run;
244 level = svq3_dct_tables[intra][vlc].level;
247 level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
250 level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
254 if ((index += run) >= limit)
/* apply sign: (level ^ sign) - sign negates when sign == -1 */
257 block[scan[index]] = (level ^ sign) - sign;
/* Motion-compensate one partition (width x height at x,y) from the
 * last (dir == 0) or next (dir == 1) reference picture. thirdpel
 * selects the tpel pixel functions over the regular half-pel ones;
 * avg selects averaging vs. put. Luma is done first, then — unless
 * CODEC_FLAG_GRAY — both chroma planes at half resolution. Motion
 * vectors pointing outside the edge area go through
 * emulated_edge_mc into edge_emu_buffer. */
268 static inline void svq3_mc_dir_part(MpegEncContext *s,
269 int x, int y, int width, int height,
270 int mx, int my, int dxy,
271 int thirdpel, int dir, int avg)
273 const Picture *pic = (dir == 0) ? &s->last_picture : &s->next_picture;
276 int blocksize = 2 - (width>>3); //16->0, 8->1, 4->2
/* decide whether the reference block needs edge emulation */
281 if (mx < 0 || mx >= (s->h_edge_pos - width - 1) ||
282 my < 0 || my >= (s->v_edge_pos - height - 1)) {
284 if ((s->flags & CODEC_FLAG_EMU_EDGE)) {
288 mx = av_clip (mx, -16, (s->h_edge_pos - width + 15));
289 my = av_clip (my, -16, (s->v_edge_pos - height + 15));
292 /* form component predictions */
293 dest = s->current_picture.data[0] + x + y*s->linesize;
294 src = pic->data[0] + mx + my*s->linesize;
297 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, (width + 1), (height + 1),
298 mx, my, s->h_edge_pos, s->v_edge_pos);
299 src = s->edge_emu_buffer;
302 (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->linesize, width, height);
304 (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->linesize, height);
/* chroma: halve MV (with rounding toward the block), size and pos */
306 if (!(s->flags & CODEC_FLAG_GRAY)) {
307 mx = (mx + (mx < (int) x)) >> 1;
308 my = (my + (my < (int) y)) >> 1;
309 width = (width >> 1);
310 height = (height >> 1);
313 for (i = 1; i < 3; i++) {
314 dest = s->current_picture.data[i] + (x >> 1) + (y >> 1)*s->uvlinesize;
315 src = pic->data[i] + mx + my*s->uvlinesize;
318 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->uvlinesize, (width + 1), (height + 1),
319 mx, my, (s->h_edge_pos >> 1), (s->v_edge_pos >> 1));
320 src = s->edge_emu_buffer;
323 (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->uvlinesize, width, height);
325 (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->uvlinesize, height);
/* Decode (or, for PREDICT_MODE, derive) motion vectors for every
 * partition of the current macroblock in direction dir, perform the
 * motion compensation via svq3_mc_dir_part(), update mv_cache and
 * write the vectors back to motion_val. MVs are handled in 1/6-pel
 * units internally (hence the 6* factors and /3, /6 conversions).
 * Returns negative on an invalid MV VLC. */
330 static inline int svq3_mc_dir(H264Context *h, int size, int mode, int dir,
333 int i, j, k, mx, my, dx, dy, x, y;
334 MpegEncContext *const s = (MpegEncContext *) h;
/* partition geometry packed into 'size'; 16x16, 16x8/8x16 or 8x8 */
335 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
336 const int part_height = 16 >> ((unsigned) (size + 1) / 3);
337 const int extra_width = (mode == PREDICT_MODE) ? -16*6 : 0;
338 const int h_edge_pos = 6*(s->h_edge_pos - part_width ) - extra_width;
339 const int v_edge_pos = 6*(s->v_edge_pos - part_height) - extra_width;
341 for (i = 0; i < 16; i += part_height) {
342 for (j = 0; j < 16; j += part_width) {
343 const int b_xy = (4*s->mb_x + (j >> 2)) + (4*s->mb_y + (i >> 2))*h->b_stride;
/* k maps (i,j) to the scan8-compatible cache index */
347 k = ((j >> 2) & 1) + ((i >> 1) & 2) + ((j >> 1) & 4) + (i & 8);
349 if (mode != PREDICT_MODE) {
350 pred_motion(h, k, (part_width >> 2), dir, 1, &mx, &my);
/* PREDICT_MODE: scale the co-located next-picture MV by the
 * temporal distance ratio (direct-mode style) */
352 mx = s->next_picture.motion_val[0][b_xy][0]<<1;
353 my = s->next_picture.motion_val[0][b_xy][1]<<1;
356 mx = ((mx * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
357 my = ((my * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
359 mx = ((mx * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
360 my = ((my * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
364 /* clip motion vector prediction to frame border */
365 mx = av_clip(mx, extra_width - 6*x, h_edge_pos - 6*x);
366 my = av_clip(my, extra_width - 6*y, v_edge_pos - 6*y);
368 /* get (optional) motion vector differential */
369 if (mode == PREDICT_MODE) {
372 dy = svq3_get_se_golomb(&s->gb);
373 dx = svq3_get_se_golomb(&s->gb);
375 if (dx == INVALID_VLC || dy == INVALID_VLC) {
376 av_log(h->s.avctx, AV_LOG_ERROR, "invalid MV vlc\n");
381 /* compute motion vector */
382 if (mode == THIRDPEL_MODE) {
384 mx = ((mx + 1)>>1) + dx;
385 my = ((my + 1)>>1) + dy;
/* fx,fy = fullpel part; +0x3000 biases the /3 toward -inf */
386 fx = ((unsigned)(mx + 0x3000))/3 - 0x1000;
387 fy = ((unsigned)(my + 0x3000))/3 - 0x1000;
388 dxy = (mx - 3*fx) + 4*(my - 3*fy);
390 svq3_mc_dir_part(s, x, y, part_width, part_height, fx, fy, dxy, 1, dir, avg);
393 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
394 mx = ((unsigned)(mx + 1 + 0x3000))/3 + dx - 0x1000;
395 my = ((unsigned)(my + 1 + 0x3000))/3 + dy - 0x1000;
396 dxy = (mx&1) + 2*(my&1);
398 svq3_mc_dir_part(s, x, y, part_width, part_height, mx>>1, my>>1, dxy, 0, dir, avg);
402 mx = ((unsigned)(mx + 3 + 0x6000))/6 + dx - 0x1000;
403 my = ((unsigned)(my + 3 + 0x6000))/6 + dy - 0x1000;
405 svq3_mc_dir_part(s, x, y, part_width, part_height, mx, my, 0, 0, dir, avg);
410 /* update mv_cache */
411 if (mode != PREDICT_MODE) {
412 int32_t mv = pack16to32(mx,my);
414 if (part_height == 8 && i < 8) {
415 *(int32_t *) h->mv_cache[dir][scan8[k] + 1*8] = mv;
417 if (part_width == 8 && j < 8) {
418 *(int32_t *) h->mv_cache[dir][scan8[k] + 1 + 1*8] = mv;
421 if (part_width == 8 && j < 8) {
422 *(int32_t *) h->mv_cache[dir][scan8[k] + 1] = mv;
424 if (part_width == 4 || part_height == 4) {
425 *(int32_t *) h->mv_cache[dir][scan8[k]] = mv;
429 /* write back motion vectors */
430 fill_rectangle(s->current_picture.motion_val[dir][b_xy], part_width>>2, part_height>>2, h->b_stride, pack16to32(mx,my), 4);
/* Decode one macroblock of the given SVQ3 mb_type:
 *   0       SKIP (direct-predicted in B frames)
 *   1..7    INTER with fullpel/halfpel/thirdpel MVs
 *   8, 33   INTRA4x4 (33 = all-DC variant)
 *   9..32   INTRA16x16
 * Fills the H264Context caches (mv_cache, ref_cache,
 * intra4x4_pred_mode_cache, non_zero_count_cache), decodes CBP, qp
 * delta and residual coefficients, and stores the resulting H.264
 * MB_TYPE_* in current_picture.mb_type. Returns nonzero on a
 * bitstream error (the error-return lines are elided here). */
437 static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
439 H264Context *h = &svq3->h;
440 int i, j, k, m, dir, mode;
444 MpegEncContext *const s = (MpegEncContext *) h;
445 const int mb_xy = h->mb_xy;
446 const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;
448 h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
449 h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
450 h->topright_samples_available = 0xFFFF;
452 if (mb_type == 0) { /* SKIP */
453 if (s->pict_type == AV_PICTURE_TYPE_P || s->next_picture.mb_type[mb_xy] == -1) {
454 svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0);
456 if (s->pict_type == AV_PICTURE_TYPE_B) {
457 svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 1, 1);
460 mb_type = MB_TYPE_SKIP;
/* B-frame skip: derive both directions from co-located MB */
462 mb_type = FFMIN(s->next_picture.mb_type[mb_xy], 6);
463 if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0)
465 if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0)
468 mb_type = MB_TYPE_16x16;
470 } else if (mb_type < 8) { /* INTER */
/* MV precision mode: a flag bit refines between half/third-pel
 * when both capabilities are signaled in the header */
471 if (svq3->thirdpel_flag && svq3->halfpel_flag == !get_bits1 (&s->gb)) {
472 mode = THIRDPEL_MODE;
473 } else if (svq3->halfpel_flag && svq3->thirdpel_flag == !get_bits1 (&s->gb)) {
480 /* note ref_cache should contain here:
/* populate mv_cache/ref_cache from left, top, top-right and
 * top-left neighbours for both directions */
488 for (m = 0; m < 2; m++) {
489 if (s->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1]+6] != -1) {
490 for (i = 0; i < 4; i++) {
491 *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - 1 + i*h->b_stride];
494 for (i = 0; i < 4; i++) {
495 *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = 0;
499 memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t));
500 memset(&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
502 if (s->mb_x < (s->mb_width - 1)) {
503 *(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride + 4];
504 h->ref_cache[m][scan8[0] + 4 - 1*8] =
505 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride + 1]+6] == -1 ||
506 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride ] ] == -1) ? PART_NOT_AVAILABLE : 1;
508 h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE;
510 *(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride - 1];
511 h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1]+3] == -1) ? PART_NOT_AVAILABLE : 1;
513 h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE;
515 memset(&h->ref_cache[m][scan8[0] - 1*8 - 1], PART_NOT_AVAILABLE, 8);
517 if (s->pict_type != AV_PICTURE_TYPE_B)
521 /* decode motion vector(s) and form prediction(s) */
522 if (s->pict_type == AV_PICTURE_TYPE_P) {
523 if (svq3_mc_dir(h, (mb_type - 1), mode, 0, 0) < 0)
525 } else { /* AV_PICTURE_TYPE_B */
527 if (svq3_mc_dir(h, 0, mode, 0, 0) < 0)
530 for (i = 0; i < 4; i++) {
531 memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
535 if (svq3_mc_dir(h, 0, mode, 1, (mb_type == 3)) < 0)
538 for (i = 0; i < 4; i++) {
539 memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
544 mb_type = MB_TYPE_16x16;
545 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
546 memset(h->intra4x4_pred_mode_cache, -1, 8*5*sizeof(int8_t));
/* import neighbouring intra modes into the cache; -1 marks an
 * unavailable neighbour and clears the corresponding samples */
550 for (i = 0; i < 4; i++) {
551 h->intra4x4_pred_mode_cache[scan8[0] - 1 + i*8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1]+6-i];
553 if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1) {
554 h->left_samples_available = 0x5F5F;
558 h->intra4x4_pred_mode_cache[4+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+0];
559 h->intra4x4_pred_mode_cache[5+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+1];
560 h->intra4x4_pred_mode_cache[6+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+2];
561 h->intra4x4_pred_mode_cache[7+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+3];
563 if (h->intra4x4_pred_mode_cache[4+8*0] == -1) {
564 h->top_samples_available = 0x33FF;
568 /* decode prediction codes for luma blocks */
569 for (i = 0; i < 16; i+=2) {
570 vlc = svq3_get_ue_golomb(&s->gb);
573 av_log(h->s.avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
577 left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
578 top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
/* one VLC yields the modes of two 4x4 blocks via the pred tables */
580 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
581 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
583 if (left[1] == -1 || left[2] == -1){
584 av_log(h->s.avctx, AV_LOG_ERROR, "weird prediction\n");
588 } else { /* mb_type == 33, DC_128_PRED block type */
589 for (i = 0; i < 4; i++) {
590 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_PRED, 4);
594 ff_h264_write_back_intra_pred_mode(h);
597 ff_h264_check_intra4x4_pred_mode(h);
599 h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
600 h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
602 for (i = 0; i < 4; i++) {
603 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_128_PRED, 4);
606 h->top_samples_available = 0x33FF;
607 h->left_samples_available = 0x5F5F;
610 mb_type = MB_TYPE_INTRA4x4;
611 } else { /* INTRA16x16 */
612 dir = i_mb_type_info[mb_type - 8].pred_mode;
613 dir = (dir >> 1) ^ 3*(dir & 1) ^ 1;
615 if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir)) == -1){
616 av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
620 cbp = i_mb_type_info[mb_type - 8].cbp;
621 mb_type = MB_TYPE_INTRA16x16;
/* intra MBs in P/B frames invalidate their motion vectors */
624 if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
625 for (i = 0; i < 4; i++) {
626 memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
628 if (s->pict_type == AV_PICTURE_TYPE_B) {
629 for (i = 0; i < 4; i++) {
630 memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
634 if (!IS_INTRA4x4(mb_type)) {
635 memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy], DC_PRED, 8);
637 if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
638 memset(h->non_zero_count_cache + 8, 0, 4*9*sizeof(uint8_t));
639 s->dsp.clear_blocks(h->mb);
/* coded block pattern (golomb index into the H.264 cbp tables) */
642 if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
643 if ((vlc = svq3_get_ue_golomb(&s->gb)) >= 48){
644 av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
648 cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc] : golomb_to_inter_cbp[vlc];
650 if (IS_INTRA16x16(mb_type) || (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
651 s->qscale += svq3_get_se_golomb(&s->gb);
654 av_log(h->s.avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
658 if (IS_INTRA16x16(mb_type)) {
659 AV_ZERO128(h->mb_luma_dc+0);
660 AV_ZERO128(h->mb_luma_dc+8);
661 if (svq3_decode_block(&s->gb, h->mb_luma_dc, 0, 1)){
662 av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding intra luma dc\n");
/* luma residual: type 2 (SVQ3 scan) for low-qp intra4x4 */
668 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
669 const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
671 for (i = 0; i < 4; i++) {
672 if ((cbp & (1 << i))) {
673 for (j = 0; j < 4; j++) {
674 k = index ? ((j&1) + 2*(i&1) + 2*(j&2) + 4*(i&2)) : (4*i + j);
675 h->non_zero_count_cache[ scan8[k] ] = 1;
677 if (svq3_decode_block(&s->gb, &h->mb[16*k], index, type)){
678 av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding block\n");
/* chroma DC then AC for both planes */
686 for (i = 0; i < 2; ++i) {
687 if (svq3_decode_block(&s->gb, &h->mb[16*(16 + 4*i)], 0, 3)){
688 av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma dc block\n");
694 for (i = 0; i < 8; i++) {
695 h->non_zero_count_cache[ scan8[16+i] ] = 1;
697 if (svq3_decode_block(&s->gb, &h->mb[16*(16 + i)], 1, 1)){
698 av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma ac block\n");
707 s->current_picture.mb_type[mb_xy] = mb_type;
709 if (IS_INTRA(mb_type)) {
710 h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8);
/* Parse one SVQ3 slice header: validate the 8-bit header byte,
 * compute next_slice_index from the embedded slice length, undo the
 * watermark XOR scrambling if a watermark key is set, read slice
 * type / slice number / qscale / adaptive-quant flag, and reset the
 * intra predictors for the MBs above/left of the slice start.
 * Returns nonzero on an unsupported header or out-of-range values
 * (the return statements are outside this excerpt). */
716 static int svq3_decode_slice_header(AVCodecContext *avctx)
718 SVQ3Context *svq3 = avctx->priv_data;
719 H264Context *h = &svq3->h;
720 MpegEncContext *s = &h->s;
721 const int mb_xy = h->mb_xy;
724 header = get_bits(&s->gb, 8);
726 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
728 av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
/* bits 5-6 give the byte count of the slice-length field */
731 int length = (header >> 5) & 3;
733 svq3->next_slice_index = get_bits_count(&s->gb) + 8*show_bits(&s->gb, 8*length) + 8*length;
735 if (svq3->next_slice_index > s->gb.size_in_bits) {
736 av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
740 s->gb.size_in_bits = svq3->next_slice_index - 8*(length - 1);
741 skip_bits(&s->gb, 8);
/* descramble the 4 watermark-protected bytes in place */
743 if (svq3->watermark_key) {
744 uint32_t header = AV_RL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1]);
745 AV_WL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1], header ^ svq3->watermark_key);
748 memcpy((uint8_t *) &s->gb.buffer[get_bits_count(&s->gb) >> 3],
749 &s->gb.buffer[s->gb.size_in_bits >> 3], (length - 1));
751 skip_bits_long(&s->gb, 0);
754 if ((i = svq3_get_ue_golomb(&s->gb)) == INVALID_VLC || i >= 3){
755 av_log(h->s.avctx, AV_LOG_ERROR, "illegal slice type %d \n", i);
759 h->slice_type = golomb_to_pict_type[i];
761 if ((header & 0x9F) == 2) {
/* header type 2 carries an explicit MB start address */
762 i = (s->mb_num < 64) ? 6 : (1 + av_log2 (s->mb_num - 1));
763 s->mb_skip_run = get_bits(&s->gb, i) - (s->mb_x + (s->mb_y * s->mb_width));
769 h->slice_num = get_bits(&s->gb, 8);
770 s->qscale = get_bits(&s->gb, 5);
771 s->adaptive_quant = get_bits1(&s->gb);
776 if (svq3->unknown_flag) {
781 skip_bits(&s->gb, 2);
783 while (get_bits1(&s->gb)) {
784 skip_bits(&s->gb, 8);
787 /* reset intra predictors and invalidate motion vector references */
789 memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy - 1 ]+3, -1, 4*sizeof(int8_t));
790 memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy - s->mb_x] , -1, 8*sizeof(int8_t)*s->mb_x);
793 memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy - s->mb_stride], -1, 8*sizeof(int8_t)*(s->mb_width - s->mb_x));
796 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1]+3] = -1;
/* Decoder init: set up the embedded H.264 context, then locate and
 * parse the "SEQH" sequence header in the QuickTime extradata —
 * frame size code (or explicit 12-bit dimensions), halfpel/thirdpel
 * capability flags, low_delay, and the optional zlib-compressed
 * watermark whose checksum becomes the slice-descrambling key.
 * Finally initializes the MPV common state and H.264 tables. */
803 static av_cold int svq3_decode_init(AVCodecContext *avctx)
805 SVQ3Context *svq3 = avctx->priv_data;
806 H264Context *h = &svq3->h;
807 MpegEncContext *s = &h->s;
809 unsigned char *extradata;
812 if (ff_h264_decode_init(avctx) < 0)
815 s->flags = avctx->flags;
816 s->flags2 = avctx->flags2;
817 s->unrestricted_mv = 1;
819 avctx->pix_fmt = avctx->codec->pix_fmts[0];
821 if (!s->context_initialized) {
822 h->chroma_qp[0] = h->chroma_qp[1] = 4;
824 svq3->halfpel_flag = 1;
825 svq3->thirdpel_flag = 1;
826 svq3->unknown_flag = 0;
828 /* prowl for the "SEQH" marker in the extradata */
829 extradata = (unsigned char *)avctx->extradata;
830 for (m = 0; m < avctx->extradata_size; m++) {
831 if (!memcmp(extradata, "SEQH", 4))
836 /* if a match was found, parse the extra data */
837 if (extradata && !memcmp(extradata, "SEQH", 4)) {
842 size = AV_RB32(&extradata[4]);
843 init_get_bits(&gb, extradata + 8, size*8);
845 /* 'frame size code' and optional 'width, height' */
846 frame_size_code = get_bits(&gb, 3);
847 switch (frame_size_code) {
848 case 0: avctx->width = 160; avctx->height = 120; break;
849 case 1: avctx->width = 128; avctx->height = 96; break;
850 case 2: avctx->width = 176; avctx->height = 144; break;
851 case 3: avctx->width = 352; avctx->height = 288; break;
852 case 4: avctx->width = 704; avctx->height = 576; break;
853 case 5: avctx->width = 240; avctx->height = 180; break;
854 case 6: avctx->width = 320; avctx->height = 240; break;
/* code 7: explicit dimensions follow */
856 avctx->width = get_bits(&gb, 12);
857 avctx->height = get_bits(&gb, 12);
861 svq3->halfpel_flag = get_bits1(&gb);
862 svq3->thirdpel_flag = get_bits1(&gb);
870 s->low_delay = get_bits1(&gb);
/* skip variable-length unknown extension bytes */
875 while (get_bits1(&gb)) {
879 svq3->unknown_flag = get_bits1(&gb);
880 avctx->has_b_frames = !s->low_delay;
881 if (svq3->unknown_flag) {
/* watermark present: inflate it and derive the XOR key */
883 unsigned watermark_width = svq3_get_ue_golomb(&gb);
884 unsigned watermark_height = svq3_get_ue_golomb(&gb);
885 int u1 = svq3_get_ue_golomb(&gb);
886 int u2 = get_bits(&gb, 8);
887 int u3 = get_bits(&gb, 2);
888 int u4 = svq3_get_ue_golomb(&gb);
889 unsigned long buf_len = watermark_width*watermark_height*4;
890 int offset = (get_bits_count(&gb)+7)>>3;
/* guard width*height*4 against 32-bit overflow before malloc */
893 if ((uint64_t)watermark_width*4 > UINT_MAX/watermark_height)
896 buf = av_malloc(buf_len);
897 av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n", watermark_width, watermark_height);
898 av_log(avctx, AV_LOG_DEBUG, "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n", u1, u2, u3, u4, offset);
899 if (uncompress(buf, &buf_len, extradata + 8 + offset, size - offset) != Z_OK) {
900 av_log(avctx, AV_LOG_ERROR, "could not uncompress watermark logo\n");
904 svq3->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
905 svq3->watermark_key = svq3->watermark_key << 16 | svq3->watermark_key;
906 av_log(avctx, AV_LOG_DEBUG, "watermark key %#x\n", svq3->watermark_key);
909 av_log(avctx, AV_LOG_ERROR, "this svq3 file contains watermark which need zlib support compiled in\n");
915 s->width = avctx->width;
916 s->height = avctx->height;
918 if (MPV_common_init(s) < 0)
921 h->b_stride = 4*s->mb_width;
923 ff_h264_alloc_tables(h);
/* Decode one SVQ3 frame from avpkt. Handles the end-of-stream flush
 * of the delayed next picture, optional watermark descrambling of a
 * private copy of the packet, slice-header parsing, frame-number
 * bookkeeping for B-frame MV scaling, then the per-MB decode loop.
 * Output picture selection follows low_delay / picture type at the
 * bottom. Returns buf_size on success (return statements are elided
 * from this excerpt). */
929 static int svq3_decode_frame(AVCodecContext *avctx,
930 void *data, int *data_size,
933 SVQ3Context *svq3 = avctx->priv_data;
934 H264Context *h = &svq3->h;
935 MpegEncContext *s = &h->s;
936 int buf_size = avpkt->size;
937 int m, mb_type, left;
940 /* special case for last picture */
942 if (s->next_picture_ptr && !s->low_delay) {
943 *(AVFrame *) data = *(AVFrame *) &s->next_picture;
944 s->next_picture_ptr = NULL;
945 *data_size = sizeof(AVFrame);
950 s->mb_x = s->mb_y = h->mb_xy = 0;
/* watermarked streams are descrambled in place, so work on a copy */
952 if (svq3->watermark_key) {
953 av_fast_malloc(&svq3->buf, &svq3->buf_size,
954 buf_size+FF_INPUT_BUFFER_PADDING_SIZE);
956 return AVERROR(ENOMEM);
957 memcpy(svq3->buf, avpkt->data, buf_size);
963 init_get_bits(&s->gb, buf, 8*buf_size);
965 if (svq3_decode_slice_header(avctx))
968 s->pict_type = h->slice_type;
969 s->picture_number = h->slice_num;
971 if (avctx->debug&FF_DEBUG_PICT_INFO){
972 av_log(h->s.avctx, AV_LOG_DEBUG, "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
973 av_get_picture_type_char(s->pict_type), svq3->halfpel_flag, svq3->thirdpel_flag,
974 s->adaptive_quant, s->qscale, h->slice_num);
977 /* for skipping the frame */
978 s->current_picture.pict_type = s->pict_type;
979 s->current_picture.key_frame = (s->pict_type == AV_PICTURE_TYPE_I);
981 /* Skip B-frames if we do not have reference frames. */
982 if (s->last_picture_ptr == NULL && s->pict_type == AV_PICTURE_TYPE_B)
984 if ( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
985 ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
986 || avctx->skip_frame >= AVDISCARD_ALL)
989 if (s->next_p_frame_damaged) {
990 if (s->pict_type == AV_PICTURE_TYPE_B)
993 s->next_p_frame_damaged = 0;
996 if (ff_h264_frame_start(h) < 0)
/* frame-number offsets drive the temporal MV scaling in
 * svq3_mc_dir(); slice_num acts as the 8-bit picture id */
999 if (s->pict_type == AV_PICTURE_TYPE_B) {
1000 h->frame_num_offset = (h->slice_num - h->prev_frame_num);
1002 if (h->frame_num_offset < 0) {
1003 h->frame_num_offset += 256;
1005 if (h->frame_num_offset == 0 || h->frame_num_offset >= h->prev_frame_num_offset) {
1006 av_log(h->s.avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1010 h->prev_frame_num = h->frame_num;
1011 h->frame_num = h->slice_num;
1012 h->prev_frame_num_offset = (h->frame_num - h->prev_frame_num);
1014 if (h->prev_frame_num_offset < 0) {
1015 h->prev_frame_num_offset += 256;
/* preset ref_cache: everything available except right column */
1019 for (m = 0; m < 2; m++){
1021 for (i = 0; i < 4; i++){
1023 for (j = -1; j < 4; j++)
1024 h->ref_cache[m][scan8[0] + 8*i + j]= 1;
1026 h->ref_cache[m][scan8[0] + 8*i + j]= PART_NOT_AVAILABLE;
1030 for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
1031 for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
1032 h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;
/* detect slice boundary: at (or within 7 bits of) the end with
 * only zero padding left, advance to the next slice */
1034 if ( (get_bits_count(&s->gb) + 7) >= s->gb.size_in_bits &&
1035 ((get_bits_count(&s->gb) & 7) == 0 || show_bits(&s->gb, (-get_bits_count(&s->gb) & 7)) == 0)) {
1037 skip_bits(&s->gb, svq3->next_slice_index - get_bits_count(&s->gb));
1038 s->gb.size_in_bits = 8*buf_size;
1040 if (svq3_decode_slice_header(avctx))
1043 /* TODO: support s->mb_skip_run */
1046 mb_type = svq3_get_ue_golomb(&s->gb);
/* mb_type is biased per picture type (offset lines elided) */
1048 if (s->pict_type == AV_PICTURE_TYPE_I) {
1050 } else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4) {
1053 if ((unsigned)mb_type > 33 || svq3_decode_mb(svq3, mb_type)) {
1054 av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
1059 ff_h264_hl_decode_mb (h);
1062 if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay) {
1063 s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
1064 (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1068 ff_draw_horiz_band(s, 16*s->mb_y, 16);
1071 left = buf_size*8 - get_bits_count(&s->gb);
1073 if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {
1074 av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, s->mb_y, s->mb_x, left);
1075 //av_hex_dump(stderr, buf+buf_size-8, 8);
1079 av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1085 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
1086 *(AVFrame *) data = *(AVFrame *) &s->current_picture;
1088 *(AVFrame *) data = *(AVFrame *) &s->last_picture;
1091 /* Do not output the last pic after seeking. */
1092 if (s->last_picture_ptr || s->low_delay) {
1093 *data_size = sizeof(AVFrame);
/* Decoder teardown: release the H.264 context state and the private
 * watermark-descrambling packet buffer. */
1099 static int svq3_decode_end(AVCodecContext *avctx)
1101 SVQ3Context *svq3 = avctx->priv_data;
1102 H264Context *h = &svq3->h;
1103 MpegEncContext *s = &h->s;
1105 ff_h264_free_context(h);
1109 av_freep(&svq3->buf);
/* Codec registration entry; the init/close/decode callback fields
 * fall outside this excerpt. */
1115 AVCodec ff_svq3_decoder = {
1119 sizeof(SVQ3Context),
1124 CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_DELAY,
1125 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1126 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_NONE},