3 * VP6 compatible video decoder
5 * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
7 * The VP6F decoder accepts an optional 1 byte extradata. It is composed of:
8 * - upper 4bits: difference between encoded width and visible width
9 * - lower 4bits: difference between encoded height and visible height
11 * This file is part of FFmpeg.
13 * FFmpeg is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU Lesser General Public
15 * License as published by the Free Software Foundation; either
16 * version 2.1 of the License, or (at your option) any later version.
18 * FFmpeg is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * Lesser General Public License for more details.
23 * You should have received a copy of the GNU Lesser General Public
24 * License along with FFmpeg; if not, write to the Free Software
25 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
32 #include "bitstream.h"
33 #include "mpegvideo.h"
40 static int vp6_parse_header(vp56_context_t *s, uint8_t *buf, int buf_size,
/* Parse one VP6 frame header from buf and initialize the range decoder(s).
 * NOTE(review): several lines of this function are elided in this view
 * (local declarations, the rest of the signature -- presumably an
 * int *golden_frame out-parameter given its use below, some else branches);
 * comments describe only what is visible. */
43 vp56_range_coder_t *c = &s->c;
44 int parse_filter_info = 0;
/* bit 0 of the first byte selects the "separated coefficient" layout, where
 * coefficient data lives in a second partition found via a big-endian 16-bit
 * offset field (the AV_RB16 reads below). */
50 int separated_coeff = buf[0] & 1;
/* top bit CLEAR means keyframe (note the inversion);
 * bits 1..6 carry the dequantizer index */
52 s->framep[VP56_FRAME_CURRENT]->key_frame = !(buf[0] & 0x80);
53 vp56_init_dequant(s, (buf[0] >> 1) & 0x3F);
55 if (s->framep[VP56_FRAME_CURRENT]->key_frame) {
/* --- keyframe header --- */
56 sub_version = buf[1] >> 3;
59 s->filter_header = buf[1] & 0x06;
/* interlaced content is rejected (the detecting condition is elided here) */
61 av_log(s->avctx, AV_LOG_ERROR, "interlacing not supported\n");
64 if (separated_coeff || !s->filter_header) {
/* offset of the separate coefficient partition, relative to buf */
65 coeff_offset = AV_RB16(buf+2) - 2;
70 rows = buf[2]; /* number of stored macroblock rows */
71 cols = buf[3]; /* number of stored macroblock cols */
72 /* buf[4] is number of displayed macroblock rows */
73 /* buf[5] is number of displayed macroblock cols */
/* if the coded size changed, resize; the optional 1-byte extradata holds the
 * (coded - visible) width/height deltas in its upper/lower nibbles (see the
 * VP6F note in the file header comment) */
75 if (16*cols != s->avctx->coded_width ||
76 16*rows != s->avctx->coded_height) {
77 avcodec_set_dimensions(s->avctx, 16*cols, 16*rows);
78 if (s->avctx->extradata_size == 1) {
79 s->avctx->width -= s->avctx->extradata[0] >> 4;
80 s->avctx->height -= s->avctx->extradata[0] & 0x0F;
/* keyframe: 6 bytes of plain header, range-coded data follows */
85 vp56_init_range_decoder(c, buf+6, buf_size-6);
88 parse_filter_info = s->filter_header;
91 s->sub_version = sub_version;
/* --- inter-frame path (the matching `} else {` line is elided) --- */
96 if (separated_coeff || !s->filter_header) {
97 coeff_offset = AV_RB16(buf+1) - 2;
/* inter frame: only 1 byte of plain header before range-coded data */
101 vp56_init_range_decoder(c, buf+1, buf_size-1);
/* first range-coded bit: whether this frame updates the golden frame */
103 *golden_frame = vp56_rac_get(c);
104 if (s->filter_header) {
105 s->deblock_filtering = vp56_rac_get(c);
106 if (s->deblock_filtering)
/* body of the if above is elided (presumably reads one more bit) */
108 if (s->sub_version > 7)
109 parse_filter_info = vp56_rac_get(c);
/* optional filter parameters, signalled in the bitstream */
113 if (parse_filter_info) {
114 if (vp56_rac_get(c)) {
/* adaptive mode: variance threshold (scaled by vrt_shift, whose declaration
 * is elided) and the maximum motion-vector length */
116 s->sample_variance_threshold = vp56_rac_gets(c, 5) << vrt_shift;
117 s->max_vector_length = 2 << vp56_rac_gets(c, 3);
118 } else if (vp56_rac_get(c)) {
/* explicit filter selection: 4 bits when sub_version > 7, otherwise the
 * fixed value 16 (the intervening lines are elided) */
123 if (s->sub_version > 7)
124 s->filter_selection = vp56_rac_gets(c, 4);
126 s->filter_selection = 16;
/* separated-coefficient layout: start a second range decoder (s->cc) on the
 * coefficient partition */
132 vp56_init_range_decoder(&s->cc, buf+coeff_offset,
133 buf_size-coeff_offset);
142 static void vp6_coeff_order_table_init(vp56_context_t *s)
/* Rebuild coeff_index_to_pos as the inverse of the coeff_reorder permutation:
 * scan index 0 (DC) always maps to position 0; for each scan value i (the
 * outer loop over i is elided in this view) the coefficient positions whose
 * reorder value equals i are appended in ascending position order. Must be
 * called whenever coeff_reorder changes. */
146 s->coeff_index_to_pos[0] = 0;
148 for (pos=1; pos<64; pos++)
149 if (s->coeff_reorder[pos] == i)
150 s->coeff_index_to_pos[idx++] = pos;
153 static void vp6_default_models_init(vp56_context_t *s)
/* Reset every probability model to its VP6 default. The DCT/sign vector
 * models get fixed constants; the larger tables are copied whole from the
 * static default arrays. */
155 s->vector_model_dct[0] = 0xA2;
156 s->vector_model_dct[1] = 0xA4;
157 s->vector_model_sig[0] = 0x80;
158 s->vector_model_sig[1] = 0x80;
160 memcpy(s->mb_types_stats, vp56_def_mb_types_stats, sizeof(s->mb_types_stats));
161 memcpy(s->vector_model_fdv, vp6_def_fdv_vector_model, sizeof(s->vector_model_fdv));
162 memcpy(s->vector_model_pdv, vp6_def_pdv_vector_model, sizeof(s->vector_model_pdv));
163 memcpy(s->coeff_model_runv, vp6_def_runv_coeff_model, sizeof(s->coeff_model_runv));
164 memcpy(s->coeff_reorder, vp6_def_coeff_reorder, sizeof(s->coeff_reorder));
/* coeff_reorder was just reset, so its inverse table must be rebuilt */
166 vp6_coeff_order_table_init(s);
169 static void vp6_parse_vector_models(vp56_context_t *s)
/* Read conditional updates to the motion-vector probability models from the
 * header range coder. For each of the two vector components (x and y), each
 * model entry is preceded by an update flag decoded against a fixed
 * per-entry probability; when set, a new 7-bit non-null value replaces the
 * stored probability. */
171 vp56_range_coder_t *c = &s->c;
/* DCT and sign models: one entry per component */
174 for (comp=0; comp<2; comp++) {
175 if (vp56_rac_get_prob(c, vp6_sig_dct_pct[comp][0]))
176 s->vector_model_dct[comp] = vp56_rac_gets_nn(c, 7);
177 if (vp56_rac_get_prob(c, vp6_sig_dct_pct[comp][1]))
178 s->vector_model_sig[comp] = vp56_rac_gets_nn(c, 7);
/* "pdv" tree-coded vector model: 7 nodes per component */
181 for (comp=0; comp<2; comp++)
182 for (node=0; node<7; node++)
183 if (vp56_rac_get_prob(c, vp6_pdv_pct[comp][node]))
184 s->vector_model_pdv[comp][node] = vp56_rac_gets_nn(c, 7);
/* "fdv" raw-bit vector model: 8 nodes per component */
186 for (comp=0; comp<2; comp++)
187 for (node=0; node<8; node++)
188 if (vp56_rac_get_prob(c, vp6_fdv_pct[comp][node]))
189 s->vector_model_fdv[comp][node] = vp56_rac_gets_nn(c, 7);
192 static void vp6_parse_coeff_models(vp56_context_t *s)
/* Read conditional updates to the DCT coefficient probability models.
 * def_prob accumulates the most recently decoded value per node so that, on
 * keyframes, entries without an explicit update fall back to it (0x80, i.e.
 * p=0.5, when never updated). NOTE(review): braces/closing lines of several
 * loops are elided in this view. */
194 vp56_range_coder_t *c = &s->c;
196 int node, cg, ctx, pos;
197 int ct; /* code type */
198 int pt; /* plane type (0 for Y, 1 for U or V) */
200 memset(def_prob, 0x80, sizeof(def_prob));
/* DC coefficient model, per plane type */
202 for (pt=0; pt<2; pt++)
203 for (node=0; node<11; node++)
204 if (vp56_rac_get_prob(c, vp6_dccv_pct[pt][node])) {
205 def_prob[node] = vp56_rac_gets_nn(c, 7);
206 s->coeff_model_dccv[pt][node] = def_prob[node];
207 } else if (s->framep[VP56_FRAME_CURRENT]->key_frame) {
/* keyframe without explicit update: reset to the default */
208 s->coeff_model_dccv[pt][node] = def_prob[node];
/* optional custom coefficient scan order (4 bits per position) */
211 if (vp56_rac_get(c)) {
212 for (pos=1; pos<64; pos++)
213 if (vp56_rac_get_prob(c, vp6_coeff_reorder_pct[pos]))
214 s->coeff_reorder[pos] = vp56_rac_gets(c, 4);
/* scan order changed: rebuild its inverse lookup table */
215 vp6_coeff_order_table_init(s);
/* zero-run model, two coefficient groups */
218 for (cg=0; cg<2; cg++)
219 for (node=0; node<14; node++)
220 if (vp56_rac_get_prob(c, vp6_runv_pct[cg][node]))
221 s->coeff_model_runv[cg][node] = vp56_rac_gets_nn(c, 7);
/* AC coefficient model: code type x plane type x coefficient group.
 * Note the index order swap: the pct table is [ct][pt] but the stored
 * model is [pt][ct]. */
223 for (ct=0; ct<3; ct++)
224 for (pt=0; pt<2; pt++)
225 for (cg=0; cg<6; cg++)
226 for (node=0; node<11; node++)
227 if (vp56_rac_get_prob(c, vp6_ract_pct[ct][pt][cg][node])) {
228 def_prob[node] = vp56_rac_gets_nn(c, 7);
229 s->coeff_model_ract[pt][ct][cg][node] = def_prob[node];
230 } else if (s->framep[VP56_FRAME_CURRENT]->key_frame) {
231 s->coeff_model_ract[pt][ct][cg][node] = def_prob[node];
234 /* coeff_model_dcct is a linear combination of coeff_model_dccv */
235 for (pt=0; pt<2; pt++)
236 for (ctx=0; ctx<3; ctx++)
237 for (node=0; node<5; node++)
238 s->coeff_model_dcct[pt][ctx][node] = av_clip(((s->coeff_model_dccv[pt][node] * vp6_dccv_lc[ctx][node][0] + 128) >> 8) + vp6_dccv_lc[ctx][node][1], 1, 255);
241 static void vp6_parse_vector_adjustment(vp56_context_t *s, vp56_mv_t *vect)
/* Decode a motion-vector delta for both components and add it to the base
 * predictor. The base is (0,0) unless a candidate vector exists (fewer than
 * 2 candidate slots filled -> use candidate[0]). NOTE(review): the tail of
 * this function (applying sign/delta to *vect) is elided in this view. */
243 vp56_range_coder_t *c = &s->c;
246 *vect = (vp56_mv_t) {0,0};
247 if (s->vector_candidate_pos < 2)
248 *vect = s->vector_candidate[0];
250 for (comp=0; comp<2; comp++) {
/* large-delta path: 7 or 8 raw magnitude bits decoded against the fdv
 * model, in the fixed bit order below; bit 3 is sent last, on a line whose
 * enclosing condition is elided */
253 if (vp56_rac_get_prob(c, s->vector_model_dct[comp])) {
254 static const uint8_t prob_order[] = {0, 1, 2, 7, 6, 5, 4};
255 for (i=0; i<sizeof(prob_order); i++) {
256 int j = prob_order[i];
257 delta |= vp56_rac_get_prob(c, s->vector_model_fdv[comp][j])<<j;
260 delta |= vp56_rac_get_prob(c, s->vector_model_fdv[comp][3])<<3;
/* small-delta path: tree-coded magnitude */
264 delta = vp56_rac_get_tree(c, vp56_pva_tree,
265 s->vector_model_pdv[comp]);
/* a non-zero delta is followed by one sign bit */
268 if (delta && vp56_rac_get_prob(c, s->vector_model_sig[comp]))
278 static void vp6_parse_coeff(vp56_context_t *s)
/* Decode the DCT coefficients of the current macroblock (6 blocks: 4 luma +
 * 2 chroma) from the coefficient range coder (s->ccp, which may be the
 * separate coefficient partition). Coefficients are decoded in the custom
 * scan order (coeff_index_to_pos) and stored through the IDCT permutation.
 * NOTE(review): many lines (pt switch to chroma, EOB handling, loop
 * closers) are elided in this view. */
280 vp56_range_coder_t *c = s->ccp;
281 uint8_t *permute = s->scantable.permutated;
282 uint8_t *model, *model2, *model3;
283 int coeff, sign, coeff_idx;
284 int b, i, cg, idx, ctx;
285 int pt = 0; /* plane type (0 for Y, 1 for U or V) */
287 for (b=0; b<6; b++) {
288 int ct = 1; /* code type */
/* DC context comes from whether the left/above neighbours had a
 * non-null DC coefficient */
293 ctx = s->left_block[vp56_b6to4[b]].not_null_dc
294 + s->above_blocks[s->above_block_idx[b]].not_null_dc;
295 model = s->coeff_model_dccv[pt];
296 model2 = s->coeff_model_dcct[pt][ctx];
298 for (coeff_idx=0; coeff_idx<64; ) {
/* a coefficient is present (or forced after a zero-run when ct==0) */
299 if ((coeff_idx>1 && ct==0) || vp56_rac_get_prob(c, model2[0])) {
301 if (vp56_rac_get_prob(c, model2[2])) {
302 if (vp56_rac_get_prob(c, model2[3])) {
/* large magnitude: tree-coded bucket index, then the remaining
 * magnitude bits on top of the bucket's bias */
303 idx = vp56_rac_get_tree(c, vp56_pc_tree, model);
304 coeff = vp56_coeff_bias[idx];
305 for (i=vp56_coeff_bit_length[idx]; i>=0; i--)
306 coeff += vp56_rac_get_prob(c, vp56_coeff_parse_table[idx][i]) << i;
/* medium magnitude: 3 or 4 (the else branch giving 1 or 2 is
 * elided in this view) */
308 if (vp56_rac_get_prob(c, model2[4]))
309 coeff = 3 + vp56_rac_get_prob(c, model[5]);
/* sign bit, applied branchlessly: (x ^ -s) + s negates when s==1 */
318 sign = vp56_rac_get(c);
319 coeff = (coeff ^ -sign) + sign;
/* dequantize and scatter through custom scan + IDCT permutation
 * (AC dequant shown; the DC special case is elided) */
321 coeff *= s->dequant_ac;
322 idx = s->coeff_index_to_pos[coeff_idx];
323 s->block_coeff[b][permute[idx]] = coeff;
/* no coefficient: decode a zero run (model2[1] clear presumably
 * terminates the block -- surrounding lines elided) */
329 if (!vp56_rac_get_prob(c, model2[1]))
/* run model selected by position group (before/after index 6) */
332 model3 = s->coeff_model_runv[coeff_idx >= 6];
333 run = vp56_rac_get_tree(c, vp6_pcr_tree, model3);
/* escape: runs >= 9 are sent as 6 extra raw bits */
335 for (run=9, i=0; i<6; i++)
336 run += vp56_rac_get_prob(c, model3[i+8]) << i;
/* advance past the run and pick the AC model for the new group */
340 cg = vp6_coeff_groups[coeff_idx+=run];
341 model = model2 = s->coeff_model_ract[pt][ct][cg];
/* record DC non-nullness for the neighbours of later blocks */
344 s->left_block[vp56_b6to4[b]].not_null_dc =
345 s->above_blocks[s->above_block_idx[b]].not_null_dc = !!s->block_coeff[b][0];
349 static int vp6_adjust(int v, int t)
/* Deblocking adjustment of value v against threshold t (installed as
 * s->adjust in vp6_decode_init). NOTE(review): most of the body is elided;
 * visibly, s is a sign mask derived from v (arithmetic >> 31) and the
 * unsigned comparison below range-checks the magnitude against t -- the
 * actual result computation is not visible here. */
351 int V = v, s = v >> 31;
354 if (V-t-1 >= (unsigned)(t-1))
362 static int vp6_block_variance(uint8_t *src, int stride)
/* Approximate variance of an 8x8 block, sampled on a 4x4 subgrid (every
 * second pixel in x and y -> 16 samples). Returns
 * (16*sum(x^2) - (sum x)^2) >> 8, i.e. 256*variance >> 8.
 * NOTE(review): the sum accumulation and src advance lines are elided. */
364 int sum = 0, square_sum = 0;
367 for (y=0; y<8; y+=2) {
368 for (x=0; x<8; x+=2) {
370 square_sum += src[x]*src[x];
374 return (16*square_sum - sum*sum) >> 8;
377 static void vp6_filter_hv4(uint8_t *dst, uint8_t *src, int stride,
378 int delta, const int16_t *weights)
/* 4-tap one-dimensional subpel filter over an 8x8 block. delta selects the
 * direction: 1 for horizontal, stride for vertical. Taps are applied at
 * offsets {-delta, 0, +delta, +2*delta} with 7-bit weights and round-to-
 * nearest (+64 >> 7), clipped to 8 bits. Advancing dst/src by stride per
 * row happens on elided lines. */
382 for (y=0; y<8; y++) {
383 for (x=0; x<8; x++) {
384 dst[x] = av_clip_uint8(( src[x-delta ] * weights[0]
385 + src[x ] * weights[1]
386 + src[x+delta ] * weights[2]
387 + src[x+2*delta] * weights[3] + 64) >> 7);
394 static void vp6_filter_diag2(vp56_context_t *s, uint8_t *dst, uint8_t *src,
395 int stride, int h_weight, int v_weight)
/* Bilinear diagonal (both x and y fractional) prediction built from two
 * separable passes of the H.264 chroma MC routine: first a horizontal-only
 * pass into the scratch edge_emu_buffer (9 rows, one extra row of context
 * for the second pass), then a vertical-only pass into dst (8 rows). */
397 uint8_t *tmp = s->edge_emu_buffer+16;
398 s->dsp.put_h264_chroma_pixels_tab[0](tmp, src, stride, 9, h_weight, 0);
399 s->dsp.put_h264_chroma_pixels_tab[0](dst, tmp, stride, 8, 0, v_weight);
402 static void vp6_filter_diag4(uint8_t *dst, uint8_t *src, int stride,
403 const int16_t *h_weights,const int16_t *v_weights)
/* 4-tap diagonal subpel filter, done as two separable passes through a
 * temporary buffer t (declaration elided; rows are 8 samples apart given
 * the x+8/x+16 indexing below). Horizontal pass covers 11 rows: 8 output
 * rows plus the extra context rows the 4-tap vertical pass needs. Both
 * passes use 7-bit weights with +64 rounding, clipped to 8 bits. */
411 for (y=0; y<11; y++) {
412 for (x=0; x<8; x++) {
413 t[x] = av_clip_uint8(( src[x-1] * h_weights[0]
414 + src[x ] * h_weights[1]
415 + src[x+1] * h_weights[2]
416 + src[x+2] * h_weights[3] + 64) >> 7);
/* vertical pass: taps at rows -1, 0, +1, +2 of the temp buffer */
423 for (y=0; y<8; y++) {
424 for (x=0; x<8; x++) {
425 dst[x] = av_clip_uint8(( t[x-8 ] * v_weights[0]
426 + t[x ] * v_weights[1]
427 + t[x+8 ] * v_weights[2]
428 + t[x+16] * v_weights[3] + 64) >> 7);
435 static void vp6_filter(vp56_context_t *s, uint8_t *dst, uint8_t *src,
436 int offset1, int offset2, int stride,
437 vp56_mv_t mv, int mask, int select, int luma)
/* Motion-compensated prediction of one 8x8 block, choosing between
 * bilinear (2-tap) and 4-tap filtering. x8/y8 are the fractional motion
 * components (mask isolates the subpel bits). NOTE(review): several lines
 * (filter4 adjustments, the offset1/offset2 swap body, early copy paths)
 * are elided in this view. */
440 int x8 = mv.x & mask;
441 int y8 = mv.y & mask;
446 filter4 = s->filter_mode;
/* heuristics from the frame header: long vectors or low-variance source
 * blocks demote/promote the filter choice (assignments elided) */
448 if (s->max_vector_length &&
449 (FFABS(mv.x) > s->max_vector_length ||
450 FFABS(mv.y) > s->max_vector_length)) {
452 } else if (s->sample_variance_threshold
453 && (vp6_block_variance(src+offset1, stride)
454 < s->sample_variance_threshold)) {
/* pick the reference offset whose vertical direction matches the motion
 * (s->flip distinguishes the upside-down VP6F layout) */
460 if ((y8 && (offset2-offset1)*s->flip<0) || (!y8 && offset1 > offset2)) {
/* 4-tap path: pure-horizontal, pure-vertical, or full diagonal */
465 if (!y8) { /* left or right combine */
466 vp6_filter_hv4(dst, src+offset1, stride, 1,
467 vp6_block_copy_filter[select][x8]);
468 } else if (!x8) { /* above or below combine */
469 vp6_filter_hv4(dst, src+offset1, stride, stride,
470 vp6_block_copy_filter[select][y8]);
/* diagonal: step src back one sample when mv.x and mv.y have opposite
 * signs ((mv.x^mv.y)>>31 is -1 in that case, else 0) */
472 vp6_filter_diag4(dst, src+offset1 + ((mv.x^mv.y)>>31), stride,
473 vp6_block_copy_filter[select][x8],
474 vp6_block_copy_filter[select][y8]);
/* bilinear path: single chroma-MC call, or the 2-pass diagonal helper */
478 s->dsp.put_h264_chroma_pixels_tab[0](dst, src+offset1, stride, 8, x8, y8);
480 vp6_filter_diag2(s, dst, src+offset1 + ((mv.x^mv.y)>>31), stride, x8, y8);
485 static int vp6_decode_init(AVCodecContext *avctx)
/* Decoder init: run the common VP5/VP6 setup, then install the VP6-specific
 * callbacks and tables into the shared vp56 context. The flip flag passed
 * to vp56_init distinguishes raw VP6 (CODEC_ID_VP6, flipped) from the
 * Flash VP6F variant. (Return statement elided in this view.) */
487 vp56_context_t *s = avctx->priv_data;
489 vp56_init(s, avctx, avctx->codec->id == CODEC_ID_VP6);
490 s->vp56_coord_div = vp6_coord_div;
491 s->parse_vector_adjustment = vp6_parse_vector_adjustment;
492 s->adjust = vp6_adjust;
493 s->filter = vp6_filter;
494 s->parse_coeff = vp6_parse_coeff;
495 s->default_models_init = vp6_default_models_init;
496 s->parse_vector_models = vp6_parse_vector_models;
497 s->parse_coeff_models = vp6_parse_coeff_models;
498 s->parse_header = vp6_parse_header;
/* Codec registration tables. Most initializer fields (name, type, codec id,
 * init/close/decode callbacks) are elided in this view; only the priv_data
 * size -- the shared vp56 context -- is visible. */
503 AVCodec vp6_decoder = {
507 sizeof(vp56_context_t),
515 /* flash version, not flipped upside-down */
516 AVCodec vp6f_decoder = {
520 sizeof(vp56_context_t),