2 * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * VP6 compatible video decoder
25 * The VP6F decoder accepts an optional 1 byte extradata. It is composed of:
26 * - upper 4 bits: difference between encoded width and visible width
27 * - lower 4 bits: difference between encoded height and visible height
41 #define VP6_MAX_HUFF_SIZE 12
43 static int vp6_parse_coeff(VP56Context *s);
44 static int vp6_parse_coeff_huffman(VP56Context *s);
/**
 * Parse a VP6 frame header from buf and prepare the range decoder(s).
 *
 * NOTE(review): this listing is line-sampled — declarations, braces and
 * several statements are missing, and each line carries its original line
 * number. Comments below describe only what the visible code shows.
 *
 * Keyframes carry sub_version, the filter header and the coded size in
 * macroblocks; a coded-size change is signalled via VP56_SIZE_CHANGE.
 * Inter frames require a prior keyframe. Returns a negative AVERROR on
 * invalid data; on a failed size change the dimensions are reset to 0.
 */
46 static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size)
48 VP56RangeCoder *c = &s->c;
49 int parse_filter_info = 0;
/* bit 0 of byte 0: coefficients live in a separate partition */
56 int separated_coeff = buf[0] & 1;
/* bit 7 clear => keyframe; bits 1..6 select the dequant index */
58 s->frames[VP56_FRAME_CURRENT]->key_frame = !(buf[0] & 0x80);
59 ff_vp56_init_dequant(s, (buf[0] >> 1) & 0x3F);
61 if (s->frames[VP56_FRAME_CURRENT]->key_frame) {
62 sub_version = buf[1] >> 3;
64 return AVERROR_INVALIDDATA;
65 s->filter_header = buf[1] & 0x06;
/* interlaced streams are not supported */
67 avpriv_report_missing_feature(s->avctx, "Interlacing");
68 return AVERROR_PATCHWELCOME;
70 if (separated_coeff || !s->filter_header) {
/* big-endian 16-bit offset to the coefficient partition */
71 coeff_offset = AV_RB16(buf+2) - 2;
76 rows = buf[2]; /* number of stored macroblock rows */
77 cols = buf[3]; /* number of stored macroblock cols */
78 /* buf[4] is number of displayed macroblock rows */
79 /* buf[5] is number of displayed macroblock cols */
81 av_log(s->avctx, AV_LOG_ERROR, "Invalid size %dx%d\n", cols << 4, rows << 4);
82 return AVERROR_INVALIDDATA;
/* (re)configure dimensions on the first frame or when coded size changed */
85 if (!s->macroblocks || /* first frame */
86 16*cols != s->avctx->coded_width ||
87 16*rows != s->avctx->coded_height) {
88 if (s->avctx->extradata_size == 0 &&
89 FFALIGN(s->avctx->width, 16) == 16 * cols &&
90 FFALIGN(s->avctx->height, 16) == 16 * rows) {
91 // We assume this is properly signalled container cropping,
92 // in an F4V file. Just set the coded_width/height, don't
93 // touch the cropped ones.
94 s->avctx->coded_width = 16 * cols;
95 s->avctx->coded_height = 16 * rows;
97 ret = ff_set_dimensions(s->avctx, 16 * cols, 16 * rows);
/* optional 1-byte extradata: high nibble = width crop, low = height crop */
101 if (s->avctx->extradata_size == 1) {
102 s->avctx->width -= s->avctx->extradata[0] >> 4;
103 s->avctx->height -= s->avctx->extradata[0] & 0x0F;
106 res = VP56_SIZE_CHANGE;
/* keyframe payload starts 6 bytes in */
109 ret = ff_vp56_init_range_decoder(c, buf+6, buf_size-6);
114 parse_filter_info = s->filter_header;
117 s->sub_version = sub_version;
/* inter frame: a keyframe must have established version and size */
120 if (!s->sub_version || !s->avctx->coded_width || !s->avctx->coded_height)
121 return AVERROR_INVALIDDATA;
123 if (separated_coeff || !s->filter_header) {
124 coeff_offset = AV_RB16(buf+1) - 2;
128 ret = ff_vp56_init_range_decoder(c, buf+1, buf_size-1);
132 s->golden_frame = vp56_rac_get(c);
133 if (s->filter_header) {
134 s->deblock_filtering = vp56_rac_get(c);
135 if (s->deblock_filtering)
137 if (s->sub_version > 7)
138 parse_filter_info = vp56_rac_get(c);
/* optional motion-filter parameters */
142 if (parse_filter_info) {
143 if (vp56_rac_get(c)) {
145 s->sample_variance_threshold = vp56_rac_gets(c, 5) << vrt_shift;
146 s->max_vector_length = 2 << vp56_rac_gets(c, 3);
147 } else if (vp56_rac_get(c)) {
152 if (s->sub_version > 7)
153 s->filter_selection = vp56_rac_gets(c, 4);
155 s->filter_selection = 16;
/* coefficients are either Huffman-coded or range-coded */
158 s->use_huffman = vp56_rac_get(c);
160 s->parse_coeff = vp6_parse_coeff;
163 buf_size -= coeff_offset;
165 ret = AVERROR_INVALIDDATA;
168 if (s->use_huffman) {
169 s->parse_coeff = vp6_parse_coeff_huffman;
170 init_get_bits(&s->gb, buf, buf_size<<3);
/* separate range coder for the coefficient partition */
172 ret = ff_vp56_init_range_decoder(&s->cc, buf, buf_size);
/* on error after a size change, invalidate the dimensions */
183 if (res == VP56_SIZE_CHANGE)
184 ff_set_dimensions(s->avctx, 0, 0);
188 static void vp6_coeff_order_table_init(VP56Context *s)
192 s->modelp->coeff_index_to_pos[0] = 0;
194 for (pos=1; pos<64; pos++)
195 if (s->modelp->coeff_reorder[pos] == i)
196 s->modelp->coeff_index_to_pos[idx++] = pos;
198 for (idx = 0; idx < 64; idx++) {
200 for (i = 0; i <= idx; i++) {
201 int v = s->modelp->coeff_index_to_pos[i];
205 if (s->sub_version > 6)
207 s->modelp->coeff_index_to_idct_selector[idx] = max;
211 static void vp6_default_models_init(VP56Context *s)
213 VP56Model *model = s->modelp;
215 model->vector_dct[0] = 0xA2;
216 model->vector_dct[1] = 0xA4;
217 model->vector_sig[0] = 0x80;
218 model->vector_sig[1] = 0x80;
220 memcpy(model->mb_types_stats, ff_vp56_def_mb_types_stats, sizeof(model->mb_types_stats));
221 memcpy(model->vector_fdv, vp6_def_fdv_vector_model, sizeof(model->vector_fdv));
222 memcpy(model->vector_pdv, vp6_def_pdv_vector_model, sizeof(model->vector_pdv));
223 memcpy(model->coeff_runv, vp6_def_runv_coeff_model, sizeof(model->coeff_runv));
224 memcpy(model->coeff_reorder, vp6_def_coeff_reorder, sizeof(model->coeff_reorder));
226 vp6_coeff_order_table_init(s);
229 static void vp6_parse_vector_models(VP56Context *s)
231 VP56RangeCoder *c = &s->c;
232 VP56Model *model = s->modelp;
235 for (comp=0; comp<2; comp++) {
236 if (vp56_rac_get_prob_branchy(c, vp6_sig_dct_pct[comp][0]))
237 model->vector_dct[comp] = vp56_rac_gets_nn(c, 7);
238 if (vp56_rac_get_prob_branchy(c, vp6_sig_dct_pct[comp][1]))
239 model->vector_sig[comp] = vp56_rac_gets_nn(c, 7);
242 for (comp=0; comp<2; comp++)
243 for (node=0; node<7; node++)
244 if (vp56_rac_get_prob_branchy(c, vp6_pdv_pct[comp][node]))
245 model->vector_pdv[comp][node] = vp56_rac_gets_nn(c, 7);
247 for (comp=0; comp<2; comp++)
248 for (node=0; node<8; node++)
249 if (vp56_rac_get_prob_branchy(c, vp6_fdv_pct[comp][node]))
250 model->vector_fdv[comp][node] = vp56_rac_gets_nn(c, 7);
253 /* nodes must ascend by count, but with descending symbol order */
254 static int vp6_huff_cmp(const void *va, const void *vb)
256 const Node *a = va, *b = vb;
257 return (a->count - b->count)*16 + (b->sym - a->sym);
260 static int vp6_build_huff_tree(VP56Context *s, uint8_t coeff_model[],
261 const uint8_t *map, unsigned size, VLC *vlc)
263 Node nodes[2*VP6_MAX_HUFF_SIZE], *tmp = &nodes[size];
266 /* first compute probabilities from model */
268 for (i=0; i<size-1; i++) {
269 a = tmp[i].count * coeff_model[i] >> 8;
270 b = tmp[i].count * (255 - coeff_model[i]) >> 8;
271 nodes[map[2*i ]].count = a + !a;
272 nodes[map[2*i+1]].count = b + !b;
276 /* then build the huffman tree according to probabilities */
277 return ff_huff_build_tree(s->avctx, vlc, size, FF_HUFFMAN_BITS,
279 FF_HUFFMAN_FLAG_HNODE_FIRST);
/**
 * Update the DC/AC coefficient probability models from the bitstream and,
 * when Huffman coding is selected, (re)build the corresponding VLC tables.
 *
 * NOTE(review): this listing is line-sampled — some declarations, braces
 * and error-return lines are missing; each line keeps its original number.
 */
282 static int vp6_parse_coeff_models(VP56Context *s)
284 VP56RangeCoder *c = &s->c;
285 VP56Model *model = s->modelp;
287 int node, cg, ctx, pos;
288 int ct; /* code type */
289 int pt; /* plane type (0 for Y, 1 for U or V) */
/* default probability for every node is 128 */
291 memset(def_prob, 0x80, sizeof(def_prob));
/* DC coefficient model: per-plane, 11 nodes */
293 for (pt=0; pt<2; pt++)
294 for (node=0; node<11; node++)
295 if (vp56_rac_get_prob_branchy(c, vp6_dccv_pct[pt][node])) {
296 def_prob[node] = vp56_rac_gets_nn(c, 7);
297 model->coeff_dccv[pt][node] = def_prob[node];
/* keyframes fall back to the (possibly just updated) defaults */
298 } else if (s->frames[VP56_FRAME_CURRENT]->key_frame) {
299 model->coeff_dccv[pt][node] = def_prob[node];
/* optional custom scan order; rebuild lookup tables if present */
302 if (vp56_rac_get(c)) {
303 for (pos=1; pos<64; pos++)
304 if (vp56_rac_get_prob_branchy(c, vp6_coeff_reorder_pct[pos]))
305 model->coeff_reorder[pos] = vp56_rac_gets(c, 4);
306 vp6_coeff_order_table_init(s);
/* zero-run model: 2 coefficient groups, 14 nodes each */
309 for (cg=0; cg<2; cg++)
310 for (node=0; node<14; node++)
311 if (vp56_rac_get_prob_branchy(c, vp6_runv_pct[cg][node]))
312 model->coeff_runv[cg][node] = vp56_rac_gets_nn(c, 7);
/* AC model: 3 code types x 2 planes x 6 groups x 11 nodes */
314 for (ct=0; ct<3; ct++)
315 for (pt=0; pt<2; pt++)
316 for (cg=0; cg<6; cg++)
317 for (node=0; node<11; node++)
318 if (vp56_rac_get_prob_branchy(c, vp6_ract_pct[ct][pt][cg][node])) {
319 def_prob[node] = vp56_rac_gets_nn(c, 7);
320 model->coeff_ract[pt][ct][cg][node] = def_prob[node];
321 } else if (s->frames[VP56_FRAME_CURRENT]->key_frame) {
322 model->coeff_ract[pt][ct][cg][node] = def_prob[node];
/* Huffman mode: derive VLC tables from the probability models */
325 if (s->use_huffman) {
326 for (pt=0; pt<2; pt++) {
327 if (vp6_build_huff_tree(s, model->coeff_dccv[pt],
328 vp6_huff_coeff_map, 12, &s->dccv_vlc[pt]))
330 if (vp6_build_huff_tree(s, model->coeff_runv[pt],
331 vp6_huff_run_map, 9, &s->runv_vlc[pt]))
333 for (ct=0; ct<3; ct++)
334 for (cg = 0; cg < 6; cg++)
335 if (vp6_build_huff_tree(s, model->coeff_ract[pt][ct][cg],
336 vp6_huff_coeff_map, 12,
337 &s->ract_vlc[pt][ct][cg]))
/* reset the pending null-block counters used by the Huffman parser */
340 memset(s->nb_null, 0, sizeof(s->nb_null));
342 /* coeff_dcct is a linear combination of coeff_dccv */
343 for (pt=0; pt<2; pt++)
344 for (ctx=0; ctx<3; ctx++)
345 for (node=0; node<5; node++)
346 model->coeff_dcct[pt][ctx][node] = av_clip(((model->coeff_dccv[pt][node] * vp6_dccv_lc[ctx][node][0] + 128) >> 8) + vp6_dccv_lc[ctx][node][1], 1, 255);
/**
 * Decode a motion-vector delta and add it to the prediction.
 *
 * Starts from the first vector candidate (when available), then for each
 * component decodes either a "long" vector — fine bits in the fixed order
 * {0,1,2,7,6,5,4} with bit 3 decoded last, conditionally — or a "short"
 * tree-coded delta, followed by an optional sign.
 *
 * NOTE(review): line-sampled listing — some declarations, the bit-3
 * condition and the final sign application are not visible here.
 */
351 static void vp6_parse_vector_adjustment(VP56Context *s, VP56mv *vect)
353 VP56RangeCoder *c = &s->c;
354 VP56Model *model = s->modelp;
357 *vect = (VP56mv) {0,0};
358 if (s->vector_candidate_pos < 2)
359 *vect = s->vector_candidate[0];
361 for (comp=0; comp<2; comp++) {
/* long (fine) vector path */
364 if (vp56_rac_get_prob_branchy(c, model->vector_dct[comp])) {
365 static const uint8_t prob_order[] = {0, 1, 2, 7, 6, 5, 4};
366 for (i=0; i<sizeof(prob_order); i++) {
367 int j = prob_order[i];
368 delta |= vp56_rac_get_prob(c, model->vector_fdv[comp][j])<<j;
/* bit 3 is decoded last (visible here without its guarding condition) */
371 delta |= vp56_rac_get_prob(c, model->vector_fdv[comp][3])<<3;
/* short vector path: tree-coded against the coarse model */
375 delta = vp56_rac_get_tree(c, ff_vp56_pva_tree,
376 model->vector_pdv[comp]);
/* non-zero deltas carry an explicit sign bit */
379 if (delta && vp56_rac_get_prob_branchy(c, model->vector_sig[comp]))
390 * Read number of consecutive blocks with null DC or AC.
391 * This value is < 74.
393 static unsigned vp6_get_nb_null(VP56Context *s)
395 unsigned val = get_bits(&s->gb, 2);
397 val += get_bits(&s->gb, 2);
399 val = get_bits1(&s->gb) << 2;
400 val = 6+val + get_bits(&s->gb, 2+val);
/**
 * Decode the 6 blocks of one macroblock using the Huffman coefficient
 * coder (s->gb), filling s->block_coeff[] in scan order and recording the
 * partial-IDCT selector per block.
 *
 * NOTE(review): line-sampled listing — loop braces, the dequant choice
 * between DC and AC, the run-skipping logic and the early-exit paths are
 * partially missing; comments describe only the visible code.
 */
405 static int vp6_parse_coeff_huffman(VP56Context *s)
407 VP56Model *model = s->modelp;
408 uint8_t *permute = s->idct_scantable;
410 int coeff, sign, coeff_idx;
412 int pt = 0; /* plane type (0 for Y, 1 for U or V) */
414 for (b=0; b<6; b++) {
415 int ct = 0; /* code type */
/* first symbol of a block uses the DC table */
417 vlc_coeff = &s->dccv_vlc[pt];
419 for (coeff_idx = 0;;) {
/* consume a pending "null DC/AC" run before reading more bits */
421 if (coeff_idx<2 && s->nb_null[coeff_idx][pt]) {
422 s->nb_null[coeff_idx][pt]--;
426 if (get_bits_left(&s->gb) <= 0)
427 return AVERROR_INVALIDDATA;
428 coeff = get_vlc2(&s->gb, vlc_coeff->table, FF_HUFFMAN_BITS, 3);
/* zero-run: run length coded with the runv table, or 6 raw bits */
431 int pt = (coeff_idx >= 6);
432 run += get_vlc2(&s->gb, s->runv_vlc[pt].table, FF_HUFFMAN_BITS, 3);
434 run += get_bits(&s->gb, 6);
/* null-DC run count for the following blocks */
436 s->nb_null[0][pt] = vp6_get_nb_null(s);
438 } else if (coeff == 11) { /* end of block */
439 if (coeff_idx == 1) /* first AC coeff ? */
440 s->nb_null[1][pt] = vp6_get_nb_null(s);
/* literal coefficient: bias + extra bits + sign */
443 int coeff2 = ff_vp56_coeff_bias[coeff];
445 coeff2 += get_bits(&s->gb, coeff <= 9 ? coeff - 4 : 11);
446 ct = 1 + (coeff2 > 1);
447 sign = get_bits1(&s->gb);
/* branchless conditional negation */
448 coeff2 = (coeff2 ^ -sign) + sign;
450 coeff2 *= s->dequant_ac;
451 idx = model->coeff_index_to_pos[coeff_idx];
452 s->block_coeff[b][permute[idx]] = coeff2;
/* pick the AC table for the next symbol from the coefficient group */
458 cg = FFMIN(vp6_coeff_groups[coeff_idx], 3);
459 vlc_coeff = &s->ract_vlc[pt][ct][cg];
461 s->idct_selector[b] = model->coeff_index_to_idct_selector[FFMIN(coeff_idx, 63)];
/**
 * Decode the 6 blocks of one macroblock using the range coder (s->ccp),
 * filling s->block_coeff[] and the per-block partial-IDCT selector, and
 * updating the left/above not_null_dc context.
 *
 * NOTE(review): line-sampled listing — loop braces, several small-value
 * branches, the zero-run continuation and the DC/AC dequant selection are
 * partially missing; comments describe only the visible code.
 */
466 static int vp6_parse_coeff(VP56Context *s)
468 VP56RangeCoder *c = s->ccp;
469 VP56Model *model = s->modelp;
470 uint8_t *permute = s->idct_scantable;
471 uint8_t *model1, *model2, *model3;
472 int coeff, sign, coeff_idx;
473 int b, i, cg, idx, ctx;
474 int pt = 0; /* plane type (0 for Y, 1 for U or V) */
/* refuse to decode past the end of the AC partition */
476 if (vpX_rac_is_end(c)) {
477 av_log(s->avctx, AV_LOG_ERROR, "End of AC stream reached in vp6_parse_coeff\n");
478 return AVERROR_INVALIDDATA;
481 for (b=0; b<6; b++) {
482 int ct = 1; /* code type */
/* DC context from the neighbouring blocks' not_null_dc flags (0..2) */
487 ctx = s->left_block[ff_vp56_b6to4[b]].not_null_dc
488 + s->above_blocks[s->above_block_idx[b]].not_null_dc;
489 model1 = model->coeff_dccv[pt];
490 model2 = model->coeff_dcct[pt][ctx];
494 if ((coeff_idx>1 && ct==0) || vp56_rac_get_prob_branchy(c, model2[0])) {
/* large-coefficient path: tree-select a bias class, then read its bits */
496 if (vp56_rac_get_prob_branchy(c, model2[2])) {
497 if (vp56_rac_get_prob_branchy(c, model2[3])) {
498 idx = vp56_rac_get_tree(c, ff_vp56_pc_tree, model1);
499 coeff = ff_vp56_coeff_bias[idx+5];
500 for (i=ff_vp56_coeff_bit_length[idx]; i>=0; i--)
501 coeff += vp56_rac_get_prob(c, ff_vp56_coeff_parse_table[idx][i]) << i;
/* small coefficient: 3 or 4 */
503 if (vp56_rac_get_prob_branchy(c, model2[4]))
504 coeff = 3 + vp56_rac_get_prob(c, model1[5]);
513 sign = vp56_rac_get(c);
/* branchless conditional negation */
514 coeff = (coeff ^ -sign) + sign;
516 coeff *= s->dequant_ac;
517 idx = model->coeff_index_to_pos[coeff_idx];
518 s->block_coeff[b][permute[idx]] = coeff;
/* zero coefficient: maybe a zero run follows */
524 if (!vp56_rac_get_prob_branchy(c, model2[1]))
527 model3 = model->coeff_runv[coeff_idx >= 6];
528 run = vp56_rac_get_tree(c, vp6_pcr_tree, model3);
/* runs >= 9: 6 extra probability-coded bits */
530 for (run=9, i=0; i<6; i++)
531 run += vp56_rac_get_prob(c, model3[i+8]) << i;
/* switch to the AC model for the next coefficient */
537 cg = vp6_coeff_groups[coeff_idx];
538 model1 = model2 = model->coeff_ract[pt][ct][cg];
/* propagate DC nullness to the neighbour context for the next MBs */
541 s->left_block[ff_vp56_b6to4[b]].not_null_dc =
542 s->above_blocks[s->above_block_idx[b]].not_null_dc = !!s->block_coeff[b][0];
543 s->idct_selector[b] = model->coeff_index_to_idct_selector[FFMIN(coeff_idx, 63)];
/**
 * Estimate the variance of an 8x8 block by sampling every other pixel in
 * both directions (16 samples total).
 *
 * @param src    top-left pixel of the block
 * @param stride row stride in bytes
 * @return (16 * sum(x^2) - sum(x)^2) / 256, i.e. the sample variance of
 *         the 16 sampled pixels
 *
 * The visible listing never accumulated `sum` nor advanced `src` between
 * rows; both restored here (matching upstream FFmpeg vp6.c).
 */
static int vp6_block_variance(uint8_t *src, ptrdiff_t stride)
{
    int sum = 0, square_sum = 0;
    int y, x;

    for (y = 0; y < 8; y += 2) {
        for (x = 0; x < 8; x += 2) {
            sum        += src[x];
            square_sum += src[x] * src[x];
        }
        src += 2 * stride;  /* skip every other row */
    }
    return (16*square_sum - sum*sum) >> 8;
}
/**
 * Apply a 4-tap filter along one axis of an 8x8 block.
 *
 * @param delta   distance between taps: 1 for horizontal filtering,
 *                stride for vertical
 * @param weights 4 filter taps; output is rounded (+64) and shifted by 7,
 *                then clipped to 0..255
 *
 * The visible listing never advanced src/dst between rows; the row
 * advance is restored here (matching upstream FFmpeg vp6.c).
 */
static void vp6_filter_hv4(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
                           int delta, const int16_t *weights)
{
    int x, y;

    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            dst[x] = av_clip_uint8((  src[x-delta  ] * weights[0]
                                    + src[x        ] * weights[1]
                                    + src[x+delta  ] * weights[2]
                                    + src[x+2*delta] * weights[3] + 64) >> 7);
        }
        src += stride;
        dst += stride;
    }
}
580 static void vp6_filter_diag2(VP56Context *s, uint8_t *dst, uint8_t *src,
581 ptrdiff_t stride, int h_weight, int v_weight)
583 uint8_t *tmp = s->edge_emu_buffer+16;
584 s->h264chroma.put_h264_chroma_pixels_tab[0](tmp, src, stride, 9, h_weight, 0);
585 s->h264chroma.put_h264_chroma_pixels_tab[0](dst, tmp, stride, 8, 0, v_weight);
/**
 * Motion-compensate one 8x8 block with sub-pel filtering.
 *
 * Chooses between 4-tap filtering (filter4) and 2-tap bilinear based on
 * the filter mode, the vector length / block variance heuristics, and
 * the sub-pel phase (x8/y8). offset1/offset2 are the two candidate source
 * offsets for the whole-pel position.
 *
 * NOTE(review): line-sampled listing — the filter-mode switch, some
 * branch bodies and the plain-copy fast path are partially missing.
 */
588 static void vp6_filter(VP56Context *s, uint8_t *dst, uint8_t *src,
589 int offset1, int offset2, ptrdiff_t stride,
590 VP56mv mv, int mask, int select, int luma)
/* sub-pel phase of the motion vector */
593 int x8 = mv.x & mask;
594 int y8 = mv.y & mask;
599 filter4 = s->filter_mode;
/* auto mode heuristics: long vectors or flat blocks use the cheap filter */
601 if (s->max_vector_length &&
602 (FFABS(mv.x) > s->max_vector_length ||
603 FFABS(mv.y) > s->max_vector_length)) {
605 } else if (s->sample_variance_threshold
606 && (vp6_block_variance(src+offset1, stride)
607 < s->sample_variance_threshold)) {
/* pick the source offset whose direction matches the vector */
613 if ((y8 && (offset2-offset1)*s->flip<0) || (!y8 && offset1 > offset2)) {
618 if (!y8) { /* left or right combine */
619 vp6_filter_hv4(dst, src+offset1, stride, 1,
620 vp6_block_copy_filter[select][x8]);
621 } else if (!x8) { /* above or below combine */
622 vp6_filter_hv4(dst, src+offset1, stride, stride,
623 vp6_block_copy_filter[select][y8]);
/* true diagonal: DSP 4-tap diagonal filter; (mv.x^mv.y)>>31 shifts the
 * source left by one when the vector components have opposite signs */
625 s->vp56dsp.vp6_filter_diag4(dst, src+offset1+((mv.x^mv.y)>>31), stride,
626 vp6_block_copy_filter[select][x8],
627 vp6_block_copy_filter[select][y8]);
/* bilinear path via the h264 chroma MC helpers */
631 s->h264chroma.put_h264_chroma_pixels_tab[0](dst, src + offset1, stride, 8, x8, y8);
633 vp6_filter_diag2(s, dst, src+offset1 + ((mv.x^mv.y)>>31), stride, x8, y8);
638 static av_cold void vp6_decode_init_context(VP56Context *s);
640 static av_cold int vp6_decode_init(AVCodecContext *avctx)
642 VP56Context *s = avctx->priv_data;
645 if ((ret = ff_vp56_init(avctx, avctx->codec->id == AV_CODEC_ID_VP6,
646 avctx->codec->id == AV_CODEC_ID_VP6A)) < 0)
648 ff_vp6dsp_init(&s->vp56dsp);
650 vp6_decode_init_context(s);
653 s->alpha_context = av_mallocz(sizeof(VP56Context));
654 ff_vp56_init_context(avctx, s->alpha_context,
655 s->flip == -1, s->has_alpha);
656 ff_vp6dsp_init(&s->alpha_context->vp56dsp);
657 vp6_decode_init_context(s->alpha_context);
663 static av_cold void vp6_decode_init_context(VP56Context *s)
665 s->deblock_filtering = 0;
666 s->vp56_coord_div = vp6_coord_div;
667 s->parse_vector_adjustment = vp6_parse_vector_adjustment;
668 s->filter = vp6_filter;
669 s->default_models_init = vp6_default_models_init;
670 s->parse_vector_models = vp6_parse_vector_models;
671 s->parse_coeff_models = vp6_parse_coeff_models;
672 s->parse_header = vp6_parse_header;
675 static av_cold void vp6_decode_free_context(VP56Context *s);
677 static av_cold int vp6_decode_free(AVCodecContext *avctx)
679 VP56Context *s = avctx->priv_data;
682 vp6_decode_free_context(s);
684 if (s->alpha_context) {
685 ff_vp56_free_context(s->alpha_context);
686 vp6_decode_free_context(s->alpha_context);
687 av_freep(&s->alpha_context);
693 static av_cold void vp6_decode_free_context(VP56Context *s)
697 for (pt=0; pt<2; pt++) {
698 ff_free_vlc(&s->dccv_vlc[pt]);
699 ff_free_vlc(&s->runv_vlc[pt]);
700 for (ct=0; ct<3; ct++)
701 for (cg=0; cg<6; cg++)
702 ff_free_vlc(&s->ract_vlc[pt][ct][cg]);
706 AVCodec ff_vp6_decoder = {
708 .long_name = NULL_IF_CONFIG_SMALL("On2 VP6"),
709 .type = AVMEDIA_TYPE_VIDEO,
710 .id = AV_CODEC_ID_VP6,
711 .priv_data_size = sizeof(VP56Context),
712 .init = vp6_decode_init,
713 .close = vp6_decode_free,
714 .decode = ff_vp56_decode_frame,
715 .capabilities = AV_CODEC_CAP_DR1,
718 /* flash version, not flipped upside-down */
719 AVCodec ff_vp6f_decoder = {
721 .long_name = NULL_IF_CONFIG_SMALL("On2 VP6 (Flash version)"),
722 .type = AVMEDIA_TYPE_VIDEO,
723 .id = AV_CODEC_ID_VP6F,
724 .priv_data_size = sizeof(VP56Context),
725 .init = vp6_decode_init,
726 .close = vp6_decode_free,
727 .decode = ff_vp56_decode_frame,
728 .capabilities = AV_CODEC_CAP_DR1,
731 /* flash version, not flipped upside-down, with alpha channel */
732 AVCodec ff_vp6a_decoder = {
734 .long_name = NULL_IF_CONFIG_SMALL("On2 VP6 (Flash version, with alpha channel)"),
735 .type = AVMEDIA_TYPE_VIDEO,
736 .id = AV_CODEC_ID_VP6A,
737 .priv_data_size = sizeof(VP56Context),
738 .init = vp6_decode_init,
739 .close = vp6_decode_free,
740 .decode = ff_vp56_decode_frame,
741 .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,