X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fvp56.c;h=3b2ac95837ce500ad371d754f99c8aafbd505163;hb=b12d21733975f9001eecb480fc28e5e4473b1327;hp=7f28ed0f7acb95ded9e6408f3268875a7e5612c2;hpb=d3f9edbafa7efd2685c5e49f151e2f8b813461c0;p=ffmpeg

diff --git a/libavcodec/vp56.c b/libavcodec/vp56.c
index 7f28ed0f7ac..3b2ac95837c 100644
--- a/libavcodec/vp56.c
+++ b/libavcodec/vp56.c
@@ -1,26 +1,28 @@
-/**
- * @file vp56.c
- * VP5 and VP6 compatible video decoder (common features)
- *
+/*
  * Copyright (C) 2006  Aurelien Jacobs <aurel@gnuage.org>
  *
- * This file is part of FFmpeg.
+ * This file is part of Libav.
  *
- * FFmpeg is free software; you can redistribute it and/or
+ * Libav is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
- * FFmpeg is distributed in the hope that it will be useful,
+ * Libav is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
+ * License along with Libav; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+/**
+ * @file
+ * VP5 and VP6 compatible video decoder (common features)
+ */
+
 #include "avcodec.h"
 #include "bytestream.h"
 
@@ -28,20 +30,21 @@
 #include "vp56data.h"
 
 
-void vp56_init_dequant(vp56_context_t *s, int quantizer)
+void ff_vp56_init_dequant(VP56Context *s, int quantizer)
 {
     s->quantizer = quantizer;
     s->dequant_dc = vp56_dc_dequant[quantizer] << 2;
     s->dequant_ac = vp56_ac_dequant[quantizer] << 2;
+    memset(s->qscale_table, quantizer, s->mb_width);
 }
 
-static int vp56_get_vectors_predictors(vp56_context_t *s, int row, int col,
-                                       vp56_frame_t ref_frame)
+static int vp56_get_vectors_predictors(VP56Context *s, int row, int col,
+                                       VP56Frame ref_frame)
 {
     int nb_pred = 0;
-    vp56_mv_t vect[2] = {{0,0}, {0,0}};
+    VP56mv vect[2] = {{0,0}, {0,0}};
     int pos, offset;
-    vp56_mv_t mvp;
+    VP56mv mvp;
 
     for (pos=0; pos<12; pos++) {
         mvp.x = col + vp56_candidate_predictor_pos[pos][0];
@@ -73,10 +76,10 @@ static int vp56_get_vectors_predictors(vp56_context_t *s, int row, int col,
     return nb_pred+1;
 }
 
-static void vp56_parse_mb_type_models(vp56_context_t *s)
+static void vp56_parse_mb_type_models(VP56Context *s)
 {
-    vp56_range_coder_t *c = &s->c;
-    vp56_model_t *model = s->modelp;
+    VP56RangeCoder *c = &s->c;
+    VP56Model *model = s->modelp;
     int i, ctx, type;
 
     for (ctx=0; ctx<3; ctx++) {
@@ -144,11 +147,11 @@ static void vp56_parse_mb_type_models(vp56_context_t *s)
     }
 }
 
-static vp56_mb_t vp56_parse_mb_type(vp56_context_t *s,
-                                    vp56_mb_t prev_type, int ctx)
+static VP56mb vp56_parse_mb_type(VP56Context *s,
+                                 VP56mb prev_type, int ctx)
 {
     uint8_t *mb_type_model = s->modelp->mb_type[ctx][prev_type];
-    vp56_range_coder_t *c = &s->c;
+    VP56RangeCoder *c = &s->c;
 
     if (vp56_rac_get_prob(c, mb_type_model[0]))
         return prev_type;
@@ -156,9 +159,9 @@ static vp56_mb_t vp56_parse_mb_type(vp56_context_t *s,
         return vp56_rac_get_tree(c, vp56_pmbt_tree, mb_type_model);
 }
 
-static void vp56_decode_4mv(vp56_context_t *s, int row, int col)
+static void vp56_decode_4mv(VP56Context *s, int row, int col)
 {
-    vp56_mv_t mv = {0,0};
+    VP56mv mv = {0,0};
     int type[4];
     int b;
 
@@ -173,7 +176,7 @@ static void vp56_decode_4mv(vp56_context_t *s, int row, int col)
     for (b=0; b<4; b++) {
         switch (type[b]) {
             case VP56_MB_INTER_NOVEC_PF:
-                s->mv[b] = (vp56_mv_t) {0,0};
+                s->mv[b] = (VP56mv) {0,0};
                 break;
             case VP56_MB_INTER_DELTA_PF:
                 s->parse_vector_adjustment(s, &s->mv[b]);
@@ -197,13 +200,13 @@ static void vp56_decode_4mv(vp56_context_t *s, int row, int col)
         s->mv[4].x = s->mv[5].x = RSHIFT(mv.x,2);
         s->mv[4].y = s->mv[5].y = RSHIFT(mv.y,2);
     } else {
-        s->mv[4] = s->mv[5] = (vp56_mv_t) {mv.x/4, mv.y/4};
+        s->mv[4] = s->mv[5] = (VP56mv) {mv.x/4, mv.y/4};
     }
 }
 
-static vp56_mb_t vp56_decode_mv(vp56_context_t *s, int row, int col)
+static VP56mb vp56_decode_mv(VP56Context *s, int row, int col)
 {
-    vp56_mv_t *mv, vect = {0,0};
+    VP56mv *mv, vect = {0,0};
     int ctx, b;
 
     ctx = vp56_get_vectors_predictors(s, row, col, VP56_FRAME_PREVIOUS);
@@ -258,16 +261,17 @@ static vp56_mb_t vp56_decode_mv(vp56_context_t *s, int row, int col)
     return s->mb_type;
 }
 
-static void vp56_add_predictors_dc(vp56_context_t *s, vp56_frame_t ref_frame)
+static void vp56_add_predictors_dc(VP56Context *s, VP56Frame ref_frame)
 {
     int idx = s->scantable.permutated[0];
-    int i;
+    int b;
 
-    for (i=0; i<6; i++) {
-        vp56_ref_dc_t *ab = &s->above_blocks[s->above_block_idx[i]];
-        vp56_ref_dc_t *lb = &s->left_block[vp56_b6to4[i]];
+    for (b=0; b<6; b++) {
+        VP56RefDc *ab = &s->above_blocks[s->above_block_idx[b]];
+        VP56RefDc *lb = &s->left_block[vp56_b6to4[b]];
         int count = 0;
        int dc = 0;
+        int i;
 
         if (ref_frame == lb->ref_frame) {
             dc += lb->dc_coeff;
@@ -277,55 +281,36 @@ static void vp56_add_predictors_dc(vp56_context_t *s, vp56_frame_t ref_frame)
             dc += ab->dc_coeff;
             count++;
         }
-        if (s->avctx->codec->id == CODEC_ID_VP5) {
-            if (count < 2 && ref_frame == ab[-1].ref_frame) {
-                dc += ab[-1].dc_coeff;
-                count++;
-            }
-            if (count < 2 && ref_frame == ab[1].ref_frame) {
-                dc += ab[1].dc_coeff;
-                count++;
-            }
-        }
+        if (s->avctx->codec->id == CODEC_ID_VP5)
+            for (i=0; i<2; i++)
+                if (count < 2 && ref_frame == ab[-1+2*i].ref_frame) {
+                    dc += ab[-1+2*i].dc_coeff;
+                    count++;
+                }
         if (count == 0)
-            dc = s->prev_dc[vp56_b2p[i]][ref_frame];
+            dc = s->prev_dc[vp56_b2p[b]][ref_frame];
         else if (count == 2)
             dc /= 2;
-        s->block_coeff[i][idx] += dc;
-        s->prev_dc[vp56_b2p[i]][ref_frame] = s->block_coeff[i][idx];
-        ab->dc_coeff = s->block_coeff[i][idx];
+        s->block_coeff[b][idx] += dc;
+        s->prev_dc[vp56_b2p[b]][ref_frame] = s->block_coeff[b][idx];
+        ab->dc_coeff = s->block_coeff[b][idx];
         ab->ref_frame = ref_frame;
-        lb->dc_coeff = s->block_coeff[i][idx];
+        lb->dc_coeff = s->block_coeff[b][idx];
         lb->ref_frame = ref_frame;
-        s->block_coeff[i][idx] *= s->dequant_dc;
+        s->block_coeff[b][idx] *= s->dequant_dc;
     }
 }
 
-static void vp56_edge_filter(vp56_context_t *s, uint8_t *yuv,
-                             int pix_inc, int line_inc, int t)
-{
-    int pix2_inc = 2 * pix_inc;
-    int i, v;
-
-    for (i=0; i<12; i++) {
-        v = (yuv[-pix2_inc] + 3*(yuv[0]-yuv[-pix_inc]) - yuv[pix_inc] + 4) >>3;
-        v = s->adjust(v, t);
-        yuv[-pix_inc] = av_clip_uint8(yuv[-pix_inc] + v);
-        yuv[0] = av_clip_uint8(yuv[0] - v);
-        yuv += line_inc;
-    }
-}
-
-static void vp56_deblock_filter(vp56_context_t *s, uint8_t *yuv,
+static void vp56_deblock_filter(VP56Context *s, uint8_t *yuv,
                                 int stride, int dx, int dy)
 {
     int t = vp56_filter_threshold[s->quantizer];
-    if (dx) vp56_edge_filter(s, yuv + 10-dx , 1, stride, t);
-    if (dy) vp56_edge_filter(s, yuv + stride*(10-dy), stride, 1, t);
+    if (dx) s->vp56dsp.edge_filter_hor(yuv + 10-dx , stride, t);
+    if (dy) s->vp56dsp.edge_filter_ver(yuv + stride*(10-dy), stride, t);
 }
 
-static void vp56_mc(vp56_context_t *s, int b, int plane, uint8_t *src,
+static void vp56_mc(VP56Context *s, int b, int plane, uint8_t *src,
                     int stride, int x, int y)
 {
     uint8_t *dst=s->framep[VP56_FRAME_CURRENT]->data[plane]+s->block_offset[b];
@@ -354,7 +339,7 @@ static void vp56_mc(vp56_context_t *s, int b, int plane, uint8_t *src,
 
     if (x<0 || x+12>=s->plane_width[plane] ||
         y<0 || y+12>=s->plane_height[plane]) {
-        ff_emulated_edge_mc(s->edge_emu_buffer,
+        s->dsp.emulated_edge_mc(s->edge_emu_buffer,
                             src + s->block_offset[b] + (dy-2)*stride + (dx-2),
                             stride, 12, 12, x, y,
                             s->plane_width[plane],
@@ -395,12 +380,12 @@ static void vp56_mc(vp56_context_t *s, int b, int plane, uint8_t *src,
     }
 }
 
-static void vp56_decode_mb(vp56_context_t *s, int row, int col, int is_alpha)
+static void vp56_decode_mb(VP56Context *s, int row, int col, int is_alpha)
 {
     AVFrame *frame_current, *frame_ref;
-    vp56_mb_t mb_type;
-    vp56_frame_t ref_frame;
-    int b, ab, b_max, plan, off;
+    VP56mb mb_type;
+    VP56Frame ref_frame;
+    int b, ab, b_max, plane, off;
 
     if (s->framep[VP56_FRAME_CURRENT]->key_frame)
         mb_type = VP56_MB_INTRA;
@@ -408,7 +393,7 @@ static void vp56_decode_mb(vp56_context_t *s, int row, int col, int is_alpha)
         mb_type = vp56_decode_mv(s, row, col);
     ref_frame = vp56_reference_frame[mb_type];
 
-    memset(s->block_coeff, 0, sizeof(s->block_coeff));
+    s->dsp.clear_blocks(*s->block_coeff);
 
     s->parse_coeff(s);
 
@@ -416,6 +401,8 @@
 
     frame_current = s->framep[VP56_FRAME_CURRENT];
     frame_ref = s->framep[ref_frame];
+    if (mb_type != VP56_MB_INTRA && !frame_ref->data[0])
+        return;
 
     ab = 6*is_alpha;
     b_max = 6 - 2*is_alpha;
@@ -423,22 +410,22 @@
     switch (mb_type) {
         case VP56_MB_INTRA:
             for (b=0; b<b_max; b++) {
-                plan = vp56_b2p[b+ab];
-                s->dsp.idct_put(frame_current->data[plan] + s->block_offset[b],
-                                s->stride[plan], s->block_coeff[b]);
+                plane = vp56_b2p[b+ab];
+                s->dsp.idct_put(frame_current->data[plane] + s->block_offset[b],
+                                s->stride[plane], s->block_coeff[b]);
             }
             break;
 
         case VP56_MB_INTER_NOVEC_PF:
        case VP56_MB_INTER_NOVEC_GF:
             for (b=0; b<b_max; b++) {
-                plan = vp56_b2p[b+ab];
+                plane = vp56_b2p[b+ab];
                 off = s->block_offset[b];
-                s->dsp.put_pixels_tab[1][0](frame_current->data[plan] + off,
-                                            frame_ref->data[plan] + off,
-                                            s->stride[plan], 8);
-                s->dsp.idct_add(frame_current->data[plan] + off,
-                                s->stride[plan], s->block_coeff[b]);
+                s->dsp.put_pixels_tab[1][0](frame_current->data[plane] + off,
+                                            frame_ref->data[plane] + off,
+                                            s->stride[plane], 8);
+                s->dsp.idct_add(frame_current->data[plane] + off,
+                                s->stride[plane], s->block_coeff[b]);
             }
             break;
 
@@ -452,11 +439,11 @@
             for (b=0; b<b_max; b++) {
                 int x_off = b==1 || b==3 ? 8 : 0;
                 int y_off = b==2 || b==3 ? 8 : 0;
-                plan = vp56_b2p[b+ab];
-                vp56_mc(s, b, plan, frame_ref->data[plan], s->stride[plan],
+                plane = vp56_b2p[b+ab];
+                vp56_mc(s, b, plane, frame_ref->data[plane], s->stride[plane],
                         16*col+x_off, 16*row+y_off);
-                s->dsp.idct_add(frame_current->data[plan] + s->block_offset[b],
-                                s->stride[plan], s->block_coeff[b]);
+                s->dsp.idct_add(frame_current->data[plane] + s->block_offset[b],
+                                s->stride[plane], s->block_coeff[b]);
             }
             break;
     }
@@ -464,7 +451,7 @@
 
 static int vp56_size_changed(AVCodecContext *avctx)
 {
-    vp56_context_t *s = avctx->priv_data;
+    VP56Context *s = avctx->priv_data;
     int stride = s->framep[VP56_FRAME_CURRENT]->linesize[0];
     int i;
 
@@ -480,10 +467,12 @@
     s->mb_height = (avctx->coded_height+15) / 16;
 
     if (s->mb_width > 1000 || s->mb_height > 1000) {
+        avcodec_set_dimensions(avctx, 0, 0);
         av_log(avctx, AV_LOG_ERROR, "picture too big\n");
         return -1;
     }
 
+    s->qscale_table = av_realloc(s->qscale_table, s->mb_width);
     s->above_blocks = av_realloc(s->above_blocks,
                                  (4*s->mb_width+6) * sizeof(*s->above_blocks));
     s->macroblocks = av_realloc(s->macroblocks,
@@ -497,16 +486,22 @@ static int vp56_size_changed(AVCodecContext *avctx)
     return 0;
 }
 
-int vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
-                      uint8_t *buf, int buf_size)
+int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
+                         AVPacket *avpkt)
 {
-    vp56_context_t *s = avctx->priv_data;
+    const uint8_t *buf = avpkt->data;
+    VP56Context *s = avctx->priv_data;
     AVFrame *const p = s->framep[VP56_FRAME_CURRENT];
-    int is_alpha, alpha_offset;
+    int remaining_buf_size = avpkt->size;
+    int is_alpha, av_uninit(alpha_offset);
 
     if (s->has_alpha) {
+        if (remaining_buf_size < 3)
+            return -1;
        alpha_offset = bytestream_get_be24(&buf);
-        buf_size -= 3;
+        remaining_buf_size -= 3;
+        if (remaining_buf_size < alpha_offset)
+            return -1;
     }
 
     for (is_alpha=0; is_alpha < 1+s->has_alpha; is_alpha++) {
@@ -517,10 +512,22 @@
 
         s->modelp = &s->models[is_alpha];
 
-        res = s->parse_header(s, buf, buf_size, &golden_frame);
+        res = s->parse_header(s, buf, remaining_buf_size, &golden_frame);
         if (!res)
             return -1;
 
+        if (res == 2) {
+            int i;
+            for (i = 0; i < 4; i++) {
+                if (s->frames[i].data[0])
+                    avctx->release_buffer(avctx, &s->frames[i]);
+            }
+            if (is_alpha) {
+                avcodec_set_dimensions(avctx, 0, 0);
+                return -1;
+            }
+        }
+
         if (!is_alpha) {
             p->reference = 1;
             if (avctx->get_buffer(avctx, p) < 0) {
@@ -536,30 +543,31 @@
         }
 
         if (p->key_frame) {
-            p->pict_type = FF_I_TYPE;
+            p->pict_type = AV_PICTURE_TYPE_I;
             s->default_models_init(s);
             for (block=0; block<s->mb_height*s->mb_width; block++)
                 s->macroblocks[block].type = VP56_MB_INTRA;
         } else {
-            p->pict_type = FF_P_TYPE;
+            p->pict_type = AV_PICTURE_TYPE_P;
             vp56_parse_mb_type_models(s);
             s->parse_vector_models(s);
             s->mb_type = VP56_MB_INTER_NOVEC_PF;
         }
 
-        s->parse_coeff_models(s);
+        if (s->parse_coeff_models(s))
+            goto next;
 
         memset(s->prev_dc, 0, sizeof(s->prev_dc));
         s->prev_dc[1][VP56_FRAME_CURRENT] = 128;
         s->prev_dc[2][VP56_FRAME_CURRENT] = 128;
 
         for (block=0; block < 4*s->mb_width+6; block++) {
-            s->above_blocks[block].ref_frame = -1;
+            s->above_blocks[block].ref_frame = VP56_FRAME_NONE;
             s->above_blocks[block].dc_coeff = 0;
             s->above_blocks[block].not_null_dc = 0;
         }
-        s->above_blocks[2*s->mb_width + 2].ref_frame = 0;
-        s->above_blocks[3*s->mb_width + 4].ref_frame = 0;
+        s->above_blocks[2*s->mb_width + 2].ref_frame = VP56_FRAME_CURRENT;
+        s->above_blocks[3*s->mb_width + 4].ref_frame = VP56_FRAME_CURRENT;
 
         stride_y  = p->linesize[0];
         stride_uv = p->linesize[1];
@@ -575,7 +583,7 @@
                 mb_row_flip = mb_row;
 
             for (block=0; block<4; block++) {
-                s->left_block[block].ref_frame = -1;
+                s->left_block[block].ref_frame = VP56_FRAME_NONE;
                 s->left_block[block].dc_coeff = 0;
                 s->left_block[block].not_null_dc = 0;
             }
@@ -611,6 +619,7 @@
             }
         }
 
+    next:
         if (p->key_frame || golden_frame) {
             if (s->framep[VP56_FRAME_GOLDEN]->data[0] &&
                 s->framep[VP56_FRAME_GOLDEN] != s->framep[VP56_FRAME_GOLDEN2])
@@ -622,7 +631,7 @@
             FFSWAP(AVFrame *, s->framep[VP56_FRAME_GOLDEN],
                               s->framep[VP56_FRAME_GOLDEN2]);
             buf += alpha_offset;
-            buf_size -= alpha_offset;
+            remaining_buf_size -= alpha_offset;
         }
     }
 
@@ -640,15 +649,18 @@
     FFSWAP(AVFrame *, s->framep[VP56_FRAME_CURRENT],
                       s->framep[VP56_FRAME_PREVIOUS]);
 
+    p->qstride = 0;
+    p->qscale_table = s->qscale_table;
+    p->qscale_type = FF_QSCALE_TYPE_VP56;
     *(AVFrame*)data = *p;
     *data_size = sizeof(AVFrame);
 
-    return buf_size;
+    return avpkt->size;
 }
 
-void vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
+av_cold void ff_vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
 {
-    vp56_context_t *s = avctx->priv_data;
+    VP56Context *s = avctx->priv_data;
     int i;
 
     s->avctx = avctx;
@@ -657,10 +669,9 @@
     if (avctx->idct_algo == FF_IDCT_AUTO)
         avctx->idct_algo = FF_IDCT_VP3;
     dsputil_init(&s->dsp, avctx);
+    ff_vp56dsp_init(&s->vp56dsp, avctx->codec->id);
     ff_init_scantable(s->dsp.idct_permutation, &s->scantable,ff_zigzag_direct);
 
-    avcodec_set_dimensions(avctx, 0, 0);
-
     for (i=0; i<4; i++)
         s->framep[i] = &s->frames[i];
     s->framep[VP56_FRAME_UNUSED] = s->framep[VP56_FRAME_GOLDEN];
@@ -686,13 +697,14 @@
     }
 }
 
-int vp56_free(AVCodecContext *avctx)
+av_cold int ff_vp56_free(AVCodecContext *avctx)
 {
-    vp56_context_t *s = avctx->priv_data;
+    VP56Context *s = avctx->priv_data;
 
-    av_free(s->above_blocks);
-    av_free(s->macroblocks);
-    av_free(s->edge_emu_buffer_alloc);
+    av_freep(&s->qscale_table);
+    av_freep(&s->above_blocks);
+    av_freep(&s->macroblocks);
+    av_freep(&s->edge_emu_buffer_alloc);
     if (s->framep[VP56_FRAME_GOLDEN]->data[0])
         avctx->release_buffer(avctx, s->framep[VP56_FRAME_GOLDEN]);
     if (s->framep[VP56_FRAME_GOLDEN2]->data[0])