X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;ds=sidebyside;f=libavcodec%2Fvp56.c;h=39d82e9c157882b2530c86dbc5c84eed5c1b72a8;hb=788544ff0ed6fe67fda80ad6d3a0796ace035584;hp=30306c42a2e86cc15ebb325bab71b1c004d30bbc;hpb=5fecfb7d58a12baf326e99f2d071060f2638d93c;p=ffmpeg

diff --git a/libavcodec/vp56.c b/libavcodec/vp56.c
index 30306c42a2e..39d82e9c157 100644
--- a/libavcodec/vp56.c
+++ b/libavcodec/vp56.c
@@ -1,57 +1,60 @@
-/**
- * @file vp56.c
- * VP5 and VP6 compatible video decoder (common features)
- *
+/*
  * Copyright (C) 2006  Aurelien Jacobs
  *
- * This file is part of FFmpeg.
+ * This file is part of Libav.
  *
- * FFmpeg is free software; you can redistribute it and/or
+ * Libav is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
- * FFmpeg is distributed in the hope that it will be useful,
+ * Libav is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
+ * License along with Libav; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+/**
+ * @file
+ * VP5 and VP6 compatible video decoder (common features)
+ */
+
 #include "avcodec.h"
 #include "bytestream.h"
-
+#include "internal.h"
+#include "h264chroma.h"
 #include "vp56.h"
 #include "vp56data.h"
 
 
-void vp56_init_dequant(vp56_context_t *s, int quantizer)
+void ff_vp56_init_dequant(VP56Context *s, int quantizer)
 {
     s->quantizer = quantizer;
-    s->dequant_dc = vp56_dc_dequant[quantizer] << 2;
-    s->dequant_ac = vp56_ac_dequant[quantizer] << 2;
+    s->dequant_dc = ff_vp56_dc_dequant[quantizer] << 2;
+    s->dequant_ac = ff_vp56_ac_dequant[quantizer] << 2;
 }
 
-static int vp56_get_vectors_predictors(vp56_context_t *s, int row, int col,
-                                       vp56_frame_t ref_frame)
+static int vp56_get_vectors_predictors(VP56Context *s, int row, int col,
+                                       VP56Frame ref_frame)
 {
     int nb_pred = 0;
-    vp56_mv_t vect[2] = {{0,0}, {0,0}};
+    VP56mv vect[2] = {{0,0}, {0,0}};
     int pos, offset;
-    vp56_mv_t mvp;
+    VP56mv mvp;
 
     for (pos=0; pos<12; pos++) {
-        mvp.x = col + vp56_candidate_predictor_pos[pos][0];
-        mvp.y = row + vp56_candidate_predictor_pos[pos][1];
+        mvp.x = col + ff_vp56_candidate_predictor_pos[pos][0];
+        mvp.y = row + ff_vp56_candidate_predictor_pos[pos][1];
         if (mvp.x < 0 || mvp.x >= s->mb_width ||
             mvp.y < 0 || mvp.y >= s->mb_height)
             continue;
         offset = mvp.x + s->mb_width*mvp.y;
-        if (vp56_reference_frame[s->macroblocks[offset].type] != ref_frame)
+        if (ff_vp56_reference_frame[s->macroblocks[offset].type] != ref_frame)
             continue;
         if ((s->macroblocks[offset].mv.x == vect[0].x &&
              s->macroblocks[offset].mv.y == vect[0].y) ||
@@ -73,17 +76,17 @@ static int vp56_get_vectors_predictors(vp56_context_t *s, int row, int col,
     return nb_pred+1;
 }
 
-static void vp56_parse_mb_type_models(vp56_context_t *s)
+static void vp56_parse_mb_type_models(VP56Context *s)
 {
-    vp56_range_coder_t *c = &s->c;
-    vp56_model_t *model = s->modelp;
+    VP56RangeCoder *c = &s->c;
+    VP56Model *model = s->modelp;
     int i, ctx, type;
 
     for (ctx=0; ctx<3; ctx++) {
         if (vp56_rac_get_prob(c, 174)) {
             int idx = vp56_rac_gets(c, 4);
             memcpy(model->mb_types_stats[ctx],
-                   vp56_pre_def_mb_type_stats[idx][ctx],
+                   ff_vp56_pre_def_mb_type_stats[idx][ctx],
                    sizeof(model->mb_types_stats[ctx]));
         }
         if (vp56_rac_get_prob(c, 254)) {
@@ -92,8 +95,8 @@ static void vp56_parse_mb_type_models(vp56_context_t *s)
                     if (vp56_rac_get_prob(c, 205)) {
                         int delta, sign = vp56_rac_get(c);
 
-                        delta = vp56_rac_get_tree(c, vp56_pmbtm_tree,
-                                                  vp56_mb_type_model_model);
+                        delta = vp56_rac_get_tree(c, ff_vp56_pmbtm_tree,
+                                                  ff_vp56_mb_type_model_model);
                         if (!delta)
                             delta = 4 * vp56_rac_gets(c, 7);
                         model->mb_types_stats[ctx][type][i] += (delta ^ -sign) + sign;
@@ -144,21 +147,21 @@ static void vp56_parse_mb_type_models(vp56_context_t *s)
     }
 }
 
-static vp56_mb_t vp56_parse_mb_type(vp56_context_t *s,
-                                    vp56_mb_t prev_type, int ctx)
+static VP56mb vp56_parse_mb_type(VP56Context *s,
+                                 VP56mb prev_type, int ctx)
 {
     uint8_t *mb_type_model = s->modelp->mb_type[ctx][prev_type];
-    vp56_range_coder_t *c = &s->c;
+    VP56RangeCoder *c = &s->c;
 
     if (vp56_rac_get_prob(c, mb_type_model[0]))
         return prev_type;
     else
-        return vp56_rac_get_tree(c, vp56_pmbt_tree, mb_type_model);
+        return vp56_rac_get_tree(c, ff_vp56_pmbt_tree, mb_type_model);
 }
 
-static void vp56_decode_4mv(vp56_context_t *s, int row, int col)
+static void vp56_decode_4mv(VP56Context *s, int row, int col)
 {
-    vp56_mv_t mv = {0,0};
+    VP56mv mv = {0,0};
     int type[4];
     int b;
 
@@ -173,7 +176,7 @@ static void vp56_decode_4mv(vp56_context_t *s, int row, int col)
     for (b=0; b<4; b++) {
         switch (type[b]) {
             case VP56_MB_INTER_NOVEC_PF:
-                s->mv[b] = (vp56_mv_t) {0,0};
+                s->mv[b] = (VP56mv) {0,0};
                 break;
             case VP56_MB_INTER_DELTA_PF:
                 s->parse_vector_adjustment(s, &s->mv[b]);
@@ -193,17 +196,17 @@ static void vp56_decode_4mv(vp56_context_t *s, int row, int col)
     s->macroblocks[row * s->mb_width + col].mv = s->mv[3];
 
     /* chroma vectors are average luma vectors */
-    if (s->avctx->codec->id == CODEC_ID_VP5) {
+    if (s->avctx->codec->id == AV_CODEC_ID_VP5) {
         s->mv[4].x = s->mv[5].x = RSHIFT(mv.x,2);
         s->mv[4].y = s->mv[5].y = RSHIFT(mv.y,2);
     } else {
-        s->mv[4] = s->mv[5] = (vp56_mv_t) {mv.x/4, mv.y/4};
+        s->mv[4] = s->mv[5] = (VP56mv) {mv.x/4, mv.y/4};
     }
 }
 
-static vp56_mb_t vp56_decode_mv(vp56_context_t *s, int row, int col)
+static VP56mb vp56_decode_mv(VP56Context *s, int row, int col)
 {
-    vp56_mv_t *mv, vect = {0,0};
+    VP56mv *mv, vect = {0,0};
     int ctx, b;
 
     ctx = vp56_get_vectors_predictors(s, row, col, VP56_FRAME_PREVIOUS);
@@ -258,14 +261,14 @@ static vp56_mb_t vp56_decode_mv(vp56_context_t *s, int row, int col)
     return s->mb_type;
 }
 
-static void vp56_add_predictors_dc(vp56_context_t *s, vp56_frame_t ref_frame)
+static void vp56_add_predictors_dc(VP56Context *s, VP56Frame ref_frame)
 {
-    int idx = s->scantable.permutated[0];
+    int idx = s->idct_scantable[0];
     int b;
 
     for (b=0; b<6; b++) {
-        vp56_ref_dc_t *ab = &s->above_blocks[s->above_block_idx[b]];
-        vp56_ref_dc_t *lb = &s->left_block[vp56_b6to4[b]];
+        VP56RefDc *ab = &s->above_blocks[s->above_block_idx[b]];
+        VP56RefDc *lb = &s->left_block[ff_vp56_b6to4[b]];
         int count = 0;
         int dc = 0;
         int i;
@@ -278,19 +281,19 @@ static void vp56_add_predictors_dc(vp56_context_t *s, vp56_frame_t ref_frame)
             dc += ab->dc_coeff;
             count++;
         }
-        if (s->avctx->codec->id == CODEC_ID_VP5)
+        if (s->avctx->codec->id == AV_CODEC_ID_VP5)
            for (i=0; i<2; i++)
                if (count < 2 && ref_frame == ab[-1+2*i].ref_frame) {
                    dc += ab[-1+2*i].dc_coeff;
                    count++;
                }
        if (count == 0)
-            dc = s->prev_dc[vp56_b2p[b]][ref_frame];
+            dc = s->prev_dc[ff_vp56_b2p[b]][ref_frame];
        else if (count == 2)
            dc /= 2;
 
        s->block_coeff[b][idx] += dc;
-        s->prev_dc[vp56_b2p[b]][ref_frame] = s->block_coeff[b][idx];
+        s->prev_dc[ff_vp56_b2p[b]][ref_frame] = s->block_coeff[b][idx];
         ab->dc_coeff = s->block_coeff[b][idx];
         ab->ref_frame = ref_frame;
         lb->dc_coeff = s->block_coeff[b][idx];
@@ -299,33 +302,18 @@ static void vp56_add_predictors_dc(vp56_context_t *s, vp56_frame_t ref_frame)
     }
 }
 
-static void vp56_edge_filter(vp56_context_t *s, uint8_t *yuv,
-                             int pix_inc, int line_inc, int t)
+static void vp56_deblock_filter(VP56Context *s, uint8_t *yuv,
+                                ptrdiff_t stride, int dx, int dy)
 {
-    int pix2_inc = 2 * pix_inc;
-    int i, v;
-
-    for (i=0; i<12; i++) {
-        v = (yuv[-pix2_inc] + 3*(yuv[0]-yuv[-pix_inc]) - yuv[pix_inc] + 4) >>3;
-        v = s->adjust(v, t);
-        yuv[-pix_inc] = av_clip_uint8(yuv[-pix_inc] + v);
-        yuv[0] = av_clip_uint8(yuv[0] - v);
-        yuv += line_inc;
-    }
-}
-
-static void vp56_deblock_filter(vp56_context_t *s, uint8_t *yuv,
-                                int stride, int dx, int dy)
-{
-    int t = vp56_filter_threshold[s->quantizer];
-    if (dx) vp56_edge_filter(s, yuv +         10-dx , 1,      stride, t);
-    if (dy) vp56_edge_filter(s, yuv + stride*(10-dy), stride, 1,      t);
+    int t = ff_vp56_filter_threshold[s->quantizer];
+    if (dx) s->vp56dsp.edge_filter_hor(yuv +         10-dx , stride, t);
+    if (dy) s->vp56dsp.edge_filter_ver(yuv + stride*(10-dy), stride, t);
 }
 
-static void vp56_mc(vp56_context_t *s, int b, int plane, uint8_t *src,
-                    int stride, int x, int y)
+static void vp56_mc(VP56Context *s, int b, int plane, uint8_t *src,
+                    ptrdiff_t stride, int x, int y)
 {
-    uint8_t *dst=s->framep[VP56_FRAME_CURRENT]->data[plane]+s->block_offset[b];
+    uint8_t *dst = s->frames[VP56_FRAME_CURRENT]->data[plane] + s->block_offset[b];
     uint8_t *src_block;
     int src_offset;
     int overlap_offset = 0;
@@ -336,7 +324,7 @@ static void vp56_mc(vp56_context_t *s, int b, int plane, uint8_t *src,
 
     if (s->avctx->skip_loop_filter >= AVDISCARD_ALL ||
         (s->avctx->skip_loop_filter >= AVDISCARD_NONKEY
-         && !s->framep[VP56_FRAME_CURRENT]->key_frame))
+         && !s->frames[VP56_FRAME_CURRENT]->key_frame))
         deblock_filtering = 0;
 
     dx = s->mv[b].x / s->vp56_coord_div[b];
@@ -351,9 +339,10 @@ static void vp56_mc(vp56_context_t *s, int b, int plane, uint8_t *src,
 
     if (x<0 || x+12>=s->plane_width[plane] ||
         y<0 || y+12>=s->plane_height[plane]) {
-        ff_emulated_edge_mc(s->edge_emu_buffer,
+        s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
                             src + s->block_offset[b] + (dy-2)*stride + (dx-2),
-                            stride, 12, 12, x, y,
+                            stride, stride,
+                            12, 12, x, y,
                             s->plane_width[plane],
                             s->plane_height[plane]);
         src_block = s->edge_emu_buffer;
@@ -361,9 +350,9 @@
     } else if (deblock_filtering) {
         /* only need a 12x12 block, but there is no such dsp function, */
         /* so copy a 16x12 block */
-        s->dsp.put_pixels_tab[0][0](s->edge_emu_buffer,
-                                    src + s->block_offset[b] + (dy-2)*stride + (dx-2),
-                                    stride, 12);
+        s->hdsp.put_pixels_tab[0][0](s->edge_emu_buffer,
+                                     src + s->block_offset[b] + (dy-2)*stride + (dx-2),
+                                     stride, 12);
         src_block = s->edge_emu_buffer;
         src_offset = 2 + 2*stride;
     } else {
@@ -384,35 +373,35 @@ static void vp56_mc(vp56_context_t *s, int b, int plane, uint8_t *src,
             s->filter(s, dst, src_block, src_offset, src_offset+overlap_offset,
                       stride, s->mv[b], mask, s->filter_selection, b<4);
         else
-            s->dsp.put_no_rnd_pixels_l2[1](dst, src_block+src_offset,
+            s->vp3dsp.put_no_rnd_pixels_l2(dst, src_block+src_offset,
                                            src_block+src_offset+overlap_offset,
                                            stride, 8);
     } else {
-        s->dsp.put_pixels_tab[1][0](dst, src_block+src_offset, stride, 8);
+        s->hdsp.put_pixels_tab[1][0](dst, src_block+src_offset,
+                                     stride, 8);
     }
 }
 
-static void vp56_decode_mb(vp56_context_t *s, int row, int col, int is_alpha)
+static void vp56_decode_mb(VP56Context *s, int row, int col, int is_alpha)
 {
     AVFrame *frame_current, *frame_ref;
-    vp56_mb_t mb_type;
-    vp56_frame_t ref_frame;
+    VP56mb mb_type;
+    VP56Frame ref_frame;
     int b, ab, b_max, plane, off;
 
-    if (s->framep[VP56_FRAME_CURRENT]->key_frame)
+    if (s->frames[VP56_FRAME_CURRENT]->key_frame)
         mb_type = VP56_MB_INTRA;
     else
         mb_type = vp56_decode_mv(s, row, col);
-    ref_frame = vp56_reference_frame[mb_type];
-
-    s->dsp.clear_blocks(*s->block_coeff);
+    ref_frame = ff_vp56_reference_frame[mb_type];
 
     s->parse_coeff(s);
 
     vp56_add_predictors_dc(s, ref_frame);
 
-    frame_current = s->framep[VP56_FRAME_CURRENT];
-    frame_ref = s->framep[ref_frame];
+    frame_current = s->frames[VP56_FRAME_CURRENT];
+    frame_ref = s->frames[ref_frame];
+    if (mb_type != VP56_MB_INTRA && !frame_ref->data[0])
+        return;
 
     ab = 6*is_alpha;
     b_max = 6 - 2*is_alpha;
@@ -420,8 +409,8 @@
     switch (mb_type) {
         case VP56_MB_INTRA:
             for (b=0; b<b_max; b++) {
-                plane = vp56_b2p[b+ab];
-                s->dsp.idct_put(frame_current->data[plane] + s->block_offset[b],
+                plane = ff_vp56_b2p[b+ab];
+                s->vp3dsp.idct_put(frame_current->data[plane] + s->block_offset[b],
                                 s->stride[plane], s->block_coeff[b]);
             }
             break;
@@ -429,12 +418,12 @@
         case VP56_MB_INTER_NOVEC_PF:
        case VP56_MB_INTER_NOVEC_GF:
            for (b=0; b<b_max; b++) {
-                plane = vp56_b2p[b+ab];
+                plane = ff_vp56_b2p[b+ab];
                off = s->block_offset[b];
-                s->dsp.put_pixels_tab[1][0](frame_current->data[plane] + off,
-                                            frame_ref->data[plane] + off,
-                                            s->stride[plane], 8);
-                s->dsp.idct_add(frame_current->data[plane] + off,
+                s->hdsp.put_pixels_tab[1][0](frame_current->data[plane] + off,
+                                             frame_ref->data[plane] + off,
+                                             s->stride[plane], 8);
+                s->vp3dsp.idct_add(frame_current->data[plane] + off,
                                 s->stride[plane], s->block_coeff[b]);
            }
            break;
@@ -449,20 +438,25 @@
            for (b=0; b<b_max; b++) {
                int x_off = b==1 || b==3 ? 8 : 0;
                int y_off = b==2 || b==3 ? 8 : 0;
-                plane = vp56_b2p[b+ab];
+                plane = ff_vp56_b2p[b+ab];
                vp56_mc(s, b, plane, frame_ref->data[plane], s->stride[plane],
                        16*col+x_off, 16*row+y_off);
-                s->dsp.idct_add(frame_current->data[plane] + s->block_offset[b],
+                s->vp3dsp.idct_add(frame_current->data[plane] + s->block_offset[b],
                                 s->stride[plane], s->block_coeff[b]);
            }
            break;
    }
+
+    if (is_alpha) {
+        s->block_coeff[4][0] = 0;
+        s->block_coeff[5][0] = 0;
+    }
 }
 
 static int vp56_size_changed(AVCodecContext *avctx)
 {
-    vp56_context_t *s = avctx->priv_data;
-    int stride = s->framep[VP56_FRAME_CURRENT]->linesize[0];
+    VP56Context *s = avctx->priv_data;
+    int stride = s->frames[VP56_FRAME_CURRENT]->linesize[0];
     int i;
 
     s->plane_width[0] = s->plane_width[3] = avctx->coded_width;
@@ -471,14 +465,15 @@
     s->plane_height[1] = s->plane_height[2] = avctx->coded_height/2;
 
     for (i=0; i<4; i++)
-        s->stride[i] = s->flip * s->framep[VP56_FRAME_CURRENT]->linesize[i];
+        s->stride[i] = s->flip * s->frames[VP56_FRAME_CURRENT]->linesize[i];
 
     s->mb_width = (avctx->coded_width +15) / 16;
     s->mb_height = (avctx->coded_height+15) / 16;
 
     if (s->mb_width > 1000 || s->mb_height > 1000) {
+        ff_set_dimensions(avctx, 0, 0);
         av_log(avctx, AV_LOG_ERROR, "picture too big\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
 
     s->above_blocks = av_realloc(s->above_blocks,
@@ -494,58 +489,79 @@
     return 0;
 }
 
-int vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
-                      const uint8_t *buf, int buf_size)
+int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
+                         AVPacket *avpkt)
 {
-    vp56_context_t *s = avctx->priv_data;
-    AVFrame *const p = s->framep[VP56_FRAME_CURRENT];
-    int remaining_buf_size = buf_size;
-    int is_alpha, alpha_offset;
+    const uint8_t *buf = avpkt->data;
+    VP56Context *s = avctx->priv_data;
+    AVFrame *const p = s->frames[VP56_FRAME_CURRENT];
+    int remaining_buf_size = avpkt->size;
+    int is_alpha, av_uninit(alpha_offset);
+    int res;
 
     if (s->has_alpha) {
+        if (remaining_buf_size < 3)
+            return AVERROR_INVALIDDATA;
         alpha_offset = bytestream_get_be24(&buf);
         remaining_buf_size -= 3;
+        if (remaining_buf_size < alpha_offset)
+            return AVERROR_INVALIDDATA;
     }
 
     for (is_alpha=0; is_alpha < 1+s->has_alpha; is_alpha++) {
         int mb_row, mb_col, mb_row_flip, mb_offset = 0;
-        int block, y, uv, stride_y, stride_uv;
+        int block, y, uv;
+        ptrdiff_t stride_y, stride_uv;
         int golden_frame = 0;
-        int res;
 
         s->modelp = &s->models[is_alpha];
 
         res = s->parse_header(s, buf, remaining_buf_size, &golden_frame);
-        if (!res)
-            return -1;
+        if (res < 0) {
+            int i;
+            for (i = 0; i < 4; i++)
+                av_frame_unref(s->frames[i]);
+            return res;
+        }
+
+        if (res == VP56_SIZE_CHANGE) {
+            int i;
+            for (i = 0; i < 4; i++)
+                av_frame_unref(s->frames[i]);
+            if (is_alpha) {
+                ff_set_dimensions(avctx, 0, 0);
+                return AVERROR_INVALIDDATA;
+            }
+        }
 
         if (!is_alpha) {
-            p->reference = 1;
-            if (avctx->get_buffer(avctx, p) < 0) {
+            int ret = ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF);
+            if (ret < 0) {
                 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-                return -1;
+                return ret;
             }
 
-            if (res == 2)
+            if (res == VP56_SIZE_CHANGE)
                 if (vp56_size_changed(avctx)) {
-                    avctx->release_buffer(avctx, p);
-                    return -1;
+                    av_frame_unref(p);
+                    return AVERROR_INVALIDDATA;
                 }
         }
 
         if (p->key_frame) {
-            p->pict_type = FF_I_TYPE;
+            p->pict_type = AV_PICTURE_TYPE_I;
             s->default_models_init(s);
             for (block=0; block<s->mb_height*s->mb_width; block++)
                 s->macroblocks[block].type = VP56_MB_INTRA;
         } else {
-            p->pict_type = FF_P_TYPE;
+            p->pict_type = AV_PICTURE_TYPE_P;
             vp56_parse_mb_type_models(s);
             s->parse_vector_models(s);
             s->mb_type = VP56_MB_INTER_NOVEC_PF;
         }
 
-        s->parse_coeff_models(s);
+        if (s->parse_coeff_models(s))
+            goto next;
 
         memset(s->prev_dc, 0, sizeof(s->prev_dc));
         s->prev_dc[1][VP56_FRAME_CURRENT] = 128;
@@ -609,60 +625,57 @@ int vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
             }
         }
 
+    next:
         if (p->key_frame || golden_frame) {
-            if (s->framep[VP56_FRAME_GOLDEN]->data[0] &&
-                s->framep[VP56_FRAME_GOLDEN] != s->framep[VP56_FRAME_GOLDEN2])
-                avctx->release_buffer(avctx, s->framep[VP56_FRAME_GOLDEN]);
-            s->framep[VP56_FRAME_GOLDEN] = p;
+            av_frame_unref(s->frames[VP56_FRAME_GOLDEN]);
+            if ((res = av_frame_ref(s->frames[VP56_FRAME_GOLDEN], p)) < 0)
+                return res;
         }
 
         if (s->has_alpha) {
-            FFSWAP(AVFrame *, s->framep[VP56_FRAME_GOLDEN],
-                   s->framep[VP56_FRAME_GOLDEN2]);
+            FFSWAP(AVFrame *, s->frames[VP56_FRAME_GOLDEN],
+                   s->frames[VP56_FRAME_GOLDEN2]);
             buf += alpha_offset;
             remaining_buf_size -= alpha_offset;
         }
     }
 
-    if (s->framep[VP56_FRAME_PREVIOUS] == s->framep[VP56_FRAME_GOLDEN] ||
-        s->framep[VP56_FRAME_PREVIOUS] == s->framep[VP56_FRAME_GOLDEN2]) {
-        if (s->framep[VP56_FRAME_UNUSED] != s->framep[VP56_FRAME_GOLDEN] &&
-            s->framep[VP56_FRAME_UNUSED] != s->framep[VP56_FRAME_GOLDEN2])
-            FFSWAP(AVFrame *, s->framep[VP56_FRAME_PREVIOUS],
-                   s->framep[VP56_FRAME_UNUSED]);
-        else
-            FFSWAP(AVFrame *, s->framep[VP56_FRAME_PREVIOUS],
-                   s->framep[VP56_FRAME_UNUSED2]);
-    } else if (s->framep[VP56_FRAME_PREVIOUS]->data[0])
-        avctx->release_buffer(avctx, s->framep[VP56_FRAME_PREVIOUS]);
-    FFSWAP(AVFrame *, s->framep[VP56_FRAME_CURRENT],
-           s->framep[VP56_FRAME_PREVIOUS]);
+    av_frame_unref(s->frames[VP56_FRAME_PREVIOUS]);
+    FFSWAP(AVFrame *, s->frames[VP56_FRAME_CURRENT],
+           s->frames[VP56_FRAME_PREVIOUS]);
 
-    *(AVFrame*)data = *p;
-    *data_size = sizeof(AVFrame);
+    if ((res = av_frame_ref(data, p)) < 0)
+        return res;
+    *got_frame = 1;
 
-    return buf_size;
+    return avpkt->size;
 }
 
-av_cold void vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
+av_cold int ff_vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
 {
-    vp56_context_t *s = avctx->priv_data;
+    VP56Context *s = avctx->priv_data;
     int i;
 
     s->avctx = avctx;
-    avctx->pix_fmt = has_alpha ? PIX_FMT_YUVA420P : PIX_FMT_YUV420P;
-
-    if (avctx->idct_algo == FF_IDCT_AUTO)
-        avctx->idct_algo = FF_IDCT_VP3;
-    dsputil_init(&s->dsp, avctx);
-    ff_init_scantable(s->dsp.idct_permutation, &s->scantable,ff_zigzag_direct);
-
-    avcodec_set_dimensions(avctx, 0, 0);
+    avctx->pix_fmt = has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;
+
+    ff_h264chroma_init(&s->h264chroma, 8);
+    ff_hpeldsp_init(&s->hdsp, avctx->flags);
+    ff_videodsp_init(&s->vdsp, 8);
+    ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
+    for (i = 0; i < 64; i++) {
+#define TRANSPOSE(x) (x >> 3) | ((x & 7) << 3)
+        s->idct_scantable[i] = TRANSPOSE(ff_zigzag_direct[i]);
+#undef TRANSPOSE
+    }
 
-    for (i=0; i<4; i++)
-        s->framep[i] = &s->frames[i];
-    s->framep[VP56_FRAME_UNUSED]  = s->framep[VP56_FRAME_GOLDEN];
-    s->framep[VP56_FRAME_UNUSED2] = s->framep[VP56_FRAME_GOLDEN2];
+    for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
+        s->frames[i] = av_frame_alloc();
+        if (!s->frames[i]) {
+            ff_vp56_free(avctx);
+            return AVERROR(ENOMEM);
+        }
+    }
     s->edge_emu_buffer_alloc = NULL;
 
     s->above_blocks = NULL;
@@ -682,20 +695,21 @@ av_cold void vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
         s->frbi = 0;
         s->srbi = 2;
     }
+
+    return 0;
 }
 
-av_cold int vp56_free(AVCodecContext *avctx)
+av_cold int ff_vp56_free(AVCodecContext *avctx)
 {
-    vp56_context_t *s = avctx->priv_data;
+    VP56Context *s = avctx->priv_data;
+    int i;
+
+    av_freep(&s->above_blocks);
+    av_freep(&s->macroblocks);
+    av_freep(&s->edge_emu_buffer_alloc);
+
+    for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
+        av_frame_free(&s->frames[i]);
 
-    av_free(s->above_blocks);
-    av_free(s->macroblocks);
-    av_free(s->edge_emu_buffer_alloc);
-    if (s->framep[VP56_FRAME_GOLDEN]->data[0])
-        avctx->release_buffer(avctx, s->framep[VP56_FRAME_GOLDEN]);
-    if (s->framep[VP56_FRAME_GOLDEN2]->data[0])
-        avctx->release_buffer(avctx, s->framep[VP56_FRAME_GOLDEN2]);
-    if (s->framep[VP56_FRAME_PREVIOUS]->data[0])
-        avctx->release_buffer(avctx, s->framep[VP56_FRAME_PREVIOUS]);
     return 0;
 }
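A standalone sketch, not part of the commit above, of what the new scantable setup in ff_vp56_init computes: instead of building s->scantable through ff_init_scantable() with the IDCT permutation, the decoder now fills s->idct_scantable by transposing each zigzag position, mapping row/column (r,c) of the 8x8 block to (c,r). The zigzag[] table below duplicates the standard zigzag scan order used by ff_zigzag_direct; the program itself and its names are illustrative only.

/* Illustrative only: mirrors the TRANSPOSE(ff_zigzag_direct[i]) loop added to
 * ff_vp56_init in the diff above. */
#include <stdint.h>
#include <stdio.h>

static const uint8_t zigzag[64] = {   /* standard 8x8 zigzag scan order */
     0,  1,  8, 16,  9,  2,  3, 10,
    17, 24, 32, 25, 18, 11,  4,  5,
    12, 19, 26, 33, 40, 48, 41, 34,
    27, 20, 13,  6,  7, 14, 21, 28,
    35, 42, 49, 56, 57, 50, 43, 36,
    29, 22, 15, 23, 30, 37, 44, 51,
    58, 59, 52, 45, 38, 31, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63,
};

int main(void)
{
    uint8_t idct_scantable[64];

    for (int i = 0; i < 64; i++) {
        uint8_t x = zigzag[i];
        /* (x >> 3) is the row, (x & 7) the column; swapping them transposes
         * the coefficient position, which is exactly what TRANSPOSE(x) does. */
        idct_scantable[i] = (x >> 3) | ((x & 7) << 3);
    }

    for (int i = 0; i < 64; i++)
        printf("%2d%s", idct_scantable[i], (i & 7) == 7 ? "\n" : " ");
    return 0;
}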