X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fvp3.c;h=42bc63e5017548bb8ca31dfd4b43b63378679f28;hb=5e239c7f9e57d09c6a4c1d5762f441950f8d979c;hp=13a3bda4a5bce3896387c0008ae533503fc1a3dd;hpb=ec6402b7c595c3ceed6d1b8c1b75c6aa8336e052;p=ffmpeg

diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c
index 13a3bda4a5b..42bc63e5017 100644
--- a/libavcodec/vp3.c
+++ b/libavcodec/vp3.c
@@ -35,6 +35,7 @@
 
 #include "libavutil/imgutils.h"
 #include "avcodec.h"
+#include "internal.h"
 #include "dsputil.h"
 #include "get_bits.h"
 
@@ -44,8 +45,6 @@
 
 #define FRAGMENT_PIXELS 8
 
-static av_cold int vp3_decode_end(AVCodecContext *avctx);
-
 //FIXME split things out into their own arrays
 typedef struct Vp3Fragment {
     int16_t dc;
@@ -225,7 +224,7 @@ typedef struct Vp3DecodeContext {
 
     /* these arrays need to be on 16-byte boundaries since SSE2 operations
      * index into them */
-    DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64];     //
+static void vp3_decode_flush(AVCodecContext *avctx)
+{
+    Vp3DecodeContext *s = avctx->priv_data;
+
+    if (s->golden_frame.data[0]) {
+        if (s->golden_frame.data[0] == s->last_frame.data[0])
+            memset(&s->last_frame, 0, sizeof(AVFrame));
+        if (s->current_frame.data[0] == s->golden_frame.data[0])
+            memset(&s->current_frame, 0, sizeof(AVFrame));
+        ff_thread_release_buffer(avctx, &s->golden_frame);
+    }
+    if (s->last_frame.data[0]) {
+        if (s->current_frame.data[0] == s->last_frame.data[0])
+            memset(&s->current_frame, 0, sizeof(AVFrame));
+        ff_thread_release_buffer(avctx, &s->last_frame);
+    }
+    if (s->current_frame.data[0])
+        ff_thread_release_buffer(avctx, &s->current_frame);
+}
+
+static av_cold int vp3_decode_end(AVCodecContext *avctx)
+{
+    Vp3DecodeContext *s = avctx->priv_data;
+    int i;
+
+    av_free(s->superblock_coding);
+    av_free(s->all_fragments);
+    av_free(s->coded_fragment_list[0]);
+    av_free(s->dct_tokens_base);
+    av_free(s->superblock_fragments);
+    av_free(s->macroblock_coding);
+    av_free(s->motion_val[0]);
+    av_free(s->motion_val[1]);
+    av_free(s->edge_emu_buffer);
+
+    if (avctx->internal->is_copy)
+        return 0;
+
+    for (i = 0; i < 16; i++) {
+        ff_free_vlc(&s->dc_vlc[i]);
+        ff_free_vlc(&s->ac_vlc_1[i]);
+        ff_free_vlc(&s->ac_vlc_2[i]);
+        ff_free_vlc(&s->ac_vlc_3[i]);
+        ff_free_vlc(&s->ac_vlc_4[i]);
+    }
+
+    ff_free_vlc(&s->superblock_run_length_vlc);
+    ff_free_vlc(&s->fragment_run_length_vlc);
+    ff_free_vlc(&s->mode_code_vlc);
+    ff_free_vlc(&s->motion_vector_vlc);
+
+    /* release all frames */
+    vp3_decode_flush(avctx);
+
+    return 0;
+}
+
 /*
  * This function sets up all of the various blocks mappings:
  * superblocks <-> fragments, macroblocks <-> fragments,
@@ -890,7 +946,7 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
         /* decode a VLC into a token */
         token = get_vlc2(gb, vlc_table, 11, 3);
         /* use the token to get a zero run, a coefficient, and an eob run */
-        if (token <= 6) {
+        if ((unsigned) token <= 6U) {
             eob_run = eob_run_base[token];
             if (eob_run_get_bits[token])
                 eob_run += get_bits(gb, eob_run_get_bits[token]);
@@ -908,7 +964,7 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
                 coeff_i += eob_run;
                 eob_run = 0;
             }
-        } else {
+        } else if (token >= 0) {
             bits_to_get = coeff_get_bits[token];
             if (bits_to_get)
                 bits_to_get = get_bits(gb, bits_to_get);
@@ -942,6 +998,10 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
             for (i = coeff_index+1; i <= coeff_index+zero_run; i++)
                 s->num_coded_frags[plane][i]--;
             coeff_i++;
+        } else {
+            av_log(s->avctx, AV_LOG_ERROR,
+                   "Invalid token %d\n", token);
+            return -1;
         }
     }
 
@@ -991,6 +1051,8 @@ static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
     /* unpack the Y plane DC coefficients */
     residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
                                    0, residual_eob_run);
+    if (residual_eob_run < 0)
+        return residual_eob_run;
 
     /* reverse prediction of the Y-plane DC coefficients */
     reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);
@@ -998,8 +1060,12 @@ static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
     /* unpack the C plane DC coefficients */
     residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
                                    1, residual_eob_run);
+    if (residual_eob_run < 0)
+        return residual_eob_run;
     residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
                                    2, residual_eob_run);
+    if (residual_eob_run < 0)
+        return residual_eob_run;
 
     /* reverse prediction of the C-plane DC coefficients */
     if (!(s->avctx->flags & CODEC_FLAG_GRAY))
@@ -1036,11 +1102,17 @@ static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
 
     for (i = 1; i <= 63; i++) {
         residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
                                        0, residual_eob_run);
+        if (residual_eob_run < 0)
+            return residual_eob_run;
         residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
                                        1, residual_eob_run);
+        if (residual_eob_run < 0)
+            return residual_eob_run;
         residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
                                        2, residual_eob_run);
+        if (residual_eob_run < 0)
+            return residual_eob_run;
     }
 
     return 0;
@@ -1291,6 +1363,10 @@ static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag,
         case 1: // zero run
             s->dct_tokens[plane][i]++;
            i += (token >> 2) & 0x7f;
+            if (i > 63) {
+                av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n");
+                return i;
+            }
            block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
            i++;
            break;
@@ -1302,6 +1378,8 @@ static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag,
             return i;
         }
     } while (i < 64);
+    // return value is expected to be a valid level
+    i--;
 end:
     // the actual DC+prediction is in the fragment structure
     block[0] = frag->dc * s->qmat[0][inter][plane][0];
@@ -1313,10 +1391,10 @@ end:
  */
 static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
 {
-    int h, cy;
-    int offset[4];
+    int h, cy, i;
+    int offset[AV_NUM_DATA_POINTERS];
 
-    if (HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
+    if (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
         int y_flipped = s->flipped_image ? s->avctx->height-y : y;
 
         // At the end of the frame, report INT_MAX instead of the height of the frame.
@@ -1340,7 +1418,8 @@ static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
     offset[0] = s->current_frame.linesize[0]*y;
     offset[1] = s->current_frame.linesize[1]*cy;
     offset[2] = s->current_frame.linesize[2]*cy;
-    offset[3] = 0;
+    for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
+        offset[i] = 0;
 
     emms_c();
     s->avctx->draw_horiz_band(s->avctx, &s->current_frame, offset, y, 3, h);
@@ -1400,7 +1479,7 @@ static void render_slice(Vp3DecodeContext *s, int slice)
         int fragment_width = s->fragment_width[!!plane];
         int fragment_height = s->fragment_height[!!plane];
         int fragment_start = s->fragment_start[plane];
-        int do_await = !plane && HAVE_PTHREADS && (s->avctx->active_thread_type&FF_THREAD_FRAME);
+        int do_await = !plane && HAVE_THREADS && (s->avctx->active_thread_type&FF_THREAD_FRAME);
 
         if (!s->flipped_image) stride = -stride;
         if (CONFIG_GRAY && plane && (s->avctx->flags & CODEC_FLAG_GRAY))
@@ -1493,7 +1572,10 @@ static void render_slice(Vp3DecodeContext *s, int slice)
                 /* invert DCT and place (or add) in final output */
 
                 if (s->all_fragments[i].coding_method == MODE_INTRA) {
-                    vp3_dequant(s, s->all_fragments + i, plane, 0, block);
+                    int index;
+                    index = vp3_dequant(s, s->all_fragments + i, plane, 0, block);
+                    if (index > 63)
+                        continue;
                     if(s->avctx->idct_algo!=FF_IDCT_VP3)
                         block[0] += 128<<3;
                     s->dsp.idct_put(
@@ -1501,7 +1583,10 @@ static void render_slice(Vp3DecodeContext *s, int slice)
                         stride,
                         block);
                 } else {
-                    if (vp3_dequant(s, s->all_fragments + i, plane, 1, block)) {
+                    int index = vp3_dequant(s, s->all_fragments + i, plane, 1, block);
+                    if (index > 63)
+                        continue;
+                    if (index > 0) {
                         s->dsp.idct_add(
                             output_plane + first_pixel,
                             stride,
@@ -1571,9 +1656,6 @@ static av_cold int allocate_tables(AVCodecContext *avctx)
     return 0;
 }
 
-/*
- * This is the ffmpeg/libavcodec API init function.
- */
 static av_cold int vp3_decode_init(AVCodecContext *avctx)
 {
     Vp3DecodeContext *s = avctx->priv_data;
@@ -1595,7 +1677,7 @@ static av_cold int vp3_decode_init(AVCodecContext *avctx)
 
     avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
     if(avctx->idct_algo==FF_IDCT_AUTO)
         avctx->idct_algo=FF_IDCT_VP3;
-    dsputil_init(&s->dsp, avctx);
+    ff_dsputil_init(&s->dsp, avctx);
     ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
 
@@ -1777,10 +1859,15 @@ static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *
     Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data;
     int qps_changed = 0, i, err;
 
+#define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field)
+
     if (!s1->current_frame.data[0]
         ||s->width != s1->width
-        ||s->height!= s1->height)
+        ||s->height!= s1->height) {
+        if (s != s1)
+            copy_fields(s, s1, golden_frame, current_frame);
         return -1;
+    }
 
     if (s != s1) {
         // init tables if the first frame hasn't been decoded
@@ -1796,8 +1883,6 @@ static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *
             memcpy(s->motion_val[1], s1->motion_val[1], c_fragment_count * sizeof(*s->motion_val[1]));
     }
 
-#define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field)
-
     // copy previous frame data
     copy_fields(s, s1, golden_frame, dsp);
 
@@ -1822,9 +1907,6 @@ static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *
     return 0;
 }
 
-/*
- * This is the ffmpeg/libavcodec API frame decode function.
- */
 static int vp3_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
@@ -1965,7 +2047,7 @@ static int vp3_decode_frame(AVCodecContext *avctx,
     *data_size=sizeof(AVFrame);
     *(AVFrame*)data= s->current_frame;
 
-    if (!HAVE_PTHREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME))
+    if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME))
         update_frames(avctx);
 
     return buf_size;
@@ -1973,59 +2055,12 @@ static int vp3_decode_frame(AVCodecContext *avctx,
 error:
     ff_thread_report_progress(&s->current_frame, INT_MAX, 0);
 
-    if (!HAVE_PTHREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME))
+    if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME))
         avctx->release_buffer(avctx, &s->current_frame);
 
     return -1;
 }
 
-/*
- * This is the ffmpeg/libavcodec API module cleanup function.
- */
-static av_cold int vp3_decode_end(AVCodecContext *avctx)
-{
-    Vp3DecodeContext *s = avctx->priv_data;
-    int i;
-
-    if (avctx->is_copy && !s->current_frame.data[0])
-        return 0;
-
-    av_free(s->superblock_coding);
-    av_free(s->all_fragments);
-    av_free(s->coded_fragment_list[0]);
-    av_free(s->dct_tokens_base);
-    av_free(s->superblock_fragments);
-    av_free(s->macroblock_coding);
-    av_free(s->motion_val[0]);
-    av_free(s->motion_val[1]);
-    av_free(s->edge_emu_buffer);
-
-    if (avctx->is_copy) return 0;
-
-    for (i = 0; i < 16; i++) {
-        free_vlc(&s->dc_vlc[i]);
-        free_vlc(&s->ac_vlc_1[i]);
-        free_vlc(&s->ac_vlc_2[i]);
-        free_vlc(&s->ac_vlc_3[i]);
-        free_vlc(&s->ac_vlc_4[i]);
-    }
-
-    free_vlc(&s->superblock_run_length_vlc);
-    free_vlc(&s->fragment_run_length_vlc);
-    free_vlc(&s->mode_code_vlc);
-    free_vlc(&s->motion_vector_vlc);
-
-    /* release all frames */
-    if (s->golden_frame.data[0])
-        ff_thread_release_buffer(avctx, &s->golden_frame);
-    if (s->last_frame.data[0] && s->last_frame.type != FF_BUFFER_TYPE_COPY)
-        ff_thread_release_buffer(avctx, &s->last_frame);
-    /* no need to release the current_frame since it will always be pointing
-     * to the same frame as either the golden or last frame */
-
-    return 0;
-}
-
 static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
 {
     Vp3DecodeContext *s = avctx->priv_data;
@@ -2060,6 +2095,23 @@ static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
     return 0;
 }
 
+static int vp3_init_thread_copy(AVCodecContext *avctx)
+{
+    Vp3DecodeContext *s = avctx->priv_data;
+
+    s->superblock_coding = NULL;
+    s->all_fragments = NULL;
+    s->coded_fragment_list[0] = NULL;
+    s->dct_tokens_base = NULL;
+    s->superblock_fragments = NULL;
+    s->macroblock_coding = NULL;
+    s->motion_val[0] = NULL;
+    s->motion_val[1] = NULL;
+    s->edge_emu_buffer = NULL;
+
+    return 0;
+}
+
 #if CONFIG_THEORA_DECODER
 static const enum PixelFormat theora_pix_fmts[4] = {
     PIX_FMT_YUV420P, PIX_FMT_NONE, PIX_FMT_YUV422P, PIX_FMT_YUV444P
@@ -2275,7 +2327,7 @@ static av_cold int theora_decode_init(AVCodecContext *avctx)
         return -1;
     }
-    if (ff_split_xiph_headers(avctx->extradata, avctx->extradata_size,
+    if (avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size,
                               42, header_start, header_len) < 0) {
         av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
         return -1;
     }
@@ -2321,26 +2373,6 @@ static av_cold int theora_decode_init(AVCodecContext *avctx)
     return vp3_decode_init(avctx);
 }
 
-static void vp3_decode_flush(AVCodecContext *avctx)
-{
-    Vp3DecodeContext *s = avctx->priv_data;
-
-    if (s->golden_frame.data[0]) {
-        if (s->golden_frame.data[0] == s->last_frame.data[0])
-            memset(&s->last_frame, 0, sizeof(AVFrame));
-        if (s->current_frame.data[0] == s->golden_frame.data[0])
-            memset(&s->current_frame, 0, sizeof(AVFrame));
-        ff_thread_release_buffer(avctx, &s->golden_frame);
-    }
-    if (s->last_frame.data[0]) {
-        if (s->current_frame.data[0] == s->last_frame.data[0])
-            memset(&s->current_frame, 0, sizeof(AVFrame));
-        ff_thread_release_buffer(avctx, &s->last_frame);
-    }
-    if (s->current_frame.data[0])
-        ff_thread_release_buffer(avctx, &s->current_frame);
-}
-
 AVCodec ff_theora_decoder = {
     .name = "theora",
     .type = AVMEDIA_TYPE_VIDEO,
@@ -2352,6 +2384,7 @@ AVCodec ff_theora_decoder = {
     .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
     .flush = vp3_decode_flush,
     .long_name = NULL_IF_CONFIG_SMALL("Theora"),
+    .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
     .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context)
 };
 #endif
@@ -2367,5 +2400,6 @@ AVCodec ff_vp3_decoder = {
     .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
     .flush = vp3_decode_flush,
     .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"),
+    .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
     .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context)
 };