X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fvp3.c;h=bcbb70f423cd24f72591bdb1cdac9c562fd54fb2;hb=e6ff064845d02c43526c8a56dab121c219f16659;hp=e2ee24a54dd150846a1f6d8afe1fc3337baa95f9;hpb=c72625f29902bfd7f01184c3eb1c29c94ec2a648;p=ffmpeg diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c index e2ee24a54dd..bcbb70f423c 100644 --- a/libavcodec/vp3.c +++ b/libavcodec/vp3.c @@ -1,25 +1,25 @@ /* * Copyright (C) 2003-2004 the ffmpeg project * - * This file is part of FFmpeg. + * This file is part of Libav. * - * FFmpeg is free software; you can redistribute it and/or + * Libav is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * FFmpeg is distributed in the hope that it will be useful, + * Libav is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software + * License along with Libav; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** - * @file libavcodec/vp3.c + * @file * On2 VP3 Video Decoder * * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx) @@ -33,12 +33,14 @@ #include #include +#include "libavutil/imgutils.h" #include "avcodec.h" #include "dsputil.h" #include "get_bits.h" #include "vp3data.h" #include "xiph.h" +#include "thread.h" #define FRAGMENT_PIXELS 8 @@ -48,8 +50,6 @@ static av_cold int vp3_decode_end(AVCodecContext *avctx); typedef struct Vp3Fragment { int16_t dc; uint8_t coding_method; - int8_t motion_x; - int8_t motion_y; uint8_t qpi; } Vp3Fragment; @@ -130,6 +130,7 @@ typedef struct Vp3DecodeContext { int theora, theora_tables; int version; int width, height; + int chroma_x_shift, chroma_y_shift; AVFrame golden_frame; AVFrame last_frame; AVFrame current_frame; @@ -137,6 +138,7 @@ typedef struct Vp3DecodeContext { DSPContext dsp; int flipped_image; int last_slice_end; + int skip_loop_filter; int qps[3]; int nqps; @@ -158,13 +160,15 @@ typedef struct Vp3DecodeContext { int macroblock_height; int fragment_count; - int fragment_width; - int fragment_height; + int fragment_width[2]; + int fragment_height[2]; Vp3Fragment *all_fragments; int fragment_start[3]; int data_offset[3]; + int8_t (*motion_val[2])[2]; + ScanTable scantable; /* tables */ @@ -221,7 +225,7 @@ typedef struct Vp3DecodeContext { /* these arrays need to be on 16-byte boundaries since SSE2 operations * index into them */ - DECLARE_ALIGNED_16(int16_t, qmat)[3][2][3][64]; // fragments, macroblocks <-> fragments, * superblocks <-> macroblocks * - * Returns 0 is successful; returns 1 if *anything* went wrong. + * @return 0 is successful; returns 1 if *anything* went wrong. 
*/ static int init_block_mapping(Vp3DecodeContext *s) { - int i, j; - signed int hilbert_walk_mb[4]; - - int current_fragment = 0; - int current_width = 0; - int current_height = 0; - int right_edge = 0; - int bottom_edge = 0; - int superblock_row_inc = 0; - int mapping_index = 0; - - static const signed char travel_width[16] = { - 1, 1, 0, -1, - 0, 0, 1, 0, - 1, 0, 1, 0, - 0, -1, 0, 1 - }; - - static const signed char travel_height[16] = { - 0, 0, 1, 0, - 1, 1, 0, -1, - 0, 1, 0, -1, - -1, 0, -1, 0 - }; - - hilbert_walk_mb[0] = 1; - hilbert_walk_mb[1] = s->macroblock_width; - hilbert_walk_mb[2] = 1; - hilbert_walk_mb[3] = -s->macroblock_width; - - /* iterate through each superblock (all planes) and map the fragments */ - for (i = 0; i < s->superblock_count; i++) { - /* time to re-assign the limits? */ - if (i == 0) { - - /* start of Y superblocks */ - right_edge = s->fragment_width; - bottom_edge = s->fragment_height; - current_width = -1; - current_height = 0; - superblock_row_inc = 3 * s->fragment_width - - (s->y_superblock_width * 4 - s->fragment_width); - - /* the first operation for this variable is to advance by 1 */ - current_fragment = -1; - - } else if (i == s->u_superblock_start) { - - /* start of U superblocks */ - right_edge = s->fragment_width / 2; - bottom_edge = s->fragment_height / 2; - current_width = -1; - current_height = 0; - superblock_row_inc = 3 * (s->fragment_width / 2) - - (s->c_superblock_width * 4 - s->fragment_width / 2); - - /* the first operation for this variable is to advance by 1 */ - current_fragment = s->fragment_start[1] - 1; - - } else if (i == s->v_superblock_start) { - - /* start of V superblocks */ - right_edge = s->fragment_width / 2; - bottom_edge = s->fragment_height / 2; - current_width = -1; - current_height = 0; - superblock_row_inc = 3 * (s->fragment_width / 2) - - (s->c_superblock_width * 4 - s->fragment_width / 2); - - /* the first operation for this variable is to advance by 1 */ - current_fragment = s->fragment_start[2] - 1; - - } + int sb_x, sb_y, plane; + int x, y, i, j = 0; - if (current_width >= right_edge - 1) { - /* reset width and move to next superblock row */ - current_width = -1; - current_height += 4; - - /* fragment is now at the start of a new superblock row */ - current_fragment += superblock_row_inc; - } - - /* iterate through all 16 fragments in a superblock */ - for (j = 0; j < 16; j++) { - current_fragment += travel_width[j] + right_edge * travel_height[j]; - current_width += travel_width[j]; - current_height += travel_height[j]; - - /* check if the fragment is in bounds */ - if ((current_width < right_edge) && - (current_height < bottom_edge)) { - s->superblock_fragments[mapping_index] = current_fragment; - } else { - s->superblock_fragments[mapping_index] = -1; - } - - mapping_index++; - } + for (plane = 0; plane < 3; plane++) { + int sb_width = plane ? s->c_superblock_width : s->y_superblock_width; + int sb_height = plane ? s->c_superblock_height : s->y_superblock_height; + int frag_width = s->fragment_width[!!plane]; + int frag_height = s->fragment_height[!!plane]; + + for (sb_y = 0; sb_y < sb_height; sb_y++) + for (sb_x = 0; sb_x < sb_width; sb_x++) + for (i = 0; i < 16; i++) { + x = 4*sb_x + hilbert_offset[i][0]; + y = 4*sb_y + hilbert_offset[i][1]; + + if (x < frag_width && y < frag_height) + s->superblock_fragments[j++] = s->fragment_start[plane] + y*frag_width + x; + else + s->superblock_fragments[j++] = -1; + } } return 0; /* successful path out */ } -/* - * This function wipes out all of the fragment data. 
- */ -static void init_frame(Vp3DecodeContext *s, GetBitContext *gb) -{ - int i; - - /* zero out all of the fragment information */ - for (i = 0; i < s->fragment_count; i++) { - s->all_fragments[i].motion_x = 127; - s->all_fragments[i].motion_y = 127; - s->all_fragments[i].dc = 0; - s->all_fragments[i].qpi = 0; - } -} - /* * This function sets up the dequantization tables used for a particular * frame. @@ -416,8 +324,6 @@ static void init_dequantizer(Vp3DecodeContext *s, int qpi) s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0]; } } - - memset(s->qscale_table, (FFMAX(s->qmat[0][0][0][1], s->qmat[0][0][1][1])+8)/16, 512); //FIXME finetune } /* @@ -456,6 +362,7 @@ static void init_loop_filter(Vp3DecodeContext *s) */ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb) { + int superblock_starts[3] = { 0, s->u_superblock_start, s->v_superblock_start }; int bit = 0; int current_superblock = 0; int current_run = 0; @@ -471,8 +378,15 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb) } else { /* unpack the list of partially-coded superblocks */ - bit = get_bits1(gb); - while (current_superblock < s->superblock_count) { + bit = get_bits1(gb) ^ 1; + current_run = 0; + + while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) { + if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN) + bit = get_bits1(gb); + else + bit ^= 1; + current_run = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1; if (current_run == 34) @@ -488,11 +402,6 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb) current_superblock += current_run; if (bit) num_partial_superblocks += current_run; - - if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN) - bit = get_bits1(gb); - else - bit ^= 1; } /* unpack the list of fully coded superblocks if any of the blocks were @@ -501,8 +410,17 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb) int superblocks_decoded = 0; current_superblock = 0; - bit = get_bits1(gb); - while (superblocks_decoded < s->superblock_count - num_partial_superblocks) { + bit = get_bits1(gb) ^ 1; + current_run = 0; + + while (superblocks_decoded < s->superblock_count - num_partial_superblocks + && get_bits_left(gb) > 0) { + + if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN) + bit = get_bits1(gb); + else + bit ^= 1; + current_run = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1; if (current_run == 34) @@ -521,11 +439,6 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb) } } superblocks_decoded += current_run; - - if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN) - bit = get_bits1(gb); - else - bit ^= 1; } } @@ -547,22 +460,17 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb) memset(s->macroblock_coding, MODE_COPY, s->macroblock_count); for (plane = 0; plane < 3; plane++) { - int sb_start = (int[]){ 0, s->u_superblock_start, s->v_superblock_start }[plane]; + int sb_start = superblock_starts[plane]; int sb_end = sb_start + (plane ? 
s->c_superblock_count : s->y_superblock_count); int num_coded_frags = 0; - for (i = sb_start; i < sb_end; i++) { + for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) { /* iterate through all 16 fragments in a superblock */ for (j = 0; j < 16; j++) { /* if the fragment is in bounds, check its coding status */ current_fragment = s->superblock_fragments[i * 16 + j]; - if (current_fragment >= s->fragment_count) { - av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_superblocks(): bad fragment number (%d >= %d)\n", - current_fragment, s->fragment_count); - return 1; - } if (current_fragment != -1) { int coded = s->superblock_coding[i]; @@ -615,6 +523,7 @@ static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb) int coding_mode; int custom_mode_alphabet[CODING_MODE_COUNT]; const int *alphabet; + Vp3Fragment *frag; if (s->keyframe) { for (i = 0; i < s->fragment_count; i++) @@ -639,6 +548,8 @@ static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb) * coded fragments */ for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) { for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) { + if (get_bits_left(gb) <= 0) + return -1; for (j = 0; j < 4; j++) { int mb_x = 2*sb_x + (j>>1); @@ -653,7 +564,7 @@ static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb) /* coding modes are only stored if the macroblock has at least one * luma block coded, otherwise it must be INTER_NO_MV */ for (k = 0; k < 4; k++) { - current_fragment = BLOCK_Y*s->fragment_width + BLOCK_X; + current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X; if (s->all_fragments[current_fragment].coding_method != MODE_COPY) break; } @@ -671,20 +582,31 @@ static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb) s->macroblock_coding[current_macroblock] = coding_mode; for (k = 0; k < 4; k++) { - current_fragment = - BLOCK_Y*s->fragment_width + BLOCK_X; - if (s->all_fragments[current_fragment].coding_method != - MODE_COPY) - s->all_fragments[current_fragment].coding_method = - coding_mode; + frag = s->all_fragments + BLOCK_Y*s->fragment_width[0] + BLOCK_X; + if (frag->coding_method != MODE_COPY) + frag->coding_method = coding_mode; } - for (k = 0; k < 2; k++) { - current_fragment = s->fragment_start[k+1] + - mb_y*(s->fragment_width>>1) + mb_x; - if (s->all_fragments[current_fragment].coding_method != - MODE_COPY) - s->all_fragments[current_fragment].coding_method = - coding_mode; + +#define SET_CHROMA_MODES \ + if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \ + frag[s->fragment_start[1]].coding_method = coding_mode;\ + if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \ + frag[s->fragment_start[2]].coding_method = coding_mode; + + if (s->chroma_y_shift) { + frag = s->all_fragments + mb_y*s->fragment_width[1] + mb_x; + SET_CHROMA_MODES + } else if (s->chroma_x_shift) { + frag = s->all_fragments + 2*mb_y*s->fragment_width[1] + mb_x; + for (k = 0; k < 2; k++) { + SET_CHROMA_MODES + frag += s->fragment_width[1]; + } + } else { + for (k = 0; k < 4; k++) { + frag = s->all_fragments + BLOCK_Y*s->fragment_width[1] + BLOCK_X; + SET_CHROMA_MODES + } } } } @@ -702,21 +624,19 @@ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb) { int j, k, sb_x, sb_y; int coding_mode; - int motion_x[6]; - int motion_y[6]; + int motion_x[4]; + int motion_y[4]; int last_motion_x = 0; int last_motion_y = 0; int prior_last_motion_x = 0; int prior_last_motion_y = 0; int current_macroblock; int current_fragment; + int frag; if (s->keyframe) return 0; - memset(motion_x, 0, 6 * sizeof(int)); - memset(motion_y, 0, 6 * 
sizeof(int)); - /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */ coding_mode = get_bits1(gb); @@ -724,6 +644,8 @@ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb) * coded fragments */ for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) { for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) { + if (get_bits_left(gb) <= 0) + return -1; for (j = 0; j < 4; j++) { int mb_x = 2*sb_x + (j>>1); @@ -764,9 +686,8 @@ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb) /* fetch 4 vectors from the bitstream, one for each * Y fragment, then average for the C fragment vectors */ - motion_x[4] = motion_y[4] = 0; for (k = 0; k < 4; k++) { - current_fragment = BLOCK_Y*s->fragment_width + BLOCK_X; + current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X; if (s->all_fragments[current_fragment].coding_method != MODE_COPY) { if (coding_mode == 0) { motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)]; @@ -781,14 +702,7 @@ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb) motion_x[k] = 0; motion_y[k] = 0; } - motion_x[4] += motion_x[k]; - motion_y[4] += motion_y[k]; } - - motion_x[5]= - motion_x[4]= RSHIFT(motion_x[4], 2); - motion_y[5]= - motion_y[4]= RSHIFT(motion_y[4], 2); break; case MODE_INTER_LAST_MV: @@ -825,24 +739,55 @@ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb) /* assign the motion vectors to the correct fragments */ for (k = 0; k < 4; k++) { current_fragment = - BLOCK_Y*s->fragment_width + BLOCK_X; + BLOCK_Y*s->fragment_width[0] + BLOCK_X; if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) { - s->all_fragments[current_fragment].motion_x = motion_x[k]; - s->all_fragments[current_fragment].motion_y = motion_y[k]; + s->motion_val[0][current_fragment][0] = motion_x[k]; + s->motion_val[0][current_fragment][1] = motion_y[k]; } else { - s->all_fragments[current_fragment].motion_x = motion_x[0]; - s->all_fragments[current_fragment].motion_y = motion_y[0]; + s->motion_val[0][current_fragment][0] = motion_x[0]; + s->motion_val[0][current_fragment][1] = motion_y[0]; } } - for (k = 0; k < 2; k++) { - current_fragment = s->fragment_start[k+1] + - mb_y*(s->fragment_width>>1) + mb_x; + + if (s->chroma_y_shift) { if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) { - s->all_fragments[current_fragment].motion_x = motion_x[k+4]; - s->all_fragments[current_fragment].motion_y = motion_y[k+4]; + motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] + motion_x[2] + motion_x[3], 2); + motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] + motion_y[2] + motion_y[3], 2); + } + motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1); + motion_y[0] = (motion_y[0]>>1) | (motion_y[0]&1); + frag = mb_y*s->fragment_width[1] + mb_x; + s->motion_val[1][frag][0] = motion_x[0]; + s->motion_val[1][frag][1] = motion_y[0]; + } else if (s->chroma_x_shift) { + if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) { + motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1); + motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1); + motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1); + motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1); } else { - s->all_fragments[current_fragment].motion_x = motion_x[0]; - s->all_fragments[current_fragment].motion_y = motion_y[0]; + motion_x[1] = motion_x[0]; + motion_y[1] = motion_y[0]; + } + motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1); + motion_x[1] = (motion_x[1]>>1) | (motion_x[1]&1); + + frag = 2*mb_y*s->fragment_width[1] + mb_x; + for (k = 0; k < 2; k++) { + 
s->motion_val[1][frag][0] = motion_x[k]; + s->motion_val[1][frag][1] = motion_y[k]; + frag += s->fragment_width[1]; + } + } else { + for (k = 0; k < 4; k++) { + frag = BLOCK_Y*s->fragment_width[1] + BLOCK_X; + if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) { + s->motion_val[1][frag][0] = motion_x[k]; + s->motion_val[1][frag][1] = motion_y[k]; + } else { + s->motion_val[1][frag][0] = motion_x[0]; + s->motion_val[1][frag][1] = motion_y[0]; + } } } } @@ -860,9 +805,15 @@ static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb) for (qpi = 0; qpi < s->nqps-1 && num_blocks > 0; qpi++) { i = blocks_decoded = num_blocks_at_qpi = 0; - bit = get_bits1(gb); + bit = get_bits1(gb) ^ 1; + run_length = 0; do { + if (run_length == MAXIMUM_LONG_BIT_RUN) + bit = get_bits1(gb); + else + bit ^= 1; + run_length = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1; if (run_length == 34) run_length += get_bits(gb, 12); @@ -880,12 +831,7 @@ static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb) j++; } } - - if (run_length == MAXIMUM_LONG_BIT_RUN) - bit = get_bits1(gb); - else - bit ^= 1; - } while (blocks_decoded < num_blocks); + } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0); num_blocks -= num_blocks_at_qpi; } @@ -940,9 +886,9 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, if (blocks_ended) dct_tokens[j++] = blocks_ended << 2; - while (coeff_i < num_coeffs) { + while (coeff_i < num_coeffs && get_bits_left(gb) > 0) { /* decode a VLC into a token */ - token = get_vlc2(gb, vlc_table, 5, 3); + token = get_vlc2(gb, vlc_table, 11, 3); /* use the token to get a zero run, a coefficient, and an eob run */ if (token <= 6) { eob_run = eob_run_base[token]; @@ -986,7 +932,7 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, } if (coeff_index + zero_run > 64) { - av_log(s->avctx, AV_LOG_ERROR, "Invalid zero run of %d with" + av_log(s->avctx, AV_LOG_DEBUG, "Invalid zero run of %d with" " %d coeffs left\n", zero_run, 64-coeff_index); zero_run = 64 - coeff_index; } @@ -1047,7 +993,7 @@ static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb) 0, residual_eob_run); /* reverse prediction of the Y-plane DC coefficients */ - reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height); + reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]); /* unpack the C plane DC coefficients */ residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0, @@ -1059,9 +1005,9 @@ static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb) if (!(s->avctx->flags & CODEC_FLAG_GRAY)) { reverse_dc_prediction(s, s->fragment_start[1], - s->fragment_width / 2, s->fragment_height / 2); + s->fragment_width[1], s->fragment_height[1]); reverse_dc_prediction(s, s->fragment_start[2], - s->fragment_width / 2, s->fragment_height / 2); + s->fragment_width[1], s->fragment_height[1]); } /* fetch the AC table indexes */ @@ -1264,8 +1210,8 @@ static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int ye int x, y; int *bounding_values= s->bounding_values_array+127; - int width = s->fragment_width >> !!plane; - int height = s->fragment_height >> !!plane; + int width = s->fragment_width[!!plane]; + int height = s->fragment_height[!!plane]; int fragment = s->fragment_start [plane] + ystart * width; int stride = s->current_frame.linesize[plane]; uint8_t *plane_data = s->current_frame.data [plane]; @@ -1323,7 +1269,7 @@ static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int ye } /** - 
* Pulls DCT tokens from the 64 levels to decode and dequant the coefficients + * Pull DCT tokens from the 64 levels to decode and dequant the coefficients * for the next block in coding order */ static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag, @@ -1352,8 +1298,7 @@ static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag, block[perm[i]] = (token >> 2) * dequantizer[perm[i]]; s->dct_tokens[plane][i++]++; break; - default: - av_log(s->avctx, AV_LOG_ERROR, "internal: invalid token type\n"); + default: // shouldn't happen return i; } } while (i < 64); @@ -1371,19 +1316,27 @@ static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y) int h, cy; int offset[4]; + if (HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) { + int y_flipped = s->flipped_image ? s->avctx->height-y : y; + + // At the end of the frame, report INT_MAX instead of the height of the frame. + // This makes the other threads' ff_thread_await_progress() calls cheaper, because + // they don't have to clip their values. + ff_thread_report_progress(&s->current_frame, y_flipped==s->avctx->height ? INT_MAX : y_flipped-1, 0); + } + if(s->avctx->draw_horiz_band==NULL) return; h= y - s->last_slice_end; + s->last_slice_end= y; y -= h; if (!s->flipped_image) { - if (y == 0) - h -= s->height - s->avctx->height; // account for non-mod16 - y = s->height - y - h; + y = s->avctx->height - y - h; } - cy = y >> 1; + cy = y >> s->chroma_y_shift; offset[0] = s->current_frame.linesize[0]*y; offset[1] = s->current_frame.linesize[1]*cy; offset[2] = s->current_frame.linesize[2]*cy; @@ -1391,7 +1344,28 @@ static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y) emms_c(); s->avctx->draw_horiz_band(s->avctx, &s->current_frame, offset, y, 3, h); - s->last_slice_end= y + h; +} + +/** + * Wait for the reference frame of the current fragment. + * The progress value is in luma pixel rows. + */ +static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment, int motion_y, int y) +{ + AVFrame *ref_frame; + int ref_row; + int border = motion_y&1; + + if (fragment->coding_method == MODE_USING_GOLDEN || + fragment->coding_method == MODE_GOLDEN_MV) + ref_frame = &s->golden_frame; + else + ref_frame = &s->last_frame; + + ref_row = y + (motion_y>>1); + ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border); + + ff_thread_await_progress(ref_frame, ref_row, 0); } /* @@ -1400,7 +1374,7 @@ static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y) */ static void render_slice(Vp3DecodeContext *s, int slice) { - int x, y, i, j; + int x, y, i, j, fragment; LOCAL_ALIGNED_16(DCTELEM, block, [64]); int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef; int motion_halfpel_index; @@ -1415,25 +1389,23 @@ static void render_slice(Vp3DecodeContext *s, int slice) uint8_t * last_plane = s-> last_frame.data [plane] + s->data_offset[plane]; uint8_t *golden_plane = s-> golden_frame.data [plane] + s->data_offset[plane]; int stride = s->current_frame.linesize[plane]; - int plane_width = s->width >> !!plane; - int plane_height = s->height >> !!plane; + int plane_width = s->width >> (plane && s->chroma_x_shift); + int plane_height = s->height >> (plane && s->chroma_y_shift); + int8_t (*motion_val)[2] = s->motion_val[!!plane]; - int sb_x, sb_y = slice << !plane; - int slice_height = sb_y + (plane ? 1 : 2); + int sb_x, sb_y = slice << (!plane && s->chroma_y_shift); + int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift); int slice_width = plane ? 
s->c_superblock_width : s->y_superblock_width; - int fragment_width = s->fragment_width >> !!plane; - int fragment_height = s->fragment_height >> !!plane; + int fragment_width = s->fragment_width[!!plane]; + int fragment_height = s->fragment_height[!!plane]; int fragment_start = s->fragment_start[plane]; + int do_await = !plane && HAVE_PTHREADS && (s->avctx->active_thread_type&FF_THREAD_FRAME); if (!s->flipped_image) stride = -stride; if (CONFIG_GRAY && plane && (s->avctx->flags & CODEC_FLAG_GRAY)) continue; - - if(FFABS(stride) > 2048) - return; //various tables are fixed size - /* for each superblock row in the slice (both of them)... */ for (; sb_y < slice_height; sb_y++) { @@ -1444,8 +1416,9 @@ static void render_slice(Vp3DecodeContext *s, int slice) for (j = 0; j < 16; j++) { x = 4*sb_x + hilbert_offset[j][0]; y = 4*sb_y + hilbert_offset[j][1]; + fragment = y*fragment_width + x; - i = fragment_start + y*fragment_width + x; + i = fragment_start + fragment; // bounds check if (x >= fragment_width || y >= fragment_height) @@ -1453,10 +1426,11 @@ static void render_slice(Vp3DecodeContext *s, int slice) first_pixel = 8*y*stride + 8*x; + if (do_await && s->all_fragments[i].coding_method != MODE_INTRA) + await_reference_row(s, &s->all_fragments[i], motion_val[fragment][1], (16*y) >> s->chroma_y_shift); + /* transform if this block was coded */ if (s->all_fragments[i].coding_method != MODE_COPY) { - int intra = s->all_fragments[i].coding_method == MODE_INTRA; - if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) || (s->all_fragments[i].coding_method == MODE_GOLDEN_MV)) motion_source= golden_plane; @@ -1471,17 +1445,11 @@ static void render_slice(Vp3DecodeContext *s, int slice) if ((s->all_fragments[i].coding_method > MODE_INTRA) && (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) { int src_x, src_y; - motion_x = s->all_fragments[i].motion_x; - motion_y = s->all_fragments[i].motion_y; - if(plane){ - motion_x= (motion_x>>1) | (motion_x&1); - motion_y= (motion_y>>1) | (motion_y&1); - } + motion_x = motion_val[fragment][0]; + motion_y = motion_val[fragment][1]; src_x= (motion_x>>1) + 8*x; src_y= (motion_y>>1) + 8*y; - if ((motion_x == 127) || (motion_y == 127)) - av_log(s->avctx, AV_LOG_ERROR, " help! got invalid motion vector! 
(%X, %X)\n", motion_x, motion_y); motion_halfpel_index = motion_x & 0x01; motion_source += (motion_x >> 1); @@ -1491,10 +1459,9 @@ static void render_slice(Vp3DecodeContext *s, int slice) if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){ uint8_t *temp= s->edge_emu_buffer; - if(stride<0) temp -= 9*stride; - else temp += 9*stride; + if(stride<0) temp -= 8*stride; - ff_emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height); + s->dsp.emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height); motion_source= temp; } } @@ -1522,11 +1489,11 @@ static void render_slice(Vp3DecodeContext *s, int slice) } s->dsp.clear_block(block); - vp3_dequant(s, s->all_fragments + i, plane, !intra, block); /* invert DCT and place (or add) in final output */ if (s->all_fragments[i].coding_method == MODE_INTRA) { + vp3_dequant(s, s->all_fragments + i, plane, 0, block); if(s->avctx->idct_algo!=FF_IDCT_VP3) block[0] += 128<<3; s->dsp.idct_put( @@ -1534,10 +1501,14 @@ static void render_slice(Vp3DecodeContext *s, int slice) stride, block); } else { + if (vp3_dequant(s, s->all_fragments + i, plane, 1, block)) { s->dsp.idct_add( output_plane + first_pixel, stride, block); + } else { + s->dsp.vp3_idct_dc_add(output_plane + first_pixel, stride, block); + } } } else { @@ -1552,7 +1523,8 @@ static void render_slice(Vp3DecodeContext *s, int slice) } // Filter up to the last row in the superblock row - apply_loop_filter(s, plane, 4*sb_y - !!sb_y, FFMIN(4*sb_y+3, fragment_height-1)); + if (!s->skip_loop_filter) + apply_loop_filter(s, plane, 4*sb_y - !!sb_y, FFMIN(4*sb_y+3, fragment_height-1)); } } @@ -1564,7 +1536,39 @@ static void render_slice(Vp3DecodeContext *s, int slice) * dispatch (slice - 1); */ - vp3_draw_horiz_band(s, 64*slice + 64-16); + vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) -16, s->height-16)); +} + +/// Allocate tables for per-frame data in Vp3DecodeContext +static av_cold int allocate_tables(AVCodecContext *avctx) +{ + Vp3DecodeContext *s = avctx->priv_data; + int y_fragment_count, c_fragment_count; + + y_fragment_count = s->fragment_width[0] * s->fragment_height[0]; + c_fragment_count = s->fragment_width[1] * s->fragment_height[1]; + + s->superblock_coding = av_malloc(s->superblock_count); + s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment)); + s->coded_fragment_list[0] = av_malloc(s->fragment_count * sizeof(int)); + s->dct_tokens_base = av_malloc(64*s->fragment_count * sizeof(*s->dct_tokens_base)); + s->motion_val[0] = av_malloc(y_fragment_count * sizeof(*s->motion_val[0])); + s->motion_val[1] = av_malloc(c_fragment_count * sizeof(*s->motion_val[1])); + + /* work out the block mapping tables */ + s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int)); + s->macroblock_coding = av_malloc(s->macroblock_count + 1); + + if (!s->superblock_coding || !s->all_fragments || !s->dct_tokens_base || + !s->coded_fragment_list[0] || !s->superblock_fragments || !s->macroblock_coding || + !s->motion_val[0] || !s->motion_val[1]) { + vp3_decode_end(avctx); + return -1; + } + + init_block_mapping(s); + + return 0; } /* @@ -1576,6 +1580,7 @@ static av_cold int vp3_decode_init(AVCodecContext *avctx) int i, inter, plane; int c_width; int c_height; + int y_fragment_count, c_fragment_count; if (avctx->codec_tag == MKTAG('V','P','3','0')) s->version = 0; @@ -1585,7 +1590,8 @@ static av_cold int vp3_decode_init(AVCodecContext *avctx) s->avctx = avctx; s->width = 
FFALIGN(avctx->width, 16); s->height = FFALIGN(avctx->height, 16); - avctx->pix_fmt = PIX_FMT_YUV420P; + if (avctx->pix_fmt == PIX_FMT_NONE) + avctx->pix_fmt = PIX_FMT_YUV420P; avctx->chroma_sample_location = AVCHROMA_LOC_CENTER; if(avctx->idct_algo==FF_IDCT_AUTO) avctx->idct_algo=FF_IDCT_VP3; @@ -1598,13 +1604,15 @@ static av_cold int vp3_decode_init(AVCodecContext *avctx) for (i = 0; i < 3; i++) s->qps[i] = -1; + avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift); + s->y_superblock_width = (s->width + 31) / 32; s->y_superblock_height = (s->height + 31) / 32; s->y_superblock_count = s->y_superblock_width * s->y_superblock_height; /* work out the dimensions for the C planes */ - c_width = s->width / 2; - c_height = s->height / 2; + c_width = s->width >> s->chroma_x_shift; + c_height = s->height >> s->chroma_y_shift; s->c_superblock_width = (c_width + 31) / 32; s->c_superblock_height = (c_height + 31) / 32; s->c_superblock_count = s->c_superblock_width * s->c_superblock_height; @@ -1612,28 +1620,22 @@ static av_cold int vp3_decode_init(AVCodecContext *avctx) s->superblock_count = s->y_superblock_count + (s->c_superblock_count * 2); s->u_superblock_start = s->y_superblock_count; s->v_superblock_start = s->u_superblock_start + s->c_superblock_count; - s->superblock_coding = av_malloc(s->superblock_count); s->macroblock_width = (s->width + 15) / 16; s->macroblock_height = (s->height + 15) / 16; s->macroblock_count = s->macroblock_width * s->macroblock_height; - s->fragment_width = s->width / FRAGMENT_PIXELS; - s->fragment_height = s->height / FRAGMENT_PIXELS; + s->fragment_width[0] = s->width / FRAGMENT_PIXELS; + s->fragment_height[0] = s->height / FRAGMENT_PIXELS; + s->fragment_width[1] = s->fragment_width[0] >> s->chroma_x_shift; + s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift; /* fragment count covers all 8x8 blocks for all 3 planes */ - s->fragment_count = s->fragment_width * s->fragment_height * 3 / 2; - s->fragment_start[1] = s->fragment_width * s->fragment_height; - s->fragment_start[2] = s->fragment_width * s->fragment_height * 5 / 4; - - s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment)); - s->coded_fragment_list[0] = av_malloc(s->fragment_count * sizeof(int)); - s->dct_tokens_base = av_malloc(64*s->fragment_count * sizeof(*s->dct_tokens_base)); - if (!s->superblock_coding || !s->all_fragments || !s->dct_tokens_base || - !s->coded_fragment_list[0]) { - vp3_decode_end(avctx); - return -1; - } + y_fragment_count = s->fragment_width[0] * s->fragment_height[0]; + c_fragment_count = s->fragment_width[1] * s->fragment_height[1]; + s->fragment_count = y_fragment_count + 2*c_fragment_count; + s->fragment_start[1] = y_fragment_count; + s->fragment_start[2] = y_fragment_count + c_fragment_count; if (!s->theora_tables) { @@ -1659,61 +1661,61 @@ static av_cold int vp3_decode_init(AVCodecContext *avctx) for (i = 0; i < 16; i++) { /* DC histograms */ - init_vlc(&s->dc_vlc[i], 5, 32, + init_vlc(&s->dc_vlc[i], 11, 32, &dc_bias[i][0][1], 4, 2, &dc_bias[i][0][0], 4, 2, 0); /* group 1 AC histograms */ - init_vlc(&s->ac_vlc_1[i], 5, 32, + init_vlc(&s->ac_vlc_1[i], 11, 32, &ac_bias_0[i][0][1], 4, 2, &ac_bias_0[i][0][0], 4, 2, 0); /* group 2 AC histograms */ - init_vlc(&s->ac_vlc_2[i], 5, 32, + init_vlc(&s->ac_vlc_2[i], 11, 32, &ac_bias_1[i][0][1], 4, 2, &ac_bias_1[i][0][0], 4, 2, 0); /* group 3 AC histograms */ - init_vlc(&s->ac_vlc_3[i], 5, 32, + init_vlc(&s->ac_vlc_3[i], 11, 32, &ac_bias_2[i][0][1], 4, 2, &ac_bias_2[i][0][0], 4, 
2, 0); /* group 4 AC histograms */ - init_vlc(&s->ac_vlc_4[i], 5, 32, + init_vlc(&s->ac_vlc_4[i], 11, 32, &ac_bias_3[i][0][1], 4, 2, &ac_bias_3[i][0][0], 4, 2, 0); } } else { - for (i = 0; i < 16; i++) { + for (i = 0; i < 16; i++) { /* DC histograms */ - if (init_vlc(&s->dc_vlc[i], 5, 32, - &s->huffman_table[i][0][1], 4, 2, - &s->huffman_table[i][0][0], 4, 2, 0) < 0) + if (init_vlc(&s->dc_vlc[i], 11, 32, + &s->huffman_table[i][0][1], 8, 4, + &s->huffman_table[i][0][0], 8, 4, 0) < 0) goto vlc_fail; /* group 1 AC histograms */ - if (init_vlc(&s->ac_vlc_1[i], 5, 32, - &s->huffman_table[i+16][0][1], 4, 2, - &s->huffman_table[i+16][0][0], 4, 2, 0) < 0) + if (init_vlc(&s->ac_vlc_1[i], 11, 32, + &s->huffman_table[i+16][0][1], 8, 4, + &s->huffman_table[i+16][0][0], 8, 4, 0) < 0) goto vlc_fail; /* group 2 AC histograms */ - if (init_vlc(&s->ac_vlc_2[i], 5, 32, - &s->huffman_table[i+16*2][0][1], 4, 2, - &s->huffman_table[i+16*2][0][0], 4, 2, 0) < 0) + if (init_vlc(&s->ac_vlc_2[i], 11, 32, + &s->huffman_table[i+16*2][0][1], 8, 4, + &s->huffman_table[i+16*2][0][0], 8, 4, 0) < 0) goto vlc_fail; /* group 3 AC histograms */ - if (init_vlc(&s->ac_vlc_3[i], 5, 32, - &s->huffman_table[i+16*3][0][1], 4, 2, - &s->huffman_table[i+16*3][0][0], 4, 2, 0) < 0) + if (init_vlc(&s->ac_vlc_3[i], 11, 32, + &s->huffman_table[i+16*3][0][1], 8, 4, + &s->huffman_table[i+16*3][0][0], 8, 4, 0) < 0) goto vlc_fail; /* group 4 AC histograms */ - if (init_vlc(&s->ac_vlc_4[i], 5, 32, - &s->huffman_table[i+16*4][0][1], 4, 2, - &s->huffman_table[i+16*4][0][0], 4, 2, 0) < 0) + if (init_vlc(&s->ac_vlc_4[i], 11, 32, + &s->huffman_table[i+16*4][0][1], 8, 4, + &s->huffman_table[i+16*4][0][0], 8, 4, 0) < 0) goto vlc_fail; } } @@ -1734,28 +1736,92 @@ static av_cold int vp3_decode_init(AVCodecContext *avctx) &motion_vector_vlc_table[0][1], 2, 1, &motion_vector_vlc_table[0][0], 2, 1, 0); - /* work out the block mapping tables */ - s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int)); - s->macroblock_coding = av_malloc(s->macroblock_count + 1); - if (!s->superblock_fragments || !s->macroblock_coding) { - vp3_decode_end(avctx); - return -1; - } - init_block_mapping(s); - for (i = 0; i < 3; i++) { s->current_frame.data[i] = NULL; s->last_frame.data[i] = NULL; s->golden_frame.data[i] = NULL; } - return 0; + return allocate_tables(avctx); vlc_fail: av_log(avctx, AV_LOG_FATAL, "Invalid huffman table\n"); return -1; } +/// Release and shuffle frames after decode finishes +static void update_frames(AVCodecContext *avctx) +{ + Vp3DecodeContext *s = avctx->priv_data; + + /* release the last frame, if it is allocated and if it is not the + * golden frame */ + if (s->last_frame.data[0] && s->last_frame.type != FF_BUFFER_TYPE_COPY) + ff_thread_release_buffer(avctx, &s->last_frame); + + /* shuffle frames (last = current) */ + s->last_frame= s->current_frame; + + if (s->keyframe) { + if (s->golden_frame.data[0]) + ff_thread_release_buffer(avctx, &s->golden_frame); + s->golden_frame = s->current_frame; + s->last_frame.type = FF_BUFFER_TYPE_COPY; + } + + s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */ +} + +static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src) +{ + Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data; + int qps_changed = 0, i, err; + + if (!s1->current_frame.data[0] + ||s->width != s1->width + ||s->height!= s1->height) + return -1; + + if (s != s1) { + // init tables if the first frame hasn't been decoded + if (!s->current_frame.data[0]) 
{ + int y_fragment_count, c_fragment_count; + s->avctx = dst; + err = allocate_tables(dst); + if (err) + return err; + y_fragment_count = s->fragment_width[0] * s->fragment_height[0]; + c_fragment_count = s->fragment_width[1] * s->fragment_height[1]; + memcpy(s->motion_val[0], s1->motion_val[0], y_fragment_count * sizeof(*s->motion_val[0])); + memcpy(s->motion_val[1], s1->motion_val[1], c_fragment_count * sizeof(*s->motion_val[1])); + } + +#define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field) + + // copy previous frame data + copy_fields(s, s1, golden_frame, dsp); + + // copy qscale data if necessary + for (i = 0; i < 3; i++) { + if (s->qps[i] != s1->qps[1]) { + qps_changed = 1; + memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i])); + } + } + + if (s->qps[0] != s1->qps[0]) + memcpy(&s->bounding_values_array, &s1->bounding_values_array, sizeof(s->bounding_values_array)); + + if (qps_changed) + copy_fields(s, s1, qps, superblock_count); +#undef copy_fields + } + + update_frames(dst); + + return 0; +} + /* * This is the ffmpeg/libavcodec API frame decode function. */ @@ -1767,7 +1833,6 @@ static int vp3_decode_frame(AVCodecContext *avctx, int buf_size = avpkt->size; Vp3DecodeContext *s = avctx->priv_data; GetBitContext gb; - static int counter = 0; int i; init_get_bits(&gb, buf, buf_size * 8); @@ -1793,8 +1858,10 @@ static int vp3_decode_frame(AVCodecContext *avctx, if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n", - s->keyframe?"key":"", counter, s->qps[0]); - counter++; + s->keyframe?"key":"", avctx->frame_number+1, s->qps[0]); + + s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] || + avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL : AVDISCARD_NONKEY); if (s->qps[0] != s->last_qps[0]) init_loop_filter(s); @@ -1808,6 +1875,16 @@ static int vp3_decode_frame(AVCodecContext *avctx, if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe) return buf_size; + s->current_frame.reference = 3; + s->current_frame.pict_type = s->keyframe ? FF_I_TYPE : FF_P_TYPE; + if (ff_thread_get_buffer(avctx, &s->current_frame) < 0) { + av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + goto error; + } + + if (!s->edge_emu_buffer) + s->edge_emu_buffer = av_malloc(9*FFABS(s->current_frame.linesize[0])); + if (s->keyframe) { if (!s->theora) { @@ -1816,7 +1893,7 @@ static int vp3_decode_frame(AVCodecContext *avctx, if (s->version) { s->version = get_bits(&gb, 5); - if (counter == 1) + if (avctx->frame_number == 0) av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version); } } @@ -1826,70 +1903,52 @@ static int vp3_decode_frame(AVCodecContext *avctx, av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n"); skip_bits(&gb, 2); /* reserved? 
*/ } - - if (s->last_frame.data[0] == s->golden_frame.data[0]) { - if (s->golden_frame.data[0]) - avctx->release_buffer(avctx, &s->golden_frame); - s->last_frame= s->golden_frame; /* ensure that we catch any access to this released frame */ - } else { - if (s->golden_frame.data[0]) - avctx->release_buffer(avctx, &s->golden_frame); - if (s->last_frame.data[0]) - avctx->release_buffer(avctx, &s->last_frame); - } - - s->golden_frame.reference = 3; - if(avctx->get_buffer(avctx, &s->golden_frame) < 0) { - av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n"); - return -1; - } - - /* golden frame is also the current frame */ - s->current_frame= s->golden_frame; } else { - /* allocate a new current frame */ - s->current_frame.reference = 3; if (!s->golden_frame.data[0]) { - av_log(s->avctx, AV_LOG_ERROR, "vp3: first frame not a keyframe\n"); - return -1; - } - if(avctx->get_buffer(avctx, &s->current_frame) < 0) { - av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n"); - return -1; + av_log(s->avctx, AV_LOG_WARNING, "vp3: first frame not a keyframe\n"); + + s->golden_frame.reference = 3; + s->golden_frame.pict_type = FF_I_TYPE; + if (ff_thread_get_buffer(avctx, &s->golden_frame) < 0) { + av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + goto error; + } + s->last_frame = s->golden_frame; + s->last_frame.type = FF_BUFFER_TYPE_COPY; + ff_thread_report_progress(&s->last_frame, INT_MAX, 0); } } - s->current_frame.qscale_table= s->qscale_table; //FIXME allocate individual tables per AVFrame - s->current_frame.qstride= 0; - - init_frame(s, &gb); + memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment)); + ff_thread_finish_setup(avctx); if (unpack_superblocks(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n"); - return -1; + goto error; } if (unpack_modes(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n"); - return -1; + goto error; } if (unpack_vectors(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n"); - return -1; + goto error; } if (unpack_block_qpis(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n"); - return -1; + goto error; } if (unpack_dct_coeffs(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n"); - return -1; + goto error; } for (i = 0; i < 3; i++) { + int height = s->height >> (i && s->chroma_y_shift); if (s->flipped_image) s->data_offset[i] = 0; else - s->data_offset[i] = ((s->height>>!!i)-1) * s->current_frame.linesize[i]; + s->data_offset[i] = (height-1) * s->current_frame.linesize[i]; } s->last_slice_end = 0; @@ -1898,25 +1957,26 @@ static int vp3_decode_frame(AVCodecContext *avctx, // filter the last row for (i = 0; i < 3; i++) { - int row = (s->height >> (3+!!i)) - 1; + int row = (s->height >> (3+(i && s->chroma_y_shift))) - 1; apply_loop_filter(s, i, row, row+1); } - vp3_draw_horiz_band(s, s->height); + vp3_draw_horiz_band(s, s->avctx->height); *data_size=sizeof(AVFrame); *(AVFrame*)data= s->current_frame; - /* release the last frame, if it is allocated and if it is not the - * golden frame */ - if ((s->last_frame.data[0]) && - (s->last_frame.data[0] != s->golden_frame.data[0])) - avctx->release_buffer(avctx, &s->last_frame); - - /* shuffle frames (last = current) */ - s->last_frame= s->current_frame; - s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */ + if (!HAVE_PTHREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME)) + update_frames(avctx); return buf_size; + +error: + 
ff_thread_report_progress(&s->current_frame, INT_MAX, 0); + + if (!HAVE_PTHREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME)) + avctx->release_buffer(avctx, &s->current_frame); + + return -1; } /* @@ -1927,12 +1987,20 @@ static av_cold int vp3_decode_end(AVCodecContext *avctx) Vp3DecodeContext *s = avctx->priv_data; int i; + if (avctx->is_copy && !s->current_frame.data[0]) + return 0; + av_free(s->superblock_coding); av_free(s->all_fragments); av_free(s->coded_fragment_list[0]); av_free(s->dct_tokens_base); av_free(s->superblock_fragments); av_free(s->macroblock_coding); + av_free(s->motion_val[0]); + av_free(s->motion_val[1]); + av_free(s->edge_emu_buffer); + + if (avctx->is_copy) return 0; for (i = 0; i < 16; i++) { free_vlc(&s->dc_vlc[i]); @@ -1948,10 +2016,10 @@ static av_cold int vp3_decode_end(AVCodecContext *avctx) free_vlc(&s->motion_vector_vlc); /* release all frames */ - if (s->golden_frame.data[0] && s->golden_frame.data[0] != s->last_frame.data[0]) - avctx->release_buffer(avctx, &s->golden_frame); - if (s->last_frame.data[0]) - avctx->release_buffer(avctx, &s->last_frame); + if (s->golden_frame.data[0]) + ff_thread_release_buffer(avctx, &s->golden_frame); + if (s->last_frame.data[0] && s->last_frame.type != FF_BUFFER_TYPE_COPY) + ff_thread_release_buffer(avctx, &s->last_frame); /* no need to release the current_frame since it will always be pointing * to the same frame as either the golden or last frame */ @@ -1993,10 +2061,16 @@ static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb) } #if CONFIG_THEORA_DECODER +static const enum PixelFormat theora_pix_fmts[4] = { + PIX_FMT_YUV420P, PIX_FMT_NONE, PIX_FMT_YUV422P, PIX_FMT_YUV444P +}; + static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb) { Vp3DecodeContext *s = avctx->priv_data; int visible_width, visible_height, colorspace; + int offset_x = 0, offset_y = 0; + AVRational fps, aspect; s->theora = get_bits_long(gb, 24); av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora); @@ -2012,7 +2086,7 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb) visible_width = s->width = get_bits(gb, 16) << 4; visible_height = s->height = get_bits(gb, 16) << 4; - if(avcodec_check_dimensions(avctx, s->width, s->height)){ + if(av_image_check_size(s->width, s->height, 0, avctx)){ av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n", s->width, s->height); s->width= s->height= 0; return -1; @@ -2022,14 +2096,24 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb) visible_width = get_bits_long(gb, 24); visible_height = get_bits_long(gb, 24); - skip_bits(gb, 8); /* offset x */ - skip_bits(gb, 8); /* offset y */ + offset_x = get_bits(gb, 8); /* offset x */ + offset_y = get_bits(gb, 8); /* offset y, from bottom */ + } + + fps.num = get_bits_long(gb, 32); + fps.den = get_bits_long(gb, 32); + if (fps.num && fps.den) { + av_reduce(&avctx->time_base.num, &avctx->time_base.den, + fps.den, fps.num, 1<<30); } - skip_bits(gb, 32); /* fps numerator */ - skip_bits(gb, 32); /* fps denumerator */ - skip_bits(gb, 24); /* aspect numerator */ - skip_bits(gb, 24); /* aspect denumerator */ + aspect.num = get_bits_long(gb, 24); + aspect.den = get_bits_long(gb, 24); + if (aspect.num && aspect.den) { + av_reduce(&avctx->sample_aspect_ratio.num, + &avctx->sample_aspect_ratio.den, + aspect.num, aspect.den, 1<<30); + } if (s->theora < 0x030200) skip_bits(gb, 5); /* keyframe frequency force */ @@ -2041,14 +2125,15 @@ static int theora_decode_header(AVCodecContext 
*avctx, GetBitContext *gb) if (s->theora >= 0x030200) { skip_bits(gb, 5); /* keyframe frequency force */ - skip_bits(gb, 2); /* pixel format: 420,res,422,444 */ + avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)]; skip_bits(gb, 3); /* reserved */ } // align_get_bits(gb); if ( visible_width <= s->width && visible_width > s->width-16 - && visible_height <= s->height && visible_height > s->height-16) + && visible_height <= s->height && visible_height > s->height-16 + && !offset_x && (offset_y == s->height - visible_height)) avcodec_set_dimensions(avctx, visible_width, visible_height); else avcodec_set_dimensions(avctx, s->width, s->height); @@ -2074,13 +2159,9 @@ static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb) if (s->theora >= 0x030200) { n = get_bits(gb, 3); /* loop filter limit values table */ - for (i = 0; i < 64; i++) { - s->filter_limit_values[i] = get_bits(gb, n); - if (s->filter_limit_values[i] > 127) { - av_log(avctx, AV_LOG_ERROR, "filter limit value too large (%i > 127), clamping\n", s->filter_limit_values[i]); - s->filter_limit_values[i] = 127; - } - } + if (n) + for (i = 0; i < 64; i++) + s->filter_limit_values[i] = get_bits(gb, n); } if (s->theora >= 0x030200) @@ -2240,31 +2321,33 @@ static av_cold int theora_decode_init(AVCodecContext *avctx) return vp3_decode_init(avctx); } -AVCodec theora_decoder = { +AVCodec ff_theora_decoder = { "theora", - CODEC_TYPE_VIDEO, + AVMEDIA_TYPE_VIDEO, CODEC_ID_THEORA, sizeof(Vp3DecodeContext), theora_decode_init, NULL, vp3_decode_end, vp3_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND, + CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS, NULL, .long_name = NULL_IF_CONFIG_SMALL("Theora"), + .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context) }; #endif -AVCodec vp3_decoder = { +AVCodec ff_vp3_decoder = { "vp3", - CODEC_TYPE_VIDEO, + AVMEDIA_TYPE_VIDEO, CODEC_ID_VP3, sizeof(Vp3DecodeContext), vp3_decode_init, NULL, vp3_decode_end, vp3_decode_frame, - CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND, + CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS, NULL, .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"), + .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context) };
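
The rewritten init_block_mapping() in this patch replaces the incremental travel_width/travel_height walk with a direct per-superblock lookup of fragment offsets. The standalone sketch below reconstructs that lookup: the hilbert_offset[] values are the cumulative positions implied by the removed travel tables, while map_superblock() is an illustrative helper, not a function from vp3.c.

#include <stdio.h>

/* Fragment coding order inside one 4x4-fragment superblock.  These offsets
 * are the cumulative positions produced by the travel_width/travel_height
 * tables that the patch removes, i.e. the Hilbert-style walk that the new
 * hilbert_offset[] table encodes directly. */
static const int hilbert_offset[16][2] = {
    {0,0}, {1,0}, {1,1}, {0,1},
    {0,2}, {0,3}, {1,3}, {1,2},
    {2,2}, {2,3}, {3,3}, {3,2},
    {3,1}, {2,1}, {2,0}, {3,0}
};

/* Map the fragments of one plane to the 16 slots of a superblock the way
 * the rewritten init_block_mapping() does; out-of-bounds slots get -1. */
static void map_superblock(int sb_x, int sb_y, int frag_width, int frag_height,
                           int frag_start, int *slots /* 16 entries */)
{
    int i;
    for (i = 0; i < 16; i++) {
        int x = 4 * sb_x + hilbert_offset[i][0];
        int y = 4 * sb_y + hilbert_offset[i][1];
        slots[i] = (x < frag_width && y < frag_height)
                 ? frag_start + y * frag_width + x : -1;
    }
}

int main(void)
{
    int slots[16], i;
    /* right-edge superblock of a 6x8-fragment plane: the slots that fall
     * outside the plane come back as -1 */
    map_superblock(1, 0, 6, 8, 0, slots);
    for (i = 0; i < 16; i++)
        printf("%d ", slots[i]);
    printf("\n");
    return 0;
}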
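
unpack_superblocks() and unpack_block_qpis() are restructured by the patch so that the run bit starts out pre-inverted, is toggled at the top of every iteration, and is only re-read from the bitstream after a maximum-length run (Theora's MAXIMUM_LONG_BIT_RUN). A minimal, self-contained sketch of that control flow follows; FakeStream, read_bit() and read_run_length() are stand-ins for the real GetBitContext and VLC calls, not vp3.c APIs.

#include <stdint.h>
#include <stdio.h>

#define MAXIMUM_LONG_BIT_RUN 4129

/* Minimal stand-in for the bitstream state; not the real GetBitContext. */
typedef struct {
    const int *bits;  int nbits;  int bit_pos;
    const int *runs;  int nruns;  int run_pos;
} FakeStream;

static int read_bit(FakeStream *s)        { return s->bit_pos < s->nbits ? s->bits[s->bit_pos++] : 0; }
static int read_run_length(FakeStream *s) { return s->run_pos < s->nruns ? s->runs[s->run_pos++] : 1; }

/* Alternating run-length scheme used for the coded-superblock and block-qpi
 * bitmaps after the patch: read one bit up front (stored pre-inverted), flip
 * it at the top of each iteration, and only re-read the bit after a run of
 * exactly MAXIMUM_LONG_BIT_RUN (Theora). */
static void decode_bit_runs(FakeStream *s, uint8_t *flags, int count, int is_theora)
{
    int i = 0;
    int bit = read_bit(s) ^ 1;
    int run = 0;

    while (i < count) {
        if (is_theora && run == MAXIMUM_LONG_BIT_RUN)
            bit = read_bit(s);
        else
            bit ^= 1;

        run = read_run_length(s);
        while (run-- && i < count)
            flags[i++] = (uint8_t)bit;
    }
}

int main(void)
{
    const int bits[] = { 1 };        /* first flag value */
    const int runs[] = { 3, 2, 4 };  /* run lengths as delivered by the VLC */
    FakeStream s = { bits, 1, 0, runs, 3, 0 };
    uint8_t flags[9];
    int i;

    decode_bit_runs(&s, flags, 9, 1);
    for (i = 0; i < 9; i++)
        printf("%d", flags[i]);
    printf("\n");                    /* prints 111001111 */
    return 0;
}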
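
With the new per-plane motion_val[] arrays, unpack_vectors() derives chroma vectors explicitly for each pixel format; in the 4:2:0 case the four luma vectors of a MODE_INTER_FOURMV macroblock are averaged and then halved with (v>>1)|(v&1), which keeps the half-pel bit set. The sketch below covers only that 4:2:0 path; rshift_round() merely approximates FFmpeg's RSHIFT macro (its exact rounding of negative sums is an assumption here), and the helper names are illustrative.

#include <stdio.h>

/* Rounded right shift; intended to mirror how FFmpeg's RSHIFT is used for
 * the averaging below, but the exact macro body is not copied. */
static int rshift_round(int a, int b)
{
    return a >= 0 ?  ((a + (1 << (b - 1))) >> b)
                  : -(((-a) + (1 << (b - 1))) >> b);
}

/* Halve a luma motion component for a half-resolution chroma plane while
 * keeping the low (half-pel) bit set if the input was odd. */
static int chroma_halve(int v)
{
    return (v >> 1) | (v & 1);
}

/* Derive the single 4:2:0 chroma vector of a macroblock from its four luma
 * vectors, as done for MODE_INTER_FOURMV: average, then halve. */
static void derive_chroma_mv_420(const int mv_x[4], const int mv_y[4],
                                 int *cx, int *cy)
{
    int ax = rshift_round(mv_x[0] + mv_x[1] + mv_x[2] + mv_x[3], 2);
    int ay = rshift_round(mv_y[0] + mv_y[1] + mv_y[2] + mv_y[3], 2);
    *cx = chroma_halve(ax);
    *cy = chroma_halve(ay);
}

int main(void)
{
    int mv_x[4] = { 3, 5, 4, 4 }, mv_y[4] = { -2, -2, -1, -3 };
    int cx, cy;

    derive_chroma_mv_420(mv_x, mv_y, &cx, &cy);
    printf("chroma MV = (%d, %d)\n", cx, cy);   /* (2, -1) */
    return 0;
}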
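
For frame threading, await_reference_row() has to determine the last reference luma row a motion-compensated block can touch before calling ff_thread_await_progress(): the full-pel part of the vertical vector, plus the 8-row block, plus one extra row when interpolating at half-pel, clamped for positions above the frame. The arithmetic is reproduced below as an illustrative, self-contained sketch; last_reference_row_needed() is not a real vp3.c function.

#include <stdio.h>

#define FFABS(a)    ((a) >= 0 ? (a) : -(a))
#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

/* Bottom-most reference luma row that must already be decoded before a
 * block starting at luma row y can be motion-compensated with vertical
 * motion motion_y (in half-pel units); mirrors await_reference_row(). */
static int last_reference_row_needed(int y, int motion_y)
{
    int border  = motion_y & 1;        /* half-pel MC reads one extra row */
    int ref_row = y + (motion_y >> 1); /* top row of the reference block  */
    return FFMAX(FFABS(ref_row), ref_row + 8 + border);
}

int main(void)
{
    printf("%d\n", last_reference_row_needed(64, -5));  /* 64 - 3 + 8 + 1 = 70 */
    printf("%d\n", last_reference_row_needed(0, -17));  /* clamps to 9 for rows above the frame */
    return 0;
}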