X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;ds=sidebyside;f=libavcodec%2Fvp3.c;h=373f5e0b68c818ab06530b8b574d3405e0c4388a;hb=d2205d6543881f2e6fa18c8a354bbcf91a1235f7;hp=75f08893b8522f2462c2423e0687dbadab4fe607;hpb=116d866ca5052ab3d50b2ead7acf5f63c945c273;p=ffmpeg diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c index 75f08893b85..373f5e0b68c 100644 --- a/libavcodec/vp3.c +++ b/libavcodec/vp3.c @@ -1,29 +1,30 @@ /* * Copyright (C) 2003-2004 the ffmpeg project * - * This library is free software; you can redistribute it and/or + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. + * version 2.1 of the License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * Libav is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software + * License along with Libav; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - * */ /** - * @file vp3.c + * @file * On2 VP3 Video Decoder * * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx) * For more information about the VP3 coding process, visit: - * http://multimedia.cx/ + * http://wiki.multimedia.cx/index.php?title=On2_VP3 * * Theora decoder by Alex Beregszaszi */ @@ -31,133 +32,35 @@ #include #include #include -#include -#include "common.h" +#include "libavutil/imgutils.h" #include "avcodec.h" +#include "internal.h" #include "dsputil.h" -#include "mpegvideo.h" +#include "get_bits.h" #include "vp3data.h" +#include "xiph.h" +#include "thread.h" #define FRAGMENT_PIXELS 8 -/* - * Debugging Variables - * - * Define one or more of the following compile-time variables to 1 to obtain - * elaborate information about certain aspects of the decoding process. - * - * KEYFRAMES_ONLY: set this to 1 to only see keyframes (VP3 slideshow mode) - * DEBUG_VP3: high-level decoding flow - * DEBUG_INIT: initialization parameters - * DEBUG_DEQUANTIZERS: display how the dequanization tables are built - * DEBUG_BLOCK_CODING: unpacking the superblock/macroblock/fragment coding - * DEBUG_MODES: unpacking the coding modes for individual fragments - * DEBUG_VECTORS: display the motion vectors - * DEBUG_TOKEN: display exhaustive information about each DCT token - * DEBUG_VLC: display the VLCs as they are extracted from the stream - * DEBUG_DC_PRED: display the process of reversing DC prediction - * DEBUG_IDCT: show every detail of the IDCT process - */ - -#define KEYFRAMES_ONLY 0 - -#define DEBUG_VP3 0 -#define DEBUG_INIT 0 -#define DEBUG_DEQUANTIZERS 0 -#define DEBUG_BLOCK_CODING 0 -#define DEBUG_MODES 0 -#define DEBUG_VECTORS 0 -#define DEBUG_TOKEN 0 -#define DEBUG_VLC 0 -#define DEBUG_DC_PRED 0 -#define DEBUG_IDCT 0 - -#if DEBUG_VP3 -#define debug_vp3(args...) av_log(NULL, AV_LOG_DEBUG, ## args) -#else -static inline void debug_vp3(const char *format, ...) { } -#endif - -#if DEBUG_INIT -#define debug_init(args...) 
av_log(NULL, AV_LOG_DEBUG, ## args) -#else -static inline void debug_init(const char *format, ...) { } -#endif - -#if DEBUG_DEQUANTIZERS -#define debug_dequantizers(args...) av_log(NULL, AV_LOG_DEBUG, ## args) -#else -static inline void debug_dequantizers(const char *format, ...) { } -#endif - -#if DEBUG_BLOCK_CODING -#define debug_block_coding(args...) av_log(NULL, AV_LOG_DEBUG, ## args) -#else -static inline void debug_block_coding(const char *format, ...) { } -#endif - -#if DEBUG_MODES -#define debug_modes(args...) av_log(NULL, AV_LOG_DEBUG, ## args) -#else -static inline void debug_modes(const char *format, ...) { } -#endif - -#if DEBUG_VECTORS -#define debug_vectors(args...) av_log(NULL, AV_LOG_DEBUG, ## args) -#else -static inline void debug_vectors(const char *format, ...) { } -#endif - -#if DEBUG_TOKEN -#define debug_token(args...) av_log(NULL, AV_LOG_DEBUG, ## args) -#else -static inline void debug_token(const char *format, ...) { } -#endif - -#if DEBUG_VLC -#define debug_vlc(args...) av_log(NULL, AV_LOG_DEBUG, ## args) -#else -static inline void debug_vlc(const char *format, ...) { } -#endif - -#if DEBUG_DC_PRED -#define debug_dc_pred(args...) av_log(NULL, AV_LOG_DEBUG, ## args) -#else -static inline void debug_dc_pred(const char *format, ...) { } -#endif - -#if DEBUG_IDCT -#define debug_idct(args...) av_log(NULL, AV_LOG_DEBUG, ## args) -#else -static inline void debug_idct(const char *format, ...) { } -#endif - -typedef struct Coeff { - struct Coeff *next; - DCTELEM coeff; - uint8_t index; -} Coeff; - //FIXME split things out into their own arrays typedef struct Vp3Fragment { - Coeff *next_coeff; - /* address of first pixel taking into account which plane the fragment - * lives on as well as the plane stride */ - int first_pixel; - /* this is the macroblock that the fragment belongs to */ - uint16_t macroblock; + int16_t dc; uint8_t coding_method; - uint8_t coeff_count; - int8_t motion_x; - int8_t motion_y; + uint8_t qpi; } Vp3Fragment; #define SB_NOT_CODED 0 #define SB_PARTIALLY_CODED 1 #define SB_FULLY_CODED 2 +// This is the maximum length of a single long bit run that can be encoded +// for superblock coding or block qps. Theora special-cases this to read a +// bit instead of flipping the current bit to allow for runs longer than 4129. 
+#define MAXIMUM_LONG_BIT_RUN 4129 + #define MODE_INTER_NO_MV 0 #define MODE_INTRA 1 #define MODE_INTER_PLUS_MV 2 @@ -172,11 +75,8 @@ typedef struct Vp3Fragment { #define MODE_COPY 8 /* There are 6 preset schemes, plus a free-form scheme */ -static int ModeAlphabet[7][CODING_MODE_COUNT] = +static const int ModeAlphabet[6][CODING_MODE_COUNT] = { - /* this is the custom scheme */ - { 0, 0, 0, 0, 0, 0, 0, 0 }, - /* scheme 1: Last motion vector dominates */ { MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST, MODE_INTER_PLUS_MV, MODE_INTER_NO_MV, @@ -215,6 +115,13 @@ static int ModeAlphabet[7][CODING_MODE_COUNT] = }; +static const uint8_t hilbert_offset[16][2] = { + {0,0}, {1,0}, {1,1}, {0,1}, + {0,2}, {0,3}, {1,3}, {1,2}, + {2,2}, {2,3}, {3,3}, {3,2}, + {3,1}, {2,1}, {2,0}, {3,0} +}; + #define MIN_DEQUANT_VAL 2 typedef struct Vp3DecodeContext { @@ -222,23 +129,27 @@ typedef struct Vp3DecodeContext { int theora, theora_tables; int version; int width, height; + int chroma_x_shift, chroma_y_shift; AVFrame golden_frame; AVFrame last_frame; AVFrame current_frame; int keyframe; DSPContext dsp; int flipped_image; + int last_slice_end; + int skip_loop_filter; - int quality_index; - int last_quality_index; + int qps[3]; + int nqps; + int last_qps[3]; int superblock_count; - int superblock_width; - int superblock_height; int y_superblock_width; int y_superblock_height; + int y_superblock_count; int c_superblock_width; int c_superblock_height; + int c_superblock_count; int u_superblock_start; int v_superblock_start; unsigned char *superblock_coding; @@ -248,14 +159,14 @@ typedef struct Vp3DecodeContext { int macroblock_height; int fragment_count; - int fragment_width; - int fragment_height; + int fragment_width[2]; + int fragment_height[2]; Vp3Fragment *all_fragments; - Coeff *coeffs; - Coeff *next_coeff; - int u_fragment_start; - int v_fragment_start; + int fragment_start[3]; + int data_offset[3]; + + int8_t (*motion_val[2])[2]; ScanTable scantable; @@ -267,11 +178,38 @@ typedef struct Vp3DecodeContext { uint8_t qr_size [2][3][64]; uint16_t qr_base[2][3][64]; - /* this is a list of indices into the all_fragments array indicating + /** + * This is a list of all tokens in bitstream order. Reordering takes place + * by pulling from each level during IDCT. As a consequence, IDCT must be + * in Hilbert order, making the minimum slice height 64 for 4:2:0 and 32 + * otherwise. The 32 different tokens with up to 12 bits of extradata are + * collapsed into 3 types, packed as follows: + * (from the low to high bits) + * + * 2 bits: type (0,1,2) + * 0: EOB run, 14 bits for run length (12 needed) + * 1: zero run, 7 bits for run length + * 7 bits for the next coefficient (3 needed) + * 2: coefficient, 14 bits (11 needed) + * + * Coefficients are signed, so are packed in the highest bits for automatic + * sign extension. 
+ */ + int16_t *dct_tokens[3][64]; + int16_t *dct_tokens_base; +#define TOKEN_EOB(eob_run) ((eob_run) << 2) +#define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) << 9) + ((zero_run) << 2) + 1) +#define TOKEN_COEFF(coeff) (((coeff) << 2) + 2) + + /** + * number of blocks that contain DCT coefficients at the given level or higher + */ + int num_coded_frags[3][64]; + int total_num_coded_frags; + + /* this is a list of indexes into the all_fragments array indicating * which of the fragments are coded */ - int *coded_fragment_list; - int coded_fragment_list_index; - int pixel_addresses_inited; + int *coded_fragment_list[3]; VLC dc_vlc[16]; VLC ac_vlc_1[16]; @@ -286,418 +224,193 @@ typedef struct Vp3DecodeContext { /* these arrays need to be on 16-byte boundaries since SSE2 operations * index into them */ - DECLARE_ALIGNED_16(int16_t, qmat[2][4][64]); // fragments, macroblocks <-> fragments, - * superblocks <-> macroblocks - * - * Returns 0 is successful; returns 1 if *anything* went wrong. - */ -static int init_block_mapping(Vp3DecodeContext *s) +static void vp3_decode_flush(AVCodecContext *avctx) { - int i, j; - signed int hilbert_walk_y[16]; - signed int hilbert_walk_c[16]; - signed int hilbert_walk_mb[4]; - - int current_fragment = 0; - int current_width = 0; - int current_height = 0; - int right_edge = 0; - int bottom_edge = 0; - int superblock_row_inc = 0; - int *hilbert = NULL; - int mapping_index = 0; - - int current_macroblock; - int c_fragment; - - signed char travel_width[16] = { - 1, 1, 0, -1, - 0, 0, 1, 0, - 1, 0, 1, 0, - 0, -1, 0, 1 - }; - - signed char travel_height[16] = { - 0, 0, 1, 0, - 1, 1, 0, -1, - 0, 1, 0, -1, - -1, 0, -1, 0 - }; - - signed char travel_width_mb[4] = { - 1, 0, 1, 0 - }; - - signed char travel_height_mb[4] = { - 0, 1, 0, -1 - }; - - debug_vp3(" vp3: initialize block mapping tables\n"); - - /* figure out hilbert pattern per these frame dimensions */ - hilbert_walk_y[0] = 1; - hilbert_walk_y[1] = 1; - hilbert_walk_y[2] = s->fragment_width; - hilbert_walk_y[3] = -1; - hilbert_walk_y[4] = s->fragment_width; - hilbert_walk_y[5] = s->fragment_width; - hilbert_walk_y[6] = 1; - hilbert_walk_y[7] = -s->fragment_width; - hilbert_walk_y[8] = 1; - hilbert_walk_y[9] = s->fragment_width; - hilbert_walk_y[10] = 1; - hilbert_walk_y[11] = -s->fragment_width; - hilbert_walk_y[12] = -s->fragment_width; - hilbert_walk_y[13] = -1; - hilbert_walk_y[14] = -s->fragment_width; - hilbert_walk_y[15] = 1; - - hilbert_walk_c[0] = 1; - hilbert_walk_c[1] = 1; - hilbert_walk_c[2] = s->fragment_width / 2; - hilbert_walk_c[3] = -1; - hilbert_walk_c[4] = s->fragment_width / 2; - hilbert_walk_c[5] = s->fragment_width / 2; - hilbert_walk_c[6] = 1; - hilbert_walk_c[7] = -s->fragment_width / 2; - hilbert_walk_c[8] = 1; - hilbert_walk_c[9] = s->fragment_width / 2; - hilbert_walk_c[10] = 1; - hilbert_walk_c[11] = -s->fragment_width / 2; - hilbert_walk_c[12] = -s->fragment_width / 2; - hilbert_walk_c[13] = -1; - hilbert_walk_c[14] = -s->fragment_width / 2; - hilbert_walk_c[15] = 1; - - hilbert_walk_mb[0] = 1; - hilbert_walk_mb[1] = s->macroblock_width; - hilbert_walk_mb[2] = 1; - hilbert_walk_mb[3] = -s->macroblock_width; - - /* iterate through each superblock (all planes) and map the fragments */ - for (i = 0; i < s->superblock_count; i++) { - debug_init(" superblock %d (u starts @ %d, v starts @ %d)\n", - i, s->u_superblock_start, s->v_superblock_start); - - /* time to re-assign the limits? 
*/ - if (i == 0) { - - /* start of Y superblocks */ - right_edge = s->fragment_width; - bottom_edge = s->fragment_height; - current_width = -1; - current_height = 0; - superblock_row_inc = 3 * s->fragment_width - - (s->y_superblock_width * 4 - s->fragment_width); - hilbert = hilbert_walk_y; - - /* the first operation for this variable is to advance by 1 */ - current_fragment = -1; - - } else if (i == s->u_superblock_start) { - - /* start of U superblocks */ - right_edge = s->fragment_width / 2; - bottom_edge = s->fragment_height / 2; - current_width = -1; - current_height = 0; - superblock_row_inc = 3 * (s->fragment_width / 2) - - (s->c_superblock_width * 4 - s->fragment_width / 2); - hilbert = hilbert_walk_c; - - /* the first operation for this variable is to advance by 1 */ - current_fragment = s->u_fragment_start - 1; - - } else if (i == s->v_superblock_start) { - - /* start of V superblocks */ - right_edge = s->fragment_width / 2; - bottom_edge = s->fragment_height / 2; - current_width = -1; - current_height = 0; - superblock_row_inc = 3 * (s->fragment_width / 2) - - (s->c_superblock_width * 4 - s->fragment_width / 2); - hilbert = hilbert_walk_c; - - /* the first operation for this variable is to advance by 1 */ - current_fragment = s->v_fragment_start - 1; - - } - - if (current_width >= right_edge - 1) { - /* reset width and move to next superblock row */ - current_width = -1; - current_height += 4; - - /* fragment is now at the start of a new superblock row */ - current_fragment += superblock_row_inc; - } - - /* iterate through all 16 fragments in a superblock */ - for (j = 0; j < 16; j++) { - current_fragment += hilbert[j]; - current_width += travel_width[j]; - current_height += travel_height[j]; - - /* check if the fragment is in bounds */ - if ((current_width < right_edge) && - (current_height < bottom_edge)) { - s->superblock_fragments[mapping_index] = current_fragment; - debug_init(" mapping fragment %d to superblock %d, position %d (%d/%d x %d/%d)\n", - s->superblock_fragments[mapping_index], i, j, - current_width, right_edge, current_height, bottom_edge); - } else { - s->superblock_fragments[mapping_index] = -1; - debug_init(" superblock %d, position %d has no fragment (%d/%d x %d/%d)\n", - i, j, - current_width, right_edge, current_height, bottom_edge); - } + Vp3DecodeContext *s = avctx->priv_data; - mapping_index++; - } + if (s->golden_frame.data[0]) { + if (s->golden_frame.data[0] == s->last_frame.data[0]) + memset(&s->last_frame, 0, sizeof(AVFrame)); + if (s->current_frame.data[0] == s->golden_frame.data[0]) + memset(&s->current_frame, 0, sizeof(AVFrame)); + ff_thread_release_buffer(avctx, &s->golden_frame); } + if (s->last_frame.data[0]) { + if (s->current_frame.data[0] == s->last_frame.data[0]) + memset(&s->current_frame, 0, sizeof(AVFrame)); + ff_thread_release_buffer(avctx, &s->last_frame); + } + if (s->current_frame.data[0]) + ff_thread_release_buffer(avctx, &s->current_frame); +} - /* initialize the superblock <-> macroblock mapping; iterate through - * all of the Y plane superblocks to build this mapping */ - right_edge = s->macroblock_width; - bottom_edge = s->macroblock_height; - current_width = -1; - current_height = 0; - superblock_row_inc = s->macroblock_width - - (s->y_superblock_width * 2 - s->macroblock_width);; - hilbert = hilbert_walk_mb; - mapping_index = 0; - current_macroblock = -1; - for (i = 0; i < s->u_superblock_start; i++) { - - if (current_width >= right_edge - 1) { - /* reset width and move to next superblock row */ - current_width = -1; - 
current_height += 2; - - /* macroblock is now at the start of a new superblock row */ - current_macroblock += superblock_row_inc; - } +static av_cold int vp3_decode_end(AVCodecContext *avctx) +{ + Vp3DecodeContext *s = avctx->priv_data; + int i; - /* iterate through each potential macroblock in the superblock */ - for (j = 0; j < 4; j++) { - current_macroblock += hilbert_walk_mb[j]; - current_width += travel_width_mb[j]; - current_height += travel_height_mb[j]; - - /* check if the macroblock is in bounds */ - if ((current_width < right_edge) && - (current_height < bottom_edge)) { - s->superblock_macroblocks[mapping_index] = current_macroblock; - debug_init(" mapping macroblock %d to superblock %d, position %d (%d/%d x %d/%d)\n", - s->superblock_macroblocks[mapping_index], i, j, - current_width, right_edge, current_height, bottom_edge); - } else { - s->superblock_macroblocks[mapping_index] = -1; - debug_init(" superblock %d, position %d has no macroblock (%d/%d x %d/%d)\n", - i, j, - current_width, right_edge, current_height, bottom_edge); - } + av_free(s->superblock_coding); + av_free(s->all_fragments); + av_free(s->coded_fragment_list[0]); + av_free(s->dct_tokens_base); + av_free(s->superblock_fragments); + av_free(s->macroblock_coding); + av_free(s->motion_val[0]); + av_free(s->motion_val[1]); + av_free(s->edge_emu_buffer); - mapping_index++; - } + if (avctx->internal->is_copy) + return 0; + + for (i = 0; i < 16; i++) { + ff_free_vlc(&s->dc_vlc[i]); + ff_free_vlc(&s->ac_vlc_1[i]); + ff_free_vlc(&s->ac_vlc_2[i]); + ff_free_vlc(&s->ac_vlc_3[i]); + ff_free_vlc(&s->ac_vlc_4[i]); } - /* initialize the macroblock <-> fragment mapping */ - current_fragment = 0; - current_macroblock = 0; - mapping_index = 0; - for (i = 0; i < s->fragment_height; i += 2) { - - for (j = 0; j < s->fragment_width; j += 2) { - - debug_init(" macroblock %d contains fragments: ", current_macroblock); - s->all_fragments[current_fragment].macroblock = current_macroblock; - s->macroblock_fragments[mapping_index++] = current_fragment; - debug_init("%d ", current_fragment); - - if (j + 1 < s->fragment_width) { - s->all_fragments[current_fragment + 1].macroblock = current_macroblock; - s->macroblock_fragments[mapping_index++] = current_fragment + 1; - debug_init("%d ", current_fragment + 1); - } else - s->macroblock_fragments[mapping_index++] = -1; - - if (i + 1 < s->fragment_height) { - s->all_fragments[current_fragment + s->fragment_width].macroblock = - current_macroblock; - s->macroblock_fragments[mapping_index++] = - current_fragment + s->fragment_width; - debug_init("%d ", current_fragment + s->fragment_width); - } else - s->macroblock_fragments[mapping_index++] = -1; - - if ((j + 1 < s->fragment_width) && (i + 1 < s->fragment_height)) { - s->all_fragments[current_fragment + s->fragment_width + 1].macroblock = - current_macroblock; - s->macroblock_fragments[mapping_index++] = - current_fragment + s->fragment_width + 1; - debug_init("%d ", current_fragment + s->fragment_width + 1); - } else - s->macroblock_fragments[mapping_index++] = -1; - - /* C planes */ - c_fragment = s->u_fragment_start + - (i * s->fragment_width / 4) + (j / 2); - s->all_fragments[c_fragment].macroblock = s->macroblock_count; - s->macroblock_fragments[mapping_index++] = c_fragment; - debug_init("%d ", c_fragment); - - c_fragment = s->v_fragment_start + - (i * s->fragment_width / 4) + (j / 2); - s->all_fragments[c_fragment].macroblock = s->macroblock_count; - s->macroblock_fragments[mapping_index++] = c_fragment; - debug_init("%d ", c_fragment); - - 
debug_init("\n"); - - if (j + 2 <= s->fragment_width) - current_fragment += 2; - else - current_fragment++; - current_macroblock++; - } + ff_free_vlc(&s->superblock_run_length_vlc); + ff_free_vlc(&s->fragment_run_length_vlc); + ff_free_vlc(&s->mode_code_vlc); + ff_free_vlc(&s->motion_vector_vlc); - current_fragment += s->fragment_width; - } + /* release all frames */ + vp3_decode_flush(avctx); - return 0; /* successful path out */ + return 0; } /* - * This function wipes out all of the fragment data. + * This function sets up all of the various blocks mappings: + * superblocks <-> fragments, macroblocks <-> fragments, + * superblocks <-> macroblocks + * + * @return 0 is successful; returns 1 if *anything* went wrong. */ -static void init_frame(Vp3DecodeContext *s, GetBitContext *gb) +static int init_block_mapping(Vp3DecodeContext *s) { - int i; + int sb_x, sb_y, plane; + int x, y, i, j = 0; - /* zero out all of the fragment information */ - s->coded_fragment_list_index = 0; - for (i = 0; i < s->fragment_count; i++) { - s->all_fragments[i].coeff_count = 0; - s->all_fragments[i].motion_x = 127; - s->all_fragments[i].motion_y = 127; - s->all_fragments[i].next_coeff= NULL; - s->coeffs[i].index= - s->coeffs[i].coeff=0; - s->coeffs[i].next= NULL; + for (plane = 0; plane < 3; plane++) { + int sb_width = plane ? s->c_superblock_width : s->y_superblock_width; + int sb_height = plane ? s->c_superblock_height : s->y_superblock_height; + int frag_width = s->fragment_width[!!plane]; + int frag_height = s->fragment_height[!!plane]; + + for (sb_y = 0; sb_y < sb_height; sb_y++) + for (sb_x = 0; sb_x < sb_width; sb_x++) + for (i = 0; i < 16; i++) { + x = 4*sb_x + hilbert_offset[i][0]; + y = 4*sb_y + hilbert_offset[i][1]; + + if (x < frag_width && y < frag_height) + s->superblock_fragments[j++] = s->fragment_start[plane] + y*frag_width + x; + else + s->superblock_fragments[j++] = -1; + } } + + return 0; /* successful path out */ } /* * This function sets up the dequantization tables used for a particular * frame. */ -static void init_dequantizer(Vp3DecodeContext *s) +static void init_dequantizer(Vp3DecodeContext *s, int qpi) { - int ac_scale_factor = s->coded_ac_scale_factor[s->quality_index]; - int dc_scale_factor = s->coded_dc_scale_factor[s->quality_index]; - int i, j, plane, inter, qri, bmi, bmj, qistart; - - debug_vp3(" vp3: initializing dequantization tables\n"); + int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]]; + int dc_scale_factor = s->coded_dc_scale_factor[s->qps[qpi]]; + int i, plane, inter, qri, bmi, bmj, qistart; for(inter=0; inter<2; inter++){ for(plane=0; plane<3; plane++){ int sum=0; for(qri=0; qriqr_count[inter][plane]; qri++){ sum+= s->qr_size[inter][plane][qri]; - if(s->quality_index <= sum) + if(s->qps[qpi] <= sum) break; } qistart= sum - s->qr_size[inter][plane][qri]; bmi= s->qr_base[inter][plane][qri ]; bmj= s->qr_base[inter][plane][qri+1]; for(i=0; i<64; i++){ - int coeff= ( 2*(sum -s->quality_index)*s->base_matrix[bmi][i] - - 2*(qistart-s->quality_index)*s->base_matrix[bmj][i] + int coeff= ( 2*(sum -s->qps[qpi])*s->base_matrix[bmi][i] + - 2*(qistart-s->qps[qpi])*s->base_matrix[bmj][i] + s->qr_size[inter][plane][qri]) / (2*s->qr_size[inter][plane][qri]); - int qmin= 8<<(inter + !plane); + int qmin= 8<<(inter + !i); int qscale= i ? 
ac_scale_factor : dc_scale_factor; - s->qmat[inter][plane][i]= clip((qscale * coeff)/100 * 4, qmin, 4096); + s->qmat[qpi][inter][plane][s->dsp.idct_permutation[i]]= av_clip((qscale * coeff)/100 * 4, qmin, 4096); } + // all DC coefficients use the same quant so as not to interfere with DC prediction + s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0]; } } - - memset(s->qscale_table, (FFMAX(s->qmat[0][0][1], s->qmat[0][1][1])+8)/16, 512); //FIXME finetune } /* * This function initializes the loop filter boundary limits if the frame's * quality index is different from the previous frame's. + * + * The filter_limit_values may not be larger than 127. */ static void init_loop_filter(Vp3DecodeContext *s) { int *bounding_values= s->bounding_values_array+127; int filter_limit; int x; + int value; - filter_limit = s->filter_limit_values[s->quality_index]; + filter_limit = s->filter_limit_values[s->qps[0]]; + assert(filter_limit < 128); /* set up the bounding values */ memset(s->bounding_values_array, 0, 256 * sizeof(int)); for (x = 0; x < filter_limit; x++) { - bounding_values[-x - filter_limit] = -filter_limit + x; bounding_values[-x] = -x; bounding_values[x] = x; - bounding_values[x + filter_limit] = filter_limit - x; } + for (x = value = filter_limit; x < 128 && value; x++, value--) { + bounding_values[ x] = value; + bounding_values[-x] = -value; + } + if (value) + bounding_values[128] = value; + bounding_values[129] = bounding_values[130] = filter_limit * 0x02020202; } /* @@ -706,94 +419,92 @@ static void init_loop_filter(Vp3DecodeContext *s) */ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb) { + int superblock_starts[3] = { 0, s->u_superblock_start, s->v_superblock_start }; int bit = 0; int current_superblock = 0; int current_run = 0; - int decode_fully_flags = 0; - int decode_partial_blocks = 0; - int first_c_fragment_seen; + int num_partial_superblocks = 0; int i, j; int current_fragment; - - debug_vp3(" vp3: unpacking superblock coding\n"); + int plane; if (s->keyframe) { - - debug_vp3(" keyframe-- all superblocks are fully coded\n"); memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count); } else { /* unpack the list of partially-coded superblocks */ - bit = get_bits(gb, 1); - /* toggle the bit because as soon as the first run length is - * fetched the bit will be toggled again */ - bit ^= 1; - while (current_superblock < s->superblock_count) { - if (current_run-- == 0) { + bit = get_bits1(gb) ^ 1; + current_run = 0; + + while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) { + if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN) + bit = get_bits1(gb); + else bit ^= 1; + current_run = get_vlc2(gb, - s->superblock_run_length_vlc.table, 6, 2); - if (current_run == 33) + s->superblock_run_length_vlc.table, 6, 2) + 1; + if (current_run == 34) current_run += get_bits(gb, 12); - debug_block_coding(" setting superblocks %d..%d to %s\n", - current_superblock, - current_superblock + current_run - 1, - (bit) ? 
"partially coded" : "not coded"); - - /* if any of the superblocks are not partially coded, flag - * a boolean to decode the list of fully-coded superblocks */ - if (bit == 0) { - decode_fully_flags = 1; - } else { - /* make a note of the fact that there are partially coded - * superblocks */ - decode_partial_blocks = 1; - } + if (current_superblock + current_run > s->superblock_count) { + av_log(s->avctx, AV_LOG_ERROR, "Invalid partially coded superblock run length\n"); + return -1; } - s->superblock_coding[current_superblock++] = bit; + + memset(s->superblock_coding + current_superblock, bit, current_run); + + current_superblock += current_run; + if (bit) + num_partial_superblocks += current_run; } /* unpack the list of fully coded superblocks if any of the blocks were * not marked as partially coded in the previous step */ - if (decode_fully_flags) { + if (num_partial_superblocks < s->superblock_count) { + int superblocks_decoded = 0; current_superblock = 0; + bit = get_bits1(gb) ^ 1; current_run = 0; - bit = get_bits(gb, 1); - /* toggle the bit because as soon as the first run length is - * fetched the bit will be toggled again */ - bit ^= 1; - while (current_superblock < s->superblock_count) { - /* skip any superblocks already marked as partially coded */ - if (s->superblock_coding[current_superblock] == SB_NOT_CODED) { + while (superblocks_decoded < s->superblock_count - num_partial_superblocks + && get_bits_left(gb) > 0) { + + if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN) + bit = get_bits1(gb); + else + bit ^= 1; - if (current_run-- == 0) { - bit ^= 1; current_run = get_vlc2(gb, - s->superblock_run_length_vlc.table, 6, 2); - if (current_run == 33) + s->superblock_run_length_vlc.table, 6, 2) + 1; + if (current_run == 34) current_run += get_bits(gb, 12); + + for (j = 0; j < current_run; current_superblock++) { + if (current_superblock >= s->superblock_count) { + av_log(s->avctx, AV_LOG_ERROR, "Invalid fully coded superblock run length\n"); + return -1; } - debug_block_coding(" setting superblock %d to %s\n", - current_superblock, - (bit) ? "fully coded" : "not coded"); + /* skip any superblocks already marked as partially coded */ + if (s->superblock_coding[current_superblock] == SB_NOT_CODED) { s->superblock_coding[current_superblock] = 2*bit; + j++; } - current_superblock++; + } + superblocks_decoded += current_run; } } /* if there were partial blocks, initialize bitstream for * unpacking fragment codings */ - if (decode_partial_blocks) { + if (num_partial_superblocks) { current_run = 0; - bit = get_bits(gb, 1); + bit = get_bits1(gb); /* toggle the bit because as soon as the first run length is * fetched the bit will be toggled again */ bit ^= 1; @@ -802,32 +513,25 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb) /* figure out which fragments are coded; iterate through each * superblock (all planes) */ - s->coded_fragment_list_index = 0; - s->next_coeff= s->coeffs + s->fragment_count; - s->first_coded_y_fragment = s->first_coded_c_fragment = 0; - s->last_coded_y_fragment = s->last_coded_c_fragment = -1; - first_c_fragment_seen = 0; + s->total_num_coded_frags = 0; memset(s->macroblock_coding, MODE_COPY, s->macroblock_count); - for (i = 0; i < s->superblock_count; i++) { + + for (plane = 0; plane < 3; plane++) { + int sb_start = superblock_starts[plane]; + int sb_end = sb_start + (plane ? 
s->c_superblock_count : s->y_superblock_count); + int num_coded_frags = 0; + + for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) { /* iterate through all 16 fragments in a superblock */ for (j = 0; j < 16; j++) { /* if the fragment is in bounds, check its coding status */ current_fragment = s->superblock_fragments[i * 16 + j]; - if (current_fragment >= s->fragment_count) { - av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_superblocks(): bad fragment number (%d >= %d)\n", - current_fragment, s->fragment_count); - return 1; - } if (current_fragment != -1) { - if (s->superblock_coding[i] == SB_NOT_CODED) { - - /* copy all the fragments from the prior frame */ - s->all_fragments[current_fragment].coding_method = - MODE_COPY; + int coded = s->superblock_coding[i]; - } else if (s->superblock_coding[i] == SB_PARTIALLY_CODED) { + if (s->superblock_coding[i] == SB_PARTIALLY_CODED) { /* fragment may or may not be coded; this is the case * that cares about the fragment coding runs */ @@ -836,73 +540,30 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb) current_run = get_vlc2(gb, s->fragment_run_length_vlc.table, 5, 2); } + coded = bit; + } - if (bit) { + if (coded) { /* default mode; actual mode will be decoded in * the next phase */ s->all_fragments[current_fragment].coding_method = MODE_INTER_NO_MV; - s->all_fragments[current_fragment].next_coeff= s->coeffs + current_fragment; - s->coded_fragment_list[s->coded_fragment_list_index] = + s->coded_fragment_list[plane][num_coded_frags++] = current_fragment; - if ((current_fragment >= s->u_fragment_start) && - (s->last_coded_y_fragment == -1) && - (!first_c_fragment_seen)) { - s->first_coded_c_fragment = s->coded_fragment_list_index; - s->last_coded_y_fragment = s->first_coded_c_fragment - 1; - first_c_fragment_seen = 1; - } - s->coded_fragment_list_index++; - s->macroblock_coding[s->all_fragments[current_fragment].macroblock] = MODE_INTER_NO_MV; - debug_block_coding(" superblock %d is partially coded, fragment %d is coded\n", - i, current_fragment); } else { /* not coded; copy this fragment from the prior frame */ s->all_fragments[current_fragment].coding_method = MODE_COPY; - debug_block_coding(" superblock %d is partially coded, fragment %d is not coded\n", - i, current_fragment); - } - - } else { - - /* fragments are fully coded in this superblock; actual - * coding will be determined in next step */ - s->all_fragments[current_fragment].coding_method = - MODE_INTER_NO_MV; - s->all_fragments[current_fragment].next_coeff= s->coeffs + current_fragment; - s->coded_fragment_list[s->coded_fragment_list_index] = - current_fragment; - if ((current_fragment >= s->u_fragment_start) && - (s->last_coded_y_fragment == -1) && - (!first_c_fragment_seen)) { - s->first_coded_c_fragment = s->coded_fragment_list_index; - s->last_coded_y_fragment = s->first_coded_c_fragment - 1; - first_c_fragment_seen = 1; } - s->coded_fragment_list_index++; - s->macroblock_coding[s->all_fragments[current_fragment].macroblock] = MODE_INTER_NO_MV; - debug_block_coding(" superblock %d is fully coded, fragment %d is coded\n", - i, current_fragment); - } } } } - - if (!first_c_fragment_seen) - /* only Y fragments coded in this frame */ - s->last_coded_y_fragment = s->coded_fragment_list_index - 1; - else - /* end the list of coded C fragments */ - s->last_coded_c_fragment = s->coded_fragment_list_index - 1; - - debug_block_coding(" %d total coded fragments, y: %d -> %d, c: %d -> %d\n", - s->coded_fragment_list_index, - s->first_coded_y_fragment, - 
s->last_coded_y_fragment, - s->first_coded_c_fragment, - s->last_coded_c_fragment); - + s->total_num_coded_frags += num_coded_frags; + for (i = 0; i < 64; i++) + s->num_coded_frags[plane][i] = num_coded_frags; + if (plane < 2) + s->coded_fragment_list[plane+1] = s->coded_fragment_list[plane] + num_coded_frags; + } return 0; } @@ -912,17 +573,16 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb) */ static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb) { - int i, j, k; + int i, j, k, sb_x, sb_y; int scheme; int current_macroblock; int current_fragment; int coding_mode; - - debug_vp3(" vp3: unpacking encoding modes\n"); + int custom_mode_alphabet[CODING_MODE_COUNT]; + const int *alphabet; + Vp3Fragment *frag; if (s->keyframe) { - debug_vp3(" keyframe-- all blocks are coded as INTRA\n"); - for (i = 0; i < s->fragment_count; i++) s->all_fragments[i].coding_method = MODE_INTRA; @@ -930,60 +590,82 @@ static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb) /* fetch the mode coding scheme for this frame */ scheme = get_bits(gb, 3); - debug_modes(" using mode alphabet %d\n", scheme); /* is it a custom coding scheme? */ if (scheme == 0) { - debug_modes(" custom mode alphabet ahead:\n"); for (i = 0; i < 8; i++) - ModeAlphabet[scheme][get_bits(gb, 3)] = i; - } - - for (i = 0; i < 8; i++) - debug_modes(" mode[%d][%d] = %d\n", scheme, i, - ModeAlphabet[scheme][i]); + custom_mode_alphabet[i] = MODE_INTER_NO_MV; + for (i = 0; i < 8; i++) + custom_mode_alphabet[get_bits(gb, 3)] = i; + alphabet = custom_mode_alphabet; + } else + alphabet = ModeAlphabet[scheme-1]; /* iterate through all of the macroblocks that contain 1 or more * coded fragments */ - for (i = 0; i < s->u_superblock_start; i++) { + for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) { + for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) { + if (get_bits_left(gb) <= 0) + return -1; for (j = 0; j < 4; j++) { - current_macroblock = s->superblock_macroblocks[i * 4 + j]; - if ((current_macroblock == -1) || - (s->macroblock_coding[current_macroblock] == MODE_COPY)) + int mb_x = 2*sb_x + (j>>1); + int mb_y = 2*sb_y + (((j>>1)+j)&1); + current_macroblock = mb_y * s->macroblock_width + mb_x; + + if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height) + continue; + +#define BLOCK_X (2*mb_x + (k&1)) +#define BLOCK_Y (2*mb_y + (k>>1)) + /* coding modes are only stored if the macroblock has at least one + * luma block coded, otherwise it must be INTER_NO_MV */ + for (k = 0; k < 4; k++) { + current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X; + if (s->all_fragments[current_fragment].coding_method != MODE_COPY) + break; + } + if (k == 4) { + s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV; continue; - if (current_macroblock >= s->macroblock_count) { - av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_modes(): bad macroblock number (%d >= %d)\n", - current_macroblock, s->macroblock_count); - return 1; } /* mode 7 means get 3 bits for each coding mode */ if (scheme == 7) coding_mode = get_bits(gb, 3); else - coding_mode = ModeAlphabet[scheme] + coding_mode = alphabet [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)]; s->macroblock_coding[current_macroblock] = coding_mode; - for (k = 0; k < 6; k++) { - current_fragment = - s->macroblock_fragments[current_macroblock * 6 + k]; - if (current_fragment == -1) - continue; - if (current_fragment >= s->fragment_count) { - av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_modes(): bad fragment number (%d >= %d)\n", - current_fragment, s->fragment_count); - return 1; 
- } - if (s->all_fragments[current_fragment].coding_method != - MODE_COPY) - s->all_fragments[current_fragment].coding_method = - coding_mode; + for (k = 0; k < 4; k++) { + frag = s->all_fragments + BLOCK_Y*s->fragment_width[0] + BLOCK_X; + if (frag->coding_method != MODE_COPY) + frag->coding_method = coding_mode; } - debug_modes(" coding method for macroblock starting @ fragment %d = %d\n", - s->macroblock_fragments[current_macroblock * 6], coding_mode); +#define SET_CHROMA_MODES \ + if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \ + frag[s->fragment_start[1]].coding_method = coding_mode;\ + if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \ + frag[s->fragment_start[2]].coding_method = coding_mode; + + if (s->chroma_y_shift) { + frag = s->all_fragments + mb_y*s->fragment_width[1] + mb_x; + SET_CHROMA_MODES + } else if (s->chroma_x_shift) { + frag = s->all_fragments + 2*mb_y*s->fragment_width[1] + mb_x; + for (k = 0; k < 2; k++) { + SET_CHROMA_MODES + frag += s->fragment_width[1]; + } + } else { + for (k = 0; k < 4; k++) { + frag = s->all_fragments + BLOCK_Y*s->fragment_width[1] + BLOCK_X; + SET_CHROMA_MODES + } + } + } } } } @@ -997,86 +679,73 @@ static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb) */ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb) { - int i, j, k; + int j, k, sb_x, sb_y; int coding_mode; - int motion_x[6]; - int motion_y[6]; + int motion_x[4]; + int motion_y[4]; int last_motion_x = 0; int last_motion_y = 0; int prior_last_motion_x = 0; int prior_last_motion_y = 0; int current_macroblock; int current_fragment; + int frag; - debug_vp3(" vp3: unpacking motion vectors\n"); - if (s->keyframe) { - - debug_vp3(" keyframe-- there are no motion vectors\n"); - - } else { - - memset(motion_x, 0, 6 * sizeof(int)); - memset(motion_y, 0, 6 * sizeof(int)); + if (s->keyframe) + return 0; - /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */ - coding_mode = get_bits(gb, 1); - debug_vectors(" using %s scheme for unpacking motion vectors\n", - (coding_mode == 0) ? 
"VLC" : "fixed-length"); + /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */ + coding_mode = get_bits1(gb); - /* iterate through all of the macroblocks that contain 1 or more - * coded fragments */ - for (i = 0; i < s->u_superblock_start; i++) { + /* iterate through all of the macroblocks that contain 1 or more + * coded fragments */ + for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) { + for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) { + if (get_bits_left(gb) <= 0) + return -1; - for (j = 0; j < 4; j++) { - current_macroblock = s->superblock_macroblocks[i * 4 + j]; - if ((current_macroblock == -1) || - (s->macroblock_coding[current_macroblock] == MODE_COPY)) - continue; - if (current_macroblock >= s->macroblock_count) { - av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad macroblock number (%d >= %d)\n", - current_macroblock, s->macroblock_count); - return 1; + for (j = 0; j < 4; j++) { + int mb_x = 2*sb_x + (j>>1); + int mb_y = 2*sb_y + (((j>>1)+j)&1); + current_macroblock = mb_y * s->macroblock_width + mb_x; + + if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height || + (s->macroblock_coding[current_macroblock] == MODE_COPY)) + continue; + + switch (s->macroblock_coding[current_macroblock]) { + + case MODE_INTER_PLUS_MV: + case MODE_GOLDEN_MV: + /* all 6 fragments use the same motion vector */ + if (coding_mode == 0) { + motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)]; + motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)]; + } else { + motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)]; + motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)]; } - current_fragment = s->macroblock_fragments[current_macroblock * 6]; - if (current_fragment >= s->fragment_count) { - av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad fragment number (%d >= %d\n", - current_fragment, s->fragment_count); - return 1; + /* vector maintenance, only on MODE_INTER_PLUS_MV */ + if (s->macroblock_coding[current_macroblock] == + MODE_INTER_PLUS_MV) { + prior_last_motion_x = last_motion_x; + prior_last_motion_y = last_motion_y; + last_motion_x = motion_x[0]; + last_motion_y = motion_y[0]; } - switch (s->macroblock_coding[current_macroblock]) { - - case MODE_INTER_PLUS_MV: - case MODE_GOLDEN_MV: - /* all 6 fragments use the same motion vector */ - if (coding_mode == 0) { - motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)]; - motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)]; - } else { - motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)]; - motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)]; - } - - for (k = 1; k < 6; k++) { - motion_x[k] = motion_x[0]; - motion_y[k] = motion_y[0]; - } + break; - /* vector maintenance, only on MODE_INTER_PLUS_MV */ - if (s->macroblock_coding[current_macroblock] == - MODE_INTER_PLUS_MV) { - prior_last_motion_x = last_motion_x; - prior_last_motion_y = last_motion_y; - last_motion_x = motion_x[0]; - last_motion_y = motion_y[0]; - } - break; + case MODE_INTER_FOURMV: + /* vector maintenance */ + prior_last_motion_x = last_motion_x; + prior_last_motion_y = last_motion_y; - case MODE_INTER_FOURMV: - /* fetch 4 vectors from the bitstream, one for each - * Y fragment, then average for the C fragment vectors */ - motion_x[4] = motion_y[4] = 0; - for (k = 0; k < 4; k++) { + /* fetch 4 vectors from the bitstream, one for each + * Y fragment, then average for the C fragment vectors */ + for (k = 0; k < 
4; k++) { + current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X; + if (s->all_fragments[current_fragment].coding_method != MODE_COPY) { if (coding_mode == 0) { motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)]; motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)]; @@ -1084,83 +753,144 @@ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb) motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)]; motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)]; } - motion_x[4] += motion_x[k]; - motion_y[4] += motion_y[k]; + last_motion_x = motion_x[k]; + last_motion_y = motion_y[k]; + } else { + motion_x[k] = 0; + motion_y[k] = 0; } + } + break; - motion_x[5]= - motion_x[4]= RSHIFT(motion_x[4], 2); - motion_y[5]= - motion_y[4]= RSHIFT(motion_y[4], 2); - - /* vector maintenance; vector[3] is treated as the - * last vector in this case */ - prior_last_motion_x = last_motion_x; - prior_last_motion_y = last_motion_y; - last_motion_x = motion_x[3]; - last_motion_y = motion_y[3]; - break; - - case MODE_INTER_LAST_MV: - /* all 6 fragments use the last motion vector */ - motion_x[0] = last_motion_x; - motion_y[0] = last_motion_y; - for (k = 1; k < 6; k++) { - motion_x[k] = motion_x[0]; - motion_y[k] = motion_y[0]; - } + case MODE_INTER_LAST_MV: + /* all 6 fragments use the last motion vector */ + motion_x[0] = last_motion_x; + motion_y[0] = last_motion_y; - /* no vector maintenance (last vector remains the - * last vector) */ - break; + /* no vector maintenance (last vector remains the + * last vector) */ + break; - case MODE_INTER_PRIOR_LAST: - /* all 6 fragments use the motion vector prior to the - * last motion vector */ - motion_x[0] = prior_last_motion_x; - motion_y[0] = prior_last_motion_y; - for (k = 1; k < 6; k++) { - motion_x[k] = motion_x[0]; - motion_y[k] = motion_y[0]; - } + case MODE_INTER_PRIOR_LAST: + /* all 6 fragments use the motion vector prior to the + * last motion vector */ + motion_x[0] = prior_last_motion_x; + motion_y[0] = prior_last_motion_y; + + /* vector maintenance */ + prior_last_motion_x = last_motion_x; + prior_last_motion_y = last_motion_y; + last_motion_x = motion_x[0]; + last_motion_y = motion_y[0]; + break; - /* vector maintenance */ - prior_last_motion_x = last_motion_x; - prior_last_motion_y = last_motion_y; - last_motion_x = motion_x[0]; - last_motion_y = motion_y[0]; - break; + default: + /* covers intra, inter without MV, golden without MV */ + motion_x[0] = 0; + motion_y[0] = 0; - default: - /* covers intra, inter without MV, golden without MV */ - memset(motion_x, 0, 6 * sizeof(int)); - memset(motion_y, 0, 6 * sizeof(int)); + /* no vector maintenance */ + break; + } - /* no vector maintenance */ - break; + /* assign the motion vectors to the correct fragments */ + for (k = 0; k < 4; k++) { + current_fragment = + BLOCK_Y*s->fragment_width[0] + BLOCK_X; + if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) { + s->motion_val[0][current_fragment][0] = motion_x[k]; + s->motion_val[0][current_fragment][1] = motion_y[k]; + } else { + s->motion_val[0][current_fragment][0] = motion_x[0]; + s->motion_val[0][current_fragment][1] = motion_y[0]; } + } - /* assign the motion vectors to the correct fragments */ - debug_vectors(" vectors for macroblock starting @ fragment %d (coding method %d):\n", - current_fragment, - s->macroblock_coding[current_macroblock]); - for (k = 0; k < 6; k++) { - current_fragment = - s->macroblock_fragments[current_macroblock * 6 + k]; - if (current_fragment 
== -1) - continue; - if (current_fragment >= s->fragment_count) { - av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad fragment number (%d >= %d)\n", - current_fragment, s->fragment_count); - return 1; + if (s->chroma_y_shift) { + if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) { + motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] + motion_x[2] + motion_x[3], 2); + motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] + motion_y[2] + motion_y[3], 2); + } + motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1); + motion_y[0] = (motion_y[0]>>1) | (motion_y[0]&1); + frag = mb_y*s->fragment_width[1] + mb_x; + s->motion_val[1][frag][0] = motion_x[0]; + s->motion_val[1][frag][1] = motion_y[0]; + } else if (s->chroma_x_shift) { + if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) { + motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1); + motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1); + motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1); + motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1); + } else { + motion_x[1] = motion_x[0]; + motion_y[1] = motion_y[0]; + } + motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1); + motion_x[1] = (motion_x[1]>>1) | (motion_x[1]&1); + + frag = 2*mb_y*s->fragment_width[1] + mb_x; + for (k = 0; k < 2; k++) { + s->motion_val[1][frag][0] = motion_x[k]; + s->motion_val[1][frag][1] = motion_y[k]; + frag += s->fragment_width[1]; + } + } else { + for (k = 0; k < 4; k++) { + frag = BLOCK_Y*s->fragment_width[1] + BLOCK_X; + if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) { + s->motion_val[1][frag][0] = motion_x[k]; + s->motion_val[1][frag][1] = motion_y[k]; + } else { + s->motion_val[1][frag][0] = motion_x[0]; + s->motion_val[1][frag][1] = motion_y[0]; } - s->all_fragments[current_fragment].motion_x = motion_x[k]; - s->all_fragments[current_fragment].motion_y = motion_y[k]; - debug_vectors(" vector %d: fragment %d = (%d, %d)\n", - k, current_fragment, motion_x[k], motion_y[k]); } } } + } + } + + return 0; +} + +static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb) +{ + int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi; + int num_blocks = s->total_num_coded_frags; + + for (qpi = 0; qpi < s->nqps-1 && num_blocks > 0; qpi++) { + i = blocks_decoded = num_blocks_at_qpi = 0; + + bit = get_bits1(gb) ^ 1; + run_length = 0; + + do { + if (run_length == MAXIMUM_LONG_BIT_RUN) + bit = get_bits1(gb); + else + bit ^= 1; + + run_length = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1; + if (run_length == 34) + run_length += get_bits(gb, 12); + blocks_decoded += run_length; + + if (!bit) + num_blocks_at_qpi += run_length; + + for (j = 0; j < run_length; i++) { + if (i >= s->total_num_coded_frags) + return -1; + + if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) { + s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit; + j++; + } + } + } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0); + + num_blocks -= num_blocks_at_qpi; } return 0; @@ -1180,76 +910,124 @@ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb) */ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, VLC *table, int coeff_index, - int first_fragment, int last_fragment, + int plane, int eob_run) { - int i; + int i, j = 0; int token; int zero_run = 0; DCTELEM coeff = 0; - Vp3Fragment *fragment; - uint8_t *perm= s->scantable.permutated; int bits_to_get; - - if ((first_fragment >= s->fragment_count) || - (last_fragment >= s->fragment_count)) { - - av_log(s->avctx, AV_LOG_ERROR, " 
vp3:unpack_vlcs(): bad fragment number (%d -> %d ?)\n", - first_fragment, last_fragment); - return 0; + int blocks_ended; + int coeff_i = 0; + int num_coeffs = s->num_coded_frags[plane][coeff_index]; + int16_t *dct_tokens = s->dct_tokens[plane][coeff_index]; + + /* local references to structure members to avoid repeated deferences */ + int *coded_fragment_list = s->coded_fragment_list[plane]; + Vp3Fragment *all_fragments = s->all_fragments; + VLC_TYPE (*vlc_table)[2] = table->table; + + if (num_coeffs < 0) + av_log(s->avctx, AV_LOG_ERROR, "Invalid number of coefficents at level %d\n", coeff_index); + + if (eob_run > num_coeffs) { + coeff_i = blocks_ended = num_coeffs; + eob_run -= num_coeffs; + } else { + coeff_i = blocks_ended = eob_run; + eob_run = 0; } - for (i = first_fragment; i <= last_fragment; i++) { + // insert fake EOB token to cover the split between planes or zzi + if (blocks_ended) + dct_tokens[j++] = blocks_ended << 2; - fragment = &s->all_fragments[s->coded_fragment_list[i]]; - if (fragment->coeff_count > coeff_index) - continue; - - if (!eob_run) { + while (coeff_i < num_coeffs && get_bits_left(gb) > 0) { /* decode a VLC into a token */ - token = get_vlc2(gb, table->table, 5, 3); - debug_vlc(" token = %2d, ", token); + token = get_vlc2(gb, vlc_table, 11, 3); /* use the token to get a zero run, a coefficient, and an eob run */ - if (token <= 6) { + if ((unsigned) token <= 6U) { eob_run = eob_run_base[token]; if (eob_run_get_bits[token]) eob_run += get_bits(gb, eob_run_get_bits[token]); - coeff = zero_run = 0; - } else { + + // record only the number of blocks ended in this plane, + // any spill will be recorded in the next plane. + if (eob_run > num_coeffs - coeff_i) { + dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i); + blocks_ended += num_coeffs - coeff_i; + eob_run -= num_coeffs - coeff_i; + coeff_i = num_coeffs; + } else { + dct_tokens[j++] = TOKEN_EOB(eob_run); + blocks_ended += eob_run; + coeff_i += eob_run; + eob_run = 0; + } + } else if (token >= 0) { bits_to_get = coeff_get_bits[token]; - if (!bits_to_get) - coeff = coeff_tables[token][0]; - else - coeff = coeff_tables[token][get_bits(gb, bits_to_get)]; + if (bits_to_get) + bits_to_get = get_bits(gb, bits_to_get); + coeff = coeff_tables[token][bits_to_get]; zero_run = zero_run_base[token]; if (zero_run_get_bits[token]) zero_run += get_bits(gb, zero_run_get_bits[token]); - } - } - if (!eob_run) { - fragment->coeff_count += zero_run; - if (fragment->coeff_count < 64){ - fragment->next_coeff->coeff= coeff; - fragment->next_coeff->index= perm[fragment->coeff_count++]; //FIXME perm here already? - fragment->next_coeff->next= s->next_coeff; - s->next_coeff->next=NULL; - fragment->next_coeff= s->next_coeff++; + if (zero_run) { + dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run); + } else { + // Save DC into the fragment structure. DC prediction is + // done in raster order, so the actual DC can't be in with + // other tokens. We still need the token in dct_tokens[] + // however, or else the structure collapses on itself. 
+ if (!coeff_index) + all_fragments[coded_fragment_list[coeff_i]].dc = coeff; + + dct_tokens[j++] = TOKEN_COEFF(coeff); + } + + if (coeff_index + zero_run > 64) { + av_log(s->avctx, AV_LOG_DEBUG, "Invalid zero run of %d with" + " %d coeffs left\n", zero_run, 64-coeff_index); + zero_run = 64 - coeff_index; + } + + // zero runs code multiple coefficients, + // so don't try to decode coeffs for those higher levels + for (i = coeff_index+1; i <= coeff_index+zero_run; i++) + s->num_coded_frags[plane][i]--; + coeff_i++; + } else { + av_log(s->avctx, AV_LOG_ERROR, + "Invalid token %d\n", token); + return -1; } - debug_vlc(" fragment %d coeff = %d\n", - s->coded_fragment_list[i], fragment->next_coeff[coeff_index]); - } else { - fragment->coeff_count |= 128; - debug_vlc(" fragment %d eob with %d coefficients\n", - s->coded_fragment_list[i], fragment->coeff_count&127); - eob_run--; - } } + if (blocks_ended > s->num_coded_frags[plane][coeff_index]) + av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n"); + + // decrement the number of blocks that have higher coeffecients for each + // EOB run at this level + if (blocks_ended) + for (i = coeff_index+1; i < 64; i++) + s->num_coded_frags[plane][i] -= blocks_ended; + + // setup the next buffer + if (plane < 2) + s->dct_tokens[plane+1][coeff_index] = dct_tokens + j; + else if (coeff_index < 63) + s->dct_tokens[0][coeff_index+1] = dct_tokens + j; + return eob_run; } +static void reverse_dc_prediction(Vp3DecodeContext *s, + int first_fragment, + int fragment_width, + int fragment_height); /* * This function unpacks all of the DCT coefficient data from the * bitstream. @@ -1262,81 +1040,80 @@ static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb) int ac_y_table; int ac_c_table; int residual_eob_run = 0; + VLC *y_tables[64]; + VLC *c_tables[64]; - /* fetch the DC table indices */ + s->dct_tokens[0][0] = s->dct_tokens_base; + + /* fetch the DC table indexes */ dc_y_table = get_bits(gb, 4); dc_c_table = get_bits(gb, 4); /* unpack the Y plane DC coefficients */ - debug_vp3(" vp3: unpacking Y plane DC coefficients using table %d\n", - dc_y_table); residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0, - s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run); + 0, residual_eob_run); + if (residual_eob_run < 0) + return residual_eob_run; + + /* reverse prediction of the Y-plane DC coefficients */ + reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]); /* unpack the C plane DC coefficients */ - debug_vp3(" vp3: unpacking C plane DC coefficients using table %d\n", - dc_c_table); residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0, - s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run); + 1, residual_eob_run); + if (residual_eob_run < 0) + return residual_eob_run; + residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0, + 2, residual_eob_run); + if (residual_eob_run < 0) + return residual_eob_run; - /* fetch the AC table indices */ + /* reverse prediction of the C-plane DC coefficients */ + if (!(s->avctx->flags & CODEC_FLAG_GRAY)) + { + reverse_dc_prediction(s, s->fragment_start[1], + s->fragment_width[1], s->fragment_height[1]); + reverse_dc_prediction(s, s->fragment_start[2], + s->fragment_width[1], s->fragment_height[1]); + } + + /* fetch the AC table indexes */ ac_y_table = get_bits(gb, 4); ac_c_table = get_bits(gb, 4); - /* unpack the group 1 AC coefficients (coeffs 1-5) */ + /* build tables of AC VLC tables */ for (i = 1; i <= 5; i++) { - - 
debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n", - i, ac_y_table); - residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_1[ac_y_table], i, - s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run); - - debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n", - i, ac_c_table); - residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_1[ac_c_table], i, - s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run); + y_tables[i] = &s->ac_vlc_1[ac_y_table]; + c_tables[i] = &s->ac_vlc_1[ac_c_table]; } - - /* unpack the group 2 AC coefficients (coeffs 6-14) */ for (i = 6; i <= 14; i++) { - - debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n", - i, ac_y_table); - residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_2[ac_y_table], i, - s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run); - - debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n", - i, ac_c_table); - residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_2[ac_c_table], i, - s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run); + y_tables[i] = &s->ac_vlc_2[ac_y_table]; + c_tables[i] = &s->ac_vlc_2[ac_c_table]; } - - /* unpack the group 3 AC coefficients (coeffs 15-27) */ for (i = 15; i <= 27; i++) { - - debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n", - i, ac_y_table); - residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_3[ac_y_table], i, - s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run); - - debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n", - i, ac_c_table); - residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_3[ac_c_table], i, - s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run); + y_tables[i] = &s->ac_vlc_3[ac_y_table]; + c_tables[i] = &s->ac_vlc_3[ac_c_table]; } - - /* unpack the group 4 AC coefficients (coeffs 28-63) */ for (i = 28; i <= 63; i++) { + y_tables[i] = &s->ac_vlc_4[ac_y_table]; + c_tables[i] = &s->ac_vlc_4[ac_c_table]; + } - debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n", - i, ac_y_table); - residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_4[ac_y_table], i, - s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run); - - debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n", - i, ac_c_table); - residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_4[ac_c_table], i, - s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run); + /* decode all AC coefficents */ + for (i = 1; i <= 63; i++) { + residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i, + 0, residual_eob_run); + if (residual_eob_run < 0) + return residual_eob_run; + + residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i, + 1, residual_eob_run); + if (residual_eob_run < 0) + return residual_eob_run; + residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i, + 2, residual_eob_run); + if (residual_eob_run < 0) + return residual_eob_run; } return 0; @@ -1349,9 +1126,7 @@ static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb) */ #define COMPATIBLE_FRAME(x) \ (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type) -#define FRAME_CODED(x) (s->all_fragments[x].coding_method != MODE_COPY) -#define DC_COEFF(u) (s->coeffs[u].index ? 0 : s->coeffs[u].coeff) //FIXME do somethin to simplify this -static inline int iabs (int x) { return ((x < 0) ? 
-x : x); } +#define DC_COEFF(u) s->all_fragments[u].dc static void reverse_dc_prediction(Vp3DecodeContext *s, int first_fragment, @@ -1367,28 +1142,12 @@ static void reverse_dc_prediction(Vp3DecodeContext *s, int x, y; int i = first_fragment; - /* - * Fragment prediction groups: - * - * 32222222226 - * 10000000004 - * 10000000004 - * 10000000004 - * 10000000004 - * - * Note: Groups 5 and 7 do not exist as it would mean that the - * fragment's x coordinate is both 0 and (width - 1) at the same time. - */ - int predictor_group; - short predicted_dc; - - /* validity flags for the left, up-left, up, and up-right fragments */ - int fl, ful, fu, fur; + int predicted_dc; /* DC values for the left, up-left, up, and up-right fragments */ int vl, vul, vu, vur; - /* indices for the left, up-left, up, and up-right fragments */ + /* indexes for the left, up-left, up, and up-right fragments */ int l, ul, u, ur; /* @@ -1397,26 +1156,24 @@ static void reverse_dc_prediction(Vp3DecodeContext *s, * 1: up multiplier * 2: up-right multiplier * 3: left multiplier - * 4: mask - * 5: right bit shift divisor (e.g., 7 means >>=7, a.k.a. div by 128) */ - int predictor_transform[16][6] = { - { 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 1, 0, 0 }, // PL - { 0, 0, 1, 0, 0, 0 }, // PUR - { 0, 0, 53, 75, 127, 7 }, // PUR|PL - { 0, 1, 0, 0, 0, 0 }, // PU - { 0, 1, 0, 1, 1, 1 }, // PU|PL - { 0, 1, 0, 0, 0, 0 }, // PU|PUR - { 0, 0, 53, 75, 127, 7 }, // PU|PUR|PL - { 1, 0, 0, 0, 0, 0 }, // PUL - { 0, 0, 0, 1, 0, 0 }, // PUL|PL - { 1, 0, 1, 0, 1, 1 }, // PUL|PUR - { 0, 0, 53, 75, 127, 7 }, // PUL|PUR|PL - { 0, 1, 0, 0, 0, 0 }, // PUL|PU - {-26, 29, 0, 29, 31, 5 }, // PUL|PU|PL - { 3, 10, 3, 0, 15, 4 }, // PUL|PU|PUR - {-26, 29, 0, 29, 31, 5 } // PUL|PU|PUR|PL + static const int predictor_transform[16][4] = { + { 0, 0, 0, 0}, + { 0, 0, 0,128}, // PL + { 0, 0,128, 0}, // PUR + { 0, 0, 53, 75}, // PUR|PL + { 0,128, 0, 0}, // PU + { 0, 64, 0, 64}, // PU|PL + { 0,128, 0, 0}, // PU|PUR + { 0, 0, 53, 75}, // PU|PUR|PL + {128, 0, 0, 0}, // PUL + { 0, 0, 0,128}, // PUL|PL + { 64, 0, 64, 0}, // PUL|PUR + { 0, 0, 53, 75}, // PUL|PUR|PL + { 0,128, 0, 0}, // PUL|PU + {-104,116, 0,116}, // PUL|PU|PL + { 24, 80, 24, 0}, // PUL|PU|PUR + {-104,116, 0,116} // PUL|PU|PUR|PL }; /* This table shows which types of blocks can use other blocks for @@ -1425,7 +1182,7 @@ static void reverse_dc_prediction(Vp3DecodeContext *s, * from other INTRA blocks. There are 2 golden frame coding types; * blocks encoding in these modes can only predict from other blocks * that were encoded with these 1 of these 2 modes. 
*/ - unsigned char compatible_frame[8] = { + static const unsigned char compatible_frame[9] = { 1, /* MODE_INTER_NO_MV */ 0, /* MODE_INTRA */ 1, /* MODE_INTER_PLUS_MV */ @@ -1433,7 +1190,8 @@ static void reverse_dc_prediction(Vp3DecodeContext *s, 1, /* MODE_INTER_PRIOR_MV */ 2, /* MODE_USING_GOLDEN */ 2, /* MODE_GOLDEN_MV */ - 1 /* MODE_INTER_FOUR_MV */ + 1, /* MODE_INTER_FOUR_MV */ + 3 /* MODE_COPY */ }; int current_frame_type; @@ -1442,8 +1200,6 @@ static void reverse_dc_prediction(Vp3DecodeContext *s, int transform = 0; - debug_vp3(" vp3: reversing DC prediction\n"); - vul = vu = vur = vl = 0; last_dc[0] = last_dc[1] = last_dc[2] = 0; @@ -1458,125 +1214,38 @@ static void reverse_dc_prediction(Vp3DecodeContext *s, current_frame_type = compatible_frame[s->all_fragments[i].coding_method]; - predictor_group = (x == 0) + ((y == 0) << 1) + - ((x + 1 == fragment_width) << 2); - debug_dc_pred(" frag %d: group %d, orig DC = %d, ", - i, predictor_group, DC_COEFF(i)); - - switch (predictor_group) { - - case 0: - /* main body of fragments; consider all 4 possible - * fragments for prediction */ - - /* calculate the indices of the predicting fragments */ - ul = i - fragment_width - 1; - u = i - fragment_width; - ur = i - fragment_width + 1; - l = i - 1; - - /* fetch the DC values for the predicting fragments */ - vul = DC_COEFF(ul); - vu = DC_COEFF(u); - vur = DC_COEFF(ur); - vl = DC_COEFF(l); - - /* figure out which fragments are valid */ - ful = FRAME_CODED(ul) && COMPATIBLE_FRAME(ul); - fu = FRAME_CODED(u) && COMPATIBLE_FRAME(u); - fur = FRAME_CODED(ur) && COMPATIBLE_FRAME(ur); - fl = FRAME_CODED(l) && COMPATIBLE_FRAME(l); - - /* decide which predictor transform to use */ - transform = (fl*PL) | (fu*PU) | (ful*PUL) | (fur*PUR); - - break; - - case 1: - /* left column of fragments, not including top corner; - * only consider up and up-right fragments */ - - /* calculate the indices of the predicting fragments */ - u = i - fragment_width; - ur = i - fragment_width + 1; - - /* fetch the DC values for the predicting fragments */ - vu = DC_COEFF(u); - vur = DC_COEFF(ur); - /* figure out which fragments are valid */ - fur = FRAME_CODED(ur) && COMPATIBLE_FRAME(ur); - fu = FRAME_CODED(u) && COMPATIBLE_FRAME(u); - - /* decide which predictor transform to use */ - transform = (fu*PU) | (fur*PUR); - - break; - - case 2: - case 6: - /* top row of fragments, not including top-left frag; - * only consider the left fragment for prediction */ - - /* calculate the indices of the predicting fragments */ - l = i - 1; - - /* fetch the DC values for the predicting fragments */ + transform= 0; + if(x){ + l= i-1; vl = DC_COEFF(l); - - /* figure out which fragments are valid */ - fl = FRAME_CODED(l) && COMPATIBLE_FRAME(l); - - /* decide which predictor transform to use */ - transform = (fl*PL); - - break; - - case 3: - /* top-left fragment */ - - /* nothing to predict from in this case */ - transform = 0; - - break; - - case 4: - /* right column of fragments, not including top corner; - * consider up-left, up, and left fragments for - * prediction */ - - /* calculate the indices of the predicting fragments */ - ul = i - fragment_width - 1; - u = i - fragment_width; - l = i - 1; - - /* fetch the DC values for the predicting fragments */ - vul = DC_COEFF(ul); + if(COMPATIBLE_FRAME(l)) + transform |= PL; + } + if(y){ + u= i-fragment_width; vu = DC_COEFF(u); - vl = DC_COEFF(l); - - /* figure out which fragments are valid */ - ful = FRAME_CODED(ul) && COMPATIBLE_FRAME(ul); - fu = FRAME_CODED(u) && COMPATIBLE_FRAME(u); - fl = 
FRAME_CODED(l) && COMPATIBLE_FRAME(l); - - /* decide which predictor transform to use */ - transform = (fl*PL) | (fu*PU) | (ful*PUL); - - break; - + if(COMPATIBLE_FRAME(u)) + transform |= PU; + if(x){ + ul= i-fragment_width-1; + vul = DC_COEFF(ul); + if(COMPATIBLE_FRAME(ul)) + transform |= PUL; + } + if(x + 1 < fragment_width){ + ur= i-fragment_width+1; + vur = DC_COEFF(ur); + if(COMPATIBLE_FRAME(ur)) + transform |= PUR; + } } - debug_dc_pred("transform = %d, ", transform); - if (transform == 0) { /* if there were no fragments to predict from, use last * DC saved */ predicted_dc = last_dc[current_frame_type]; - debug_dc_pred("from last DC (%d) = %d\n", - current_frame_type, DC_COEFF(i)); - } else { /* apply the appropriate predictor transform */ @@ -1586,155 +1255,269 @@ static void reverse_dc_prediction(Vp3DecodeContext *s, (predictor_transform[transform][2] * vur) + (predictor_transform[transform][3] * vl); - /* if there is a shift value in the transform, add - * the sign bit before the shift */ - if (predictor_transform[transform][5] != 0) { - predicted_dc += ((predicted_dc >> 15) & - predictor_transform[transform][4]); - predicted_dc >>= predictor_transform[transform][5]; - } + predicted_dc /= 128; /* check for outranging on the [ul u l] and * [ul u ur l] predictors */ - if ((transform == 13) || (transform == 15)) { - if (iabs(predicted_dc - vu) > 128) + if ((transform == 15) || (transform == 13)) { + if (FFABS(predicted_dc - vu) > 128) predicted_dc = vu; - else if (iabs(predicted_dc - vl) > 128) + else if (FFABS(predicted_dc - vl) > 128) predicted_dc = vl; - else if (iabs(predicted_dc - vul) > 128) + else if (FFABS(predicted_dc - vul) > 128) predicted_dc = vul; } - - debug_dc_pred("from pred DC = %d\n", - DC_COEFF(i)); } /* at long last, apply the predictor */ - if(s->coeffs[i].index){ - *s->next_coeff= s->coeffs[i]; - s->coeffs[i].index=0; - s->coeffs[i].coeff=0; - s->coeffs[i].next= s->next_coeff++; - } - s->coeffs[i].coeff += predicted_dc; + DC_COEFF(i) += predicted_dc; /* save the DC */ last_dc[current_frame_type] = DC_COEFF(i); - if(DC_COEFF(i) && !(s->all_fragments[i].coeff_count&127)){ - s->all_fragments[i].coeff_count= 129; -// s->all_fragments[i].next_coeff= s->next_coeff; - s->coeffs[i].next= s->next_coeff; - (s->next_coeff++)->next=NULL; + } + } + } +} + +static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend) +{ + int x, y; + int *bounding_values= s->bounding_values_array+127; + + int width = s->fragment_width[!!plane]; + int height = s->fragment_height[!!plane]; + int fragment = s->fragment_start [plane] + ystart * width; + int stride = s->current_frame.linesize[plane]; + uint8_t *plane_data = s->current_frame.data [plane]; + if (!s->flipped_image) stride = -stride; + plane_data += s->data_offset[plane] + 8*ystart*stride; + + for (y = ystart; y < yend; y++) { + + for (x = 0; x < width; x++) { + /* This code basically just deblocks on the edges of coded blocks. + * However, it has to be much more complicated because of the + * braindamaged deblock ordering used in VP3/Theora. Order matters + * because some pixels get filtered twice. 
*/ + if( s->all_fragments[fragment].coding_method != MODE_COPY ) + { + /* do not perform left edge filter for left columns frags */ + if (x > 0) { + s->dsp.vp3_h_loop_filter( + plane_data + 8*x, + stride, bounding_values); + } + + /* do not perform top edge filter for top row fragments */ + if (y > 0) { + s->dsp.vp3_v_loop_filter( + plane_data + 8*x, + stride, bounding_values); + } + + /* do not perform right edge filter for right column + * fragments or if right fragment neighbor is also coded + * in this frame (it will be filtered in next iteration) */ + if ((x < width - 1) && + (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) { + s->dsp.vp3_h_loop_filter( + plane_data + 8*x + 8, + stride, bounding_values); } + + /* do not perform bottom edge filter for bottom row + * fragments or if bottom fragment neighbor is also coded + * in this frame (it will be filtered in the next row) */ + if ((y < height - 1) && + (s->all_fragments[fragment + width].coding_method == MODE_COPY)) { + s->dsp.vp3_v_loop_filter( + plane_data + 8*x + 8*stride, + stride, bounding_values); + } + } + + fragment++; + } + plane_data += 8*stride; + } +} + +/** + * Pull DCT tokens from the 64 levels to decode and dequant the coefficients + * for the next block in coding order + */ +static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag, + int plane, int inter, DCTELEM block[64]) +{ + int16_t *dequantizer = s->qmat[frag->qpi][inter][plane]; + uint8_t *perm = s->scantable.permutated; + int i = 0; + + do { + int token = *s->dct_tokens[plane][i]; + switch (token & 3) { + case 0: // EOB + if (--token < 4) // 0-3 are token types, so the EOB run must now be 0 + s->dct_tokens[plane][i]++; + else + *s->dct_tokens[plane][i] = token & ~3; + goto end; + case 1: // zero run + s->dct_tokens[plane][i]++; + i += (token >> 2) & 0x7f; + if (i > 63) { + av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n"); + return i; } + block[perm[i]] = (token >> 9) * dequantizer[perm[i]]; + i++; + break; + case 2: // coeff + block[perm[i]] = (token >> 2) * dequantizer[perm[i]]; + s->dct_tokens[plane][i++]++; + break; + default: // shouldn't happen + return i; } + } while (i < 64); + // return value is expected to be a valid level + i--; +end: + // the actual DC+prediction is in the fragment structure + block[0] = frag->dc * s->qmat[0][inter][plane][0]; + return i; +} + +/** + * called when all pixels up to row y are complete + */ +static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y) +{ + int h, cy, i; + int offset[AV_NUM_DATA_POINTERS]; + + if (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) { + int y_flipped = s->flipped_image ? s->avctx->height-y : y; + + // At the end of the frame, report INT_MAX instead of the height of the frame. + // This makes the other threads' ff_thread_await_progress() calls cheaper, because + // they don't have to clip their values. + ff_thread_report_progress(&s->current_frame, y_flipped==s->avctx->height ? 
INT_MAX : y_flipped-1, 0); } + + if(s->avctx->draw_horiz_band==NULL) + return; + + h= y - s->last_slice_end; + s->last_slice_end= y; + y -= h; + + if (!s->flipped_image) { + y = s->avctx->height - y - h; + } + + cy = y >> s->chroma_y_shift; + offset[0] = s->current_frame.linesize[0]*y; + offset[1] = s->current_frame.linesize[1]*cy; + offset[2] = s->current_frame.linesize[2]*cy; + for (i = 3; i < AV_NUM_DATA_POINTERS; i++) + offset[i] = 0; + + emms_c(); + s->avctx->draw_horiz_band(s->avctx, &s->current_frame, offset, y, 3, h); } +/** + * Wait for the reference frame of the current fragment. + * The progress value is in luma pixel rows. + */ +static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment, int motion_y, int y) +{ + AVFrame *ref_frame; + int ref_row; + int border = motion_y&1; + + if (fragment->coding_method == MODE_USING_GOLDEN || + fragment->coding_method == MODE_GOLDEN_MV) + ref_frame = &s->golden_frame; + else + ref_frame = &s->last_frame; -static void horizontal_filter(unsigned char *first_pixel, int stride, - int *bounding_values); -static void vertical_filter(unsigned char *first_pixel, int stride, - int *bounding_values); + ref_row = y + (motion_y>>1); + ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border); + + ff_thread_await_progress(ref_frame, ref_row, 0); +} /* * Perform the final rendering for a particular slice of data. - * The slice number ranges from 0..(macroblock_height - 1). + * The slice number ranges from 0..(c_superblock_height - 1). */ static void render_slice(Vp3DecodeContext *s, int slice) { - int x, y; - int m, n; - int i; /* indicates current fragment */ - int16_t *dequantizer; - DECLARE_ALIGNED_16(DCTELEM, block[64]); - unsigned char *output_plane; - unsigned char *last_plane; - unsigned char *golden_plane; - int stride; + int x, y, i, j, fragment; + LOCAL_ALIGNED_16(DCTELEM, block, [64]); int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef; - int upper_motion_limit, lower_motion_limit; int motion_halfpel_index; uint8_t *motion_source; - int plane; - int plane_width; - int plane_height; - int slice_height; - int current_macroblock_entry = slice * s->macroblock_width * 6; - int fragment_width; + int plane, first_pixel; + + if (slice >= s->c_superblock_height) + return; + + for (plane = 0; plane < 3; plane++) { + uint8_t *output_plane = s->current_frame.data [plane] + s->data_offset[plane]; + uint8_t * last_plane = s-> last_frame.data [plane] + s->data_offset[plane]; + uint8_t *golden_plane = s-> golden_frame.data [plane] + s->data_offset[plane]; + int stride = s->current_frame.linesize[plane]; + int plane_width = s->width >> (plane && s->chroma_x_shift); + int plane_height = s->height >> (plane && s->chroma_y_shift); + int8_t (*motion_val)[2] = s->motion_val[!!plane]; + + int sb_x, sb_y = slice << (!plane && s->chroma_y_shift); + int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift); + int slice_width = plane ? s->c_superblock_width : s->y_superblock_width; + + int fragment_width = s->fragment_width[!!plane]; + int fragment_height = s->fragment_height[!!plane]; + int fragment_start = s->fragment_start[plane]; + int do_await = !plane && HAVE_THREADS && (s->avctx->active_thread_type&FF_THREAD_FRAME); + + if (!s->flipped_image) stride = -stride; + if (CONFIG_GRAY && plane && (s->avctx->flags & CODEC_FLAG_GRAY)) + continue; - if (slice >= s->macroblock_height) - return; + /* for each superblock row in the slice (both of them)... 
*/ + for (; sb_y < slice_height; sb_y++) { - for (plane = 0; plane < 3; plane++) { + /* for each superblock in a row... */ + for (sb_x = 0; sb_x < slice_width; sb_x++) { - /* set up plane-specific parameters */ - if (plane == 0) { - output_plane = s->current_frame.data[0]; - last_plane = s->last_frame.data[0]; - golden_plane = s->golden_frame.data[0]; - stride = s->current_frame.linesize[0]; - if (!s->flipped_image) stride = -stride; - upper_motion_limit = 7 * s->current_frame.linesize[0]; - lower_motion_limit = s->height * s->current_frame.linesize[0] + s->width - 8; - y = slice * FRAGMENT_PIXELS * 2; - plane_width = s->width; - plane_height = s->height; - slice_height = y + FRAGMENT_PIXELS * 2; - i = s->macroblock_fragments[current_macroblock_entry + 0]; - } else if (plane == 1) { - output_plane = s->current_frame.data[1]; - last_plane = s->last_frame.data[1]; - golden_plane = s->golden_frame.data[1]; - stride = s->current_frame.linesize[1]; - if (!s->flipped_image) stride = -stride; - upper_motion_limit = 7 * s->current_frame.linesize[1]; - lower_motion_limit = (s->height / 2) * s->current_frame.linesize[1] + (s->width / 2) - 8; - y = slice * FRAGMENT_PIXELS; - plane_width = s->width / 2; - plane_height = s->height / 2; - slice_height = y + FRAGMENT_PIXELS; - i = s->macroblock_fragments[current_macroblock_entry + 4]; - } else { - output_plane = s->current_frame.data[2]; - last_plane = s->last_frame.data[2]; - golden_plane = s->golden_frame.data[2]; - stride = s->current_frame.linesize[2]; - if (!s->flipped_image) stride = -stride; - upper_motion_limit = 7 * s->current_frame.linesize[2]; - lower_motion_limit = (s->height / 2) * s->current_frame.linesize[2] + (s->width / 2) - 8; - y = slice * FRAGMENT_PIXELS; - plane_width = s->width / 2; - plane_height = s->height / 2; - slice_height = y + FRAGMENT_PIXELS; - i = s->macroblock_fragments[current_macroblock_entry + 5]; - } - fragment_width = plane_width / FRAGMENT_PIXELS; + /* for each block in a superblock... */ + for (j = 0; j < 16; j++) { + x = 4*sb_x + hilbert_offset[j][0]; + y = 4*sb_y + hilbert_offset[j][1]; + fragment = y*fragment_width + x; - if(ABS(stride) > 2048) - return; //various tables are fixed size + i = fragment_start + fragment; - /* for each fragment row in the slice (both of them)... */ - for (; y < slice_height; y += 8) { + // bounds check + if (x >= fragment_width || y >= fragment_height) + continue; - /* for each fragment in a row... 
*/ - for (x = 0; x < plane_width; x += 8, i++) { + first_pixel = 8*y*stride + 8*x; - if ((i < 0) || (i >= s->fragment_count)) { - av_log(s->avctx, AV_LOG_ERROR, " vp3:render_slice(): bad fragment number (%d)\n", i); - return; - } + if (do_await && s->all_fragments[i].coding_method != MODE_INTRA) + await_reference_row(s, &s->all_fragments[i], motion_val[fragment][1], (16*y) >> s->chroma_y_shift); /* transform if this block was coded */ - if ((s->all_fragments[i].coding_method != MODE_COPY) && - !((s->avctx->flags & CODEC_FLAG_GRAY) && plane)) { - + if (s->all_fragments[i].coding_method != MODE_COPY) { if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) || (s->all_fragments[i].coding_method == MODE_GOLDEN_MV)) motion_source= golden_plane; else motion_source= last_plane; - motion_source += s->all_fragments[i].first_pixel; + motion_source += first_pixel; motion_halfpel_index = 0; /* sort out the motion vector if this fragment is coded @@ -1742,17 +1525,11 @@ static void render_slice(Vp3DecodeContext *s, int slice) if ((s->all_fragments[i].coding_method > MODE_INTRA) && (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) { int src_x, src_y; - motion_x = s->all_fragments[i].motion_x; - motion_y = s->all_fragments[i].motion_y; - if(plane){ - motion_x= (motion_x>>1) | (motion_x&1); - motion_y= (motion_y>>1) | (motion_y&1); - } + motion_x = motion_val[fragment][0]; + motion_y = motion_val[fragment][1]; - src_x= (motion_x>>1) + x; - src_y= (motion_y>>1) + y; - if ((motion_x == 127) || (motion_y == 127)) - av_log(s->avctx, AV_LOG_ERROR, " help! got invalid motion vector! (%X, %X)\n", motion_x, motion_y); + src_x= (motion_x>>1) + 8*x; + src_y= (motion_y>>1) + 8*y; motion_halfpel_index = motion_x & 0x01; motion_source += (motion_x >> 1); @@ -1762,10 +1539,9 @@ static void render_slice(Vp3DecodeContext *s, int slice) if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){ uint8_t *temp= s->edge_emu_buffer; - if(stride<0) temp -= 9*stride; - else temp += 9*stride; + if(stride<0) temp -= 8*stride; - ff_emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height); + s->dsp.emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height); motion_source= temp; } } @@ -1780,113 +1556,61 @@ static void render_slice(Vp3DecodeContext *s, int slice) put_no_rnd_pixels_tab is better optimzed */ if(motion_halfpel_index != 3){ s->dsp.put_no_rnd_pixels_tab[1][motion_halfpel_index]( - output_plane + s->all_fragments[i].first_pixel, + output_plane + first_pixel, motion_source, stride, 8); }else{ int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1 s->dsp.put_no_rnd_pixels_l2[1]( - output_plane + s->all_fragments[i].first_pixel, + output_plane + first_pixel, motion_source - d, motion_source + stride + 1 + d, stride, 8); } - dequantizer = s->qmat[1][plane]; - }else{ - dequantizer = s->qmat[0][plane]; } - /* dequantize the DCT coefficients */ - debug_idct("fragment %d, coding mode %d, DC = %d, dequant = %d:\n", - i, s->all_fragments[i].coding_method, - DC_COEFF(i), dequantizer[0]); - - if(s->avctx->idct_algo==FF_IDCT_VP3){ - Coeff *coeff= s->coeffs + i; - memset(block, 0, sizeof(block)); - while(coeff->next){ - block[coeff->index]= coeff->coeff * dequantizer[coeff->index]; - coeff= coeff->next; - } - }else{ - Coeff *coeff= s->coeffs + i; - memset(block, 0, sizeof(block)); - while(coeff->next){ - block[coeff->index]= (coeff->coeff * dequantizer[coeff->index] + 2)>>2; - coeff= coeff->next; 
- } - } + s->dsp.clear_block(block); /* invert DCT and place (or add) in final output */ if (s->all_fragments[i].coding_method == MODE_INTRA) { + int index; + index = vp3_dequant(s, s->all_fragments + i, plane, 0, block); + if (index > 63) + continue; if(s->avctx->idct_algo!=FF_IDCT_VP3) block[0] += 128<<3; s->dsp.idct_put( - output_plane + s->all_fragments[i].first_pixel, + output_plane + first_pixel, stride, block); } else { + int index = vp3_dequant(s, s->all_fragments + i, plane, 1, block); + if (index > 63) + continue; + if (index > 0) { s->dsp.idct_add( - output_plane + s->all_fragments[i].first_pixel, + output_plane + first_pixel, stride, block); - } - - debug_idct("block after idct_%s():\n", - (s->all_fragments[i].coding_method == MODE_INTRA)? - "put" : "add"); - for (m = 0; m < 8; m++) { - for (n = 0; n < 8; n++) { - debug_idct(" %3d", *(output_plane + - s->all_fragments[i].first_pixel + (m * stride + n))); + } else { + s->dsp.vp3_idct_dc_add(output_plane + first_pixel, stride, block); } - debug_idct("\n"); } - debug_idct("\n"); - } else { /* copy directly from the previous frame */ s->dsp.put_pixels_tab[1][0]( - output_plane + s->all_fragments[i].first_pixel, - last_plane + s->all_fragments[i].first_pixel, + output_plane + first_pixel, + last_plane + first_pixel, stride, 8); } -#if 0 - /* perform the left edge filter if: - * - the fragment is not on the left column - * - the fragment is coded in this frame - * - the fragment is not coded in this frame but the left - * fragment is coded in this frame (this is done instead - * of a right edge filter when rendering the left fragment - * since this fragment is not available yet) */ - if ((x > 0) && - ((s->all_fragments[i].coding_method != MODE_COPY) || - ((s->all_fragments[i].coding_method == MODE_COPY) && - (s->all_fragments[i - 1].coding_method != MODE_COPY)) )) { - horizontal_filter( - output_plane + s->all_fragments[i].first_pixel + 7*stride, - -stride, bounding_values); - } - - /* perform the top edge filter if: - * - the fragment is not on the top row - * - the fragment is coded in this frame - * - the fragment is not coded in this frame but the above - * fragment is coded in this frame (this is done instead - * of a bottom edge filter when rendering the above - * fragment since this fragment is not available yet) */ - if ((y > 0) && - ((s->all_fragments[i].coding_method != MODE_COPY) || - ((s->all_fragments[i].coding_method == MODE_COPY) && - (s->all_fragments[i - fragment_width].coding_method != MODE_COPY)) )) { - vertical_filter( - output_plane + s->all_fragments[i].first_pixel - stride, - -stride, bounding_values); } -#endif } + + // Filter up to the last row in the superblock row + if (!s->skip_loop_filter) + apply_loop_filter(s, plane, 4*sb_y - !!sb_y, FFMIN(4*sb_y+3, fragment_height-1)); } } @@ -1898,255 +1622,48 @@ static void render_slice(Vp3DecodeContext *s, int slice) * dispatch (slice - 1); */ - emms_c(); -} - -static void horizontal_filter(unsigned char *first_pixel, int stride, - int *bounding_values) -{ - unsigned char *end; - int filter_value; - - for (end= first_pixel + 8*stride; first_pixel < end; first_pixel += stride) { - filter_value = - (first_pixel[-2] - first_pixel[ 1]) - +3*(first_pixel[ 0] - first_pixel[-1]); - filter_value = bounding_values[(filter_value + 4) >> 3]; - first_pixel[-1] = clip_uint8(first_pixel[-1] + filter_value); - first_pixel[ 0] = clip_uint8(first_pixel[ 0] - filter_value); - } -} - -static void vertical_filter(unsigned char *first_pixel, int stride, - int *bounding_values) -{ - 
unsigned char *end; - int filter_value; - const int nstride= -stride; - - for (end= first_pixel + 8; first_pixel < end; first_pixel++) { - filter_value = - (first_pixel[2 * nstride] - first_pixel[ stride]) - +3*(first_pixel[0 ] - first_pixel[nstride]); - filter_value = bounding_values[(filter_value + 4) >> 3]; - first_pixel[nstride] = clip_uint8(first_pixel[nstride] + filter_value); - first_pixel[0] = clip_uint8(first_pixel[0] - filter_value); - } -} - -static void apply_loop_filter(Vp3DecodeContext *s) -{ - int x, y, plane; - int width, height; - int fragment; - int stride; - unsigned char *plane_data; - int *bounding_values= s->bounding_values_array+127; - -#if 0 - int bounding_values_array[256]; - int filter_limit; - - /* find the right loop limit value */ - for (x = 63; x >= 0; x--) { - if (vp31_ac_scale_factor[x] >= s->quality_index) - break; - } - filter_limit = vp31_filter_limit_values[s->quality_index]; - - /* set up the bounding values */ - memset(bounding_values_array, 0, 256 * sizeof(int)); - for (x = 0; x < filter_limit; x++) { - bounding_values[-x - filter_limit] = -filter_limit + x; - bounding_values[-x] = -x; - bounding_values[x] = x; - bounding_values[x + filter_limit] = filter_limit - x; - } -#endif - - for (plane = 0; plane < 3; plane++) { - - if (plane == 0) { - /* Y plane parameters */ - fragment = 0; - width = s->fragment_width; - height = s->fragment_height; - stride = s->current_frame.linesize[0]; - plane_data = s->current_frame.data[0]; - } else if (plane == 1) { - /* U plane parameters */ - fragment = s->u_fragment_start; - width = s->fragment_width / 2; - height = s->fragment_height / 2; - stride = s->current_frame.linesize[1]; - plane_data = s->current_frame.data[1]; - } else { - /* V plane parameters */ - fragment = s->v_fragment_start; - width = s->fragment_width / 2; - height = s->fragment_height / 2; - stride = s->current_frame.linesize[2]; - plane_data = s->current_frame.data[2]; - } - - for (y = 0; y < height; y++) { - - for (x = 0; x < width; x++) { -START_TIMER - /* do not perform left edge filter for left columns frags */ - if ((x > 0) && - (s->all_fragments[fragment].coding_method != MODE_COPY)) { - horizontal_filter( - plane_data + s->all_fragments[fragment].first_pixel - 7*stride, - stride, bounding_values); - } - - /* do not perform top edge filter for top row fragments */ - if ((y > 0) && - (s->all_fragments[fragment].coding_method != MODE_COPY)) { - vertical_filter( - plane_data + s->all_fragments[fragment].first_pixel + stride, - stride, bounding_values); - } - - /* do not perform right edge filter for right column - * fragments or if right fragment neighbor is also coded - * in this frame (it will be filtered in next iteration) */ - if ((x < width - 1) && - (s->all_fragments[fragment].coding_method != MODE_COPY) && - (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) { - horizontal_filter( - plane_data + s->all_fragments[fragment + 1].first_pixel - 7*stride, - stride, bounding_values); - } - - /* do not perform bottom edge filter for bottom row - * fragments or if bottom fragment neighbor is also coded - * in this frame (it will be filtered in the next row) */ - if ((y < height - 1) && - (s->all_fragments[fragment].coding_method != MODE_COPY) && - (s->all_fragments[fragment + width].coding_method == MODE_COPY)) { - vertical_filter( - plane_data + s->all_fragments[fragment + width].first_pixel + stride, - stride, bounding_values); - } - - fragment++; -STOP_TIMER("loop filter") - } - } - } + vp3_draw_horiz_band(s, FFMIN((32 << 
s->chroma_y_shift) * (slice + 1) -16, s->height-16)); } -/* - * This function computes the first pixel addresses for each fragment. - * This function needs to be invoked after the first frame is allocated - * so that it has access to the plane strides. - */ -static void vp3_calculate_pixel_addresses(Vp3DecodeContext *s) +/// Allocate tables for per-frame data in Vp3DecodeContext +static av_cold int allocate_tables(AVCodecContext *avctx) { + Vp3DecodeContext *s = avctx->priv_data; + int y_fragment_count, c_fragment_count; - int i, x, y; - - /* figure out the first pixel addresses for each of the fragments */ - /* Y plane */ - i = 0; - for (y = s->fragment_height; y > 0; y--) { - for (x = 0; x < s->fragment_width; x++) { - s->all_fragments[i++].first_pixel = - s->golden_frame.linesize[0] * y * FRAGMENT_PIXELS - - s->golden_frame.linesize[0] + - x * FRAGMENT_PIXELS; - debug_init(" fragment %d, first pixel @ %d\n", - i-1, s->all_fragments[i-1].first_pixel); - } - } - - /* U plane */ - i = s->u_fragment_start; - for (y = s->fragment_height / 2; y > 0; y--) { - for (x = 0; x < s->fragment_width / 2; x++) { - s->all_fragments[i++].first_pixel = - s->golden_frame.linesize[1] * y * FRAGMENT_PIXELS - - s->golden_frame.linesize[1] + - x * FRAGMENT_PIXELS; - debug_init(" fragment %d, first pixel @ %d\n", - i-1, s->all_fragments[i-1].first_pixel); - } - } + y_fragment_count = s->fragment_width[0] * s->fragment_height[0]; + c_fragment_count = s->fragment_width[1] * s->fragment_height[1]; - /* V plane */ - i = s->v_fragment_start; - for (y = s->fragment_height / 2; y > 0; y--) { - for (x = 0; x < s->fragment_width / 2; x++) { - s->all_fragments[i++].first_pixel = - s->golden_frame.linesize[2] * y * FRAGMENT_PIXELS - - s->golden_frame.linesize[2] + - x * FRAGMENT_PIXELS; - debug_init(" fragment %d, first pixel @ %d\n", - i-1, s->all_fragments[i-1].first_pixel); - } - } -} + s->superblock_coding = av_malloc(s->superblock_count); + s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment)); + s->coded_fragment_list[0] = av_malloc(s->fragment_count * sizeof(int)); + s->dct_tokens_base = av_malloc(64*s->fragment_count * sizeof(*s->dct_tokens_base)); + s->motion_val[0] = av_malloc(y_fragment_count * sizeof(*s->motion_val[0])); + s->motion_val[1] = av_malloc(c_fragment_count * sizeof(*s->motion_val[1])); -/* FIXME: this should be merged with the above! 
*/ -static void theora_calculate_pixel_addresses(Vp3DecodeContext *s) -{ + /* work out the block mapping tables */ + s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int)); + s->macroblock_coding = av_malloc(s->macroblock_count + 1); - int i, x, y; - - /* figure out the first pixel addresses for each of the fragments */ - /* Y plane */ - i = 0; - for (y = 1; y <= s->fragment_height; y++) { - for (x = 0; x < s->fragment_width; x++) { - s->all_fragments[i++].first_pixel = - s->golden_frame.linesize[0] * y * FRAGMENT_PIXELS - - s->golden_frame.linesize[0] + - x * FRAGMENT_PIXELS; - debug_init(" fragment %d, first pixel @ %d\n", - i-1, s->all_fragments[i-1].first_pixel); - } + if (!s->superblock_coding || !s->all_fragments || !s->dct_tokens_base || + !s->coded_fragment_list[0] || !s->superblock_fragments || !s->macroblock_coding || + !s->motion_val[0] || !s->motion_val[1]) { + vp3_decode_end(avctx); + return -1; } - /* U plane */ - i = s->u_fragment_start; - for (y = 1; y <= s->fragment_height / 2; y++) { - for (x = 0; x < s->fragment_width / 2; x++) { - s->all_fragments[i++].first_pixel = - s->golden_frame.linesize[1] * y * FRAGMENT_PIXELS - - s->golden_frame.linesize[1] + - x * FRAGMENT_PIXELS; - debug_init(" fragment %d, first pixel @ %d\n", - i-1, s->all_fragments[i-1].first_pixel); - } - } + init_block_mapping(s); - /* V plane */ - i = s->v_fragment_start; - for (y = 1; y <= s->fragment_height / 2; y++) { - for (x = 0; x < s->fragment_width / 2; x++) { - s->all_fragments[i++].first_pixel = - s->golden_frame.linesize[2] * y * FRAGMENT_PIXELS - - s->golden_frame.linesize[2] + - x * FRAGMENT_PIXELS; - debug_init(" fragment %d, first pixel @ %d\n", - i-1, s->all_fragments[i-1].first_pixel); - } - } + return 0; } -/* - * This is the ffmpeg/libavcodec API init function. 
- */ -static int vp3_decode_init(AVCodecContext *avctx) +static av_cold int vp3_decode_init(AVCodecContext *avctx) { Vp3DecodeContext *s = avctx->priv_data; int i, inter, plane; int c_width; int c_height; - int y_superblock_count; - int c_superblock_count; + int y_fragment_count, c_fragment_count; if (avctx->codec_tag == MKTAG('V','P','3','0')) s->version = 0; @@ -2154,84 +1671,65 @@ static int vp3_decode_init(AVCodecContext *avctx) s->version = 1; s->avctx = avctx; - s->width = (avctx->width + 15) & 0xFFFFFFF0; - s->height = (avctx->height + 15) & 0xFFFFFFF0; - avctx->pix_fmt = PIX_FMT_YUV420P; - avctx->has_b_frames = 0; + s->width = FFALIGN(avctx->width, 16); + s->height = FFALIGN(avctx->height, 16); + if (avctx->pix_fmt == PIX_FMT_NONE) + avctx->pix_fmt = PIX_FMT_YUV420P; + avctx->chroma_sample_location = AVCHROMA_LOC_CENTER; if(avctx->idct_algo==FF_IDCT_AUTO) avctx->idct_algo=FF_IDCT_VP3; - dsputil_init(&s->dsp, avctx); + ff_dsputil_init(&s->dsp, avctx); ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct); /* initialize to an impossible value which will force a recalculation * in the first frame decode */ - s->quality_index = -1; + for (i = 0; i < 3; i++) + s->qps[i] = -1; + + avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift); s->y_superblock_width = (s->width + 31) / 32; s->y_superblock_height = (s->height + 31) / 32; - y_superblock_count = s->y_superblock_width * s->y_superblock_height; + s->y_superblock_count = s->y_superblock_width * s->y_superblock_height; /* work out the dimensions for the C planes */ - c_width = s->width / 2; - c_height = s->height / 2; + c_width = s->width >> s->chroma_x_shift; + c_height = s->height >> s->chroma_y_shift; s->c_superblock_width = (c_width + 31) / 32; s->c_superblock_height = (c_height + 31) / 32; - c_superblock_count = s->c_superblock_width * s->c_superblock_height; + s->c_superblock_count = s->c_superblock_width * s->c_superblock_height; - s->superblock_count = y_superblock_count + (c_superblock_count * 2); - s->u_superblock_start = y_superblock_count; - s->v_superblock_start = s->u_superblock_start + c_superblock_count; - s->superblock_coding = av_malloc(s->superblock_count); + s->superblock_count = s->y_superblock_count + (s->c_superblock_count * 2); + s->u_superblock_start = s->y_superblock_count; + s->v_superblock_start = s->u_superblock_start + s->c_superblock_count; s->macroblock_width = (s->width + 15) / 16; s->macroblock_height = (s->height + 15) / 16; s->macroblock_count = s->macroblock_width * s->macroblock_height; - s->fragment_width = s->width / FRAGMENT_PIXELS; - s->fragment_height = s->height / FRAGMENT_PIXELS; + s->fragment_width[0] = s->width / FRAGMENT_PIXELS; + s->fragment_height[0] = s->height / FRAGMENT_PIXELS; + s->fragment_width[1] = s->fragment_width[0] >> s->chroma_x_shift; + s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift; /* fragment count covers all 8x8 blocks for all 3 planes */ - s->fragment_count = s->fragment_width * s->fragment_height * 3 / 2; - s->u_fragment_start = s->fragment_width * s->fragment_height; - s->v_fragment_start = s->fragment_width * s->fragment_height * 5 / 4; - - debug_init(" Y plane: %d x %d\n", s->width, s->height); - debug_init(" C plane: %d x %d\n", c_width, c_height); - debug_init(" Y superblocks: %d x %d, %d total\n", - s->y_superblock_width, s->y_superblock_height, y_superblock_count); - debug_init(" C superblocks: %d x %d, %d total\n", - s->c_superblock_width, s->c_superblock_height, c_superblock_count); - 
debug_init(" total superblocks = %d, U starts @ %d, V starts @ %d\n", - s->superblock_count, s->u_superblock_start, s->v_superblock_start); - debug_init(" macroblocks: %d x %d, %d total\n", - s->macroblock_width, s->macroblock_height, s->macroblock_count); - debug_init(" %d fragments, %d x %d, u starts @ %d, v starts @ %d\n", - s->fragment_count, - s->fragment_width, - s->fragment_height, - s->u_fragment_start, - s->v_fragment_start); - - s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment)); - s->coeffs = av_malloc(s->fragment_count * sizeof(Coeff) * 65); - s->coded_fragment_list = av_malloc(s->fragment_count * sizeof(int)); - s->pixel_addresses_inited = 0; + y_fragment_count = s->fragment_width[0] * s->fragment_height[0]; + c_fragment_count = s->fragment_width[1] * s->fragment_height[1]; + s->fragment_count = y_fragment_count + 2*c_fragment_count; + s->fragment_start[1] = y_fragment_count; + s->fragment_start[2] = y_fragment_count + c_fragment_count; if (!s->theora_tables) { - for (i = 0; i < 64; i++) + for (i = 0; i < 64; i++) { s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i]; - for (i = 0; i < 64; i++) s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i]; - for (i = 0; i < 64; i++) s->base_matrix[0][i] = vp31_intra_y_dequant[i]; - for (i = 0; i < 64; i++) s->base_matrix[1][i] = vp31_intra_c_dequant[i]; - for (i = 0; i < 64; i++) s->base_matrix[2][i] = vp31_inter_dequant[i]; - for (i = 0; i < 64; i++) s->filter_limit_values[i] = vp31_filter_limit_values[i]; + } for(inter=0; inter<2; inter++){ for(plane=0; plane<3; plane++){ @@ -2246,57 +1744,62 @@ static int vp3_decode_init(AVCodecContext *avctx) for (i = 0; i < 16; i++) { /* DC histograms */ - init_vlc(&s->dc_vlc[i], 5, 32, + init_vlc(&s->dc_vlc[i], 11, 32, &dc_bias[i][0][1], 4, 2, &dc_bias[i][0][0], 4, 2, 0); /* group 1 AC histograms */ - init_vlc(&s->ac_vlc_1[i], 5, 32, + init_vlc(&s->ac_vlc_1[i], 11, 32, &ac_bias_0[i][0][1], 4, 2, &ac_bias_0[i][0][0], 4, 2, 0); /* group 2 AC histograms */ - init_vlc(&s->ac_vlc_2[i], 5, 32, + init_vlc(&s->ac_vlc_2[i], 11, 32, &ac_bias_1[i][0][1], 4, 2, &ac_bias_1[i][0][0], 4, 2, 0); /* group 3 AC histograms */ - init_vlc(&s->ac_vlc_3[i], 5, 32, + init_vlc(&s->ac_vlc_3[i], 11, 32, &ac_bias_2[i][0][1], 4, 2, &ac_bias_2[i][0][0], 4, 2, 0); /* group 4 AC histograms */ - init_vlc(&s->ac_vlc_4[i], 5, 32, + init_vlc(&s->ac_vlc_4[i], 11, 32, &ac_bias_3[i][0][1], 4, 2, &ac_bias_3[i][0][0], 4, 2, 0); } } else { - for (i = 0; i < 16; i++) { + for (i = 0; i < 16; i++) { /* DC histograms */ - init_vlc(&s->dc_vlc[i], 5, 32, - &s->huffman_table[i][0][1], 4, 2, - &s->huffman_table[i][0][0], 4, 2, 0); + if (init_vlc(&s->dc_vlc[i], 11, 32, + &s->huffman_table[i][0][1], 8, 4, + &s->huffman_table[i][0][0], 8, 4, 0) < 0) + goto vlc_fail; /* group 1 AC histograms */ - init_vlc(&s->ac_vlc_1[i], 5, 32, - &s->huffman_table[i+16][0][1], 4, 2, - &s->huffman_table[i+16][0][0], 4, 2, 0); + if (init_vlc(&s->ac_vlc_1[i], 11, 32, + &s->huffman_table[i+16][0][1], 8, 4, + &s->huffman_table[i+16][0][0], 8, 4, 0) < 0) + goto vlc_fail; /* group 2 AC histograms */ - init_vlc(&s->ac_vlc_2[i], 5, 32, - &s->huffman_table[i+16*2][0][1], 4, 2, - &s->huffman_table[i+16*2][0][0], 4, 2, 0); + if (init_vlc(&s->ac_vlc_2[i], 11, 32, + &s->huffman_table[i+16*2][0][1], 8, 4, + &s->huffman_table[i+16*2][0][0], 8, 4, 0) < 0) + goto vlc_fail; /* group 3 AC histograms */ - init_vlc(&s->ac_vlc_3[i], 5, 32, - &s->huffman_table[i+16*3][0][1], 4, 2, - &s->huffman_table[i+16*3][0][0], 4, 2, 0); + if (init_vlc(&s->ac_vlc_3[i], 11, 32, + 
&s->huffman_table[i+16*3][0][1], 8, 4, + &s->huffman_table[i+16*3][0][0], 8, 4, 0) < 0) + goto vlc_fail; /* group 4 AC histograms */ - init_vlc(&s->ac_vlc_4[i], 5, 32, - &s->huffman_table[i+16*4][0][1], 4, 2, - &s->huffman_table[i+16*4][0][0], 4, 2, 0); + if (init_vlc(&s->ac_vlc_4[i], 11, 32, + &s->huffman_table[i+16*4][0][1], 8, 4, + &s->huffman_table[i+16*4][0][0], 8, 4, 0) < 0) + goto vlc_fail; } } @@ -2316,80 +1819,155 @@ static int vp3_decode_init(AVCodecContext *avctx) &motion_vector_vlc_table[0][1], 2, 1, &motion_vector_vlc_table[0][0], 2, 1, 0); - /* work out the block mapping tables */ - s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int)); - s->superblock_macroblocks = av_malloc(s->superblock_count * 4 * sizeof(int)); - s->macroblock_fragments = av_malloc(s->macroblock_count * 6 * sizeof(int)); - s->macroblock_coding = av_malloc(s->macroblock_count + 1); - init_block_mapping(s); - for (i = 0; i < 3; i++) { s->current_frame.data[i] = NULL; s->last_frame.data[i] = NULL; s->golden_frame.data[i] = NULL; } + return allocate_tables(avctx); + +vlc_fail: + av_log(avctx, AV_LOG_FATAL, "Invalid huffman table\n"); + return -1; +} + +/// Release and shuffle frames after decode finishes +static void update_frames(AVCodecContext *avctx) +{ + Vp3DecodeContext *s = avctx->priv_data; + + /* release the last frame, if it is allocated and if it is not the + * golden frame */ + if (s->last_frame.data[0] && s->last_frame.type != FF_BUFFER_TYPE_COPY) + ff_thread_release_buffer(avctx, &s->last_frame); + + /* shuffle frames (last = current) */ + s->last_frame= s->current_frame; + + if (s->keyframe) { + if (s->golden_frame.data[0]) + ff_thread_release_buffer(avctx, &s->golden_frame); + s->golden_frame = s->current_frame; + s->last_frame.type = FF_BUFFER_TYPE_COPY; + } + + s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */ +} + +static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src) +{ + Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data; + int qps_changed = 0, i, err; + +#define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field) + + if (!s1->current_frame.data[0] + ||s->width != s1->width + ||s->height!= s1->height) { + if (s != s1) + copy_fields(s, s1, golden_frame, current_frame); + return -1; + } + + if (s != s1) { + // init tables if the first frame hasn't been decoded + if (!s->current_frame.data[0]) { + int y_fragment_count, c_fragment_count; + s->avctx = dst; + err = allocate_tables(dst); + if (err) + return err; + y_fragment_count = s->fragment_width[0] * s->fragment_height[0]; + c_fragment_count = s->fragment_width[1] * s->fragment_height[1]; + memcpy(s->motion_val[0], s1->motion_val[0], y_fragment_count * sizeof(*s->motion_val[0])); + memcpy(s->motion_val[1], s1->motion_val[1], c_fragment_count * sizeof(*s->motion_val[1])); + } + + // copy previous frame data + copy_fields(s, s1, golden_frame, dsp); + + // copy qscale data if necessary + for (i = 0; i < 3; i++) { + if (s->qps[i] != s1->qps[1]) { + qps_changed = 1; + memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i])); + } + } + + if (s->qps[0] != s1->qps[0]) + memcpy(&s->bounding_values_array, &s1->bounding_values_array, sizeof(s->bounding_values_array)); + + if (qps_changed) + copy_fields(s, s1, qps, superblock_count); +#undef copy_fields + } + + update_frames(dst); + return 0; } -/* - * This is the ffmpeg/libavcodec API frame decode function. 
- */ static int vp3_decode_frame(AVCodecContext *avctx, void *data, int *data_size, - uint8_t *buf, int buf_size) + AVPacket *avpkt) { + const uint8_t *buf = avpkt->data; + int buf_size = avpkt->size; Vp3DecodeContext *s = avctx->priv_data; GetBitContext gb; - static int counter = 0; int i; init_get_bits(&gb, buf, buf_size * 8); if (s->theora && get_bits1(&gb)) { -#if 1 av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n"); return -1; -#else - int ptype = get_bits(&gb, 7); - - skip_bits(&gb, 6*8); /* "theora" */ - - switch(ptype) - { - case 1: - theora_decode_comments(avctx, &gb); - break; - case 2: - theora_decode_tables(avctx, &gb); - init_dequantizer(s); - break; - default: - av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype); - } - return buf_size; -#endif } s->keyframe = !get_bits1(&gb); if (!s->theora) skip_bits(&gb, 1); - s->last_quality_index = s->quality_index; - s->quality_index = get_bits(&gb, 6); - if (s->theora >= 0x030200) - skip_bits1(&gb); + for (i = 0; i < 3; i++) + s->last_qps[i] = s->qps[i]; + + s->nqps=0; + do{ + s->qps[s->nqps++]= get_bits(&gb, 6); + } while(s->theora >= 0x030200 && s->nqps<3 && get_bits1(&gb)); + for (i = s->nqps; i < 3; i++) + s->qps[i] = -1; if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n", - s->keyframe?"key":"", counter, s->quality_index); - counter++; + s->keyframe?"key":"", avctx->frame_number+1, s->qps[0]); + + s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] || + avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL : AVDISCARD_NONKEY); - if (s->quality_index != s->last_quality_index) { - init_dequantizer(s); + if (s->qps[0] != s->last_qps[0]) init_loop_filter(s); + + for (i = 0; i < s->nqps; i++) + // reinit all dequantizers if the first one changed, because + // the DC of the first quantizer must be used for all matrices + if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0]) + init_dequantizer(s, i); + + if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe) + return buf_size; + + s->current_frame.reference = 3; + s->current_frame.pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; + if (ff_thread_get_buffer(avctx, &s->current_frame) < 0) { + av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + goto error; } + if (!s->edge_emu_buffer) + s->edge_emu_buffer = av_malloc(9*FFABS(s->current_frame.linesize[0])); + if (s->keyframe) { if (!s->theora) { @@ -2398,7 +1976,7 @@ static int vp3_decode_frame(AVCodecContext *avctx, if (s->version) { s->version = get_bits(&gb, 5); - if (counter == 1) + if (avctx->frame_number == 0) av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version); } } @@ -2408,158 +1986,87 @@ static int vp3_decode_frame(AVCodecContext *avctx, av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n"); skip_bits(&gb, 2); /* reserved? 
*/ } - - if (s->last_frame.data[0] == s->golden_frame.data[0]) { - if (s->golden_frame.data[0]) - avctx->release_buffer(avctx, &s->golden_frame); - s->last_frame= s->golden_frame; /* ensure that we catch any access to this released frame */ - } else { - if (s->golden_frame.data[0]) - avctx->release_buffer(avctx, &s->golden_frame); - if (s->last_frame.data[0]) - avctx->release_buffer(avctx, &s->last_frame); - } - - s->golden_frame.reference = 3; - if(avctx->get_buffer(avctx, &s->golden_frame) < 0) { - av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n"); - return -1; - } - - /* golden frame is also the current frame */ - s->current_frame= s->golden_frame; - - /* time to figure out pixel addresses? */ - if (!s->pixel_addresses_inited) - { - if (!s->flipped_image) - vp3_calculate_pixel_addresses(s); - else - theora_calculate_pixel_addresses(s); - } } else { - /* allocate a new current frame */ - s->current_frame.reference = 3; - if(avctx->get_buffer(avctx, &s->current_frame) < 0) { - av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n"); - return -1; + if (!s->golden_frame.data[0]) { + av_log(s->avctx, AV_LOG_WARNING, "vp3: first frame not a keyframe\n"); + + s->golden_frame.reference = 3; + s->golden_frame.pict_type = AV_PICTURE_TYPE_I; + if (ff_thread_get_buffer(avctx, &s->golden_frame) < 0) { + av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + goto error; + } + s->last_frame = s->golden_frame; + s->last_frame.type = FF_BUFFER_TYPE_COPY; + ff_thread_report_progress(&s->last_frame, INT_MAX, 0); } } - s->current_frame.qscale_table= s->qscale_table; //FIXME allocate individual tables per AVFrame - s->current_frame.qstride= 0; - - {START_TIMER - init_frame(s, &gb); - STOP_TIMER("init_frame")} - -#if KEYFRAMES_ONLY -if (!s->keyframe) { - - memcpy(s->current_frame.data[0], s->golden_frame.data[0], - s->current_frame.linesize[0] * s->height); - memcpy(s->current_frame.data[1], s->golden_frame.data[1], - s->current_frame.linesize[1] * s->height / 2); - memcpy(s->current_frame.data[2], s->golden_frame.data[2], - s->current_frame.linesize[2] * s->height / 2); + memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment)); + ff_thread_finish_setup(avctx); -} else { -#endif - - {START_TIMER if (unpack_superblocks(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n"); - return -1; + goto error; } - STOP_TIMER("unpack_superblocks")} - {START_TIMER if (unpack_modes(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n"); - return -1; + goto error; } - STOP_TIMER("unpack_modes")} - {START_TIMER if (unpack_vectors(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n"); - return -1; + goto error; + } + if (unpack_block_qpis(s, &gb)){ + av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n"); + goto error; } - STOP_TIMER("unpack_vectors")} - {START_TIMER if (unpack_dct_coeffs(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n"); - return -1; + goto error; } - STOP_TIMER("unpack_dct_coeffs")} - {START_TIMER - - reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height); - if ((avctx->flags & CODEC_FLAG_GRAY) == 0) { - reverse_dc_prediction(s, s->u_fragment_start, - s->fragment_width / 2, s->fragment_height / 2); - reverse_dc_prediction(s, s->v_fragment_start, - s->fragment_width / 2, s->fragment_height / 2); + + for (i = 0; i < 3; i++) { + int height = s->height >> (i && s->chroma_y_shift); + if (s->flipped_image) + s->data_offset[i] = 0; + else + s->data_offset[i] = (height-1) * 
s->current_frame.linesize[i]; } - STOP_TIMER("reverse_dc_prediction")} - {START_TIMER - for (i = 0; i < s->macroblock_height; i++) + s->last_slice_end = 0; + for (i = 0; i < s->c_superblock_height; i++) render_slice(s, i); - STOP_TIMER("render_fragments")} - {START_TIMER - apply_loop_filter(s); - STOP_TIMER("apply_loop_filter")} -#if KEYFRAMES_ONLY -} -#endif + // filter the last row + for (i = 0; i < 3; i++) { + int row = (s->height >> (3+(i && s->chroma_y_shift))) - 1; + apply_loop_filter(s, i, row, row+1); + } + vp3_draw_horiz_band(s, s->avctx->height); *data_size=sizeof(AVFrame); *(AVFrame*)data= s->current_frame; - /* release the last frame, if it is allocated and if it is not the - * golden frame */ - if ((s->last_frame.data[0]) && - (s->last_frame.data[0] != s->golden_frame.data[0])) - avctx->release_buffer(avctx, &s->last_frame); - - /* shuffle frames (last = current) */ - s->last_frame= s->current_frame; - s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */ + if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME)) + update_frames(avctx); return buf_size; -} - -/* - * This is the ffmpeg/libavcodec API module cleanup function. - */ -static int vp3_decode_end(AVCodecContext *avctx) -{ - Vp3DecodeContext *s = avctx->priv_data; - av_free(s->all_fragments); - av_free(s->coeffs); - av_free(s->coded_fragment_list); - av_free(s->superblock_fragments); - av_free(s->superblock_macroblocks); - av_free(s->macroblock_fragments); - av_free(s->macroblock_coding); +error: + ff_thread_report_progress(&s->current_frame, INT_MAX, 0); - /* release all frames */ - if (s->golden_frame.data[0] && s->golden_frame.data[0] != s->last_frame.data[0]) - avctx->release_buffer(avctx, &s->golden_frame); - if (s->last_frame.data[0]) - avctx->release_buffer(avctx, &s->last_frame); - /* no need to release the current_frame since it will always be pointing - * to the same frame as either the golden or last frame */ + if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME)) + avctx->release_buffer(avctx, &s->current_frame); - return 0; + return -1; } static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb) { Vp3DecodeContext *s = avctx->priv_data; - if (get_bits(gb, 1)) { + if (get_bits1(gb)) { int token; if (s->entries >= 32) { /* overflow */ av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n"); @@ -2578,21 +2085,48 @@ static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb) } s->huff_code_size++; s->hbits <<= 1; - read_huffman_tree(avctx, gb); + if (read_huffman_tree(avctx, gb)) + return -1; s->hbits |= 1; - read_huffman_tree(avctx, gb); + if (read_huffman_tree(avctx, gb)) + return -1; s->hbits >>= 1; s->huff_code_size--; } return 0; } +static int vp3_init_thread_copy(AVCodecContext *avctx) +{ + Vp3DecodeContext *s = avctx->priv_data; + + s->superblock_coding = NULL; + s->all_fragments = NULL; + s->coded_fragment_list[0] = NULL; + s->dct_tokens_base = NULL; + s->superblock_fragments = NULL; + s->macroblock_coding = NULL; + s->motion_val[0] = NULL; + s->motion_val[1] = NULL; + s->edge_emu_buffer = NULL; + + return 0; +} + +#if CONFIG_THEORA_DECODER +static const enum PixelFormat theora_pix_fmts[4] = { + PIX_FMT_YUV420P, PIX_FMT_NONE, PIX_FMT_YUV422P, PIX_FMT_YUV444P +}; + static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb) { Vp3DecodeContext *s = avctx->priv_data; + int visible_width, visible_height, colorspace; + int offset_x = 0, offset_y = 0; + AVRational fps, aspect; s->theora = 
get_bits_long(gb, 24);
-    av_log(avctx, AV_LOG_INFO, "Theora bitstream version %X\n", s->theora);
+    av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
 
     /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 */
     /* but previous versions have the image flipped relative to vp3 */
@@ -2602,47 +2136,41 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
         av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
     }
 
-    s->width = get_bits(gb, 16) << 4;
-    s->height = get_bits(gb, 16) << 4;
+    visible_width  = s->width  = get_bits(gb, 16) << 4;
+    visible_height = s->height = get_bits(gb, 16) << 4;
 
-    if(avcodec_check_dimensions(avctx, s->width, s->height)){
+    if(av_image_check_size(s->width, s->height, 0, avctx)){
         av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n", s->width, s->height);
         s->width= s->height= 0;
         return -1;
     }
 
-    if (s->theora >= 0x030400)
-    {
-        skip_bits(gb, 32); /* total number of superblocks in a frame */
-        // fixme, the next field is 36bits long
-        skip_bits(gb, 32); /* total number of blocks in a frame */
-        skip_bits(gb, 4); /* total number of blocks in a frame */
-        skip_bits(gb, 32); /* total number of macroblocks in a frame */
-
-        skip_bits(gb, 24); /* frame width */
-        skip_bits(gb, 24); /* frame height */
-    }
-    else
-    {
-        skip_bits(gb, 24); /* frame width */
-        skip_bits(gb, 24); /* frame height */
+    if (s->theora >= 0x030200) {
+        visible_width  = get_bits_long(gb, 24);
+        visible_height = get_bits_long(gb, 24);
+
+        offset_x = get_bits(gb, 8); /* offset x */
+        offset_y = get_bits(gb, 8); /* offset y, from bottom */
     }
 
-    if (s->theora >= 0x030200) {
-        skip_bits(gb, 8); /* offset x */
-        skip_bits(gb, 8); /* offset y */
-    }
+    fps.num = get_bits_long(gb, 32);
+    fps.den = get_bits_long(gb, 32);
+    if (fps.num && fps.den) {
+        av_reduce(&avctx->time_base.num, &avctx->time_base.den,
+                  fps.den, fps.num, 1<<30);
+    }
 
-    skip_bits(gb, 32); /* fps numerator */
-    skip_bits(gb, 32); /* fps denumerator */
-    skip_bits(gb, 24); /* aspect numerator */
-    skip_bits(gb, 24); /* aspect denumerator */
+    aspect.num = get_bits_long(gb, 24);
+    aspect.den = get_bits_long(gb, 24);
+    if (aspect.num && aspect.den) {
+        av_reduce(&avctx->sample_aspect_ratio.num,
+                  &avctx->sample_aspect_ratio.den,
+                  aspect.num, aspect.den, 1<<30);
+    }
 
     if (s->theora < 0x030200)
         skip_bits(gb, 5); /* keyframe frequency force */
-    skip_bits(gb, 8); /* colorspace */
-    if (s->theora >= 0x030400)
-        skip_bits(gb, 2); /* pixel format: 420,res,422,444 */
+    colorspace = get_bits(gb, 8);
     skip_bits(gb, 24); /* bitrate */
 
     skip_bits(gb, 6); /* quality hint */
@@ -2650,15 +2178,28 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
 
     if (s->theora >= 0x030200)
     {
         skip_bits(gb, 5); /* keyframe frequency force */
-
-        if (s->theora < 0x030400)
-            skip_bits(gb, 5); /* spare bits */
+        avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
+        skip_bits(gb, 3); /* reserved */
     }
 
 //    align_get_bits(gb);
 
-    avctx->width = s->width;
-    avctx->height = s->height;
+    if (   visible_width  <= s->width  && visible_width  > s->width-16
+        && visible_height <= s->height && visible_height > s->height-16
+        && !offset_x && (offset_y == s->height - visible_height))
+        avcodec_set_dimensions(avctx, visible_width, visible_height);
+    else
+        avcodec_set_dimensions(avctx, s->width, s->height);
+
+    if (colorspace == 1) {
+        avctx->color_primaries = AVCOL_PRI_BT470M;
+    } else if (colorspace == 2) {
+        avctx->color_primaries = AVCOL_PRI_BT470BG;
+    }
+    if (colorspace == 1 || colorspace == 2) {
+        avctx->colorspace = AVCOL_SPC_BT470BG;
+        avctx->color_trc  = AVCOL_TRC_BT709;
+    }
 
     return 0;
 }
@@ -2671,8 +2212,9 @@ static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
     if (s->theora >= 0x030200) {
         n = get_bits(gb, 3); /* loop filter limit values table */
-        for (i = 0; i < 64; i++)
-            s->filter_limit_values[i] = get_bits(gb, n);
+        if (n)
+            for (i = 0; i < 64; i++)
+                s->filter_limit_values[i] = get_bits(gb, n);
     }
 
     if (s->theora >= 0x030200)
@@ -2710,10 +2252,10 @@ static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
         for (plane = 0; plane <= 2; plane++) {
             int newqr= 1;
             if (inter || plane > 0)
-                newqr = get_bits(gb, 1);
+                newqr = get_bits1(gb);
             if (!newqr) {
                 int qtj, plj;
-                if(inter && get_bits(gb, 1)){
+                if(inter && get_bits1(gb)){
                     qtj = 0;
                     plj = plane;
                 }else{
@@ -2754,11 +2296,13 @@ static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
     for (s->hti = 0; s->hti < 80; s->hti++) {
         s->entries = 0;
         s->huff_code_size = 1;
-        if (!get_bits(gb, 1)) {
+        if (!get_bits1(gb)) {
             s->hbits = 0;
-            read_huffman_tree(avctx, gb);
+            if(read_huffman_tree(avctx, gb))
+                return -1;
             s->hbits = 1;
-            read_huffman_tree(avctx, gb);
+            if(read_huffman_tree(avctx, gb))
+                return -1;
         }
     }
 
@@ -2767,13 +2311,14 @@ static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
     return 0;
 }
 
-static int theora_decode_init(AVCodecContext *avctx)
+static av_cold int theora_decode_init(AVCodecContext *avctx)
 {
     Vp3DecodeContext *s = avctx->priv_data;
     GetBitContext gb;
     int ptype;
-    uint8_t *p= avctx->extradata;
-    int op_bytes, i;
+    uint8_t *header_start[3];
+    int header_len[3];
+    int i;
 
     s->theora = 1;
 
@@ -2783,15 +2328,16 @@ static int theora_decode_init(AVCodecContext *avctx)
         return -1;
     }
 
-    for(i=0;i<3;i++) {
-        op_bytes = *(p++)<<8;
-        op_bytes += *(p++);
+    if (avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size,
+                                  42, header_start, header_len) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
+        return -1;
+    }
 
-        init_get_bits(&gb, p, op_bytes);
-        p += op_bytes;
+    for(i=0;i<3;i++) {
+        init_get_bits(&gb, header_start[i], header_len[i] * 8);
 
         ptype = get_bits(&gb, 8);
 
-        debug_vp3("Theora headerpacket type: %x\n", ptype);
-
         if (!(ptype & 0x80))
         {
@@ -2799,8 +2345,8 @@ static int theora_decode_init(AVCodecContext *avctx)
 //            return -1;
         }
 
-        // FIXME: check for this aswell
-        skip_bits(&gb, 6*8); /* "theora" */
+        // FIXME: Check for this as well.
+        skip_bits_long(&gb, 6*8); /* "theora" */
 
         switch(ptype)
         {
@@ -2812,46 +2358,51 @@ static int theora_decode_init(AVCodecContext *avctx)
 //            theora_decode_comments(avctx, gb);
                 break;
             case 0x82:
-                theora_decode_tables(avctx, &gb);
+                if (theora_decode_tables(avctx, &gb))
+                    return -1;
                 break;
             default:
                 av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype&~0x80);
                 break;
         }
-        if(8*op_bytes != get_bits_count(&gb))
-            av_log(avctx, AV_LOG_ERROR, "%d bits left in packet %X\n", 8*op_bytes - get_bits_count(&gb), ptype);
+        if(ptype != 0x81 && 8*header_len[i] != get_bits_count(&gb))
+            av_log(avctx, AV_LOG_WARNING, "%d bits left in packet %X\n", 8*header_len[i] - get_bits_count(&gb), ptype);
 
         if (s->theora < 0x030200)
             break;
     }
 
-    vp3_decode_init(avctx);
-    return 0;
+    return vp3_decode_init(avctx);
 }
 
-AVCodec vp3_decoder = {
-    "vp3",
-    CODEC_TYPE_VIDEO,
-    CODEC_ID_VP3,
-    sizeof(Vp3DecodeContext),
-    vp3_decode_init,
-    NULL,
-    vp3_decode_end,
-    vp3_decode_frame,
-    0,
-    NULL
+AVCodec ff_theora_decoder = {
+    .name                  = "theora",
+    .type                  = AVMEDIA_TYPE_VIDEO,
+    .id                    = CODEC_ID_THEORA,
+    .priv_data_size        = sizeof(Vp3DecodeContext),
+    .init                  = theora_decode_init,
+    .close                 = vp3_decode_end,
+    .decode                = vp3_decode_frame,
+    .capabilities          = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
+                             CODEC_CAP_FRAME_THREADS,
+    .flush                 = vp3_decode_flush,
+    .long_name             = NULL_IF_CONFIG_SMALL("Theora"),
+    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
+    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context)
 };
+#endif
 
-#ifndef CONFIG_LIBTHEORA
-AVCodec theora_decoder = {
-    "theora",
-    CODEC_TYPE_VIDEO,
-    CODEC_ID_THEORA,
-    sizeof(Vp3DecodeContext),
-    theora_decode_init,
-    NULL,
-    vp3_decode_end,
-    vp3_decode_frame,
-    0,
-    NULL
+AVCodec ff_vp3_decoder = {
+    .name                  = "vp3",
+    .type                  = AVMEDIA_TYPE_VIDEO,
+    .id                    = CODEC_ID_VP3,
+    .priv_data_size        = sizeof(Vp3DecodeContext),
+    .init                  = vp3_decode_init,
+    .close                 = vp3_decode_end,
+    .decode                = vp3_decode_frame,
+    .capabilities          = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
+                             CODEC_CAP_FRAME_THREADS,
+    .flush                 = vp3_decode_flush,
+    .long_name             = NULL_IF_CONFIG_SMALL("On2 VP3"),
+    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
+    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
 };
-#endif