X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fwmaprodec.c;h=6a64641c871a5b009d366cf8f55905824bc01aa2;hb=3d8c80b611aa1e2f800dd9c8d8f350407f95c042;hp=b3ba9abe15ca681e0607c3648307fe8e9a73a430;hpb=9f51c682ee83ecf0995648d4574ac09b52bbcb24;p=ffmpeg

diff --git a/libavcodec/wmaprodec.c b/libavcodec/wmaprodec.c
index b3ba9abe15c..6a64641c871 100644
--- a/libavcodec/wmaprodec.c
+++ b/libavcodec/wmaprodec.c
@@ -86,14 +86,17 @@
  * subframe in order to reconstruct the output samples.
  */
 
+#include "libavutil/float_dsp.h"
+#include "libavutil/intfloat.h"
+#include "libavutil/intreadwrite.h"
 #include "avcodec.h"
 #include "internal.h"
 #include "get_bits.h"
 #include "put_bits.h"
 #include "wmaprodata.h"
-#include "dsputil.h"
 #include "sinewin.h"
 #include "wma.h"
+#include "wma_common.h"
 
 /** current decoder limitations */
 #define WMAPRO_MAX_CHANNELS 8 ///< max number of handled channels
@@ -102,7 +105,7 @@
 #define MAX_FRAMESIZE 32768 ///< maximum compressed frame size
 
 #define WMAPRO_BLOCK_MIN_BITS 6 ///< log2 of min block size
-#define WMAPRO_BLOCK_MAX_BITS 12 ///< log2 of max block size
+#define WMAPRO_BLOCK_MAX_BITS 13 ///< log2 of max block size
 #define WMAPRO_BLOCK_MAX_SIZE (1 << WMAPRO_BLOCK_MAX_BITS) ///< maximum block size
 #define WMAPRO_BLOCK_SIZES (WMAPRO_BLOCK_MAX_BITS - WMAPRO_BLOCK_MIN_BITS + 1) ///< possible block sizes
 
@@ -165,7 +168,7 @@ typedef struct {
 typedef struct WMAProDecodeCtx {
     /* generic decoder variables */
     AVCodecContext* avctx;                          ///< codec context for av_log
-    DSPContext dsp;                                 ///< accelerated DSP functions
+    AVFloatDSPContext fdsp;
     uint8_t frame_data[MAX_FRAMESIZE + FF_INPUT_BUFFER_PADDING_SIZE];///< compressed frame data
     PutBitContext pb;                               ///< context for filling the frame_data buffer
@@ -180,7 +183,6 @@ typedef struct WMAProDecodeCtx {
     uint8_t bits_per_sample;                        ///< integer audio sample size for the unscaled IMDCT output (used to scale to [-1.0, 1.0])
     uint16_t samples_per_frame;                     ///< number of samples to output
     uint16_t log2_frame_size;
-    int8_t num_channels;                            ///< number of channels in the stream (same as AVCodecContext.num_channels)
     int8_t lfe_channel;                             ///< lfe channel index
     uint8_t max_num_subframes;
     uint8_t subframe_len_bits;                      ///< number of bits used for the subframe length
@@ -206,8 +208,6 @@ typedef struct WMAProDecodeCtx {
     uint32_t frame_num;                             ///< current frame number (not used for decoding)
     GetBitContext gb;                               ///< bitstream reader context
     int buf_bit_size;                               ///< buffer size in bits
-    float* samples;                                 ///< current samplebuffer pointer
-    float* samples_end;                             ///< maximum samplebuffer pointer
     uint8_t drc_gain;                               ///< gain for the DRC tool
     int8_t skip_frame;                              ///< skip output step
     int8_t parsed_all_subframes;                    ///< all subframes decoded?
@@ -233,7 +233,7 @@ typedef struct WMAProDecodeCtx {
  *@brief helper function to print the most important members of the context
  *@param s context
  */
-static void av_cold dump_context(WMAProDecodeCtx *s)
+static av_cold void dump_context(WMAProDecodeCtx *s)
 {
 #define PRINT(a, b)     av_log(s->avctx, AV_LOG_DEBUG, " %s = %d\n", a, b);
 #define PRINT_HEX(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %x\n", a, b);
@@ -244,7 +244,7 @@ static void av_cold dump_context(WMAProDecodeCtx *s)
     PRINT("log2 frame size", s->log2_frame_size);
     PRINT("max num subframes", s->max_num_subframes);
     PRINT("len prefix", s->len_prefix);
-    PRINT("num channels", s->num_channels);
+    PRINT("num channels", s->avctx->channels);
 }
 
 /**
@@ -273,15 +273,21 @@ static av_cold int decode_init(AVCodecContext *avctx)
     WMAProDecodeCtx *s = avctx->priv_data;
     uint8_t *edata_ptr = avctx->extradata;
     unsigned int channel_mask;
-    int i;
+    int i, bits;
     int log2_max_num_subframes;
     int num_possible_block_sizes;
 
+    if (!avctx->block_align) {
+        av_log(avctx, AV_LOG_ERROR, "block_align is not set\n");
+        return AVERROR(EINVAL);
+    }
+
     s->avctx = avctx;
-    dsputil_init(&s->dsp, avctx);
+    avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
+
     init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
 
-    avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
+    avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
 
     if (avctx->extradata_size >= 18) {
         s->decode_flags = AV_RL16(edata_ptr+14);
@@ -293,8 +299,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
         av_dlog(avctx, "\n");
 
     } else {
-        av_log_ask_for_sample(avctx, "Unknown extradata size\n");
-        return AVERROR_INVALIDDATA;
+        avpriv_request_sample(avctx, "Unknown extradata size");
+        return AVERROR_PATCHWELCOME;
     }
 
     /** generic init */
@@ -306,8 +312,12 @@ static av_cold int decode_init(AVCodecContext *avctx)
     s->len_prefix = (s->decode_flags & 0x40);
 
     /** get frame len */
-    s->samples_per_frame = 1 << ff_wma_get_frame_len_bits(avctx->sample_rate,
-                                                          3, s->decode_flags);
+    bits = ff_wma_get_frame_len_bits(avctx->sample_rate, 3, s->decode_flags);
+    if (bits > WMAPRO_BLOCK_MAX_BITS) {
+        avpriv_request_sample(avctx, "14-bit block sizes");
+        return AVERROR_PATCHWELCOME;
+    }
+    s->samples_per_frame = 1 << bits;
 
     /** subframe info */
     log2_max_num_subframes = ((s->decode_flags & 0x38) >> 3);
@@ -326,18 +336,23 @@ static av_cold int decode_init(AVCodecContext *avctx)
         return AVERROR_INVALIDDATA;
     }
 
-    s->num_channels = avctx->channels;
+    if (s->avctx->sample_rate <= 0) {
+        av_log(avctx, AV_LOG_ERROR, "invalid sample rate\n");
+        return AVERROR_INVALIDDATA;
+    }
 
-    if (s->num_channels < 0) {
-        av_log(avctx, AV_LOG_ERROR, "invalid number of channels %d\n", s->num_channels);
+    if (avctx->channels < 0) {
+        av_log(avctx, AV_LOG_ERROR, "invalid number of channels %d\n",
+               avctx->channels);
         return AVERROR_INVALIDDATA;
-    } else if (s->num_channels > WMAPRO_MAX_CHANNELS) {
-        av_log_ask_for_sample(avctx, "unsupported number of channels\n");
+    } else if (avctx->channels > WMAPRO_MAX_CHANNELS) {
+        avpriv_request_sample(avctx,
+                              "More than %d channels", WMAPRO_MAX_CHANNELS);
         return AVERROR_PATCHWELCOME;
     }
 
     /** init previous block len */
-    for (i = 0; i < s->num_channels; i++)
+    for (i = 0; i < avctx->channels; i++)
         s->channel[i].prev_block_len = s->samples_per_frame;
 
     /** extract lfe channel position */
@@ -449,6 +464,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
         dump_context(s);
 
     avctx->channel_layout = channel_mask;
+
     return 0;
 }
 
@@ -508,25 +524,23 @@ static int decode_subframe_length(WMAProDecodeCtx *s, int offset)
  */
 static int decode_tilehdr(WMAProDecodeCtx *s)
 {
-    uint16_t num_samples[WMAPRO_MAX_CHANNELS];        /**< sum of samples for all currently known subframes of a channel */
+    uint16_t num_samples[WMAPRO_MAX_CHANNELS] = { 0 };/**< sum of samples for all currently known subframes of a channel */
     uint8_t contains_subframe[WMAPRO_MAX_CHANNELS];   /**< flag indicating if a channel contains the current subframe */
-    int channels_for_cur_subframe = s->num_channels;  /**< number of channels that contain the current subframe */
+    int channels_for_cur_subframe = s->avctx->channels; /**< number of channels that contain the current subframe */
     int fixed_channel_layout = 0;                     /**< flag indicating that all channels use the same subframe offsets and sizes */
     int min_channel_len = 0;                          /**< smallest sum of samples (channels with this length will be processed first) */
     int c;
 
     /* Should never consume more than 3073 bits (256 iterations for the
-     * while loop when always the minimum amount of 128 samples is substracted
+     * while loop when always the minimum amount of 128 samples is subtracted
      * from missing samples in the 8 channel case).
      * 1 + BLOCK_MAX_SIZE * MAX_CHANNELS / BLOCK_MIN_SIZE * (MAX_CHANNELS + 4)
      */
 
     /** reset tiling information */
-    for (c = 0; c < s->num_channels; c++)
+    for (c = 0; c < s->avctx->channels; c++)
         s->channel[c].num_subframes = 0;
 
-    memset(num_samples, 0, sizeof(num_samples));
-
     if (s->max_num_subframes == 1 || get_bits1(&s->gb))
         fixed_channel_layout = 1;
 
@@ -535,7 +549,7 @@ static int decode_tilehdr(WMAProDecodeCtx *s)
         int subframe_len;
 
         /** check which channels contain the subframe */
-        for (c = 0; c < s->num_channels; c++) {
+        for (c = 0; c < s->avctx->channels; c++) {
             if (num_samples[c] == min_channel_len) {
                 if (fixed_channel_layout || channels_for_cur_subframe == 1 ||
                     (min_channel_len == s->samples_per_frame - s->min_samples_per_subframe))
@@ -552,7 +566,7 @@ static int decode_tilehdr(WMAProDecodeCtx *s)
 
         /** add subframes to the individual channels and find new min_channel_len */
         min_channel_len += subframe_len;
-        for (c = 0; c < s->num_channels; c++) {
+        for (c = 0; c < s->avctx->channels; c++) {
             WMAProChannelCtx* chan = &s->channel[c];
 
             if (contains_subframe[c]) {
@@ -579,7 +593,7 @@ static int decode_tilehdr(WMAProDecodeCtx *s)
         }
     } while (min_channel_len < s->samples_per_frame);
 
-    for (c = 0; c < s->num_channels; c++) {
+    for (c = 0; c < s->avctx->channels; c++) {
         int i;
         int offset = 0;
         for (i = 0; i < s->channel[c].num_subframes; i++) {
@@ -605,8 +619,8 @@ static void decode_decorrelation_matrix(WMAProDecodeCtx *s,
     int i;
     int offset = 0;
     int8_t rotation_offset[WMAPRO_MAX_CHANNELS * WMAPRO_MAX_CHANNELS];
-    memset(chgroup->decorrelation_matrix, 0, s->num_channels *
-           s->num_channels * sizeof(*chgroup->decorrelation_matrix));
+    memset(chgroup->decorrelation_matrix, 0, s->avctx->channels *
+           s->avctx->channels * sizeof(*chgroup->decorrelation_matrix));
 
     for (i = 0; i < chgroup->num_channels * (chgroup->num_channels - 1) >> 1; i++)
         rotation_offset[i] = get_bits(&s->gb, 6);
@@ -659,13 +673,13 @@ static int decode_channel_transform(WMAProDecodeCtx* s)
 
     /** in the one channel case channel transforms are pointless */
     s->num_chgroups = 0;
-    if (s->num_channels > 1) {
+    if (s->avctx->channels > 1) {
         int remaining_channels = s->channels_for_cur_subframe;
 
         if (get_bits1(&s->gb)) {
-            av_log_ask_for_sample(s->avctx,
-                                  "unsupported channel transform bit\n");
-            return AVERROR_INVALIDDATA;
+            avpriv_request_sample(s->avctx,
+                                  "Channel transform bit");
+            return AVERROR_PATCHWELCOME;
         }
 
         for (s->num_chgroups = 0; remaining_channels &&
@@ -700,12 +714,12 @@ static int decode_channel_transform(WMAProDecodeCtx* s)
             if (chgroup->num_channels == 2) {
                 if (get_bits1(&s->gb)) {
                     if (get_bits1(&s->gb)) {
-                        av_log_ask_for_sample(s->avctx,
-                                              "unsupported channel transform type\n");
+                        avpriv_request_sample(s->avctx,
+                                              "Unknown channel transform type");
                     }
                 } else {
                     chgroup->transform = 1;
-                    if (s->num_channels == 2) {
+                    if (s->avctx->channels == 2) {
                         chgroup->decorrelation_matrix[0] = 1.0;
                         chgroup->decorrelation_matrix[1] = -1.0;
                         chgroup->decorrelation_matrix[2] = 1.0;
@@ -726,8 +740,8 @@ static int decode_channel_transform(WMAProDecodeCtx* s)
                 } else {
                     /** FIXME: more than 6 coupled channels not supported */
                     if (chgroup->num_channels > 6) {
-                        av_log_ask_for_sample(s->avctx,
-                                              "coupled channels > 6\n");
+                        avpriv_request_sample(s->avctx,
+                                              "Coupled channels > 6");
                     } else {
                         memcpy(chgroup->decorrelation_matrix,
                                default_decorrelation[chgroup->num_channels],
@@ -767,7 +781,7 @@ static int decode_coeffs(WMAProDecodeCtx *s, int c)
     /* Integers 0..15 as single-precision floats.  The table saves a
        costly int to float conversion, and storing the values as
        integers allows fast sign-flipping. */
-    static const int fval_tab[16] = {
+    static const uint32_t fval_tab[16] = {
         0x00000000, 0x3f800000, 0x40000000, 0x40400000,
         0x40800000, 0x40a00000, 0x40c00000, 0x40e00000,
         0x41000000, 0x41100000, 0x41200000, 0x41300000,
@@ -799,7 +813,7 @@ static int decode_coeffs(WMAProDecodeCtx *s, int c)
        4 vector coded large values) */
     while ((s->transmit_num_vec_coeffs || !rl_mode) &&
            (cur_coeff + 3 < ci->num_vec_coeffs)) {
-        int vals[4];
+        uint32_t vals[4];
         int i;
         unsigned int idx;
 
@@ -809,15 +823,15 @@ static int decode_coeffs(WMAProDecodeCtx *s, int c)
             for (i = 0; i < 4; i += 2) {
                 idx = get_vlc2(&s->gb, vec2_vlc.table, VLCBITS, VEC2MAXDEPTH);
                 if (idx == HUFF_VEC2_SIZE - 1) {
-                    int v0, v1;
+                    uint32_t v0, v1;
                     v0 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH);
                     if (v0 == HUFF_VEC1_SIZE - 1)
                         v0 += ff_wma_get_large_val(&s->gb);
                     v1 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH);
                     if (v1 == HUFF_VEC1_SIZE - 1)
                         v1 += ff_wma_get_large_val(&s->gb);
-                    ((float*)vals)[i ] = v0;
-                    ((float*)vals)[i+1] = v1;
+                    vals[i ] = av_float2int(v0);
+                    vals[i+1] = av_float2int(v1);
                 } else {
                     vals[i] = fval_tab[symbol_to_vec2[idx] >> 4 ];
                     vals[i+1] = fval_tab[symbol_to_vec2[idx] & 0xF];
@@ -833,8 +847,8 @@ static int decode_coeffs(WMAProDecodeCtx *s, int c)
             /** decode sign */
             for (i = 0; i < 4; i++) {
                 if (vals[i]) {
-                    int sign = get_bits1(&s->gb) - 1;
-                    *(uint32_t*)&ci->coeffs[cur_coeff] = vals[i] ^ sign<<31;
+                    uint32_t sign = get_bits1(&s->gb) - 1;
+                    AV_WN32A(&ci->coeffs[cur_coeff], vals[i] ^ sign << 31);
                     num_zeros = 0;
                 } else {
                     ci->coeffs[cur_coeff] = 0;
@@ -995,14 +1009,14 @@ static void inverse_channel_transform(WMAProDecodeCtx *s)
                             (*ch)[y] = sum;
                         }
                     }
-                } else if (s->num_channels == 2) {
+                } else if (s->avctx->channels == 2) {
                     int len = FFMIN(sfb[1], s->subframe_len) - sfb[0];
-                    s->dsp.vector_fmul_scalar(ch_data[0] + sfb[0],
-                                              ch_data[0] + sfb[0],
-                                              181.0 / 128, len);
-                    s->dsp.vector_fmul_scalar(ch_data[1] + sfb[0],
-                                              ch_data[1] + sfb[0],
-                                              181.0 / 128, len);
+                    s->fdsp.vector_fmul_scalar(ch_data[0] + sfb[0],
+                                               ch_data[0] + sfb[0],
+                                               181.0 / 128, len);
+                    s->fdsp.vector_fmul_scalar(ch_data[1] + sfb[0],
+                                               ch_data[1] + sfb[0],
+                                               181.0 / 128, len);
                 }
             }
         }
@@ -1031,8 +1045,8 @@ static void wmapro_window(WMAProDecodeCtx *s)
 
         winlen >>= 1;
 
-        s->dsp.vector_fmul_window(start, start, start + winlen,
-                                  window, winlen);
+        s->fdsp.vector_fmul_window(start, start, start + winlen,
+                                   window, winlen);
 
         s->channel[c].prev_block_len = s->subframe_len;
     }
@@ -1048,7 +1062,7 @@ static int decode_subframe(WMAProDecodeCtx *s)
     int offset = s->samples_per_frame;
     int subframe_len = s->samples_per_frame;
     int i;
-    int total_samples = s->samples_per_frame * s->num_channels;
+    int total_samples = s->samples_per_frame * s->avctx->channels;
     int transmit_coeffs = 0;
     int cur_subwoofer_cutoff;
 
@@ -1058,7 +1072,7 @@ static int decode_subframe(WMAProDecodeCtx *s)
         == the next block of the channel with the smallest number of
         decoded samples */
-    for (i = 0; i < s->num_channels; i++) {
+    for (i = 0; i < s->avctx->channels; i++) {
         s->channel[i].grouped = 0;
         if (offset > s->channel[i].decoded_samples) {
             offset = s->channel[i].decoded_samples;
@@ -1072,9 +1086,9 @@ static int decode_subframe(WMAProDecodeCtx *s)
 
     /** get a list of all channels that contain the estimated block */
     s->channels_for_cur_subframe = 0;
-    for (i = 0; i < s->num_channels; i++) {
+    for (i = 0; i < s->avctx->channels; i++) {
         const int cur_subframe = s->channel[i].cur_subframe;
-        /** substract already processed samples */
+        /** subtract already processed samples */
         total_samples -= s->channel[i].decoded_samples;
 
         /** and count if there are multiple subframes that match our profile */
@@ -1134,8 +1148,8 @@ static int decode_subframe(WMAProDecodeCtx *s)
 
     /** no idea for what the following bit is used */
     if (get_bits1(&s->gb)) {
-        av_log_ask_for_sample(s->avctx, "reserved bit set\n");
-        return AVERROR_INVALIDDATA;
+        avpriv_request_sample(s->avctx, "Reserved bit");
+        return AVERROR_PATCHWELCOME;
     }
 
 
@@ -1158,7 +1172,12 @@ static int decode_subframe(WMAProDecodeCtx *s)
             int num_bits = av_log2((s->subframe_len + 3)/4) + 1;
             for (i = 0; i < s->channels_for_cur_subframe; i++) {
                 int c = s->channel_indexes_for_cur_subframe[i];
-                s->channel[c].num_vec_coeffs = get_bits(&s->gb, num_bits) << 2;
+                int num_vec_coeffs = get_bits(&s->gb, num_bits) << 2;
+                if (num_vec_coeffs > WMAPRO_BLOCK_MAX_SIZE) {
+                    av_log(s->avctx, AV_LOG_ERROR, "num_vec_coeffs %d is too large\n", num_vec_coeffs);
+                    return AVERROR_INVALIDDATA;
+                }
+                s->channel[c].num_vec_coeffs = num_vec_coeffs;
             }
         } else {
             for (i = 0; i < s->channels_for_cur_subframe; i++) {
@@ -1243,9 +1262,9 @@ static int decode_subframe(WMAProDecodeCtx *s)
                                 s->channel[c].scale_factor_step;
                 const float quant = pow(10.0, exp / 20.0);
                 int start = s->cur_sfb_offsets[b];
-                s->dsp.vector_fmul_scalar(s->tmp + start,
-                                          s->channel[c].coeffs + start,
-                                          quant, end - start);
+                s->fdsp.vector_fmul_scalar(s->tmp + start,
+                                           s->channel[c].coeffs + start,
+                                           quant, end - start);
             }
 
             /** apply imdct (imdct_half == DCTIV with reverse) */
@@ -1275,21 +1294,13 @@ static int decode_subframe(WMAProDecodeCtx *s)
  *@return 0 if the trailer bit indicates that this is the last frame,
  *        1 if there are additional frames
  */
-static int decode_frame(WMAProDecodeCtx *s)
+static int decode_frame(WMAProDecodeCtx *s, AVFrame *frame, int *got_frame_ptr)
 {
+    AVCodecContext *avctx = s->avctx;
     GetBitContext* gb = &s->gb;
     int more_frames = 0;
     int len = 0;
-    int i;
-
-    /** check for potential output buffer overflow */
-    if (s->num_channels * s->samples_per_frame > s->samples_end - s->samples) {
-        /** return an error if no frame could be decoded at all */
-        av_log(s->avctx, AV_LOG_ERROR,
-               "not enough space for the output samples\n");
-        s->packet_loss = 1;
-        return 0;
-    }
+    int i, ret;
 
     /** get frame length */
     if (s->len_prefix)
@@ -1304,9 +1315,9 @@ static int decode_frame(WMAProDecodeCtx *s)
     }
 
     /** read postproc transform */
-    if (s->num_channels > 1 && get_bits1(gb)) {
+    if (s->avctx->channels > 1 && get_bits1(gb)) {
         if (get_bits1(gb)) {
-            for (i = 0; i < s->num_channels * s->num_channels; i++)
+            for (i = 0; i < avctx->channels * avctx->channels; i++)
                 skip_bits(gb, 4);
         }
     }
@@ -1341,7 +1352,7 @@ static int decode_frame(WMAProDecodeCtx *s)
 
     /** reset subframe states */
     s->parsed_all_subframes = 0;
-    for (i = 0; i < s->num_channels; i++) {
+    for (i = 0; i < avctx->channels; i++) {
         s->channel[i].decoded_samples = 0;
         s->channel[i].cur_subframe = 0;
         s->channel[i].reuse_sf = 0;
@@ -1355,19 +1366,20 @@ static int decode_frame(WMAProDecodeCtx *s)
         }
     }
 
-    /** interleave samples and write them to the output buffer */
-    for (i = 0; i < s->num_channels; i++) {
-        float* ptr = s->samples + i;
-        int incr = s->num_channels;
-        float* iptr = s->channel[i].out;
-        float* iend = iptr + s->samples_per_frame;
-
-        // FIXME should create/use a DSP function here
-        while (iptr < iend) {
-            *ptr = *iptr++;
-            ptr += incr;
-        }
+    /* get output buffer */
+    frame->nb_samples = s->samples_per_frame;
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        s->packet_loss = 1;
+        return 0;
+    }
+    /** copy samples to the output buffer */
+    for (i = 0; i < avctx->channels; i++)
+        memcpy(frame->extended_data[i], s->channel[i].out,
+               s->samples_per_frame * sizeof(*s->channel[i].out));
+
+    for (i = 0; i < avctx->channels; i++) {
         /** reuse second half of the IMDCT output for the next frame */
         memcpy(&s->channel[i].out[0],
                &s->channel[i].out[s->samples_per_frame],
@@ -1376,8 +1388,11 @@ static int decode_frame(WMAProDecodeCtx *s)
 
     if (s->skip_frame) {
         s->skip_frame = 0;
-    } else
-        s->samples += s->num_channels * s->samples_per_frame;
+        *got_frame_ptr = 0;
+        av_frame_unref(frame);
+    } else {
+        *got_frame_ptr = 1;
+    }
 
     if (s->len_prefix) {
         if (len != (get_bits_count(gb) - s->frame_offset) + 2) {
@@ -1419,7 +1434,7 @@ static int remaining_bits(WMAProDecodeCtx *s, GetBitContext *gb)
  *@param s codec context
  *@param gb bitstream reader context
  *@param len length of the partial frame
- *@param append decides wether to reset the buffer or not
+ *@param append decides whether to reset the buffer or not
  */
 static void save_bits(WMAProDecodeCtx *s, GetBitContext* gb, int len,
                       int append)
@@ -1439,7 +1454,7 @@ static void save_bits(WMAProDecodeCtx *s, GetBitContext* gb, int len,
     buflen = (s->num_saved_bits + len + 8) >> 3;
 
     if (len <= 0 || buflen > MAX_FRAMESIZE) {
-        av_log_ask_for_sample(s->avctx, "input buffer too small\n");
+        avpriv_request_sample(s->avctx, "Too small input buffer");
         s->packet_loss = 1;
         return;
     }
@@ -1470,12 +1485,11 @@ static void save_bits(WMAProDecodeCtx *s, GetBitContext* gb, int len,
  *@brief Decode a single WMA packet.
 *@param avctx codec context
 *@param data the output buffer
- *@param data_size number of bytes that were written to the output buffer
 *@param avpkt input packet
 *@return number of bytes that were read from the input buffer
 */
-static int decode_packet(AVCodecContext *avctx,
-                         void *data, int *data_size, AVPacket* avpkt)
+static int decode_packet(AVCodecContext *avctx, void *data,
+                         int *got_frame_ptr, AVPacket* avpkt)
 {
     WMAProDecodeCtx *s = avctx->priv_data;
     GetBitContext* gb = &s->pgb;
@@ -1484,16 +1498,17 @@ static int decode_packet(AVCodecContext *avctx,
     int num_bits_prev_frame;
     int packet_sequence_number;
 
-    s->samples = data;
-    s->samples_end = (float*)((int8_t*)data + *data_size);
-    *data_size = 0;
+    *got_frame_ptr = 0;
 
     if (s->packet_done || s->packet_loss) {
         s->packet_done = 0;
 
         /** sanity check for the buffer length */
-        if (buf_size < avctx->block_align)
-            return 0;
+        if (buf_size < avctx->block_align) {
+            av_log(avctx, AV_LOG_ERROR, "Input packet too small (%d < %d)\n",
+                   buf_size, avctx->block_align);
+            return AVERROR_INVALIDDATA;
+        }
 
         s->next_packet_start = buf_size - avctx->block_align;
         buf_size = avctx->block_align;
@@ -1533,7 +1548,7 @@ static int decode_packet(AVCodecContext *avctx,
 
             /** decode the cross packet frame if it is valid */
             if (!s->packet_loss)
-                decode_frame(s);
+                decode_frame(s, data, got_frame_ptr);
         } else if (s->num_saved_bits - s->frame_offset) {
             av_dlog(avctx, "ignoring %x previously saved bits\n",
                     s->num_saved_bits - s->frame_offset);
@@ -1556,7 +1571,7 @@ static int decode_packet(AVCodecContext *avctx,
             (frame_size = show_bits(gb, s->log2_frame_size)) &&
             frame_size <= remaining_bits(s, gb)) {
             save_bits(s, gb, frame_size, 0);
-            s->packet_done = !decode_frame(s);
+            s->packet_done = !decode_frame(s, data, got_frame_ptr);
         } else if (!s->len_prefix &&
                    s->num_saved_bits > get_bits_count(&s->gb)) {
             /** when the frames do not have a length prefix, we don't know
@@ -1566,7 +1581,7 @@ static int decode_packet(AVCodecContext *avctx,
                 therefore we save the incoming packet first, then we append
                 the "previous frame" data from the next packet so that we get
                 a buffer that only contains full frames */
-            s->packet_done = !decode_frame(s);
+            s->packet_done = !decode_frame(s, data, got_frame_ptr);
         } else
             s->packet_done = 1;
     }
@@ -1578,10 +1593,11 @@ static int decode_packet(AVCodecContext *avctx,
             save_bits(s, gb, remaining_bits(s, gb), 0);
     }
 
-    *data_size = (int8_t *)s->samples - (int8_t *)data;
     s->packet_offset = get_bits_count(gb) & 7;
+    if (s->packet_loss)
+        return AVERROR_INVALIDDATA;
 
-    return (s->packet_loss) ? AVERROR_INVALIDDATA : get_bits_count(gb) >> 3;
+    return get_bits_count(gb) >> 3;
 }
 
 /**
@@ -1594,7 +1610,7 @@ static void flush(AVCodecContext *avctx)
     int i;
     /** reset output buffer as a part of it is used during the windowing of
         a new frame */
-    for (i = 0; i < s->num_channels; i++)
+    for (i = 0; i < avctx->channels; i++)
         memset(s->channel[i].out, 0, s->samples_per_frame *
                sizeof(*s->channel[i].out));
     s->packet_loss = 1;
@@ -1607,12 +1623,14 @@ AVCodec ff_wmapro_decoder = {
     .name           = "wmapro",
     .type           = AVMEDIA_TYPE_AUDIO,
-    .id             = CODEC_ID_WMAPRO,
+    .id             = AV_CODEC_ID_WMAPRO,
     .priv_data_size = sizeof(WMAProDecodeCtx),
     .init           = decode_init,
     .close          = decode_end,
     .decode         = decode_packet,
-    .capabilities   = CODEC_CAP_SUBFRAMES,
-    .flush= flush,
-    .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Professional"),
+    .capabilities   = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
+    .flush          = flush,
+    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Professional"),
+    .sample_fmts    = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
+                                                      AV_SAMPLE_FMT_NONE },
 };