X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fapedec.c;h=b07f3a090be1b9a8d1b6a3c54cfc822fdb8220ed;hb=01880d287be6e2269b92eee8d9dc442532d277ea;hp=a90a07fa7deff206fa91344428e8a4a4ac27cf37;hpb=ba87f0801d77c21eb1e4891ca1f846500bbb0939;p=ffmpeg diff --git a/libavcodec/apedec.c b/libavcodec/apedec.c index a90a07fa7de..b07f3a090be 100644 --- a/libavcodec/apedec.c +++ b/libavcodec/apedec.c @@ -3,35 +3,35 @@ * Copyright (c) 2007 Benjamin Zores * based upon libdemac from Dave Chapman. * - * This file is part of FFmpeg. + * This file is part of Libav. * - * FFmpeg is free software; you can redistribute it and/or + * Libav is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * FFmpeg is distributed in the hope that it will be useful, + * Libav is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software + * License along with Libav; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ -#define ALT_BITSTREAM_READER_LE #include "avcodec.h" #include "dsputil.h" -#include "get_bits.h" #include "bytestream.h" +#include "libavutil/audioconvert.h" +#include "libavutil/avassert.h" +#include "libavutil/opt.h" /** * @file * Monkey's Audio lossless audio decoder */ -#define BLOCKS_PER_LOOP 4608 #define MAX_CHANNELS 2 #define MAX_BYTESPERSAMPLE 3 @@ -126,10 +126,13 @@ typedef struct APEPredictor { /** Decoder context */ typedef struct APEContext { + AVClass *class; ///< class for AVOptions AVCodecContext *avctx; + AVFrame frame; DSPContext dsp; int channels; int samples; ///< samples left to decode in current frame + int bps; int fileversion; ///< codec version, very important in decoding process int compression_level; ///< compression levels @@ -138,12 +141,12 @@ typedef struct APEContext { uint32_t CRC; ///< frame CRC int frameflags; ///< frame flags - int currentframeblocks; ///< samples (per channel) in current frame - int blocksdecoded; ///< count of decoded samples in current frame APEPredictor predictor; ///< predictor used for final reconstruction - int32_t decoded0[BLOCKS_PER_LOOP]; ///< decoded data for the first channel - int32_t decoded1[BLOCKS_PER_LOOP]; ///< decoded data for the second channel + int32_t *decoded_buffer; + int decoded_size; + int32_t *decoded[MAX_CHANNELS]; ///< decoded data for each channel + int blocks_per_loop; ///< maximum number of samples to decode for each call int16_t* filterbuf[APE_FILTER_LEVELS]; ///< filter memory @@ -154,30 +157,57 @@ typedef struct APEContext { uint8_t *data; ///< current frame data uint8_t *data_end; ///< frame data end + int data_size; ///< frame data allocated size const uint8_t *ptr; ///< current position in frame data - const uint8_t *last_ptr; ///< position where last 4608-sample block ended int error; } APEContext; // TODO: dsputilize -static av_cold int ape_decode_init(AVCodecContext * avctx) +static av_cold int ape_decode_close(AVCodecContext *avctx) +{ + APEContext *s = avctx->priv_data; + int i; + + for (i = 0; i < APE_FILTER_LEVELS; i++) + av_freep(&s->filterbuf[i]); + 
+ av_freep(&s->decoded_buffer); + av_freep(&s->data); + s->decoded_size = s->data_size = 0; + + return 0; +} + +static av_cold int ape_decode_init(AVCodecContext *avctx) { APEContext *s = avctx->priv_data; int i; if (avctx->extradata_size != 6) { av_log(avctx, AV_LOG_ERROR, "Incorrect extradata\n"); - return -1; - } - if (avctx->bits_per_coded_sample != 16) { - av_log(avctx, AV_LOG_ERROR, "Only 16-bit samples are supported\n"); - return -1; + return AVERROR(EINVAL); } if (avctx->channels > 2) { av_log(avctx, AV_LOG_ERROR, "Only mono and stereo is supported\n"); - return -1; + return AVERROR(EINVAL); + } + s->bps = avctx->bits_per_coded_sample; + switch (s->bps) { + case 8: + avctx->sample_fmt = AV_SAMPLE_FMT_U8; + break; + case 16: + avctx->sample_fmt = AV_SAMPLE_FMT_S16; + break; + case 24: + avctx->sample_fmt = AV_SAMPLE_FMT_S32; + break; + default: + av_log_ask_for_sample(avctx, "Unsupported bits per coded sample %d\n", + s->bps); + return AVERROR_PATCHWELCOME; } s->avctx = avctx; s->channels = avctx->channels; @@ -185,38 +215,36 @@ static av_cold int ape_decode_init(AVCodecContext * avctx) s->compression_level = AV_RL16(avctx->extradata + 2); s->flags = AV_RL16(avctx->extradata + 4); - av_log(avctx, AV_LOG_DEBUG, "Compression Level: %d - Flags: %d\n", s->compression_level, s->flags); + av_log(avctx, AV_LOG_DEBUG, "Compression Level: %d - Flags: %d\n", + s->compression_level, s->flags); if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE) { - av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n", s->compression_level); - return -1; + av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n", + s->compression_level); + return AVERROR_INVALIDDATA; } s->fset = s->compression_level / 1000 - 1; for (i = 0; i < APE_FILTER_LEVELS; i++) { if (!ape_filter_orders[s->fset][i]) break; - s->filterbuf[i] = av_malloc((ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4); + FF_ALLOC_OR_GOTO(avctx, s->filterbuf[i], + (ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4, + filter_alloc_fail); } - dsputil_init(&s->dsp, avctx); - avctx->sample_fmt = SAMPLE_FMT_S16; - avctx->channel_layout = (avctx->channels==2) ? CH_LAYOUT_STEREO : CH_LAYOUT_MONO; - return 0; -} - -static av_cold int ape_decode_close(AVCodecContext * avctx) -{ - APEContext *s = avctx->priv_data; - int i; + ff_dsputil_init(&s->dsp, avctx); + avctx->channel_layout = (avctx->channels==2) ? 
AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO; - for (i = 0; i < APE_FILTER_LEVELS; i++) - av_freep(&s->filterbuf[i]); + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; - av_freep(&s->data); return 0; +filter_alloc_fail: + ape_decode_close(avctx); + return AVERROR(ENOMEM); } /** - * @defgroup rangecoder APE range decoder + * @name APE range decoding functions * @{ */ @@ -227,7 +255,7 @@ static av_cold int ape_decode_close(AVCodecContext * avctx) #define BOTTOM_VALUE (TOP_VALUE >> 8) /** Start the decoder */ -static inline void range_start_decoding(APEContext * ctx) +static inline void range_start_decoding(APEContext *ctx) { ctx->rc.buffer = bytestream_get_byte(&ctx->ptr); ctx->rc.low = ctx->rc.buffer >> (8 - EXTRA_BITS); @@ -235,13 +263,16 @@ static inline void range_start_decoding(APEContext * ctx) } /** Perform normalization */ -static inline void range_dec_normalize(APEContext * ctx) +static inline void range_dec_normalize(APEContext *ctx) { while (ctx->rc.range <= BOTTOM_VALUE) { ctx->rc.buffer <<= 8; - if(ctx->ptr < ctx->data_end) + if(ctx->ptr < ctx->data_end) { ctx->rc.buffer += *ctx->ptr; - ctx->ptr++; + ctx->ptr++; + } else { + ctx->error = 1; + } ctx->rc.low = (ctx->rc.low << 8) | ((ctx->rc.buffer >> 1) & 0xFF); ctx->rc.range <<= 8; } @@ -253,7 +284,7 @@ static inline void range_dec_normalize(APEContext * ctx) * @param tot_f is the total frequency or (code_value)1<rc.help = ctx->rc.range / tot_f; @@ -265,7 +296,7 @@ static inline int range_decode_culfreq(APEContext * ctx, int tot_f) * @param ctx decoder context * @param shift number of bits to decode */ -static inline int range_decode_culshift(APEContext * ctx, int shift) +static inline int range_decode_culshift(APEContext *ctx, int shift) { range_dec_normalize(ctx); ctx->rc.help = ctx->rc.range >> shift; @@ -279,14 +310,14 @@ static inline int range_decode_culshift(APEContext * ctx, int shift) * @param sy_f the interval length (frequency of the symbol) * @param lt_f the lower end (frequency sum of < symbols) */ -static inline void range_decode_update(APEContext * ctx, int sy_f, int lt_f) +static inline void range_decode_update(APEContext *ctx, int sy_f, int lt_f) { ctx->rc.low -= ctx->rc.help * lt_f; ctx->rc.range = ctx->rc.help * sy_f; } /** Decode n bits (n <= 16) without modelling */ -static inline int range_decode_bits(APEContext * ctx, int n) +static inline int range_decode_bits(APEContext *ctx, int n) { int sym = range_decode_culshift(ctx, n); range_decode_update(ctx, 1, sym); @@ -338,7 +369,7 @@ static const uint16_t counts_diff_3980[21] = { * @param counts probability range start position * @param counts_diff probability range widths */ -static inline int range_get_symbol(APEContext * ctx, +static inline int range_get_symbol(APEContext *ctx, const uint16_t counts[], const uint16_t counts_diff[]) { @@ -362,7 +393,7 @@ static inline int range_get_symbol(APEContext * ctx, } /** @} */ // group rangecoder -static inline void update_rice(APERice *rice, int x) +static inline void update_rice(APERice *rice, unsigned int x) { int lim = rice->k ? 
(1 << (rice->k + 4)) : 0; rice->ksum += ((x + 1) / 2) - ((rice->ksum + 16) >> 5); @@ -373,9 +404,9 @@ static inline void update_rice(APERice *rice, int x) rice->k++; } -static inline int ape_decode_value(APEContext * ctx, APERice *rice) +static inline int ape_decode_value(APEContext *ctx, APERice *rice) { - int x, overflow; + unsigned int x, overflow; if (ctx->fileversion < 3990) { int tmpk; @@ -390,9 +421,12 @@ static inline int ape_decode_value(APEContext * ctx, APERice *rice) if (tmpk <= 16) x = range_decode_bits(ctx, tmpk); - else { + else if (tmpk <= 32) { x = range_decode_bits(ctx, 16); x |= (range_decode_bits(ctx, tmpk - 16) << 16); + } else { + av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk); + return AVERROR_INVALIDDATA; } x += overflow << tmpk; } else { @@ -440,32 +474,23 @@ static inline int ape_decode_value(APEContext * ctx, APERice *rice) return -(x >> 1); } -static void entropy_decode(APEContext * ctx, int blockstodecode, int stereo) +static void entropy_decode(APEContext *ctx, int blockstodecode, int stereo) { - int32_t *decoded0 = ctx->decoded0; - int32_t *decoded1 = ctx->decoded1; + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; - ctx->blocksdecoded = blockstodecode; - - if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) { - /* We are pure silence, just memset the output buffer. */ - memset(decoded0, 0, blockstodecode * sizeof(int32_t)); - memset(decoded1, 0, blockstodecode * sizeof(int32_t)); - } else { - while (blockstodecode--) { - *decoded0++ = ape_decode_value(ctx, &ctx->riceY); - if (stereo) - *decoded1++ = ape_decode_value(ctx, &ctx->riceX); - } + while (blockstodecode--) { + *decoded0++ = ape_decode_value(ctx, &ctx->riceY); + if (stereo) + *decoded1++ = ape_decode_value(ctx, &ctx->riceX); } - - if (ctx->blocksdecoded == ctx->currentframeblocks) - range_dec_normalize(ctx); /* normalize to use up all bytes */ } -static void init_entropy_decoder(APEContext * ctx) +static int init_entropy_decoder(APEContext *ctx) { /* Read the CRC */ + if (ctx->data_end - ctx->ptr < 6) + return AVERROR_INVALIDDATA; ctx->CRC = bytestream_get_be32(&ctx->ptr); /* Read the frame flags if they exist */ @@ -473,12 +498,11 @@ static void init_entropy_decoder(APEContext * ctx) if ((ctx->fileversion > 3820) && (ctx->CRC & 0x80000000)) { ctx->CRC &= ~0x80000000; + if (ctx->data_end - ctx->ptr < 6) + return AVERROR_INVALIDDATA; ctx->frameflags = bytestream_get_be32(&ctx->ptr); } - /* Keep a count of the blocks decoded in this frame */ - ctx->blocksdecoded = 0; - /* Initialize the rice structs */ ctx->riceX.k = 10; ctx->riceX.ksum = (1 << ctx->riceX.k) * 16; @@ -489,18 +513,20 @@ static void init_entropy_decoder(APEContext * ctx) ctx->ptr++; range_start_decoding(ctx); + + return 0; } static const int32_t initial_coeffs[4] = { 360, 317, -109, 98 }; -static void init_predictor_decoder(APEContext * ctx) +static void init_predictor_decoder(APEContext *ctx) { APEPredictor *p = &ctx->predictor; /* Zero the history buffers */ - memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(int32_t)); + memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(*p->historybuffer)); p->buf = p->historybuffer; /* Initialize and zero the coefficients */ @@ -518,7 +544,10 @@ static inline int APESIGN(int32_t x) { return (x < 0) - (x > 0); } -static av_always_inline int predictor_update_filter(APEPredictor *p, const int decoded, const int filter, const int delayA, const int delayB, const int adaptA, const int adaptB) +static av_always_inline int predictor_update_filter(APEPredictor 
*p, + const int decoded, const int filter, + const int delayA, const int delayB, + const int adaptA, const int adaptB) { int32_t predictionA, predictionB, sign; @@ -562,17 +591,19 @@ static av_always_inline int predictor_update_filter(APEPredictor *p, const int d return p->filterA[filter]; } -static void predictor_decode_stereo(APEContext * ctx, int count) +static void predictor_decode_stereo(APEContext *ctx, int count) { APEPredictor *p = &ctx->predictor; - int32_t *decoded0 = ctx->decoded0; - int32_t *decoded1 = ctx->decoded1; + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; while (count--) { /* Predictor Y */ - *decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB, YADAPTCOEFFSA, YADAPTCOEFFSB); + *decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB, + YADAPTCOEFFSA, YADAPTCOEFFSB); decoded0++; - *decoded1 = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB, XADAPTCOEFFSA, XADAPTCOEFFSB); + *decoded1 = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB, + XADAPTCOEFFSA, XADAPTCOEFFSB); decoded1++; /* Combined */ @@ -580,16 +611,17 @@ static void predictor_decode_stereo(APEContext * ctx, int count) /* Have we filled the history buffer? */ if (p->buf == p->historybuffer + HISTORY_SIZE) { - memmove(p->historybuffer, p->buf, PREDICTOR_SIZE * sizeof(int32_t)); + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); p->buf = p->historybuffer; } } } -static void predictor_decode_mono(APEContext * ctx, int count) +static void predictor_decode_mono(APEContext *ctx, int count) { APEPredictor *p = &ctx->predictor; - int32_t *decoded0 = ctx->decoded0; + int32_t *decoded0 = ctx->decoded[0]; int32_t predictionA, currentA, A, sign; currentA = p->lastA[0]; @@ -620,7 +652,8 @@ static void predictor_decode_mono(APEContext * ctx, int count) /* Have we filled the history buffer? 
*/ if (p->buf == p->historybuffer + HISTORY_SIZE) { - memmove(p->historybuffer, p->buf, PREDICTOR_SIZE * sizeof(int32_t)); + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); p->buf = p->historybuffer; } @@ -631,32 +664,35 @@ static void predictor_decode_mono(APEContext * ctx, int count) p->lastA[0] = currentA; } -static void do_init_filter(APEFilter *f, int16_t * buf, int order) +static void do_init_filter(APEFilter *f, int16_t *buf, int order) { f->coeffs = buf; f->historybuffer = buf + order; f->delay = f->historybuffer + order * 2; f->adaptcoeffs = f->historybuffer + order; - memset(f->historybuffer, 0, (order * 2) * sizeof(int16_t)); - memset(f->coeffs, 0, order * sizeof(int16_t)); + memset(f->historybuffer, 0, (order * 2) * sizeof(*f->historybuffer)); + memset(f->coeffs, 0, order * sizeof(*f->coeffs)); f->avg = 0; } -static void init_filter(APEContext * ctx, APEFilter *f, int16_t * buf, int order) +static void init_filter(APEContext *ctx, APEFilter *f, int16_t *buf, int order) { do_init_filter(&f[0], buf, order); do_init_filter(&f[1], buf + order * 3 + HISTORY_SIZE, order); } -static void do_apply_filter(APEContext * ctx, int version, APEFilter *f, int32_t *data, int count, int order, int fracbits) +static void do_apply_filter(APEContext *ctx, int version, APEFilter *f, + int32_t *data, int count, int order, int fracbits) { int res; int absres; while (count--) { /* round fixedpoint scalar product */ - res = ctx->dsp.scalarproduct_and_madd_int16(f->coeffs, f->delay - order, f->adaptcoeffs - order, order, APESIGN(*data)); + res = ctx->dsp.scalarproduct_and_madd_int16(f->coeffs, f->delay - order, + f->adaptcoeffs - order, + order, APESIGN(*data)); res = (res + (1 << (fracbits - 1))) >> fracbits; res += *data; *data++ = res; @@ -675,7 +711,8 @@ static void do_apply_filter(APEContext * ctx, int version, APEFilter *f, int32_t /* Update the adaption coefficients */ absres = FFABS(res); if (absres) - *f->adaptcoeffs = ((res & (1<<31)) - (1<<30)) >> (25 + (absres <= f->avg*3) + (absres <= f->avg*4/3)); + *f->adaptcoeffs = ((res & (-1<<31)) ^ (-1<<30)) >> + (25 + (absres <= f->avg*3) + (absres <= f->avg*4/3)); else *f->adaptcoeffs = 0; @@ -691,15 +728,15 @@ static void do_apply_filter(APEContext * ctx, int version, APEFilter *f, int32_t /* Have we filled the history buffer? 
*/ if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) { memmove(f->historybuffer, f->delay - (order * 2), - (order * 2) * sizeof(int16_t)); + (order * 2) * sizeof(*f->historybuffer)); f->delay = f->historybuffer + order * 2; f->adaptcoeffs = f->historybuffer + order; } } } -static void apply_filter(APEContext * ctx, APEFilter *f, - int32_t * data0, int32_t * data1, +static void apply_filter(APEContext *ctx, APEFilter *f, + int32_t *data0, int32_t *data1, int count, int order, int fracbits) { do_apply_filter(ctx, ctx->fileversion, &f[0], data0, count, order, fracbits); @@ -707,64 +744,61 @@ static void apply_filter(APEContext * ctx, APEFilter *f, do_apply_filter(ctx, ctx->fileversion, &f[1], data1, count, order, fracbits); } -static void ape_apply_filters(APEContext * ctx, int32_t * decoded0, - int32_t * decoded1, int count) +static void ape_apply_filters(APEContext *ctx, int32_t *decoded0, + int32_t *decoded1, int count) { int i; for (i = 0; i < APE_FILTER_LEVELS; i++) { if (!ape_filter_orders[ctx->fset][i]) break; - apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count, ape_filter_orders[ctx->fset][i], ape_filter_fracbits[ctx->fset][i]); + apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count, + ape_filter_orders[ctx->fset][i], + ape_filter_fracbits[ctx->fset][i]); } } -static void init_frame_decoder(APEContext * ctx) +static int init_frame_decoder(APEContext *ctx) { - int i; - init_entropy_decoder(ctx); + int i, ret; + if ((ret = init_entropy_decoder(ctx)) < 0) + return ret; init_predictor_decoder(ctx); for (i = 0; i < APE_FILTER_LEVELS; i++) { if (!ape_filter_orders[ctx->fset][i]) break; - init_filter(ctx, ctx->filters[i], ctx->filterbuf[i], ape_filter_orders[ctx->fset][i]); + init_filter(ctx, ctx->filters[i], ctx->filterbuf[i], + ape_filter_orders[ctx->fset][i]); } + return 0; } -static void ape_unpack_mono(APEContext * ctx, int count) +static void ape_unpack_mono(APEContext *ctx, int count) { - int32_t left; - int32_t *decoded0 = ctx->decoded0; - int32_t *decoded1 = ctx->decoded1; - if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) { - entropy_decode(ctx, count, 0); /* We are pure silence, so we're done. */ av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence mono\n"); return; } entropy_decode(ctx, count, 0); - ape_apply_filters(ctx, decoded0, NULL, count); + ape_apply_filters(ctx, ctx->decoded[0], NULL, count); /* Now apply the predictor decoding */ predictor_decode_mono(ctx, count); /* Pseudo-stereo - just copy left channel to right channel */ if (ctx->channels == 2) { - while (count--) { - left = *decoded0; - *(decoded1++) = *(decoded0++) = left; - } + memcpy(ctx->decoded[1], ctx->decoded[0], count * sizeof(*ctx->decoded[1])); } } -static void ape_unpack_stereo(APEContext * ctx, int count) +static void ape_unpack_stereo(APEContext *ctx, int count) { int32_t left, right; - int32_t *decoded0 = ctx->decoded0; - int32_t *decoded1 = ctx->decoded1; + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) { /* We are pure silence, so we're done. 
*/ @@ -788,66 +822,97 @@ static void ape_unpack_stereo(APEContext * ctx, int count) } } -static int ape_decode_frame(AVCodecContext * avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int ape_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; - int buf_size = avpkt->size; APEContext *s = avctx->priv_data; - int16_t *samples = data; - int nblocks; - int i, n; + uint8_t *sample8; + int16_t *sample16; + int32_t *sample24; + int i, ret; int blockstodecode; - int bytes_used; - - if (buf_size == 0 && !s->samples) { - *data_size = 0; - return 0; - } + int bytes_used = 0; - /* should not happen but who knows */ - if (BLOCKS_PER_LOOP * 2 * avctx->channels > *data_size) { - av_log (avctx, AV_LOG_ERROR, "Packet size is too big to be handled in lavc! (max is %d where you have %d)\n", *data_size, s->samples * 2 * avctx->channels); - return -1; - } + /* this should never be negative, but bad things will happen if it is, so + check it just to make sure. */ + av_assert0(s->samples >= 0); if(!s->samples){ - s->data = av_realloc(s->data, (buf_size + 3) & ~3); + uint32_t nblocks, offset; + int buf_size; + + if (!avpkt->size) { + *got_frame_ptr = 0; + return 0; + } + if (avpkt->size < 8) { + av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); + return AVERROR_INVALIDDATA; + } + buf_size = avpkt->size & ~3; + if (buf_size != avpkt->size) { + av_log(avctx, AV_LOG_WARNING, "packet size is not a multiple of 4. " + "extra bytes at the end will be skipped.\n"); + } + + av_fast_malloc(&s->data, &s->data_size, buf_size); + if (!s->data) + return AVERROR(ENOMEM); s->dsp.bswap_buf((uint32_t*)s->data, (const uint32_t*)buf, buf_size >> 2); - s->ptr = s->last_ptr = s->data; + s->ptr = s->data; s->data_end = s->data + buf_size; - nblocks = s->samples = bytestream_get_be32(&s->ptr); - n = bytestream_get_be32(&s->ptr); - if(n < 0 || n > 3){ + nblocks = bytestream_get_be32(&s->ptr); + offset = bytestream_get_be32(&s->ptr); + if (offset > 3) { av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n"); s->data = NULL; - return -1; + return AVERROR_INVALIDDATA; } - s->ptr += n; - - s->currentframeblocks = nblocks; - buf += 4; - if (s->samples <= 0) { - *data_size = 0; - return buf_size; + if (s->data_end - s->ptr < offset) { + av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); + return AVERROR_INVALIDDATA; } + s->ptr += offset; - memset(s->decoded0, 0, sizeof(s->decoded0)); - memset(s->decoded1, 0, sizeof(s->decoded1)); + if (!nblocks || nblocks > INT_MAX) { + av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %u.\n", nblocks); + return AVERROR_INVALIDDATA; + } + s->samples = nblocks; /* Initialize the frame decoder */ - init_frame_decoder(s); + if (init_frame_decoder(s) < 0) { + av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n"); + return AVERROR_INVALIDDATA; + } + + bytes_used = avpkt->size; } if (!s->data) { - *data_size = 0; - return buf_size; + *got_frame_ptr = 0; + return avpkt->size; } - nblocks = s->samples; - blockstodecode = FFMIN(BLOCKS_PER_LOOP, nblocks); + blockstodecode = FFMIN(s->blocks_per_loop, s->samples); + + /* reallocate decoded sample buffer if needed */ + av_fast_malloc(&s->decoded_buffer, &s->decoded_size, + 2 * FFALIGN(blockstodecode, 8) * sizeof(*s->decoded_buffer)); + if (!s->decoded_buffer) + return AVERROR(ENOMEM); + memset(s->decoded_buffer, 0, s->decoded_size); + s->decoded[0] = s->decoded_buffer; + s->decoded[1] = s->decoded_buffer + FFALIGN(blockstodecode, 8); + + /* get output buffer */ + 
s->frame.nb_samples = blockstodecode; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } s->error=0; @@ -857,35 +922,78 @@ static int ape_decode_frame(AVCodecContext * avctx, ape_unpack_stereo(s, blockstodecode); emms_c(); - if(s->error || s->ptr > s->data_end){ + if (s->error) { s->samples=0; av_log(avctx, AV_LOG_ERROR, "Error decoding frame\n"); - return -1; + return AVERROR_INVALIDDATA; } - for (i = 0; i < blockstodecode; i++) { - *samples++ = s->decoded0[i]; - if(s->channels == 2) - *samples++ = s->decoded1[i]; + switch (s->bps) { + case 8: + sample8 = (uint8_t *)s->frame.data[0]; + for (i = 0; i < blockstodecode; i++) { + *sample8++ = (s->decoded[0][i] + 0x80) & 0xff; + if (s->channels == 2) + *sample8++ = (s->decoded[1][i] + 0x80) & 0xff; + } + break; + case 16: + sample16 = (int16_t *)s->frame.data[0]; + for (i = 0; i < blockstodecode; i++) { + *sample16++ = s->decoded[0][i]; + if (s->channels == 2) + *sample16++ = s->decoded[1][i]; + } + break; + case 24: + sample24 = (int32_t *)s->frame.data[0]; + for (i = 0; i < blockstodecode; i++) { + *sample24++ = s->decoded[0][i] << 8; + if (s->channels == 2) + *sample24++ = s->decoded[1][i] << 8; + } + break; } s->samples -= blockstodecode; - *data_size = blockstodecode * 2 * s->channels; - bytes_used = s->samples ? s->ptr - s->last_ptr : buf_size; - s->last_ptr = s->ptr; + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; + return bytes_used; } -AVCodec ape_decoder = { - "ape", - AVMEDIA_TYPE_AUDIO, - CODEC_ID_APE, - sizeof(APEContext), - ape_decode_init, - NULL, - ape_decode_close, - ape_decode_frame, - .capabilities = CODEC_CAP_SUBFRAMES, - .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"), +static void ape_flush(AVCodecContext *avctx) +{ + APEContext *s = avctx->priv_data; + s->samples= 0; +} + +#define OFFSET(x) offsetof(APEContext, x) +#define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM) +static const AVOption options[] = { + { "max_samples", "maximum number of samples decoded per call", OFFSET(blocks_per_loop), AV_OPT_TYPE_INT, { 4608 }, 1, INT_MAX, PAR, "max_samples" }, + { "all", "no maximum. decode all samples for each packet at once", 0, AV_OPT_TYPE_CONST, { INT_MAX }, INT_MIN, INT_MAX, PAR, "max_samples" }, + { NULL}, +}; + +static const AVClass ape_decoder_class = { + .class_name = "APE decoder", + .item_name = av_default_item_name, + .option = options, + .version = LIBAVUTIL_VERSION_INT, +}; + +AVCodec ff_ape_decoder = { + .name = "ape", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_APE, + .priv_data_size = sizeof(APEContext), + .init = ape_decode_init, + .close = ape_decode_close, + .decode = ape_decode_frame, + .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1, + .flush = ape_flush, + .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"), + .priv_class = &ape_decoder_class, };
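
Appended for orientation only, below the patch: a minimal caller-side sketch (not part of this commit) of how an application drives the decoder after this conversion to the AVFrame/got_frame output API. The names decode_ape_packet, dec_ctx and pkt are illustrative; the loop assumes a libavcodec recent enough to provide avcodec_decode_audio4() and a context opened with avcodec_open2(). Because the decoder keeps CODEC_CAP_SUBFRAMES and now also sets CODEC_CAP_DELAY, a single APE packet is drained over several calls, each returning at most "max_samples" samples (the new private option, default 4608).

/* Caller-side sketch only -- not part of this patch. Assumes the
 * avcodec_decode_audio4() API that this commit converts the decoder to,
 * and a dec_ctx opened with avcodec_open2(); error handling is minimal. */
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

static int decode_ape_packet(AVCodecContext *dec_ctx, AVPacket *pkt)
{
    AVFrame *frame = avcodec_alloc_frame();
    int got_frame = 0, ret = 0;

    if (!frame)
        return AVERROR(ENOMEM);

    /* The decoder advertises CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY: one APE
     * packet yields several output frames of at most "max_samples" samples
     * each, so keep calling until no more frames come out. */
    do {
        got_frame = 0;
        ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, pkt);
        if (ret < 0)
            break;
        if (got_frame) {
            /* frame->data[0] holds interleaved U8, S16 or S32 samples,
             * matching the 8/16/24 bits_per_coded_sample cases above. */
            /* ... consume frame->nb_samples samples here ... */
        }
        pkt->data += ret;   /* the first call consumes the whole packet ... */
        pkt->size -= ret;   /* ... later calls report 0 bytes used */
    } while (pkt->size > 0 || got_frame);

    av_free(frame);
    return ret;
}

Setting the private option to "all" before opening the codec, e.g. with av_opt_set() on the codec context's priv_data ("max_samples", "all"), makes the decoder hand back every sample of a packet in a single frame, at the cost of a correspondingly larger decoded-sample buffer.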