X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fapedec.c;h=d28e51aae6678c20d4bc7d4093ecd866108c7289;hb=0313653928b47c3b0e493c08c66bb1a374695f7c;hp=0619358dad6a0410cb6a842410f035e6c61292bf;hpb=ad17207b517508c95aa9bd1f67e7beb6d09af52f;p=ffmpeg diff --git a/libavcodec/apedec.c b/libavcodec/apedec.c index 0619358dad6..d28e51aae66 100644 --- a/libavcodec/apedec.c +++ b/libavcodec/apedec.c @@ -20,20 +20,21 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ -#define ALT_BITSTREAM_READER_LE +#include "libavutil/avassert.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" #include "avcodec.h" #include "dsputil.h" -#include "get_bits.h" #include "bytestream.h" -#include "libavutil/audioconvert.h" -#include "libavutil/avassert.h" +#include "internal.h" +#include "get_bits.h" +#include "unary.h" /** * @file * Monkey's Audio lossless audio decoder */ -#define BLOCKS_PER_LOOP 4608 #define MAX_CHANNELS 2 #define MAX_BYTESPERSAMPLE 3 @@ -124,14 +125,18 @@ typedef struct APEPredictor { int32_t coeffsA[2][4]; ///< adaption coefficients int32_t coeffsB[2][5]; ///< adaption coefficients int32_t historybuffer[HISTORY_SIZE + PREDICTOR_SIZE]; + + unsigned int sample_pos; } APEPredictor; /** Decoder context */ typedef struct APEContext { + AVClass *class; ///< class for AVOptions AVCodecContext *avctx; DSPContext dsp; int channels; int samples; ///< samples left to decode in current frame + int bps; int fileversion; ///< codec version, very important in decoding process int compression_level; ///< compression levels @@ -142,8 +147,10 @@ typedef struct APEContext { int frameflags; ///< frame flags APEPredictor predictor; ///< predictor used for final reconstruction - int32_t decoded0[BLOCKS_PER_LOOP]; ///< decoded data for the first channel - int32_t decoded1[BLOCKS_PER_LOOP]; ///< decoded data for the second channel + int32_t *decoded_buffer; + int decoded_size; + int32_t *decoded[MAX_CHANNELS]; ///< decoded data for each channel + int blocks_per_loop; ///< maximum number of samples to decode for each call int16_t* filterbuf[APE_FILTER_LEVELS]; ///< filter memory @@ -151,15 +158,41 @@ typedef struct APEContext { APERice riceX; ///< rice code parameters for the second channel APERice riceY; ///< rice code parameters for the first channel APEFilter filters[APE_FILTER_LEVELS][2]; ///< filters used for reconstruction + GetBitContext gb; uint8_t *data; ///< current frame data uint8_t *data_end; ///< frame data end + int data_size; ///< frame data allocated size const uint8_t *ptr; ///< current position in frame data - const uint8_t *last_ptr; ///< position where last 4608-sample block ended int error; + + void (*entropy_decode_mono)(struct APEContext *ctx, int blockstodecode); + void (*entropy_decode_stereo)(struct APEContext *ctx, int blockstodecode); + void (*predictor_decode_mono)(struct APEContext *ctx, int count); + void (*predictor_decode_stereo)(struct APEContext *ctx, int count); } APEContext; +static void ape_apply_filters(APEContext *ctx, int32_t *decoded0, + int32_t *decoded1, int count); + +static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode); +static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode); +static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode); +static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode); +static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode); +static void entropy_decode_stereo_3900(APEContext *ctx, int 
blockstodecode); +static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode); +static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode); +static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode); + +static void predictor_decode_mono_3800(APEContext *ctx, int count); +static void predictor_decode_stereo_3800(APEContext *ctx, int count); +static void predictor_decode_mono_3930(APEContext *ctx, int count); +static void predictor_decode_stereo_3930(APEContext *ctx, int count); +static void predictor_decode_mono_3950(APEContext *ctx, int count); +static void predictor_decode_stereo_3950(APEContext *ctx, int count); + // TODO: dsputilize static av_cold int ape_decode_close(AVCodecContext *avctx) @@ -170,7 +203,10 @@ static av_cold int ape_decode_close(AVCodecContext *avctx) for (i = 0; i < APE_FILTER_LEVELS; i++) av_freep(&s->filterbuf[i]); + av_freep(&s->decoded_buffer); av_freep(&s->data); + s->decoded_size = s->data_size = 0; + return 0; } @@ -183,14 +219,26 @@ static av_cold int ape_decode_init(AVCodecContext *avctx) av_log(avctx, AV_LOG_ERROR, "Incorrect extradata\n"); return AVERROR(EINVAL); } - if (avctx->bits_per_coded_sample != 16) { - av_log(avctx, AV_LOG_ERROR, "Only 16-bit samples are supported\n"); - return AVERROR(EINVAL); - } if (avctx->channels > 2) { av_log(avctx, AV_LOG_ERROR, "Only mono and stereo is supported\n"); return AVERROR(EINVAL); } + s->bps = avctx->bits_per_coded_sample; + switch (s->bps) { + case 8: + avctx->sample_fmt = AV_SAMPLE_FMT_U8P; + break; + case 16: + avctx->sample_fmt = AV_SAMPLE_FMT_S16P; + break; + case 24: + avctx->sample_fmt = AV_SAMPLE_FMT_S32P; + break; + default: + avpriv_request_sample(avctx, + "%d bits per coded sample", s->bps); + return AVERROR_PATCHWELCOME; + } s->avctx = avctx; s->channels = avctx->channels; s->fileversion = AV_RL16(avctx->extradata); @@ -199,7 +247,8 @@ static av_cold int ape_decode_init(AVCodecContext *avctx) av_log(avctx, AV_LOG_DEBUG, "Compression Level: %d - Flags: %d\n", s->compression_level, s->flags); - if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE) { + if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE || + (s->fileversion < 3930 && s->compression_level == COMPRESSION_LEVEL_INSANE)) { av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n", s->compression_level); return AVERROR_INVALIDDATA; @@ -213,9 +262,37 @@ static av_cold int ape_decode_init(AVCodecContext *avctx) filter_alloc_fail); } - dsputil_init(&s->dsp, avctx); - avctx->sample_fmt = AV_SAMPLE_FMT_S16; + if (s->fileversion < 3860) { + s->entropy_decode_mono = entropy_decode_mono_0000; + s->entropy_decode_stereo = entropy_decode_stereo_0000; + } else if (s->fileversion < 3900) { + s->entropy_decode_mono = entropy_decode_mono_3860; + s->entropy_decode_stereo = entropy_decode_stereo_3860; + } else if (s->fileversion < 3930) { + s->entropy_decode_mono = entropy_decode_mono_3900; + s->entropy_decode_stereo = entropy_decode_stereo_3900; + } else if (s->fileversion < 3990) { + s->entropy_decode_mono = entropy_decode_mono_3900; + s->entropy_decode_stereo = entropy_decode_stereo_3930; + } else { + s->entropy_decode_mono = entropy_decode_mono_3990; + s->entropy_decode_stereo = entropy_decode_stereo_3990; + } + + if (s->fileversion < 3930) { + s->predictor_decode_mono = predictor_decode_mono_3800; + s->predictor_decode_stereo = predictor_decode_stereo_3800; + } else if (s->fileversion < 3950) { + s->predictor_decode_mono = 
predictor_decode_mono_3930; + s->predictor_decode_stereo = predictor_decode_stereo_3930; + } else { + s->predictor_decode_mono = predictor_decode_mono_3950; + s->predictor_decode_stereo = predictor_decode_stereo_3950; + } + + ff_dsputil_init(&s->dsp, avctx); avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO; + return 0; filter_alloc_fail: ape_decode_close(avctx); @@ -372,7 +449,7 @@ static inline int range_get_symbol(APEContext *ctx, } /** @} */ // group rangecoder -static inline void update_rice(APERice *rice, int x) +static inline void update_rice(APERice *rice, unsigned int x) { int lim = rice->k ? (1 << (rice->k + 4)) : 0; rice->ksum += ((x + 1) / 2) - ((rice->ksum + 16) >> 5); @@ -383,64 +460,120 @@ static inline void update_rice(APERice *rice, int x) rice->k++; } -static inline int ape_decode_value(APEContext *ctx, APERice *rice) +static inline int get_rice_ook(GetBitContext *gb, int k) { - int x, overflow; + unsigned int x; - if (ctx->fileversion < 3990) { - int tmpk; + x = get_unary(gb, 1, get_bits_left(gb)); - overflow = range_get_symbol(ctx, counts_3970, counts_diff_3970); + if (k) + x = (x << k) | get_bits(gb, k); - if (overflow == (MODEL_ELEMENTS - 1)) { - tmpk = range_decode_bits(ctx, 5); - overflow = 0; - } else - tmpk = (rice->k < 1) ? 0 : rice->k - 1; + return x; +} + +static inline int ape_decode_value_3860(APEContext *ctx, GetBitContext *gb, + APERice *rice) +{ + unsigned int x, overflow; - if (tmpk <= 16) - x = range_decode_bits(ctx, tmpk); - else { - x = range_decode_bits(ctx, 16); - x |= (range_decode_bits(ctx, tmpk - 16) << 16); + overflow = get_unary(gb, 1, get_bits_left(gb)); + + if (ctx->fileversion > 3880) { + while (overflow >= 16) { + overflow -= 16; + rice->k += 4; } - x += overflow << tmpk; + } + + if (!rice->k) + x = overflow; + else + x = (overflow << rice->k) + get_bits(gb, rice->k); + + rice->ksum += x - (rice->ksum + 8 >> 4); + if (rice->ksum < (rice->k ? 1 << (rice->k + 4) : 0)) + rice->k--; + else if (rice->ksum >= (1 << (rice->k + 5)) && rice->k < 24) + rice->k++; + + /* Convert to signed */ + if (x & 1) + return (x >> 1) + 1; + else + return -(x >> 1); +} + +static inline int ape_decode_value_3900(APEContext *ctx, APERice *rice) +{ + unsigned int x, overflow; + int tmpk; + + overflow = range_get_symbol(ctx, counts_3970, counts_diff_3970); + + if (overflow == (MODEL_ELEMENTS - 1)) { + tmpk = range_decode_bits(ctx, 5); + overflow = 0; + } else + tmpk = (rice->k < 1) ? 
0 : rice->k - 1; + + if (tmpk <= 16 || ctx->fileversion < 3910) + x = range_decode_bits(ctx, tmpk); + else if (tmpk <= 32) { + x = range_decode_bits(ctx, 16); + x |= (range_decode_bits(ctx, tmpk - 16) << 16); } else { - int base, pivot; + av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk); + return AVERROR_INVALIDDATA; + } + x += overflow << tmpk; - pivot = rice->ksum >> 5; - if (pivot == 0) - pivot = 1; + update_rice(rice, x); - overflow = range_get_symbol(ctx, counts_3980, counts_diff_3980); + /* Convert to signed */ + if (x & 1) + return (x >> 1) + 1; + else + return -(x >> 1); +} - if (overflow == (MODEL_ELEMENTS - 1)) { - overflow = range_decode_bits(ctx, 16) << 16; - overflow |= range_decode_bits(ctx, 16); - } +static inline int ape_decode_value_3990(APEContext *ctx, APERice *rice) +{ + unsigned int x, overflow; + int base, pivot; - if (pivot < 0x10000) { - base = range_decode_culfreq(ctx, pivot); - range_decode_update(ctx, 1, base); - } else { - int base_hi = pivot, base_lo; - int bbits = 0; + pivot = rice->ksum >> 5; + if (pivot == 0) + pivot = 1; - while (base_hi & ~0xFFFF) { - base_hi >>= 1; - bbits++; - } - base_hi = range_decode_culfreq(ctx, base_hi + 1); - range_decode_update(ctx, 1, base_hi); - base_lo = range_decode_culfreq(ctx, 1 << bbits); - range_decode_update(ctx, 1, base_lo); + overflow = range_get_symbol(ctx, counts_3980, counts_diff_3980); - base = (base_hi << bbits) + base_lo; + if (overflow == (MODEL_ELEMENTS - 1)) { + overflow = range_decode_bits(ctx, 16) << 16; + overflow |= range_decode_bits(ctx, 16); + } + + if (pivot < 0x10000) { + base = range_decode_culfreq(ctx, pivot); + range_decode_update(ctx, 1, base); + } else { + int base_hi = pivot, base_lo; + int bbits = 0; + + while (base_hi & ~0xFFFF) { + base_hi >>= 1; + bbits++; } + base_hi = range_decode_culfreq(ctx, base_hi + 1); + range_decode_update(ctx, 1, base_hi); + base_lo = range_decode_culfreq(ctx, 1 << bbits); + range_decode_update(ctx, 1, base_lo); - x = base + overflow * pivot; + base = (base_hi << bbits) + base_lo; } + x = base + overflow * pivot; + update_rice(rice, x); /* Convert to signed */ @@ -450,30 +583,148 @@ static inline int ape_decode_value(APEContext *ctx, APERice *rice) return -(x >> 1); } -static void entropy_decode(APEContext *ctx, int blockstodecode, int stereo) +static void decode_array_0000(APEContext *ctx, GetBitContext *gb, + int32_t *out, APERice *rice, int blockstodecode) { - int32_t *decoded0 = ctx->decoded0; - int32_t *decoded1 = ctx->decoded1; + int i; + int ksummax, ksummin; - if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) { - /* We are pure silence, just memset the output buffer. */ - memset(decoded0, 0, blockstodecode * sizeof(int32_t)); - memset(decoded1, 0, blockstodecode * sizeof(int32_t)); - } else { - while (blockstodecode--) { - *decoded0++ = ape_decode_value(ctx, &ctx->riceY); - if (stereo) - *decoded1++ = ape_decode_value(ctx, &ctx->riceX); + rice->ksum = 0; + for (i = 0; i < 5; i++) { + out[i] = get_rice_ook(&ctx->gb, 10); + rice->ksum += out[i]; + } + rice->k = av_log2(rice->ksum / 10) + 1; + for (; i < 64; i++) { + out[i] = get_rice_ook(&ctx->gb, rice->k); + rice->ksum += out[i]; + rice->k = av_log2(rice->ksum / ((i + 1) * 2)) + 1; + } + ksummax = 1 << rice->k + 7; + ksummin = rice->k ? (1 << rice->k + 6) : 0; + for (; i < blockstodecode; i++) { + out[i] = get_rice_ook(&ctx->gb, rice->k); + rice->ksum += out[i] - out[i - 64]; + while (rice->ksum < ksummin) { + rice->k--; + ksummin = rice->k ? 
ksummin >> 1 : 0; + ksummax >>= 1; + } + while (rice->ksum >= ksummax) { + rice->k++; + if (rice->k > 24) + return; + ksummax <<= 1; + ksummin = ksummin ? ksummin << 1 : 128; } } + + for (i = 0; i < blockstodecode; i++) { + if (out[i] & 1) + out[i] = (out[i] >> 1) + 1; + else + out[i] = -(out[i] >> 1); + } +} + +static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode) +{ + decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY, + blockstodecode); +} + +static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode) +{ + decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY, + blockstodecode); + decode_array_0000(ctx, &ctx->gb, ctx->decoded[1], &ctx->riceX, + blockstodecode); +} + +static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + + while (blockstodecode--) + *decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY); +} + +static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + int blocks = blockstodecode; + + while (blockstodecode--) + *decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY); + while (blocks--) + *decoded1++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceX); +} + +static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + + while (blockstodecode--) + *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY); +} + +static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + int blocks = blockstodecode; + + while (blockstodecode--) + *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY); + range_dec_normalize(ctx); + // because of some implementation peculiarities we need to backpedal here + ctx->ptr -= 1; + range_start_decoding(ctx); + while (blocks--) + *decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX); +} + +static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + + while (blockstodecode--) { + *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY); + *decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX); + } +} + +static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + + while (blockstodecode--) + *decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY); +} + +static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + + while (blockstodecode--) { + *decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY); + *decoded1++ = ape_decode_value_3990(ctx, &ctx->riceX); + } } static int init_entropy_decoder(APEContext *ctx) { /* Read the CRC */ - if (ctx->data_end - ctx->ptr < 6) - return AVERROR_INVALIDDATA; - ctx->CRC = bytestream_get_be32(&ctx->ptr); + if (ctx->fileversion >= 3900) { + if (ctx->data_end - ctx->ptr < 6) + return AVERROR_INVALIDDATA; + ctx->CRC = bytestream_get_be32(&ctx->ptr); + } else { + ctx->CRC = get_bits_long(&ctx->gb, 32); + } /* Read the frame flags if they exist */ ctx->frameflags = 0; @@ -491,15 +742,29 @@ static int init_entropy_decoder(APEContext *ctx) ctx->riceY.k = 10; ctx->riceY.ksum = (1 << ctx->riceY.k) * 16; - /* The first 8 bits of input are ignored. 
*/ - ctx->ptr++; + if (ctx->fileversion >= 3900) { + /* The first 8 bits of input are ignored. */ + ctx->ptr++; - range_start_decoding(ctx); + range_start_decoding(ctx); + } return 0; } -static const int32_t initial_coeffs[4] = { +static const int32_t initial_coeffs_fast_3320[1] = { + 375, +}; + +static const int32_t initial_coeffs_a_3800[3] = { + 64, 115, 64, +}; + +static const int32_t initial_coeffs_b_3800[2] = { + 740, 0 +}; + +static const int32_t initial_coeffs_3930[4] = { 360, 317, -109, 98 }; @@ -508,17 +773,39 @@ static void init_predictor_decoder(APEContext *ctx) APEPredictor *p = &ctx->predictor; /* Zero the history buffers */ - memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(int32_t)); + memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(*p->historybuffer)); p->buf = p->historybuffer; /* Initialize and zero the coefficients */ - memcpy(p->coeffsA[0], initial_coeffs, sizeof(initial_coeffs)); - memcpy(p->coeffsA[1], initial_coeffs, sizeof(initial_coeffs)); + if (ctx->fileversion < 3930) { + if (ctx->compression_level == COMPRESSION_LEVEL_FAST) { + memcpy(p->coeffsA[0], initial_coeffs_fast_3320, + sizeof(initial_coeffs_fast_3320)); + memcpy(p->coeffsA[1], initial_coeffs_fast_3320, + sizeof(initial_coeffs_fast_3320)); + } else { + memcpy(p->coeffsA[0], initial_coeffs_a_3800, + sizeof(initial_coeffs_a_3800)); + memcpy(p->coeffsA[1], initial_coeffs_a_3800, + sizeof(initial_coeffs_a_3800)); + } + } else { + memcpy(p->coeffsA[0], initial_coeffs_3930, sizeof(initial_coeffs_3930)); + memcpy(p->coeffsA[1], initial_coeffs_3930, sizeof(initial_coeffs_3930)); + } memset(p->coeffsB, 0, sizeof(p->coeffsB)); + if (ctx->fileversion < 3930) { + memcpy(p->coeffsB[0], initial_coeffs_b_3800, + sizeof(initial_coeffs_b_3800)); + memcpy(p->coeffsB[1], initial_coeffs_b_3800, + sizeof(initial_coeffs_b_3800)); + } p->filterA[0] = p->filterA[1] = 0; p->filterB[0] = p->filterB[1] = 0; p->lastA[0] = p->lastA[1] = 0; + + p->sample_pos = 0; } /** Get inverse sign of integer (-1 for positive, 1 for negative and 0 for zero) */ @@ -526,6 +813,304 @@ static inline int APESIGN(int32_t x) { return (x < 0) - (x > 0); } +static av_always_inline int filter_fast_3320(APEPredictor *p, + const int decoded, const int filter, + const int delayA) +{ + int32_t predictionA; + + p->buf[delayA] = p->lastA[filter]; + if (p->sample_pos < 3) { + p->lastA[filter] = decoded; + p->filterA[filter] = decoded; + return decoded; + } + + predictionA = p->buf[delayA] * 2 - p->buf[delayA - 1]; + p->lastA[filter] = decoded + (predictionA * p->coeffsA[filter][0] >> 9); + + if ((decoded ^ predictionA) > 0) + p->coeffsA[filter][0]++; + else + p->coeffsA[filter][0]--; + + p->filterA[filter] += p->lastA[filter]; + + return p->filterA[filter]; +} + +static av_always_inline int filter_3800(APEPredictor *p, + const int decoded, const int filter, + const int delayA, const int delayB, + const int start, const int shift) +{ + int32_t predictionA, predictionB, sign; + int32_t d0, d1, d2, d3, d4; + + p->buf[delayA] = p->lastA[filter]; + p->buf[delayB] = p->filterB[filter]; + if (p->sample_pos < start) { + predictionA = decoded + p->filterA[filter]; + p->lastA[filter] = decoded; + p->filterB[filter] = decoded; + p->filterA[filter] = predictionA; + return predictionA; + } + d2 = p->buf[delayA]; + d1 = (p->buf[delayA] - p->buf[delayA - 1]) << 1; + d0 = p->buf[delayA] + ((p->buf[delayA - 2] - p->buf[delayA - 1]) << 3); + d3 = p->buf[delayB] * 2 - p->buf[delayB - 1]; + d4 = p->buf[delayB]; + + predictionA = d0 * p->coeffsA[filter][0] + + d1 * 
p->coeffsA[filter][1] + + d2 * p->coeffsA[filter][2]; + + sign = APESIGN(decoded); + p->coeffsA[filter][0] += (((d0 >> 30) & 2) - 1) * sign; + p->coeffsA[filter][1] += (((d1 >> 28) & 8) - 4) * sign; + p->coeffsA[filter][2] += (((d2 >> 28) & 8) - 4) * sign; + + predictionB = d3 * p->coeffsB[filter][0] - + d4 * p->coeffsB[filter][1]; + p->lastA[filter] = decoded + (predictionA >> 11); + sign = APESIGN(p->lastA[filter]); + p->coeffsB[filter][0] += (((d3 >> 29) & 4) - 2) * sign; + p->coeffsB[filter][1] -= (((d4 >> 30) & 2) - 1) * sign; + + p->filterB[filter] = p->lastA[filter] + (predictionB >> shift); + p->filterA[filter] = p->filterB[filter] + ((p->filterA[filter] * 31) >> 5); + + return p->filterA[filter]; +} + +static void long_filter_high_3800(int32_t *buffer, int order, int shift, + int32_t *coeffs, int32_t *delay, int length) +{ + int i, j; + int32_t dotprod, sign; + + memset(coeffs, 0, order * sizeof(*coeffs)); + for (i = 0; i < order; i++) + delay[i] = buffer[i]; + for (i = order; i < length; i++) { + dotprod = 0; + sign = APESIGN(buffer[i]); + for (j = 0; j < order; j++) { + dotprod += delay[j] * coeffs[j]; + coeffs[j] -= (((delay[j] >> 30) & 2) - 1) * sign; + } + buffer[i] -= dotprod >> shift; + for (j = 0; j < order - 1; j++) + delay[j] = delay[j + 1]; + delay[order - 1] = buffer[i]; + } +} + +static void long_filter_ehigh_3830(int32_t *buffer, int length) +{ + int i, j; + int32_t dotprod, sign; + int32_t coeffs[8], delay[8]; + + memset(coeffs, 0, sizeof(coeffs)); + memset(delay, 0, sizeof(delay)); + for (i = 0; i < length; i++) { + dotprod = 0; + sign = APESIGN(buffer[i]); + for (j = 7; j >= 0; j--) { + dotprod += delay[j] * coeffs[j]; + coeffs[j] -= (((delay[j] >> 30) & 2) - 1) * sign; + } + for (j = 7; j > 0; j--) + delay[j] = delay[j - 1]; + delay[0] = buffer[i]; + buffer[i] -= dotprod >> 9; + } +} + +static void predictor_decode_stereo_3800(APEContext *ctx, int count) +{ + APEPredictor *p = &ctx->predictor; + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + int32_t coeffs[256], delay[256]; + int start = 4, shift = 10; + + if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) { + start = 16; + long_filter_high_3800(decoded0, 16, 9, coeffs, delay, count); + long_filter_high_3800(decoded1, 16, 9, coeffs, delay, count); + } else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) { + int order = 128, shift2 = 11; + + if (ctx->fileversion >= 3830) { + order <<= 1; + shift++; + shift2++; + long_filter_ehigh_3830(decoded0 + order, count - order); + long_filter_ehigh_3830(decoded1 + order, count - order); + } + start = order; + long_filter_high_3800(decoded0, order, shift2, coeffs, delay, count); + long_filter_high_3800(decoded1, order, shift2, coeffs, delay, count); + } + + while (count--) { + int X = *decoded0, Y = *decoded1; + if (ctx->compression_level == COMPRESSION_LEVEL_FAST) { + *decoded0 = filter_fast_3320(p, Y, 0, YDELAYA); + decoded0++; + *decoded1 = filter_fast_3320(p, X, 1, XDELAYA); + decoded1++; + } else { + *decoded0 = filter_3800(p, Y, 0, YDELAYA, YDELAYB, + start, shift); + decoded0++; + *decoded1 = filter_3800(p, X, 1, XDELAYA, XDELAYB, + start, shift); + decoded1++; + } + + /* Combined */ + p->buf++; + p->sample_pos++; + + /* Have we filled the history buffer? 
*/ + if (p->buf == p->historybuffer + HISTORY_SIZE) { + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); + p->buf = p->historybuffer; + } + } +} + +static void predictor_decode_mono_3800(APEContext *ctx, int count) +{ + APEPredictor *p = &ctx->predictor; + int32_t *decoded0 = ctx->decoded[0]; + int32_t coeffs[256], delay[256]; + int start = 4, shift = 10; + + if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) { + start = 16; + long_filter_high_3800(decoded0, 16, 9, coeffs, delay, count); + } else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) { + int order = 128, shift2 = 11; + + if (ctx->fileversion >= 3830) { + order <<= 1; + shift++; + shift2++; + long_filter_ehigh_3830(decoded0 + order, count - order); + } + start = order; + long_filter_high_3800(decoded0, order, shift2, coeffs, delay, count); + } + + while (count--) { + if (ctx->compression_level == COMPRESSION_LEVEL_FAST) { + *decoded0 = filter_fast_3320(p, *decoded0, 0, YDELAYA); + decoded0++; + } else { + *decoded0 = filter_3800(p, *decoded0, 0, YDELAYA, YDELAYB, + start, shift); + decoded0++; + } + + /* Combined */ + p->buf++; + p->sample_pos++; + + /* Have we filled the history buffer? */ + if (p->buf == p->historybuffer + HISTORY_SIZE) { + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); + p->buf = p->historybuffer; + } + } +} + +static av_always_inline int predictor_update_3930(APEPredictor *p, + const int decoded, const int filter, + const int delayA) +{ + int32_t predictionA, sign; + int32_t d0, d1, d2, d3; + + p->buf[delayA] = p->lastA[filter]; + d0 = p->buf[delayA ]; + d1 = p->buf[delayA ] - p->buf[delayA - 1]; + d2 = p->buf[delayA - 1] - p->buf[delayA - 2]; + d3 = p->buf[delayA - 2] - p->buf[delayA - 3]; + + predictionA = d0 * p->coeffsA[filter][0] + + d1 * p->coeffsA[filter][1] + + d2 * p->coeffsA[filter][2] + + d3 * p->coeffsA[filter][3]; + + p->lastA[filter] = decoded + (predictionA >> 9); + p->filterA[filter] = p->lastA[filter] + ((p->filterA[filter] * 31) >> 5); + + sign = APESIGN(decoded); + p->coeffsA[filter][0] += ((d0 < 0) * 2 - 1) * sign; + p->coeffsA[filter][1] += ((d1 < 0) * 2 - 1) * sign; + p->coeffsA[filter][2] += ((d2 < 0) * 2 - 1) * sign; + p->coeffsA[filter][3] += ((d3 < 0) * 2 - 1) * sign; + + return p->filterA[filter]; +} + +static void predictor_decode_stereo_3930(APEContext *ctx, int count) +{ + APEPredictor *p = &ctx->predictor; + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + + ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count); + + while (count--) { + /* Predictor Y */ + int Y = *decoded1, X = *decoded0; + *decoded0 = predictor_update_3930(p, Y, 0, YDELAYA); + decoded0++; + *decoded1 = predictor_update_3930(p, X, 1, XDELAYA); + decoded1++; + + /* Combined */ + p->buf++; + + /* Have we filled the history buffer? */ + if (p->buf == p->historybuffer + HISTORY_SIZE) { + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); + p->buf = p->historybuffer; + } + } +} + +static void predictor_decode_mono_3930(APEContext *ctx, int count) +{ + APEPredictor *p = &ctx->predictor; + int32_t *decoded0 = ctx->decoded[0]; + + ape_apply_filters(ctx, ctx->decoded[0], NULL, count); + + while (count--) { + *decoded0 = predictor_update_3930(p, *decoded0, 0, YDELAYA); + decoded0++; + + p->buf++; + + /* Have we filled the history buffer? 
*/ + if (p->buf == p->historybuffer + HISTORY_SIZE) { + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); + p->buf = p->historybuffer; + } + } +} + static av_always_inline int predictor_update_filter(APEPredictor *p, const int decoded, const int filter, const int delayA, const int delayB, @@ -573,11 +1158,13 @@ static av_always_inline int predictor_update_filter(APEPredictor *p, return p->filterA[filter]; } -static void predictor_decode_stereo(APEContext *ctx, int count) +static void predictor_decode_stereo_3950(APEContext *ctx, int count) { APEPredictor *p = &ctx->predictor; - int32_t *decoded0 = ctx->decoded0; - int32_t *decoded1 = ctx->decoded1; + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + + ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count); while (count--) { /* Predictor Y */ @@ -593,18 +1180,21 @@ static void predictor_decode_stereo(APEContext *ctx, int count) /* Have we filled the history buffer? */ if (p->buf == p->historybuffer + HISTORY_SIZE) { - memmove(p->historybuffer, p->buf, PREDICTOR_SIZE * sizeof(int32_t)); + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); p->buf = p->historybuffer; } } } -static void predictor_decode_mono(APEContext *ctx, int count) +static void predictor_decode_mono_3950(APEContext *ctx, int count) { APEPredictor *p = &ctx->predictor; - int32_t *decoded0 = ctx->decoded0; + int32_t *decoded0 = ctx->decoded[0]; int32_t predictionA, currentA, A, sign; + ape_apply_filters(ctx, ctx->decoded[0], NULL, count); + currentA = p->lastA[0]; while (count--) { @@ -633,7 +1223,8 @@ static void predictor_decode_mono(APEContext *ctx, int count) /* Have we filled the history buffer? */ if (p->buf == p->historybuffer + HISTORY_SIZE) { - memmove(p->historybuffer, p->buf, PREDICTOR_SIZE * sizeof(int32_t)); + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); p->buf = p->historybuffer; } @@ -651,8 +1242,8 @@ static void do_init_filter(APEFilter *f, int16_t *buf, int order) f->delay = f->historybuffer + order * 2; f->adaptcoeffs = f->historybuffer + order; - memset(f->historybuffer, 0, (order * 2) * sizeof(int16_t)); - memset(f->coeffs, 0, order * sizeof(int16_t)); + memset(f->historybuffer, 0, (order * 2) * sizeof(*f->historybuffer)); + memset(f->coeffs, 0, order * sizeof(*f->coeffs)); f->avg = 0; } @@ -691,7 +1282,7 @@ static void do_apply_filter(APEContext *ctx, int version, APEFilter *f, /* Update the adaption coefficients */ absres = FFABS(res); if (absres) - *f->adaptcoeffs = ((res & (1<<31)) - (1<<30)) >> + *f->adaptcoeffs = ((res & (-1<<31)) ^ (-1<<30)) >> (25 + (absres <= f->avg*3) + (absres <= f->avg*4/3)); else *f->adaptcoeffs = 0; @@ -708,7 +1299,7 @@ static void do_apply_filter(APEContext *ctx, int version, APEFilter *f, /* Have we filled the history buffer? */ if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) { memmove(f->historybuffer, f->delay - (order * 2), - (order * 2) * sizeof(int16_t)); + (order * 2) * sizeof(*f->historybuffer)); f->delay = f->historybuffer + order * 2; f->adaptcoeffs = f->historybuffer + order; } @@ -756,33 +1347,28 @@ static int init_frame_decoder(APEContext *ctx) static void ape_unpack_mono(APEContext *ctx, int count) { - int32_t *decoded0 = ctx->decoded0; - int32_t *decoded1 = ctx->decoded1; - if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) { - entropy_decode(ctx, count, 0); /* We are pure silence, so we're done. 
*/ av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence mono\n"); return; } - entropy_decode(ctx, count, 0); - ape_apply_filters(ctx, decoded0, NULL, count); + ctx->entropy_decode_mono(ctx, count); /* Now apply the predictor decoding */ - predictor_decode_mono(ctx, count); + ctx->predictor_decode_mono(ctx, count); /* Pseudo-stereo - just copy left channel to right channel */ if (ctx->channels == 2) { - memcpy(decoded1, decoded0, count * sizeof(*decoded1)); + memcpy(ctx->decoded[1], ctx->decoded[0], count * sizeof(*ctx->decoded[1])); } } static void ape_unpack_stereo(APEContext *ctx, int count) { int32_t left, right; - int32_t *decoded0 = ctx->decoded0; - int32_t *decoded1 = ctx->decoded1; + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) { /* We are pure silence, so we're done. */ @@ -790,11 +1376,10 @@ static void ape_unpack_stereo(APEContext *ctx, int count) return; } - entropy_decode(ctx, count, 1); - ape_apply_filters(ctx, decoded0, decoded1, count); + ctx->entropy_decode_stereo(ctx, count); /* Now apply the predictor decoding */ - predictor_decode_stereo(ctx, count); + ctx->predictor_decode_stereo(ctx, count); /* Decorrelate and scale to output depth */ while (count--) { @@ -806,58 +1391,70 @@ static void ape_unpack_stereo(APEContext *ctx, int count) } } -static int ape_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int ape_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { + AVFrame *frame = data; const uint8_t *buf = avpkt->data; - int buf_size = avpkt->size; APEContext *s = avctx->priv_data; - int16_t *samples = data; - uint32_t nblocks; - int i; + uint8_t *sample8; + int16_t *sample16; + int32_t *sample24; + int i, ch, ret; int blockstodecode; - int bytes_used; - - /* should not happen but who knows */ - if (BLOCKS_PER_LOOP * 2 * avctx->channels > *data_size) { - av_log (avctx, AV_LOG_ERROR, "Output buffer is too small.\n"); - return AVERROR(EINVAL); - } + int bytes_used = 0; /* this should never be negative, but bad things will happen if it is, so check it just to make sure. */ av_assert0(s->samples >= 0); if(!s->samples){ - uint32_t offset; - void *tmp_data; + uint32_t nblocks, offset; + int buf_size; - if (buf_size < 8) { + if (!avpkt->size) { + *got_frame_ptr = 0; + return 0; + } + if (avpkt->size < 8) { av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); return AVERROR_INVALIDDATA; } - - tmp_data = av_realloc(s->data, FFALIGN(buf_size, 4)); - if (!tmp_data) + buf_size = avpkt->size & ~3; + if (buf_size != avpkt->size) { + av_log(avctx, AV_LOG_WARNING, "packet size is not a multiple of 4. 
" + "extra bytes at the end will be skipped.\n"); + } + if (s->fileversion < 3950) // previous versions overread two bytes + buf_size += 2; + av_fast_malloc(&s->data, &s->data_size, buf_size); + if (!s->data) return AVERROR(ENOMEM); - s->data = tmp_data; s->dsp.bswap_buf((uint32_t*)s->data, (const uint32_t*)buf, buf_size >> 2); - s->ptr = s->last_ptr = s->data; + memset(s->data + (buf_size & ~3), 0, buf_size & 3); + s->ptr = s->data; s->data_end = s->data + buf_size; nblocks = bytestream_get_be32(&s->ptr); offset = bytestream_get_be32(&s->ptr); - if (offset > 3) { - av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n"); - s->data = NULL; - return AVERROR_INVALIDDATA; - } - if (s->data_end - s->ptr < offset) { - av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); - return AVERROR_INVALIDDATA; + if (s->fileversion >= 3900) { + if (offset > 3) { + av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n"); + s->data = NULL; + return AVERROR_INVALIDDATA; + } + if (s->data_end - s->ptr < offset) { + av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); + return AVERROR_INVALIDDATA; + } + s->ptr += offset; + } else { + init_get_bits(&s->gb, s->ptr, (s->data_end - s->ptr) * 8); + if (s->fileversion > 3800) + skip_bits_long(&s->gb, offset * 8); + else + skip_bits_long(&s->gb, offset); } - s->ptr += offset; if (!nblocks || nblocks > INT_MAX) { av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %u.\n", nblocks); @@ -865,23 +1462,41 @@ static int ape_decode_frame(AVCodecContext *avctx, } s->samples = nblocks; - memset(s->decoded0, 0, sizeof(s->decoded0)); - memset(s->decoded1, 0, sizeof(s->decoded1)); - /* Initialize the frame decoder */ if (init_frame_decoder(s) < 0) { av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n"); return AVERROR_INVALIDDATA; } + + bytes_used = avpkt->size; } if (!s->data) { - *data_size = 0; - return buf_size; + *got_frame_ptr = 0; + return avpkt->size; } - nblocks = s->samples; - blockstodecode = FFMIN(BLOCKS_PER_LOOP, nblocks); + blockstodecode = FFMIN(s->blocks_per_loop, s->samples); + // for old files coefficients were not interleaved, + // so we need to decode all of them at once + if (s->fileversion < 3930) + blockstodecode = s->samples; + + /* reallocate decoded sample buffer if needed */ + av_fast_malloc(&s->decoded_buffer, &s->decoded_size, + 2 * FFALIGN(blockstodecode, 8) * sizeof(*s->decoded_buffer)); + if (!s->decoded_buffer) + return AVERROR(ENOMEM); + memset(s->decoded_buffer, 0, s->decoded_size); + s->decoded[0] = s->decoded_buffer; + s->decoded[1] = s->decoded_buffer + FFALIGN(blockstodecode, 8); + + /* get output buffer */ + frame->nb_samples = blockstodecode; + if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } s->error=0; @@ -897,17 +1512,34 @@ static int ape_decode_frame(AVCodecContext *avctx, return AVERROR_INVALIDDATA; } - for (i = 0; i < blockstodecode; i++) { - *samples++ = s->decoded0[i]; - if(s->channels == 2) - *samples++ = s->decoded1[i]; + switch (s->bps) { + case 8: + for (ch = 0; ch < s->channels; ch++) { + sample8 = (uint8_t *)frame->data[ch]; + for (i = 0; i < blockstodecode; i++) + *sample8++ = (s->decoded[ch][i] + 0x80) & 0xff; + } + break; + case 16: + for (ch = 0; ch < s->channels; ch++) { + sample16 = (int16_t *)frame->data[ch]; + for (i = 0; i < blockstodecode; i++) + *sample16++ = s->decoded[ch][i]; + } + break; + case 24: + for (ch = 0; ch < s->channels; ch++) { + sample24 = (int32_t *)frame->data[ch]; + for (i = 0; i < blockstodecode; i++) + 
*sample24++ = s->decoded[ch][i] << 8; + } + break; } s->samples -= blockstodecode; - *data_size = blockstodecode * 2 * s->channels; - bytes_used = s->samples ? s->ptr - s->last_ptr : buf_size; - s->last_ptr = s->ptr; + *got_frame_ptr = 1; + return bytes_used; } @@ -917,15 +1549,35 @@ static void ape_flush(AVCodecContext *avctx) s->samples= 0; } +#define OFFSET(x) offsetof(APEContext, x) +#define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM) +static const AVOption options[] = { + { "max_samples", "maximum number of samples decoded per call", OFFSET(blocks_per_loop), AV_OPT_TYPE_INT, { .i64 = 4608 }, 1, INT_MAX, PAR, "max_samples" }, + { "all", "no maximum. decode all samples for each packet at once", 0, AV_OPT_TYPE_CONST, { .i64 = INT_MAX }, INT_MIN, INT_MAX, PAR, "max_samples" }, + { NULL}, +}; + +static const AVClass ape_decoder_class = { + .class_name = "APE decoder", + .item_name = av_default_item_name, + .option = options, + .version = LIBAVUTIL_VERSION_INT, +}; + AVCodec ff_ape_decoder = { .name = "ape", .type = AVMEDIA_TYPE_AUDIO, - .id = CODEC_ID_APE, + .id = AV_CODEC_ID_APE, .priv_data_size = sizeof(APEContext), .init = ape_decode_init, .close = ape_decode_close, .decode = ape_decode_frame, - .capabilities = CODEC_CAP_SUBFRAMES, - .flush = ape_flush, - .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"), + .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1, + .flush = ape_flush, + .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"), + .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P, + AV_SAMPLE_FMT_S16P, + AV_SAMPLE_FMT_S32P, + AV_SAMPLE_FMT_NONE }, + .priv_class = &ape_decoder_class, };
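For reference, the bit layout consumed by get_rice_ook() in the patch above is a plain Rice code: a unary quotient (a run of 0 bits terminated by a 1, as read by get_unary() with stop bit 1) followed by k raw remainder bits, i.e. x = (q << k) | r. The following standalone sketch re-reads that layout with a toy MSB-first bit reader; ToyBits, toy_get_bit and toy_get_rice are illustrative names, not FFmpeg API.

    /* Toy re-implementation of the Rice read done by get_rice_ook(). */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct { const uint8_t *buf; size_t bitpos; } ToyBits;

    static unsigned toy_get_bit(ToyBits *b)
    {
        unsigned bit = (b->buf[b->bitpos >> 3] >> (7 - (b->bitpos & 7))) & 1;
        b->bitpos++;
        return bit;
    }

    static unsigned toy_get_rice(ToyBits *b, int k)
    {
        unsigned q = 0, r = 0;

        while (!toy_get_bit(b))      /* unary quotient: 0 bits up to the terminating 1 */
            q++;
        for (int i = 0; i < k; i++)  /* k-bit binary remainder */
            r = (r << 1) | toy_get_bit(b);
        return (q << k) | r;
    }

    int main(void)
    {
        /* 000 1 101 = q=3, then r=0b101=5 with k=3  ->  x = (3<<3)|5 = 29 */
        const uint8_t data[] = { 0x1A, 0x00 };
        ToyBits b = { data, 0 };
        printf("%u\n", toy_get_rice(&b, 3));   /* prints 29 */
        return 0;
    }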
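All of the ape_decode_value_*() helpers and decode_array_0000() end with the same "Convert to signed" fold: odd codes map to positive residuals ((x >> 1) + 1) and even codes to non-positive ones (-(x >> 1)). Below is a minimal standalone sketch of that mapping together with a matching encoder-side fold; ape_fold() is a hypothetical helper shown only to make the round trip explicit.

    /* Sign fold used by the APE entropy decoders, plus its inverse. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* decode side, as in apedec.c: "Convert to signed" */
    static int32_t ape_unfold(uint32_t x)
    {
        return (x & 1) ? (int32_t)(x >> 1) + 1 : -(int32_t)(x >> 1);
    }

    /* hypothetical encoder-side fold, for illustration only */
    static uint32_t ape_fold(int32_t v)
    {
        return v > 0 ? (uint32_t)v * 2 - 1 : (uint32_t)(-v) * 2;
    }

    int main(void)
    {
        for (int32_t v = -5; v <= 5; v++) {
            uint32_t x = ape_fold(v);
            assert(ape_unfold(x) == v);      /* mapping round-trips */
            printf("%3d <-> %u\n", v, x);
        }
        return 0;
    }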
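The patch also replaces the fixed BLOCKS_PER_LOOP of 4608 with the "max_samples" private AVOption (default 4608, with the named constant "all" meaning decode the whole packet per call). A hedged usage sketch follows, assuming the current libavcodec API (which post-dates this patch) and that stream parameters come from a demuxer; open_ape_decoder() is an illustrative helper, not part of FFmpeg.

    /* Open the APE decoder with the "max_samples" private option. */
    #include <libavcodec/avcodec.h>
    #include <libavutil/dict.h>

    static AVCodecContext *open_ape_decoder(const AVCodecParameters *par)
    {
        const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_APE);
        AVCodecContext *ctx  = avcodec_alloc_context3(codec);
        AVDictionary *opts   = NULL;
        int ret;

        if (!ctx)
            return NULL;
        /* extradata (fileversion, compression level, flags), channel count and
         * sample rate normally come from the demuxer */
        avcodec_parameters_to_context(ctx, par);

        /* decode at most one 4608-sample block per call (the default), or pass
         * "all" to decode every sample of the packet in one call */
        av_dict_set(&opts, "max_samples", "all", 0);

        ret = avcodec_open2(ctx, codec, &opts);
        av_dict_free(&opts);
        if (ret < 0)
            avcodec_free_context(&ctx);   /* sets ctx back to NULL */
        return ctx;
    }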