X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fapedec.c;h=131c6f32d744f8a16628181186217848d4f48ceb;hb=bf52f773913cf74bdf0d2c8c2cb4473fa1b7801e;hp=061804c053ca43c87402504ecc1611554a34edfa;hpb=36ef5369ee9b336febc2c270f8718cec4476cb85;p=ffmpeg

diff --git a/libavcodec/apedec.c b/libavcodec/apedec.c
index 061804c053c..131c6f32d74 100644
--- a/libavcodec/apedec.c
+++ b/libavcodec/apedec.c
@@ -20,12 +20,18 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#include "avcodec.h"
-#include "dsputil.h"
-#include "bytestream.h"
-#include "libavutil/audioconvert.h"
+#include <inttypes.h>
+
 #include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
 #include "libavutil/opt.h"
+#include "apedsp.h"
+#include "avcodec.h"
+#include "bswapdsp.h"
+#include "bytestream.h"
+#include "internal.h"
+#include "get_bits.h"
+#include "unary.h"
 
 /**
  * @file
@@ -122,14 +128,16 @@ typedef struct APEPredictor {
     int32_t coeffsA[2][4];  ///< adaption coefficients
     int32_t coeffsB[2][5];  ///< adaption coefficients
     int32_t historybuffer[HISTORY_SIZE + PREDICTOR_SIZE];
+
+    unsigned int sample_pos;
 } APEPredictor;
 
 /** Decoder context */
 typedef struct APEContext {
     AVClass *class;                          ///< class for AVOptions
     AVCodecContext *avctx;
-    AVFrame frame;
-    DSPContext dsp;
+    BswapDSPContext bdsp;
+    APEDSPContext adsp;
     int channels;
     int samples;                             ///< samples left to decode in current frame
     int bps;
@@ -154,6 +162,7 @@ typedef struct APEContext {
     APERice riceX;                           ///< rice code parameters for the second channel
     APERice riceY;                           ///< rice code parameters for the first channel
     APEFilter filters[APE_FILTER_LEVELS][2]; ///< filters used for reconstruction
+    GetBitContext gb;
 
     uint8_t *data;                           ///< current frame data
     uint8_t *data_end;                       ///< frame data end
@@ -161,9 +170,32 @@ typedef struct APEContext {
     const uint8_t *ptr;                      ///< current position in frame data
 
     int error;
+
+    void (*entropy_decode_mono)(struct APEContext *ctx, int blockstodecode);
+    void (*entropy_decode_stereo)(struct APEContext *ctx, int blockstodecode);
+    void (*predictor_decode_mono)(struct APEContext *ctx, int count);
+    void (*predictor_decode_stereo)(struct APEContext *ctx, int count);
 } APEContext;
 
-// TODO: dsputilize
+static void ape_apply_filters(APEContext *ctx, int32_t *decoded0,
+                              int32_t *decoded1, int count);
+
+static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode);
+static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode);
+static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode);
+static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode);
+static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode);
+static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode);
+static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode);
+static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode);
+static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode);
+
+static void predictor_decode_mono_3800(APEContext *ctx, int count);
+static void predictor_decode_stereo_3800(APEContext *ctx, int count);
+static void predictor_decode_mono_3930(APEContext *ctx, int count);
+static void predictor_decode_stereo_3930(APEContext *ctx, int count);
+static void predictor_decode_mono_3950(APEContext *ctx, int count);
+static void predictor_decode_stereo_3950(APEContext *ctx, int count);
 
 static av_cold int ape_decode_close(AVCodecContext *avctx)
 {
@@ -180,6 +212,19 @@ static av_cold int
ape_decode_close(AVCodecContext *avctx) return 0; } +static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, const int16_t *v2, + const int16_t *v3, + int order, int mul) +{ + int res = 0; + + while (order--) { + res += *v1 * *v2++; + *v1++ += mul * *v3++; + } + return res; +} + static av_cold int ape_decode_init(AVCodecContext *avctx) { APEContext *s = avctx->priv_data; @@ -196,17 +241,17 @@ static av_cold int ape_decode_init(AVCodecContext *avctx) s->bps = avctx->bits_per_coded_sample; switch (s->bps) { case 8: - avctx->sample_fmt = AV_SAMPLE_FMT_U8; + avctx->sample_fmt = AV_SAMPLE_FMT_U8P; break; case 16: - avctx->sample_fmt = AV_SAMPLE_FMT_S16; + avctx->sample_fmt = AV_SAMPLE_FMT_S16P; break; case 24: - avctx->sample_fmt = AV_SAMPLE_FMT_S32; + avctx->sample_fmt = AV_SAMPLE_FMT_S32P; break; default: - av_log_ask_for_sample(avctx, "Unsupported bits per coded sample %d\n", - s->bps); + avpriv_request_sample(avctx, + "%d bits per coded sample", s->bps); return AVERROR_PATCHWELCOME; } s->avctx = avctx; @@ -217,7 +262,8 @@ static av_cold int ape_decode_init(AVCodecContext *avctx) av_log(avctx, AV_LOG_DEBUG, "Compression Level: %d - Flags: %d\n", s->compression_level, s->flags); - if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE) { + if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE || + (s->fileversion < 3930 && s->compression_level == COMPRESSION_LEVEL_INSANE)) { av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n", s->compression_level); return AVERROR_INVALIDDATA; @@ -231,11 +277,45 @@ static av_cold int ape_decode_init(AVCodecContext *avctx) filter_alloc_fail); } - ff_dsputil_init(&s->dsp, avctx); - avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO; + if (s->fileversion < 3860) { + s->entropy_decode_mono = entropy_decode_mono_0000; + s->entropy_decode_stereo = entropy_decode_stereo_0000; + } else if (s->fileversion < 3900) { + s->entropy_decode_mono = entropy_decode_mono_3860; + s->entropy_decode_stereo = entropy_decode_stereo_3860; + } else if (s->fileversion < 3930) { + s->entropy_decode_mono = entropy_decode_mono_3900; + s->entropy_decode_stereo = entropy_decode_stereo_3900; + } else if (s->fileversion < 3990) { + s->entropy_decode_mono = entropy_decode_mono_3900; + s->entropy_decode_stereo = entropy_decode_stereo_3930; + } else { + s->entropy_decode_mono = entropy_decode_mono_3990; + s->entropy_decode_stereo = entropy_decode_stereo_3990; + } + + if (s->fileversion < 3930) { + s->predictor_decode_mono = predictor_decode_mono_3800; + s->predictor_decode_stereo = predictor_decode_stereo_3800; + } else if (s->fileversion < 3950) { + s->predictor_decode_mono = predictor_decode_mono_3930; + s->predictor_decode_stereo = predictor_decode_stereo_3930; + } else { + s->predictor_decode_mono = predictor_decode_mono_3950; + s->predictor_decode_stereo = predictor_decode_stereo_3950; + } - avcodec_get_frame_defaults(&s->frame); - avctx->coded_frame = &s->frame; + s->adsp.scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_c; + + if (ARCH_ARM) + ff_apedsp_init_arm(&s->adsp); + if (ARCH_PPC) + ff_apedsp_init_ppc(&s->adsp); + if (ARCH_X86) + ff_apedsp_init_x86(&s->adsp); + + ff_bswapdsp_init(&s->bdsp); + avctx->channel_layout = (avctx->channels==2) ? 
AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO; return 0; filter_alloc_fail: @@ -404,67 +484,120 @@ static inline void update_rice(APERice *rice, unsigned int x) rice->k++; } -static inline int ape_decode_value(APEContext *ctx, APERice *rice) +static inline int get_rice_ook(GetBitContext *gb, int k) { - unsigned int x, overflow; + unsigned int x; - if (ctx->fileversion < 3990) { - int tmpk; + x = get_unary(gb, 1, get_bits_left(gb)); - overflow = range_get_symbol(ctx, counts_3970, counts_diff_3970); + if (k) + x = (x << k) | get_bits(gb, k); - if (overflow == (MODEL_ELEMENTS - 1)) { - tmpk = range_decode_bits(ctx, 5); - overflow = 0; - } else - tmpk = (rice->k < 1) ? 0 : rice->k - 1; + return x; +} - if (tmpk <= 16) - x = range_decode_bits(ctx, tmpk); - else if (tmpk <= 32) { - x = range_decode_bits(ctx, 16); - x |= (range_decode_bits(ctx, tmpk - 16) << 16); - } else { - av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk); - return AVERROR_INVALIDDATA; +static inline int ape_decode_value_3860(APEContext *ctx, GetBitContext *gb, + APERice *rice) +{ + unsigned int x, overflow; + + overflow = get_unary(gb, 1, get_bits_left(gb)); + + if (ctx->fileversion > 3880) { + while (overflow >= 16) { + overflow -= 16; + rice->k += 4; } - x += overflow << tmpk; + } + + if (!rice->k) + x = overflow; + else + x = (overflow << rice->k) + get_bits(gb, rice->k); + + rice->ksum += x - (rice->ksum + 8 >> 4); + if (rice->ksum < (rice->k ? 1 << (rice->k + 4) : 0)) + rice->k--; + else if (rice->ksum >= (1 << (rice->k + 5)) && rice->k < 24) + rice->k++; + + /* Convert to signed */ + if (x & 1) + return (x >> 1) + 1; + else + return -(x >> 1); +} + +static inline int ape_decode_value_3900(APEContext *ctx, APERice *rice) +{ + unsigned int x, overflow; + int tmpk; + + overflow = range_get_symbol(ctx, counts_3970, counts_diff_3970); + + if (overflow == (MODEL_ELEMENTS - 1)) { + tmpk = range_decode_bits(ctx, 5); + overflow = 0; + } else + tmpk = (rice->k < 1) ? 
0 : rice->k - 1; + + if (tmpk <= 16 || ctx->fileversion < 3910) + x = range_decode_bits(ctx, tmpk); + else if (tmpk <= 32) { + x = range_decode_bits(ctx, 16); + x |= (range_decode_bits(ctx, tmpk - 16) << 16); } else { - int base, pivot; + av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk); + return AVERROR_INVALIDDATA; + } + x += overflow << tmpk; + + update_rice(rice, x); + + /* Convert to signed */ + if (x & 1) + return (x >> 1) + 1; + else + return -(x >> 1); +} - pivot = rice->ksum >> 5; - if (pivot == 0) - pivot = 1; +static inline int ape_decode_value_3990(APEContext *ctx, APERice *rice) +{ + unsigned int x, overflow; + int base, pivot; - overflow = range_get_symbol(ctx, counts_3980, counts_diff_3980); + pivot = rice->ksum >> 5; + if (pivot == 0) + pivot = 1; - if (overflow == (MODEL_ELEMENTS - 1)) { - overflow = range_decode_bits(ctx, 16) << 16; - overflow |= range_decode_bits(ctx, 16); - } + overflow = range_get_symbol(ctx, counts_3980, counts_diff_3980); - if (pivot < 0x10000) { - base = range_decode_culfreq(ctx, pivot); - range_decode_update(ctx, 1, base); - } else { - int base_hi = pivot, base_lo; - int bbits = 0; + if (overflow == (MODEL_ELEMENTS - 1)) { + overflow = range_decode_bits(ctx, 16) << 16; + overflow |= range_decode_bits(ctx, 16); + } - while (base_hi & ~0xFFFF) { - base_hi >>= 1; - bbits++; - } - base_hi = range_decode_culfreq(ctx, base_hi + 1); - range_decode_update(ctx, 1, base_hi); - base_lo = range_decode_culfreq(ctx, 1 << bbits); - range_decode_update(ctx, 1, base_lo); + if (pivot < 0x10000) { + base = range_decode_culfreq(ctx, pivot); + range_decode_update(ctx, 1, base); + } else { + int base_hi = pivot, base_lo; + int bbits = 0; - base = (base_hi << bbits) + base_lo; + while (base_hi & ~0xFFFF) { + base_hi >>= 1; + bbits++; } + base_hi = range_decode_culfreq(ctx, base_hi + 1); + range_decode_update(ctx, 1, base_hi); + base_lo = range_decode_culfreq(ctx, 1 << bbits); + range_decode_update(ctx, 1, base_lo); - x = base + overflow * pivot; + base = (base_hi << bbits) + base_lo; } + x = base + overflow * pivot; + update_rice(rice, x); /* Convert to signed */ @@ -474,24 +607,148 @@ static inline int ape_decode_value(APEContext *ctx, APERice *rice) return -(x >> 1); } -static void entropy_decode(APEContext *ctx, int blockstodecode, int stereo) +static void decode_array_0000(APEContext *ctx, GetBitContext *gb, + int32_t *out, APERice *rice, int blockstodecode) +{ + int i; + int ksummax, ksummin; + + rice->ksum = 0; + for (i = 0; i < FFMIN(blockstodecode, 5); i++) { + out[i] = get_rice_ook(&ctx->gb, 10); + rice->ksum += out[i]; + } + rice->k = av_log2(rice->ksum / 10) + 1; + for (; i < FFMIN(blockstodecode, 64); i++) { + out[i] = get_rice_ook(&ctx->gb, rice->k); + rice->ksum += out[i]; + rice->k = av_log2(rice->ksum / ((i + 1) * 2)) + 1; + } + ksummax = 1 << rice->k + 7; + ksummin = rice->k ? (1 << rice->k + 6) : 0; + for (; i < blockstodecode; i++) { + out[i] = get_rice_ook(&ctx->gb, rice->k); + rice->ksum += out[i] - out[i - 64]; + while (rice->ksum < ksummin) { + rice->k--; + ksummin = rice->k ? ksummin >> 1 : 0; + ksummax >>= 1; + } + while (rice->ksum >= ksummax) { + rice->k++; + if (rice->k > 24) + return; + ksummax <<= 1; + ksummin = ksummin ? 
ksummin << 1 : 128; + } + } + + for (i = 0; i < blockstodecode; i++) { + if (out[i] & 1) + out[i] = (out[i] >> 1) + 1; + else + out[i] = -(out[i] >> 1); + } +} + +static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode) +{ + decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY, + blockstodecode); +} + +static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode) +{ + decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY, + blockstodecode); + decode_array_0000(ctx, &ctx->gb, ctx->decoded[1], &ctx->riceX, + blockstodecode); +} + +static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + + while (blockstodecode--) + *decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY); +} + +static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + int blocks = blockstodecode; + + while (blockstodecode--) + *decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY); + while (blocks--) + *decoded1++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceX); +} + +static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + + while (blockstodecode--) + *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY); +} + +static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + int blocks = blockstodecode; + + while (blockstodecode--) + *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY); + range_dec_normalize(ctx); + // because of some implementation peculiarities we need to backpedal here + ctx->ptr -= 1; + range_start_decoding(ctx); + while (blocks--) + *decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX); +} + +static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode) { int32_t *decoded0 = ctx->decoded[0]; int32_t *decoded1 = ctx->decoded[1]; while (blockstodecode--) { - *decoded0++ = ape_decode_value(ctx, &ctx->riceY); - if (stereo) - *decoded1++ = ape_decode_value(ctx, &ctx->riceX); + *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY); + *decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX); + } +} + +static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + + while (blockstodecode--) + *decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY); +} + +static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + + while (blockstodecode--) { + *decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY); + *decoded1++ = ape_decode_value_3990(ctx, &ctx->riceX); } } static int init_entropy_decoder(APEContext *ctx) { /* Read the CRC */ - if (ctx->data_end - ctx->ptr < 6) - return AVERROR_INVALIDDATA; - ctx->CRC = bytestream_get_be32(&ctx->ptr); + if (ctx->fileversion >= 3900) { + if (ctx->data_end - ctx->ptr < 6) + return AVERROR_INVALIDDATA; + ctx->CRC = bytestream_get_be32(&ctx->ptr); + } else { + ctx->CRC = get_bits_long(&ctx->gb, 32); + } /* Read the frame flags if they exist */ ctx->frameflags = 0; @@ -509,15 +766,29 @@ static int init_entropy_decoder(APEContext *ctx) ctx->riceY.k = 10; ctx->riceY.ksum = (1 << ctx->riceY.k) * 16; - /* The first 8 bits of input are ignored. */ - ctx->ptr++; + if (ctx->fileversion >= 3900) { + /* The first 8 bits of input are ignored. 
*/ + ctx->ptr++; - range_start_decoding(ctx); + range_start_decoding(ctx); + } return 0; } -static const int32_t initial_coeffs[4] = { +static const int32_t initial_coeffs_fast_3320[1] = { + 375, +}; + +static const int32_t initial_coeffs_a_3800[3] = { + 64, 115, 64, +}; + +static const int32_t initial_coeffs_b_3800[2] = { + 740, 0 +}; + +static const int32_t initial_coeffs_3930[4] = { 360, 317, -109, 98 }; @@ -530,13 +801,35 @@ static void init_predictor_decoder(APEContext *ctx) p->buf = p->historybuffer; /* Initialize and zero the coefficients */ - memcpy(p->coeffsA[0], initial_coeffs, sizeof(initial_coeffs)); - memcpy(p->coeffsA[1], initial_coeffs, sizeof(initial_coeffs)); + if (ctx->fileversion < 3930) { + if (ctx->compression_level == COMPRESSION_LEVEL_FAST) { + memcpy(p->coeffsA[0], initial_coeffs_fast_3320, + sizeof(initial_coeffs_fast_3320)); + memcpy(p->coeffsA[1], initial_coeffs_fast_3320, + sizeof(initial_coeffs_fast_3320)); + } else { + memcpy(p->coeffsA[0], initial_coeffs_a_3800, + sizeof(initial_coeffs_a_3800)); + memcpy(p->coeffsA[1], initial_coeffs_a_3800, + sizeof(initial_coeffs_a_3800)); + } + } else { + memcpy(p->coeffsA[0], initial_coeffs_3930, sizeof(initial_coeffs_3930)); + memcpy(p->coeffsA[1], initial_coeffs_3930, sizeof(initial_coeffs_3930)); + } memset(p->coeffsB, 0, sizeof(p->coeffsB)); + if (ctx->fileversion < 3930) { + memcpy(p->coeffsB[0], initial_coeffs_b_3800, + sizeof(initial_coeffs_b_3800)); + memcpy(p->coeffsB[1], initial_coeffs_b_3800, + sizeof(initial_coeffs_b_3800)); + } p->filterA[0] = p->filterA[1] = 0; p->filterB[0] = p->filterB[1] = 0; p->lastA[0] = p->lastA[1] = 0; + + p->sample_pos = 0; } /** Get inverse sign of integer (-1 for positive, 1 for negative and 0 for zero) */ @@ -544,6 +837,302 @@ static inline int APESIGN(int32_t x) { return (x < 0) - (x > 0); } +static av_always_inline int filter_fast_3320(APEPredictor *p, + const int decoded, const int filter, + const int delayA) +{ + int32_t predictionA; + + p->buf[delayA] = p->lastA[filter]; + if (p->sample_pos < 3) { + p->lastA[filter] = decoded; + p->filterA[filter] = decoded; + return decoded; + } + + predictionA = p->buf[delayA] * 2 - p->buf[delayA - 1]; + p->lastA[filter] = decoded + (predictionA * p->coeffsA[filter][0] >> 9); + + if ((decoded ^ predictionA) > 0) + p->coeffsA[filter][0]++; + else + p->coeffsA[filter][0]--; + + p->filterA[filter] += p->lastA[filter]; + + return p->filterA[filter]; +} + +static av_always_inline int filter_3800(APEPredictor *p, + const int decoded, const int filter, + const int delayA, const int delayB, + const int start, const int shift) +{ + int32_t predictionA, predictionB, sign; + int32_t d0, d1, d2, d3, d4; + + p->buf[delayA] = p->lastA[filter]; + p->buf[delayB] = p->filterB[filter]; + if (p->sample_pos < start) { + predictionA = decoded + p->filterA[filter]; + p->lastA[filter] = decoded; + p->filterB[filter] = decoded; + p->filterA[filter] = predictionA; + return predictionA; + } + d2 = p->buf[delayA]; + d1 = (p->buf[delayA] - p->buf[delayA - 1]) << 1; + d0 = p->buf[delayA] + ((p->buf[delayA - 2] - p->buf[delayA - 1]) << 3); + d3 = p->buf[delayB] * 2 - p->buf[delayB - 1]; + d4 = p->buf[delayB]; + + predictionA = d0 * p->coeffsA[filter][0] + + d1 * p->coeffsA[filter][1] + + d2 * p->coeffsA[filter][2]; + + sign = APESIGN(decoded); + p->coeffsA[filter][0] += (((d0 >> 30) & 2) - 1) * sign; + p->coeffsA[filter][1] += (((d1 >> 28) & 8) - 4) * sign; + p->coeffsA[filter][2] += (((d2 >> 28) & 8) - 4) * sign; + + predictionB = d3 * p->coeffsB[filter][0] - + d4 * 
p->coeffsB[filter][1]; + p->lastA[filter] = decoded + (predictionA >> 11); + sign = APESIGN(p->lastA[filter]); + p->coeffsB[filter][0] += (((d3 >> 29) & 4) - 2) * sign; + p->coeffsB[filter][1] -= (((d4 >> 30) & 2) - 1) * sign; + + p->filterB[filter] = p->lastA[filter] + (predictionB >> shift); + p->filterA[filter] = p->filterB[filter] + ((p->filterA[filter] * 31) >> 5); + + return p->filterA[filter]; +} + +static void long_filter_high_3800(int32_t *buffer, int order, int shift, + int32_t *coeffs, int32_t *delay, int length) +{ + int i, j; + int32_t dotprod, sign; + + memset(coeffs, 0, order * sizeof(*coeffs)); + for (i = 0; i < order; i++) + delay[i] = buffer[i]; + for (i = order; i < length; i++) { + dotprod = 0; + sign = APESIGN(buffer[i]); + for (j = 0; j < order; j++) { + dotprod += delay[j] * coeffs[j]; + coeffs[j] -= (((delay[j] >> 30) & 2) - 1) * sign; + } + buffer[i] -= dotprod >> shift; + for (j = 0; j < order - 1; j++) + delay[j] = delay[j + 1]; + delay[order - 1] = buffer[i]; + } +} + +static void long_filter_ehigh_3830(int32_t *buffer, int length) +{ + int i, j; + int32_t dotprod, sign; + int32_t coeffs[8] = { 0 }, delay[8] = { 0 }; + + for (i = 0; i < length; i++) { + dotprod = 0; + sign = APESIGN(buffer[i]); + for (j = 7; j >= 0; j--) { + dotprod += delay[j] * coeffs[j]; + coeffs[j] -= (((delay[j] >> 30) & 2) - 1) * sign; + } + for (j = 7; j > 0; j--) + delay[j] = delay[j - 1]; + delay[0] = buffer[i]; + buffer[i] -= dotprod >> 9; + } +} + +static void predictor_decode_stereo_3800(APEContext *ctx, int count) +{ + APEPredictor *p = &ctx->predictor; + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + int32_t coeffs[256], delay[256]; + int start = 4, shift = 10; + + if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) { + start = 16; + long_filter_high_3800(decoded0, 16, 9, coeffs, delay, count); + long_filter_high_3800(decoded1, 16, 9, coeffs, delay, count); + } else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) { + int order = 128, shift2 = 11; + + if (ctx->fileversion >= 3830) { + order <<= 1; + shift++; + shift2++; + long_filter_ehigh_3830(decoded0 + order, count - order); + long_filter_ehigh_3830(decoded1 + order, count - order); + } + start = order; + long_filter_high_3800(decoded0, order, shift2, coeffs, delay, count); + long_filter_high_3800(decoded1, order, shift2, coeffs, delay, count); + } + + while (count--) { + int X = *decoded0, Y = *decoded1; + if (ctx->compression_level == COMPRESSION_LEVEL_FAST) { + *decoded0 = filter_fast_3320(p, Y, 0, YDELAYA); + decoded0++; + *decoded1 = filter_fast_3320(p, X, 1, XDELAYA); + decoded1++; + } else { + *decoded0 = filter_3800(p, Y, 0, YDELAYA, YDELAYB, + start, shift); + decoded0++; + *decoded1 = filter_3800(p, X, 1, XDELAYA, XDELAYB, + start, shift); + decoded1++; + } + + /* Combined */ + p->buf++; + p->sample_pos++; + + /* Have we filled the history buffer? 
*/ + if (p->buf == p->historybuffer + HISTORY_SIZE) { + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); + p->buf = p->historybuffer; + } + } +} + +static void predictor_decode_mono_3800(APEContext *ctx, int count) +{ + APEPredictor *p = &ctx->predictor; + int32_t *decoded0 = ctx->decoded[0]; + int32_t coeffs[256], delay[256]; + int start = 4, shift = 10; + + if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) { + start = 16; + long_filter_high_3800(decoded0, 16, 9, coeffs, delay, count); + } else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) { + int order = 128, shift2 = 11; + + if (ctx->fileversion >= 3830) { + order <<= 1; + shift++; + shift2++; + long_filter_ehigh_3830(decoded0 + order, count - order); + } + start = order; + long_filter_high_3800(decoded0, order, shift2, coeffs, delay, count); + } + + while (count--) { + if (ctx->compression_level == COMPRESSION_LEVEL_FAST) { + *decoded0 = filter_fast_3320(p, *decoded0, 0, YDELAYA); + decoded0++; + } else { + *decoded0 = filter_3800(p, *decoded0, 0, YDELAYA, YDELAYB, + start, shift); + decoded0++; + } + + /* Combined */ + p->buf++; + p->sample_pos++; + + /* Have we filled the history buffer? */ + if (p->buf == p->historybuffer + HISTORY_SIZE) { + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); + p->buf = p->historybuffer; + } + } +} + +static av_always_inline int predictor_update_3930(APEPredictor *p, + const int decoded, const int filter, + const int delayA) +{ + int32_t predictionA, sign; + int32_t d0, d1, d2, d3; + + p->buf[delayA] = p->lastA[filter]; + d0 = p->buf[delayA ]; + d1 = p->buf[delayA ] - p->buf[delayA - 1]; + d2 = p->buf[delayA - 1] - p->buf[delayA - 2]; + d3 = p->buf[delayA - 2] - p->buf[delayA - 3]; + + predictionA = d0 * p->coeffsA[filter][0] + + d1 * p->coeffsA[filter][1] + + d2 * p->coeffsA[filter][2] + + d3 * p->coeffsA[filter][3]; + + p->lastA[filter] = decoded + (predictionA >> 9); + p->filterA[filter] = p->lastA[filter] + ((p->filterA[filter] * 31) >> 5); + + sign = APESIGN(decoded); + p->coeffsA[filter][0] += ((d0 < 0) * 2 - 1) * sign; + p->coeffsA[filter][1] += ((d1 < 0) * 2 - 1) * sign; + p->coeffsA[filter][2] += ((d2 < 0) * 2 - 1) * sign; + p->coeffsA[filter][3] += ((d3 < 0) * 2 - 1) * sign; + + return p->filterA[filter]; +} + +static void predictor_decode_stereo_3930(APEContext *ctx, int count) +{ + APEPredictor *p = &ctx->predictor; + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + + ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count); + + while (count--) { + /* Predictor Y */ + int Y = *decoded1, X = *decoded0; + *decoded0 = predictor_update_3930(p, Y, 0, YDELAYA); + decoded0++; + *decoded1 = predictor_update_3930(p, X, 1, XDELAYA); + decoded1++; + + /* Combined */ + p->buf++; + + /* Have we filled the history buffer? */ + if (p->buf == p->historybuffer + HISTORY_SIZE) { + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); + p->buf = p->historybuffer; + } + } +} + +static void predictor_decode_mono_3930(APEContext *ctx, int count) +{ + APEPredictor *p = &ctx->predictor; + int32_t *decoded0 = ctx->decoded[0]; + + ape_apply_filters(ctx, ctx->decoded[0], NULL, count); + + while (count--) { + *decoded0 = predictor_update_3930(p, *decoded0, 0, YDELAYA); + decoded0++; + + p->buf++; + + /* Have we filled the history buffer? 
*/ + if (p->buf == p->historybuffer + HISTORY_SIZE) { + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); + p->buf = p->historybuffer; + } + } +} + static av_always_inline int predictor_update_filter(APEPredictor *p, const int decoded, const int filter, const int delayA, const int delayB, @@ -591,12 +1180,14 @@ static av_always_inline int predictor_update_filter(APEPredictor *p, return p->filterA[filter]; } -static void predictor_decode_stereo(APEContext *ctx, int count) +static void predictor_decode_stereo_3950(APEContext *ctx, int count) { APEPredictor *p = &ctx->predictor; int32_t *decoded0 = ctx->decoded[0]; int32_t *decoded1 = ctx->decoded[1]; + ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count); + while (count--) { /* Predictor Y */ *decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB, @@ -618,12 +1209,14 @@ static void predictor_decode_stereo(APEContext *ctx, int count) } } -static void predictor_decode_mono(APEContext *ctx, int count) +static void predictor_decode_mono_3950(APEContext *ctx, int count) { APEPredictor *p = &ctx->predictor; int32_t *decoded0 = ctx->decoded[0]; int32_t predictionA, currentA, A, sign; + ape_apply_filters(ctx, ctx->decoded[0], NULL, count); + currentA = p->lastA[0]; while (count--) { @@ -690,9 +1283,10 @@ static void do_apply_filter(APEContext *ctx, int version, APEFilter *f, while (count--) { /* round fixedpoint scalar product */ - res = ctx->dsp.scalarproduct_and_madd_int16(f->coeffs, f->delay - order, - f->adaptcoeffs - order, - order, APESIGN(*data)); + res = ctx->adsp.scalarproduct_and_madd_int16(f->coeffs, + f->delay - order, + f->adaptcoeffs - order, + order, APESIGN(*data)); res = (res + (1 << (fracbits - 1))) >> fracbits; res += *data; *data++ = res; @@ -782,11 +1376,10 @@ static void ape_unpack_mono(APEContext *ctx, int count) return; } - entropy_decode(ctx, count, 0); - ape_apply_filters(ctx, ctx->decoded[0], NULL, count); + ctx->entropy_decode_mono(ctx, count); /* Now apply the predictor decoding */ - predictor_decode_mono(ctx, count); + ctx->predictor_decode_mono(ctx, count); /* Pseudo-stereo - just copy left channel to right channel */ if (ctx->channels == 2) { @@ -806,11 +1399,10 @@ static void ape_unpack_stereo(APEContext *ctx, int count) return; } - entropy_decode(ctx, count, 1); - ape_apply_filters(ctx, decoded0, decoded1, count); + ctx->entropy_decode_stereo(ctx, count); /* Now apply the predictor decoding */ - predictor_decode_stereo(ctx, count); + ctx->predictor_decode_stereo(ctx, count); /* Decorrelate and scale to output depth */ while (count--) { @@ -825,14 +1417,14 @@ static void ape_unpack_stereo(APEContext *ctx, int count) static int ape_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { + AVFrame *frame = data; const uint8_t *buf = avpkt->data; APEContext *s = avctx->priv_data; uint8_t *sample8; int16_t *sample16; int32_t *sample24; - int i, ret; + int i, ch, ret; int blockstodecode; - int bytes_used = 0; /* this should never be negative, but bad things will happen if it is, so check it just to make sure. */ @@ -855,29 +1447,41 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data, av_log(avctx, AV_LOG_WARNING, "packet size is not a multiple of 4. 
" "extra bytes at the end will be skipped.\n"); } - + if (s->fileversion < 3950) // previous versions overread two bytes + buf_size += 2; av_fast_malloc(&s->data, &s->data_size, buf_size); if (!s->data) return AVERROR(ENOMEM); - s->dsp.bswap_buf((uint32_t*)s->data, (const uint32_t*)buf, buf_size >> 2); + s->bdsp.bswap_buf((uint32_t *) s->data, (const uint32_t *) buf, + buf_size >> 2); + memset(s->data + (buf_size & ~3), 0, buf_size & 3); s->ptr = s->data; s->data_end = s->data + buf_size; nblocks = bytestream_get_be32(&s->ptr); offset = bytestream_get_be32(&s->ptr); - if (offset > 3) { - av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n"); - s->data = NULL; - return AVERROR_INVALIDDATA; - } - if (s->data_end - s->ptr < offset) { - av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); - return AVERROR_INVALIDDATA; + if (s->fileversion >= 3900) { + if (offset > 3) { + av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n"); + s->data = NULL; + return AVERROR_INVALIDDATA; + } + if (s->data_end - s->ptr < offset) { + av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); + return AVERROR_INVALIDDATA; + } + s->ptr += offset; + } else { + init_get_bits(&s->gb, s->ptr, (s->data_end - s->ptr) * 8); + if (s->fileversion > 3800) + skip_bits_long(&s->gb, offset * 8); + else + skip_bits_long(&s->gb, offset); } - s->ptr += offset; if (!nblocks || nblocks > INT_MAX) { - av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %u.\n", nblocks); + av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %"PRIu32".\n", + nblocks); return AVERROR_INVALIDDATA; } s->samples = nblocks; @@ -888,7 +1492,6 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data, return AVERROR_INVALIDDATA; } - bytes_used = avpkt->size; } if (!s->data) { @@ -897,6 +1500,10 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data, } blockstodecode = FFMIN(s->blocks_per_loop, s->samples); + // for old files coefficients were not interleaved, + // so we need to decode all of them at once + if (s->fileversion < 3930) + blockstodecode = s->samples; /* reallocate decoded sample buffer if needed */ av_fast_malloc(&s->decoded_buffer, &s->decoded_size, @@ -908,8 +1515,8 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data, s->decoded[1] = s->decoded_buffer + FFALIGN(blockstodecode, 8); /* get output buffer */ - s->frame.nb_samples = blockstodecode; - if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + frame->nb_samples = blockstodecode; + if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } @@ -930,37 +1537,33 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data, switch (s->bps) { case 8: - sample8 = (uint8_t *)s->frame.data[0]; - for (i = 0; i < blockstodecode; i++) { - *sample8++ = (s->decoded[0][i] + 0x80) & 0xff; - if (s->channels == 2) - *sample8++ = (s->decoded[1][i] + 0x80) & 0xff; + for (ch = 0; ch < s->channels; ch++) { + sample8 = (uint8_t *)frame->data[ch]; + for (i = 0; i < blockstodecode; i++) + *sample8++ = (s->decoded[ch][i] + 0x80) & 0xff; } break; case 16: - sample16 = (int16_t *)s->frame.data[0]; - for (i = 0; i < blockstodecode; i++) { - *sample16++ = s->decoded[0][i]; - if (s->channels == 2) - *sample16++ = s->decoded[1][i]; + for (ch = 0; ch < s->channels; ch++) { + sample16 = (int16_t *)frame->data[ch]; + for (i = 0; i < blockstodecode; i++) + *sample16++ = s->decoded[ch][i]; } break; case 24: - sample24 = (int32_t *)s->frame.data[0]; - for (i = 0; i < blockstodecode; i++) { - *sample24++ = s->decoded[0][i] << 8; - 
if (s->channels == 2) - *sample24++ = s->decoded[1][i] << 8; + for (ch = 0; ch < s->channels; ch++) { + sample24 = (int32_t *)frame->data[ch]; + for (i = 0; i < blockstodecode; i++) + *sample24++ = s->decoded[ch][i] << 8; } break; } s->samples -= blockstodecode; - *got_frame_ptr = 1; - *(AVFrame *)data = s->frame; + *got_frame_ptr = 1; - return bytes_used; + return (s->samples == 0) ? avpkt->size : 0; } static void ape_flush(AVCodecContext *avctx) @@ -972,8 +1575,8 @@ static void ape_flush(AVCodecContext *avctx) #define OFFSET(x) offsetof(APEContext, x) #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM) static const AVOption options[] = { - { "max_samples", "maximum number of samples decoded per call", OFFSET(blocks_per_loop), AV_OPT_TYPE_INT, { 4608 }, 1, INT_MAX, PAR, "max_samples" }, - { "all", "no maximum. decode all samples for each packet at once", 0, AV_OPT_TYPE_CONST, { INT_MAX }, INT_MIN, INT_MAX, PAR, "max_samples" }, + { "max_samples", "maximum number of samples decoded per call", OFFSET(blocks_per_loop), AV_OPT_TYPE_INT, { .i64 = 4608 }, 1, INT_MAX, PAR, "max_samples" }, + { "all", "no maximum. decode all samples for each packet at once", 0, AV_OPT_TYPE_CONST, { .i64 = INT_MAX }, INT_MIN, INT_MAX, PAR, "max_samples" }, { NULL}, }; @@ -986,6 +1589,7 @@ static const AVClass ape_decoder_class = { AVCodec ff_ape_decoder = { .name = "ape", + .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"), .type = AVMEDIA_TYPE_AUDIO, .id = AV_CODEC_ID_APE, .priv_data_size = sizeof(APEContext), @@ -994,6 +1598,9 @@ AVCodec ff_ape_decoder = { .decode = ape_decode_frame, .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1, .flush = ape_flush, - .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"), + .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P, + AV_SAMPLE_FMT_S16P, + AV_SAMPLE_FMT_S32P, + AV_SAMPLE_FMT_NONE }, .priv_class = &ape_decoder_class, };
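
Editorial note: the patch swaps DSPContext for a minimal APEDSPContext whose only member used here is scalarproduct_and_madd_int16(); do_apply_filter() calls it once per sample to get the FIR dot product while simultaneously adapting the filter coefficients. Below is a minimal standalone sketch of that behaviour, reusing the C fallback added by the patch; the driver values in main() are illustrative stand-ins, not taken from the decoder.

#include <stdint.h>
#include <stdio.h>

/* C fallback from the patch: returns sum(v1[i] * v2[i]) over `order` elements
 * and, as a side effect, does v1[i] += mul * v3[i]. */
static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, const int16_t *v2,
                                              const int16_t *v3,
                                              int order, int mul)
{
    int res = 0;

    while (order--) {
        res += *v1 * *v2++;
        *v1++ += mul * *v3++;
    }
    return res;
}

int main(void)
{
    /* Stand-ins for f->coeffs, f->delay - order and f->adaptcoeffs - order
     * as used in do_apply_filter(). */
    int16_t       coeffs[4] = {  1,  2,  3,  4 };
    const int16_t delay[4]  = { 10, 20, 30, 40 };
    const int16_t adapt[4]  = {  1, -1,  1, -1 };

    /* mul is APESIGN(*data) in the decoder, i.e. -1, 0 or 1. */
    int32_t res = scalarproduct_and_madd_int16_c(coeffs, delay, adapt, 4, -1);

    /* prints: dot = 300, coeffs[0] = 0 */
    printf("dot = %d, coeffs[0] = %d\n", (int)res, coeffs[0]);
    return 0;
}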
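
Editorial note: every entropy decoder added by the patch (decode_array_0000(), ape_decode_value_3860(), _3900() and _3990()) ends with the same unsigned-to-signed fold, where odd codes become positive residuals and even codes become zero or negative ones. A tiny sketch of just that mapping follows; the helper name is hypothetical, the arithmetic is the patch's "Convert to signed" step.

#include <assert.h>
#include <stdint.h>

static int32_t ape_unfold_sign(uint32_t x)
{
    /* 0 -> 0, 1 -> 1, 2 -> -1, 3 -> 2, 4 -> -2, ... */
    if (x & 1)
        return (x >> 1) + 1;
    return -(int32_t)(x >> 1);
}

int main(void)
{
    assert(ape_unfold_sign(0) ==  0);
    assert(ape_unfold_sign(1) ==  1);
    assert(ape_unfold_sign(2) == -1);
    assert(ape_unfold_sign(3) ==  2);
    assert(ape_unfold_sign(4) == -2);
    return 0;
}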