X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fatrac1.c;h=52d43e2c946aa3641d0388bcebd69ffdbbd2b865;hb=a93faf30d688b872e0ecc453b2dfc36470683ed6;hp=5ff8816476f2f0826dafbf5087fea29340bd0abb;hpb=b1078e9fe6b5d8f034d15a6ab91430fd41921fe2;p=ffmpeg diff --git a/libavcodec/atrac1.c b/libavcodec/atrac1.c index 5ff8816476f..52d43e2c946 100644 --- a/libavcodec/atrac1.c +++ b/libavcodec/atrac1.c @@ -1,28 +1,28 @@ /* - * Atrac 1 compatible decoder + * ATRAC1 compatible decoder * Copyright (c) 2009 Maxim Poliakovski * Copyright (c) 2009 Benjamin Larsson * - * This file is part of FFmpeg. + * This file is part of Libav. * - * FFmpeg is free software; you can redistribute it and/or + * Libav is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * FFmpeg is distributed in the hope that it will be useful, + * Libav is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software + * License along with Libav; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file - * Atrac 1 compatible decoder. + * ATRAC1 compatible decoder. * This decoder handles raw ATRAC1 data and probably SDDS data. */ @@ -32,10 +32,13 @@ #include #include +#include "libavutil/float_dsp.h" + #include "avcodec.h" -#include "get_bits.h" -#include "dsputil.h" +#include "bitstream.h" #include "fft.h" +#include "internal.h" +#include "sinewin.h" #include "atrac.h" #include "atrac1data.h" @@ -55,32 +58,30 @@ /** * Sound unit struct, one unit is used per channel */ -typedef struct { +typedef struct AT1SUCtx { int log2_block_count[AT1_QMF_BANDS]; ///< log2 number of blocks in a band int num_bfus; ///< number of Block Floating Units float* spectrum[2]; - DECLARE_ALIGNED(16, float, spec1)[AT1_SU_SAMPLES]; ///< mdct buffer - DECLARE_ALIGNED(16, float, spec2)[AT1_SU_SAMPLES]; ///< mdct buffer - DECLARE_ALIGNED(16, float, fst_qmf_delay)[46]; ///< delay line for the 1st stacked QMF filter - DECLARE_ALIGNED(16, float, snd_qmf_delay)[46]; ///< delay line for the 2nd stacked QMF filter - DECLARE_ALIGNED(16, float, last_qmf_delay)[256+23]; ///< delay line for the last stacked QMF filter + DECLARE_ALIGNED(32, float, spec1)[AT1_SU_SAMPLES]; ///< mdct buffer + DECLARE_ALIGNED(32, float, spec2)[AT1_SU_SAMPLES]; ///< mdct buffer + DECLARE_ALIGNED(32, float, fst_qmf_delay)[46]; ///< delay line for the 1st stacked QMF filter + DECLARE_ALIGNED(32, float, snd_qmf_delay)[46]; ///< delay line for the 2nd stacked QMF filter + DECLARE_ALIGNED(32, float, last_qmf_delay)[256+23]; ///< delay line for the last stacked QMF filter } AT1SUCtx; /** * The atrac1 context, holds all needed parameters for decoding */ -typedef struct { +typedef struct AT1Ctx { AT1SUCtx SUs[AT1_MAX_CHANNELS]; ///< channel sound unit - DECLARE_ALIGNED(16, float, spec)[AT1_SU_SAMPLES]; ///< the mdct spectrum buffer + DECLARE_ALIGNED(32, float, spec)[AT1_SU_SAMPLES]; ///< the mdct spectrum buffer - DECLARE_ALIGNED(16, float, low)[256]; - DECLARE_ALIGNED(16, float, mid)[256]; - DECLARE_ALIGNED(16, float, high)[512]; + 
DECLARE_ALIGNED(32, float, low)[256]; + DECLARE_ALIGNED(32, float, mid)[256]; + DECLARE_ALIGNED(32, float, high)[512]; float* bands[3]; - DECLARE_ALIGNED(16, float, out_samples)[AT1_MAX_CHANNELS][AT1_SU_SAMPLES]; FFTContext mdct_ctx[3]; - int channels; - DSPContext dsp; + AVFloatDSPContext fdsp; } AT1Ctx; /** size of the transform in samples in the long mode for each QMF band */ @@ -99,7 +100,7 @@ static void at1_imdct(AT1Ctx *q, float *spec, float *out, int nbits, for (i = 0; i < transf_size / 2; i++) FFSWAP(float, spec[i], spec[transf_size - 1 - i]); } - ff_imdct_half(mdct_context, out, spec); + mdct_context->imdct_half(mdct_context, out, spec); } @@ -128,7 +129,7 @@ static int at1_imdct_block(AT1SUCtx* su, AT1Ctx *q) nbits = mdct_long_nbits[band_num] - log2_block_count; if (nbits != 5 && nbits != 7 && nbits != 8) - return -1; + return AVERROR_INVALIDDATA; } else { block_size = 32; nbits = 5; @@ -140,8 +141,8 @@ static int at1_imdct_block(AT1SUCtx* su, AT1Ctx *q) at1_imdct(q, &q->spec[pos], &su->spectrum[0][ref_pos + start_pos], nbits, band_num); /* overlap and window */ - q->dsp.vector_fmul_window(&q->bands[band_num][start_pos], prev_buf, - &su->spectrum[0][ref_pos + start_pos], ff_sine_32, 0, 16); + q->fdsp.vector_fmul_window(&q->bands[band_num][start_pos], prev_buf, + &su->spectrum[0][ref_pos + start_pos], ff_sine_32, 16); prev_buf = &su->spectrum[0][ref_pos+start_pos + 16]; start_pos += block_size; @@ -164,30 +165,31 @@ static int at1_imdct_block(AT1SUCtx* su, AT1Ctx *q) * Parse the block size mode byte */ -static int at1_parse_bsm(GetBitContext* gb, int log2_block_cnt[AT1_QMF_BANDS]) +static int at1_parse_bsm(BitstreamContext *bc, + int log2_block_cnt[AT1_QMF_BANDS]) { int log2_block_count_tmp, i; for (i = 0; i < 2; i++) { /* low and mid band */ - log2_block_count_tmp = get_bits(gb, 2); + log2_block_count_tmp = bitstream_read(bc, 2); if (log2_block_count_tmp & 1) - return -1; + return AVERROR_INVALIDDATA; log2_block_cnt[i] = 2 - log2_block_count_tmp; } /* high band */ - log2_block_count_tmp = get_bits(gb, 2); + log2_block_count_tmp = bitstream_read(bc, 2); if (log2_block_count_tmp != 0 && log2_block_count_tmp != 3) - return -1; + return AVERROR_INVALIDDATA; log2_block_cnt[IDX_HIGH_BAND] = 3 - log2_block_count_tmp; - skip_bits(gb, 2); + bitstream_skip(bc, 2); return 0; } -static int at1_unpack_dequant(GetBitContext* gb, AT1SUCtx* su, +static int at1_unpack_dequant(BitstreamContext *bc, AT1SUCtx *su, float spec[AT1_SU_SAMPLES]) { int bits_used, band_num, bfu_num, i; @@ -195,22 +197,22 @@ static int at1_unpack_dequant(GetBitContext* gb, AT1SUCtx* su, uint8_t idsfs[AT1_MAX_BFU]; ///< the scalefactor indexes for each BFU /* parse the info byte (2nd byte) telling how much BFUs were coded */ - su->num_bfus = bfu_amount_tab1[get_bits(gb, 3)]; + su->num_bfus = bfu_amount_tab1[bitstream_read(bc, 3)]; /* calc number of consumed bits: num_BFUs * (idwl(4bits) + idsf(6bits)) + log2_block_count(8bits) + info_byte(8bits) + info_byte_copy(8bits) + log2_block_count_copy(8bits) */ bits_used = su->num_bfus * 10 + 32 + - bfu_amount_tab2[get_bits(gb, 2)] + - (bfu_amount_tab3[get_bits(gb, 3)] << 1); + bfu_amount_tab2[bitstream_read(bc, 2)] + + (bfu_amount_tab3[bitstream_read(bc, 3)] << 1); /* get word length index (idwl) for each BFU */ for (i = 0; i < su->num_bfus; i++) - idwls[i] = get_bits(gb, 4); + idwls[i] = bitstream_read(bc, 4); /* get scalefactor index (idsf) for each BFU */ for (i = 0; i < su->num_bfus; i++) - idsfs[i] = get_bits(gb, 6); + idsfs[i] = bitstream_read(bc, 6); /* zero idwl/idsf for 
empty BFUs */ for (i = su->num_bfus; i < AT1_MAX_BFU; i++) @@ -223,12 +225,12 @@ static int at1_unpack_dequant(GetBitContext* gb, AT1SUCtx* su, int num_specs = specs_per_bfu[bfu_num]; int word_len = !!idwls[bfu_num] + idwls[bfu_num]; - float scale_factor = sf_table[idsfs[bfu_num]]; + float scale_factor = ff_atrac_sf_table[idsfs[bfu_num]]; bits_used += word_len * num_specs; /* add number of bits consumed by current BFU */ /* check for bitstream overflow */ if (bits_used > AT1_SU_MAX_BITS) - return -1; + return AVERROR_INVALIDDATA; /* get the position of the 1st spec according to the block size mode */ pos = su->log2_block_count[band_num] ? bfu_start_short[bfu_num] : bfu_start_long[bfu_num]; @@ -240,7 +242,7 @@ static int at1_unpack_dequant(GetBitContext* gb, AT1SUCtx* su, /* read in a quantized spec and convert it to * signed int and then inverse quantization */ - spec[pos+i] = get_sbits(gb, word_len) * scale_factor * max_quant; + spec[pos+i] = bitstream_read_signed(bc, word_len) * scale_factor * max_quant; } } else { /* word_len = 0 -> empty BFU, zero all specs in the emty BFU */ memset(&spec[pos], 0, num_specs * sizeof(float)); @@ -258,88 +260,105 @@ static void at1_subband_synthesis(AT1Ctx *q, AT1SUCtx* su, float *pOut) float iqmf_temp[512 + 46]; /* combine low and middle bands */ - atrac_iqmf(q->bands[0], q->bands[1], 128, temp, su->fst_qmf_delay, iqmf_temp); + ff_atrac_iqmf(q->bands[0], q->bands[1], 128, temp, su->fst_qmf_delay, iqmf_temp); /* delay the signal of the high band by 23 samples */ memcpy( su->last_qmf_delay, &su->last_qmf_delay[256], sizeof(float) * 23); memcpy(&su->last_qmf_delay[23], q->bands[2], sizeof(float) * 256); /* combine (low + middle) and high bands */ - atrac_iqmf(temp, su->last_qmf_delay, 256, pOut, su->snd_qmf_delay, iqmf_temp); + ff_atrac_iqmf(temp, su->last_qmf_delay, 256, pOut, su->snd_qmf_delay, iqmf_temp); } static int atrac1_decode_frame(AVCodecContext *avctx, void *data, - int *data_size, AVPacket *avpkt) + int *got_frame_ptr, AVPacket *avpkt) { + AVFrame *frame = data; const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; AT1Ctx *q = avctx->priv_data; - int ch, ret, i; - GetBitContext gb; - float* samples = data; + int ch, ret; + BitstreamContext bc; + + if (buf_size < 212 * avctx->channels) { + av_log(avctx, AV_LOG_ERROR, "Not enough data to decode!\n"); + return AVERROR_INVALIDDATA; + } - if (buf_size < 212 * q->channels) { - av_log(q,AV_LOG_ERROR,"Not enought data to decode!\n"); - return -1; + /* get output buffer */ + frame->nb_samples = AT1_SU_SAMPLES; + if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } - for (ch = 0; ch < q->channels; ch++) { + for (ch = 0; ch < avctx->channels; ch++) { AT1SUCtx* su = &q->SUs[ch]; - init_get_bits(&gb, &buf[212 * ch], 212 * 8); + bitstream_init8(&bc, &buf[212 * ch], 212); /* parse block_size_mode, 1st byte */ - ret = at1_parse_bsm(&gb, su->log2_block_count); + ret = at1_parse_bsm(&bc, su->log2_block_count); if (ret < 0) return ret; - ret = at1_unpack_dequant(&gb, su, q->spec); + ret = at1_unpack_dequant(&bc, su, q->spec); if (ret < 0) return ret; ret = at1_imdct_block(su, q); if (ret < 0) return ret; - at1_subband_synthesis(q, su, q->out_samples[ch]); + at1_subband_synthesis(q, su, (float *)frame->extended_data[ch]); } - /* interleave; FIXME, should create/use a DSP function */ - if (q->channels == 1) { - /* mono */ - memcpy(samples, q->out_samples[0], AT1_SU_SAMPLES * 4); - } else { - /* stereo */ - for (i = 0; i < AT1_SU_SAMPLES; 
i++) { - samples[i * 2] = q->out_samples[0][i]; - samples[i * 2 + 1] = q->out_samples[1][i]; - } - } + *got_frame_ptr = 1; - *data_size = q->channels * AT1_SU_SAMPLES * sizeof(*samples); return avctx->block_align; } +static av_cold int atrac1_decode_end(AVCodecContext * avctx) +{ + AT1Ctx *q = avctx->priv_data; + + ff_mdct_end(&q->mdct_ctx[0]); + ff_mdct_end(&q->mdct_ctx[1]); + ff_mdct_end(&q->mdct_ctx[2]); + + return 0; +} + + static av_cold int atrac1_decode_init(AVCodecContext *avctx) { AT1Ctx *q = avctx->priv_data; + int ret; - avctx->sample_fmt = SAMPLE_FMT_FLT; + avctx->sample_fmt = AV_SAMPLE_FMT_FLTP; - q->channels = avctx->channels; + if (avctx->channels < 1 || avctx->channels > AT1_MAX_CHANNELS) { + av_log(avctx, AV_LOG_ERROR, "Unsupported number of channels: %d\n", + avctx->channels); + return AVERROR(EINVAL); + } /* Init the mdct transforms */ - ff_mdct_init(&q->mdct_ctx[0], 6, 1, -1.0/ (1 << 15)); - ff_mdct_init(&q->mdct_ctx[1], 8, 1, -1.0/ (1 << 15)); - ff_mdct_init(&q->mdct_ctx[2], 9, 1, -1.0/ (1 << 15)); + if ((ret = ff_mdct_init(&q->mdct_ctx[0], 6, 1, -1.0/ (1 << 15))) || + (ret = ff_mdct_init(&q->mdct_ctx[1], 8, 1, -1.0/ (1 << 15))) || + (ret = ff_mdct_init(&q->mdct_ctx[2], 9, 1, -1.0/ (1 << 15)))) { + av_log(avctx, AV_LOG_ERROR, "Error initializing MDCT\n"); + atrac1_decode_end(avctx); + return ret; + } ff_init_ff_sine_windows(5); - atrac_generate_tables(); + ff_atrac_generate_tables(); - dsputil_init(&q->dsp, avctx); + avpriv_float_dsp_init(&q->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT); q->bands[0] = q->low; q->bands[1] = q->mid; @@ -355,23 +374,16 @@ static av_cold int atrac1_decode_init(AVCodecContext *avctx) } -static av_cold int atrac1_decode_end(AVCodecContext * avctx) { - AT1Ctx *q = avctx->priv_data; - - ff_mdct_end(&q->mdct_ctx[0]); - ff_mdct_end(&q->mdct_ctx[1]); - ff_mdct_end(&q->mdct_ctx[2]); - return 0; -} - - -AVCodec atrac1_decoder = { - .name = "atrac1", - .type = AVMEDIA_TYPE_AUDIO, - .id = CODEC_ID_ATRAC1, +AVCodec ff_atrac1_decoder = { + .name = "atrac1", + .long_name = NULL_IF_CONFIG_SMALL("ATRAC1 (Adaptive TRansform Acoustic Coding)"), + .type = AVMEDIA_TYPE_AUDIO, + .id = AV_CODEC_ID_ATRAC1, .priv_data_size = sizeof(AT1Ctx), - .init = atrac1_decode_init, - .close = atrac1_decode_end, - .decode = atrac1_decode_frame, - .long_name = NULL_IF_CONFIG_SMALL("Atrac 1 (Adaptive TRansform Acoustic Coding)"), + .init = atrac1_decode_init, + .close = atrac1_decode_end, + .decode = atrac1_decode_frame, + .capabilities = AV_CODEC_CAP_DR1, + .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP, + AV_SAMPLE_FMT_NONE }, };
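
Note on the sample-format switch above: the decoder now declares AV_SAMPLE_FMT_FLTP and writes each channel's output straight into frame->extended_data[ch], so the manual stereo interleave that the patch removes (the old "FIXME, should create/use a DSP function" loop) is no longer needed. The standalone C sketch below is only an illustration of the two memory layouts, not code from the patch; NSAMPLES and interleave_stereo are made-up names standing in for AT1_SU_SAMPLES and the removed loop.

/* Standalone illustration: how the removed interleaving loop relates to the
 * new planar output.  With the old AV_SAMPLE_FMT_FLT contract the decoder had
 * to weave both channels into one packed buffer; with AV_SAMPLE_FMT_FLTP each
 * channel lives in its own plane, so this step disappears.  Names here are
 * local to the example and do not exist in libavcodec. */
#include <stddef.h>
#include <stdio.h>

#define NSAMPLES 8   /* stands in for AT1_SU_SAMPLES (512) */

static void interleave_stereo(float *dst,
                              const float *left, const float *right,
                              size_t n)
{
    size_t i;
    /* same pattern the old decoder used: L R L R ... */
    for (i = 0; i < n; i++) {
        dst[2 * i]     = left[i];
        dst[2 * i + 1] = right[i];
    }
}

int main(void)
{
    float left[NSAMPLES], right[NSAMPLES], packed[2 * NSAMPLES];
    size_t i;

    for (i = 0; i < NSAMPLES; i++) {
        left[i]  =  (float)i;   /* planar layout: one contiguous buffer per channel */
        right[i] = -(float)i;
    }

    interleave_stereo(packed, left, right, NSAMPLES);

    for (i = 0; i < 2 * NSAMPLES; i++)  /* packed layout produced by the old loop */
        printf("%g ", packed[i]);
    printf("\n");
    return 0;
}

With planar output the subband synthesis writes each channel directly into the buffer that ff_get_buffer() allocated for that plane, which is why the per-context out_samples array could be dropped as well.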