X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Faacenc.c;h=6021c375bb4709c438b5e5892ae1b5efe21f7d4a;hb=ddffe3de4352eb025b78843cf3b44501056b54bb;hp=742fee9cd18514b62ccd48e265101d121b4f5879;hpb=9b8e2a870957293898998209c6e9bed290cc9bef;p=ffmpeg diff --git a/libavcodec/aacenc.c b/libavcodec/aacenc.c index 742fee9cd18..6021c375bb4 100644 --- a/libavcodec/aacenc.c +++ b/libavcodec/aacenc.c @@ -34,6 +34,7 @@ #include "avcodec.h" #include "put_bits.h" #include "dsputil.h" +#include "internal.h" #include "mpeg4audio.h" #include "kbdwin.h" #include "sinewin.h" @@ -180,47 +181,81 @@ static void put_audio_specific_config(AVCodecContext *avctx) flush_put_bits(&pb); } +#define WINDOW_FUNC(type) \ +static void apply_ ##type ##_window(DSPContext *dsp, SingleChannelElement *sce, const float *audio) + +WINDOW_FUNC(only_long) +{ + const float *lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024; + const float *pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024; + float *out = sce->ret; + + dsp->vector_fmul (out, audio, lwindow, 1024); + dsp->vector_fmul_reverse(out + 1024, audio + 1024, pwindow, 1024); +} + +WINDOW_FUNC(long_start) +{ + const float *lwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024; + const float *swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128; + float *out = sce->ret; + + dsp->vector_fmul(out, audio, lwindow, 1024); + memcpy(out + 1024, audio + 1024, sizeof(out[0]) * 448); + dsp->vector_fmul_reverse(out + 1024 + 448, audio + 1024 + 448, swindow, 128); + memset(out + 1024 + 576, 0, sizeof(out[0]) * 448); +} + +WINDOW_FUNC(long_stop) +{ + const float *lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024; + const float *swindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128; + float *out = sce->ret; + + memset(out, 0, sizeof(out[0]) * 448); + dsp->vector_fmul(out + 448, audio + 448, swindow, 128); + memcpy(out + 576, audio + 576, sizeof(out[0]) * 448); + dsp->vector_fmul_reverse(out + 1024, audio + 1024, lwindow, 1024); +} + +WINDOW_FUNC(eight_short) +{ + const float *swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128; + const float *pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128; + const float *in = audio + 448; + float *out = sce->ret; + int w; + + for (w = 0; w < 8; w++) { + dsp->vector_fmul (out, in, w ? pwindow : swindow, 128); + out += 128; + in += 128; + dsp->vector_fmul_reverse(out, in, swindow, 128); + out += 128; + } +} + +static void (*const apply_window[4])(DSPContext *dsp, SingleChannelElement *sce, const float *audio) = { + [ONLY_LONG_SEQUENCE] = apply_only_long_window, + [LONG_START_SEQUENCE] = apply_long_start_window, + [EIGHT_SHORT_SEQUENCE] = apply_eight_short_window, + [LONG_STOP_SEQUENCE] = apply_long_stop_window +}; + static void apply_window_and_mdct(AACEncContext *s, SingleChannelElement *sce, float *audio) { - int i, k; - const float * lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024; - const float * swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128; - const float * pwindow = sce->ics.use_kb_window[1] ? 
ff_aac_kbd_short_128 : ff_sine_128; + int i; float *output = sce->ret; - if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) { - memcpy(output, sce->saved, sizeof(output[0])*1024); - if (sce->ics.window_sequence[0] == LONG_STOP_SEQUENCE) { - memset(output, 0, sizeof(output[0]) * 448); - for (i = 448; i < 576; i++) - output[i] = sce->saved[i] * pwindow[i - 448]; - } - if (sce->ics.window_sequence[0] != LONG_START_SEQUENCE) { - for (i = 0; i < 1024; i++) { - output[i+1024] = audio[i] * lwindow[1024 - i - 1]; - sce->saved[i] = audio[i] * lwindow[i]; - } - } else { - memcpy(output + 1024, audio, sizeof(output[0]) * 448); - for (; i < 576; i++) - output[i+1024] = audio[i] * swindow[576 - i - 1]; - memset(output+1024+576, 0, sizeof(output[0]) * 448); - memcpy(sce->saved, audio, sizeof(sce->saved[0]) * 1024); - } + apply_window[sce->ics.window_sequence[0]](&s->dsp, sce, audio); + + if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) s->mdct1024.mdct_calc(&s->mdct1024, sce->coeffs, output); - } else { - for (k = 0; k < 1024; k += 128) { - for (i = 448 + k; i < 448 + k + 256; i++) - output[i - 448 - k] = (i < 1024) - ? sce->saved[i] - : audio[i-1024]; - s->dsp.vector_fmul (output, output, k ? swindow : pwindow, 128); - s->dsp.vector_fmul_reverse(output+128, output+128, swindow, 128); - s->mdct128.mdct_calc(&s->mdct128, sce->coeffs + k, output); - } - memcpy(sce->saved, audio, sizeof(sce->saved[0]) * 1024); - } + else + for (i = 0; i < 1024; i += 128) + s->mdct128.mdct_calc(&s->mdct128, sce->coeffs + i, output + i*2); + memcpy(audio, audio + 1024, sizeof(audio[0]) * 1024); } /** @@ -429,9 +464,9 @@ static void put_bitstream_info(AVCodecContext *avctx, AACEncContext *s, put_bits(&s->pb, 3, TYPE_FIL); put_bits(&s->pb, 4, FFMIN(namelen, 15)); if (namelen >= 15) - put_bits(&s->pb, 8, namelen - 16); + put_bits(&s->pb, 8, namelen - 14); put_bits(&s->pb, 4, 0); //extension type - filler - padbits = 8 - (put_bits_count(&s->pb) & 7); + padbits = -put_bits_count(&s->pb) & 7; avpriv_align_put_bits(&s->pb); for (i = 0; i < namelen - 2; i++) put_bits(&s->pb, 8, name[i]); @@ -442,8 +477,7 @@ static void put_bitstream_info(AVCodecContext *avctx, AACEncContext *s, * Deinterleave input samples. * Channels are reordered from Libav's default order to AAC order. 
  */
-static void deinterleave_input_samples(AACEncContext *s,
-                                       const float *samples)
+static void deinterleave_input_samples(AACEncContext *s, const AVFrame *frame)
 {
     int ch, i;
     const int sinc = s->channels;
@@ -451,38 +485,46 @@
 
     /* deinterleave and remap input samples */
     for (ch = 0; ch < sinc; ch++) {
-        const float *sptr = samples + channel_map[ch];
-
         /* copy last 1024 samples of previous frame to the start of the current frame */
-        memcpy(&s->planar_samples[ch][0], &s->planar_samples[ch][1024], 1024 * sizeof(s->planar_samples[0][0]));
+        memcpy(&s->planar_samples[ch][1024], &s->planar_samples[ch][2048], 1024 * sizeof(s->planar_samples[0][0]));
 
         /* deinterleave */
-        for (i = 1024; i < 1024 * 2; i++) {
-            s->planar_samples[ch][i] = *sptr;
-            sptr += sinc;
+        i = 2048;
+        if (frame) {
+            const float *sptr = ((const float *)frame->data[0]) + channel_map[ch];
+            for (; i < 2048 + frame->nb_samples; i++) {
+                s->planar_samples[ch][i] = *sptr;
+                sptr += sinc;
+            }
         }
+        memset(&s->planar_samples[ch][i], 0,
+               (3072 - i) * sizeof(s->planar_samples[0][0]));
     }
 }
 
-static int aac_encode_frame(AVCodecContext *avctx,
-                            uint8_t *frame, int buf_size, void *data)
+static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
+                            const AVFrame *frame, int *got_packet_ptr)
 {
     AACEncContext *s = avctx->priv_data;
-    float **samples = s->planar_samples, *samples2, *la;
+    float **samples = s->planar_samples, *samples2, *la, *overlap;
     ChannelElement *cpe;
-    int i, ch, w, g, chans, tag, start_ch;
+    int i, ch, w, g, chans, tag, start_ch, ret;
     int chan_el_counter[4];
     FFPsyWindowInfo windows[AAC_MAX_CHANNELS];
 
-    if (s->last_frame)
+    if (s->last_frame == 2)
         return 0;
 
-    if (data) {
-        deinterleave_input_samples(s, data);
-        if (s->psypp)
-            ff_psy_preprocess(s->psypp, s->planar_samples, s->channels);
+    /* add current frame to queue */
+    if (frame) {
+        if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
+            return ret;
    }
 
+    deinterleave_input_samples(s, frame);
+    if (s->psypp)
+        ff_psy_preprocess(s->psypp, s->planar_samples, s->channels);
+
     if (!avctx->frame_number)
         return 0;
 
@@ -495,9 +537,10 @@ static int aac_encode_frame(AVCodecContext *avctx,
         for (ch = 0; ch < chans; ch++) {
             IndividualChannelStream *ics = &cpe->ch[ch].ics;
             int cur_channel = start_ch + ch;
-            samples2 = &samples[cur_channel][0];
+            overlap  = &samples[cur_channel][0];
+            samples2 = overlap + 1024;
             la       = samples2 + (448+64);
-            if (!data)
+            if (!frame)
                 la = NULL;
             if (tag == TYPE_LFE) {
                 wi[ch].window_type[0] = ONLY_LONG_SEQUENCE;
@@ -524,13 +567,20 @@ static int aac_encode_frame(AVCodecContext *avctx,
             for (w = 0; w < ics->num_windows; w++)
                 ics->group_len[w] = wi[ch].grouping[w];
 
-            apply_window_and_mdct(s, &cpe->ch[ch], samples2);
+            apply_window_and_mdct(s, &cpe->ch[ch], overlap);
         }
         start_ch += chans;
     }
+    if ((ret = ff_alloc_packet(avpkt, 768 * s->channels))) {
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
+        return ret;
+    }
+
     do {
         int frame_bits;
-        init_put_bits(&s->pb, frame, buf_size*8);
+
+        init_put_bits(&s->pb, avpkt->data, avpkt->size);
+
         if ((avctx->frame_number & 0xFF)==1 && !(avctx->flags & CODEC_FLAG_BITEXACT))
             put_bitstream_info(avctx, s, LIBAVCODEC_IDENT);
         start_ch = 0;
@@ -610,10 +660,15 @@ static int aac_encode_frame(AVCodecContext *avctx,
         s->lambda = FFMIN(s->lambda, 65536.f);
     }
 
-    if (!data)
-        s->last_frame = 1;
+    if (!frame)
+        s->last_frame++;
+
+    ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts,
+                       &avpkt->duration);
 
-    return put_bits_count(&s->pb)>>3;
+    avpkt->size = put_bits_count(&s->pb) >> 3;
+    *got_packet_ptr = 1;
+    return 0;
 }
 
 static av_cold int aac_encode_end(AVCodecContext *avctx)
@@ -627,6 +682,10 @@ static av_cold int aac_encode_end(AVCodecContext *avctx)
         ff_psy_preprocess_end(s->psypp);
     av_freep(&s->buffer.samples);
     av_freep(&s->cpe);
+    ff_af_queue_close(&s->afq);
+#if FF_API_OLD_ENCODE_AUDIO
+    av_freep(&avctx->coded_frame);
+#endif
     return 0;
 }
 
@@ -634,7 +693,7 @@ static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
 {
     int ret = 0;
 
-    dsputil_init(&s->dsp, avctx);
+    ff_dsputil_init(&s->dsp, avctx);
 
     // window init
     ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
@@ -652,12 +711,18 @@ static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
 
 static av_cold int alloc_buffers(AVCodecContext *avctx, AACEncContext *s)
 {
-    FF_ALLOCZ_OR_GOTO(avctx, s->buffer.samples, 2 * 1024 * s->channels * sizeof(s->buffer.samples[0]), alloc_fail);
+    int ch;
+    FF_ALLOCZ_OR_GOTO(avctx, s->buffer.samples, 3 * 1024 * s->channels * sizeof(s->buffer.samples[0]), alloc_fail);
     FF_ALLOCZ_OR_GOTO(avctx, s->cpe, sizeof(ChannelElement) * s->chan_map[0], alloc_fail);
     FF_ALLOCZ_OR_GOTO(avctx, avctx->extradata, 5 + FF_INPUT_BUFFER_PADDING_SIZE, alloc_fail);
 
-    for(int ch = 0; ch < s->channels; ch++)
-        s->planar_samples[ch] = s->buffer.samples + 2 * 1024 * ch;
+    for(ch = 0; ch < s->channels; ch++)
+        s->planar_samples[ch] = s->buffer.samples + 3 * 1024 * ch;
+
+#if FF_API_OLD_ENCODE_AUDIO
+    if (!(avctx->coded_frame = avcodec_alloc_frame()))
+        goto alloc_fail;
+#endif
 
     return 0;
 alloc_fail:
@@ -720,6 +785,9 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
     for (i = 0; i < 428; i++)
         ff_aac_pow34sf_tab[i] = sqrt(ff_aac_pow2sf_tab[i] * sqrt(ff_aac_pow2sf_tab[i]));
 
+    avctx->delay = 1024;
+    ff_af_queue_init(avctx, &s->afq);
+
     return 0;
 fail:
     aac_encode_end(avctx);
@@ -748,10 +816,12 @@ AVCodec ff_aac_encoder = {
     .id             = CODEC_ID_AAC,
     .priv_data_size = sizeof(AACEncContext),
     .init           = aac_encode_init,
-    .encode         = aac_encode_frame,
+    .encode2        = aac_encode_frame,
     .close          = aac_encode_end,
-    .capabilities   = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL,
-    .sample_fmts    = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
-    .long_name      = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
-    .priv_class     = &aacenc_class,
+    .capabilities   = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY |
+                      CODEC_CAP_EXPERIMENTAL,
+    .sample_fmts    = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
+                                                     AV_SAMPLE_FMT_NONE },
+    .long_name      = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
+    .priv_class     = &aacenc_class,
 };
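
For reviewers, a rough sketch (not part of the patch) of how an application is expected to drive the encoder once it uses the encode2() callback: frames go in through avcodec_encode_audio2(), the first call only primes the 1024-sample encoder delay (so no packet comes out yet), and because of CODEC_CAP_DELAY the caller flushes with NULL frames until no further packet is produced; ff_af_queue is what keeps pts/duration on the output packets consistent with that delay. The get_audio_frame()/write_packet() helpers below are hypothetical placeholders for the application's own input and output.

#include <libavcodec/avcodec.h>

extern AVFrame *get_audio_frame(void);       /* hypothetical: returns NULL at end of input */
extern void     write_packet(AVPacket *pkt); /* hypothetical: consumes one encoded packet  */

static void encode_all(AVCodecContext *avctx)
{
    AVPacket pkt;
    AVFrame *frame;
    int got_packet;

    /* feed input; got_packet stays 0 while the encoder is still buffering
     * its 1024-sample delay */
    while ((frame = get_audio_frame())) {
        av_init_packet(&pkt);
        pkt.data = NULL;    /* let the encoder allocate the packet buffer */
        pkt.size = 0;
        if (avcodec_encode_audio2(avctx, &pkt, frame, &got_packet) < 0)
            return;
        if (got_packet) {
            write_packet(&pkt);
            av_free_packet(&pkt);
        }
    }

    /* CODEC_CAP_DELAY: flush with NULL frames until the queue is drained */
    do {
        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;
        if (avcodec_encode_audio2(avctx, &pkt, NULL, &got_packet) < 0)
            break;
        if (got_packet) {
            write_packet(&pkt);
            av_free_packet(&pkt);
        }
    } while (got_packet);
}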