X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Faacdec.c;h=4d002edf6a1322948e330159e37ab6d2a9f15c54;hb=c18365402bbb6fbfa7854b47c3288f9e31f39f44;hp=e289caa4f6aa02290524f12863a528bba780f909;hpb=e4744b59aadd6e7064491c0228d6248289a6a85a;p=ffmpeg

diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c
index e289caa4f6a..4d002edf6a1 100644
--- a/libavcodec/aacdec.c
+++ b/libavcodec/aacdec.c
@@ -98,6 +98,7 @@
 #include "aacsbr.h"
 #include "mpeg4audio.h"
 #include "aacadtsdec.h"
+#include "libavutil/intfloat.h"
 
 #include <assert.h>
 #include <errno.h>
@@ -108,11 +109,6 @@
 #   include "arm/aac.h"
 #endif
 
-union float754 {
-    float f;
-    uint32_t i;
-};
-
 static VLC vlc_scalefactors;
 static VLC vlc_spectral[11];
 
@@ -184,9 +180,11 @@ static av_cold int che_configure(AACContext *ac,
                                  int type, int id, int *channels)
 {
     if (che_pos[type][id]) {
-        if (!ac->che[type][id] && !(ac->che[type][id] = av_mallocz(sizeof(ChannelElement))))
-            return AVERROR(ENOMEM);
-        ff_aac_sbr_ctx_init(&ac->che[type][id]->sbr);
+        if (!ac->che[type][id]) {
+            if (!(ac->che[type][id] = av_mallocz(sizeof(ChannelElement))))
+                return AVERROR(ENOMEM);
+            ff_aac_sbr_ctx_init(ac, &ac->che[type][id]->sbr);
+        }
         if (type != TYPE_CCE) {
             ac->output_data[(*channels)++] = ac->che[type][id]->ch[0].ret;
             if (type == TYPE_CPE ||
@@ -450,15 +448,17 @@ static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
  * @param ac pointer to AACContext, may be null
  * @param avctx pointer to AVCCodecContext, used for logging
  * @param m4ac pointer to MPEG4AudioConfig, used for parsing
- * @param data pointer to AVCodecContext extradata
- * @param data_size size of AVCCodecContext extradata
+ * @param data pointer to buffer holding an audio specific config
+ * @param bit_size size of audio specific config or data in bits
+ * @param sync_extension look for an appended sync extension
  *
  * @return Returns error status or number of consumed bits. <0 - error
  */
 static int decode_audio_specific_config(AACContext *ac,
                                         AVCodecContext *avctx,
                                         MPEG4AudioConfig *m4ac,
-                                        const uint8_t *data, int data_size)
+                                        const uint8_t *data, int bit_size,
+                                        int sync_extension)
 {
     GetBitContext gb;
     int i;
@@ -468,9 +468,9 @@ static int decode_audio_specific_config(AACContext *ac,
         av_dlog(avctx, "%02x ", avctx->extradata[i]);
     av_dlog(avctx, "\n");
 
-    init_get_bits(&gb, data, data_size * 8);
+    init_get_bits(&gb, data, bit_size);
 
-    if ((i = ff_mpeg4audio_get_config(m4ac, data, data_size)) < 0)
+    if ((i = avpriv_mpeg4audio_get_config(m4ac, data, bit_size, sync_extension)) < 0)
         return -1;
     if (m4ac->sampling_index > 12) {
         av_log(avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", m4ac->sampling_index);
@@ -530,6 +530,22 @@ static void reset_all_predictors(PredictorState *ps)
         reset_predict_state(&ps[i]);
 }
 
+static int sample_rate_idx (int rate)
+{
+         if (92017 <= rate) return 0;
+    else if (75132 <= rate) return 1;
+    else if (55426 <= rate) return 2;
+    else if (46009 <= rate) return 3;
+    else if (37566 <= rate) return 4;
+    else if (27713 <= rate) return 5;
+    else if (23004 <= rate) return 6;
+    else if (18783 <= rate) return 7;
+    else if (13856 <= rate) return 8;
+    else if (11502 <= rate) return 9;
+    else if (9391  <= rate) return 10;
+    else                    return 11;
+}
+
 static void reset_predictor_group(PredictorState *ps, int group_num)
 {
     int i;
@@ -546,6 +562,7 @@ static void reset_predictor_group(PredictorState *ps, int group_num)
 static av_cold int aac_decode_init(AVCodecContext *avctx)
 {
     AACContext *ac = avctx->priv_data;
+    float output_scale_factor;
 
     ac->avctx = avctx;
     ac->m4ac.sample_rate = avctx->sample_rate;
@@ -553,11 +570,42 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
     if (avctx->extradata_size > 0) {
         if (decode_audio_specific_config(ac, ac->avctx, &ac->m4ac,
                                          avctx->extradata,
-                                         avctx->extradata_size) < 0)
+                                         avctx->extradata_size*8, 1) < 0)
             return -1;
+    } else {
+        int sr, i;
+        enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
+
+        sr = sample_rate_idx(avctx->sample_rate);
+        ac->m4ac.sampling_index = sr;
+        ac->m4ac.channels = avctx->channels;
+        ac->m4ac.sbr = -1;
+        ac->m4ac.ps = -1;
+
+        for (i = 0; i < FF_ARRAY_ELEMS(ff_mpeg4audio_channels); i++)
+            if (ff_mpeg4audio_channels[i] == avctx->channels)
+                break;
+        if (i == FF_ARRAY_ELEMS(ff_mpeg4audio_channels)) {
+            i = 0;
+        }
+        ac->m4ac.chan_config = i;
+
+        if (ac->m4ac.chan_config) {
+            int ret = set_default_channel_config(avctx, new_che_pos, ac->m4ac.chan_config);
+            if (!ret)
+                output_configure(ac, ac->che_pos, new_che_pos, ac->m4ac.chan_config, OC_GLOBAL_HDR);
+            else if (avctx->err_recognition & AV_EF_EXPLODE)
+                return AVERROR_INVALIDDATA;
+        }
     }
 
-    avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+    if (avctx->request_sample_fmt == AV_SAMPLE_FMT_FLT) {
+        avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
+        output_scale_factor = 1.0 / 32768.0;
+    } else {
+        avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+        output_scale_factor = 1.0;
+    }
 
     AAC_INIT_VLC_STATIC( 0, 304);
     AAC_INIT_VLC_STATIC( 1, 270);
@@ -578,12 +626,6 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
 
     ac->random_state = 0x1f2e3d4c;
 
-    // -1024 - Compensate wrong IMDCT method.
-    // 60    - Required to scale values to the correct range [-32768,32767]
-    //         for float to int16 conversion. (1 << (60 / 4)) == 32768
-    ac->sf_scale = 1. / -1024.;
-    ac->sf_offset = 60;
-
     ff_aac_tableinit();
 
     INIT_VLC_STATIC(&vlc_scalefactors,7,FF_ARRAY_ELEMS(ff_aac_scalefactor_code),
@@ -591,9 +633,9 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
                     ff_aac_scalefactor_bits, sizeof(ff_aac_scalefactor_bits[0]), sizeof(ff_aac_scalefactor_bits[0]),
                     ff_aac_scalefactor_code, sizeof(ff_aac_scalefactor_code[0]), sizeof(ff_aac_scalefactor_code[0]),
                     352);
 
-    ff_mdct_init(&ac->mdct, 11, 1, 1.0);
-    ff_mdct_init(&ac->mdct_small, 8, 1, 1.0);
-    ff_mdct_init(&ac->mdct_ltp, 11, 0, 1.0);
+    ff_mdct_init(&ac->mdct, 11, 1, output_scale_factor/1024.0);
+    ff_mdct_init(&ac->mdct_small, 8, 1, output_scale_factor/128.0);
+    ff_mdct_init(&ac->mdct_ltp, 11, 0, -2.0/output_scale_factor);
     // window initialization
     ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
     ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
@@ -602,6 +644,9 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
 
     cbrt_tableinit();
 
+    avcodec_get_frame_defaults(&ac->frame);
+    avctx->coded_frame = &ac->frame;
+
     return 0;
 }
 
@@ -651,23 +696,20 @@ static void decode_ltp(AACContext *ac, LongTermPrediction *ltp,
     int sfb;
 
     ltp->lag  = get_bits(gb, 11);
-    ltp->coef = ltp_coef[get_bits(gb, 3)] * ac->sf_scale;
+    ltp->coef = ltp_coef[get_bits(gb, 3)];
     for (sfb = 0; sfb < FFMIN(max_sfb, MAX_LTP_LONG_SFB); sfb++)
         ltp->used[sfb] = get_bits1(gb);
 }
 
 /**
  * Decode Individual Channel Stream info; reference: table 4.6.
- *
- * @param common_window Channels have independent [0], or shared [1], Individual Channel Stream information.
  */
 static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
-                           GetBitContext *gb, int common_window)
+                           GetBitContext *gb)
 {
     if (get_bits1(gb)) {
         av_log(ac->avctx, AV_LOG_ERROR, "Reserved bit set.\n");
-        memset(ics, 0, sizeof(IndividualChannelStream));
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     ics->window_sequence[1] = ics->window_sequence[0];
     ics->window_sequence[0] = get_bits(gb, 2);
@@ -702,13 +744,11 @@ static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
         if (ics->predictor_present) {
             if (ac->m4ac.object_type == AOT_AAC_MAIN) {
                 if (decode_prediction(ac, ics, gb)) {
-                    memset(ics, 0, sizeof(IndividualChannelStream));
-                    return -1;
+                    return AVERROR_INVALIDDATA;
                 }
             } else if (ac->m4ac.object_type == AOT_AAC_LC) {
                 av_log(ac->avctx, AV_LOG_ERROR, "Prediction is not allowed in AAC-LC.\n");
-                memset(ics, 0, sizeof(IndividualChannelStream));
-                return -1;
+                return AVERROR_INVALIDDATA;
             } else {
                 if ((ics->ltp.present = get_bits(gb, 1)))
                     decode_ltp(ac, &ics->ltp, gb, ics->max_sfb);
@@ -720,8 +760,7 @@ static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
         av_log(ac->avctx, AV_LOG_ERROR,
                "Number of scalefactor bands in group (%d) exceeds limit (%d).\n",
                ics->max_sfb, ics->num_swb);
-        memset(ics, 0, sizeof(IndividualChannelStream));
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
 
     return 0;
@@ -789,7 +828,6 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
                                enum BandType band_type[120],
                                int band_type_run_end[120])
 {
-    const int sf_offset = ac->sf_offset + (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE ? 12 : 0);
     int g, i, idx = 0;
     int offset[3] = { global_gain, global_gain - 90, 0 };
     int clipped_offset;
@@ -811,7 +849,7 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
                             "audible artifact, there may be a bug in the "
                             "decoder. ", offset[2], clipped_offset);
                 }
-                sf[idx] = ff_aac_pow2sf_tab[-clipped_offset + 200];
+                sf[idx] = ff_aac_pow2sf_tab[-clipped_offset + POW_SF2_ZERO];
             }
         } else if (band_type[idx] == NOISE_BT) {
             for (; i < run_end; i++, idx++) {
@@ -820,13 +858,13 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
                 else
                     offset[1] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
                 clipped_offset = av_clip(offset[1], -100, 155);
-                if (offset[2] != clipped_offset) {
+                if (offset[1] != clipped_offset) {
                     av_log_ask_for_sample(ac->avctx, "Noise gain clipped "
                             "(%d -> %d).\nIf you heard an audible "
                             "artifact, there may be a bug in the decoder. ",
                             offset[1], clipped_offset);
                 }
-                sf[idx] = -ff_aac_pow2sf_tab[clipped_offset + sf_offset + 100];
+                sf[idx] = -ff_aac_pow2sf_tab[clipped_offset + POW_SF2_ZERO];
             }
         } else {
             for (; i < run_end; i++, idx++) {
@@ -836,7 +874,7 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
                            "%s (%d) out of range.\n", sf_str[0], offset[0]);
                     return -1;
                 }
-                sf[idx] = -ff_aac_pow2sf_tab[ offset[0] + sf_offset];
+                sf[idx] = -ff_aac_pow2sf_tab[offset[0] - 100 + POW_SF2_ZERO];
             }
         }
     }
@@ -956,7 +994,7 @@ static inline float *VMUL4(float *dst, const float *v, unsigned idx,
 static inline float *VMUL2S(float *dst, const float *v, unsigned idx,
                             unsigned sign, const float *scale)
 {
-    union float754 s0, s1;
+    union av_intfloat32 s0, s1;
 
     s0.f = s1.f = *scale;
     s0.i ^= sign >> 1 << 31;
@@ -974,8 +1012,8 @@ static inline float *VMUL4S(float *dst, const float *v, unsigned idx,
                             unsigned sign, const float *scale)
 {
     unsigned nz = idx >> 12;
-    union float754 s = { .f = *scale };
-    union float754 t;
+    union av_intfloat32 s = { .f = *scale };
+    union av_intfloat32 t;
 
     t.i = s.i ^ (sign & 1U<<31);
     *dst++ = v[idx & 3] * t.f;
@@ -1088,7 +1126,7 @@ static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
                         GET_VLC(code, re, gb, vlc_tab, 8, 2);
                         cb_idx = cb_vector_idx[code];
                         nnz = cb_idx >> 8 & 15;
-                        bits = SHOW_UBITS(re, gb, nnz) << (32-nnz);
+                        bits = nnz ? GET_CACHE(re, gb) : 0;
                         LAST_SKIP_BITS(re, gb, nnz);
                         cf = VMUL4S(cf, vq, cb_idx, bits, sf + idx);
                     } while (len -= 4);
@@ -1128,7 +1166,7 @@ static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
                        GET_VLC(code, re, gb, vlc_tab, 8, 2);
                        cb_idx = cb_vector_idx[code];
                        nnz = cb_idx >> 8 & 15;
-                       sign = SHOW_UBITS(re, gb, nnz) << (cb_idx >> 12);
+                       sign = nnz ? SHOW_UBITS(re, gb, nnz) << (cb_idx >> 12) : 0;
                        LAST_SKIP_BITS(re, gb, nnz);
                        cf = VMUL2S(cf, vq, cb_idx, sign, sf + idx);
                    } while (len -= 2);
@@ -1224,7 +1262,7 @@ static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
 
 static av_always_inline float flt16_round(float pf)
 {
-    union float754 tmp;
+    union av_intfloat32 tmp;
     tmp.f = pf;
     tmp.i = (tmp.i + 0x00008000U) & 0xFFFF0000U;
     return tmp.f;
@@ -1232,7 +1270,7 @@ static av_always_inline float flt16_round(float pf)
 
 static av_always_inline float flt16_even(float pf)
 {
-    union float754 tmp;
+    union av_intfloat32 tmp;
     tmp.f = pf;
     tmp.i = (tmp.i + 0x00007FFFU + (tmp.i & 0x00010000U >> 16)) & 0xFFFF0000U;
     return tmp.f;
@@ -1240,14 +1278,13 @@ static av_always_inline float flt16_even(float pf)
 
 static av_always_inline float flt16_trunc(float pf)
 {
-    union float754 pun;
+    union av_intfloat32 pun;
     pun.f = pf;
    pun.i &= 0xFFFF0000U;
    return pun.f;
 }
 
 static av_always_inline void predict(PredictorState *ps, float *coef,
-                                     float sf_scale, float inv_sf_scale,
                                      int output_enable)
 {
     const float a = 0.953125; // 61.0 / 64
@@ -1264,9 +1301,9 @@ static av_always_inline void predict(PredictorState *ps, float *coef,
 
     pv = flt16_round(k1 * r0 + k2 * r1);
     if (output_enable)
-        *coef += pv * sf_scale;
+        *coef += pv;
 
-    e0 = *coef * inv_sf_scale;
+    e0 = *coef;
     e1 = e0 - k1 * r0;
 
     ps->cor1 = flt16_trunc(alpha * cor1 + r1 * e1);
@@ -1284,7 +1321,6 @@ static av_always_inline void predict(PredictorState *ps, float *coef,
 static void apply_prediction(AACContext *ac, SingleChannelElement *sce)
 {
     int sfb, k;
-    float sf_scale = ac->sf_scale, inv_sf_scale = 1 / ac->sf_scale;
 
     if (!sce->ics.predictor_initialized) {
         reset_all_predictors(sce->predictor_state);
@@ -1295,7 +1331,6 @@ static void apply_prediction(AACContext *ac, SingleChannelElement *sce)
         for (sfb = 0; sfb < ff_aac_pred_sfb_max[ac->m4ac.sampling_index]; sfb++) {
             for (k = sce->ics.swb_offset[sfb]; k < sce->ics.swb_offset[sfb + 1]; k++) {
                 predict(&sce->predictor_state[k], &sce->coeffs[k],
-                        sf_scale, inv_sf_scale,
                         sce->ics.predictor_present && sce->ics.prediction_used[sfb]);
             }
         }
@@ -1330,8 +1365,8 @@ static int decode_ics(AACContext *ac, SingleChannelElement *sce,
     global_gain = get_bits(gb, 8);
 
     if (!common_window && !scale_flag) {
-        if (decode_ics_info(ac, ics, gb, 0) < 0)
-            return -1;
+        if (decode_ics_info(ac, ics, gb) < 0)
+            return AVERROR_INVALIDDATA;
     }
 
     if (decode_band_types(ac, sce->band_type, sce->band_type_run_end, gb, ics) < 0)
@@ -1447,8 +1482,8 @@ static int decode_cpe(AACContext *ac, GetBitContext *gb, ChannelElement *cpe)
 
     common_window = get_bits1(gb);
     if (common_window) {
-        if (decode_ics_info(ac, &cpe->ch[0].ics, gb, 1))
-            return -1;
+        if (decode_ics_info(ac, &cpe->ch[0].ics, gb))
+            return AVERROR_INVALIDDATA;
         i = cpe->ch[1].ics.use_kb_window[0];
         cpe->ch[1].ics = cpe->ch[0].ics;
         cpe->ch[1].ics.use_kb_window[1] = i;
@@ -1756,12 +1791,10 @@ static void windowing_and_mdct_ltp(AACContext *ac, float *out,
     } else {
         memset(in, 0, 448 * sizeof(float));
         ac->dsp.vector_fmul(in + 448, in + 448, swindow_prev, 128);
-        memcpy(in + 576, in + 576, 448 * sizeof(float));
     }
     if (ics->window_sequence[0] != LONG_START_SEQUENCE) {
         ac->dsp.vector_fmul_reverse(in + 1024, in + 1024, lwindow, 1024);
     } else {
-        memcpy(in + 1024, in + 1024, 448 * sizeof(float));
         ac->dsp.vector_fmul_reverse(in + 1024 + 448, in + 1024 + 448, swindow, 128);
         memset(in + 1024 + 576, 0, 448 * sizeof(float));
     }
@@ -1830,9 +1863,9 @@ static void update_ltp(AACContext *ac, SingleChannelElement *sce)
             saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * lwindow[511 - i];
     }
 
-    memcpy(sce->ltp_state, &sce->ltp_state[1024], 1024 * sizeof(int16_t));
-    ac->fmt_conv.float_to_int16(&(sce->ltp_state[1024]), sce->ret, 1024);
-    ac->fmt_conv.float_to_int16(&(sce->ltp_state[2048]), saved_ltp, 1024);
+    memcpy(sce->ltp_state, sce->ltp_state+1024, 1024 * sizeof(*sce->ltp_state));
+    memcpy(sce->ltp_state+1024, sce->ret, 1024 * sizeof(*sce->ltp_state));
+    memcpy(sce->ltp_state+2048, saved_ltp, 1024 * sizeof(*sce->ltp_state));
 }
 
 /**
@@ -2039,26 +2072,28 @@ static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
     int size;
     AACADTSHeaderInfo hdr_info;
 
-    size = ff_aac_parse_header(gb, &hdr_info);
+    size = avpriv_aac_parse_header(gb, &hdr_info);
     if (size > 0) {
-        if (ac->output_configured != OC_LOCKED && hdr_info.chan_config) {
+        if (hdr_info.chan_config) {
             enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
             memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
             ac->m4ac.chan_config = hdr_info.chan_config;
             if (set_default_channel_config(ac->avctx, new_che_pos, hdr_info.chan_config))
                 return -7;
-            if (output_configure(ac, ac->che_pos, new_che_pos, hdr_info.chan_config, OC_TRIAL_FRAME))
+            if (output_configure(ac, ac->che_pos, new_che_pos, hdr_info.chan_config,
+                                 FFMAX(ac->output_configured, OC_TRIAL_FRAME)))
                 return -7;
         } else if (ac->output_configured != OC_LOCKED) {
+            ac->m4ac.chan_config = 0;
            ac->output_configured = OC_NONE;
        }
        if (ac->output_configured != OC_LOCKED) {
            ac->m4ac.sbr = -1;
            ac->m4ac.ps = -1;
+            ac->m4ac.sample_rate = hdr_info.sample_rate;
+            ac->m4ac.sampling_index = hdr_info.sampling_index;
+            ac->m4ac.object_type = hdr_info.object_type;
        }
-        ac->m4ac.sample_rate = hdr_info.sample_rate;
-        ac->m4ac.sampling_index = hdr_info.sampling_index;
-        ac->m4ac.object_type = hdr_info.object_type;
         if (!ac->avctx->sample_rate)
             ac->avctx->sample_rate = hdr_info.sample_rate;
         if (hdr_info.num_aac_frames == 1) {
@@ -2073,13 +2108,13 @@ static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
 }
 
 static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
-                                int *data_size, GetBitContext *gb)
+                                int *got_frame_ptr, GetBitContext *gb)
 {
     AACContext *ac = avctx->priv_data;
     ChannelElement *che = NULL, *che_prev = NULL;
     enum RawDataBlockType elem_type, elem_type_prev = TYPE_END;
-    int err, elem_id, data_size_tmp;
-    int samples = 0, multiplier;
+    int err, elem_id;
+    int samples = 0, multiplier, audio_found = 0;
 
     if (show_bits(gb, 12) == 0xfff) {
         if (parse_adts_frame_header(ac, gb) < 0) {
@@ -2110,10 +2145,12 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
 
         case TYPE_SCE:
             err = decode_ics(ac, &che->ch[0], gb, 0, 0);
+            audio_found = 1;
             break;
 
         case TYPE_CPE:
             err = decode_cpe(ac, gb, che);
+            audio_found = 1;
             break;
 
         case TYPE_CCE:
@@ -2122,6 +2159,7 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
 
         case TYPE_LFE:
             err = decode_ics(ac, &che->ch[0], gb, 0, 0);
+            audio_found = 1;
             break;
 
         case TYPE_DSE:
@@ -2179,26 +2217,35 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
         avctx->frame_size = samples;
     }
 
-    data_size_tmp = samples * avctx->channels * sizeof(int16_t);
-    if (*data_size < data_size_tmp) {
-        av_log(avctx, AV_LOG_ERROR,
-               "Output buffer too small (%d) or trying to output too many samples (%d) for this frame.\n",
-               *data_size, data_size_tmp);
-        return -1;
-    }
-    *data_size = data_size_tmp;
+    if (samples) {
+        /* get output buffer */
+        ac->frame.nb_samples = samples;
+        if ((err = avctx->get_buffer(avctx, &ac->frame)) < 0) {
+            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+            return err;
+        }
+
+        if (avctx->sample_fmt == AV_SAMPLE_FMT_FLT)
+            ac->fmt_conv.float_interleave((float *)ac->frame.data[0],
+                                          (const float **)ac->output_data,
+                                          samples, avctx->channels);
+        else
+            ac->fmt_conv.float_to_int16_interleave((int16_t *)ac->frame.data[0],
+                                                   (const float **)ac->output_data,
+                                                   samples, avctx->channels);
 
-    if (samples)
-        ac->fmt_conv.float_to_int16_interleave(data, (const float **)ac->output_data, samples, avctx->channels);
+        *(AVFrame *)data = ac->frame;
+    }
+    *got_frame_ptr = !!samples;
 
-    if (ac->output_configured)
+    if (ac->output_configured && audio_found)
         ac->output_configured = OC_LOCKED;
 
     return 0;
 }
 
 static int aac_decode_frame(AVCodecContext *avctx, void *data,
-                            int *data_size, AVPacket *avpkt)
+                            int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
@@ -2209,7 +2256,7 @@ static int aac_decode_frame(AVCodecContext *avctx, void *data,
 
     init_get_bits(&gb, buf, buf_size * 8);
 
-    if ((err = aac_decode_frame_int(avctx, data, data_size, &gb)) < 0)
+    if ((err = aac_decode_frame_int(avctx, data, got_frame_ptr, &gb)) < 0)
         return err;
 
     buf_consumed = (get_bits_count(&gb) + 7) >> 3;
@@ -2260,29 +2307,42 @@ static inline uint32_t latm_get_value(GetBitContext *b)
 }
 
 static int latm_decode_audio_specific_config(struct LATMContext *latmctx,
-                                             GetBitContext *gb)
+                                             GetBitContext *gb, int asclen)
 {
-    AVCodecContext *avctx = latmctx->aac_ctx.avctx;
-    MPEG4AudioConfig m4ac;
-    int config_start_bit = get_bits_count(gb);
-    int bits_consumed, esize;
+    AACContext *ac = &latmctx->aac_ctx;
+    AVCodecContext *avctx = ac->avctx;
+    MPEG4AudioConfig m4ac = {0};
+    int config_start_bit = get_bits_count(gb);
+    int sync_extension = 0;
+    int bits_consumed, esize;
+
+    if (asclen) {
+        sync_extension = 1;
+        asclen = FFMIN(asclen, get_bits_left(gb));
+    } else
+        asclen = get_bits_left(gb);
 
     if (config_start_bit % 8) {
         av_log_missing_feature(latmctx->aac_ctx.avctx, "audio specific "
                                "config not byte aligned.\n", 1);
         return AVERROR_INVALIDDATA;
-    } else {
-        bits_consumed =
-            decode_audio_specific_config(NULL, avctx, &m4ac,
+    }
+    bits_consumed = decode_audio_specific_config(NULL, avctx, &m4ac,
                                          gb->buffer + (config_start_bit / 8),
-                                         get_bits_left(gb) / 8);
+                                         asclen, sync_extension);
 
-        if (bits_consumed < 0)
-            return AVERROR_INVALIDDATA;
+    if (bits_consumed < 0)
+        return AVERROR_INVALIDDATA;
+
+    if (ac->m4ac.sample_rate != m4ac.sample_rate ||
+        ac->m4ac.chan_config != m4ac.chan_config) {
+
+        av_log(avctx, AV_LOG_INFO, "audio config changed\n");
+        latmctx->initialized = 0;
 
         esize = (bits_consumed+7) / 8;
 
-        if (avctx->extradata_size <= esize) {
+        if (avctx->extradata_size < esize) {
             av_free(avctx->extradata);
             avctx->extradata = av_malloc(esize + FF_INPUT_BUFFER_PADDING_SIZE);
             if (!avctx->extradata)
@@ -2292,9 +2352,8 @@ static int latm_decode_audio_specific_config(struct LATMContext *latmctx,
         avctx->extradata_size = esize;
         memcpy(avctx->extradata, gb->buffer + (config_start_bit/8), esize);
         memset(avctx->extradata+esize, 0, FF_INPUT_BUFFER_PADDING_SIZE);
-
-        skip_bits_long(gb, bits_consumed);
     }
+    skip_bits_long(gb, bits_consumed);
 
     return bits_consumed;
 }
@@ -2333,11 +2392,11 @@ static int read_stream_mux_config(struct LATMContext *latmctx,
         // for all but first stream: use_same_config = get_bits(gb, 1);
         if (!audio_mux_version) {
-            if ((ret = latm_decode_audio_specific_config(latmctx, gb)) < 0)
+            if ((ret = latm_decode_audio_specific_config(latmctx, gb, 0)) < 0)
                 return ret;
         } else {
             int ascLen = latm_get_value(gb);
-            if ((ret = latm_decode_audio_specific_config(latmctx, gb)) < 0)
+            if ((ret = latm_decode_audio_specific_config(latmctx, gb, ascLen)) < 0)
                 return ret;
             ascLen -= ret;
             skip_bits_long(gb, ascLen);
@@ -2431,16 +2490,13 @@ static int read_audio_mux_element(struct LATMContext *latmctx,
 }
 
 
-static int latm_decode_frame(AVCodecContext *avctx, void *out, int *out_size,
-                             AVPacket *avpkt)
+static int latm_decode_frame(AVCodecContext *avctx, void *out,
+                             int *got_frame_ptr, AVPacket *avpkt)
 {
     struct LATMContext *latmctx = avctx->priv_data;
     int muxlength, err;
     GetBitContext gb;
 
-    if (avpkt->size == 0)
-        return 0;
-
     init_get_bits(&gb, avpkt->data, avpkt->size * 8);
 
     // check for LOAS sync word
@@ -2457,10 +2513,12 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out, int *out_size,
 
     if (!latmctx->initialized) {
         if (!avctx->extradata) {
-            *out_size = 0;
+            *got_frame_ptr = 0;
             return avpkt->size;
         } else {
-            if ((err = aac_decode_init(avctx)) < 0)
+            if ((err = decode_audio_specific_config(
+                    &latmctx->aac_ctx, avctx, &latmctx->aac_ctx.m4ac,
+                    avctx->extradata, avctx->extradata_size*8, 1)) < 0)
                 return err;
             latmctx->initialized = 1;
         }
@@ -2473,7 +2531,7 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out, int *out_size,
             return AVERROR_INVALIDDATA;
         }
 
-    if ((err = aac_decode_frame_int(avctx, out, out_size, &gb)) < 0)
+    if ((err = aac_decode_frame_int(avctx, out, got_frame_ptr, &gb)) < 0)
         return err;
 
     return muxlength;
@@ -2482,33 +2540,28 @@ av_cold static int latm_decode_init(AVCodecContext *avctx)
 {
     struct LATMContext *latmctx = avctx->priv_data;
-    int ret;
-
-    ret = aac_decode_init(avctx);
+    int ret = aac_decode_init(avctx);
 
-    if (avctx->extradata_size > 0) {
+    if (avctx->extradata_size > 0)
         latmctx->initialized = !ret;
-    } else {
-        latmctx->initialized = 0;
-    }
 
     return ret;
 }
 
 AVCodec ff_aac_decoder = {
-    "aac",
-    AVMEDIA_TYPE_AUDIO,
-    CODEC_ID_AAC,
-    sizeof(AACContext),
-    aac_decode_init,
-    NULL,
-    aac_decode_close,
-    aac_decode_frame,
+    .name = "aac",
+    .type = AVMEDIA_TYPE_AUDIO,
+    .id = CODEC_ID_AAC,
+    .priv_data_size = sizeof(AACContext),
+    .init = aac_decode_init,
+    .close = aac_decode_close,
+    .decode = aac_decode_frame,
     .long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
     .sample_fmts = (const enum AVSampleFormat[]) {
-        AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE
+        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
     },
+    .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
     .channel_layouts = aac_channel_layout,
 };
 
@@ -2527,7 +2580,8 @@ AVCodec ff_aac_latm_decoder = {
     .decode = latm_decode_frame,
     .long_name = NULL_IF_CONFIG_SMALL("AAC LATM (Advanced Audio Codec LATM syntax)"),
     .sample_fmts = (const enum AVSampleFormat[]) {
-        AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE
+        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
     },
+    .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
     .channel_layouts = aac_channel_layout,
 };