*
* AAC LATM decoder
* Copyright (c) 2008-2010 Paul Kendall <paul@kcbbs.gen.nz>
- * Copyright (c) 2010 Janne Grunau <janne-ffmpeg@jannau.net>
+ * Copyright (c) 2010 Janne Grunau <janne-libav@jannau.net>
*
* This file is part of Libav.
*
Parametric Stereo.
*/
-
+#include "libavutil/float_dsp.h"
#include "avcodec.h"
#include "internal.h"
#include "get_bits.h"
-#include "dsputil.h"
#include "fft.h"
#include "fmtconvert.h"
#include "lpc.h"
static const char overread_err[] = "Input buffer exhausted before END element found\n";
-static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
+static int count_channels(uint8_t (*layout)[3], int tags)
{
- // For PCE based channel configurations map the channels solely based on tags.
- if (!ac->m4ac.chan_config) {
- return ac->tag_che_map[type][elem_id];
- }
- // For indexed channel configurations map the channels solely based on position.
- switch (ac->m4ac.chan_config) {
- case 7:
- if (ac->tags_mapped == 3 && type == TYPE_CPE) {
- ac->tags_mapped++;
- return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2];
- }
- case 6:
- /* Some streams incorrectly code 5.1 audio as SCE[0] CPE[0] CPE[1] SCE[1]
- instead of SCE[0] CPE[0] CPE[1] LFE[0]. If we seem to have
- encountered such a stream, transfer the LFE[0] element to the SCE[1]'s mapping */
- if (ac->tags_mapped == tags_per_config[ac->m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
- ac->tags_mapped++;
- return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
- }
- case 5:
- if (ac->tags_mapped == 2 && type == TYPE_CPE) {
- ac->tags_mapped++;
- return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
- }
- case 4:
- if (ac->tags_mapped == 2 && ac->m4ac.chan_config == 4 && type == TYPE_SCE) {
- ac->tags_mapped++;
- return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
- }
- case 3:
- case 2:
- if (ac->tags_mapped == (ac->m4ac.chan_config != 2) && type == TYPE_CPE) {
- ac->tags_mapped++;
- return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][0];
- } else if (ac->m4ac.chan_config == 2) {
- return NULL;
- }
- case 1:
- if (!ac->tags_mapped && type == TYPE_SCE) {
- ac->tags_mapped++;
- return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][0];
- }
- default:
- return NULL;
- }
-}
-
-static int count_channels(enum ChannelPosition che_pos[4][MAX_ELEM_ID])
-{
- int i, type, sum = 0;
- for (i = 0; i < MAX_ELEM_ID; i++) {
- for (type = 0; type < 4; type++) {
- sum += (1 + (type == TYPE_CPE)) *
- (che_pos[type][i] != AAC_CHANNEL_OFF &&
- che_pos[type][i] != AAC_CHANNEL_CC);
- }
+ int i, sum = 0;
+ for (i = 0; i < tags; i++) {
+ int syn_ele = layout[i][0];
+ int pos = layout[i][2];
+ sum += (1 + (syn_ele == TYPE_CPE)) *
+ (pos != AAC_CHANNEL_OFF && pos != AAC_CHANNEL_CC);
}
return sum;
}
ff_aac_sbr_ctx_init(ac, &ac->che[type][id]->sbr);
}
if (type != TYPE_CCE) {
- ac->output_data[(*channels)++] = ac->che[type][id]->ch[0].ret;
+ ac->output_element[(*channels)++] = &ac->che[type][id]->ch[0];
if (type == TYPE_CPE ||
- (type == TYPE_SCE && ac->m4ac.ps == 1)) {
- ac->output_data[(*channels)++] = ac->che[type][id]->ch[1].ret;
+ (type == TYPE_SCE && ac->oc[1].m4ac.ps == 1)) {
+ ac->output_element[(*channels)++] = &ac->che[type][id]->ch[1];
}
}
} else {
return 0;
}
+/**
+ * Assign output sample buffers for the current frame.
+ *
+ * Every allocated channel element is first pointed at its own internal
+ * buffer, then an AVFrame is requested and the elements that feed the
+ * output (ac->output_element[]) are redirected into the frame's planar
+ * float data.
+ *
+ * @return 0 on success, a negative error code from ff_get_buffer() on failure
+ */
+static int frame_configure_elements(AVCodecContext *avctx)
+{
+    AACContext *ac = avctx->priv_data;
+    int type, id, ch, ret;
+
+    /* set channel pointers to internal buffers by default */
+    for (type = 0; type < 4; type++) {
+        for (id = 0; id < MAX_ELEM_ID; id++) {
+            ChannelElement *che = ac->che[type][id];
+            if (che) {
+                che->ch[0].ret = che->ch[0].ret_buf;
+                che->ch[1].ret = che->ch[1].ret_buf;
+            }
+        }
+    }
+
+    /* get output buffer */
+    av_frame_unref(ac->frame);
+    ac->frame->nb_samples = 2048; /* presumably the upper bound: 1024 samples doubled by SBR upsampling — TODO confirm the caller trims this */
+    if ((ret = ff_get_buffer(avctx, ac->frame, 0)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+
+    /* map output channel pointers to AVFrame data */
+    for (ch = 0; ch < avctx->channels; ch++) {
+        if (ac->output_element[ch])
+            ac->output_element[ch]->ret = (float *)ac->frame->extended_data[ch];
+    }
+
+    return 0;
+}
+
+/* Scratch entry used by sniff_channel_order() to pair one AAC syntax
+ * element with the output channel position it will be rendered to. */
+struct elem_to_channel {
+    uint64_t av_position;  /* AV_CH_* mask, or UINT64_MAX when not mappable */
+    uint8_t syn_ele;       /* raw data block type (TYPE_SCE/TYPE_CPE/TYPE_LFE) */
+    uint8_t elem_id;       /* element instance tag */
+    uint8_t aac_position;  /* AAC_CHANNEL_* position class */
+};
+
+/**
+ * Map a left/right pair of speaker positions onto layout_map entries.
+ *
+ * A CPE at layout_map[offset] absorbs both positions in one entry;
+ * otherwise two consecutive SCE entries are used, one per position.
+ *
+ * @return number of layout_map entries consumed (1 for a CPE, 2 for SCEs)
+ */
+static int assign_pair(struct elem_to_channel e2c_vec[MAX_ELEM_ID],
+                       uint8_t (*layout_map)[3], int offset, uint64_t left,
+                       uint64_t right, int pos)
+{
+    if (layout_map[offset][0] == TYPE_CPE) {
+        e2c_vec[offset] = (struct elem_to_channel) {
+            .av_position = left | right, .syn_ele = TYPE_CPE,
+            .elem_id = layout_map[offset ][1], .aac_position = pos };
+        return 1;
+    } else {
+        e2c_vec[offset] = (struct elem_to_channel) {
+            .av_position = left, .syn_ele = TYPE_SCE,
+            .elem_id = layout_map[offset ][1], .aac_position = pos };
+        e2c_vec[offset + 1] = (struct elem_to_channel) {
+            .av_position = right, .syn_ele = TYPE_SCE,
+            .elem_id = layout_map[offset + 1][1], .aac_position = pos };
+        return 2;
+    }
+}
+
+/**
+ * Count the output channels declared at speaker position 'pos',
+ * scanning layout_map from *current until the position changes,
+ * while validating the SCE/CPE arrangement.
+ *
+ * On success *current is advanced past the scanned tags.
+ *
+ * @return channel count at this position, or -1 if the arrangement
+ *         cannot be mapped (unpaired SCEs in an unsupported place)
+ */
+static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos, int *current) {
+    int num_pos_channels = 0;
+    int first_cpe = 0;
+    int sce_parity = 0;
+    int i;
+    for (i = *current; i < tags; i++) {
+        if (layout_map[i][2] != pos)
+            break;
+        if (layout_map[i][0] == TYPE_CPE) {
+            if (sce_parity) {
+                /* a single centre SCE may precede the first front CPE;
+                 * any other unpaired SCE before a CPE is unmappable */
+                if (pos == AAC_CHANNEL_FRONT && !first_cpe) {
+                    sce_parity = 0;
+                } else {
+                    return -1;
+                }
+            }
+            num_pos_channels += 2;
+            first_cpe = 1;
+        } else {
+            num_pos_channels++;
+            sce_parity ^= 1;
+        }
+    }
+    /* a trailing odd SCE is rejected once a front CPE was seen,
+     * and always for side channels */
+    if (sce_parity &&
+        ((pos == AAC_CHANNEL_FRONT && first_cpe) || pos == AAC_CHANNEL_SIDE))
+        return -1;
+    *current = i;
+    return num_pos_channels;
+}
+
+/**
+ * Derive an AV channel layout from the PCE-declared element list and
+ * rewrite layout_map into canonical output order.
+ *
+ * @return the AV_CH_* layout mask, or 0 when no sensible order could be
+ *         found (the caller then keeps the declared order, layout unset)
+ */
+static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
+{
+    int i, n, total_non_cc_elements;
+    struct elem_to_channel e2c_vec[4*MAX_ELEM_ID] = {{ 0 }};
+    int num_front_channels, num_side_channels, num_back_channels;
+    uint64_t layout;
+
+    if (FF_ARRAY_ELEMS(e2c_vec) < tags)
+        return 0;
+
+    /* pass 1: count channels per position, validating SCE/CPE pairing;
+     * any rejection makes the whole sniff fail */
+    i = 0;
+    num_front_channels =
+        count_paired_channels(layout_map, tags, AAC_CHANNEL_FRONT, &i);
+    if (num_front_channels < 0)
+        return 0;
+    num_side_channels =
+        count_paired_channels(layout_map, tags, AAC_CHANNEL_SIDE, &i);
+    if (num_side_channels < 0)
+        return 0;
+    num_back_channels =
+        count_paired_channels(layout_map, tags, AAC_CHANNEL_BACK, &i);
+    if (num_back_channels < 0)
+        return 0;
+
+    /* pass 2: hand out concrete AV positions — centre first, then pairs;
+     * surplus pairs get UINT64_MAX (counted but not mappable) */
+    i = 0;
+    if (num_front_channels & 1) {
+        e2c_vec[i] = (struct elem_to_channel) {
+            .av_position = AV_CH_FRONT_CENTER, .syn_ele = TYPE_SCE,
+            .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_FRONT };
+        i++;
+        num_front_channels--;
+    }
+    if (num_front_channels >= 4) {
+        i += assign_pair(e2c_vec, layout_map, i,
+                         AV_CH_FRONT_LEFT_OF_CENTER,
+                         AV_CH_FRONT_RIGHT_OF_CENTER,
+                         AAC_CHANNEL_FRONT);
+        num_front_channels -= 2;
+    }
+    if (num_front_channels >= 2) {
+        i += assign_pair(e2c_vec, layout_map, i,
+                         AV_CH_FRONT_LEFT,
+                         AV_CH_FRONT_RIGHT,
+                         AAC_CHANNEL_FRONT);
+        num_front_channels -= 2;
+    }
+    while (num_front_channels >= 2) {
+        i += assign_pair(e2c_vec, layout_map, i,
+                         UINT64_MAX,
+                         UINT64_MAX,
+                         AAC_CHANNEL_FRONT);
+        num_front_channels -= 2;
+    }
+
+    if (num_side_channels >= 2) {
+        i += assign_pair(e2c_vec, layout_map, i,
+                         AV_CH_SIDE_LEFT,
+                         AV_CH_SIDE_RIGHT,
+                         AAC_CHANNEL_FRONT); /* NOTE(review): pos is FRONT, not SIDE — looks deliberate upstream, but verify */
+        num_side_channels -= 2;
+    }
+    while (num_side_channels >= 2) {
+        i += assign_pair(e2c_vec, layout_map, i,
+                         UINT64_MAX,
+                         UINT64_MAX,
+                         AAC_CHANNEL_SIDE);
+        num_side_channels -= 2;
+    }
+
+    while (num_back_channels >= 4) {
+        i += assign_pair(e2c_vec, layout_map, i,
+                         UINT64_MAX,
+                         UINT64_MAX,
+                         AAC_CHANNEL_BACK);
+        num_back_channels -= 2;
+    }
+    if (num_back_channels >= 2) {
+        i += assign_pair(e2c_vec, layout_map, i,
+                         AV_CH_BACK_LEFT,
+                         AV_CH_BACK_RIGHT,
+                         AAC_CHANNEL_BACK);
+        num_back_channels -= 2;
+    }
+    if (num_back_channels) {
+        e2c_vec[i] = (struct elem_to_channel) {
+            .av_position = AV_CH_BACK_CENTER, .syn_ele = TYPE_SCE,
+            .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_BACK };
+        i++;
+        num_back_channels--;
+    }
+
+    /* first LFE gets the real position, any further LFEs are unmappable */
+    if (i < tags && layout_map[i][2] == AAC_CHANNEL_LFE) {
+        e2c_vec[i] = (struct elem_to_channel) {
+            .av_position = AV_CH_LOW_FREQUENCY, .syn_ele = TYPE_LFE,
+            .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_LFE };
+        i++;
+    }
+    while (i < tags && layout_map[i][2] == AAC_CHANNEL_LFE) {
+        e2c_vec[i] = (struct elem_to_channel) {
+            .av_position = UINT64_MAX, .syn_ele = TYPE_LFE,
+            .elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_LFE };
+        i++;
+    }
+
+    // Must choose a stable sort
+    /* bubble sort by av_position; stability keeps elements with equal
+     * (or unmappable) positions in their declared order */
+    total_non_cc_elements = n = i;
+    do {
+        int next_n = 0;
+        for (i = 1; i < n; i++) {
+            if (e2c_vec[i-1].av_position > e2c_vec[i].av_position) {
+                FFSWAP(struct elem_to_channel, e2c_vec[i-1], e2c_vec[i]);
+                next_n = i;
+            }
+        }
+        n = next_n;
+    } while (n > 0);
+
+    /* write the reordered elements back and accumulate the layout mask */
+    layout = 0;
+    for (i = 0; i < total_non_cc_elements; i++) {
+        layout_map[i][0] = e2c_vec[i].syn_ele;
+        layout_map[i][1] = e2c_vec[i].elem_id;
+        layout_map[i][2] = e2c_vec[i].aac_position;
+        if (e2c_vec[i].av_position != UINT64_MAX) {
+            layout |= e2c_vec[i].av_position;
+        }
+    }
+
+    return layout;
+}
+
+/**
+ * Save current output configuration if and only if it has been locked.
+ */
+static void push_output_configuration(AACContext *ac) {
+    if (ac->oc[1].status == OC_LOCKED) {
+        ac->oc[0] = ac->oc[1]; /* keep a copy we can roll back to */
+    }
+    /* an unlocked config is simply discarded */
+    ac->oc[1].status = OC_NONE;
+}
+
+/**
+ * Restore the previous output configuration if and only if the current
+ * configuration is unlocked.
+ */
+static void pop_output_configuration(AACContext *ac) {
+    if (ac->oc[1].status != OC_LOCKED && ac->oc[0].status != OC_NONE) {
+        ac->oc[1] = ac->oc[0];
+        /* keep the codec context in sync with the restored configuration */
+        ac->avctx->channels = ac->oc[1].channels;
+        ac->avctx->channel_layout = ac->oc[1].channel_layout;
+    }
+}
+
/**
* Configure output channel order based on the current program configuration element.
*
- * @param che_pos current channel position configuration
- * @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
- *
* @return Returns error status. 0 - OK, !0 - error
*/
-static av_cold int output_configure(AACContext *ac,
- enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
- int channel_config, enum OCStatus oc_type)
+static int output_configure(AACContext *ac,
+ uint8_t layout_map[MAX_ELEM_ID*4][3], int tags,
+ enum OCStatus oc_type, int get_new_frame)
{
AVCodecContext *avctx = ac->avctx;
- int i, type, channels = 0, ret;
-
- if (new_che_pos)
- memcpy(ac->che_pos, new_che_pos, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
-
- if (channel_config) {
- for (i = 0; i < tags_per_config[channel_config]; i++) {
- int id = aac_channel_layout_map[channel_config - 1][i][1];
- type = aac_channel_layout_map[channel_config - 1][i][0];
- if ((ret = che_configure(ac, ac->che_pos[type][id],
- type, id, &channels)))
- return ret;
+ int i, channels = 0, ret;
+ uint64_t layout = 0;
+
+ if (ac->oc[1].layout_map != layout_map) {
+ memcpy(ac->oc[1].layout_map, layout_map, tags * sizeof(layout_map[0]));
+ ac->oc[1].layout_map_tags = tags;
+ }
+
+ // Try to sniff a reasonable channel order, otherwise output the
+ // channels in the order the PCE declared them.
+ if (avctx->request_channel_layout != AV_CH_LAYOUT_NATIVE)
+ layout = sniff_channel_order(layout_map, tags);
+ for (i = 0; i < tags; i++) {
+ int type = layout_map[i][0];
+ int id = layout_map[i][1];
+ int position = layout_map[i][2];
+ // Allocate or free elements depending on if they are in the
+ // current program configuration.
+ ret = che_configure(ac, position, type, id, &channels);
+ if (ret < 0)
+ return ret;
+ }
+ if (ac->oc[1].m4ac.ps == 1 && channels == 2) {
+ if (layout == AV_CH_FRONT_CENTER) {
+ layout = AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT;
+ } else {
+ layout = 0;
}
+ }
- memset(ac->tag_che_map, 0, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
+ memcpy(ac->tag_che_map, ac->che, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
+ avctx->channel_layout = ac->oc[1].channel_layout = layout;
+ avctx->channels = ac->oc[1].channels = channels;
+ ac->oc[1].status = oc_type;
- avctx->channel_layout = aac_channel_layout[channel_config - 1];
- } else {
- /* Allocate or free elements depending on if they are in the
- * current program configuration.
- *
- * Set up default 1:1 output mapping.
- */
+ if (get_new_frame) {
+ if ((ret = frame_configure_elements(ac->avctx)) < 0)
+ return ret;
+ }
- for (i = 0; i < MAX_ELEM_ID; i++) {
- for (type = 0; type < 4; type++) {
- if ((ret = che_configure(ac, ac->che_pos[type][i],
- type, i, &channels)))
- return ret;
- }
- }
+ return 0;
+}
- memcpy(ac->tag_che_map, ac->che, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
+/**
+ * Set up channel positions based on a default channel configuration
+ * as specified in table 1.17.
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int set_default_channel_config(AVCodecContext *avctx,
+                                      uint8_t (*layout_map)[3],
+                                      int *tags,
+                                      int channel_config)
+{
+    /* only the indexed configurations 1..7 of table 1.17 are defined */
+    if (channel_config < 1 || channel_config > 7) {
+        av_log(avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n",
+               channel_config);
+        return -1;
+    }
+    /* copy the canonical element list for this configuration */
+    *tags = tags_per_config[channel_config];
+    memcpy(layout_map, aac_channel_layout_map[channel_config-1], *tags * sizeof(*layout_map));
+    return 0;
+}
- avctx->channel_layout = 0;
+/* Resolve the channel element a decoded syntax element maps to,
+ * reconfiguring the output on mono<->stereo signalling mismatches. */
+static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
+{
+    // For PCE based channel configurations map the channels solely based on tags.
+    if (!ac->oc[1].m4ac.chan_config) {
+        return ac->tag_che_map[type][elem_id];
    }
+    // Allow single CPE stereo files to be signalled with mono configuration.
+    if (!ac->tags_mapped && type == TYPE_CPE && ac->oc[1].m4ac.chan_config == 1) {
+        uint8_t layout_map[MAX_ELEM_ID*4][3];
+        int layout_map_tags;
+        push_output_configuration(ac);
-    avctx->channels = channels;
+        if (set_default_channel_config(ac->avctx, layout_map, &layout_map_tags,
+                                       2) < 0)
+            return NULL;
+        if (output_configure(ac, layout_map, layout_map_tags,
+                             OC_TRIAL_FRAME, 1) < 0)
+            return NULL;
-    ac->output_configured = oc_type;
+        ac->oc[1].m4ac.chan_config = 2;
+        ac->oc[1].m4ac.ps = 0; /* a CPE cannot carry Parametric Stereo */
+    }
+    // And vice-versa
+    if (!ac->tags_mapped && type == TYPE_SCE && ac->oc[1].m4ac.chan_config == 2) {
+        uint8_t layout_map[MAX_ELEM_ID*4][3];
+        int layout_map_tags;
+        push_output_configuration(ac);
-    return 0;
+        if (set_default_channel_config(ac->avctx, layout_map, &layout_map_tags,
+                                       1) < 0)
+            return NULL;
+        if (output_configure(ac, layout_map, layout_map_tags,
+                             OC_TRIAL_FRAME, 1) < 0)
+            return NULL;
+
+        ac->oc[1].m4ac.chan_config = 1;
+        if (ac->oc[1].m4ac.sbr)
+            ac->oc[1].m4ac.ps = -1; /* implicit PS possible again with SBR mono */
+    }
+    // For indexed channel configurations map the channels solely based on position.
+    /* deliberate case fallthrough: each configuration extends the mapping
+     * of the smaller ones, so matching continues down the chain */
+    switch (ac->oc[1].m4ac.chan_config) {
+    case 7:
+        if (ac->tags_mapped == 3 && type == TYPE_CPE) {
+            ac->tags_mapped++;
+            return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2];
+        }
+    case 6:
+        /* Some streams incorrectly code 5.1 audio as SCE[0] CPE[0] CPE[1] SCE[1]
+           instead of SCE[0] CPE[0] CPE[1] LFE[0]. If we seem to have
+           encountered such a stream, transfer the LFE[0] element to the SCE[1]'s mapping */
+        if (ac->tags_mapped == tags_per_config[ac->oc[1].m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
+            ac->tags_mapped++;
+            return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
+        }
+    case 5:
+        if (ac->tags_mapped == 2 && type == TYPE_CPE) {
+            ac->tags_mapped++;
+            return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
+        }
+    case 4:
+        if (ac->tags_mapped == 2 && ac->oc[1].m4ac.chan_config == 4 && type == TYPE_SCE) {
+            ac->tags_mapped++;
+            return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
+        }
+    case 3:
+    case 2:
+        if (ac->tags_mapped == (ac->oc[1].m4ac.chan_config != 2) && type == TYPE_CPE) {
+            ac->tags_mapped++;
+            return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][0];
+        } else if (ac->oc[1].m4ac.chan_config == 2) {
+            return NULL;
+        }
+    case 1:
+        if (!ac->tags_mapped && type == TYPE_SCE) {
+            ac->tags_mapped++;
+            return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][0];
+        }
+    default:
+        return NULL;
+    }
}
/**
* Decode an array of 4 bit element IDs, optionally interleaved with a stereo/mono switching bit.
*
- * @param cpe_map Stereo (Channel Pair Element) map, NULL if stereo bit is not present.
- * @param sce_map mono (Single Channel Element) map
* @param type speaker type/position for these channels
*/
-static void decode_channel_map(enum ChannelPosition *cpe_map,
-                               enum ChannelPosition *sce_map,
+static void decode_channel_map(uint8_t layout_map[][3],
                                enum ChannelPosition type,
                                GetBitContext *gb, int n)
{
    while (n--) {
-        enum ChannelPosition *map = cpe_map && get_bits1(gb) ? cpe_map : sce_map; // stereo or mono map
-        map[get_bits(gb, 4)] = type;
+        enum RawDataBlockType syn_ele;
+        switch (type) {
+        case AAC_CHANNEL_FRONT:
+        case AAC_CHANNEL_BACK:
+        case AAC_CHANNEL_SIDE:
+            /* element_is_cpe bit maps straight onto the element type;
+             * presumably relies on TYPE_SCE == 0 / TYPE_CPE == 1 — verify */
+            syn_ele = get_bits1(gb);
+            break;
+        case AAC_CHANNEL_CC:
+            skip_bits1(gb); /* cc_element_is_ind_sw, unused here */
+            syn_ele = TYPE_CCE;
+            break;
+        case AAC_CHANNEL_LFE:
+            syn_ele = TYPE_LFE;
+            break;
+        }
+        /* NOTE(review): no default case — syn_ele stays uninitialized for
+         * any other ChannelPosition; callers only pass the five handled ones */
+        layout_map[0][0] = syn_ele;
+        layout_map[0][1] = get_bits(gb, 4); /* element instance tag */
+        layout_map[0][2] = type;
+        layout_map++;
    }
}
/**
* Decode program configuration element; reference: table 4.2.
*
- * @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
- *
* @return Returns error status. 0 - OK, !0 - error
*/
static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
- enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
+ uint8_t (*layout_map)[3],
GetBitContext *gb)
{
int num_front, num_side, num_back, num_lfe, num_assoc_data, num_cc, sampling_index;
int comment_len;
+ int tags;
skip_bits(gb, 2); // object_type
if (get_bits1(gb))
skip_bits(gb, 3); // mixdown_coeff_index and pseudo_surround
- decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_FRONT, gb, num_front);
- decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_SIDE, gb, num_side );
- decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_BACK, gb, num_back );
- decode_channel_map(NULL, new_che_pos[TYPE_LFE], AAC_CHANNEL_LFE, gb, num_lfe );
+ decode_channel_map(layout_map , AAC_CHANNEL_FRONT, gb, num_front);
+ tags = num_front;
+ decode_channel_map(layout_map + tags, AAC_CHANNEL_SIDE, gb, num_side);
+ tags += num_side;
+ decode_channel_map(layout_map + tags, AAC_CHANNEL_BACK, gb, num_back);
+ tags += num_back;
+ decode_channel_map(layout_map + tags, AAC_CHANNEL_LFE, gb, num_lfe);
+ tags += num_lfe;
skip_bits_long(gb, 4 * num_assoc_data);
- decode_channel_map(new_che_pos[TYPE_CCE], new_che_pos[TYPE_CCE], AAC_CHANNEL_CC, gb, num_cc );
+ decode_channel_map(layout_map + tags, AAC_CHANNEL_CC, gb, num_cc);
+ tags += num_cc;
align_get_bits(gb);
return -1;
}
skip_bits_long(gb, comment_len);
- return 0;
-}
-
-/**
- * Set up channel positions based on a default channel configuration
- * as specified in table 1.17.
- *
- * @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
- *
- * @return Returns error status. 0 - OK, !0 - error
- */
-static av_cold int set_default_channel_config(AVCodecContext *avctx,
- enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
- int channel_config)
-{
- if (channel_config < 1 || channel_config > 7) {
- av_log(avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n",
- channel_config);
- return -1;
- }
-
- /* default channel configurations:
- *
- * 1ch : front center (mono)
- * 2ch : L + R (stereo)
- * 3ch : front center + L + R
- * 4ch : front center + L + R + back center
- * 5ch : front center + L + R + back stereo
- * 6ch : front center + L + R + back stereo + LFE
- * 7ch : front center + L + R + outer front left + outer front right + back stereo + LFE
- */
-
- if (channel_config != 2)
- new_che_pos[TYPE_SCE][0] = AAC_CHANNEL_FRONT; // front center (or mono)
- if (channel_config > 1)
- new_che_pos[TYPE_CPE][0] = AAC_CHANNEL_FRONT; // L + R (or stereo)
- if (channel_config == 4)
- new_che_pos[TYPE_SCE][1] = AAC_CHANNEL_BACK; // back center
- if (channel_config > 4)
- new_che_pos[TYPE_CPE][(channel_config == 7) + 1]
- = AAC_CHANNEL_BACK; // back stereo
- if (channel_config > 5)
- new_che_pos[TYPE_LFE][0] = AAC_CHANNEL_LFE; // LFE
- if (channel_config == 7)
- new_che_pos[TYPE_CPE][1] = AAC_CHANNEL_FRONT; // outer front left + outer front right
-
- return 0;
+ return tags;
}
/**
MPEG4AudioConfig *m4ac,
int channel_config)
{
- enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
int extension_flag, ret;
+ uint8_t layout_map[MAX_ELEM_ID*4][3];
+ int tags = 0;
if (get_bits1(gb)) { // frameLengthFlag
- av_log_missing_feature(avctx, "960/120 MDCT window is", 1);
- return -1;
+ avpriv_request_sample(avctx, "960/120 MDCT window");
+ return AVERROR_PATCHWELCOME;
}
if (get_bits1(gb)) // dependsOnCoreCoder
m4ac->object_type == AOT_ER_AAC_SCALABLE)
skip_bits(gb, 3); // layerNr
- memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
if (channel_config == 0) {
skip_bits(gb, 4); // element_instance_tag
- if ((ret = decode_pce(avctx, m4ac, new_che_pos, gb)))
- return ret;
+ tags = decode_pce(avctx, m4ac, layout_map, gb);
+ if (tags < 0)
+ return tags;
} else {
- if ((ret = set_default_channel_config(avctx, new_che_pos, channel_config)))
+ if ((ret = set_default_channel_config(avctx, layout_map, &tags, channel_config)))
return ret;
}
- if (count_channels(new_che_pos) > 1) {
+ if (count_channels(layout_map, tags) > 1) {
m4ac->ps = 0;
} else if (m4ac->sbr == 1 && m4ac->ps == -1)
m4ac->ps = 1;
- if (ac && (ret = output_configure(ac, new_che_pos, channel_config, OC_GLOBAL_HDR)))
+ if (ac && (ret = output_configure(ac, layout_map, tags, OC_GLOBAL_HDR, 0)))
return ret;
if (extension_flag) {
*/
static av_always_inline int lcg_random(int previous_val)
{
-    return previous_val * 1664525 + 1013904223;
+    /* Numerical Recipes LCG; the multiply is done in unsigned arithmetic
+     * to avoid signed-overflow undefined behavior, and the union converts
+     * back to int without an implementation-defined narrowing cast. */
+    union { unsigned u; int s; } v = { previous_val * 1664525u + 1013904223 };
+    return v.s;
}
static av_always_inline void reset_predict_state(PredictorState *ps)
static av_cold int aac_decode_init(AVCodecContext *avctx)
{
AACContext *ac = avctx->priv_data;
- float output_scale_factor;
ac->avctx = avctx;
- ac->m4ac.sample_rate = avctx->sample_rate;
+ ac->oc[1].m4ac.sample_rate = avctx->sample_rate;
+
+ avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
if (avctx->extradata_size > 0) {
- if (decode_audio_specific_config(ac, ac->avctx, &ac->m4ac,
+ if (decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
avctx->extradata,
avctx->extradata_size*8, 1) < 0)
return -1;
} else {
int sr, i;
- enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
+ uint8_t layout_map[MAX_ELEM_ID*4][3];
+ int layout_map_tags;
sr = sample_rate_idx(avctx->sample_rate);
- ac->m4ac.sampling_index = sr;
- ac->m4ac.channels = avctx->channels;
- ac->m4ac.sbr = -1;
- ac->m4ac.ps = -1;
+ ac->oc[1].m4ac.sampling_index = sr;
+ ac->oc[1].m4ac.channels = avctx->channels;
+ ac->oc[1].m4ac.sbr = -1;
+ ac->oc[1].m4ac.ps = -1;
for (i = 0; i < FF_ARRAY_ELEMS(ff_mpeg4audio_channels); i++)
if (ff_mpeg4audio_channels[i] == avctx->channels)
if (i == FF_ARRAY_ELEMS(ff_mpeg4audio_channels)) {
i = 0;
}
- ac->m4ac.chan_config = i;
+ ac->oc[1].m4ac.chan_config = i;
- if (ac->m4ac.chan_config) {
- int ret = set_default_channel_config(avctx, new_che_pos, ac->m4ac.chan_config);
+ if (ac->oc[1].m4ac.chan_config) {
+ int ret = set_default_channel_config(avctx, layout_map,
+ &layout_map_tags, ac->oc[1].m4ac.chan_config);
if (!ret)
- output_configure(ac, new_che_pos, ac->m4ac.chan_config, OC_GLOBAL_HDR);
+ output_configure(ac, layout_map, layout_map_tags,
+ OC_GLOBAL_HDR, 0);
else if (avctx->err_recognition & AV_EF_EXPLODE)
return AVERROR_INVALIDDATA;
}
}
- if (avctx->request_sample_fmt == AV_SAMPLE_FMT_FLT) {
- avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
- output_scale_factor = 1.0 / 32768.0;
- } else {
- avctx->sample_fmt = AV_SAMPLE_FMT_S16;
- output_scale_factor = 1.0;
- }
-
AAC_INIT_VLC_STATIC( 0, 304);
AAC_INIT_VLC_STATIC( 1, 270);
AAC_INIT_VLC_STATIC( 2, 550);
ff_aac_sbr_init();
- dsputil_init(&ac->dsp, avctx);
ff_fmt_convert_init(&ac->fmt_conv, avctx);
+ avpriv_float_dsp_init(&ac->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
ac->random_state = 0x1f2e3d4c;
ff_aac_scalefactor_code, sizeof(ff_aac_scalefactor_code[0]), sizeof(ff_aac_scalefactor_code[0]),
352);
- ff_mdct_init(&ac->mdct, 11, 1, output_scale_factor/1024.0);
- ff_mdct_init(&ac->mdct_small, 8, 1, output_scale_factor/128.0);
- ff_mdct_init(&ac->mdct_ltp, 11, 0, -2.0/output_scale_factor);
+ ff_mdct_init(&ac->mdct, 11, 1, 1.0 / (32768.0 * 1024.0));
+ ff_mdct_init(&ac->mdct_small, 8, 1, 1.0 / (32768.0 * 128.0));
+ ff_mdct_init(&ac->mdct_ltp, 11, 0, -2.0 * 32768.0);
// window initialization
ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
cbrt_tableinit();
- avcodec_get_frame_defaults(&ac->frame);
- avctx->coded_frame = &ac->frame;
-
return 0;
}
return -1;
}
}
- for (sfb = 0; sfb < FFMIN(ics->max_sfb, ff_aac_pred_sfb_max[ac->m4ac.sampling_index]); sfb++) {
+ for (sfb = 0; sfb < FFMIN(ics->max_sfb, ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index]); sfb++) {
ics->prediction_used[sfb] = get_bits1(gb);
}
return 0;
/**
* Decode Long Term Prediction data; reference: table 4.xx.
*/
-static void decode_ltp(AACContext *ac, LongTermPrediction *ltp,
+static void decode_ltp(LongTermPrediction *ltp,
GetBitContext *gb, uint8_t max_sfb)
{
int sfb;
}
}
ics->num_windows = 8;
- ics->swb_offset = ff_swb_offset_128[ac->m4ac.sampling_index];
- ics->num_swb = ff_aac_num_swb_128[ac->m4ac.sampling_index];
- ics->tns_max_bands = ff_tns_max_bands_128[ac->m4ac.sampling_index];
+ ics->swb_offset = ff_swb_offset_128[ac->oc[1].m4ac.sampling_index];
+ ics->num_swb = ff_aac_num_swb_128[ac->oc[1].m4ac.sampling_index];
+ ics->tns_max_bands = ff_tns_max_bands_128[ac->oc[1].m4ac.sampling_index];
ics->predictor_present = 0;
} else {
ics->max_sfb = get_bits(gb, 6);
ics->num_windows = 1;
- ics->swb_offset = ff_swb_offset_1024[ac->m4ac.sampling_index];
- ics->num_swb = ff_aac_num_swb_1024[ac->m4ac.sampling_index];
- ics->tns_max_bands = ff_tns_max_bands_1024[ac->m4ac.sampling_index];
+ ics->swb_offset = ff_swb_offset_1024[ac->oc[1].m4ac.sampling_index];
+ ics->num_swb = ff_aac_num_swb_1024[ac->oc[1].m4ac.sampling_index];
+ ics->tns_max_bands = ff_tns_max_bands_1024[ac->oc[1].m4ac.sampling_index];
ics->predictor_present = get_bits1(gb);
ics->predictor_reset_group = 0;
if (ics->predictor_present) {
- if (ac->m4ac.object_type == AOT_AAC_MAIN) {
+ if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN) {
if (decode_prediction(ac, ics, gb)) {
return AVERROR_INVALIDDATA;
}
- } else if (ac->m4ac.object_type == AOT_AAC_LC) {
+ } else if (ac->oc[1].m4ac.object_type == AOT_AAC_LC) {
av_log(ac->avctx, AV_LOG_ERROR, "Prediction is not allowed in AAC-LC.\n");
return AVERROR_INVALIDDATA;
} else {
if ((ics->ltp.present = get_bits(gb, 1)))
- decode_ltp(ac, &ics->ltp, gb, ics->max_sfb);
+ decode_ltp(&ics->ltp, gb, ics->max_sfb);
}
}
}
av_log(ac->avctx, AV_LOG_ERROR, "invalid band type\n");
return -1;
}
- while ((sect_len_incr = get_bits(gb, bits)) == (1 << bits) - 1)
+ do {
+ sect_len_incr = get_bits(gb, bits);
sect_end += sect_len_incr;
- sect_end += sect_len_incr;
- if (get_bits_left(gb) < 0) {
- av_log(ac->avctx, AV_LOG_ERROR, overread_err);
- return -1;
- }
- if (sect_end > ics->max_sfb) {
- av_log(ac->avctx, AV_LOG_ERROR,
- "Number of bands (%d) exceeds limit (%d).\n",
- sect_end, ics->max_sfb);
- return -1;
- }
+ if (get_bits_left(gb) < 0) {
+ av_log(ac->avctx, AV_LOG_ERROR, overread_err);
+ return -1;
+ }
+ if (sect_end > ics->max_sfb) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "Number of bands (%d) exceeds limit (%d).\n",
+ sect_end, ics->max_sfb);
+ return -1;
+ }
+ } while (sect_len_incr == (1 << bits) - 1);
for (; k < sect_end; k++) {
band_type [idx] = sect_band_type;
band_type_run_end[idx++] = sect_end;
int offset[3] = { global_gain, global_gain - 90, 0 };
int clipped_offset;
int noise_flag = 1;
- static const char *sf_str[3] = { "Global gain", "Noise gain", "Intensity stereo position" };
for (g = 0; g < ics->num_window_groups; g++) {
for (i = 0; i < ics->max_sfb;) {
int run_end = band_type_run_end[idx];
offset[2] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
clipped_offset = av_clip(offset[2], -155, 100);
if (offset[2] != clipped_offset) {
- av_log_ask_for_sample(ac->avctx, "Intensity stereo "
- "position clipped (%d -> %d).\nIf you heard an "
- "audible artifact, there may be a bug in the "
- "decoder. ", offset[2], clipped_offset);
+ avpriv_request_sample(ac->avctx,
+ "If you heard an audible artifact, there may be a bug in the decoder. "
+ "Clipped intensity stereo position (%d -> %d)",
+ offset[2], clipped_offset);
}
sf[idx] = ff_aac_pow2sf_tab[-clipped_offset + POW_SF2_ZERO];
}
offset[1] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
clipped_offset = av_clip(offset[1], -100, 155);
if (offset[1] != clipped_offset) {
- av_log_ask_for_sample(ac->avctx, "Noise gain clipped "
- "(%d -> %d).\nIf you heard an audible "
- "artifact, there may be a bug in the decoder. ",
- offset[1], clipped_offset);
+ avpriv_request_sample(ac->avctx,
+ "If you heard an audible artifact, there may be a bug in the decoder. "
+ "Clipped noise gain (%d -> %d)",
+ offset[1], clipped_offset);
}
sf[idx] = -ff_aac_pow2sf_tab[clipped_offset + POW_SF2_ZERO];
}
offset[0] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
if (offset[0] > 255U) {
av_log(ac->avctx, AV_LOG_ERROR,
- "%s (%d) out of range.\n", sf_str[0], offset[0]);
+ "Scalefactor (%d) out of range.\n", offset[0]);
return -1;
}
sf[idx] = -ff_aac_pow2sf_tab[offset[0] - 100 + POW_SF2_ZERO];
{
int w, filt, i, coef_len, coef_res, coef_compress;
const int is8 = ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE;
- const int tns_max_order = is8 ? 7 : ac->m4ac.object_type == AOT_AAC_MAIN ? 20 : 12;
+ const int tns_max_order = is8 ? 7 : ac->oc[1].m4ac.object_type == AOT_AAC_MAIN ? 20 : 12;
for (w = 0; w < ics->num_windows; w++) {
if ((tns->n_filt[w] = get_bits(gb, 2 - is8))) {
coef_res = get_bits1(gb);
t.i = s.i ^ (sign & 1U<<31);
*dst++ = v[idx>>4 & 3] * t.f;
- sign <<= nz & 1; nz >>= 1;
+ sign <<= nz & 1;
t.i = s.i ^ (sign & 1U<<31);
*dst++ = v[idx>>6 & 3] * t.f;
cfo[k] = ac->random_state;
}
- band_energy = ac->dsp.scalarproduct_float(cfo, cfo, off_len);
+ band_energy = ac->fdsp.scalarproduct_float(cfo, cfo, off_len);
scale = sf[idx] / sqrtf(band_energy);
- ac->dsp.vector_fmul_scalar(cfo, cfo, scale, off_len);
+ ac->fdsp.vector_fmul_scalar(cfo, cfo, scale, off_len);
}
} else {
const float *vq = ff_aac_codebook_vector_vals[cbt_m1];
}
} while (len -= 2);
- ac->dsp.vector_fmul_scalar(cfo, cfo, sf[idx], off_len);
+ ac->fdsp.vector_fmul_scalar(cfo, cfo, sf[idx], off_len);
}
}
}
if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
- for (sfb = 0; sfb < ff_aac_pred_sfb_max[ac->m4ac.sampling_index]; sfb++) {
+ for (sfb = 0; sfb < ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index]; sfb++) {
for (k = sce->ics.swb_offset[sfb]; k < sce->ics.swb_offset[sfb + 1]; k++) {
predict(&sce->predictor_state[k], &sce->coeffs[k],
sce->ics.predictor_present && sce->ics.prediction_used[sfb]);
if ((tns->present = get_bits1(gb)) && decode_tns(ac, tns, gb, ics))
return -1;
if (get_bits1(gb)) {
- av_log_missing_feature(ac->avctx, "SSR", 1);
- return -1;
+ avpriv_request_sample(ac->avctx, "SSR");
+ return AVERROR_PATCHWELCOME;
}
}
if (decode_spectrum_and_dequant(ac, out, gb, sce->sf, pulse_present, &pulse, ics, sce->band_type) < 0)
return -1;
- if (ac->m4ac.object_type == AOT_AAC_MAIN && !common_window)
+ if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN && !common_window)
apply_prediction(ac, sce);
return 0;
if (cpe->ms_mask[idx] &&
cpe->ch[0].band_type[idx] < NOISE_BT && cpe->ch[1].band_type[idx] < NOISE_BT) {
for (group = 0; group < ics->group_len[g]; group++) {
- ac->dsp.butterflies_float(ch0 + group * 128 + offsets[i],
- ch1 + group * 128 + offsets[i],
- offsets[i+1] - offsets[i]);
+ ac->fdsp.butterflies_float(ch0 + group * 128 + offsets[i],
+ ch1 + group * 128 + offsets[i],
+ offsets[i+1] - offsets[i]);
}
}
}
c *= 1 - 2 * cpe->ms_mask[idx];
scale = c * sce1->sf[idx];
for (group = 0; group < ics->group_len[g]; group++)
- ac->dsp.vector_fmul_scalar(coef1 + group * 128 + offsets[i],
- coef0 + group * 128 + offsets[i],
- scale,
- offsets[i + 1] - offsets[i]);
+ ac->fdsp.vector_fmul_scalar(coef1 + group * 128 + offsets[i],
+ coef0 + group * 128 + offsets[i],
+ scale,
+ offsets[i + 1] - offsets[i]);
}
} else {
int bt_run_end = sce1->band_type_run_end[idx];
i = cpe->ch[1].ics.use_kb_window[0];
cpe->ch[1].ics = cpe->ch[0].ics;
cpe->ch[1].ics.use_kb_window[1] = i;
- if (cpe->ch[1].ics.predictor_present && (ac->m4ac.object_type != AOT_AAC_MAIN))
+ if (cpe->ch[1].ics.predictor_present && (ac->oc[1].m4ac.object_type != AOT_AAC_MAIN))
if ((cpe->ch[1].ics.ltp.present = get_bits(gb, 1)))
- decode_ltp(ac, &cpe->ch[1].ics.ltp, gb, cpe->ch[1].ics.max_sfb);
+ decode_ltp(&cpe->ch[1].ics.ltp, gb, cpe->ch[1].ics.max_sfb);
ms_present = get_bits(gb, 2);
if (ms_present == 3) {
av_log(ac->avctx, AV_LOG_ERROR, "ms_present = 3 is reserved.\n");
if (common_window) {
if (ms_present)
apply_mid_side_stereo(ac, cpe);
- if (ac->m4ac.object_type == AOT_AAC_MAIN) {
+ if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN) {
apply_prediction(ac, &cpe->ch[0]);
apply_prediction(ac, &cpe->ch[1]);
}
/**
* Decode dynamic range information; reference: table 4.52.
*
- * @param cnt length of TYPE_FIL syntactic element in bytes
- *
* @return Returns number of bytes consumed.
*/
static int decode_dynamic_range(DynamicRangeControl *che_drc,
- GetBitContext *gb, int cnt)
+ GetBitContext *gb)
{
int n = 1;
int drc_num_bands = 1;
if (!che) {
av_log(ac->avctx, AV_LOG_ERROR, "SBR was found before the first channel element.\n");
return res;
- } else if (!ac->m4ac.sbr) {
+ } else if (!ac->oc[1].m4ac.sbr) {
av_log(ac->avctx, AV_LOG_ERROR, "SBR signaled to be not-present but was found in the bitstream.\n");
skip_bits_long(gb, 8 * cnt - 4);
return res;
- } else if (ac->m4ac.sbr == -1 && ac->output_configured == OC_LOCKED) {
+ } else if (ac->oc[1].m4ac.sbr == -1 && ac->oc[1].status == OC_LOCKED) {
av_log(ac->avctx, AV_LOG_ERROR, "Implicit SBR was found with a first occurrence after the first frame.\n");
skip_bits_long(gb, 8 * cnt - 4);
return res;
- } else if (ac->m4ac.ps == -1 && ac->output_configured < OC_LOCKED && ac->avctx->channels == 1) {
- ac->m4ac.sbr = 1;
- ac->m4ac.ps = 1;
- output_configure(ac, NULL, ac->m4ac.chan_config, ac->output_configured);
+ } else if (ac->oc[1].m4ac.ps == -1 && ac->oc[1].status < OC_LOCKED && ac->avctx->channels == 1) {
+ ac->oc[1].m4ac.sbr = 1;
+ ac->oc[1].m4ac.ps = 1;
+ output_configure(ac, ac->oc[1].layout_map, ac->oc[1].layout_map_tags,
+ ac->oc[1].status, 1);
} else {
- ac->m4ac.sbr = 1;
+ ac->oc[1].m4ac.sbr = 1;
}
res = ff_decode_sbr_extension(ac, &che->sbr, gb, crc_flag, cnt, elem_type);
break;
case EXT_DYNAMIC_RANGE:
- res = decode_dynamic_range(&ac->che_drc, gb, cnt);
+ res = decode_dynamic_range(&ac->che_drc, gb);
break;
case EXT_FILL:
case EXT_FILL_DATA:
int w, filt, m, i;
int bottom, top, order, start, end, size, inc;
float lpc[TNS_MAX_ORDER];
- float tmp[TNS_MAX_ORDER];
+ float tmp[TNS_MAX_ORDER + 1];
for (w = 0; w < ics->num_windows; w++) {
bottom = ics->num_swb;
const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
if (ics->window_sequence[0] != LONG_STOP_SEQUENCE) {
- ac->dsp.vector_fmul(in, in, lwindow_prev, 1024);
+ ac->fdsp.vector_fmul(in, in, lwindow_prev, 1024);
} else {
memset(in, 0, 448 * sizeof(float));
- ac->dsp.vector_fmul(in + 448, in + 448, swindow_prev, 128);
+ ac->fdsp.vector_fmul(in + 448, in + 448, swindow_prev, 128);
}
if (ics->window_sequence[0] != LONG_START_SEQUENCE) {
- ac->dsp.vector_fmul_reverse(in + 1024, in + 1024, lwindow, 1024);
+ ac->fdsp.vector_fmul_reverse(in + 1024, in + 1024, lwindow, 1024);
} else {
- ac->dsp.vector_fmul_reverse(in + 1024 + 448, in + 1024 + 448, swindow, 128);
+ ac->fdsp.vector_fmul_reverse(in + 1024 + 448, in + 1024 + 448, swindow, 128);
memset(in + 1024 + 576, 0, 448 * sizeof(float));
}
ac->mdct_ltp.mdct_calc(&ac->mdct_ltp, out, in);
if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
memcpy(saved_ltp, saved, 512 * sizeof(float));
memset(saved_ltp + 576, 0, 448 * sizeof(float));
- ac->dsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
+ ac->fdsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
for (i = 0; i < 64; i++)
saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * swindow[63 - i];
} else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
memcpy(saved_ltp, ac->buf_mdct + 512, 448 * sizeof(float));
memset(saved_ltp + 576, 0, 448 * sizeof(float));
- ac->dsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
+ ac->fdsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
for (i = 0; i < 64; i++)
saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * swindow[63 - i];
} else { // LONG_STOP or ONLY_LONG
- ac->dsp.vector_fmul_reverse(saved_ltp, ac->buf_mdct + 512, &lwindow[512], 512);
+ ac->fdsp.vector_fmul_reverse(saved_ltp, ac->buf_mdct + 512, &lwindow[512], 512);
for (i = 0; i < 512; i++)
saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * lwindow[511 - i];
}
*/
if ((ics->window_sequence[1] == ONLY_LONG_SEQUENCE || ics->window_sequence[1] == LONG_STOP_SEQUENCE) &&
(ics->window_sequence[0] == ONLY_LONG_SEQUENCE || ics->window_sequence[0] == LONG_START_SEQUENCE)) {
- ac->dsp.vector_fmul_window( out, saved, buf, lwindow_prev, 512);
+ ac->fdsp.vector_fmul_window( out, saved, buf, lwindow_prev, 512);
} else {
- memcpy( out, saved, 448 * sizeof(float));
+ memcpy( out, saved, 448 * sizeof(float));
if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
- ac->dsp.vector_fmul_window(out + 448 + 0*128, saved + 448, buf + 0*128, swindow_prev, 64);
- ac->dsp.vector_fmul_window(out + 448 + 1*128, buf + 0*128 + 64, buf + 1*128, swindow, 64);
- ac->dsp.vector_fmul_window(out + 448 + 2*128, buf + 1*128 + 64, buf + 2*128, swindow, 64);
- ac->dsp.vector_fmul_window(out + 448 + 3*128, buf + 2*128 + 64, buf + 3*128, swindow, 64);
- ac->dsp.vector_fmul_window(temp, buf + 3*128 + 64, buf + 4*128, swindow, 64);
- memcpy( out + 448 + 4*128, temp, 64 * sizeof(float));
+ ac->fdsp.vector_fmul_window(out + 448 + 0*128, saved + 448, buf + 0*128, swindow_prev, 64);
+ ac->fdsp.vector_fmul_window(out + 448 + 1*128, buf + 0*128 + 64, buf + 1*128, swindow, 64);
+ ac->fdsp.vector_fmul_window(out + 448 + 2*128, buf + 1*128 + 64, buf + 2*128, swindow, 64);
+ ac->fdsp.vector_fmul_window(out + 448 + 3*128, buf + 2*128 + 64, buf + 3*128, swindow, 64);
+ ac->fdsp.vector_fmul_window(temp, buf + 3*128 + 64, buf + 4*128, swindow, 64);
+ memcpy( out + 448 + 4*128, temp, 64 * sizeof(float));
} else {
- ac->dsp.vector_fmul_window(out + 448, saved + 448, buf, swindow_prev, 64);
- memcpy( out + 576, buf + 64, 448 * sizeof(float));
+ ac->fdsp.vector_fmul_window(out + 448, saved + 448, buf, swindow_prev, 64);
+ memcpy( out + 576, buf + 64, 448 * sizeof(float));
}
}
// buffer update
if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
- memcpy( saved, temp + 64, 64 * sizeof(float));
- ac->dsp.vector_fmul_window(saved + 64, buf + 4*128 + 64, buf + 5*128, swindow, 64);
- ac->dsp.vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 64);
- ac->dsp.vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 64);
- memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(float));
+ memcpy( saved, temp + 64, 64 * sizeof(float));
+ ac->fdsp.vector_fmul_window(saved + 64, buf + 4*128 + 64, buf + 5*128, swindow, 64);
+ ac->fdsp.vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 64);
+ ac->fdsp.vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 64);
+ memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(float));
} else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
- memcpy( saved, buf + 512, 448 * sizeof(float));
- memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(float));
+ memcpy( saved, buf + 512, 448 * sizeof(float));
+ memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(float));
} else { // LONG_STOP or ONLY_LONG
- memcpy( saved, buf + 512, 512 * sizeof(float));
+ memcpy( saved, buf + 512, 512 * sizeof(float));
}
}
float *dest = target->coeffs;
const float *src = cce->ch[0].coeffs;
int g, i, group, k, idx = 0;
- if (ac->m4ac.object_type == AOT_AAC_LTP) {
+ if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) {
av_log(ac->avctx, AV_LOG_ERROR,
"Dependent coupling is not supported together with LTP\n");
return;
const float gain = cce->coup.gain[index][0];
const float *src = cce->ch[0].ret;
float *dest = target->ret;
- const int len = 1024 << (ac->m4ac.sbr == 1);
+ const int len = 1024 << (ac->oc[1].m4ac.sbr == 1);
for (i = 0; i < len; i++)
dest[i] += gain * src[i];
if (che) {
if (type <= TYPE_CPE)
apply_channel_coupling(ac, che, type, i, BEFORE_TNS, apply_dependent_coupling);
- if (ac->m4ac.object_type == AOT_AAC_LTP) {
+ if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) {
if (che->ch[0].ics.predictor_present) {
if (che->ch[0].ics.ltp.present)
apply_ltp(ac, &che->ch[0]);
apply_channel_coupling(ac, che, type, i, BETWEEN_TNS_AND_IMDCT, apply_dependent_coupling);
if (type != TYPE_CCE || che->coup.coupling_point == AFTER_IMDCT) {
imdct_and_windowing(ac, &che->ch[0]);
- if (ac->m4ac.object_type == AOT_AAC_LTP)
+ if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP)
update_ltp(ac, &che->ch[0]);
if (type == TYPE_CPE) {
imdct_and_windowing(ac, &che->ch[1]);
- if (ac->m4ac.object_type == AOT_AAC_LTP)
+ if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP)
update_ltp(ac, &che->ch[1]);
}
- if (ac->m4ac.sbr > 0) {
+ if (ac->oc[1].m4ac.sbr > 0) {
ff_sbr_apply(ac, &che->sbr, type, che->ch[0].ret, che->ch[1].ret);
}
}
{
int size;
AACADTSHeaderInfo hdr_info;
+ uint8_t layout_map[MAX_ELEM_ID*4][3];
+ int layout_map_tags;
size = avpriv_aac_parse_header(gb, &hdr_info);
if (size > 0) {
+ if (hdr_info.num_aac_frames != 1) {
+ avpriv_report_missing_feature(ac->avctx,
+ "More than one AAC RDB per ADTS frame");
+ return AVERROR_PATCHWELCOME;
+ }
+ push_output_configuration(ac);
if (hdr_info.chan_config) {
- enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
- memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
- ac->m4ac.chan_config = hdr_info.chan_config;
- if (set_default_channel_config(ac->avctx, new_che_pos, hdr_info.chan_config))
+ ac->oc[1].m4ac.chan_config = hdr_info.chan_config;
+ if (set_default_channel_config(ac->avctx, layout_map,
+ &layout_map_tags, hdr_info.chan_config))
return -7;
- if (output_configure(ac, new_che_pos, hdr_info.chan_config,
- FFMAX(ac->output_configured, OC_TRIAL_FRAME)))
+ if (output_configure(ac, layout_map, layout_map_tags,
+ FFMAX(ac->oc[1].status, OC_TRIAL_FRAME), 0))
return -7;
- } else if (ac->output_configured != OC_LOCKED) {
- ac->m4ac.chan_config = 0;
- ac->output_configured = OC_NONE;
- }
- if (ac->output_configured != OC_LOCKED) {
- ac->m4ac.sbr = -1;
- ac->m4ac.ps = -1;
- ac->m4ac.sample_rate = hdr_info.sample_rate;
- ac->m4ac.sampling_index = hdr_info.sampling_index;
- ac->m4ac.object_type = hdr_info.object_type;
- }
- if (!ac->avctx->sample_rate)
- ac->avctx->sample_rate = hdr_info.sample_rate;
- if (hdr_info.num_aac_frames == 1) {
- if (!hdr_info.crc_absent)
- skip_bits(gb, 16);
} else {
- av_log_missing_feature(ac->avctx, "More than one AAC RDB per ADTS frame is", 0);
- return -1;
+ ac->oc[1].m4ac.chan_config = 0;
}
+ ac->oc[1].m4ac.sample_rate = hdr_info.sample_rate;
+ ac->oc[1].m4ac.sampling_index = hdr_info.sampling_index;
+ ac->oc[1].m4ac.object_type = hdr_info.object_type;
+ if (ac->oc[0].status != OC_LOCKED ||
+ ac->oc[0].m4ac.chan_config != hdr_info.chan_config ||
+ ac->oc[0].m4ac.sample_rate != hdr_info.sample_rate) {
+ ac->oc[1].m4ac.sbr = -1;
+ ac->oc[1].m4ac.ps = -1;
+ }
+ if (!hdr_info.crc_absent)
+ skip_bits(gb, 16);
}
return size;
}
ChannelElement *che = NULL, *che_prev = NULL;
enum RawDataBlockType elem_type, elem_type_prev = TYPE_END;
int err, elem_id;
- int samples = 0, multiplier, audio_found = 0;
+ int samples = 0, multiplier, audio_found = 0, pce_found = 0;
+
+ ac->frame = data;
if (show_bits(gb, 12) == 0xfff) {
if (parse_adts_frame_header(ac, gb) < 0) {
av_log(avctx, AV_LOG_ERROR, "Error decoding AAC frame header.\n");
- return -1;
+ err = -1;
+ goto fail;
}
- if (ac->m4ac.sampling_index > 12) {
- av_log(ac->avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", ac->m4ac.sampling_index);
- return -1;
+ if (ac->oc[1].m4ac.sampling_index > 12) {
+ av_log(ac->avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", ac->oc[1].m4ac.sampling_index);
+ err = -1;
+ goto fail;
}
}
+ if (frame_configure_elements(avctx) < 0) {
+ err = -1;
+ goto fail;
+ }
+
ac->tags_mapped = 0;
// parse
while ((elem_type = get_bits(gb, 3)) != TYPE_END) {
if (!(che=get_che(ac, elem_type, elem_id))) {
av_log(ac->avctx, AV_LOG_ERROR, "channel element %d.%d is not allocated\n",
elem_type, elem_id);
- return -1;
+ err = -1;
+ goto fail;
}
samples = 1024;
}
break;
case TYPE_PCE: {
- enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
- memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
- if ((err = decode_pce(avctx, &ac->m4ac, new_che_pos, gb)))
+ uint8_t layout_map[MAX_ELEM_ID*4][3];
+ int tags;
+ push_output_configuration(ac);
+ tags = decode_pce(avctx, &ac->oc[1].m4ac, layout_map, gb);
+ if (tags < 0) {
+ err = tags;
break;
- if (ac->output_configured > OC_TRIAL_PCE)
+ }
+ if (pce_found) {
av_log(avctx, AV_LOG_ERROR,
"Not evaluating a further program_config_element as this construct is dubious at best.\n");
- else
- err = output_configure(ac, new_che_pos, 0, OC_TRIAL_PCE);
+ pop_output_configuration(ac);
+ } else {
+ err = output_configure(ac, layout_map, tags, OC_TRIAL_PCE, 1);
+ pce_found = 1;
+ }
break;
}
elem_id += get_bits(gb, 8) - 1;
if (get_bits_left(gb) < 8 * elem_id) {
av_log(avctx, AV_LOG_ERROR, overread_err);
- return -1;
+ err = -1;
+ goto fail;
}
while (elem_id > 0)
elem_id -= decode_extension_payload(ac, gb, elem_id, che_prev, elem_type_prev);
elem_type_prev = elem_type;
if (err)
- return err;
+ goto fail;
if (get_bits_left(gb) < 3) {
av_log(avctx, AV_LOG_ERROR, overread_err);
- return -1;
+ err = -1;
+ goto fail;
}
}
spectral_to_sample(ac);
- multiplier = (ac->m4ac.sbr == 1) ? ac->m4ac.ext_sample_rate > ac->m4ac.sample_rate : 0;
+ multiplier = (ac->oc[1].m4ac.sbr == 1) ? ac->oc[1].m4ac.ext_sample_rate > ac->oc[1].m4ac.sample_rate : 0;
samples <<= multiplier;
- if (ac->output_configured < OC_LOCKED) {
- avctx->sample_rate = ac->m4ac.sample_rate << multiplier;
- avctx->frame_size = samples;
- }
-
- if (samples) {
- /* get output buffer */
- ac->frame.nb_samples = samples;
- if ((err = avctx->get_buffer(avctx, &ac->frame)) < 0) {
- av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return err;
- }
- if (avctx->sample_fmt == AV_SAMPLE_FMT_FLT)
- ac->fmt_conv.float_interleave((float *)ac->frame.data[0],
- (const float **)ac->output_data,
- samples, avctx->channels);
- else
- ac->fmt_conv.float_to_int16_interleave((int16_t *)ac->frame.data[0],
- (const float **)ac->output_data,
- samples, avctx->channels);
-
- *(AVFrame *)data = ac->frame;
- }
+ if (samples)
+ ac->frame->nb_samples = samples;
*got_frame_ptr = !!samples;
- if (ac->output_configured && audio_found)
- ac->output_configured = OC_LOCKED;
+ if (ac->oc[1].status && audio_found) {
+ avctx->sample_rate = ac->oc[1].m4ac.sample_rate << multiplier;
+ avctx->frame_size = samples;
+ ac->oc[1].status = OC_LOCKED;
+ }
return 0;
+fail:
+ pop_output_configuration(ac);
+ return err;
}
static int aac_decode_frame(AVCodecContext *avctx, void *data,
return AVERROR(ENOMEM);
avctx->extradata_size = new_extradata_size;
memcpy(avctx->extradata, new_extradata, new_extradata_size);
- if (decode_audio_specific_config(ac, ac->avctx, &ac->m4ac,
+ push_output_configuration(ac);
+ if (decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
avctx->extradata,
- avctx->extradata_size*8, 1) < 0)
+ avctx->extradata_size*8, 1) < 0) {
+ pop_output_configuration(ac);
return AVERROR_INVALIDDATA;
+ }
}
init_get_bits(&gb, buf, buf_size * 8);
{
AACContext *ac = &latmctx->aac_ctx;
AVCodecContext *avctx = ac->avctx;
- MPEG4AudioConfig m4ac = {0};
+ MPEG4AudioConfig m4ac = { 0 };
int config_start_bit = get_bits_count(gb);
int sync_extension = 0;
int bits_consumed, esize;
asclen = get_bits_left(gb);
if (config_start_bit % 8) {
- av_log_missing_feature(latmctx->aac_ctx.avctx, "audio specific "
- "config not byte aligned.\n", 1);
- return AVERROR_INVALIDDATA;
+ avpriv_request_sample(latmctx->aac_ctx.avctx,
+ "Non-byte-aligned audio-specific config");
+ return AVERROR_PATCHWELCOME;
}
if (asclen <= 0)
return AVERROR_INVALIDDATA;
if (bits_consumed < 0)
return AVERROR_INVALIDDATA;
- if (ac->m4ac.sample_rate != m4ac.sample_rate ||
- ac->m4ac.chan_config != m4ac.chan_config) {
+ if (ac->oc[1].m4ac.sample_rate != m4ac.sample_rate ||
+ ac->oc[1].m4ac.chan_config != m4ac.chan_config) {
av_log(avctx, AV_LOG_INFO, "audio config changed\n");
latmctx->initialized = 0;
skip_bits(gb, 6); // numSubFrames
// numPrograms
if (get_bits(gb, 4)) { // numPrograms
- av_log_missing_feature(latmctx->aac_ctx.avctx,
- "multiple programs are not supported\n", 1);
+ avpriv_request_sample(latmctx->aac_ctx.avctx, "Multiple programs");
return AVERROR_PATCHWELCOME;
}
// for each layer (which there is only on in DVB)
if (get_bits(gb, 3)) { // numLayer
- av_log_missing_feature(latmctx->aac_ctx.avctx,
- "multiple layers are not supported\n", 1);
+ avpriv_request_sample(latmctx->aac_ctx.avctx, "Multiple layers");
return AVERROR_PATCHWELCOME;
}
*got_frame_ptr = 0;
return avpkt->size;
} else {
+ push_output_configuration(&latmctx->aac_ctx);
if ((err = decode_audio_specific_config(
- &latmctx->aac_ctx, avctx, &latmctx->aac_ctx.m4ac,
- avctx->extradata, avctx->extradata_size*8, 1)) < 0)
+ &latmctx->aac_ctx, avctx, &latmctx->aac_ctx.oc[1].m4ac,
+ avctx->extradata, avctx->extradata_size*8, 1)) < 0) {
+ pop_output_configuration(&latmctx->aac_ctx);
return err;
+ }
latmctx->initialized = 1;
}
}
return muxlength;
}
-av_cold static int latm_decode_init(AVCodecContext *avctx)
+static av_cold int latm_decode_init(AVCodecContext *avctx)
{
struct LATMContext *latmctx = avctx->priv_data;
int ret = aac_decode_init(avctx);
AVCodec ff_aac_decoder = {
- .name = "aac",
- .type = AVMEDIA_TYPE_AUDIO,
- .id = CODEC_ID_AAC,
- .priv_data_size = sizeof(AACContext),
- .init = aac_decode_init,
- .close = aac_decode_close,
- .decode = aac_decode_frame,
- .long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
- .sample_fmts = (const enum AVSampleFormat[]) {
- AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
+ .name = "aac",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_AAC,
+ .priv_data_size = sizeof(AACContext),
+ .init = aac_decode_init,
+ .close = aac_decode_close,
+ .decode = aac_decode_frame,
+ .long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"),
+ .sample_fmts = (const enum AVSampleFormat[]) {
+ AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE
},
- .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
+ .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
.channel_layouts = aac_channel_layout,
};
To do a more complex LATM demuxing a separate LATM demuxer should be used.
*/
AVCodec ff_aac_latm_decoder = {
- .name = "aac_latm",
- .type = AVMEDIA_TYPE_AUDIO,
- .id = CODEC_ID_AAC_LATM,
- .priv_data_size = sizeof(struct LATMContext),
- .init = latm_decode_init,
- .close = aac_decode_close,
- .decode = latm_decode_frame,
- .long_name = NULL_IF_CONFIG_SMALL("AAC LATM (Advanced Audio Codec LATM syntax)"),
- .sample_fmts = (const enum AVSampleFormat[]) {
- AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
+ .name = "aac_latm",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_AAC_LATM,
+ .priv_data_size = sizeof(struct LATMContext),
+ .init = latm_decode_init,
+ .close = aac_decode_close,
+ .decode = latm_decode_frame,
+ .long_name = NULL_IF_CONFIG_SMALL("AAC LATM (Advanced Audio Coding LATM syntax)"),
+ .sample_fmts = (const enum AVSampleFormat[]) {
+ AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE
},
- .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
+ .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
.channel_layouts = aac_channel_layout,
};