3 * Copyright (c) 2011 Justin Ruggles
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "libavutil/mem_internal.h"
/**
 * For each coefficient, replace its exponent with the minimum of itself and
 * the exponents of the same coefficient in the following reused blocks.
 * Blocks are stored contiguously with a stride of 256 exponents.
 */
static void ac3_exponent_min_c(uint8_t *exp, int num_reuse_blocks, int nb_coefs)
{
    int blk, i;

    if (!num_reuse_blocks)
        return;

    for (i = 0; i < nb_coefs; i++) {
        uint8_t min_exp = *exp;
        uint8_t *exp1 = exp + 256;
        for (blk = 0; blk < num_reuse_blocks; blk++) {
            uint8_t next_exp = *exp1;
            if (next_exp < min_exp)
                min_exp = next_exp;
            exp1 += 256;
        }
        *exp++ = min_exp;
    }
}
/**
 * OR together the absolute values of all samples; the position of the highest
 * set bit in the result tells the caller the maximum magnitude (used to decide
 * how far samples can be left-shifted without overflow).
 */
static int ac3_max_msb_abs_int16_c(const int16_t *src, int len)
{
    int i, v = 0;
    for (i = 0; i < len; i++)
        v |= abs(src[i]);
    return v;
}
/**
 * Left-shift each 16-bit sample by 'shift', two samples at a time through
 * 32-bit words. The mask clears the bits that spill from the low halfword
 * into the high halfword, so each halfword behaves like an independent
 * int16 << shift. len is in int16 samples and must be a multiple of 16
 * (the loop handles 8 words = 16 samples per iteration).
 * NOTE(review): src must be 32-bit aligned for the uint32_t access.
 */
static void ac3_lshift_int16_c(int16_t *src, unsigned int len,
                               unsigned int shift)
{
    uint32_t *src32 = (uint32_t *)src;
    const uint32_t mask = ~(((1 << shift) - 1) << 16);
    int i;

    len >>= 1; /* convert sample count to 32-bit word count */
    for (i = 0; i < len; i += 8) {
        src32[i  ] = (src32[i  ] << shift) & mask;
        src32[i+1] = (src32[i+1] << shift) & mask;
        src32[i+2] = (src32[i+2] << shift) & mask;
        src32[i+3] = (src32[i+3] << shift) & mask;
        src32[i+4] = (src32[i+4] << shift) & mask;
        src32[i+5] = (src32[i+5] << shift) & mask;
        src32[i+6] = (src32[i+6] << shift) & mask;
        src32[i+7] = (src32[i+7] << shift) & mask;
    }
}
/**
 * Arithmetic right-shift each 32-bit sample by 'shift'.
 * len must be a multiple of 8 (loop is unrolled 8x) and greater than 0.
 */
static void ac3_rshift_int32_c(int32_t *src, unsigned int len,
                               unsigned int shift)
{
    do {
        *src++ >>= shift;
        *src++ >>= shift;
        *src++ >>= shift;
        *src++ >>= shift;
        *src++ >>= shift;
        *src++ >>= shift;
        *src++ >>= shift;
        *src++ >>= shift;
        len -= 8;
    } while (len > 0);
}
92 static void float_to_fixed24_c(int32_t *dst, const float *src, unsigned int len)
94 const float scale = 1 << 24;
96 *dst++ = lrintf(*src++ * scale);
97 *dst++ = lrintf(*src++ * scale);
98 *dst++ = lrintf(*src++ * scale);
99 *dst++ = lrintf(*src++ * scale);
100 *dst++ = lrintf(*src++ * scale);
101 *dst++ = lrintf(*src++ * scale);
102 *dst++ = lrintf(*src++ * scale);
103 *dst++ = lrintf(*src++ * scale);
108 static void ac3_bit_alloc_calc_bap_c(int16_t *mask, int16_t *psd,
110 int snr_offset, int floor,
111 const uint8_t *bap_tab, uint8_t *bap)
113 int bin, band, band_end;
115 /* special case, if snr offset is -960, set all bap's to zero */
116 if (snr_offset == -960) {
117 memset(bap, 0, AC3_MAX_COEFS);
122 band = ff_ac3_bin_to_band_tab[start];
124 int m = (FFMAX(mask[band] - snr_offset - floor, 0) & 0x1FE0) + floor;
125 band_end = ff_ac3_band_start_tab[++band];
126 band_end = FFMIN(band_end, end);
128 for (; bin < band_end; bin++) {
129 int address = av_clip_uintp2((psd[bin] - m) >> 5, 6);
130 bap[bin] = bap_tab[address];
132 } while (end > band_end);
/**
 * Count the number of mantissas of each bap value in 'bap',
 * accumulating into mant_cnt (indexed by bap value 0..15).
 */
static void ac3_update_bap_counts_c(uint16_t mant_cnt[16], uint8_t *bap,
                                    int len)
{
    while (len-- > 0)
        mant_cnt[bap[len]]++;
}
142 DECLARE_ALIGNED(16, const uint16_t, ff_ac3_bap_bits)[16] = {
143 0, 0, 0, 3, 0, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16
146 static int ac3_compute_mantissa_size_c(uint16_t mant_cnt[6][16])
151 for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
152 // bap=1 : 3 mantissas in 5 bits
153 bits += (mant_cnt[blk][1] / 3) * 5;
154 // bap=2 : 3 mantissas in 7 bits
155 // bap=4 : 2 mantissas in 7 bits
156 bits += ((mant_cnt[blk][2] / 3) + (mant_cnt[blk][4] >> 1)) * 7;
157 // bap=3 : 1 mantissa in 3 bits
158 bits += mant_cnt[blk][3] * 3;
159 // bap=5 to 15 : get bits per mantissa from table
160 for (bap = 5; bap < 16; bap++)
161 bits += mant_cnt[blk][bap] * ff_ac3_bap_bits[bap];
/**
 * Extract AC-3 exponents from fixed-point coefficients: the exponent is the
 * number of leading zeros above the magnitude's MSB relative to bit 23,
 * clamped behavior-wise to 24 for zero coefficients.
 */
static void ac3_extract_exponents_c(uint8_t *exp, int32_t *coef, int nb_coefs)
{
    int i;

    for (i = 0; i < nb_coefs; i++) {
        int v = abs(coef[i]);
        exp[i] = v ? 23 - av_log2(v) : 24;
    }
}
/**
 * Accumulate sums of squares for left, right, mid (l+r) and side (l-r)
 * signals over 'len' coefficients, into 64-bit accumulators.
 * MAC64 is the project's 64-bit multiply-accumulate macro.
 */
static void ac3_sum_square_butterfly_int32_c(int64_t sum[4],
                                             const int32_t *coef0,
                                             const int32_t *coef1,
                                             int len)
{
    int i;

    sum[0] = sum[1] = sum[2] = sum[3] = 0;

    for (i = 0; i < len; i++) {
        int lt = coef0[i];
        int rt = coef1[i];
        int md = lt + rt;
        int sd = lt - rt;
        MAC64(sum[0], lt, lt);
        MAC64(sum[1], rt, rt);
        MAC64(sum[2], md, md);
        MAC64(sum[3], sd, sd);
    }
}
/**
 * Float version of the sum-of-squares butterfly: accumulate l*l, r*r,
 * (l+r)^2 and (l-r)^2 over 'len' coefficients.
 */
static void ac3_sum_square_butterfly_float_c(float sum[4],
                                             const float *coef0,
                                             const float *coef1,
                                             int len)
{
    int i;

    sum[0] = sum[1] = sum[2] = sum[3] = 0;

    for (i = 0; i < len; i++) {
        float lt = coef0[i];
        float rt = coef1[i];
        float md = lt + rt;
        float sd = lt - rt;
        sum[0] += lt * lt;
        sum[1] += rt * rt;
        sum[2] += md * md;
        sum[3] += sd * sd;
    }
}
/**
 * Specialized 5.0 -> stereo downmix for symmetric matrices
 * (same front/center/surround gains on both outputs). Channel order:
 * 0=L, 1=C, 2=R, 3=Ls, 4=Rs; results overwrite channels 0 and 1.
 */
static void ac3_downmix_5_to_2_symmetric_c(float **samples, float **matrix,
                                           int len)
{
    int i;
    float v0, v1;
    float front_mix    = matrix[0][0];
    float center_mix   = matrix[0][1];
    float surround_mix = matrix[0][3];

    for (i = 0; i < len; i++) {
        v0 = samples[0][i] * front_mix +
             samples[1][i] * center_mix +
             samples[3][i] * surround_mix;

        v1 = samples[1][i] * center_mix +
             samples[2][i] * front_mix +
             samples[4][i] * surround_mix;

        samples[0][i] = v0;
        samples[1][i] = v1;
    }
}
/**
 * Specialized 5.0 -> mono downmix for symmetric matrices
 * (matrix[0][0]==matrix[0][2] and matrix[0][3]==matrix[0][4]).
 * Result overwrites channel 0.
 */
static void ac3_downmix_5_to_1_symmetric_c(float **samples, float **matrix,
                                           int len)
{
    int i;
    float front_mix    = matrix[0][0];
    float center_mix   = matrix[0][1];
    float surround_mix = matrix[0][3];

    for (i = 0; i < len; i++) {
        samples[0][i] = samples[0][i] * front_mix +
                        samples[1][i] * center_mix +
                        samples[2][i] * front_mix +
                        samples[3][i] * surround_mix +
                        samples[4][i] * surround_mix;
    }
}
/**
 * Generic float downmix to mono or stereo: each output channel is the
 * matrix-weighted sum of all input channels. Results overwrite channels
 * 0 (and 1 for stereo). Other out_ch values are a no-op.
 */
static void ac3_downmix_c(float **samples, float **matrix,
                          int out_ch, int in_ch, int len)
{
    int i, j;
    float v0, v1;

    if (out_ch == 2) {
        for (i = 0; i < len; i++) {
            v0 = v1 = 0.0f;
            for (j = 0; j < in_ch; j++) {
                v0 += samples[j][i] * matrix[0][j];
                v1 += samples[j][i] * matrix[1][j];
            }
            samples[0][i] = v0;
            samples[1][i] = v1;
        }
    } else if (out_ch == 1) {
        for (i = 0; i < len; i++) {
            v0 = 0.0f;
            for (j = 0; j < in_ch; j++)
                v0 += samples[j][i] * matrix[0][j];
            samples[0][i] = v0;
        }
    }
}
/**
 * Fixed-point 5.0 -> stereo symmetric downmix. Matrix coefficients are
 * Q12 int16; products are accumulated in 64 bits and rounded back with
 * (+2048)>>12. Results overwrite channels 0 and 1.
 */
static void ac3_downmix_5_to_2_symmetric_c_fixed(int32_t **samples, int16_t **matrix,
                                                 int len)
{
    int i;
    int64_t v0, v1;
    int16_t front_mix    = matrix[0][0];
    int16_t center_mix   = matrix[0][1];
    int16_t surround_mix = matrix[0][3];

    for (i = 0; i < len; i++) {
        v0 = (int64_t)samples[0][i] * front_mix +
             (int64_t)samples[1][i] * center_mix +
             (int64_t)samples[3][i] * surround_mix;

        v1 = (int64_t)samples[1][i] * center_mix +
             (int64_t)samples[2][i] * front_mix +
             (int64_t)samples[4][i] * surround_mix;

        samples[0][i] = (v0+2048)>>12;
        samples[1][i] = (v1+2048)>>12;
    }
}
/**
 * Fixed-point 5.0 -> mono symmetric downmix. Q12 int16 coefficients,
 * 64-bit accumulation, rounded back with (+2048)>>12 into channel 0.
 */
static void ac3_downmix_5_to_1_symmetric_c_fixed(int32_t **samples, int16_t **matrix,
                                                 int len)
{
    int i;
    int64_t v0;
    int16_t front_mix    = matrix[0][0];
    int16_t center_mix   = matrix[0][1];
    int16_t surround_mix = matrix[0][3];

    for (i = 0; i < len; i++) {
        v0 = (int64_t)samples[0][i] * front_mix +
             (int64_t)samples[1][i] * center_mix +
             (int64_t)samples[2][i] * front_mix +
             (int64_t)samples[3][i] * surround_mix +
             (int64_t)samples[4][i] * surround_mix;

        samples[0][i] = (v0+2048)>>12;
    }
}
/**
 * Generic fixed-point downmix to mono or stereo: matrix-weighted sums with
 * 64-bit accumulation, rounded back to Q-format with (+2048)>>12.
 * Results overwrite channels 0 (and 1 for stereo).
 */
static void ac3_downmix_c_fixed(int32_t **samples, int16_t **matrix,
                                int out_ch, int in_ch, int len)
{
    int i, j;
    int64_t v0, v1;

    if (out_ch == 2) {
        for (i = 0; i < len; i++) {
            v0 = v1 = 0;
            for (j = 0; j < in_ch; j++) {
                v0 += (int64_t)samples[j][i] * matrix[0][j];
                v1 += (int64_t)samples[j][i] * matrix[1][j];
            }
            samples[0][i] = (v0+2048)>>12;
            samples[1][i] = (v1+2048)>>12;
        }
    } else if (out_ch == 1) {
        for (i = 0; i < len; i++) {
            v0 = 0;
            for (j = 0; j < in_ch; j++)
                v0 += (int64_t)samples[j][i] * matrix[0][j];
            samples[0][i] = (v0+2048)>>12;
        }
    }
}
352 void ff_ac3dsp_downmix_fixed(AC3DSPContext *c, int32_t **samples, int16_t **matrix,
353 int out_ch, int in_ch, int len)
355 if (c->in_channels != in_ch || c->out_channels != out_ch) {
356 c->in_channels = in_ch;
357 c->out_channels = out_ch;
358 c->downmix_fixed = NULL;
360 if (in_ch == 5 && out_ch == 2 &&
361 !(matrix[1][0] | matrix[0][2] |
362 matrix[1][3] | matrix[0][4] |
363 (matrix[0][1] ^ matrix[1][1]) |
364 (matrix[0][0] ^ matrix[1][2]))) {
365 c->downmix_fixed = ac3_downmix_5_to_2_symmetric_c_fixed;
366 } else if (in_ch == 5 && out_ch == 1 &&
367 matrix[0][0] == matrix[0][2] &&
368 matrix[0][3] == matrix[0][4]) {
369 c->downmix_fixed = ac3_downmix_5_to_1_symmetric_c_fixed;
373 if (c->downmix_fixed)
374 c->downmix_fixed(samples, matrix, len);
376 ac3_downmix_c_fixed(samples, matrix, out_ch, in_ch, len);
/**
 * Apply a symmetric window in Q15 to an int16 sample buffer with round-to-
 * nearest ((+1<<14)>>15). Only the first half of the window is stored;
 * sample i and its mirror len-i-1 share window[i].
 * NOTE(review): MUL16 is the project's 16x16->32 multiply macro.
 */
static void apply_window_int16_c(int16_t *output, const int16_t *input,
                                 const int16_t *window, unsigned int len)
{
    int i;
    int len2 = len >> 1;

    for (i = 0; i < len2; i++) {
        int16_t w = window[i];
        output[i] = (MUL16(input[i], w) + (1 << 14)) >> 15;
        output[len-i-1] = (MUL16(input[len-i-1], w) + (1 << 14)) >> 15;
    }
}
392 void ff_ac3dsp_downmix(AC3DSPContext *c, float **samples, float **matrix,
393 int out_ch, int in_ch, int len)
395 if (c->in_channels != in_ch || c->out_channels != out_ch) {
396 int **matrix_cmp = (int **)matrix;
398 c->in_channels = in_ch;
399 c->out_channels = out_ch;
402 if (in_ch == 5 && out_ch == 2 &&
403 !(matrix_cmp[1][0] | matrix_cmp[0][2] |
404 matrix_cmp[1][3] | matrix_cmp[0][4] |
405 (matrix_cmp[0][1] ^ matrix_cmp[1][1]) |
406 (matrix_cmp[0][0] ^ matrix_cmp[1][2]))) {
407 c->downmix = ac3_downmix_5_to_2_symmetric_c;
408 } else if (in_ch == 5 && out_ch == 1 &&
409 matrix_cmp[0][0] == matrix_cmp[0][2] &&
410 matrix_cmp[0][3] == matrix_cmp[0][4]) {
411 c->downmix = ac3_downmix_5_to_1_symmetric_c;
415 ff_ac3dsp_set_downmix_x86(c);
419 c->downmix(samples, matrix, len);
421 ac3_downmix_c(samples, matrix, out_ch, in_ch, len);
424 av_cold void ff_ac3dsp_init(AC3DSPContext *c, int bit_exact)
426 c->ac3_exponent_min = ac3_exponent_min_c;
427 c->ac3_max_msb_abs_int16 = ac3_max_msb_abs_int16_c;
428 c->ac3_lshift_int16 = ac3_lshift_int16_c;
429 c->ac3_rshift_int32 = ac3_rshift_int32_c;
430 c->float_to_fixed24 = float_to_fixed24_c;
431 c->bit_alloc_calc_bap = ac3_bit_alloc_calc_bap_c;
432 c->update_bap_counts = ac3_update_bap_counts_c;
433 c->compute_mantissa_size = ac3_compute_mantissa_size_c;
434 c->extract_exponents = ac3_extract_exponents_c;
435 c->sum_square_butterfly_int32 = ac3_sum_square_butterfly_int32_c;
436 c->sum_square_butterfly_float = ac3_sum_square_butterfly_float_c;
440 c->downmix_fixed = NULL;
441 c->apply_window_int16 = apply_window_int16_c;
444 ff_ac3dsp_init_arm(c, bit_exact);
446 ff_ac3dsp_init_x86(c, bit_exact);
448 ff_ac3dsp_init_mips(c, bit_exact);