/*
 * AC-3 DSP functions
 * Copyright (c) 2011 Justin Ruggles
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"

#include "libavutil/attributes.h"
#include "avcodec.h"
#include "ac3.h"
#include "ac3dsp.h"
#include "mathops.h"

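/*
 * Keep each exponent at the minimum value found across the blocks that
 * reuse it. Exponent sets for consecutive blocks are stored 256
 * (AC3_MAX_COEFS) bytes apart, hence the fixed stride below.
 */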
static void ac3_exponent_min_c(uint8_t *exp, int num_reuse_blocks, int nb_coefs)
{
    int blk, i;

    if (!num_reuse_blocks)
        return;

    for (i = 0; i < nb_coefs; i++) {
        uint8_t min_exp = *exp;
        uint8_t *exp1 = exp + 256;
        for (blk = 0; blk < num_reuse_blocks; blk++) {
            uint8_t next_exp = *exp1;
            if (next_exp < min_exp)
                min_exp = next_exp;
            exp1 += 256;
        }
        *exp++ = min_exp;
    }
}

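/*
 * OR together the absolute values of all samples; the caller only needs
 * the position of the highest set bit, so the OR is enough to find it.
 */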
static int ac3_max_msb_abs_int16_c(const int16_t *src, int len)
{
    int i, v = 0;
    for (i = 0; i < len; i++)
        v |= abs(src[i]);
    return v;
}

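/*
 * Left-shift a buffer of 16-bit samples. Two samples are processed at a
 * time as one 32-bit word; the mask clears the bits that would otherwise
 * spill from the low sample into the high one.
 */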
static void ac3_lshift_int16_c(int16_t *src, unsigned int len,
                               unsigned int shift)
{
    uint32_t *src32 = (uint32_t *)src;
    const uint32_t mask = ~(((1 << shift) - 1) << 16);
    int i;
    len >>= 1;
    for (i = 0; i < len; i += 8) {
        src32[i  ] = (src32[i  ] << shift) & mask;
        src32[i+1] = (src32[i+1] << shift) & mask;
        src32[i+2] = (src32[i+2] << shift) & mask;
        src32[i+3] = (src32[i+3] << shift) & mask;
        src32[i+4] = (src32[i+4] << shift) & mask;
        src32[i+5] = (src32[i+5] << shift) & mask;
        src32[i+6] = (src32[i+6] << shift) & mask;
        src32[i+7] = (src32[i+7] << shift) & mask;
    }
}

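/*
 * Arithmetic right shift of 32-bit coefficients. As with the other
 * unrolled helpers in this file, len is assumed to be a multiple of 8.
 */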
static void ac3_rshift_int32_c(int32_t *src, unsigned int len,
                               unsigned int shift)
{
    do {
        *src++ >>= shift;
        *src++ >>= shift;
        *src++ >>= shift;
        *src++ >>= shift;
        *src++ >>= shift;
        *src++ >>= shift;
        *src++ >>= shift;
        *src++ >>= shift;
        len -= 8;
    } while (len > 0);
}

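/*
 * Convert float samples to 24-bit fixed point by scaling with 2^24 and
 * rounding; len is assumed to be a multiple of 8.
 */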
static void float_to_fixed24_c(int32_t *dst, const float *src, unsigned int len)
{
    const float scale = 1 << 24;
    do {
        *dst++ = lrintf(*src++ * scale);
        *dst++ = lrintf(*src++ * scale);
        *dst++ = lrintf(*src++ * scale);
        *dst++ = lrintf(*src++ * scale);
        *dst++ = lrintf(*src++ * scale);
        *dst++ = lrintf(*src++ * scale);
        *dst++ = lrintf(*src++ * scale);
        *dst++ = lrintf(*src++ * scale);
        len -= 8;
    } while (len > 0);
}

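/*
 * Compute bit allocation pointers (bap) from the masking curve and the
 * PSD: for each bit-allocation band the masking value is offset by the
 * SNR offset and floor, then the per-bin (psd - mask) difference indexes
 * the bap lookup table.
 */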
static void ac3_bit_alloc_calc_bap_c(int16_t *mask, int16_t *psd,
                                     int start, int end,
                                     int snr_offset, int floor,
                                     const uint8_t *bap_tab, uint8_t *bap)
{
    int bin, band, band_end;

    /* special case, if snr offset is -960, set all bap's to zero */
    if (snr_offset == -960) {
        memset(bap, 0, AC3_MAX_COEFS);
        return;
    }

    bin  = start;
    band = ff_ac3_bin_to_band_tab[start];
    do {
        int m = (FFMAX(mask[band] - snr_offset - floor, 0) & 0x1FE0) + floor;
        band_end = ff_ac3_band_start_tab[++band];
        band_end = FFMIN(band_end, end);

        for (; bin < band_end; bin++) {
            int address = av_clip_uintp2((psd[bin] - m) >> 5, 6);
            bap[bin] = bap_tab[address];
        }
    } while (end > band_end);
}

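/* Accumulate, per bap value, how many mantissas use that bap. */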
static void ac3_update_bap_counts_c(uint16_t mant_cnt[16], uint8_t *bap,
                                    int len)
{
    while (len-- > 0)
        mant_cnt[bap[len]]++;
}

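/*
 * Bits per mantissa for each bap value. Entries 1, 2 and 4 are zero
 * because those baps use grouped mantissas and are costed separately in
 * ac3_compute_mantissa_size_c().
 */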
DECLARE_ALIGNED(16, const uint16_t, ff_ac3_bap_bits)[16] = {
    0,  0,  0,  3,  0,  4,  5,  6,  7,  8,  9, 10, 11, 12, 14, 16
};

static int ac3_compute_mantissa_size_c(uint16_t mant_cnt[6][16])
{
    int blk, bap;
    int bits = 0;

    for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
        // bap=1 : 3 mantissas in 5 bits
        bits += (mant_cnt[blk][1] / 3) * 5;
        // bap=2 : 3 mantissas in 7 bits
        // bap=4 : 2 mantissas in 7 bits
        bits += ((mant_cnt[blk][2] / 3) + (mant_cnt[blk][4] >> 1)) * 7;
        // bap=3 : 1 mantissa in 3 bits
        bits += mant_cnt[blk][3] * 3;
        // bap=5 to 15 : get bits per mantissa from table
        for (bap = 5; bap < 16; bap++)
            bits += mant_cnt[blk][bap] * ff_ac3_bap_bits[bap];
    }

    return bits;
}

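/*
 * Derive the AC-3 exponent (0-24) of each 24-bit coefficient: the number
 * of leading zero bits of the magnitude, with zero coefficients mapped
 * to the maximum exponent of 24.
 */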
static void ac3_extract_exponents_c(uint8_t *exp, int32_t *coef, int nb_coefs)
{
    int i;

    for (i = 0; i < nb_coefs; i++) {
        int v = abs(coef[i]);
        exp[i] = v ? 23 - av_log2(v) : 24;
    }
}

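/*
 * Sum of squares of the left, right, mid (L+R) and side (L-R) signals
 * over a block of coefficients, accumulated in 64 bits.
 */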
static void ac3_sum_square_butterfly_int32_c(int64_t sum[4],
                                             const int32_t *coef0,
                                             const int32_t *coef1,
                                             int len)
{
    int i;

    sum[0] = sum[1] = sum[2] = sum[3] = 0;

    for (i = 0; i < len; i++) {
        int lt = coef0[i];
        int rt = coef1[i];
        int md = lt + rt;
        int sd = lt - rt;
        MAC64(sum[0], lt, lt);
        MAC64(sum[1], rt, rt);
        MAC64(sum[2], md, md);
        MAC64(sum[3], sd, sd);
    }
}

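/* Float variant of the sum-of-squares butterfly above. */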
static void ac3_sum_square_butterfly_float_c(float sum[4],
                                             const float *coef0,
                                             const float *coef1,
                                             int len)
{
    int i;

    sum[0] = sum[1] = sum[2] = sum[3] = 0;

    for (i = 0; i < len; i++) {
        float lt = coef0[i];
        float rt = coef1[i];
        float md = lt + rt;
        float sd = lt - rt;
        sum[0] += lt * lt;
        sum[1] += rt * rt;
        sum[2] += md * md;
        sum[3] += sd * sd;
    }
}

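/*
 * Fast path for downmixing 5 full-bandwidth channels (L, C, R, Ls, Rs)
 * to stereo when the downmix matrix is left/right symmetric.
 */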
static void ac3_downmix_5_to_2_symmetric_c(float **samples, float **matrix,
                                           int len)
{
    int i;
    float v0, v1;
    float front_mix    = matrix[0][0];
    float center_mix   = matrix[0][1];
    float surround_mix = matrix[0][3];

    for (i = 0; i < len; i++) {
        v0 = samples[0][i] * front_mix    +
             samples[1][i] * center_mix   +
             samples[3][i] * surround_mix;

        v1 = samples[1][i] * center_mix   +
             samples[2][i] * front_mix    +
             samples[4][i] * surround_mix;

        samples[0][i] = v0;
        samples[1][i] = v1;
    }
}

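/* Symmetric fast path for downmixing 5 channels to mono. */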
static void ac3_downmix_5_to_1_symmetric_c(float **samples, float **matrix,
                                           int len)
{
    int i;
    float front_mix    = matrix[0][0];
    float center_mix   = matrix[0][1];
    float surround_mix = matrix[0][3];

    for (i = 0; i < len; i++) {
        samples[0][i] = samples[0][i] * front_mix    +
                        samples[1][i] * center_mix   +
                        samples[2][i] * front_mix    +
                        samples[3][i] * surround_mix +
                        samples[4][i] * surround_mix;
    }
}

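/*
 * Generic matrix downmix to mono or stereo; the result overwrites
 * samples[0] (and samples[1] for stereo).
 */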
static void ac3_downmix_c(float **samples, float **matrix,
                          int out_ch, int in_ch, int len)
{
    int i, j;
    float v0, v1;

    if (out_ch == 2) {
        for (i = 0; i < len; i++) {
            v0 = v1 = 0.0f;
            for (j = 0; j < in_ch; j++) {
                v0 += samples[j][i] * matrix[0][j];
                v1 += samples[j][i] * matrix[1][j];
            }
            samples[0][i] = v0;
            samples[1][i] = v1;
        }
    } else if (out_ch == 1) {
        for (i = 0; i < len; i++) {
            v0 = 0.0f;
            for (j = 0; j < in_ch; j++)
                v0 += samples[j][i] * matrix[0][j];
            samples[0][i] = v0;
        }
    }
}

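/*
 * Fixed-point variants of the downmix routines. The matrix entries are
 * 16-bit with 12 fractional bits, so products are rounded with +2048 and
 * shifted back down by 12.
 */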
static void ac3_downmix_5_to_2_symmetric_c_fixed(int32_t **samples, int16_t **matrix,
                                                 int len)
{
    int i;
    int64_t v0, v1;
    int16_t front_mix    = matrix[0][0];
    int16_t center_mix   = matrix[0][1];
    int16_t surround_mix = matrix[0][3];

    for (i = 0; i < len; i++) {
        v0 = (int64_t)samples[0][i] * front_mix    +
             (int64_t)samples[1][i] * center_mix   +
             (int64_t)samples[3][i] * surround_mix;

        v1 = (int64_t)samples[1][i] * center_mix   +
             (int64_t)samples[2][i] * front_mix    +
             (int64_t)samples[4][i] * surround_mix;

        samples[0][i] = (v0+2048)>>12;
        samples[1][i] = (v1+2048)>>12;
    }
}

static void ac3_downmix_5_to_1_symmetric_c_fixed(int32_t **samples, int16_t **matrix,
                                                 int len)
{
    int i;
    int64_t v0;
    int16_t front_mix    = matrix[0][0];
    int16_t center_mix   = matrix[0][1];
    int16_t surround_mix = matrix[0][3];

    for (i = 0; i < len; i++) {
        v0 = (int64_t)samples[0][i] * front_mix    +
             (int64_t)samples[1][i] * center_mix   +
             (int64_t)samples[2][i] * front_mix    +
             (int64_t)samples[3][i] * surround_mix +
             (int64_t)samples[4][i] * surround_mix;

        samples[0][i] = (v0+2048)>>12;
    }
}

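/* Generic fixed-point matrix downmix to mono or stereo. */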
static void ac3_downmix_c_fixed(int32_t **samples, int16_t **matrix,
                                int out_ch, int in_ch, int len)
{
    int i, j;
    int64_t v0, v1;

    if (out_ch == 2) {
        for (i = 0; i < len; i++) {
            v0 = v1 = 0;
            for (j = 0; j < in_ch; j++) {
                v0 += (int64_t)samples[j][i] * matrix[0][j];
                v1 += (int64_t)samples[j][i] * matrix[1][j];
            }
            samples[0][i] = (v0+2048)>>12;
            samples[1][i] = (v1+2048)>>12;
        }
    } else if (out_ch == 1) {
        for (i = 0; i < len; i++) {
            v0 = 0;
            for (j = 0; j < in_ch; j++)
                v0 += (int64_t)samples[j][i] * matrix[0][j];
            samples[0][i] = (v0+2048)>>12;
        }
    }
}

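/*
 * Pick a downmix implementation for the given channel layout. The choice
 * is cached until the in/out channel counts change; the bitwise test
 * selects the symmetric 5-to-2 fast path only when all cross terms are
 * zero and the left/right coefficients match exactly.
 */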
void ff_ac3dsp_downmix_fixed(AC3DSPContext *c, int32_t **samples, int16_t **matrix,
                             int out_ch, int in_ch, int len)
{
    if (c->in_channels != in_ch || c->out_channels != out_ch) {
        c->in_channels  = in_ch;
        c->out_channels = out_ch;
        c->downmix_fixed = NULL;

        if (in_ch == 5 && out_ch == 2 &&
            !(matrix[1][0] | matrix[0][2] |
              matrix[1][3] | matrix[0][4] |
              (matrix[0][1] ^ matrix[1][1]) |
              (matrix[0][0] ^ matrix[1][2]))) {
            c->downmix_fixed = ac3_downmix_5_to_2_symmetric_c_fixed;
        } else if (in_ch == 5 && out_ch == 1 &&
                   matrix[0][0] == matrix[0][2] &&
                   matrix[0][3] == matrix[0][4]) {
            c->downmix_fixed = ac3_downmix_5_to_1_symmetric_c_fixed;
        }
    }

    if (c->downmix_fixed)
        c->downmix_fixed(samples, matrix, len);
    else
        ac3_downmix_c_fixed(samples, matrix, out_ch, in_ch, len);
}

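/*
 * Apply a symmetric 16-bit window with 15-bit fixed-point rounding; only
 * the first half of the window is stored, the second half is mirrored.
 */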
static void apply_window_int16_c(int16_t *output, const int16_t *input,
                                 const int16_t *window, unsigned int len)
{
    int i;
    int len2 = len >> 1;

    for (i = 0; i < len2; i++) {
        int16_t w       = window[i];
        output[i]       = (MUL16(input[i],       w) + (1 << 14)) >> 15;
        output[len-i-1] = (MUL16(input[len-i-1], w) + (1 << 14)) >> 15;
    }
}

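/*
 * Float version of the downmix selector. The matrix is compared through
 * int pointers so that "zero" and "equal" coefficients are detected by
 * exact bit patterns rather than float comparison.
 */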
void ff_ac3dsp_downmix(AC3DSPContext *c, float **samples, float **matrix,
                       int out_ch, int in_ch, int len)
{
    if (c->in_channels != in_ch || c->out_channels != out_ch) {
        int **matrix_cmp = (int **)matrix;

        c->in_channels  = in_ch;
        c->out_channels = out_ch;
        c->downmix      = NULL;

        if (in_ch == 5 && out_ch == 2 &&
            !(matrix_cmp[1][0] | matrix_cmp[0][2] |
              matrix_cmp[1][3] | matrix_cmp[0][4] |
              (matrix_cmp[0][1] ^ matrix_cmp[1][1]) |
              (matrix_cmp[0][0] ^ matrix_cmp[1][2]))) {
            c->downmix = ac3_downmix_5_to_2_symmetric_c;
        } else if (in_ch == 5 && out_ch == 1 &&
                   matrix_cmp[0][0] == matrix_cmp[0][2] &&
                   matrix_cmp[0][3] == matrix_cmp[0][4]) {
            c->downmix = ac3_downmix_5_to_1_symmetric_c;
        }

        if (ARCH_X86)
            ff_ac3dsp_set_downmix_x86(c);
    }

    if (c->downmix)
        c->downmix(samples, matrix, len);
    else
        ac3_downmix_c(samples, matrix, out_ch, in_ch, len);
}

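/* Set the C implementations, then let per-architecture init functions override them. */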
av_cold void ff_ac3dsp_init(AC3DSPContext *c, int bit_exact)
{
    c->ac3_exponent_min = ac3_exponent_min_c;
    c->ac3_max_msb_abs_int16 = ac3_max_msb_abs_int16_c;
    c->ac3_lshift_int16 = ac3_lshift_int16_c;
    c->ac3_rshift_int32 = ac3_rshift_int32_c;
    c->float_to_fixed24 = float_to_fixed24_c;
    c->bit_alloc_calc_bap = ac3_bit_alloc_calc_bap_c;
    c->update_bap_counts = ac3_update_bap_counts_c;
    c->compute_mantissa_size = ac3_compute_mantissa_size_c;
    c->extract_exponents = ac3_extract_exponents_c;
    c->sum_square_butterfly_int32 = ac3_sum_square_butterfly_int32_c;
    c->sum_square_butterfly_float = ac3_sum_square_butterfly_float_c;
    c->in_channels    = 0;
    c->out_channels   = 0;
    c->downmix        = NULL;
    c->downmix_fixed = NULL;
    c->apply_window_int16 = apply_window_int16_c;

    if (ARCH_ARM)
        ff_ac3dsp_init_arm(c, bit_exact);
    if (ARCH_X86)
        ff_ac3dsp_init_x86(c, bit_exact);
    if (ARCH_MIPS)
        ff_ac3dsp_init_mips(c, bit_exact);
}