2 * x86-optimized AC-3 DSP functions
3 * Copyright (c) 2011 Justin Ruggles
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavutil/attributes.h"
23 #include "libavutil/mem.h"
24 #include "libavutil/x86/asm.h"
25 #include "libavutil/x86/cpu.h"
26 #include "libavcodec/ac3.h"
27 #include "libavcodec/ac3dsp.h"
/*
 * Prototypes for the optimized implementations (defined outside this file,
 * presumably in x86 assembly — not visible here).  The suffix names the
 * minimum instruction-set extension each variant requires (_mmx, _mmxext,
 * _sse2, _ssse3, _3dnow, _sse); ff_ac3dsp_init_x86() below installs the
 * best variant the runtime CPU supports into the AC3DSPContext pointers.
 */

void ff_ac3_exponent_min_mmx   (uint8_t *exp, int num_reuse_blocks, int nb_coefs);
void ff_ac3_exponent_min_mmxext(uint8_t *exp, int num_reuse_blocks, int nb_coefs);
void ff_ac3_exponent_min_sse2  (uint8_t *exp, int num_reuse_blocks, int nb_coefs);

int ff_ac3_max_msb_abs_int16_mmx   (const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_mmxext(const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_sse2  (const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_ssse3 (const int16_t *src, int len);

void ff_ac3_lshift_int16_mmx (int16_t *src, unsigned int len, unsigned int shift);
void ff_ac3_lshift_int16_sse2(int16_t *src, unsigned int len, unsigned int shift);

void ff_ac3_rshift_int32_mmx (int32_t *src, unsigned int len, unsigned int shift);
void ff_ac3_rshift_int32_sse2(int32_t *src, unsigned int len, unsigned int shift);

void ff_float_to_fixed24_3dnow(int32_t *dst, const float *src, unsigned int len);
void ff_float_to_fixed24_sse  (int32_t *dst, const float *src, unsigned int len);
void ff_float_to_fixed24_sse2 (int32_t *dst, const float *src, unsigned int len);

int ff_ac3_compute_mantissa_size_sse2(uint16_t mant_cnt[6][16]);

void ff_ac3_extract_exponents_sse2 (uint8_t *exp, int32_t *coef, int nb_coefs);
void ff_ac3_extract_exponents_ssse3(uint8_t *exp, int32_t *coef, int nb_coefs);

/* NOTE(review): both the plain and "_round" apply_window variants are
 * assigned in ff_ac3dsp_init_x86(); the condition that selects between
 * them (likely the bit_exact argument) is in lines not visible in this
 * chunk — confirm against the full file. */
void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
                                        const int16_t *window, unsigned int len);
void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);
void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
                                  const int16_t *window, unsigned int len);
void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
                                const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
                                 const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);
#if ARCH_X86_32 && defined(__INTEL_COMPILER)
/* NOTE(review): ICC/x86-32 workaround block — its body and matching
 * #endif are in lines not visible in this chunk. */

#if HAVE_SSE_INLINE && HAVE_7REGS

/*
 * MIX5(mono, stereo): shared inline-asm template for downmixing five
 * input channels.  The mono()/stereo() macro parameters expand their
 * argument text only in the corresponding variant, so one template
 * generates both the 5->1 and 5->2 loop bodies.
 *
 * xmm5/xmm6/xmm7 are each loaded with a single matrix coefficient
 * (byte offsets 0, 8 and 24 from operand %1) and replicated to all four
 * lanes via "shufps $0".  Each iteration loads four floats from each of
 * the five channel pointers (%2..%6, indexed by %0 — presumably the
 * negative byte counter `i`; the constraint line declaring %0 is not
 * visible here), scales them by the splatted coefficients, sums the
 * products, and stores the result back over the first (and, for stereo,
 * second) channel in place.
 *
 * NOTE(review): the asm statement open/close, the loop label and counter
 * update, and some operand-constraint lines of this macro are missing
 * from this view — the text below is NOT the complete macro.
 */
#define MIX5(mono, stereo) \
    "movss     0(%1), %%xmm5          \n" \
    "movss     8(%1), %%xmm6          \n" \
    "movss    24(%1), %%xmm7          \n" \
    "shufps $0, %%xmm5, %%xmm5        \n" \
    "shufps $0, %%xmm6, %%xmm6        \n" \
    "shufps $0, %%xmm7, %%xmm7        \n" \
    "movaps (%0, %2), %%xmm0          \n" \
    "movaps (%0, %3), %%xmm1          \n" \
    "movaps (%0, %4), %%xmm2          \n" \
    "movaps (%0, %5), %%xmm3          \n" \
    "movaps (%0, %6), %%xmm4          \n" \
    "mulps %%xmm5, %%xmm0             \n" \
    "mulps %%xmm6, %%xmm1             \n" \
    "mulps %%xmm5, %%xmm2             \n" \
    "mulps %%xmm7, %%xmm3             \n" \
    "mulps %%xmm7, %%xmm4             \n" \
    stereo("addps %%xmm1, %%xmm0      \n") \
    "addps %%xmm1, %%xmm2             \n" \
    "addps %%xmm3, %%xmm0             \n" \
    "addps %%xmm4, %%xmm2             \n" \
    mono("addps %%xmm2, %%xmm0        \n") \
    "movaps %%xmm0, (%0, %2)          \n" \
    stereo("movaps %%xmm2, (%0, %3)   \n") \
    "r"(samples[0] + len), \
    "r"(samples[1] + len), \
    "r"(samples[2] + len), \
    "r"(samples[3] + len), \
    "r"(samples[4] + len) \
    : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
                   "%xmm4", "%xmm5", "%xmm6", "%xmm7",) \
/*
 * MIX_MISC(stereo): inline-asm template for the generic downmix path
 * (arbitrary coefficient matrix).  It walks a table of channel pointers
 * at operand %6 (entries of sizeof(float *), see the "%c8" scale and the
 * "i" constraints below), multiplies each input channel's four samples
 * by per-channel coefficient vectors from the matrix at %4, and
 * accumulates into xmm0 (plus xmm1 in the stereo variant) before storing
 * the result through the pointers fetched into %3 / %1.
 *
 * NOTE(review): the asm statement open/close, the loop label/branch and
 * several intervening lines of this macro are missing from this view —
 * do not treat the text below as the complete macro.
 */
#define MIX_MISC(stereo) \
    "mov -%c7(%6, %2, %c8), %3        \n" \
    "movaps     (%3, %0), %%xmm0      \n" \
    stereo("movaps %%xmm0, %%xmm1     \n") \
    "mulps      %%xmm4, %%xmm0        \n" \
    stereo("mulps %%xmm5, %%xmm1      \n") \
    "mov (%6, %2, %c8), %1            \n" \
    "movaps     (%1, %0), %%xmm2      \n" \
    stereo("movaps %%xmm2, %%xmm3     \n") \
    "mulps     (%4, %2, 8), %%xmm2    \n" \
    stereo("mulps 16(%4, %2, 8), %%xmm3 \n") \
    "addps      %%xmm2, %%xmm0        \n" \
    stereo("addps %%xmm3, %%xmm1      \n") \
    stereo("mov (%6, %2, %c8), %1     \n") \
    "movaps     %%xmm0, (%3, %0)      \n" \
    stereo("movaps %%xmm1, (%1, %0)   \n") \
    : "+&r"(i), "=&r"(j), "=&r"(k), "=&r"(m) \
    : "r"(matrix_simd + in_ch), \
      "g"((intptr_t) - 4 * (in_ch - 1)), \
      "i"(sizeof(float *)), "i"(sizeof(float *)/4) \
/**
 * Downmix in_ch channels of float samples into out_ch channels using the
 * per-channel coefficient matrix, via SSE inline assembly.  Dedicated
 * fast paths handle the common 5->2 and 5->1 layouts when the matrix has
 * the sparsity/symmetry pattern the MIX5 template assumes; everything
 * else goes through the generic MIX_MISC loop.
 *
 * NOTE(review): this chunk is missing the function's opening brace, the
 * declarations of i/j/k/m, the MIX5/MIX_MISC asm invocations, the
 * "} else {" that opens the generic path, and parts of the matrix-expand
 * asm statement — only the visible lines are documented below.
 */
static void ac3_downmix_sse(float **samples, float (*matrix)[2],
                            int out_ch, int in_ch, int len)
    /* Reinterpret the float coefficients as ints so exact bit patterns
     * can be OR/XOR-compared cheaply.  Only +0.0f has the all-zero
     * pattern this relies on; -0.0f would not match.
     * NOTE(review): float->int aliasing like this violates strict
     * aliasing in principle — presumably relied upon project-wide. */
    int (*matrix_cmp)[2] = (int(*)[2])matrix;

    /* Negative byte offset counting up toward zero; the asm templates
     * index the channel base pointers (samples[n] + len) with it. */
    i = -len * sizeof(float);

    /* Fast path 1: 5 -> 2 where the required coefficients are exactly
     * zero and the paired ones bit-identical (MIX5 stereo pattern). */
    if (in_ch == 5 && out_ch == 2 &&
        !(matrix_cmp[0][1] | matrix_cmp[2][0] |
          matrix_cmp[3][1] | matrix_cmp[4][0] |
          (matrix_cmp[1][0] ^ matrix_cmp[1][1]) |
          (matrix_cmp[0][0] ^ matrix_cmp[2][1]))) {
    /* Fast path 2: 5 -> 1 where the paired coefficients match
     * (MIX5 mono pattern). */
    } else if (in_ch == 5 && out_ch == 1 &&
               matrix_cmp[0][0] == matrix_cmp[2][0] &&
               matrix_cmp[3][0] == matrix_cmp[4][0]) {
        /* Generic path (its introducing "} else {" is in a line not
         * visible here): expand each coefficient pair into two splatted
         * 4-lane vectors in matrix_simd, and precompute per-channel end
         * pointers for the MIX_MISC loop. */
        LOCAL_ALIGNED(16, float, matrix_simd, [AC3_MAX_CHANNELS], [2][4]);
        float *samp[AC3_MAX_CHANNELS];

        for (j = 0; j < in_ch; j++)
            samp[j] = samples[j] + len;

        /* Byte size of the in_ch coefficient pairs to copy/expand. */
        j = 2 * in_ch * sizeof(float);
        /* Splat matrix[n][0] / matrix[n][1] into two 16-byte vectors each
         * (NOTE(review): the surrounding asm open/close and loop control
         * are missing from this view). */
        "movss     (%2, %0), %%xmm4   \n"
        "movss    4(%2, %0), %%xmm5   \n"
        "shufps $0, %%xmm4, %%xmm4    \n"
        "shufps $0, %%xmm5, %%xmm5    \n"
        "movaps %%xmm4,   (%1, %0, 4) \n"
        "movaps %%xmm5, 16(%1, %0, 4) \n"
        : "r"(matrix_simd), "r"(matrix)
/**
 * Install the fastest x86-optimized AC-3 DSP routines the running CPU
 * supports.  Checks proceed from least to most capable instruction set,
 * so a later (better) variant overwrites a pointer set by an earlier one.
 *
 * @param c         DSP context whose function pointers are overwritten
 * @param bit_exact nonzero if bit-exact output is required
 *                  (NOTE(review): the branches testing bit_exact — which
 *                  presumably pick the "_round" apply_window variants —
 *                  are in lines not visible in this chunk)
 *
 * NOTE(review): several structural lines of this function (braces and
 * if/else framing around some assignments, and the function's end) are
 * missing from this view; the statements below are reproduced as-is.
 */
av_cold void ff_ac3dsp_init_x86(AC3DSPContext *c, int bit_exact)
    int cpu_flags = av_get_cpu_flags();

    if (EXTERNAL_MMX(cpu_flags)) {
        c->ac3_exponent_min      = ff_ac3_exponent_min_mmx;
        c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmx;
        c->ac3_lshift_int16      = ff_ac3_lshift_int16_mmx;
        c->ac3_rshift_int32      = ff_ac3_rshift_int32_mmx;
    if (EXTERNAL_AMD3DNOW(cpu_flags)) {
        c->float_to_fixed24 = ff_float_to_fixed24_3dnow;
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        c->ac3_exponent_min      = ff_ac3_exponent_min_mmxext;
        c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmxext;
        /* NOTE(review): the condition choosing between the plain and
         * "_round" mmxext window variants is in dropped lines. */
        c->apply_window_int16 = ff_apply_window_int16_mmxext;
        c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
    if (EXTERNAL_SSE(cpu_flags)) {
        c->float_to_fixed24 = ff_float_to_fixed24_sse;
    if (EXTERNAL_SSE2(cpu_flags)) {
        c->ac3_exponent_min      = ff_ac3_exponent_min_sse2;
        c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_sse2;
        c->float_to_fixed24      = ff_float_to_fixed24_sse2;
        c->compute_mantissa_size = ff_ac3_compute_mantissa_size_sse2;
        c->extract_exponents     = ff_ac3_extract_exponents_sse2;
        /* Skip these SSE2 variants on CPUs where SSE2 is flagged as
         * slower than the MMX fallbacks. */
        if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW)) {
            c->ac3_lshift_int16 = ff_ac3_lshift_int16_sse2;
            c->ac3_rshift_int32 = ff_ac3_rshift_int32_sse2;
            c->apply_window_int16 = ff_apply_window_int16_sse2;
        } else if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW)) {
            c->apply_window_int16 = ff_apply_window_int16_round_sse2;
    if (EXTERNAL_SSSE3(cpu_flags)) {
        c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_ssse3;
        /* Atom gets a variant scheduled for its in-order pipeline (per
         * the _atom suffix); other SSSE3 CPUs take the generic one. */
        if (cpu_flags & AV_CPU_FLAG_ATOM) {
            c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
            c->extract_exponents  = ff_ac3_extract_exponents_ssse3;
            c->apply_window_int16 = ff_apply_window_int16_ssse3;

/* The downmix fast path uses inline SSE asm and needs 7 GP registers,
 * hence the extra build-time guard on top of the runtime check. */
#if HAVE_SSE_INLINE && HAVE_7REGS
    if (INLINE_SSE(cpu_flags)) {
        c->downmix = ac3_downmix_sse;