/*
 * x86-optimized AC-3 DSP utils
 * Copyright (c) 2011 Justin Ruggles
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "dsputil_x86.h"
#include "libavcodec/ac3.h"
#include "libavcodec/ac3dsp.h"

void ff_ac3_exponent_min_mmx   (uint8_t *exp, int num_reuse_blocks, int nb_coefs);
void ff_ac3_exponent_min_mmxext(uint8_t *exp, int num_reuse_blocks, int nb_coefs);
void ff_ac3_exponent_min_sse2  (uint8_t *exp, int num_reuse_blocks, int nb_coefs);

int ff_ac3_max_msb_abs_int16_mmx  (const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_mmxext(const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_sse2 (const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_ssse3(const int16_t *src, int len);

void ff_ac3_lshift_int16_mmx (int16_t *src, unsigned int len, unsigned int shift);
void ff_ac3_lshift_int16_sse2(int16_t *src, unsigned int len, unsigned int shift);

void ff_ac3_rshift_int32_mmx (int32_t *src, unsigned int len, unsigned int shift);
void ff_ac3_rshift_int32_sse2(int32_t *src, unsigned int len, unsigned int shift);

void ff_float_to_fixed24_3dnow(int32_t *dst, const float *src, unsigned int len);
void ff_float_to_fixed24_sse  (int32_t *dst, const float *src, unsigned int len);
void ff_float_to_fixed24_sse2 (int32_t *dst, const float *src, unsigned int len);

int ff_ac3_compute_mantissa_size_sse2(uint16_t mant_cnt[6][16]);

void ff_ac3_extract_exponents_3dnow(uint8_t *exp, int32_t *coef, int nb_coefs);
void ff_ac3_extract_exponents_sse2 (uint8_t *exp, int32_t *coef, int nb_coefs);
void ff_ac3_extract_exponents_ssse3(uint8_t *exp, int32_t *coef, int nb_coefs);

void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
                                        const int16_t *window, unsigned int len);
void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);
void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
                                  const int16_t *window, unsigned int len);
void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
                                const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
                                 const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);

#if HAVE_SSE_INLINE && HAVE_7REGS

#define IF1(x) x
#define IF0(x)

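/* MIX5: in-place downmix of 5 full-bandwidth channels to mono or stereo,
 * 4 samples per iteration. Only 3 distinct matrix coefficients are needed
 * (matrix[0][0], matrix[1][0], matrix[3][0], splatted into xmm5-xmm7);
 * ac3_downmix_sse() only selects this path when the remaining entries
 * mirror them. The induction variable i runs from -len*sizeof(float) up
 * to 0, indexing backwards off the ends of the sample buffers. */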
#define MIX5(mono, stereo)                                      \
    __asm__ volatile (                                          \
        "movss           0(%1), %%xmm5          \n"             \
        "movss           8(%1), %%xmm6          \n"             \
        "movss          24(%1), %%xmm7          \n"             \
        "shufps     $0, %%xmm5, %%xmm5          \n"             \
        "shufps     $0, %%xmm6, %%xmm6          \n"             \
        "shufps     $0, %%xmm7, %%xmm7          \n"             \
        "1:                                     \n"             \
        "movaps       (%0, %2), %%xmm0          \n"             \
        "movaps       (%0, %3), %%xmm1          \n"             \
        "movaps       (%0, %4), %%xmm2          \n"             \
        "movaps       (%0, %5), %%xmm3          \n"             \
        "movaps       (%0, %6), %%xmm4          \n"             \
        "mulps          %%xmm5, %%xmm0          \n"             \
        "mulps          %%xmm6, %%xmm1          \n"             \
        "mulps          %%xmm5, %%xmm2          \n"             \
        "mulps          %%xmm7, %%xmm3          \n"             \
        "mulps          %%xmm7, %%xmm4          \n"             \
 stereo("addps          %%xmm1, %%xmm0          \n")            \
        "addps          %%xmm1, %%xmm2          \n"             \
        "addps          %%xmm3, %%xmm0          \n"             \
        "addps          %%xmm4, %%xmm2          \n"             \
   mono("addps          %%xmm2, %%xmm0          \n")            \
        "movaps         %%xmm0, (%0, %2)        \n"             \
 stereo("movaps         %%xmm2, (%0, %3)        \n")            \
        "add               $16, %0              \n"             \
        "jl                 1b                  \n"             \
        : "+&r"(i)                                              \
        : "r"(matrix),                                          \
          "r"(samples[0] + len),                                \
          "r"(samples[1] + len),                                \
          "r"(samples[2] + len),                                \
          "r"(samples[3] + len),                                \
          "r"(samples[4] + len)                                 \
        : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3",      \
                       "%xmm4", "%xmm5", "%xmm6", "%xmm7",)     \
          "memory"                                              \
    );

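/* MIX_MISC: generic in_ch -> mono/stereo downmix for matrices that do not
 * fit the MIX5 fast path. Walks the splatted coefficient table built in
 * ac3_downmix_sse() (matrix_simd) and the per-channel pointer table samp[],
 * accumulating 4 output samples per outer iteration. Operands: %0 = sample
 * byte offset i, %2 = channel counter, %5 = -4*(in_ch-1), %6 = samp + in_ch,
 * %c7/%c8 = sizeof(float *) and sizeof(float *)/4 as immediates. */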
#define MIX_MISC(stereo)                                        \
    __asm__ volatile (                                          \
        "mov              %5, %2            \n"                 \
        "1:                                 \n"                 \
        "mov -%c7(%6, %2, %c8), %3          \n"                 \
        "movaps     (%3, %0), %%xmm0        \n"                 \
 stereo("movaps       %%xmm0, %%xmm1        \n")                \
        "mulps        %%xmm4, %%xmm0        \n"                 \
 stereo("mulps        %%xmm5, %%xmm1        \n")                \
        "2:                                 \n"                 \
        "mov   (%6, %2, %c8), %1            \n"                 \
        "movaps     (%1, %0), %%xmm2        \n"                 \
 stereo("movaps       %%xmm2, %%xmm3        \n")                \
        "mulps   (%4, %2, 8), %%xmm2        \n"                 \
 stereo("mulps 16(%4, %2, 8), %%xmm3        \n")                \
        "addps        %%xmm2, %%xmm0        \n"                 \
 stereo("addps        %%xmm3, %%xmm1        \n")                \
        "add              $4, %2            \n"                 \
        "jl               2b                \n"                 \
        "mov              %5, %2            \n"                 \
 stereo("mov   (%6, %2, %c8), %1            \n")                \
        "movaps       %%xmm0, (%3, %0)      \n"                 \
 stereo("movaps       %%xmm1, (%1, %0)      \n")                \
        "add             $16, %0            \n"                 \
        "jl               1b                \n"                 \
        : "+&r"(i), "=&r"(j), "=&r"(k), "=&r"(m)                \
        : "r"(matrix_simd + in_ch),                             \
          "g"((intptr_t) - 4 * (in_ch - 1)),                    \
          "r"(samp + in_ch),                                    \
          "i"(sizeof(float *)), "i"(sizeof(float *)/4)          \
        : "memory"                                              \
    );

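/* In-place downmix of in_ch channels to out_ch (1 or 2) channels.
 * The matrix is compared bitwise (as ints) so that exactly-zero and
 * exactly-equal coefficients can route the common 5.0 -> 2.0/1.0 cases
 * to the specialized MIX5 loops; everything else goes through MIX_MISC. */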
static void ac3_downmix_sse(float **samples, float (*matrix)[2],
                            int out_ch, int in_ch, int len)
{
    int (*matrix_cmp)[2] = (int(*)[2])matrix;
    intptr_t i, j, k, m;

    i = -len * sizeof(float);
    if (in_ch == 5 && out_ch == 2 &&
        !(matrix_cmp[0][1] | matrix_cmp[2][0]   |
          matrix_cmp[3][1] | matrix_cmp[4][0]   |
          (matrix_cmp[1][0] ^ matrix_cmp[1][1]) |
          (matrix_cmp[0][0] ^ matrix_cmp[2][1]))) {
        MIX5(IF0, IF1);
    } else if (in_ch == 5 && out_ch == 1 &&
               matrix_cmp[0][0] == matrix_cmp[2][0] &&
               matrix_cmp[3][0] == matrix_cmp[4][0]) {
        MIX5(IF1, IF0);
    } else {
        DECLARE_ALIGNED(16, float, matrix_simd)[AC3_MAX_CHANNELS][2][4];
        float *samp[AC3_MAX_CHANNELS];

        for (j = 0; j < in_ch; j++)
            samp[j] = samples[j] + len;

        j = 2 * in_ch * sizeof(float);
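        /* splat each matrix coefficient across a 4-wide vector:
         * matrix_simd[ch][0][0..3] = matrix[ch][0],
         * matrix_simd[ch][1][0..3] = matrix[ch][1] */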
        __asm__ volatile (
            "1:                                 \n"
            "sub             $8, %0             \n"
            "movss     (%2, %0), %%xmm4         \n"
            "movss    4(%2, %0), %%xmm5         \n"
            "shufps          $0, %%xmm4, %%xmm4 \n"
            "shufps          $0, %%xmm5, %%xmm5 \n"
            "movaps      %%xmm4,   (%1, %0, 4)  \n"
            "movaps      %%xmm5, 16(%1, %0, 4)  \n"
            "jg              1b                 \n"
            : "+&r"(j)
            : "r"(matrix_simd), "r"(matrix)
            : "memory"
        );
        if (out_ch == 2) {
            MIX_MISC(IF1);
        } else {
            MIX_MISC(IF0);
        }
    }
}

#endif /* HAVE_SSE_INLINE && HAVE_7REGS */

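/* Assign optimized implementations in increasing order of capability, so
 * a later (faster) instruction set overrides an earlier assignment. When
 * bit_exact output is requested, only bit-exact variants are installed. */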
av_cold void ff_ac3dsp_init_x86(AC3DSPContext *c, int bit_exact)
{
    int cpu_flags = av_get_cpu_flags();

    if (EXTERNAL_MMX(cpu_flags)) {
        c->ac3_exponent_min = ff_ac3_exponent_min_mmx;
        c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmx;
        c->ac3_lshift_int16 = ff_ac3_lshift_int16_mmx;
        c->ac3_rshift_int32 = ff_ac3_rshift_int32_mmx;
    }
    if (EXTERNAL_AMD3DNOW(cpu_flags)) {
        if (!bit_exact) {
            c->float_to_fixed24 = ff_float_to_fixed24_3dnow;
        }
    }
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        c->ac3_exponent_min = ff_ac3_exponent_min_mmxext;
        c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmxext;
        if (bit_exact) {
            c->apply_window_int16 = ff_apply_window_int16_mmxext;
        } else {
            c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
        }
    }
    if (EXTERNAL_SSE(cpu_flags)) {
        c->float_to_fixed24 = ff_float_to_fixed24_sse;
    }
    if (EXTERNAL_SSE2(cpu_flags)) {
        c->ac3_exponent_min = ff_ac3_exponent_min_sse2;
        c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_sse2;
        c->float_to_fixed24 = ff_float_to_fixed24_sse2;
        c->compute_mantissa_size = ff_ac3_compute_mantissa_size_sse2;
        c->extract_exponents = ff_ac3_extract_exponents_sse2;
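        /* CPUs flagged SSE2SLOW execute many SSE2 ops more slowly than
         * their MMX equivalents, so keep the MMX shift routines there */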
        if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW)) {
            c->ac3_lshift_int16 = ff_ac3_lshift_int16_sse2;
            c->ac3_rshift_int32 = ff_ac3_rshift_int32_sse2;
        }
        if (bit_exact) {
            c->apply_window_int16 = ff_apply_window_int16_sse2;
        } else if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW)) {
            c->apply_window_int16 = ff_apply_window_int16_round_sse2;
        }
    }
    if (EXTERNAL_SSSE3(cpu_flags)) {
        c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_ssse3;
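        /* Atom gets a window function scheduled for its in-order pipeline;
         * it also keeps the SSE2 extract_exponents over the SSSE3 one */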
        if (cpu_flags & AV_CPU_FLAG_ATOM) {
            c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
        } else {
            c->extract_exponents = ff_ac3_extract_exponents_ssse3;
            c->apply_window_int16 = ff_apply_window_int16_ssse3;
        }
    }

#if HAVE_SSE_INLINE && HAVE_7REGS
    if (INLINE_SSE(cpu_flags)) {
        c->downmix = ac3_downmix_sse;
    }
#endif
}