1 /*****************************************************************************
2 * ac3_srfft_sse.c: accelerated SSE ac3 fft functions
3 *****************************************************************************
4 * Copyright (C) 1999, 2000, 2001 VideoLAN
5 * $Id: ac3_srfft_sse.c,v 1.1 2001/05/15 16:19:42 sam Exp $
7 * Authors: Renaud Dartus <reno@videolan.org>
8 * Aaron Holtzman <aholtzma@engr.uvic.ca>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
23 *****************************************************************************/
25 #define MODULE_NAME imdctsse
26 #include "modules_inner.h"
28 /*****************************************************************************
30 *****************************************************************************/
43 #include "ac3_imdct.h"
44 #include "ac3_srfft.h"
/* Forward declarations for the SSE butterfly kernels defined below:
 * 4- and 8-point in-place complex FFTs, plus the split-radix merge pass
 * that combines sub-transforms using the twiddle tables d / d_3. */
static void fft_4_sse (complex_t *x);
static void fft_8_sse (complex_t *x);
static void fft_asmb_sse (int k, complex_t *x, complex_t *wTB,
                          const complex_t *d, const complex_t *d_3);
/*****************************************************************************
 * fft_64p: 64-point in-place complex FFT (split-radix decomposition)
 *****************************************************************************
 * Builds the transform bottom-up from 4- and 8-point SSE butterflies and
 * merges sub-transforms with fft_asmb_sse() using the precomputed twiddle
 * tables delta16/delta32/delta64 (the *_3 tables hold the third-harmonic
 * twiddles required by split radix: N = N/2 + N/4 + N/4).
 * NOTE(review): the function's braces are on lines elided from this view.
 *****************************************************************************/
void _M( fft_64p ) ( complex_t *a )
    /* 16-point FFT on a[0..15]: one 8-point + two 4-point, merged with k=2 */
    fft_8_sse(&a[0]); fft_4_sse(&a[8]); fft_4_sse(&a[12]);
    fft_asmb_sse(2, &a[0], &a[8], &delta16[0], &delta16_3[0]);

    /* two 8-point FFTs on a[16..31], merged into a 32-point FFT on a[0..31] */
    fft_8_sse(&a[16]), fft_8_sse(&a[24]);
    fft_asmb_sse(4, &a[0], &a[16],&delta32[0], &delta32_3[0]);

    /* 16-point FFT on a[32..47] */
    fft_8_sse(&a[32]); fft_4_sse(&a[40]); fft_4_sse(&a[44]);
    fft_asmb_sse(2, &a[32], &a[40], &delta16[0], &delta16_3[0]);

    /* 16-point FFT on a[48..63] */
    fft_8_sse(&a[48]); fft_4_sse(&a[56]); fft_4_sse(&a[60]);
    fft_asmb_sse(2, &a[48], &a[56], &delta16[0], &delta16_3[0]);

    /* final split-radix merge: 32-point + 16-point + 16-point -> 64 points */
    fft_asmb_sse(8, &a[0], &a[32],&delta64[0], &delta64_3[0]);
/*****************************************************************************
 * fft_128p: 128-point in-place complex FFT (split-radix decomposition)
 *****************************************************************************
 * Same scheme as fft_64p: a 64-point FFT is built on a[0..63], two 32-point
 * FFTs on a[64..95] and a[96..127], then a final k=16 merge combines them
 * (split radix: 128 = 64 + 32 + 32).
 * NOTE(review): the function's braces are on lines elided from this view.
 *****************************************************************************/
void _M( fft_128p ) ( complex_t *a )
    /* 64-point FFT on a[0..63], identical sequence to fft_64p */
    fft_8_sse(&a[0]); fft_4_sse(&a[8]); fft_4_sse(&a[12]);
    fft_asmb_sse(2, &a[0], &a[8], &delta16[0], &delta16_3[0]);

    fft_8_sse(&a[16]), fft_8_sse(&a[24]);
    fft_asmb_sse(4, &a[0], &a[16],&delta32[0], &delta32_3[0]);

    fft_8_sse(&a[32]); fft_4_sse(&a[40]); fft_4_sse(&a[44]);
    fft_asmb_sse(2, &a[32], &a[40], &delta16[0], &delta16_3[0]);

    fft_8_sse(&a[48]); fft_4_sse(&a[56]); fft_4_sse(&a[60]);
    fft_asmb_sse(2, &a[48], &a[56], &delta16[0], &delta16_3[0]);

    fft_asmb_sse(8, &a[0], &a[32],&delta64[0], &delta64_3[0]);

    /* 32-point FFT on a[64..95]: 16-point (8+4+4) then two 8-point, merged */
    fft_8_sse(&a[64]); fft_4_sse(&a[72]); fft_4_sse(&a[76]);

    fft_asmb_sse(2, &a[64], &a[72], &delta16[0], &delta16_3[0]);

    fft_8_sse(&a[80]); fft_8_sse(&a[88]);

    /* fft_32(&a[64]); */
    fft_asmb_sse(4, &a[64], &a[80],&delta32[0], &delta32_3[0]);

    /* 32-point FFT on a[96..127] */
    fft_8_sse(&a[96]); fft_4_sse(&a[104]), fft_4_sse(&a[108]);

    fft_asmb_sse(2, &a[96], &a[104], &delta16[0], &delta16_3[0]);

    fft_8_sse(&a[112]), fft_8_sse(&a[120]);
    /* fft_32(&a[96]); */
    fft_asmb_sse(4, &a[96], &a[112], &delta32[0], &delta32_3[0]);

    /* fft_128(&a[0]); final merge: 64 + 32 + 32 -> 128 points */
    fft_asmb_sse(16, &a[0], &a[64], &delta128[0], &delta128_3[0]);
110 ".float 0f0.707106781188\n"
111 ".float 0f0.707106781188\n"
112 ".float 0f-0.707106781188\n"
113 ".float 0f-0.707106781188\n"
/*****************************************************************************
 * fft_4_sse: in-place 4-point complex FFT of x[0..3]
 *****************************************************************************
 * Radix-4 butterfly computed with two movups loads (each holding two
 * complex_t values as re|im pairs) and SSE shuffles; results are written
 * back over x[0..3].
 * NOTE(review): %eax is expected to hold x -- the asm constraint list and
 * the function braces are on lines elided from this view.
 *****************************************************************************/
static void fft_4_sse (complex_t *x)
    __asm__ __volatile__ (
    "movups (%%eax), %%xmm0\n"      /* x[1] | x[0] */
    "movups 16(%%eax), %%xmm2\n"    /* x[3] | x[2] */
    "movups %%xmm0, %%xmm1\n"       /* x[1] | x[0] */
    /* even/odd sums: radix-2 stage */
    "addps %%xmm2, %%xmm0\n"        /* x[1] + x[3] | x[0] + x[2] */
    "subps %%xmm2, %%xmm1\n"        /* x[1] - x[3] | x[0] - x[2] */
    "xorps %%xmm6, %%xmm6\n"
    "movhlps %%xmm1, %%xmm4\n"      /* ? | x[1] - x[3] */
    "movhlps %%xmm0, %%xmm3\n"      /* ? | x[1] + x[3] */
    /* build i*(x[1]-x[3]) by negating the real part and swapping re/im */
    "subss %%xmm4, %%xmm6\n"        /* 0 | -(x[1] - x[3]).re */
    "movlhps %%xmm1, %%xmm0\n"      /* x[0] - x[2] | x[0] + x[2] */
    "movlhps %%xmm6, %%xmm4\n"      /* 0 | -(x[1] - x[3]).re | (x[1] - x[3]).im | (x[3]-x[1]).re */
    "movups %%xmm0, %%xmm2\n"       /* x[0] - x[2] | x[0] + x[2] */
    "shufps $0x94, %%xmm4, %%xmm3\n" /* i*(x[1] - x[3]) | x[1] + x[3] */
    /* final butterflies: x[0,1] = t +/- s, x[2,3] = t -/+ s */
    "addps %%xmm3, %%xmm0\n"
    "subps %%xmm3, %%xmm2\n"
    "movups %%xmm0, (%%eax)\n"
    "movups %%xmm2, 16(%%eax)\n"
/*****************************************************************************
 * fft_8_sse: in-place 8-point complex FFT of x[0..7]
 *****************************************************************************
 * First computes the 4-point transform of the even samples x[0,2,4,6],
 * then combines the odd samples x[1,3,5,7] with the (1-i)/sqrt2 and
 * (-1-i)/sqrt2 twiddles taken from the hsqrt2 constant table.
 * NOTE(review): %eax is expected to hold x, and %ebx is clobbered by the
 * $hsqrt2 load -- the asm constraint/clobber list and the function braces
 * are on lines elided from this view.
 *****************************************************************************/
static void fft_8_sse (complex_t *x)
    __asm__ __volatile__ (

    /* 4-point FFT of the even samples x[0], x[2], x[4], x[6] */
    "movlps (%%eax), %%xmm0\n"      /* x[0] */
    "movlps 32(%%eax), %%xmm1\n"    /* x[4] */
    "movhps 16(%%eax), %%xmm0\n"    /* x[2] | x[0] */
    "movhps 48(%%eax), %%xmm1\n"    /* x[6] | x[4] */
    "movups %%xmm0, %%xmm2\n"       /* x[2] | x[0] */
    "xorps %%xmm3, %%xmm3\n"
    "addps %%xmm1, %%xmm0\n"        /* x[2] + x[6] | x[0] + x[4] */
    "subps %%xmm1, %%xmm2\n"        /* x[2] - x[6] | x[0] - x[4] */
    "movhlps %%xmm0, %%xmm5\n"      /* x[2] + x[6] */
    "movhlps %%xmm2, %%xmm4\n"      /* x[2] - x[6] */
    "movlhps %%xmm2, %%xmm0\n"      /* x[0] - x[4] | x[0] + x[4] */
    "subss %%xmm4, %%xmm3\n"        /* (x[2]-x[6]).im | -(x[2]-x[6]).re */
    "movups %%xmm0, %%xmm7\n"       /* x[0] - x[4] | x[0] + x[4] */
    "movups %%xmm3, %%xmm4\n"       /* (x[2]-x[6]).im | -(x[2]-x[6]).re */
    "movlps 8(%%eax), %%xmm1\n"     /* x[1] */
    "shufps $0x14, %%xmm4, %%xmm5\n" /* i*(x[2] - x[6]) | x[2] + x[6] */

    "addps %%xmm5, %%xmm0\n"        /* yt = i*(x2-x6)+x0-x4 | x2+x6+x0+x4 */
    "subps %%xmm5, %%xmm7\n"        /* yb = i*(x6-x2)+x0-x4 | -x6-x2+x0+x4 */

    /* odd samples x[1], x[3], x[5], x[7], twiddled by (+/-1-i)/sqrt2 */
    "movhps 24(%%eax), %%xmm1\n"    /* x[3] | x[1] */
    "movl $hsqrt2, %%ebx\n"
    "movlps 40(%%eax), %%xmm2\n"    /* x[5] */
    "movhps 56(%%eax), %%xmm2\n"    /* x[7] | x[5] */
    "movups %%xmm1, %%xmm3\n"       /* x[3] | x[1] */
    "addps %%xmm2, %%xmm1\n"        /* x[3] + x[7] | x[1] + x[5] */
    "subps %%xmm2, %%xmm3\n"        /* x[3] - x[7] | x[1] - x[5] */
    "movups (%%ebx), %%xmm4\n"      /* -1/sqrt2 | -1/sqrt2 | 1/sqrt2 | 1/sqrt2 */
    "movups %%xmm3, %%xmm6\n"       /* x[3] - x[7] | x[1] - x[5] */
    "mulps %%xmm4, %%xmm3\n"        /* -1/s2*(x[3] - x[7]) | 1/s2*(x[1] - x[5]) */
    "shufps $0xc8, %%xmm4, %%xmm4\n" /* -1/sqrt2 | 1/sqrt2 | -1/sqrt2 | 1/sqrt2 */
    "shufps $0xb1, %%xmm6, %%xmm6\n" /* (x3-x7).re|(x3-x7).im|(x1-x5).re|(x1-x5).im */
    "mulps %%xmm4, %%xmm6\n"        /* (x7-x3).re/s2|(x3-x7).im/s2|(x5-x1).re/s2|(x1-x5).im/s2 */
    "addps %%xmm3, %%xmm6\n"        /* (-1-i)/sqrt2 * (x[3]-x[7]) | (1-i)/sqrt2 * (x[1] - x[5]) */
    "movhlps %%xmm1, %%xmm5\n"      /* x[3] + x[7] */
    "movlhps %%xmm6, %%xmm1\n"      /* (1+i)/sqrt2 * (x[1]-x[5]) | x[1]+x[5] */
    "shufps $0xe4, %%xmm6, %%xmm5\n" /* (-1-i)/sqrt2 * (x[3]-x[7]) | x[3]+x[7] */
    "movups %%xmm1, %%xmm3\n"       /* (1-i)/sqrt2 * (x[1]-x[5]) | x[1]+x[5] */

    /* combine even-half results (yt, yb) with odd-half results (u, v) */
    "addps %%xmm5, %%xmm1\n"        /* u */
    "subps %%xmm5, %%xmm3\n"        /* v */
    "movups %%xmm0, %%xmm2\n"       /* yb */
    "movups %%xmm7, %%xmm4\n"       /* yt */
    "movups (%%ebx), %%xmm5\n"
    "mulps %%xmm5, %%xmm3\n"
    "addps %%xmm1, %%xmm0\n"        /* yt + u */
    "subps %%xmm1, %%xmm2\n"        /* yt - u */
    "shufps $0xb1, %%xmm3, %%xmm3\n" /* -i * v */
    "movups %%xmm0, (%%eax)\n"
    "movups %%xmm2, 32(%%eax)\n"
    "addps %%xmm3, %%xmm4\n"        /* yb - i*v */
    "subps %%xmm3, %%xmm7\n"        /* yb + i*v */
    "movups %%xmm4, 16(%%eax)\n"
    "movups %%xmm7, 48(%%eax)\n"
/*****************************************************************************
 * fft_asmb_sse: split-radix merge pass
 *****************************************************************************
 * Combines the half-size transform at x with the two quarter-size
 * transforms held contiguously at wTB (wT = wTB[0..], wB = wTB[k..]),
 * using twiddle tables d and d_3 (third harmonic), to produce a transform
 * of 4k points.  %ecx is scaled to 16k = k * sizeof(complex_t[2]) so that
 * (reg,%ecx) addressing steps between the four k-sized quadrants.
 *
 * NOTE(review): several original lines are elided from this view -- the
 * register saves after the frame setup, the loop counter/label between the
 * first (zero-twiddle) butterfly and the general loop body, the load of a
 * sign-mask constant (1|-1|1|-1) into %edi used by the mulps fix-ups, and
 * the asm constraint list plus closing braces.  The argument loads below
 * assume the standard %ebp frame offsets (8..24) are valid at this point,
 * which is compiler- and option-dependent: fragile, verify before reuse.
 *****************************************************************************/
static void fft_asmb_sse (int k, complex_t *x, complex_t *wTB,
             const complex_t *d, const complex_t *d_3)
    __asm__ __volatile__ (

    "movl %%esp, %%ebp\n"

    /* fetch the five C arguments from the caller's stack frame */
    "movl 8(%%ebp), %%ecx\n"    /* k */
    "movl 12(%%ebp), %%eax\n"   /* x */
    "movl %%ecx, -4(%%ebp)\n"   /* k */
    "movl 16(%%ebp), %%ebx\n"   /* wT */
    "movl 20(%%ebp), %%edx\n"   /* d */
    "movl 24(%%ebp), %%esi\n"   /* d3 */
    "shll $4, %%ecx\n"          /* 16k */

    "leal (%%eax, %%ecx, 2), %%edi\n"

    /* TRANSZERO and TRANS: first butterfly pair (indices 0 and 1) */
    "movups (%%eax), %%xmm0\n"          /* x[1] | x[0] */
    "movups (%%ebx), %%xmm1\n"          /* wT[1] | wT[0] */
    "movups (%%ebx, %%ecx), %%xmm2\n"   /* wB[1] | wB[0] */
    "movlps (%%edx), %%xmm3\n"          /* d */
    "movlps (%%esi), %%xmm4\n"          /* d3 */
    "movhlps %%xmm1, %%xmm5\n"          /* wT[1] */
    "movhlps %%xmm2, %%xmm6\n"          /* wB[1] */
    /* complex multiply wT[1]*d[1] and wB[1]*d3[1] (index 0 needs none) */
    "shufps $0x50, %%xmm3, %%xmm3\n"    /* d[1].im | d[1].im | d[1].re | d[1].re */
    "shufps $0x50, %%xmm4, %%xmm4\n"    /* d3[1].im | d3[1].im | d3[i].re | d3[i].re */
    "movlhps %%xmm5, %%xmm5\n"          /* wT[1] | wT[1] */
    "movlhps %%xmm6, %%xmm6\n"          /* wB[1] | wB[1] */
    "mulps %%xmm3, %%xmm5\n"
    "mulps %%xmm4, %%xmm6\n"
    "movhlps %%xmm5, %%xmm7\n"          /* wT[1].im * d[1].im | wT[1].re * d[1].im */
    "movlhps %%xmm6, %%xmm5\n"          /* wB[1].im * d3[1].re | wB[1].re * d3[1].re | wT[1].im * d[1].re | wT[1].re * d[1].re */
    "shufps $0xb1, %%xmm6, %%xmm7\n"    /* wB[1].re * d3[1].im | wB[i].im * d3[1].im | wT[1].re * d[1].im | wT[1].im * d[1].im */

    /* (%%edi) here presumably holds the 1|-1 sign mask loaded on an elided
     * line -- it turns the products above into the +/- cross terms of the
     * complex multiply.  TODO confirm against the full source. */
    "movups (%%edi), %%xmm4\n"
    "mulps %%xmm4, %%xmm7\n"
    "addps %%xmm7, %%xmm5\n"            /* wB[1] * d3[1] | wT[1] * d[1] */
    "movlhps %%xmm5, %%xmm1\n"          /* d[1] * wT[1] | wT[0] */
    "shufps $0xe4, %%xmm5, %%xmm2\n"    /* d3[1] * wB[1] | wB[0] */
    "movups %%xmm1, %%xmm3\n"           /* d[1] * wT[1] | wT[0] */
    /* repoint %edi at the third quadrant x + 2k for the stores below */
    "leal (%%eax, %%ecx, 2), %%edi\n"
    "addps %%xmm2, %%xmm1\n"            /* u */
    "subps %%xmm2, %%xmm3\n"            /* v */
    "mulps %%xmm4, %%xmm3\n"
    "movups (%%eax, %%ecx), %%xmm5\n"   /* xk[1] | xk[0] */
    "shufps $0xb1, %%xmm3, %%xmm3\n"    /* -i * v */
    "movups %%xmm0, %%xmm2\n"           /* x[1] | x[0] */
    "movups %%xmm5, %%xmm6\n"           /* xk[1] | xk[0] */
    /* write the four output quadrants: x +/- u and xk -/+ i*v */
    "addps %%xmm1, %%xmm0\n"
    "subps %%xmm1, %%xmm2\n"
    "addps %%xmm3, %%xmm5\n"
    "subps %%xmm3, %%xmm6\n"
    "movups %%xmm0, (%%eax)\n"
    "movups %%xmm2, (%%edi)\n"
    "movups %%xmm5, (%%eax, %%ecx)\n"
    "movups %%xmm6, (%%edi, %%ecx)\n"

    /* General loop body (loop label/counter elided from this view):
     * two full butterflies per iteration, each with complete complex
     * multiplies wT*d and wB*d3. */
    "movups (%%ebx), %%xmm0\n"          /* wT[1] | wT[0] */
    "movups (%%edx), %%xmm1\n"          /* d[1] | d[0] */

    "movups (%%ebx, %%ecx), %%xmm4\n"   /* wB[1] | wB[0] */
    "movups (%%esi), %%xmm5\n"          /* d3[1] | d3[0] */

    "movhlps %%xmm0, %%xmm2\n"          /* wT[1] */
    "movhlps %%xmm1, %%xmm3\n"          /* d[1] */

    "movhlps %%xmm4, %%xmm6\n"          /* wB[1] */
    "movhlps %%xmm5, %%xmm7\n"          /* d3[1] */

    /* splat re/im parts of the twiddles for the SoA-style multiply */
    "shufps $0x50, %%xmm1, %%xmm1\n"    /* d[0].im | d[0].im | d[0].re | d[0].re */
    "shufps $0x50, %%xmm3, %%xmm3\n"    /* d[1].im | d[1].im | d[1].re | d[1].re */

    "movlhps %%xmm0, %%xmm0\n"          /* wT[0] | wT[0] */
    "shufps $0x50, %%xmm5, %%xmm5\n"    /* d3[0].im | d3[0].im | d3[0].re | d3[0].re */
    "movlhps %%xmm2, %%xmm2\n"          /* wT[1] | wT[1] */
    "shufps $0x50, %%xmm7, %%xmm7\n"    /* d3[1].im | d3[1].im | d3[1].re | d3[1].re */

    "mulps %%xmm1, %%xmm0\n"            /* d[0].im * wT[0].im | d[0].im * wT[0].re | d[0].re * wT[0].im | d[0].re * wT[0].re */
    "mulps %%xmm3, %%xmm2\n"            /* d[1].im * wT[1].im | d[1].im * wT[1].re | d[1].re * wT[1].im | d[1].re * wT[1].re */
    "movlhps %%xmm4, %%xmm4\n"          /* wB[0] | wB[0] */
    "movlhps %%xmm6, %%xmm6\n"          /* wB[1] | wB[1] */

    "movhlps %%xmm0, %%xmm1\n"          /* d[0].im * wT[0].im | d[0].im * wT[0].re */
    "movlhps %%xmm2, %%xmm0\n"          /* d[1].re * wT[1].im | d[1].re * wT[1].re | d[0].re * wT[0].im | d[0].re * wT[0].re */
    "mulps %%xmm5, %%xmm4\n"            /* wB[0].im * d3[0].im | wB[0].re * d3[0].im | wB[0].im * d3[0].re | wB[0].re * d3[0].re */
    "mulps %%xmm7, %%xmm6\n"            /* wB[1].im * d3[1].im | wB[1].re * d3[1].im | wB[1].im * d3[1].re | wB[1].re * d3[1].re */
    "shufps $0xb1, %%xmm2, %%xmm1\n"    /* d[1].im * wT[1].re | d[1].im * wT[1].im | d[0].im * wT[0].re | d[0].im * wT[0].im */

    /* sign mask at (%%edi) -- presumably loaded on an elided line */
    "movups (%%edi), %%xmm3\n"          /* 1.0 | -1.0 | 1.0 | -1.0 */

    "movhlps %%xmm4, %%xmm5\n"          /* wB[0].im * d3[0].im | wB[0].re * d3[0].im */
    "mulps %%xmm3, %%xmm1\n"            /* d[1].im * wT[1].re | -d[1].im * wT[1].im | d[0].im * wT[0].re | -d[0].im * wT[0].im */
    "movlhps %%xmm6, %%xmm4\n"          /* wB[1].im * d3[1].re | wB[1].re * d3[1].re | wB[0].im * d3[0].re | wB[0].im * d3[0].re */
    "addps %%xmm1, %%xmm0\n"            /* wT[1] * d[1] | wT[0] * d[0] */

    "shufps $0xb1, %%xmm6, %%xmm5\n"    /* wB[1].re * d3[1].im | wB[1].im * d3[1].im | wB[0].re * d3[0].im | wB[0].im * d3[0].im */
    "mulps %%xmm3, %%xmm5\n"            /* wB[1].re * d3[1].im | -wB[1].im * d3[1].im | wB[0].re * d3[0].im | -wB[0].im * d3[0].im */
    "addps %%xmm5, %%xmm4\n"            /* wB[1] * d3[1] | wB[0] * d3[0] */

    "movups %%xmm0, %%xmm1\n"           /* wT[1] * d[1] | wT[0] * d[0] */
    "addps %%xmm4, %%xmm0\n"            /* u */
    "subps %%xmm4, %%xmm1\n"            /* v */
    "movups (%%eax), %%xmm6\n"          /* x[1] | x[0] */
    "leal (%%eax, %%ecx, 2), %%edi\n"
    "mulps %%xmm3, %%xmm1\n"

    "shufps $0xb1, %%xmm1, %%xmm1\n"    /* -i * v */
    "movups (%%eax, %%ecx), %%xmm7\n"   /* xk[1] | xk[0] */
    "movups %%xmm6, %%xmm2\n"
    "movups %%xmm7, %%xmm4\n"
    /* final butterflies: x +/- u and xk -/+ i*v into the four quadrants */
    "addps %%xmm0, %%xmm6\n"
    "subps %%xmm0, %%xmm2\n"
    "movups %%xmm6, (%%eax)\n"
    "movups %%xmm2, (%%edi)\n"
    "addps %%xmm1, %%xmm7\n"
    "subps %%xmm1, %%xmm4\n"

    "movups %%xmm7, (%%eax, %%ecx)\n"
    "movups %%xmm4, (%%edi, %%ecx)\n"