;*****************************************************************************
;* x86-optimized Float DSP functions
;*
;* Copyright 2006 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86util.asm"

SECTION_RODATA 32
pd_reverse: dd 7, 6, 5, 4, 3, 2, 1, 0

SECTION .text
;-----------------------------------------------------------------------------
; void ff_vector_fmul(float *dst, const float *src0, const float *src1,
;                     int len)
;-----------------------------------------------------------------------------
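; A scalar C sketch of the operation (illustrative, with hypothetical naming;
; the SIMD loop below additionally assumes SIMD-aligned pointers and a len
; that is a multiple of the 64-byte unroll width):
;
;     static void vector_fmul_c(float *dst, const float *src0,
;                               const float *src1, int len)
;     {
;         for (int i = 0; i < len; i++)
;             dst[i] = src0[i] * src1[i];
;     }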
%macro VECTOR_FMUL 0
cglobal vector_fmul, 4,4,2, dst, src0, src1, len
    lea       lenq, [lend*4 - 64]
.loop:
%assign a 0
%rep 32/mmsize
    mova      m0,   [src0q + lenq + (a+0)*mmsize]
    mova      m1,   [src0q + lenq + (a+1)*mmsize]
    mulps     m0, m0, [src1q + lenq + (a+0)*mmsize]
    mulps     m1, m1, [src1q + lenq + (a+1)*mmsize]
    mova      [dstq + lenq + (a+0)*mmsize], m0
    mova      [dstq + lenq + (a+1)*mmsize], m1
%assign a a+2
%endrep
    sub       lenq, 64
    jge .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMUL
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL
%endif
;------------------------------------------------------------------------------
; void ff_vector_fmac_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------
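; In scalar C terms (an illustrative sketch; mul is broadcast to a full
; register before the loop below):
;
;     static void vector_fmac_scalar_c(float *dst, const float *src,
;                                      float mul, int len)
;     {
;         for (int i = 0; i < len; i++)
;             dst[i] += src[i] * mul;
;     }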
%macro VECTOR_FMAC_SCALAR 0
%if UNIX64
cglobal vector_fmac_scalar, 3,3,5, dst, src, len
%else
cglobal vector_fmac_scalar, 4,4,5, dst, src, mul, len
%endif
%if ARCH_X86_32
    VBROADCASTSS m0, mulm
%else
%if WIN64
    SWAP 0, 2
%endif
    shufps      xm0, xm0, 0
%if cpuflag(avx)
    vinsertf128  m0, m0, xm0, 1
%endif
%endif
    lea    lenq, [lend*4-64]
.loop:
%if cpuflag(fma3)
    mova     m1,     [dstq+lenq]
    mova     m2,     [dstq+lenq+1*mmsize]
    fmaddps  m1, m0, [srcq+lenq], m1
    fmaddps  m2, m0, [srcq+lenq+1*mmsize], m2
%else ; cpuflag
    mulps    m1, m0, [srcq+lenq]
    mulps    m2, m0, [srcq+lenq+1*mmsize]
%if mmsize < 32
    mulps    m3, m0, [srcq+lenq+2*mmsize]
    mulps    m4, m0, [srcq+lenq+3*mmsize]
%endif ; mmsize
    addps    m1, m1, [dstq+lenq]
    addps    m2, m2, [dstq+lenq+1*mmsize]
%if mmsize < 32
    addps    m3, m3, [dstq+lenq+2*mmsize]
    addps    m4, m4, [dstq+lenq+3*mmsize]
%endif ; mmsize
%endif ; cpuflag
    mova  [dstq+lenq], m1
    mova  [dstq+lenq+1*mmsize], m2
%if mmsize < 32
    mova  [dstq+lenq+2*mmsize], m3
    mova  [dstq+lenq+3*mmsize], m4
%endif ; mmsize
    sub    lenq, 64
    jge .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMAC_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMAC_SCALAR
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
VECTOR_FMAC_SCALAR
%endif
;------------------------------------------------------------------------------
; void ff_vector_fmul_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------
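; Scalar C sketch (illustrative):
;
;     static void vector_fmul_scalar_c(float *dst, const float *src,
;                                      float mul, int len)
;     {
;         for (int i = 0; i < len; i++)
;             dst[i] = src[i] * mul;
;     }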
%macro VECTOR_FMUL_SCALAR 0
%if UNIX64
cglobal vector_fmul_scalar, 3,3,2, dst, src, len
%else
cglobal vector_fmul_scalar, 4,4,3, dst, src, mul, len
%endif
%if ARCH_X86_32
    movss    m0, mulm
%elif WIN64
    SWAP 0, 2
%endif
    shufps   m0, m0, 0
    lea    lenq, [lend*4-mmsize]
.loop:
    mova     m1, [srcq+lenq]
    mulps    m1, m0
    mova  [dstq+lenq], m1
    sub    lenq, mmsize
    jge .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_SCALAR
;------------------------------------------------------------------------------
; void ff_vector_dmac_scalar(double *dst, const double *src, double mul,
;                            int len)
;------------------------------------------------------------------------------
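; Scalar C sketch (illustrative), the double-precision counterpart of
; vector_fmac_scalar:
;
;     static void vector_dmac_scalar_c(double *dst, const double *src,
;                                      double mul, int len)
;     {
;         for (int i = 0; i < len; i++)
;             dst[i] += src[i] * mul;
;     }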
%macro VECTOR_DMAC_SCALAR 0
%if ARCH_X86_32
cglobal vector_dmac_scalar, 2,4,5, dst, src, mul, len, lenaddr
    mov          lenq, lenaddrm
    VBROADCASTSD m0, mulm
%else
%if UNIX64
cglobal vector_dmac_scalar, 3,3,5, dst, src, len
%else
cglobal vector_dmac_scalar, 4,4,5, dst, src, mul, len
    SWAP 0, 2
%endif
    movlhps     xm0, xm0
%if cpuflag(avx)
    vinsertf128  m0, m0, xm0, 1
%endif
%endif
    lea    lenq, [lend*8-mmsize*4]
.loop:
%if cpuflag(fma3)
    movaps   m1,     [dstq+lenq]
    movaps   m2,     [dstq+lenq+1*mmsize]
    movaps   m3,     [dstq+lenq+2*mmsize]
    movaps   m4,     [dstq+lenq+3*mmsize]
    fmaddpd  m1, m0, [srcq+lenq], m1
    fmaddpd  m2, m0, [srcq+lenq+1*mmsize], m2
    fmaddpd  m3, m0, [srcq+lenq+2*mmsize], m3
    fmaddpd  m4, m0, [srcq+lenq+3*mmsize], m4
%else ; cpuflag
    mulpd    m1, m0, [srcq+lenq]
    mulpd    m2, m0, [srcq+lenq+1*mmsize]
    mulpd    m3, m0, [srcq+lenq+2*mmsize]
    mulpd    m4, m0, [srcq+lenq+3*mmsize]
    addpd    m1, m1, [dstq+lenq]
    addpd    m2, m2, [dstq+lenq+1*mmsize]
    addpd    m3, m3, [dstq+lenq+2*mmsize]
    addpd    m4, m4, [dstq+lenq+3*mmsize]
%endif ; cpuflag
    movaps [dstq+lenq], m1
    movaps [dstq+lenq+1*mmsize], m2
    movaps [dstq+lenq+2*mmsize], m3
    movaps [dstq+lenq+3*mmsize], m4
    sub    lenq, mmsize*4
    jge .loop
    RET
%endmacro

INIT_XMM sse2
VECTOR_DMAC_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_DMAC_SCALAR
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
VECTOR_DMAC_SCALAR
%endif
;------------------------------------------------------------------------------
; void ff_vector_dmul_scalar(double *dst, const double *src, double mul,
;                            int len)
;------------------------------------------------------------------------------
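; Scalar C sketch (illustrative), the double-precision counterpart of
; vector_fmul_scalar:
;
;     static void vector_dmul_scalar_c(double *dst, const double *src,
;                                      double mul, int len)
;     {
;         for (int i = 0; i < len; i++)
;             dst[i] = src[i] * mul;
;     }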
%macro VECTOR_DMUL_SCALAR 0
%if ARCH_X86_32
cglobal vector_dmul_scalar, 3,4,3, dst, src, mul, len, lenaddr
    mov          lenq, lenaddrm
%elif UNIX64
cglobal vector_dmul_scalar, 3,3,3, dst, src, len
%else
cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len
%endif
%if ARCH_X86_32
    VBROADCASTSD  m0, mulm
%else
%if WIN64
    SWAP 0, 2
%endif
    movlhps      xm0, xm0
%if cpuflag(avx)
    vinsertf128  ym0, ym0, xm0, 1
%endif
%endif
    lea          lenq, [lend*8-2*mmsize]
.loop:
    mulpd  m1, m0, [srcq+lenq       ]
    mulpd  m2, m0, [srcq+lenq+mmsize]
    movaps [dstq+lenq       ], m1
    movaps [dstq+lenq+mmsize], m2
    sub    lenq, 2*mmsize
    jge .loop
    RET
%endmacro

INIT_XMM sse2
VECTOR_DMUL_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_DMUL_SCALAR
%endif
;-----------------------------------------------------------------------------
; void ff_vector_fmul_window(float *dst, const float *src0,
;                            const float *src1, const float *win, int len)
;-----------------------------------------------------------------------------
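; Scalar C sketch of the windowed overlap (illustrative; src0 and win are
; indexed from the end, matching the negative-offset addressing in the loop
; below):
;
;     static void vector_fmul_window_c(float *dst, const float *src0,
;                                      const float *src1, const float *win,
;                                      int len)
;     {
;         dst  += len;
;         win  += len;
;         src0 += len;
;         for (int i = -len, j = len - 1; i < 0; i++, j--) {
;             float s0 = src0[i], s1 = src1[j];
;             float wi = win[i],  wj = win[j];
;             dst[i] = s0 * wj - s1 * wi;
;             dst[j] = s0 * wi + s1 * wj;
;         }
;     }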
%macro VECTOR_FMUL_WINDOW 0
cglobal vector_fmul_window, 5, 6, 6, dst, src0, src1, win, len, len1
    shl     lend, 2
    lea    len1q, [lenq - mmsize]
    add    src0q, lenq
    add     dstq, lenq
    add     winq, lenq
    neg     lenq
.loop:
    mova      m0, [winq  + lenq]
    mova      m4, [src0q + lenq]
%if cpuflag(sse)
    mova      m1, [winq  + len1q]
    mova      m5, [src1q + len1q]
    shufps    m1, m1, 0x1b
    shufps    m5, m5, 0x1b
    mova      m2, m0
    mova      m3, m1
    mulps     m2, m4
    mulps     m3, m5
    addps     m2, m3
    mulps     m1, m4
    mulps     m0, m5
    subps     m1, m0
    shufps    m2, m2, 0x1b
%else
    pswapd    m1, [winq  + len1q]
    pswapd    m5, [src1q + len1q]
    mova      m2, m0
    mova      m3, m1
    pfmul     m2, m4
    pfmul     m3, m5
    pfadd     m2, m3
    pfmul     m1, m4
    pfmul     m0, m5
    pfsub     m1, m0
    pswapd    m2, m2
%endif
    mova  [dstq + lenq], m1
    mova  [dstq + len1q], m2
    sub    len1q, mmsize
    add     lenq, mmsize
    jl .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_WINDOW
;-----------------------------------------------------------------------------
; void ff_vector_fmul_add(float *dst, const float *src0, const float *src1,
;                         const float *src2, int len)
;-----------------------------------------------------------------------------
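; Scalar C sketch (illustrative):
;
;     static void vector_fmul_add_c(float *dst, const float *src0,
;                                   const float *src1, const float *src2,
;                                   int len)
;     {
;         for (int i = 0; i < len; i++)
;             dst[i] = src0[i] * src1[i] + src2[i];
;     }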
%macro VECTOR_FMUL_ADD 0
cglobal vector_fmul_add, 5,5,4, dst, src0, src1, src2, len
    lea       lenq, [lend*4 - 2*mmsize]
.loop:
    mova    m0,   [src0q + lenq]
    mova    m1,   [src0q + lenq + mmsize]
%if cpuflag(fma3)
    mova    m2,     [src2q + lenq]
    mova    m3,     [src2q + lenq + mmsize]
    fmaddps m0, m0, [src1q + lenq], m2
    fmaddps m1, m1, [src1q + lenq + mmsize], m3
%else
    mulps   m0, m0, [src1q + lenq]
    mulps   m1, m1, [src1q + lenq + mmsize]
    addps   m0, m0, [src2q + lenq]
    addps   m1, m1, [src2q + lenq + mmsize]
%endif
    mova    [dstq + lenq], m0
    mova    [dstq + lenq + mmsize], m1
    sub     lenq, 2*mmsize
    jge .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_ADD
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL_ADD
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
VECTOR_FMUL_ADD
%endif
;-----------------------------------------------------------------------------
; void ff_vector_fmul_reverse(float *dst, const float *src0,
;                             const float *src1, int len)
;-----------------------------------------------------------------------------
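; Scalar C sketch (illustrative): src1 is read back to front, which is why
; the loop below walks src1 forward while indexing src0 and dst from the end:
;
;     static void vector_fmul_reverse_c(float *dst, const float *src0,
;                                       const float *src1, int len)
;     {
;         for (int i = 0; i < len; i++)
;             dst[i] = src0[i] * src1[len - i - 1];
;     }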
%macro VECTOR_FMUL_REVERSE 0
cglobal vector_fmul_reverse, 4,4,2, dst, src0, src1, len
%if cpuflag(avx2)
    movaps  m2, [pd_reverse]
%endif
    lea       lenq, [lend*4 - 2*mmsize]
.loop:
%if cpuflag(avx2)
    vpermps m0, m2, [src1q]
    vpermps m1, m2, [src1q + mmsize]
%elif cpuflag(avx)
    vmovaps     xmm0, [src1q + 16]
    vinsertf128 m0, m0, [src1q], 1
    vshufps     m0, m0, m0, q0123
    vmovaps     xmm1, [src1q + mmsize + 16]
    vinsertf128 m1, m1, [src1q + mmsize], 1
    vshufps     m1, m1, m1, q0123
%else
    mova    m0, [src1q]
    mova    m1, [src1q + mmsize]
    shufps  m0, m0, q0123
    shufps  m1, m1, q0123
%endif
    mulps   m0, m0, [src0q + lenq + mmsize]
    mulps   m1, m1, [src0q + lenq]
    movaps  [dstq + lenq + mmsize], m0
    movaps  [dstq + lenq], m1
    add     src1q, 2*mmsize
    sub     lenq,  2*mmsize
    jge .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_REVERSE
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL_REVERSE
%endif
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
VECTOR_FMUL_REVERSE
%endif
;-----------------------------------------------------------------------------
; float ff_scalarproduct_float(const float *v1, const float *v2, int len)
;-----------------------------------------------------------------------------
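; Scalar C sketch (illustrative); the SSE version below keeps four partial
; sums in xmm0 and reduces them horizontally after the loop:
;
;     static float scalarproduct_float_c(const float *v1, const float *v2,
;                                        int len)
;     {
;         float p = 0.0f;
;         for (int i = 0; i < len; i++)
;             p += v1[i] * v2[i];
;         return p;
;     }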
INIT_XMM sse
cglobal scalarproduct_float, 3,3,2, v1, v2, offset
    shl   offsetd, 2
    add       v1q, offsetq
    add       v2q, offsetq
    neg   offsetq
    xorps    xmm0, xmm0
.loop:
    movaps   xmm1, [v1q+offsetq]
    mulps    xmm1, [v2q+offsetq]
    addps    xmm0, xmm1
    add   offsetq, 16
    js .loop
    movhlps  xmm1, xmm0
    addps    xmm0, xmm1
    movss    xmm1, xmm0
    shufps   xmm0, xmm0, 1
    addss    xmm0, xmm1
%if ARCH_X86_64 == 0
    movss     r0m,  xmm0
    fld dword r0m
%endif
    RET
;-----------------------------------------------------------------------------
; void ff_butterflies_float(float *src0, float *src1, int len);
;-----------------------------------------------------------------------------
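; Scalar C sketch (illustrative): an in-place sum/difference butterfly:
;
;     static void butterflies_float_c(float *src0, float *src1, int len)
;     {
;         for (int i = 0; i < len; i++) {
;             float t  = src0[i] - src1[i];
;             src0[i] += src1[i];
;             src1[i]  = t;
;         }
;     }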
INIT_XMM sse
cglobal butterflies_float, 3,3,3, src0, src1, len
    shl    lend, 2
    add   src0q, lenq
    add   src1q, lenq
    neg    lenq
.loop:
    mova   m0, [src0q + lenq]
    mova   m1, [src1q + lenq]
    subps  m2, m0, m1
    addps  m0, m0, m1
    mova   [src1q + lenq], m2
    mova   [src0q + lenq], m0
    add    lenq, mmsize
    jl .loop
    RET