;*****************************************************************************
;* x86-optimized Float DSP functions
;*
;* Copyright 2006 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86util.asm"

SECTION .text
;-----------------------------------------------------------------------------
; void vector_fmul(float *dst, const float *src0, const float *src1, int len)
;-----------------------------------------------------------------------------
cglobal vector_fmul, 4,4,2, dst, src0, src1, len
    lea       lenq, [lend*4 - 64]
    mova      m0,   [src0q + lenq + (a+0)*mmsize]
    mova      m1,   [src0q + lenq + (a+1)*mmsize]
    mulps     m0, m0, [src1q + lenq + (a+0)*mmsize]
    mulps     m1, m1, [src1q + lenq + (a+1)*mmsize]
    mova      [dstq + lenq + (a+0)*mmsize], m0
    mova      [dstq + lenq + (a+1)*mmsize], m1
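
; A scalar C sketch of the operation above (the helper name and the length
; assumption are illustrative, not taken from this file): the unrolled loop
; handles 64 bytes (16 floats) per pass, so len is assumed to be a multiple
; of 16 and the buffers aligned for mova.
;
;   static void vector_fmul_ref(float *dst, const float *src0,
;                               const float *src1, int len)
;   {
;       for (int i = 0; i < len; i++)
;           dst[i] = src0[i] * src1[i];
;   }
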
;------------------------------------------------------------------------------
; void ff_vector_fmac_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------
%macro VECTOR_FMAC_SCALAR 0
cglobal vector_fmac_scalar, 3,3,3, dst, src, len      ; UNIX64: mul arrives in xmm0
cglobal vector_fmac_scalar, 4,4,3, dst, src, mul, len ; other ABIs: mul is a named argument
    vinsertf128  m0, m0, xmm0, 1
    mova    m1,     [dstq+lenq+(a+0)*mmsize]
    mova    m2,     [dstq+lenq+(a+1)*mmsize]
    ; FMA3 path: fused multiply-accumulate
    fmaddps m1, m0, [srcq+lenq+(a+0)*mmsize], m1
    fmaddps m2, m0, [srcq+lenq+(a+1)*mmsize], m2
    ; non-FMA path: separate multiply and add
    mulps   m1, m0, [srcq+lenq+(a+0)*mmsize]
    mulps   m2, m0, [srcq+lenq+(a+1)*mmsize]
    addps   m1, m1, [dstq+lenq+(a+0)*mmsize]
    addps   m2, m2, [dstq+lenq+(a+1)*mmsize]
    mova    [dstq+lenq+(a+0)*mmsize], m1
    mova    [dstq+lenq+(a+1)*mmsize], m2
%if HAVE_AVX_EXTERNAL
%if HAVE_FMA3_EXTERNAL
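
; A scalar C sketch of vector_fmac_scalar (illustrative helper, assuming len is
; a multiple of the unroll width): dst accumulates src scaled by mul, which is
; what both the fmaddps and the mulps/addps paths above compute.
;
;   static void vector_fmac_scalar_ref(float *dst, const float *src,
;                                      float mul, int len)
;   {
;       for (int i = 0; i < len; i++)
;           dst[i] += src[i] * mul;
;   }
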
;------------------------------------------------------------------------------
; void ff_vector_fmul_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------
%macro VECTOR_FMUL_SCALAR 0
cglobal vector_fmul_scalar, 3,3,2, dst, src, len      ; UNIX64: mul arrives in xmm0
cglobal vector_fmul_scalar, 4,4,3, dst, src, mul, len ; other ABIs: mul is a named argument
    lea    lenq, [lend*4-mmsize]
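
; A scalar C sketch of vector_fmul_scalar (illustrative helper; the loop steps
; one mmsize block at a time, so len is assumed to be a multiple of the vector
; width):
;
;   static void vector_fmul_scalar_ref(float *dst, const float *src,
;                                      float mul, int len)
;   {
;       for (int i = 0; i < len; i++)
;           dst[i] = src[i] * mul;
;   }
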
;------------------------------------------------------------------------------
; void ff_vector_dmul_scalar(double *dst, const double *src, double mul,
;                            int len)
;------------------------------------------------------------------------------
%macro VECTOR_DMUL_SCALAR 0
cglobal vector_dmul_scalar, 3,4,3, dst, src, mul, len, lenaddr ; x86_32
cglobal vector_dmul_scalar, 3,3,3, dst, src, len               ; UNIX64: mul arrives in xmm0
cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len          ; other ABIs
    VBROADCASTSD m0, mulm
    vinsertf128  ymm2, ymm2, xmm2, 1
    vinsertf128  ymm0, ymm0, xmm0, 1
    lea    lenq, [lend*8-2*mmsize]
    mulpd    m1, m0, [srcq+lenq       ]
    mulpd    m2, m0, [srcq+lenq+mmsize]
    mova   [dstq+lenq       ], m1
    mova   [dstq+lenq+mmsize], m2
%if HAVE_AVX_EXTERNAL
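
; A scalar C sketch of vector_dmul_scalar, the double-precision counterpart
; (illustrative helper; the loop handles 2*mmsize bytes per pass, so len is
; assumed to be a multiple of that element count):
;
;   static void vector_dmul_scalar_ref(double *dst, const double *src,
;                                      double mul, int len)
;   {
;       for (int i = 0; i < len; i++)
;           dst[i] = src[i] * mul;
;   }
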
;-----------------------------------------------------------------------------
; vector_fmul_add(float *dst, const float *src0, const float *src1,
;                 const float *src2, int len)
;-----------------------------------------------------------------------------
%macro VECTOR_FMUL_ADD 0
cglobal vector_fmul_add, 5,5,4, dst, src0, src1, src2, len
    lea       lenq, [lend*4 - 2*mmsize]
    mova      m0,   [src0q + lenq]
    mova      m1,   [src0q + lenq + mmsize]
    mova      m2,   [src2q + lenq]
    mova      m3,   [src2q + lenq + mmsize]
    ; FMA3 path: fused multiply-add
    fmaddps   m0, m0, [src1q + lenq], m2
    fmaddps   m1, m1, [src1q + lenq + mmsize], m3
    ; non-FMA path: separate multiply and add
    mulps     m0, m0, [src1q + lenq]
    mulps     m1, m1, [src1q + lenq + mmsize]
    addps     m0, m0, [src2q + lenq]
    addps     m1, m1, [src2q + lenq + mmsize]
    mova      [dstq + lenq], m0
    mova      [dstq + lenq + mmsize], m1
%if HAVE_AVX_EXTERNAL
%if HAVE_FMA3_EXTERNAL
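
; A scalar C sketch of vector_fmul_add (illustrative helper, assuming len is a
; multiple of the unroll width): both the fmaddps and the mulps/addps paths
; above compute dst = src0*src1 + src2 elementwise.
;
;   static void vector_fmul_add_ref(float *dst, const float *src0,
;                                   const float *src1, const float *src2,
;                                   int len)
;   {
;       for (int i = 0; i < len; i++)
;           dst[i] = src0[i] * src1[i] + src2[i];
;   }
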
;-----------------------------------------------------------------------------
; void vector_fmul_reverse(float *dst, const float *src0, const float *src1,
;                          int len)
;-----------------------------------------------------------------------------
%macro VECTOR_FMUL_REVERSE 0
cglobal vector_fmul_reverse, 4,4,2, dst, src0, src1, len
    lea       lenq, [lend*4 - 2*mmsize]
    ; AVX path: load src1 with its 16-byte halves swapped, then reverse each half
    vmovaps     xmm0, [src1q + 16]
    vinsertf128 m0, m0, [src1q], 1
    vshufps     m0, m0, m0, q0123
    vmovaps     xmm1, [src1q + mmsize + 16]
    vinsertf128 m1, m1, [src1q + mmsize], 1
    vshufps     m1, m1, m1, q0123
    ; SSE path: plain load of src1, reversed in-register
    mova        m1, [src1q + mmsize]
    ; multiply with src0 and store to dst
    mulps       m0, m0, [src0q + lenq + mmsize]
    mulps       m1, m1, [src0q + lenq]
    mova        [dstq + lenq + mmsize], m0
    mova        [dstq + lenq], m1
%if HAVE_AVX_EXTERNAL
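
; A scalar C sketch of vector_fmul_reverse (illustrative helper): src1 is read
; back to front, which the load/shuffle sequences above implement one vector at
; a time.
;
;   static void vector_fmul_reverse_ref(float *dst, const float *src0,
;                                       const float *src1, int len)
;   {
;       for (int i = 0; i < len; i++)
;           dst[i] = src0[i] * src1[len - i - 1];
;   }
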
; float scalarproduct_float_sse(const float *v1, const float *v2, int len)
cglobal scalarproduct_float, 3,3,2, v1, v2, offset
    movaps   xmm1, [v1q+offsetq]
    mulps    xmm1, [v2q+offsetq]
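
; A scalar C sketch of scalarproduct_float (illustrative helper; the SSE loop
; above multiplies four floats per pass and the products are summed, so len is
; assumed to be a multiple of 4):
;
;   static float scalarproduct_float_ref(const float *v1, const float *v2,
;                                        int len)
;   {
;       float sum = 0.0f;
;       for (int i = 0; i < len; i++)
;           sum += v1[i] * v2[i];
;       return sum;
;   }
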
;-----------------------------------------------------------------------------
; void ff_butterflies_float(float *src0, float *src1, int len);
;-----------------------------------------------------------------------------
cglobal butterflies_float, 3,3,3, src0, src1, len
    mova   m0, [src0q + lenq]
    mova   m1, [src1q + lenq]
    mova   [src1q + lenq], m2
    mova   [src0q + lenq], m0
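
; A scalar C sketch of butterflies_float (illustrative helper): the sum of each
; pair is written back to src0 and the difference to src1, matching the two
; stores above.
;
;   static void butterflies_float_ref(float *src0, float *src1, int len)
;   {
;       for (int i = 0; i < len; i++) {
;           float t = src0[i] - src1[i];
;           src0[i] = src0[i] + src1[i];
;           src1[i] = t;
;       }
;   }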