;*****************************************************************************
;* x86-optimized Float DSP functions
;*
;* Copyright 2006 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86util.asm"

SECTION .text
;-----------------------------------------------------------------------------
; void vector_fmul(float *dst, const float *src0, const float *src1, int len)
;-----------------------------------------------------------------------------
%macro VECTOR_FMUL 0
cglobal vector_fmul, 4,4,2, dst, src0, src1, len
    lea       lenq, [lend*4 - 64]
.loop:
    ; unrolled to process 64 bytes per iteration regardless of mmsize
%assign a 0
%rep 32/mmsize
    mova      m0, [src0q + lenq + (a+0)*mmsize]
    mova      m1, [src0q + lenq + (a+1)*mmsize]
    mulps     m0, m0, [src1q + lenq + (a+0)*mmsize]
    mulps     m1, m1, [src1q + lenq + (a+1)*mmsize]
    mova      [dstq + lenq + (a+0)*mmsize], m0
    mova      [dstq + lenq + (a+1)*mmsize], m1
%assign a a+2
%endrep

    sub       lenq, 64
    jge       .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMUL
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL
%endif
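
; Equivalent per-element behaviour, as a minimal scalar C sketch (for
; illustration only, not part of the build; assumes len is a multiple of
; the 16-float unroll, as the SIMD loop above requires):
;
;     for (int i = 0; i < len; i++)
;         dst[i] = src0[i] * src1[i];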
;------------------------------------------------------------------------------
; void ff_vector_fmac_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------
%macro VECTOR_FMAC_SCALAR 0
%if UNIX64
cglobal vector_fmac_scalar, 3,3,5, dst, src, len
%else
cglobal vector_fmac_scalar, 4,4,5, dst, src, mul, len
%endif
    ; broadcast the scalar multiplier across all lanes of m0
%if ARCH_X86_32
    VBROADCASTSS m0, mulm
%else
%if WIN64
    SWAP 0, 2
%endif
    shufps      xm0, xm0, 0
%if cpuflag(avx)
    vinsertf128  m0, m0, xm0, 1
%endif
%endif
    lea    lenq, [lend*4-64]
.loop:
%if cpuflag(fma3)
    mova     m1,     [dstq+lenq]
    mova     m2,     [dstq+lenq+1*mmsize]
    fmaddps  m1, m0, [srcq+lenq], m1
    fmaddps  m2, m0, [srcq+lenq+1*mmsize], m2
%else ; cpuflag
    mulps    m1, m0, [srcq+lenq]
    mulps    m2, m0, [srcq+lenq+1*mmsize]
%if mmsize < 32
    mulps    m3, m0, [srcq+lenq+2*mmsize]
    mulps    m4, m0, [srcq+lenq+3*mmsize]
%endif ; mmsize
    addps    m1, m1, [dstq+lenq]
    addps    m2, m2, [dstq+lenq+1*mmsize]
%if mmsize < 32
    addps    m3, m3, [dstq+lenq+2*mmsize]
    addps    m4, m4, [dstq+lenq+3*mmsize]
%endif ; mmsize
%endif ; cpuflag
    mova  [dstq+lenq], m1
    mova  [dstq+lenq+1*mmsize], m2
%if mmsize < 32
    mova  [dstq+lenq+2*mmsize], m3
    mova  [dstq+lenq+3*mmsize], m4
%endif ; mmsize
    sub    lenq, 64
    jge .loop
    RET
%endmacro
INIT_XMM sse
VECTOR_FMAC_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMAC_SCALAR
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
VECTOR_FMAC_SCALAR
%endif
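
; Equivalent per-element behaviour (a minimal scalar C sketch, illustration
; only; len is assumed to be a multiple of the 16-float unroll):
;
;     for (int i = 0; i < len; i++)
;         dst[i] += src[i] * mul;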
;------------------------------------------------------------------------------
; void ff_vector_fmul_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------
%macro VECTOR_FMUL_SCALAR 0
%if UNIX64
cglobal vector_fmul_scalar, 3,3,2, dst, src, len
%else
cglobal vector_fmul_scalar, 4,4,3, dst, src, mul, len
%endif
    ; broadcast the scalar multiplier across all lanes of m0
%if ARCH_X86_32
    movss    m0, mulm
%elif WIN64
    SWAP 0, 2
%endif
    shufps   m0, m0, 0
    lea    lenq, [lend*4-mmsize]
.loop:
    mova     m1, [srcq+lenq]
    mulps    m1, m0
    mova  [dstq+lenq], m1
    sub    lenq, mmsize
    jge .loop
    RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_SCALAR
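
; Equivalent per-element behaviour (a minimal scalar C sketch, illustration
; only):
;
;     for (int i = 0; i < len; i++)
;         dst[i] = src[i] * mul;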
;------------------------------------------------------------------------------
; void ff_vector_dmul_scalar(double *dst, const double *src, double mul,
;                            int len)
;------------------------------------------------------------------------------
%macro VECTOR_DMUL_SCALAR 0
%if ARCH_X86_32
cglobal vector_dmul_scalar, 3,4,3, dst, src, mul, len, lenaddr
    mov         lenq, lenaddrm
%elif UNIX64
cglobal vector_dmul_scalar, 3,3,3, dst, src, len
%else
cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len
%endif
    ; broadcast the scalar multiplier across all lanes of m0
%if ARCH_X86_32
    VBROADCASTSD  m0, mulm
%else
%if WIN64
    SWAP 0, 2
%endif
    movlhps      xm0, xm0
%if cpuflag(avx)
    vinsertf128  ym0, ym0, xm0, 1
%endif
%endif
    lea         lenq, [lend*8-2*mmsize]
.loop:
    mulpd   m1, m0, [srcq+lenq       ]
    mulpd   m2, m0, [srcq+lenq+mmsize]
    mova    [dstq+lenq       ], m1
    mova    [dstq+lenq+mmsize], m2
    sub     lenq, 2*mmsize
    jge .loop
    RET
%endmacro
INIT_XMM sse2
VECTOR_DMUL_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_DMUL_SCALAR
%endif
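
; Equivalent per-element behaviour (a minimal scalar C sketch, illustration
; only; same as vector_fmul_scalar but on doubles):
;
;     for (int i = 0; i < len; i++)
;         dst[i] = src[i] * mul;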
;-----------------------------------------------------------------------------
; void vector_fmul_add(float *dst, const float *src0, const float *src1,
;                      const float *src2, int len)
;-----------------------------------------------------------------------------
%macro VECTOR_FMUL_ADD 0
cglobal vector_fmul_add, 5,5,4, dst, src0, src1, src2, len
    lea       lenq, [lend*4 - 2*mmsize]
.loop:
    mova    m0,   [src0q + lenq]
    mova    m1,   [src0q + lenq + mmsize]
%if cpuflag(fma3)
    mova    m2,     [src2q + lenq]
    mova    m3,     [src2q + lenq + mmsize]
    fmaddps m0, m0, [src1q + lenq], m2
    fmaddps m1, m1, [src1q + lenq + mmsize], m3
%else
    mulps   m0, m0, [src1q + lenq]
    mulps   m1, m1, [src1q + lenq + mmsize]
    addps   m0, m0, [src2q + lenq]
    addps   m1, m1, [src2q + lenq + mmsize]
%endif
    mova    [dstq + lenq], m0
    mova    [dstq + lenq + mmsize], m1

    sub     lenq, 2*mmsize
    jge     .loop
    RET
%endmacro
INIT_XMM sse
VECTOR_FMUL_ADD
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL_ADD
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
VECTOR_FMUL_ADD
%endif
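
; Equivalent per-element behaviour (a minimal scalar C sketch, illustration
; only):
;
;     for (int i = 0; i < len; i++)
;         dst[i] = src0[i] * src1[i] + src2[i];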
;-----------------------------------------------------------------------------
; void vector_fmul_reverse(float *dst, const float *src0, const float *src1,
;                          int len)
;-----------------------------------------------------------------------------
%macro VECTOR_FMUL_REVERSE 0
cglobal vector_fmul_reverse, 4,4,2, dst, src0, src1, len
    lea       lenq, [lend*4 - 2*mmsize]
.loop:
%if cpuflag(avx)
    ; load src1 forwards, then reverse the element order across the register
    vmovaps     xmm0, [src1q + 16]
    vinsertf128 m0, m0, [src1q], 1
    vshufps     m0, m0, m0, q0123
    vmovaps     xmm1, [src1q + mmsize + 16]
    vinsertf128 m1, m1, [src1q + mmsize], 1
    vshufps     m1, m1, m1, q0123
%else
    mova    m0, [src1q]
    mova    m1, [src1q + mmsize]
    shufps  m0, m0, q0123
    shufps  m1, m1, q0123
%endif
    mulps   m0, m0, [src0q + lenq + mmsize]
    mulps   m1, m1, [src0q + lenq]
    mova    [dstq + lenq + mmsize], m0
    mova    [dstq + lenq], m1
    add     src1q, 2*mmsize
    sub     lenq,  2*mmsize
    jge     .loop
    RET
%endmacro
INIT_XMM sse
VECTOR_FMUL_REVERSE
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL_REVERSE
%endif
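
; Equivalent per-element behaviour (a minimal scalar C sketch, illustration
; only; src1 is consumed back-to-front):
;
;     for (int i = 0; i < len; i++)
;         dst[i] = src0[i] * src1[len - 1 - i];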
; float scalarproduct_float_sse(const float *v1, const float *v2, int len)
INIT_XMM sse
cglobal scalarproduct_float, 3,3,2, v1, v2, offset
    ; walk both arrays with a negative byte offset that counts up to zero
    neg   offsetq
    shl   offsetq, 2
    sub       v1q, offsetq
    sub       v2q, offsetq
    xorps    xmm0, xmm0
.loop:
    movaps   xmm1, [v1q+offsetq]
    mulps    xmm1, [v2q+offsetq]
    addps    xmm0, xmm1
    add   offsetq, 16
    js .loop
    ; horizontal sum of the four partial sums in xmm0
    movhlps  xmm1, xmm0
    addps    xmm0, xmm1
    movss    xmm1, xmm0
    shufps   xmm0, xmm0, 1
    addss    xmm0, xmm1
%if ARCH_X86_64 == 0
    movss     r0m,  xmm0
    fld dword r0m
%endif
    RET
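
; Equivalent behaviour (a minimal scalar C sketch, illustration only; the
; SIMD loop accumulates four partial sums, so float rounding may differ
; from this serial version):
;
;     float p = 0.0f;
;     for (int i = 0; i < len; i++)
;         p += v1[i] * v2[i];
;     return p;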
;-----------------------------------------------------------------------------
; void ff_butterflies_float(float *src0, float *src1, int len);
;-----------------------------------------------------------------------------
INIT_XMM sse
cglobal butterflies_float, 3,3,3, src0, src1, len
    shl       lend, 2
    add      src0q, lenq
    add      src1q, lenq
    neg       lenq
.loop:
    mova       m0, [src0q + lenq]
    mova       m1, [src1q + lenq]
    subps      m2, m0, m1
    addps      m0, m0, m1
    mova       [src1q + lenq], m2
    mova       [src0q + lenq], m0
    add       lenq, mmsize
    jl .loop
    RET
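
; Equivalent per-element behaviour (a minimal scalar C sketch, illustration
; only):
;
;     for (int i = 0; i < len; i++) {
;         float t  = src0[i] - src1[i];
;         src0[i] += src1[i];
;         src1[i]  = t;
;     }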