1 ;*****************************************************************************
2 ;* x86-optimized Float DSP functions
4 ;* Copyright 2006 Loren Merritt
6 ;* This file is part of FFmpeg.
8 ;* FFmpeg is free software; you can redistribute it and/or
9 ;* modify it under the terms of the GNU Lesser General Public
10 ;* License as published by the Free Software Foundation; either
11 ;* version 2.1 of the License, or (at your option) any later version.
13 ;* FFmpeg is distributed in the hope that it will be useful,
14 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 ;* Lesser General Public License for more details.
18 ;* You should have received a copy of the GNU Lesser General Public
19 ;* License along with FFmpeg; if not, write to the Free Software
20 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 ;******************************************************************************
23 %include "x86util.asm"
27 ;-----------------------------------------------------------------------------
28 ; void vector_fmul(float *dst, const float *src0, const float *src1, int len)
29 ;-----------------------------------------------------------------------------
; Element-wise product of two float arrays: dst[i] = src0[i] * src1[i].
; x86inc cglobal: 4 int/ptr args, 4 GPRs, 2 vector regs; args are addressable
; as dstq/src0q/src1q/lenq (lend is the 32-bit view of len).
31 cglobal vector_fmul, 4,4,2, dst, src0, src1, len
; Convert the element count to a byte offset. The -64 bias suggests the loop
; (not visible in this excerpt) processes 64 bytes per iteration, indexing
; from the array end and counting up through zero -- TODO confirm against the
; full file.
32 lea lenq, [lend*4 - 64]
; Two unrolled mmsize-wide iterations; 'a' is a %rep counter defined on lines
; outside this excerpt. mova requires mmsize-aligned pointers, so callers
; presumably guarantee alignment -- verify at call sites.
37 mova m0, [src0q + lenq + (a+0)*mmsize]
38 mova m1, [src0q + lenq + (a+1)*mmsize]
; 3-operand mulps: native under AVX; for SSE, x86inc emulates it (legal here
; because the destination equals the first source).
39 mulps m0, m0, [src1q + lenq + (a+0)*mmsize]
40 mulps m1, m1, [src1q + lenq + (a+1)*mmsize]
41 mova [dstq + lenq + (a+0)*mmsize], m0
42 mova [dstq + lenq + (a+1)*mmsize], m1
58 ;------------------------------------------------------------------------------
59 ; void ff_vector_fmac_scalar(float *dst, const float *src, float mul, int len)
60 ;------------------------------------------------------------------------------
; Multiply-accumulate by a scalar: dst[i] += src[i] * mul, emitted once per
; target ISA via this parameterless macro.
62 %macro VECTOR_FMAC_SCALAR 0
; Two alternative prototypes: on ABIs where 'mul' already arrives in xmm0
; (e.g. SysV float argument passing) only 3 GPR args are declared; otherwise
; 'mul' is a 4th named argument. The %if selecting between them lies outside
; this excerpt -- confirm in the full file.
64 cglobal vector_fmac_scalar, 3,3,3, dst, src, len
66 cglobal vector_fmac_scalar, 4,4,3, dst, src, mul, len
; AVX path: duplicate the low 128 bits (holding the broadcast scalar) into
; the high lane so m0 contains the scalar in every ymm element.
76 vinsertf128 m0, m0, xmm0, 1
; Unrolled multiply-accumulate body: m1/m2 = mul * src, then += dst, then
; store. 'a' is an unseen %rep counter; 3-operand forms via x86inc.
83 mulps m1, m0, [srcq+lenq+(a+0)*mmsize]
84 mulps m2, m0, [srcq+lenq+(a+1)*mmsize]
85 addps m1, m1, [dstq+lenq+(a+0)*mmsize]
86 addps m2, m2, [dstq+lenq+(a+1)*mmsize]
87 mova [dstq+lenq+(a+0)*mmsize], m1
88 mova [dstq+lenq+(a+1)*mmsize], m2
103 ;------------------------------------------------------------------------------
104 ; void ff_vector_fmul_scalar(float *dst, const float *src, float mul, int len)
105 ;------------------------------------------------------------------------------
; Scale a float array by a scalar: dst[i] = src[i] * mul. Same dual-prototype
; ABI scheme as VECTOR_FMAC_SCALAR above; the selecting %if is not visible in
; this excerpt.
107 %macro VECTOR_FMUL_SCALAR 0
109 cglobal vector_fmul_scalar, 3,3,2, dst, src, len
111 cglobal vector_fmul_scalar, 4,4,3, dst, src, mul, len
; Byte offset covering the array, biased by one vector width; the loop body
; and counter update are on lines outside this excerpt.
119 lea lenq, [lend*4-mmsize]
132 ;------------------------------------------------------------------------------
133 ; void ff_vector_dmul_scalar(double *dst, const double *src, double mul,
135 ;------------------------------------------------------------------------------
; Double-precision scalar multiply: dst[i] = src[i] * mul.
137 %macro VECTOR_DMUL_SCALAR 0
; Three alternative prototypes for different arch/ABI combinations (the
; x86-32 variant takes len via the 'lenaddr' stack slot); the %if directives
; choosing between them are outside this excerpt -- verify in the full file.
139 cglobal vector_dmul_scalar, 3,4,3, dst, src, mul, len, lenaddr
142 cglobal vector_dmul_scalar, 3,3,3, dst, src, len
144 cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len
; x86util helper: broadcast the scalar double from its memory/register
; location (mulm) into every element of m0.
147 VBROADCASTSD m0, mulm
; Fallback broadcast completion for AVX: replicate the low 128 bits into the
; high ymm lane (the preceding setup lines are trimmed from this excerpt).
152 vinsertf128 ymm2, ymm2, xmm2, 1
158 vinsertf128 ymm0, ymm0, xmm0, 1
; Doubles are 8 bytes each; the (unseen) loop walks 2*mmsize bytes/iteration.
162 lea lenq, [lend*8-2*mmsize]
164 mulpd m1, m0, [srcq+lenq ]
165 mulpd m2, m0, [srcq+lenq+mmsize]
166 mova [dstq+lenq ], m1
167 mova [dstq+lenq+mmsize], m2
; Guard for the AVX instantiation of this macro (guarded body not visible).
175 %if HAVE_AVX_EXTERNAL
180 ;-----------------------------------------------------------------------------
181 ; vector_fmul_add(float *dst, const float *src0, const float *src1,
182 ;                 const float *src2, int len)
183 ;-----------------------------------------------------------------------------
; Fused multiply-add over arrays: dst[i] = src0[i]*src1[i] + src2[i].
184 %macro VECTOR_FMUL_ADD 0
; 5 int/ptr args, 5 GPRs, 2 vector regs.
185 cglobal vector_fmul_add, 5,5,2, dst, src0, src1, src2, len
; Byte offset biased by two vector widths: the loop (counter update and
; branch not visible here) handles 2*mmsize bytes per iteration.
186 lea lenq, [lend*4 - 2*mmsize]
; Two-vector unrolled body; mova implies mmsize-aligned buffers.
189 mova m0, [src0q + lenq]
190 mova m1, [src0q + lenq + mmsize]
; 3-operand forms via x86inc (SSE-legal since dst == first source).
191 mulps m0, m0, [src1q + lenq]
192 mulps m1, m1, [src1q + lenq + mmsize]
193 addps m0, m0, [src2q + lenq]
194 addps m1, m1, [src2q + lenq + mmsize]
195 mova [dstq + lenq], m0
196 mova [dstq + lenq + mmsize], m1
; Guard for the AVX instantiation of this macro (guarded body not visible).
205 %if HAVE_AVX_EXTERNAL
210 ;-----------------------------------------------------------------------------
211 ; void vector_fmul_reverse(float *dst, const float *src0, const float *src1,
213 ;-----------------------------------------------------------------------------
; Multiply src0 (walked forward) by src1 consumed back-to-front:
; dst[i] = src0[i] * src1[len-1-i].
214 %macro VECTOR_FMUL_REVERSE 0
215 cglobal vector_fmul_reverse, 4,4,2, dst, src0, src1, len
; Byte offset biased for 2*mmsize bytes per (unseen) loop iteration.
216 lea lenq, [lend*4 - 2*mmsize]
; AVX path: build fully reversed 8-float vectors from src1. Load the two
; 16-byte halves swapped (upper half into the low lane, lower half into the
; high lane), then reverse within each lane with shuffle immediate q0123
; (elements taken in order 3,2,1,0).
220 vmovaps xmm0, [src1q + 16]
221 vinsertf128 m0, m0, [src1q], 1
222 vshufps m0, m0, m0, q0123
223 vmovaps xmm1, [src1q + mmsize + 16]
224 vinsertf128 m1, m1, [src1q + mmsize], 1
225 vshufps m1, m1, m1, q0123
; Non-AVX path: plain aligned load; the matching load of m0 and the in-lane
; reversal are on lines trimmed from this excerpt.
228 mova m1, [src1q + mmsize]
; Pair the reversed vectors with forward-walking src0; m0 (from the lower
; src1 addresses) multiplies the higher src0 offset because src1 is consumed
; in reverse.
232 mulps m0, m0, [src0q + lenq + mmsize]
233 mulps m1, m1, [src0q + lenq]
234 mova [dstq + lenq + mmsize], m0
235 mova [dstq + lenq], m1
; Guard for the AVX instantiation of this macro (guarded body not visible).
244 %if HAVE_AVX_EXTERNAL
249 ; float scalarproduct_float_sse(const float *v1, const float *v2, int len)
; Dot product of two float arrays. The third GPR is named 'offset': it
; presumably serves as the byte-offset loop index (loop framing and the
; horizontal reduction are outside this excerpt -- confirm in full file).
251 cglobal scalarproduct_float, 3,3,2, v1, v2, offset
; Aligned load from v1, then 2-operand SSE multiply against v2; the running
; accumulation into the other xmm register is on lines not shown here.
258 movaps xmm1, [v1q+offsetq]
259 mulps xmm1, [v2q+offsetq]
274 ;-----------------------------------------------------------------------------
275 ; void ff_butterflies_float(float *src0, float *src1, int len);
276 ;-----------------------------------------------------------------------------
; In-place butterfly over two arrays. Only the load/store skeleton is visible
; in this excerpt: m2 (stored to src1) and the updated m0 (stored to src0)
; are computed from m0/m1 on trimmed lines -- presumably the usual
; sum/difference pair; verify against the full file.
278 cglobal butterflies_float, 3,3,3, src0, src1, len
; Aligned loads of one vector from each array at the current byte offset
; (lenq setup and loop framing are outside this excerpt).
289 mova m0, [src0q + lenq]
290 mova m1, [src1q + lenq]
; Store results back in place: src1 receives m2, src0 receives m0.
293 mova [src1q + lenq], m2
294 mova [src0q + lenq], m0