1 ;*****************************************************************************
2 ;* x86-optimized Float DSP functions
4 ;* This file is part of Libav.
6 ;* Libav is free software; you can redistribute it and/or
7 ;* modify it under the terms of the GNU Lesser General Public
8 ;* License as published by the Free Software Foundation; either
9 ;* version 2.1 of the License, or (at your option) any later version.
11 ;* Libav is distributed in the hope that it will be useful,
12 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
13 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 ;* Lesser General Public License for more details.
16 ;* You should have received a copy of the GNU Lesser General Public
17 ;* License along with Libav; if not, write to the Free Software
18 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 ;******************************************************************************
21 %include "x86util.asm"
25 ;-----------------------------------------------------------------------------
26 ; void vector_fmul(float *dst, const float *src0, const float *src1, int len)
27 ;-----------------------------------------------------------------------------
cglobal vector_fmul, 4,4,2, dst, src0, src1, len
    ; dst[i] = src0[i] * src1[i] (single precision).
    ; Two SIMD registers (2*mmsize bytes) are handled per pass; lenq is a
    ; byte index that presumably steps down toward 0 -- the loop label,
    ; decrement and return are on lines elided from this excerpt, confirm
    ; against the full file. mova requires all three pointers to be
    ; mmsize-aligned and len a multiple of 2*mmsize/4 floats.
    lea lenq, [lend*4 - 2*mmsize]          ; floats -> byte offset of last 2-reg chunk
    mova m0, [src0q + lenq]                ; low half of the chunk from src0
    mova m1, [src0q + lenq + mmsize]       ; high half
    mulps m0, m0, [src1q + lenq]           ; m0 *= src1 (3-op form via x86inc; SSE or AVX)
    mulps m1, m1, [src1q + lenq + mmsize]
    mova [dstq + lenq], m0                 ; store both product halves
    mova [dstq + lenq + mmsize], m1
50 ;------------------------------------------------------------------------------
51 ; void ff_vector_fmac_scalar(float *dst, const float *src, float mul, int len)
52 ;------------------------------------------------------------------------------
%macro VECTOR_FMAC_SCALAR 0
; dst[i] += src[i] * mul (single precision).
; Two prototypes appear back to back; in the full file an elided %if selects
; one per calling convention (presumably UNIX64 receives the float 'mul' in
; xmm0 and omits it from the GPR argument list) -- NOTE(review): the guards
; are not visible in this excerpt, confirm against the full file.
cglobal vector_fmac_scalar, 3,3,3, dst, src, len
cglobal vector_fmac_scalar, 4,4,3, dst, src, mul, len
    ; AVX path: replicate the scalar into the high 128-bit lane; the low-lane
    ; splat (shufps) is on an elided line.
    vinsertf128 m0, m0, xmm0, 1
    lea lenq, [lend*4-2*mmsize]            ; byte offset of last 2-register chunk
    mulps m1, m0, [srcq+lenq ]             ; m1 = mul * src (low half)
    mulps m2, m0, [srcq+lenq+mmsize]       ; m2 = mul * src (high half)
    addps m1, m1, [dstq+lenq ]             ; accumulate into existing dst
    addps m2, m2, [dstq+lenq+mmsize]
    ; NOTE(review): the matching store of m1 to [dstq+lenq], the loop control
    ; and %endmacro are on elided lines.
    mova [dstq+lenq+mmsize], m2            ; store high half of the result
89 ;------------------------------------------------------------------------------
90 ; void ff_vector_fmul_scalar(float *dst, const float *src, float mul, int len)
91 ;------------------------------------------------------------------------------
%macro VECTOR_FMUL_SCALAR 0
; dst[i] = src[i] * mul (single precision).
; As with VECTOR_FMAC_SCALAR, the two prototypes are presumably selected by
; an elided calling-convention %if (UNIX64 takes 'mul' in xmm0) -- confirm.
cglobal vector_fmul_scalar, 3,3,2, dst, src, len
cglobal vector_fmul_scalar, 4,4,3, dst, src, mul, len
    ; One SIMD register (mmsize bytes) per iteration; scalar broadcast, loop
    ; body and %endmacro are on lines elided from this excerpt.
    lea lenq, [lend*4-mmsize]              ; byte offset of the last SIMD chunk
118 ;------------------------------------------------------------------------------
;* void ff_vector_dmul_scalar(double *dst, const double *src, double mul,
;*                            int len)
121 ;------------------------------------------------------------------------------
%macro VECTOR_DMUL_SCALAR 0
; dst[i] = src[i] * mul (double precision).
; Three prototypes follow; elided %if/%elif guards pick one per target
; (the 3,4 variant with 'lenaddr' reads len from memory, presumably the
; x86-32 path; the others split by UNIX64 vs WIN64) -- NOTE(review): the
; guards are not visible here, confirm against the full file.
cglobal vector_dmul_scalar, 3,4,3, dst, src, mul, len, lenaddr
cglobal vector_dmul_scalar, 3,3,3, dst, src, len
cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len
    VBROADCASTSD m0, mulm                  ; splat the double from memory into all lanes
    ; AVX paths: copy the low xmm lane (already holding the pair) into the
    ; high 128-bit lane; which register is used depends on the elided
    ; calling-convention branch (xmm2 on one path, xmm0 on the other).
    vinsertf128 ymm2, ymm2, xmm2, 1
    vinsertf128 ymm0, ymm0, xmm0, 1
    lea lenq, [lend*8-2*mmsize]            ; *8: doubles; byte offset of last 2-reg chunk
    mulpd m1, m0, [srcq+lenq ]             ; low half * mul
    mulpd m2, m0, [srcq+lenq+mmsize]       ; high half * mul
    mova [dstq+lenq ], m1
    mova [dstq+lenq+mmsize], m2
    ; Loop control and %endmacro are elided; the guard below presumably wraps
    ; an AVX instantiation of this macro (instantiation lines not visible).
%if HAVE_AVX_EXTERNAL
166 ;-----------------------------------------------------------------------------
167 ; vector_fmul_add(float *dst, const float *src0, const float *src1,
168 ; const float *src2, int len)
169 ;-----------------------------------------------------------------------------
%macro VECTOR_FMUL_ADD 0
; dst[i] = src0[i] * src1[i] + src2[i] (single precision).
; Two SIMD registers per pass, indexed from the end of the arrays; loop
; control and %endmacro are on elided lines. mova implies all four pointers
; are mmsize-aligned.
cglobal vector_fmul_add, 5,5,2, dst, src0, src1, src2, len
    lea lenq, [lend*4 - 2*mmsize]          ; byte offset of the last 2-register chunk
    mova m0, [src0q + lenq]                ; load src0, both halves
    mova m1, [src0q + lenq + mmsize]
    mulps m0, m0, [src1q + lenq]           ; multiply by src1
    mulps m1, m1, [src1q + lenq + mmsize]
    addps m0, m0, [src2q + lenq]           ; add src2
    addps m1, m1, [src2q + lenq + mmsize]
    mova [dstq + lenq], m0                 ; store both result halves
    mova [dstq + lenq + mmsize], m1
194 ;-----------------------------------------------------------------------------
; void vector_fmul_reverse(float *dst, const float *src0, const float *src1,
;                          int len)
197 ;-----------------------------------------------------------------------------
%macro VECTOR_FMUL_REVERSE 0
; dst[i] = src0[i] * src1[len - 1 - i]: multiply src0 by src1 read backwards.
; src0/dst are walked backwards via lenq while src1 is walked forwards
; (its pointer increment is on an elided line) -- NOTE(review): the
; %if cpuflag(avx)/%else guards separating the two load paths below, the
; SSE shufps lines, the loop control and %endmacro are all elided from this
; excerpt; confirm against the full file.
cglobal vector_fmul_reverse, 4,4,2, dst, src0, src1, len
    lea lenq, [lend*4 - 2*mmsize]          ; byte offset of the last 2-register chunk
    ; AVX path: vshufps reverses only within 128-bit lanes, so the two
    ; 16-byte halves are loaded swapped (vinsertf128) before the in-lane
    ; q0123 reversal, yielding a fully reversed ymm register.
    vmovaps xmm0, [src1q + 16]
    vinsertf128 m0, m0, [src1q], 1
    vshufps m0, m0, m0, q0123              ; reverse the 4 floats within each lane
    vmovaps xmm1, [src1q + mmsize + 16]
    vinsertf128 m1, m1, [src1q + mmsize], 1
    vshufps m1, m1, m1, q0123
    ; SSE path (second load only visible here; its shufps reversal is elided).
    mova m1, [src1q + mmsize]
    ; Note the crossed offsets: the reversed low chunk of src1 pairs with the
    ; high chunk of src0 and vice versa.
    mulps m0, m0, [src0q + lenq + mmsize]
    mulps m1, m1, [src0q + lenq]
    mova [dstq + lenq + mmsize], m0
    mova [dstq + lenq], m1
231 ; float scalarproduct_float_sse(const float *v1, const float *v2, int len)
cglobal scalarproduct_float, 3,3,2, v1, v2, offset
    ; Dot product of two float vectors; 'offset' is used as the running byte
    ; index into both arrays. NOTE(review): the index setup, the running-sum
    ; accumulation, the loop control and the horizontal sum that produces the
    ; scalar return value are all on elided lines -- confirm against the
    ; full file. movaps implies both pointers are 16-byte aligned.
    movaps xmm1, [v1q+offsetq]             ; load 4 floats from v1
    mulps xmm1, [v2q+offsetq]              ; elementwise multiply by v2 chunk
256 ;-----------------------------------------------------------------------------
257 ; void ff_butterflies_float(float *src0, float *src1, int len);
258 ;-----------------------------------------------------------------------------
cglobal butterflies_float, 3,3,3, src0, src1, len
    ; In-place butterfly: src0[i] = a + b, src1[i] = a - b where
    ; a = src0[i], b = src1[i].
    ; NOTE(review): the lines that scale len to bytes, negate lenq so it
    ; counts up from a negative index, compute the sum/difference
    ; (addps/subps into m0/m2), and close the loop are elided from this
    ; excerpt -- the m2 stored below is presumably the difference; confirm.
    lea src0q, [src0q + lenq]              ; advance base pointers; lenq then indexes back
    lea src1q, [src1q + lenq]
    mova m0, [src0q + lenq]                ; a = src0 chunk
    mova m1, [src1q + lenq]                ; b = src1 chunk
    mova [src1q + lenq], m2                ; src1 <- a - b (computed on elided lines)
    mova [src0q + lenq], m0                ; src0 <- a + b