1 ;*****************************************************************************
2 ;* x86-optimized Float DSP functions
4 ;* Copyright 2006 Loren Merritt
6 ;* This file is part of FFmpeg.
8 ;* FFmpeg is free software; you can redistribute it and/or
9 ;* modify it under the terms of the GNU Lesser General Public
10 ;* License as published by the Free Software Foundation; either
11 ;* version 2.1 of the License, or (at your option) any later version.
13 ;* FFmpeg is distributed in the hope that it will be useful,
14 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 ;* Lesser General Public License for more details.
18 ;* You should have received a copy of the GNU Lesser General Public
19 ;* License along with FFmpeg; if not, write to the Free Software
20 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 ;******************************************************************************
23 %include "x86util.asm"
27 ;-----------------------------------------------------------------------------
28 ; void vector_fmul(float *dst, const float *src0, const float *src1, int len)
29 ;-----------------------------------------------------------------------------
;-----------------------------------------------------------------------------
; dst[i] = src0[i] * src1[i], for len floats.
; Processes two SIMD registers (2*mmsize bytes) per iteration, walking the
; arrays via a byte offset in lenq.
; NOTE(review): the loop label and the "sub lenq / jge" loop control are not
; visible in this chunk; presumably len is a multiple of 2*mmsize/4 floats
; and all pointers are mmsize-aligned (mova requires alignment) — confirm
; against the full file.
;-----------------------------------------------------------------------------
31 cglobal vector_fmul, 4,4,2, dst, src0, src1, len
32 lea lenq, [lend*4 - 2*mmsize]                    ; lenq = byte offset of the last two-register chunk
35 mova m0, [src0q + lenq]                          ; load first src0 register of the chunk (aligned)
36 mova m1, [src0q + lenq + mmsize]                 ; load second src0 register
37 mulps m0, m0, [src1q + lenq]                     ; m0 *= src1 chunk (3-operand form; 2-operand under SSE via x86inc)
38 mulps m1, m1, [src1q + lenq + mmsize]
39 mova [dstq + lenq], m0                           ; store products back (aligned)
40 mova [dstq + lenq + mmsize], m1
54 ;------------------------------------------------------------------------------
55 ; void ff_vector_fmac_scalar(float *dst, const float *src, float mul, int len)
56 ;------------------------------------------------------------------------------
; Emits one vector_fmac_scalar implementation for the current instruction set:
;   dst[i] += src[i] * mul, for len floats.
; NOTE(review): the two cglobal prototypes below are alternatives selected by
; preprocessor ABI guards (%if blocks elided from this view) — on ABIs where
; the scalar arrives in xmm0 no named "mul" argument is declared; the scalar
; is broadcast into m0 before the loop (broadcast code partly elided).
58 %macro VECTOR_FMAC_SCALAR 0
60 cglobal vector_fmac_scalar, 3,3,3, dst, src, len       ; variant: mul already in xmm0
62 cglobal vector_fmac_scalar, 4,4,3, dst, src, mul, len  ; variant: mul as explicit argument
72 vinsertf128 m0, m0, xmm0, 1                  ; AVX: replicate the low 128 bits into the high lane
75 lea lenq, [lend*4-2*mmsize]                  ; byte offset of the last two-register chunk
77 mulps m1, m0, [srcq+lenq ]                   ; m1 = mul * src chunk
78 mulps m2, m0, [srcq+lenq+mmsize]
79 addps m1, m1, [dstq+lenq ]                   ; accumulate into existing dst values
80 addps m2, m2, [dstq+lenq+mmsize]
82 mova [dstq+lenq+mmsize], m2                  ; store second half (store of m1 elided from this view)
95 ;------------------------------------------------------------------------------
96 ; void ff_vector_fmul_scalar(float *dst, const float *src, float mul, int len)
97 ;------------------------------------------------------------------------------
; Emits one vector_fmul_scalar implementation for the current instruction set:
;   dst[i] = src[i] * mul, for len floats.
; NOTE(review): as with vector_fmac_scalar above, the two prototypes are
; ABI-guarded alternatives (guards and the loop body are elided from this
; view); one register (mmsize bytes) is processed per iteration here, hence
; the single-mmsize offset.
99 %macro VECTOR_FMUL_SCALAR 0
101 cglobal vector_fmul_scalar, 3,3,2, dst, src, len       ; variant: mul already in xmm0
103 cglobal vector_fmul_scalar, 4,4,3, dst, src, mul, len  ; variant: mul as explicit argument
111 lea lenq, [lend*4-mmsize]                   ; byte offset of the last one-register chunk
124 ;------------------------------------------------------------------------------
125 ;* void ff_vector_dmul_scalar(double *dst, const double *src, double mul, int len)
127 ;------------------------------------------------------------------------------
; Emits one vector_dmul_scalar implementation for the current instruction set:
;   dst[i] = src[i] * mul, for len doubles (note *8 scaling below).
; NOTE(review): the three cglobal prototypes are alternatives chosen by
; ABI/arch preprocessor guards elided from this view (the 3,4,... form with a
; spare "lenaddr" register looks like the x86-32 path where len is reloaded
; from the stack — confirm against the full file).
129 %macro VECTOR_DMUL_SCALAR 0
131 cglobal vector_dmul_scalar, 3,4,3, dst, src, mul, len, lenaddr
134 cglobal vector_dmul_scalar, 3,3,3, dst, src, len       ; variant: mul already in xmm0
136 cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len  ; variant: mul as explicit argument
139 VBROADCASTSD m0, mulm                        ; splat the scalar double across m0
144 vinsertf128 ymm2, ymm2, xmm2, 1              ; AVX fallback broadcast: duplicate low lane into high lane
150 vinsertf128 ymm0, ymm0, xmm0, 1
154 lea lenq, [lend*8-2*mmsize]                  ; byte offset of last chunk; *8 because elements are doubles
156 mulpd m1, m0, [srcq+lenq ]                   ; m1 = mul * src chunk (packed double)
157 mulpd m2, m0, [srcq+lenq+mmsize]
158 mova [dstq+lenq ], m1                        ; store products (aligned)
159 mova [dstq+lenq+mmsize], m2
167 %if HAVE_AVX_EXTERNAL                        ; guard for the AVX instantiation of this macro (body elided)
172 ;-----------------------------------------------------------------------------
173 ; vector_fmul_add(float *dst, const float *src0, const float *src1,
174 ; const float *src2, int len)
175 ;-----------------------------------------------------------------------------
; Emits one vector_fmul_add implementation for the current instruction set:
;   dst[i] = src0[i] * src1[i] + src2[i], for len floats.
; Two registers (2*mmsize bytes) per iteration; loop label and "sub/jge"
; control flow are elided from this view. mova implies all four pointers are
; presumably mmsize-aligned — confirm against callers.
176 %macro VECTOR_FMUL_ADD 0
177 cglobal vector_fmul_add, 5,5,2, dst, src0, src1, src2, len
178 lea lenq, [lend*4 - 2*mmsize]                ; byte offset of the last two-register chunk
181 mova m0, [src0q + lenq]                      ; load src0 chunk
182 mova m1, [src0q + lenq + mmsize]
183 mulps m0, m0, [src1q + lenq]                 ; multiply by src1 chunk
184 mulps m1, m1, [src1q + lenq + mmsize]
185 addps m0, m0, [src2q + lenq]                 ; add src2 chunk
186 addps m1, m1, [src2q + lenq + mmsize]
187 mova [dstq + lenq], m0                       ; store fused result
188 mova [dstq + lenq + mmsize], m1
197 %if HAVE_AVX_EXTERNAL                        ; guard for the AVX instantiation of this macro (body elided)
202 ;-----------------------------------------------------------------------------
203 ;* void vector_fmul_reverse(float *dst, const float *src0, const float *src1, int len)
205 ;-----------------------------------------------------------------------------
; Emits one vector_fmul_reverse implementation for the current instruction set:
;   dst[i] = src0[i] * src1[len - 1 - i], for len floats.
; src1 is read forward while dst/src0 are indexed via lenq, so each loaded
; src1 register must be element-reversed before multiplying.
; NOTE(review): the AVX and SSE loads below are alternatives inside an
; %if cpuflag(avx) split whose guard lines are elided from this view; the
; loop label and src1 pointer advance are likewise not visible here.
206 %macro VECTOR_FMUL_REVERSE 0
207 cglobal vector_fmul_reverse, 4,4,2, dst, src0, src1, len
208 lea lenq, [lend*4 - 2*mmsize]                ; byte offset of the last two-register chunk
212 vmovaps xmm0, [src1q + 16]                   ; AVX: load upper 128-bit half of src1 chunk into the low lane...
213 vinsertf128 m0, m0, [src1q], 1               ; ...and the lower half into the high lane (swaps 128-bit halves)
214 vshufps m0, m0, m0, q0123                    ; reverse the 4 floats within each 128-bit lane -> fully reversed ymm
215 vmovaps xmm1, [src1q + mmsize + 16]          ; same half-swap + lane-reverse for the second register
216 vinsertf128 m1, m1, [src1q + mmsize], 1
217 vshufps m1, m1, m1, q0123
220 mova m1, [src1q + mmsize]                    ; non-AVX path: plain load (its shufps reversal is elided here)
224 mulps m0, m0, [src0q + lenq + mmsize]        ; reversed src1 * src0 (note swapped chunk pairing)
225 mulps m1, m1, [src0q + lenq]
226 mova [dstq + lenq + mmsize], m0              ; store results
227 mova [dstq + lenq], m1
236 %if HAVE_AVX_EXTERNAL                        ; guard for the AVX instantiation of this macro (body elided)
; Returns the dot product of v1 and v2 over len floats (result in xmm0).
; NOTE(review): the third argument register is renamed "offset" — presumably
; the usual negative-offset loop idiom (pointers advanced past the end,
; offset counting up from -len*4 to 0); the setup, accumulation, horizontal
; sum and loop control are elided from this view — confirm against the full
; file. movaps requires 16-byte-aligned inputs.
241 ; float scalarproduct_float_sse(const float *v1, const float *v2, int len)
243 cglobal scalarproduct_float, 3,3,2, v1, v2, offset
250 movaps xmm1, [v1q+offsetq]                   ; load 4 floats from v1
251 mulps xmm1, [v2q+offsetq]                    ; elementwise multiply with v2 (accumulation elided here)