1 ;******************************************************************************
2 ;* Copyright (c) 2008 Loren Merritt
4 ;* This file is part of FFmpeg.
6 ;* FFmpeg is free software; you can redistribute it and/or
7 ;* modify it under the terms of the GNU Lesser General Public
8 ;* License as published by the Free Software Foundation; either
9 ;* version 2.1 of the License, or (at your option) any later version.
11 ;* FFmpeg is distributed in the hope that it will be useful,
12 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
13 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 ;* Lesser General Public License for more details.
16 ;* You should have received a copy of the GNU Lesser General Public
17 ;* License along with FFmpeg; if not, write to the Free Software
18 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 ;******************************************************************************
21 %include "libavutil/x86/x86util.asm"
; SCALARPRODUCT — emits the MMX/SSE2-family integer scalar-product kernels.
; Instantiated once per SIMD instruction set; `mmsize` and the m0..m7 register
; aliases come from x86util.asm for the active INIT_* mode.
; NOTE(review): this is a sampled fragment — the pointer setup, the
; pmaddwd/accumulate arithmetic, the loop control, and %endmacro between/after
; these memory ops are not visible here; comments only describe what is shown.
%macro SCALARPRODUCT 0
; int ff_scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3,
;                                     int order, int mul)
; Contract (per the C prototype): returns dot(v1, v2) over `order` int16
; elements while updating v1 in place (v1 += mul * v3) — TODO confirm the
; exact update against the elided arithmetic.
cglobal scalarproduct_and_madd_int16, 4,4,8, v1, v2, v3, order, mul
        movu    m0, [v2q + orderq]              ; v2 may be unaligned -> movu
        movu    m1, [v2q + orderq + mmsize]     ; two vectors per iteration
        mova    m4, [v1q + orderq]              ; v1 accessed with mova: assumed 16-byte aligned
        mova    m5, [v1q + orderq + mmsize]
        movu    m2, [v3q + orderq]              ; v3 may be unaligned -> movu
        movu    m3, [v3q + orderq + mmsize]
        mova    [v1q + orderq], m2              ; store updated v1; presumably m2/m3 now hold
        mova    [v1q + orderq + mmsize], m3     ; v1 + mul*v3, computed in elided lines — verify
; int ff_scalarproduct_and_madd_int32(int16_t *v1, int32_t *v2, int16_t *v3,
;                                     int order, int mul)
; Variant of the int16 kernel where v2 holds 32-bit values: v2 is indexed with
; doubled offsets (2*orderq) because its elements are twice the width of the
; int16 arrays v1/v3. NOTE(review): sampled fragment — setup, arithmetic and
; loop control between these memory ops are elided from this view.
cglobal scalarproduct_and_madd_int32, 4,4,8, v1, v2, v3, order, mul
        lea     v2q, [v2q + 2*orderq]           ; advance v2 twice as far: int32 vs int16 stride
        mova    m3, [v1q + orderq]              ; presumably orderq is negated in elided setup so
        movu    m0, [v2q + 2*orderq]            ; these index backward from the array ends — confirm
        movu    m1, [v2q + 2*orderq + mmsize]   ; v2 unaligned -> movu
        movu    m2, [v3q + orderq]              ; v3 unaligned -> movu
        mova    [v1q + orderq], m2              ; write back updated v1 (computed in elided lines)
; SCALARPRODUCT_LOOP %1 — one specialization of the int16 kernel's inner loop.
; %1 selects a per-call variant; presumably the byte misalignment of v2/v3,
; with each specialization realigning loads (e.g. via palignr) — TODO confirm
; against the elided body. All loads here are mova (aligned), consistent with
; the misalignment being folded into %1 rather than into movu accesses.
; NOTE(review): sampled fragment — conditional assembly, shifting/arithmetic
; and %endmacro between these lines are not visible here.
%macro SCALARPRODUCT_LOOP 1
        mova    m4, [v2q + orderq]              ; first load group (one %if branch, presumably)
        mova    m0, [v2q + orderq + mmsize]
        mova    m5, [v3q + orderq]
        mova    m2, [v3q + orderq + mmsize]
        mova    m0, [v2q + orderq]              ; second load group (alternate branch); m0/m2 are
        mova    m1, [v2q + orderq + mmsize]     ; re-loaded, so these two groups cannot both run —
        mova    m2, [v3q + orderq]              ; consistent with an elided %if/%else
        mova    m3, [v3q + orderq + mmsize]
%define t0 [v1q + orderq]                       ; memory-operand aliases for the v1 destination
%define t1 [v1q + orderq + mmsize]
        mova    [v1q + orderq], m2              ; store updated v1 (madd result from elided lines)
        mova    [v1q + orderq + mmsize], m3
; int ff_scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3,
; int order, int mul)
; Wider-register build of the int16 kernel: 5 GPRs / 10 SIMD regs (vs 4/8 in
; the generic macro), dispatching to SCALARPRODUCT_LOOP specializations.
; NOTE(review): sampled fragment — the dispatch prologue and the remaining
; SCALARPRODUCT_LOOP invocations/epilogue are outside this view.
cglobal scalarproduct_and_madd_int16, 4,5,10, v1, v2, v3, order, mul
        mova    m4, [v2q + orderq]              ; prime first aligned vectors of v2/v3
        mova    m5, [v3q + orderq]
; linear is faster than branch tree or jump table, because the branches taken are cyclic (i.e. predictable)
        SCALARPRODUCT_LOOP 14                   ; one specialization per (presumed) misalignment,
        SCALARPRODUCT_LOOP 12                   ; laid out linearly per the comment above
        SCALARPRODUCT_LOOP 10