/*
 * Copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
21 #include "libavutil/x86/asm.h"
22 #include "libavutil/cpu.h"
23 #include "libswresample/swresample_internal.h"

/* 0x4000 in the low dword rounds the >>15 shift applied after the pmaddwd accumulation below. */
DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL};

/*
 * Each *_CORE_* macro expands to a statement block that computes one output
 * sample as a dot product of the source window against one polyphase filter
 * phase. The including code must provide c, src, sample_index, filter, dst
 * and dst_index (plus val and v2 for the LINEAR variants) in scope.
 */
#define COMMON_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movq "MANGLE(ff_resample_int16_rounder)", %%mm0 \n\t"\
    "1:                              \n\t"\
    "movq        (%1, %0), %%mm1     \n\t"\
    "pmaddwd     (%2, %0), %%mm1     \n\t"\
    "paddd          %%mm1, %%mm0     \n\t"\
    "add               $8, %0        \n\t"\
    " js 1b                          \n\t"\
    "pshufw $0x0E,  %%mm0, %%mm1     \n\t"\
    "paddd          %%mm1, %%mm0     \n\t"\
    "psrad            $15, %%mm0     \n\t"\
    "packssdw       %%mm0, %%mm0     \n\t"\
    "movd           %%mm0, (%3)      \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
);
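
/*
 * Illustration only (not referenced by the macros): a plain C sketch of what
 * the int16 COMMON cores compute per output sample, assuming the same rounder
 * and >>15 shift. The SIMD code accumulates in 32-bit lanes and saturates via
 * packssdw; this reference uses a single 32-bit accumulator, so it matches
 * only while intermediate sums fit.
 */
static inline int16_t resample_common_int16_scalar_ref(const int16_t *src,
                                                       const int16_t *filter,
                                                       int filter_length)
{
    int sum = 0x4000;                   /* same rounder as ff_resample_int16_rounder */
    int i;
    for (i = 0; i < filter_length; i++)
        sum += src[i] * filter[i];      /* dot product of window and filter phase */
    sum >>= 15;
    if (sum >  32767) sum =  32767;     /* emulate packssdw saturation */
    if (sum < -32768) sum = -32768;
    return (int16_t)sum;
}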

/*
 * LINEAR variants evaluate the same window against two adjacent filter phases
 * (filter and filter + c->filter_alloc) and return the two raw accumulators in
 * val and v2 for the caller to interpolate; no rounding or shifting is done here.
 */
#define LINEAR_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor           %%mm0, %%mm0     \n\t"\
    "pxor           %%mm2, %%mm2     \n\t"\
    "1:                              \n\t"\
    "movq        (%3, %0), %%mm1     \n\t"\
    "movq           %%mm1, %%mm3     \n\t"\
    "pmaddwd     (%4, %0), %%mm1     \n\t"\
    "pmaddwd     (%5, %0), %%mm3     \n\t"\
    "paddd          %%mm1, %%mm0     \n\t"\
    "paddd          %%mm3, %%mm2     \n\t"\
    "add               $8, %0        \n\t"\
    " js 1b                          \n\t"\
    "pshufw $0x0E,  %%mm0, %%mm1     \n\t"\
    "pshufw $0x0E,  %%mm2, %%mm3     \n\t"\
    "paddd          %%mm1, %%mm0     \n\t"\
    "paddd          %%mm3, %%mm2     \n\t"\
    "movd           %%mm0, %1        \n\t"\
    "movd           %%mm2, %2        \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
);

#define COMMON_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
    "1:                              \n\t"\
    "movdqu      (%1, %0), %%xmm1    \n\t"\
    "pmaddwd     (%2, %0), %%xmm1    \n\t"\
    "paddd         %%xmm1, %%xmm0    \n\t"\
    "add              $16, %0        \n\t"\
    " js 1b                          \n\t"\
    "pshufd $0x0E, %%xmm0, %%xmm1    \n\t"\
    "paddd         %%xmm1, %%xmm0    \n\t"\
    "pshufd $0x01, %%xmm0, %%xmm1    \n\t"\
    "paddd         %%xmm1, %%xmm0    \n\t"\
    "psrad            $15, %%xmm0    \n\t"\
    "packssdw      %%xmm0, %%xmm0    \n\t"\
    "movd          %%xmm0, (%3)      \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);

#define LINEAR_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor          %%xmm0, %%xmm0    \n\t"\
    "pxor          %%xmm2, %%xmm2    \n\t"\
    "1:                              \n\t"\
    "movdqu      (%3, %0), %%xmm1    \n\t"\
    "movdqa        %%xmm1, %%xmm3    \n\t"\
    "pmaddwd     (%4, %0), %%xmm1    \n\t"\
    "pmaddwd     (%5, %0), %%xmm3    \n\t"\
    "paddd         %%xmm1, %%xmm0    \n\t"\
    "paddd         %%xmm3, %%xmm2    \n\t"\
    "add              $16, %0        \n\t"\
    " js 1b                          \n\t"\
    "pshufd $0x0E, %%xmm0, %%xmm1    \n\t"\
    "pshufd $0x0E, %%xmm2, %%xmm3    \n\t"\
    "paddd         %%xmm1, %%xmm0    \n\t"\
    "paddd         %%xmm3, %%xmm2    \n\t"\
    "pshufd $0x01, %%xmm0, %%xmm1    \n\t"\
    "pshufd $0x01, %%xmm2, %%xmm3    \n\t"\
    "paddd         %%xmm1, %%xmm0    \n\t"\
    "paddd         %%xmm3, %%xmm2    \n\t"\
    "movd          %%xmm0, %1        \n\t"\
    "movd          %%xmm2, %2        \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);

/* Single precision variants: a plain dot product, no rounding constant needed. */
#define COMMON_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
    "xorps      %%xmm0, %%xmm0   \n\t"\
    "1:                          \n\t"\
    "movups   (%1, %0), %%xmm1   \n\t"\
    "mulps    (%2, %0), %%xmm1   \n\t"\
    "addps      %%xmm1, %%xmm0   \n\t"\
    "add           $16, %0       \n\t"\
    " js 1b                      \n\t"\
    "movhlps    %%xmm0, %%xmm1   \n\t"\
    "addps      %%xmm1, %%xmm0   \n\t"\
    "movss      %%xmm0, %%xmm1   \n\t"\
    "shufps $1, %%xmm0, %%xmm0   \n\t"\
    "addps      %%xmm1, %%xmm0   \n\t"\
    "movss      %%xmm0, (%3)     \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);
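
/*
 * Illustration only: the float (and, with double, the DBL) COMMON cores reduce
 * to a straight dot product with no rounding step; a scalar reference:
 */
static inline float resample_common_flt_scalar_ref(const float *src,
                                                    const float *filter,
                                                    int filter_length)
{
    float sum = 0.f;
    int i;
    for (i = 0; i < filter_length; i++)
        sum += src[i] * filter[i];
    return sum;
}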

#define LINEAR_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
    "xorps      %%xmm0, %%xmm0   \n\t"\
    "xorps      %%xmm2, %%xmm2   \n\t"\
    "1:                          \n\t"\
    "movups   (%3, %0), %%xmm1   \n\t"\
    "movaps     %%xmm1, %%xmm3   \n\t"\
    "mulps    (%4, %0), %%xmm1   \n\t"\
    "mulps    (%5, %0), %%xmm3   \n\t"\
    "addps      %%xmm1, %%xmm0   \n\t"\
    "addps      %%xmm3, %%xmm2   \n\t"\
    "add           $16, %0       \n\t"\
    " js 1b                      \n\t"\
    "movhlps    %%xmm0, %%xmm1   \n\t"\
    "movhlps    %%xmm2, %%xmm3   \n\t"\
    "addps      %%xmm1, %%xmm0   \n\t"\
    "addps      %%xmm3, %%xmm2   \n\t"\
    "movss      %%xmm0, %%xmm1   \n\t"\
    "movss      %%xmm2, %%xmm3   \n\t"\
    "shufps $1, %%xmm0, %%xmm0   \n\t"\
    "shufps $1, %%xmm2, %%xmm2   \n\t"\
    "addps      %%xmm1, %%xmm0   \n\t"\
    "addps      %%xmm3, %%xmm2   \n\t"\
    "movss      %%xmm0, %1       \n\t"\
    "movss      %%xmm2, %2       \n\t"\
    : "+r" (len),\
      "=m" (val),\
      "=m" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);

/* AVX variants process eight floats per iteration. */
#define COMMON_CORE_FLT_AVX \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
    "vxorps       %%ymm0, %%ymm0, %%ymm0 \n\t"\
    "1:                                  \n\t"\
    "vmovups    (%1, %0), %%ymm1         \n\t"\
    "vmulps     (%2, %0), %%ymm1, %%ymm1 \n\t"\
    "vaddps       %%ymm1, %%ymm0, %%ymm0 \n\t"\
    "add             $32, %0             \n\t"\
    " js 1b                              \n\t"\
    "vextractf128     $1, %%ymm0, %%xmm1 \n\t"\
    "vaddps       %%xmm1, %%xmm0, %%xmm0 \n\t"\
    "vmovhlps     %%xmm0, %%xmm1, %%xmm1 \n\t"\
    "vaddps       %%xmm1, %%xmm0, %%xmm0 \n\t"\
    "vshufps  $1, %%xmm0, %%xmm0, %%xmm1 \n\t"\
    "vaddss       %%xmm1, %%xmm0, %%xmm0 \n\t"\
    "vmovss       %%xmm0, (%3)           \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);

#define LINEAR_CORE_FLT_AVX \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
    "vxorps       %%ymm0, %%ymm0, %%ymm0 \n\t"\
    "vxorps       %%ymm2, %%ymm2, %%ymm2 \n\t"\
    "1:                                  \n\t"\
    "vmovups    (%3, %0), %%ymm1         \n\t"\
    "vmulps     (%5, %0), %%ymm1, %%ymm3 \n\t"\
    "vmulps     (%4, %0), %%ymm1, %%ymm1 \n\t"\
    "vaddps       %%ymm1, %%ymm0, %%ymm0 \n\t"\
    "vaddps       %%ymm3, %%ymm2, %%ymm2 \n\t"\
    "add             $32, %0             \n\t"\
    " js 1b                              \n\t"\
    "vextractf128     $1, %%ymm0, %%xmm1 \n\t"\
    "vextractf128     $1, %%ymm2, %%xmm3 \n\t"\
    "vaddps       %%xmm1, %%xmm0, %%xmm0 \n\t"\
    "vaddps       %%xmm3, %%xmm2, %%xmm2 \n\t"\
    "vmovhlps     %%xmm0, %%xmm1, %%xmm1 \n\t"\
    "vmovhlps     %%xmm2, %%xmm3, %%xmm3 \n\t"\
    "vaddps       %%xmm1, %%xmm0, %%xmm0 \n\t"\
    "vaddps       %%xmm3, %%xmm2, %%xmm2 \n\t"\
    "vshufps  $1, %%xmm0, %%xmm0, %%xmm1 \n\t"\
    "vshufps  $1, %%xmm2, %%xmm2, %%xmm3 \n\t"\
    "vaddss       %%xmm1, %%xmm0, %%xmm0 \n\t"\
    "vaddss       %%xmm3, %%xmm2, %%xmm2 \n\t"\
    "vmovss       %%xmm0, %1             \n\t"\
    "vmovss       %%xmm2, %2             \n\t"\
    : "+r" (len),\
      "=m" (val),\
      "=m" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);

/* Double precision variants process two doubles per iteration. */
#define COMMON_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
__asm__ volatile(\
    "xorpd      %%xmm0, %%xmm0   \n\t"\
    "1:                          \n\t"\
    "movupd   (%1, %0), %%xmm1   \n\t"\
    "mulpd    (%2, %0), %%xmm1   \n\t"\
    "addpd      %%xmm1, %%xmm0   \n\t"\
    "add           $16, %0       \n\t"\
    " js 1b                      \n\t"\
    "movhlps    %%xmm0, %%xmm1   \n\t"\
    "addpd      %%xmm1, %%xmm0   \n\t"\
    "movsd      %%xmm0, (%3)     \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);

#define LINEAR_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
__asm__ volatile(\
    "xorpd      %%xmm0, %%xmm0   \n\t"\
    "xorpd      %%xmm2, %%xmm2   \n\t"\
    "1:                          \n\t"\
    "movupd   (%3, %0), %%xmm1   \n\t"\
    "movapd     %%xmm1, %%xmm3   \n\t"\
    "mulpd    (%4, %0), %%xmm1   \n\t"\
    "mulpd    (%5, %0), %%xmm3   \n\t"\
    "addpd      %%xmm1, %%xmm0   \n\t"\
    "addpd      %%xmm3, %%xmm2   \n\t"\
    "add           $16, %0       \n\t"\
    " js 1b                      \n\t"\
    "movhlps    %%xmm0, %%xmm1   \n\t"\
    "movhlps    %%xmm2, %%xmm3   \n\t"\
    "addpd      %%xmm1, %%xmm0   \n\t"\
    "addpd      %%xmm3, %%xmm2   \n\t"\
    "movsd      %%xmm0, %1       \n\t"\
    "movsd      %%xmm2, %2       \n\t"\
    : "+r" (len),\
      "=m" (val),\
      "=m" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);