/*
 * Copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/x86/asm.h"
#include "libavutil/cpu.h"
#include "libswresample/swresample_internal.h"
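
/* Resampling cores, one per sample format and instruction set.  The function
 * bodies are instantiated from swresample's shared C template
 * (libswresample/resample_template.c) using the core macros below. */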
int swri_resample_int16_mmx2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_int16_sse2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_float_sse  (struct ResampleContext *c, float *dst, const float *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_float_avx  (struct ResampleContext *c, float *dst, const float *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_double_sse2(struct ResampleContext *c, double *dst, const double *src, int *consumed, int src_size, int dst_size, int update_ctx);
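
/* 0x4000 = 1 << 14: rounding bias added before the arithmetic right shift by
 * 15 (psrad $15) that converts the Q15 accumulator back to int16
 * (round half up). */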
DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL};
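
/*
 * The *_CORE_* macros below expand inside the resampling template and compute
 * one output sample as the dot product of c->filter_length coefficients with
 * the source window at src + sample_index.  %0 is a byte offset counting up
 * from -(byte size of the filter) to 0; the source and filter pointers are
 * pre-biased by -len so that (ptr, %0) addressing hits the first tap on the
 * first iteration.  sample_index, dst_index and (for the LINEAR variants) the
 * val/v2 outputs are expected in scope at the expansion site.
 *
 * Scalar sketch of what the int16 COMMON cores compute (Q15 coefficients,
 * 0x4000 rounding bias, saturating narrow to int16):
 *
 *     int acc = 0x4000;
 *     for (i = 0; i < c->filter_length; i++)
 *         acc += src[sample_index + i] * (int)filter[i];
 *     dst[dst_index] = av_clip_int16(acc >> 15);
 */
/* int16, MMX2 common core: 4 taps per pmaddwd iteration; pshufw/paddd fold
 * the two dword partial sums before the rounding shift. */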
#define COMMON_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movq "MANGLE(ff_resample_int16_rounder)", %%mm0 \n\t"\
    "1:                         \n\t"\
    "movq    (%1, %0), %%mm1    \n\t"\
    "pmaddwd (%2, %0), %%mm1    \n\t"\
    "paddd  %%mm1, %%mm0        \n\t"\
    "add       $8, %0           \n\t"\
    " js 1b                     \n\t"\
    "pshufw $0x0E, %%mm0, %%mm1 \n\t"\
    "paddd %%mm1, %%mm0         \n\t"\
    "psrad    $15, %%mm0        \n\t"\
    "packssdw %%mm0, %%mm0      \n\t"\
    "movd %%mm0, (%3)           \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
);
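
/* int16, MMX2 linear core: runs the same source window against two adjacent
 * filter banks (filter and filter + c->filter_alloc) in one pass and leaves
 * the raw sums in val/v2 for the template to interpolate. */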
#define LINEAR_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor       %%mm0, %%mm0    \n\t"\
    "pxor       %%mm2, %%mm2    \n\t"\
    "1:                         \n\t"\
    "movq    (%3, %0), %%mm1    \n\t"\
    "movq       %%mm1, %%mm3    \n\t"\
    "pmaddwd (%4, %0), %%mm1    \n\t"\
    "pmaddwd (%5, %0), %%mm3    \n\t"\
    "paddd      %%mm1, %%mm0    \n\t"\
    "paddd      %%mm3, %%mm2    \n\t"\
    "add           $8, %0       \n\t"\
    " js 1b                     \n\t"\
    "pshufw $0x0E, %%mm0, %%mm1 \n\t"\
    "pshufw $0x0E, %%mm2, %%mm3 \n\t"\
    "paddd %%mm1, %%mm0         \n\t"\
    "paddd %%mm3, %%mm2         \n\t"\
    "movd %%mm0, %1             \n\t"\
    "movd %%mm2, %2             \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
);
#define COMMON_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
    "1:                           \n\t"\
    "movdqu  (%1, %0), %%xmm1     \n\t"\
    "pmaddwd (%2, %0), %%xmm1     \n\t"\
    "paddd  %%xmm1, %%xmm0        \n\t"\
    "add       $16, %0            \n\t"\
    " js 1b                       \n\t"\
    "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
    "paddd %%xmm1, %%xmm0         \n\t"\
    "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
    "paddd %%xmm1, %%xmm0         \n\t"\
    "psrad    $15, %%xmm0         \n\t"\
    "packssdw %%xmm0, %%xmm0      \n\t"\
    "movd %%xmm0, (%3)            \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);
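
/* int16, SSE2 linear core: two filter banks, 8 taps per iteration. */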
#define LINEAR_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor %%xmm0, %%xmm0          \n\t"\
    "pxor %%xmm2, %%xmm2          \n\t"\
    "1:                           \n\t"\
    "movdqu  (%3, %0), %%xmm1     \n\t"\
    "movdqa %%xmm1, %%xmm3        \n\t"\
    "pmaddwd (%4, %0), %%xmm1     \n\t"\
    "pmaddwd (%5, %0), %%xmm3     \n\t"\
    "paddd %%xmm1, %%xmm0         \n\t"\
    "paddd %%xmm3, %%xmm2         \n\t"\
    "add       $16, %0            \n\t"\
    " js 1b                       \n\t"\
    "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
    "pshufd $0x0E, %%xmm2, %%xmm3 \n\t"\
    "paddd %%xmm1, %%xmm0         \n\t"\
    "paddd %%xmm3, %%xmm2         \n\t"\
    "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
    "pshufd $0x01, %%xmm2, %%xmm3 \n\t"\
    "paddd %%xmm1, %%xmm0         \n\t"\
    "paddd %%xmm3, %%xmm2         \n\t"\
    "movd %%xmm0, %1              \n\t"\
    "movd %%xmm2, %2              \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);
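
/* float, SSE common core: 4 taps per mulps/addps iteration; movhlps plus
 * shufps $1 perform the horizontal add of the 4 partial sums. */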
#define COMMON_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
    "xorps      %%xmm0, %%xmm0    \n\t"\
    "1:                           \n\t"\
    "movups   (%1, %0), %%xmm1    \n\t"\
    "mulps    (%2, %0), %%xmm1    \n\t"\
    "addps      %%xmm1, %%xmm0    \n\t"\
    "add           $16, %0        \n\t"\
    " js 1b                       \n\t"\
    "movhlps    %%xmm0, %%xmm1    \n\t"\
    "addps      %%xmm1, %%xmm0    \n\t"\
    "movss      %%xmm0, %%xmm1    \n\t"\
    "shufps $1, %%xmm0, %%xmm0    \n\t"\
    "addps      %%xmm1, %%xmm0    \n\t"\
    "movss      %%xmm0, (%3)      \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);
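
/* float, SSE linear core: two filter banks, results left in val/v2. */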
#define LINEAR_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
    "xorps      %%xmm0, %%xmm0    \n\t"\
    "xorps      %%xmm2, %%xmm2    \n\t"\
    "1:                           \n\t"\
    "movups   (%3, %0), %%xmm1    \n\t"\
    "movaps     %%xmm1, %%xmm3    \n\t"\
    "mulps    (%4, %0), %%xmm1    \n\t"\
    "mulps    (%5, %0), %%xmm3    \n\t"\
    "addps      %%xmm1, %%xmm0    \n\t"\
    "addps      %%xmm3, %%xmm2    \n\t"\
    "add           $16, %0        \n\t"\
    " js 1b                       \n\t"\
    "movhlps    %%xmm0, %%xmm1    \n\t"\
    "movhlps    %%xmm2, %%xmm3    \n\t"\
    "addps      %%xmm1, %%xmm0    \n\t"\
    "addps      %%xmm3, %%xmm2    \n\t"\
    "movss      %%xmm0, %%xmm1    \n\t"\
    "movss      %%xmm2, %%xmm3    \n\t"\
    "shufps $1, %%xmm0, %%xmm0    \n\t"\
    "shufps $1, %%xmm2, %%xmm2    \n\t"\
    "addps      %%xmm1, %%xmm0    \n\t"\
    "addps      %%xmm3, %%xmm2    \n\t"\
    "movss      %%xmm0, %1        \n\t"\
    "movss      %%xmm2, %2        \n\t"\
    : "+r" (len),\
      "=x" (val),\
      "=x" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);
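
/* float, AVX common core: 8 taps per iteration; vextractf128 folds the upper
 * 128-bit lane into the lower one before the SSE-style reduction. */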
#define COMMON_CORE_FLT_AVX \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
    "vxorps     %%ymm0, %%ymm0, %%ymm0    \n\t"\
    "1:                                   \n\t"\
    "vmovups  (%1, %0), %%ymm1            \n\t"\
    "vmulps   (%2, %0), %%ymm1, %%ymm1    \n\t"\
    "vaddps     %%ymm1, %%ymm0, %%ymm0    \n\t"\
    "add           $32, %0                \n\t"\
    " js 1b                               \n\t"\
    "vextractf128   $1, %%ymm0, %%xmm1    \n\t"\
    "vaddps     %%xmm1, %%xmm0, %%xmm0    \n\t"\
    "vmovhlps   %%xmm0, %%xmm1, %%xmm1    \n\t"\
    "vaddps     %%xmm1, %%xmm0, %%xmm0    \n\t"\
    "vshufps $1, %%xmm0, %%xmm0, %%xmm1   \n\t"\
    "vaddss     %%xmm1, %%xmm0, %%xmm0    \n\t"\
    "vmovss     %%xmm0, (%3)              \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);
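
/* float, AVX linear core: two filter banks, 8 taps per iteration. */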
#define LINEAR_CORE_FLT_AVX \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
    "vxorps     %%ymm0, %%ymm0, %%ymm0    \n\t"\
    "vxorps     %%ymm2, %%ymm2, %%ymm2    \n\t"\
    "1:                                   \n\t"\
    "vmovups  (%3, %0), %%ymm1            \n\t"\
    "vmulps   (%5, %0), %%ymm1, %%ymm3    \n\t"\
    "vmulps   (%4, %0), %%ymm1, %%ymm1    \n\t"\
    "vaddps     %%ymm1, %%ymm0, %%ymm0    \n\t"\
    "vaddps     %%ymm3, %%ymm2, %%ymm2    \n\t"\
    "add           $32, %0                \n\t"\
    " js 1b                               \n\t"\
    "vextractf128   $1, %%ymm0, %%xmm1    \n\t"\
    "vextractf128   $1, %%ymm2, %%xmm3    \n\t"\
    "vaddps     %%xmm1, %%xmm0, %%xmm0    \n\t"\
    "vaddps     %%xmm3, %%xmm2, %%xmm2    \n\t"\
    "vmovhlps   %%xmm0, %%xmm1, %%xmm1    \n\t"\
    "vmovhlps   %%xmm2, %%xmm3, %%xmm3    \n\t"\
    "vaddps     %%xmm1, %%xmm0, %%xmm0    \n\t"\
    "vaddps     %%xmm3, %%xmm2, %%xmm2    \n\t"\
    "vshufps $1, %%xmm0, %%xmm0, %%xmm1   \n\t"\
    "vshufps $1, %%xmm2, %%xmm2, %%xmm3   \n\t"\
    "vaddss     %%xmm1, %%xmm0, %%xmm0    \n\t"\
    "vaddss     %%xmm3, %%xmm2, %%xmm2    \n\t"\
    "vmovss     %%xmm0, %1                \n\t"\
    "vmovss     %%xmm2, %2                \n\t"\
    : "+r" (len),\
      "=x" (val),\
      "=x" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);
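
/* double, SSE2 common core: 2 taps per iteration; a single movhlps/addpd
 * folds the two partial sums. */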
#define COMMON_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
__asm__ volatile(\
    "xorpd      %%xmm0, %%xmm0    \n\t"\
    "1:                           \n\t"\
    "movupd   (%1, %0), %%xmm1    \n\t"\
    "mulpd    (%2, %0), %%xmm1    \n\t"\
    "addpd      %%xmm1, %%xmm0    \n\t"\
    "add           $16, %0        \n\t"\
    " js 1b                       \n\t"\
    "movhlps    %%xmm0, %%xmm1    \n\t"\
    "addpd      %%xmm1, %%xmm0    \n\t"\
    "movsd      %%xmm0, (%3)      \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);
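
/* double, SSE2 linear core: two filter banks, results left in val/v2. */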
#define LINEAR_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
__asm__ volatile(\
    "xorpd      %%xmm0, %%xmm0    \n\t"\
    "xorpd      %%xmm2, %%xmm2    \n\t"\
    "1:                           \n\t"\
    "movupd   (%3, %0), %%xmm1    \n\t"\
    "movapd     %%xmm1, %%xmm3    \n\t"\
    "mulpd    (%4, %0), %%xmm1    \n\t"\
    "mulpd    (%5, %0), %%xmm3    \n\t"\
    "addpd      %%xmm1, %%xmm0    \n\t"\
    "addpd      %%xmm3, %%xmm2    \n\t"\
    "add           $16, %0        \n\t"\
    " js 1b                       \n\t"\
    "movhlps    %%xmm0, %%xmm1    \n\t"\
    "movhlps    %%xmm2, %%xmm3    \n\t"\
    "addpd      %%xmm1, %%xmm0    \n\t"\
    "addpd      %%xmm3, %%xmm2    \n\t"\
    "movsd      %%xmm0, %1        \n\t"\
    "movsd      %%xmm2, %2        \n\t"\
    : "+r" (len),\
      "=x" (val),\
      "=x" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);