/*
 * Copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/x86/asm.h"
#include "libavutil/cpu.h"
#include "libswresample/swresample_internal.h"

int swri_resample_int16_mmx2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_int16_sse2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_float_sse  (struct ResampleContext *c, float *dst, const float *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_double_sse2(struct ResampleContext *c, double *dst, const double *src, int *consumed, int src_size, int dst_size, int update_ctx);
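
/*
 * The COMMON_CORE_* and LINEAR_CORE_* macros below are the inline-assembly
 * inner loops of the resampler: each computes the dot product of a window of
 * input samples with one FIR filter phase.  They are presumably pulled into
 * the generic C resampling template by defining COMMON_CORE / LINEAR_CORE
 * before including it (an assumption based on the swri_resample_* prototypes
 * above; the template itself is not part of this header).
 *
 * Addressing convention shared by all cores: len is the negative byte length
 * of the filter, and every base pointer is pre-biased by -len, so (base, len)
 * points at the start of the window and the loop simply advances len by the
 * vector width (8 or 16 bytes) until it reaches zero.
 */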

DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL};
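
/*
 * ff_resample_int16_rounder holds 1 << 14 in its low dword.  The int16 cores
 * preload their accumulator with it and finish with "psrad $15", so the sum
 * of products is scaled down by 2^15 with round-to-nearest rounding.
 */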

#define COMMON_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movq "MANGLE(ff_resample_int16_rounder)", %%mm0 \n\t"\
    "1:                              \n\t"\
    "movq     (%1, %0), %%mm1        \n\t"\
    "pmaddwd  (%2, %0), %%mm1        \n\t"\
    "paddd       %%mm1, %%mm0        \n\t"\
    "add            $8, %0           \n\t"\
    " js 1b                          \n\t"\
    "pshufw      $0x0E, %%mm0, %%mm1 \n\t"\
    "paddd       %%mm1, %%mm0        \n\t"\
    "psrad         $15, %%mm0        \n\t"\
    "packssdw    %%mm0, %%mm0        \n\t"\
    "movd        %%mm0, (%3)         \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
);
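
/*
 * The LINEAR_CORE_* variants keep two accumulators: one dot product against
 * the current filter phase (filter) and one against the next phase stored
 * c->filter_alloc elements further on.  The two raw sums are returned in the
 * caller's val and v2 variables (names assumed from the surrounding template)
 * so the caller can blend them, presumably for linear interpolation between
 * adjacent filter phases, as the macro name suggests.
 */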

#define LINEAR_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor          %%mm0, %%mm0 \n\t"\
    "pxor          %%mm2, %%mm2 \n\t"\
    "1:                         \n\t"\
    "movq       (%3, %0), %%mm1 \n\t"\
    "movq          %%mm1, %%mm3 \n\t"\
    "pmaddwd    (%4, %0), %%mm1 \n\t"\
    "pmaddwd    (%5, %0), %%mm3 \n\t"\
    "paddd         %%mm1, %%mm0 \n\t"\
    "paddd         %%mm3, %%mm2 \n\t"\
    "add              $8, %0    \n\t"\
    " js 1b                     \n\t"\
    "pshufw $0x0E, %%mm0, %%mm1 \n\t"\
    "pshufw $0x0E, %%mm2, %%mm3 \n\t"\
    "paddd         %%mm1, %%mm0 \n\t"\
    "paddd         %%mm3, %%mm2 \n\t"\
    "movd          %%mm0, %1    \n\t"\
    "movd          %%mm2, %2    \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
);

#define COMMON_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
    "1:                           \n\t"\
    "movdqu   (%1, %0), %%xmm1    \n\t"\
    "pmaddwd  (%2, %0), %%xmm1    \n\t"\
    "paddd      %%xmm1, %%xmm0    \n\t"\
    "add           $16, %0        \n\t"\
    " js 1b                       \n\t"\
    "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
    "paddd      %%xmm1, %%xmm0    \n\t"\
    "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
    "paddd      %%xmm1, %%xmm0    \n\t"\
    "psrad         $15, %%xmm0    \n\t"\
    "packssdw   %%xmm0, %%xmm0    \n\t"\
    "movd       %%xmm0, (%3)      \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);

#define LINEAR_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor        %%xmm0, %%xmm0   \n\t"\
    "pxor        %%xmm2, %%xmm2   \n\t"\
    "1:                           \n\t"\
    "movdqu    (%3, %0), %%xmm1   \n\t"\
    "movdqa      %%xmm1, %%xmm3   \n\t"\
    "pmaddwd   (%4, %0), %%xmm1   \n\t"\
    "pmaddwd   (%5, %0), %%xmm3   \n\t"\
    "paddd       %%xmm1, %%xmm0   \n\t"\
    "paddd       %%xmm3, %%xmm2   \n\t"\
    "add            $16, %0       \n\t"\
    " js 1b                       \n\t"\
    "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
    "pshufd $0x0E, %%xmm2, %%xmm3 \n\t"\
    "paddd       %%xmm1, %%xmm0   \n\t"\
    "paddd       %%xmm3, %%xmm2   \n\t"\
    "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
    "pshufd $0x01, %%xmm2, %%xmm3 \n\t"\
    "paddd       %%xmm1, %%xmm0   \n\t"\
    "paddd       %%xmm3, %%xmm2   \n\t"\
    "movd        %%xmm0, %1       \n\t"\
    "movd        %%xmm2, %2       \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);
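
/*
 * Float SSE cores: same structure as the int16 cores, but processing four
 * floats per iteration and reducing the accumulator with movhlps + shufps
 * (high/low halves added, then the two remaining lanes) before storing a
 * single scalar with movss.
 */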

#define COMMON_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
    "xorps      %%xmm0, %%xmm0  \n\t"\
    "1:                         \n\t"\
    "movups   (%1, %0), %%xmm1  \n\t"\
    "mulps    (%2, %0), %%xmm1  \n\t"\
    "addps      %%xmm1, %%xmm0  \n\t"\
    "add           $16, %0      \n\t"\
    " js 1b                     \n\t"\
    "movhlps    %%xmm0, %%xmm1  \n\t"\
    "addps      %%xmm1, %%xmm0  \n\t"\
    "movss      %%xmm0, %%xmm1  \n\t"\
    "shufps $1, %%xmm0, %%xmm0  \n\t"\
    "addps      %%xmm1, %%xmm0  \n\t"\
    "movss      %%xmm0, (%3)    \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);

#define LINEAR_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
    "xorps      %%xmm0, %%xmm0  \n\t"\
    "xorps      %%xmm2, %%xmm2  \n\t"\
    "1:                         \n\t"\
    "movups   (%3, %0), %%xmm1  \n\t"\
    "movaps     %%xmm1, %%xmm3  \n\t"\
    "mulps    (%4, %0), %%xmm1  \n\t"\
    "mulps    (%5, %0), %%xmm3  \n\t"\
    "addps      %%xmm1, %%xmm0  \n\t"\
    "addps      %%xmm3, %%xmm2  \n\t"\
    "add           $16, %0      \n\t"\
    " js 1b                     \n\t"\
    "movhlps    %%xmm0, %%xmm1  \n\t"\
    "movhlps    %%xmm2, %%xmm3  \n\t"\
    "addps      %%xmm1, %%xmm0  \n\t"\
    "addps      %%xmm3, %%xmm2  \n\t"\
    "movss      %%xmm0, %%xmm1  \n\t"\
    "movss      %%xmm2, %%xmm3  \n\t"\
    "shufps $1, %%xmm0, %%xmm0  \n\t"\
    "shufps $1, %%xmm2, %%xmm2  \n\t"\
    "addps      %%xmm1, %%xmm0  \n\t"\
    "addps      %%xmm3, %%xmm2  \n\t"\
    "movss      %%xmm0, %1      \n\t"\
    "movss      %%xmm2, %2      \n\t"\
    : "+r" (len),\
      "=m" (val),\
      "=m" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);
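
/*
 * Double SSE2 cores: two doubles per iteration; the final movhlps folds the
 * high half of the accumulator onto the low half so a single addpd leaves
 * the scalar result in the low lane.
 */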

#define COMMON_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
__asm__ volatile(\
    "xorpd      %%xmm0, %%xmm0  \n\t"\
    "1:                         \n\t"\
    "movupd   (%1, %0), %%xmm1  \n\t"\
    "mulpd    (%2, %0), %%xmm1  \n\t"\
    "addpd      %%xmm1, %%xmm0  \n\t"\
    "add           $16, %0      \n\t"\
    " js 1b                     \n\t"\
    "movhlps    %%xmm0, %%xmm1  \n\t"\
    "addpd      %%xmm1, %%xmm0  \n\t"\
    "movsd      %%xmm0, (%3)    \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);

#define LINEAR_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
__asm__ volatile(\
    "xorpd      %%xmm0, %%xmm0  \n\t"\
    "xorpd      %%xmm2, %%xmm2  \n\t"\
    "1:                         \n\t"\
    "movupd   (%3, %0), %%xmm1  \n\t"\
    "movapd     %%xmm1, %%xmm3  \n\t"\
    "mulpd    (%4, %0), %%xmm1  \n\t"\
    "mulpd    (%5, %0), %%xmm3  \n\t"\
    "addpd      %%xmm1, %%xmm0  \n\t"\
    "addpd      %%xmm3, %%xmm2  \n\t"\
    "add           $16, %0      \n\t"\
    " js 1b                     \n\t"\
    "movhlps    %%xmm0, %%xmm1  \n\t"\
    "movhlps    %%xmm2, %%xmm3  \n\t"\
    "addpd      %%xmm1, %%xmm0  \n\t"\
    "addpd      %%xmm3, %%xmm2  \n\t"\
    "movsd      %%xmm0, %1      \n\t"\
    "movsd      %%xmm2, %2      \n\t"\
    : "+r" (len),\
      "=m" (val),\
      "=m" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);