/*
 * Copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/x86/asm.h"
#include "libavutil/cpu.h"
#include "libswresample/swresample_internal.h"
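
/*
 * Per-CPU-feature entry points of the swresample resampler.  The
 * COMMON_CORE and LINEAR_CORE macros below are meant to be expanded inside
 * the generic resampling loop; they therefore use variables of that loop
 * (c, src, filter, sample_index and the output destinations) instead of
 * taking arguments of their own.
 */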
int swri_resample_int16_mmx2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_int16_sse2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_float_sse  (struct ResampleContext *c, float   *dst, const float   *src, int *consumed, int src_size, int dst_size, int update_ctx);
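
/* 0x4000 = 1 << 14: rounding bias pre-loaded into the accumulator of the
 * int16 common cores, so that the final "psrad $15" rounds to nearest. */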
DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL};
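
/*
 * COMMON_CORE_INT16_MMX2: computes one int16 output sample as the dot
 * product of the source window and the filter, 4 taps per pmaddwd, then
 * reduces the two 32-bit partial sums, shifts the biased result right by
 * 15 and stores it with int16 saturation.
 *
 * Roughly the scalar equivalent, as a sketch (variable names such as
 * dst_index follow the expansion site and are assumptions here):
 *
 *     int64_t acc = 1 << 14;
 *     for (int i = 0; i < c->filter_length; i++)
 *         acc += src[sample_index + i] * (int64_t)filter[i];
 *     dst[dst_index] = av_clip_int16(acc >> 15);
 */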
#define COMMON_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movq "MANGLE(ff_resample_int16_rounder)", %%mm0 \n\t"\
    "1:                              \n\t"\
    "movq      (%1, %0), %%mm1       \n\t"\
    "pmaddwd   (%2, %0), %%mm1       \n\t"\
    "paddd     %%mm1, %%mm0          \n\t"\
    "add       $8, %0                \n\t"\
    "js 1b                           \n\t"\
    "pshufw    $0x0E, %%mm0, %%mm1   \n\t"\
    "paddd     %%mm1, %%mm0          \n\t"\
    "psrad     $15, %%mm0            \n\t"\
    "packssdw  %%mm0, %%mm0          \n\t"\
    "movd      %%mm0, (%3)           \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ADD(ff_resample_int16_rounder)\
);
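
/*
 * LINEAR_CORE_INT16_MMX2: like the common core, but evaluates the same
 * source window against two adjacent filter phases (filter and
 * filter + c->filter_alloc) in one pass and leaves the two raw, un-shifted
 * sums in val and v2, which the surrounding template is expected to blend
 * by the fractional sample position.
 */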
#define LINEAR_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor      %%mm0, %%mm0          \n\t"\
    "pxor      %%mm2, %%mm2          \n\t"\
    "1:                              \n\t"\
    "movq      (%3, %0), %%mm1       \n\t"\
    "movq      %%mm1, %%mm3          \n\t"\
    "pmaddwd   (%4, %0), %%mm1       \n\t"\
    "pmaddwd   (%5, %0), %%mm3       \n\t"\
    "paddd     %%mm1, %%mm0          \n\t"\
    "paddd     %%mm3, %%mm2          \n\t"\
    "add       $8, %0                \n\t"\
    "js 1b                           \n\t"\
    "pshufw    $0x0E, %%mm0, %%mm1   \n\t"\
    "pshufw    $0x0E, %%mm2, %%mm3   \n\t"\
    "paddd     %%mm1, %%mm0          \n\t"\
    "paddd     %%mm3, %%mm2          \n\t"\
    "movd      %%mm0, %1             \n\t"\
    "movd      %%mm2, %2             \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
);
#define COMMON_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
    "1:                                \n\t"\
    "movdqu    (%1, %0), %%xmm1        \n\t"\
    "pmaddwd   (%2, %0), %%xmm1        \n\t"\
    "paddd     %%xmm1, %%xmm0          \n\t"\
    "add       $16, %0                 \n\t"\
    "js 1b                             \n\t"\
    "pshufd    $0x0E, %%xmm0, %%xmm1   \n\t"\
    "paddd     %%xmm1, %%xmm0          \n\t"\
    "pshufd    $0x01, %%xmm0, %%xmm1   \n\t"\
    "paddd     %%xmm1, %%xmm0          \n\t"\
    "psrad     $15, %%xmm0             \n\t"\
    "packssdw  %%xmm0, %%xmm0          \n\t"\
    "movd      %%xmm0, (%3)            \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ADD(ff_resample_int16_rounder)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);
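
/* LINEAR_CORE_INT16_SSE2: SSE2 version of LINEAR_CORE_INT16_MMX2, two
 * filter phases at once, 8 taps per iteration. */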
#define LINEAR_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor      %%xmm0, %%xmm0          \n\t"\
    "pxor      %%xmm2, %%xmm2          \n\t"\
    "1:                                \n\t"\
    "movdqu    (%3, %0), %%xmm1        \n\t"\
    "movdqa    %%xmm1, %%xmm3          \n\t"\
    "pmaddwd   (%4, %0), %%xmm1        \n\t"\
    "pmaddwd   (%5, %0), %%xmm3        \n\t"\
    "paddd     %%xmm1, %%xmm0          \n\t"\
    "paddd     %%xmm3, %%xmm2          \n\t"\
    "add       $16, %0                 \n\t"\
    "js 1b                             \n\t"\
    "pshufd    $0x0E, %%xmm0, %%xmm1   \n\t"\
    "pshufd    $0x0E, %%xmm2, %%xmm3   \n\t"\
    "paddd     %%xmm1, %%xmm0          \n\t"\
    "paddd     %%xmm3, %%xmm2          \n\t"\
    "pshufd    $0x01, %%xmm0, %%xmm1   \n\t"\
    "pshufd    $0x01, %%xmm2, %%xmm3   \n\t"\
    "paddd     %%xmm1, %%xmm0          \n\t"\
    "paddd     %%xmm3, %%xmm2          \n\t"\
    "movd      %%xmm0, %1              \n\t"\
    "movd      %%xmm2, %2              \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);
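
/*
 * COMMON_CORE_FLT_SSE: single-precision core; multiplies 4 float taps per
 * iteration (movups tolerates an unaligned source, mulps needs the filter
 * 16-byte aligned) and reduces the vector sum with movhlps/shufps before
 * storing the scalar result.
 */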
#define COMMON_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
    "xorps     %%xmm0, %%xmm0          \n\t"\
    "1:                                \n\t"\
    "movups    (%1, %0), %%xmm1        \n\t"\
    "mulps     (%2, %0), %%xmm1        \n\t"\
    "addps     %%xmm1, %%xmm0          \n\t"\
    "add       $16, %0                 \n\t"\
    "js 1b                             \n\t"\
    "movhlps   %%xmm0, %%xmm1          \n\t"\
    "addps     %%xmm1, %%xmm0          \n\t"\
    "movss     %%xmm0, %%xmm1          \n\t"\
    "shufps    $1, %%xmm0, %%xmm0      \n\t"\
    "addps     %%xmm1, %%xmm0          \n\t"\
    "movss     %%xmm0, (%3)            \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);
#define LINEAR_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
    "xorps     %%xmm0, %%xmm0          \n\t"\
    "xorps     %%xmm2, %%xmm2          \n\t"\
    "1:                                \n\t"\
    "movups    (%3, %0), %%xmm1        \n\t"\
    "movaps    %%xmm1, %%xmm3          \n\t"\
    "mulps     (%4, %0), %%xmm1        \n\t"\
    "mulps     (%5, %0), %%xmm3        \n\t"\
    "addps     %%xmm1, %%xmm0          \n\t"\
    "addps     %%xmm3, %%xmm2          \n\t"\
    "add       $16, %0                 \n\t"\
    "js 1b                             \n\t"\
    "movhlps   %%xmm0, %%xmm1          \n\t"\
    "movhlps   %%xmm2, %%xmm3          \n\t"\
    "addps     %%xmm1, %%xmm0          \n\t"\
    "addps     %%xmm3, %%xmm2          \n\t"\
    "movss     %%xmm0, %%xmm1          \n\t"\
    "movss     %%xmm2, %%xmm3          \n\t"\
    "shufps    $1, %%xmm0, %%xmm0      \n\t"\
    "shufps    $1, %%xmm2, %%xmm2      \n\t"\
    "addps     %%xmm1, %%xmm0          \n\t"\
    "addps     %%xmm3, %%xmm2          \n\t"\
    "movss     %%xmm0, %1              \n\t"\
    "movss     %%xmm2, %2              \n\t"\
    : "+r" (len),\
      "=m" (val),\
      "=m" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);