/*
 * MMX optimized motion estimation
 * Copyright (c) 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer
 *
 * mostly by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "dsputil_mmx.h"
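
/* Rounding constants for the averaging variants below: one bias word per
 * 16-bit lane (0 for plain SAD, 1 for the (a+b+1)>>1 halfpel cases, 2 for
 * the (a+b+c+d+2)>>2 case). bone is a mask of 0x01 bytes used to undo part
 * of the round-up bias of pavgb in sad8_4_mmx2(). */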
DECLARE_ASM_CONST(8, uint64_t, round_tab)[3]={
0x0000000000000000ULL,
0x0001000100010001ULL,
0x0002000200020002ULL,
};

DECLARE_ASM_CONST(8, uint64_t, bone)= 0x0101010101010101LL;
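
/* SAD of an 8-pixel-wide block over h rows with plain MMX. |a-b| is built
 * from two saturating byte subtractions OR'd together, widened against the
 * zero register mm7, and accumulated as 16-bit partial sums in mm6; the
 * caller clears mm6/mm7 and reads the result back via sum_mmx(). Two rows
 * are processed per iteration of the loop, indexed by the negative counter
 * in REG_a. */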
static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len= -(stride*h);
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm4    \n\t"
        "add %3, %%"REG_a"              \n\t"
        "psubusb %%mm0, %%mm2           \n\t"
        "psubusb %%mm4, %%mm0           \n\t"
        "movq (%1, %%"REG_a"), %%mm1    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "movq (%2, %%"REG_a"), %%mm5    \n\t"
        "psubusb %%mm1, %%mm3           \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "por %%mm2, %%mm0               \n\t"
        "por %%mm1, %%mm3               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm3, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm3, %%mm2             \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %3, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg)stride)
    );
}
static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}
static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
{
    int ret;
    __asm__ volatile(
        "pxor %%xmm2, %%xmm2            \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movdqu (%1), %%xmm0            \n\t"
        "movdqu (%1, %4), %%xmm1        \n\t"
        "psadbw (%2), %%xmm0            \n\t"
        "psadbw (%2, %4), %%xmm1        \n\t"
        "paddw %%xmm0, %%xmm2           \n\t"
        "paddw %%xmm1, %%xmm2           \n\t"
        "lea (%1,%4,2), %1              \n\t"
        "lea (%2,%4,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        "movhlps %%xmm2, %%xmm0         \n\t"
        "paddw %%xmm0, %%xmm2           \n\t"
        "movd %%xmm2, %3                \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2), "=r" (ret)
        : "r" ((x86_reg)stride)
    );
    return ret;
}
static inline void sad8_x2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "pavgb 1(%1, %3), %%mm1         \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}
static inline void sad8_y2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile(
        "movq (%1), %%mm0               \n\t"
        "add %3, %1                     \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1, %3), %%mm2           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}
static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile(
        "movq "MANGLE(bone)", %%mm5     \n\t"
        "movq (%1), %%mm0               \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "add %3, %1                     \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1,%3), %%mm2            \n\t"
        "pavgb 1(%1), %%mm1             \n\t"
        "pavgb 1(%1,%3), %%mm2          \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2,%3), %%mm1          \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
{
    x86_reg len= -(stride*h);
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm1    \n\t"
        "movq (%1, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm0, %%mm1             \n\t"
        "paddw %%mm2, %%mm3             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm2    \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "psrlw $1, %%mm1                \n\t"
        "psrlw $1, %%mm3                \n\t"
        "packuswb %%mm3, %%mm1          \n\t"
        "psubusb %%mm1, %%mm4           \n\t"
        "psubusb %%mm2, %%mm1           \n\t"
        "por %%mm4, %%mm1               \n\t"
        "movq %%mm1, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len), "r" ((x86_reg)stride)
    );
}
static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len= -(stride*h);
    __asm__ volatile(
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq 1(%2, %%"REG_a"), %%mm4   \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "movq %%mm4, %%mm5              \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "punpcklbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm5         \n\t"
        "paddw %%mm4, %%mm2             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "movq 16+"MANGLE(round_tab)", %%mm5 \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        "paddw %%mm5, %%mm0             \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm5    \n\t"
        "psrlw $2, %%mm0                \n\t"
        "psrlw $2, %%mm1                \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "psubusb %%mm0, %%mm4           \n\t"
        "psubusb %%mm5, %%mm0           \n\t"
        "por %%mm4, %%mm0               \n\t"
        "movq %%mm0, %%mm4              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm4, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "movq %%mm3, %%mm1              \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len), "r" ((x86_reg)stride)
    );
}
static inline int sum_mmx(void)
{
    int ret;
    __asm__ volatile(
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $32, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $16, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret&0xFFFF;
}
static inline int sum_mmx2(void)
{
    int ret;
    __asm__ volatile(
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret;
}
static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1+1, blk2, stride, h);
}
static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1+stride, blk2, stride, h);
}
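
/* Instantiate the exported SAD functions for one instruction-set suffix:
 * plain, x2 (horizontal halfpel), y2 (vertical halfpel) and xy2 variants
 * at 8- and 16-pixel widths. Each clears mm7 (zero register) and mm6
 * (accumulator); the x2/y2 variants also preload round_tab[1] into mm5. */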
#define PIX_SAD(suf)\
static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_1_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     "movq %0, %%mm5        \n\t"\
                     :: "m"(round_tab[1]) \
                     );\
\
    sad8_x2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     "movq %0, %%mm5        \n\t"\
                     :: "m"(round_tab[1]) \
                     );\
\
    sad8_y2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     ::);\
\
    sad8_4_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_1_ ## suf(blk1  , blk2  , stride, h);\
    sad8_1_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     "movq %0, %%mm5        \n\t"\
                     :: "m"(round_tab[1]) \
                     );\
\
    sad8_x2a_ ## suf(blk1  , blk2  , stride, h);\
    sad8_x2a_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     "movq %0, %%mm5        \n\t"\
                     :: "m"(round_tab[1]) \
                     );\
\
    sad8_y2a_ ## suf(blk1  , blk2  , stride, h);\
    sad8_y2a_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     ::);\
\
    sad8_4_ ## suf(blk1  , blk2  , stride, h);\
    sad8_4_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}

PIX_SAD(mmx)
PIX_SAD(mmx2)
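
/* Usage sketch (illustrative only, not part of this file): once
 * ff_dsputil_init_pix_mmx(&c, avctx) below has filled the tables, motion
 * estimation calls, e.g.,
 *
 *     int cost = c.pix_abs[0][0](NULL, pix1, pix2, stride, 16);
 *
 * where the first index selects 16-pixel width ([1] would be 8) and the
 * second selects the plain/x2/y2/xy2 comparison. */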
void ff_dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX) {
        c->pix_abs[0][0] = sad16_mmx;
        c->pix_abs[0][1] = sad16_x2_mmx;
        c->pix_abs[0][2] = sad16_y2_mmx;
        c->pix_abs[0][3] = sad16_xy2_mmx;
        c->pix_abs[1][0] = sad8_mmx;
        c->pix_abs[1][1] = sad8_x2_mmx;
        c->pix_abs[1][2] = sad8_y2_mmx;
        c->pix_abs[1][3] = sad8_xy2_mmx;

        c->sad[0]= sad16_mmx;
        c->sad[1]= sad8_mmx;
    }
    if (mm_flags & AV_CPU_FLAG_MMX2) {
        c->pix_abs[0][0] = sad16_mmx2;
        c->pix_abs[1][0] = sad8_mmx2;

        c->sad[0]= sad16_mmx2;
        c->sad[1]= sad8_mmx2;

        /* the pavgb-based halfpel variants are not bitexact, see above */
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->pix_abs[0][1] = sad16_x2_mmx2;
            c->pix_abs[0][2] = sad16_y2_mmx2;
            c->pix_abs[0][3] = sad16_xy2_mmx2;
            c->pix_abs[1][1] = sad8_x2_mmx2;
            c->pix_abs[1][2] = sad8_y2_mmx2;
            c->pix_abs[1][3] = sad8_xy2_mmx2;
        }
    }
    /* sad16_sse2 needs 16-byte-aligned blk2, which Snow apparently does not
     * guarantee, and is reportedly no win on 3DNow-capable CPUs. */
    if ((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW) && avctx->codec_id != CODEC_ID_SNOW) {
        c->sad[0]= sad16_sse2;
    }
}