/*
 * MMX optimized motion estimation
 * Copyright (c) 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer
 *
 * mostly by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <assert.h>

#include "libavutil/cpu.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/dsputil.h"
#include "dsputil_mmx.h"

#if HAVE_INLINE_ASM
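
/* Rounding terms for the averaging kernels below: round_tab[1] is added
 * before the >>1 of a two-pixel average, round_tab[2] before the >>2 of a
 * four-pixel average. */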
DECLARE_ASM_CONST(8, uint64_t, round_tab)[3] = {
    0x0000000000000000ULL,
    0x0001000100010001ULL,
    0x0002000200020002ULL,
};
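
/* A 1 in every byte. Subtracted between the two chained pavgb passes in
 * sad8_4_mmx2() to compensate for pavgb rounding upwards twice. */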
DECLARE_ASM_CONST(8, uint64_t, bone) = 0x0101010101010101LL;
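
/*
 * Each sad8_* kernel accumulates into %mm6 the sum of absolute differences
 * between an 8-pixel-wide block at blk1 and one at blk2. As a rough scalar
 * sketch (FFABS is from libavutil), one call contributes
 *
 *     int sad = 0;
 *     for (int y = 0; y < h; y++)
 *         for (int x = 0; x < 8; x++)
 *             sad += FFABS(blk1[y * stride + x] - blk2[y * stride + x]);
 *
 * Plain MMX has no absolute-difference instruction, so |a - b| is computed
 * as (a - b, saturated to 0) | (b - a, saturated to 0) with psubusb/por,
 * then widened against the zero register %mm7 and accumulated as words.
 */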
static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len = -(stride * h);
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm4    \n\t"
        "add %3, %%"REG_a"              \n\t"
        "psubusb %%mm0, %%mm2           \n\t"
        "psubusb %%mm4, %%mm0           \n\t"
        "movq (%1, %%"REG_a"), %%mm1    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "movq (%2, %%"REG_a"), %%mm5    \n\t"
        "psubusb %%mm1, %%mm3           \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "por %%mm2, %%mm0               \n\t"
        "por %%mm1, %%mm3               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm3, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm3, %%mm2             \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %3, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg)stride));
}
static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride));
}
static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
{
    int ret;
    __asm__ volatile(
        "pxor %%xmm2, %%xmm2            \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movdqu (%1), %%xmm0            \n\t"
        "movdqu (%1, %4), %%xmm1        \n\t"
        "psadbw (%2), %%xmm0            \n\t"
        "psadbw (%2, %4), %%xmm1        \n\t"
        "paddw %%xmm0, %%xmm2           \n\t"
        "paddw %%xmm1, %%xmm2           \n\t"
        "lea (%1,%4,2), %1              \n\t"
        "lea (%2,%4,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        "movhlps %%xmm2, %%xmm0         \n\t"
        "paddw   %%xmm0, %%xmm2         \n\t"
        "movd    %%xmm2, %3             \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2), "=r" (ret)
        : "r" ((x86_reg)stride));
    return ret;
}
static inline void sad8_x2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "pavgb 1(%1, %3), %%mm1         \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride));
}
static inline void sad8_y2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile(
        "movq (%1), %%mm0               \n\t"
        "add %3, %1                     \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1, %3), %%mm2           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride));
}
static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile(
        "movq "MANGLE(bone)", %%mm5     \n\t"
        "movq (%1), %%mm0               \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "add %3, %1                     \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1,%3), %%mm2            \n\t"
        "pavgb 1(%1), %%mm1             \n\t"
        "pavgb 1(%1,%3), %%mm2          \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2,%3), %%mm1          \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride));
}
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
{
    x86_reg len = -(stride * h);
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm1    \n\t"
        "movq (%1, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm0, %%mm1             \n\t"
        "paddw %%mm2, %%mm3             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm2    \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "psrlw $1, %%mm1                \n\t"
        "psrlw $1, %%mm3                \n\t"
        "packuswb %%mm3, %%mm1          \n\t"
        "psubusb %%mm1, %%mm4           \n\t"
        "psubusb %%mm2, %%mm1           \n\t"
        "por %%mm4, %%mm1               \n\t"
        "movq %%mm1, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len),
          "r" ((x86_reg)stride));
}
static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len = -(stride * h);
    __asm__ volatile(
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq 1(%2, %%"REG_a"), %%mm4   \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "movq %%mm4, %%mm5              \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "punpcklbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm5         \n\t"
        "paddw %%mm4, %%mm2             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "movq 16+"MANGLE(round_tab)", %%mm5 \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        "paddw %%mm5, %%mm0             \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm5    \n\t"
        "psrlw $2, %%mm0                \n\t"
        "psrlw $2, %%mm1                \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "psubusb %%mm0, %%mm4           \n\t"
        "psubusb %%mm5, %%mm0           \n\t"
        "por %%mm4, %%mm0               \n\t"
        "movq %%mm0, %%mm4              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm4, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "movq %%mm3, %%mm1              \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len),
          "r" ((x86_reg)stride));
}
static inline int sum_mmx(void)
{
    int ret;
    __asm__ volatile(
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $32, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $16, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret));
    return ret & 0xFFFF;
}

static inline int sum_mmx2(void)
{
    int ret;
    __asm__ volatile(
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret));
    return ret;
}

static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1 + 1, blk2, stride, h);
}

static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1 + stride, blk2, stride, h);
}
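
/* Instantiate the exported SAD functions for one instruction-set suffix:
 * sad8/sad16 plus their x2, y2 and xy2 half-pel variants. Each wrapper
 * zeroes %mm7 (unpack zero) and %mm6 (accumulator), preloads round_tab[1]
 * into %mm5 where the kernel needs it, and folds the result with
 * sum_<suffix>(). The first parameter exists to match the me_cmp_func
 * signature and is unused. */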
#define PIX_SAD(suf)\
static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h == 8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_1_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h == 8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     "movq %0, %%mm5        \n\t"\
                     :: "m" (round_tab[1]));\
\
    sad8_x2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h == 8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     "movq %0, %%mm5        \n\t"\
                     :: "m" (round_tab[1]));\
\
    sad8_y2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h == 8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     ::);\
\
    sad8_4_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_1_ ## suf(blk1,     blk2,     stride, h);\
    sad8_1_ ## suf(blk1 + 8, blk2 + 8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     "movq %0, %%mm5        \n\t"\
                     :: "m" (round_tab[1]));\
\
    sad8_x2a_ ## suf(blk1,     blk2,     stride, h);\
    sad8_x2a_ ## suf(blk1 + 8, blk2 + 8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     "movq %0, %%mm5        \n\t"\
                     :: "m" (round_tab[1]));\
\
    sad8_y2a_ ## suf(blk1,     blk2,     stride, h);\
    sad8_y2a_ ## suf(blk1 + 8, blk2 + 8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     ::);\
\
    sad8_4_ ## suf(blk1,     blk2,     stride, h);\
    sad8_4_ ## suf(blk1 + 8, blk2 + 8, stride, h);\
\
    return sum_ ## suf();\
}

PIX_SAD(mmx)
PIX_SAD(mmx2)

#endif /* HAVE_INLINE_ASM */
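
/* pix_abs[size][type] follows the DSPContext convention: size 0 holds the
 * 16x16 comparators and size 1 the 8x8 ones; type 0/1/2/3 select full-pel,
 * half-pel x, half-pel y and half-pel xy respectively. */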
void ff_dsputil_init_pix_mmx(DSPContext *c, AVCodecContext *avctx)
{
#if HAVE_INLINE_ASM
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX) {
        c->pix_abs[0][0] = sad16_mmx;
        c->pix_abs[0][1] = sad16_x2_mmx;
        c->pix_abs[0][2] = sad16_y2_mmx;
        c->pix_abs[0][3] = sad16_xy2_mmx;
        c->pix_abs[1][0] = sad8_mmx;
        c->pix_abs[1][1] = sad8_x2_mmx;
        c->pix_abs[1][2] = sad8_y2_mmx;
        c->pix_abs[1][3] = sad8_xy2_mmx;

        c->sad[0] = sad16_mmx;
        c->sad[1] = sad8_mmx;
    }
    if (mm_flags & AV_CPU_FLAG_MMXEXT) {
        c->pix_abs[0][0] = sad16_mmx2;
        c->pix_abs[1][0] = sad8_mmx2;

        c->sad[0] = sad16_mmx2;
        c->sad[1] = sad8_mmx2;

        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->pix_abs[0][1] = sad16_x2_mmx2;
            c->pix_abs[0][2] = sad16_y2_mmx2;
            c->pix_abs[0][3] = sad16_xy2_mmx2;
            c->pix_abs[1][1] = sad8_x2_mmx2;
            c->pix_abs[1][2] = sad8_y2_mmx2;
            c->pix_abs[1][3] = sad8_xy2_mmx2;
        }
    }
    if ((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW) &&
        avctx->codec_id != AV_CODEC_ID_SNOW) {
        c->sad[0] = sad16_sse2;
    }
#endif /* HAVE_INLINE_ASM */
}