/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/mathops.h"
#include "dsputil_mmx.h"

static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
        "mov $-128, %%"REG_a" \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%0, %2), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "movq %%mm0, (%1, %%"REG_a") \n\t"
        "movq %%mm1, 8(%1, %%"REG_a") \n\t"
        "movq %%mm2, 16(%1, %%"REG_a") \n\t"
        "movq %%mm3, 24(%1, %%"REG_a") \n\t"
        "add $32, %%"REG_a" \n\t"
        : "r" (block+64), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*2)
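
/* Rough scalar equivalent of the two get_pixels implementations here (the
 * MMX one above and the SSE2 one below): an 8x8 block of unsigned bytes is
 * widened to 16-bit DCT coefficients. Sketch only, kept as a comment so it
 * does not affect the build:
 *
 *     for (i = 0; i < 8; i++)
 *         for (j = 0; j < 8; j++)
 *             block[i * 8 + j] = pixels[i * line_size + j];
 */
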
static void get_pixels_sse2(DCTELEM *block, const uint8_t *pixels, int line_size)
        "pxor %%xmm4, %%xmm4 \n\t"
        "movq (%0), %%xmm0 \n\t"
        "movq (%0, %2), %%xmm1 \n\t"
        "movq (%0, %2,2), %%xmm2 \n\t"
        "movq (%0, %3), %%xmm3 \n\t"
        "lea (%0,%2,4), %0 \n\t"
        "punpcklbw %%xmm4, %%xmm0 \n\t"
        "punpcklbw %%xmm4, %%xmm1 \n\t"
        "punpcklbw %%xmm4, %%xmm2 \n\t"
        "punpcklbw %%xmm4, %%xmm3 \n\t"
        "movdqa %%xmm0, (%1) \n\t"
        "movdqa %%xmm1, 16(%1) \n\t"
        "movdqa %%xmm2, 32(%1) \n\t"
        "movdqa %%xmm3, 48(%1) \n\t"
        "movq (%0), %%xmm0 \n\t"
        "movq (%0, %2), %%xmm1 \n\t"
        "movq (%0, %2,2), %%xmm2 \n\t"
        "movq (%0, %3), %%xmm3 \n\t"
        "punpcklbw %%xmm4, %%xmm0 \n\t"
        "punpcklbw %%xmm4, %%xmm1 \n\t"
        "punpcklbw %%xmm4, %%xmm2 \n\t"
        "punpcklbw %%xmm4, %%xmm3 \n\t"
        "movdqa %%xmm0, 64(%1) \n\t"
        "movdqa %%xmm1, 80(%1) \n\t"
        "movdqa %%xmm2, 96(%1) \n\t"
        "movdqa %%xmm3, 112(%1) \n\t"
        : "r" (block), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3)

static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
        "pxor %%mm7, %%mm7 \n\t"
        "mov $-128, %%"REG_a" \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "psubw %%mm2, %%mm0 \n\t"
        "psubw %%mm3, %%mm1 \n\t"
        "movq %%mm0, (%2, %%"REG_a") \n\t"
        "movq %%mm1, 8(%2, %%"REG_a") \n\t"
        "add $16, %%"REG_a" \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" ((x86_reg)stride)
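
/* diff_pixels above stores the per-pixel difference of two 8x8 byte blocks
 * as 16-bit values; a rough scalar sketch (not compiled):
 *
 *     for (i = 0; i < 8; i++)
 *         for (j = 0; j < 8; j++)
 *             block[i * 8 + j] = s1[i * stride + j] - s2[i * stride + j];
 */
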
static int pix_sum16_mmx(uint8_t * pix, int line_size){
    x86_reg index= -line_size*h;
        "pxor %%mm7, %%mm7 \n\t"
        "pxor %%mm6, %%mm6 \n\t"
        "movq (%2, %1), %%mm0 \n\t"
        "movq (%2, %1), %%mm1 \n\t"
        "movq 8(%2, %1), %%mm2 \n\t"
        "movq 8(%2, %1), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "paddw %%mm2, %%mm3 \n\t"
        "paddw %%mm1, %%mm3 \n\t"
        "paddw %%mm3, %%mm6 \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $32, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $16, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movd %%mm6, %0 \n\t"
        "andl $0xFFFF, %0 \n\t"
        : "=&r" (sum), "+r" (index)
        : "r" (pix - index), "r" ((x86_reg)line_size)
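
/* pix_sum16 above adds up all 256 pixels of a 16x16 block; scalar sketch
 * (reference only, not compiled):
 *
 *     int sum = 0;
 *     for (i = 0; i < 16; i++)
 *         for (j = 0; j < 16; j++)
 *             sum += pix[i * line_size + j];
 */
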
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
        "movq (%0),%%mm2\n"      /* mm2 = pix[0-7] */
        "movq 8(%0),%%mm3\n"     /* mm3 = pix[8-15] */
        "movq %%mm2,%%mm1\n"     /* mm1 = mm2 = pix[0-7] */
        "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
        "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */
        "movq %%mm3,%%mm4\n"     /* mm4 = mm3 = pix[8-15] */
        "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
        "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */
        "pmaddwd %%mm1,%%mm1\n"  /* mm1 = (pix4^2+pix5^2,pix6^2+pix7^2) */
        "pmaddwd %%mm2,%%mm2\n"  /* mm2 = (pix0^2+pix1^2,pix2^2+pix3^2) */
        "pmaddwd %%mm3,%%mm3\n"
        "pmaddwd %%mm4,%%mm4\n"
        "paddd %%mm1,%%mm2\n"    /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
                                           pix2^2+pix3^2+pix6^2+pix7^2) */
        "paddd %%mm3,%%mm4\n"
        "paddd %%mm2,%%mm7\n"
        "paddd %%mm4,%%mm7\n"
        "psrlq $32, %%mm7\n"     /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        : "+r" (pix), "=r"(tmp) : "r" ((x86_reg)line_size) : "%ecx" );
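
/* pix_norm1 above accumulates the sum of squared pixel values over a 16x16
 * block (pmaddwd squares the widened words and adds adjacent pairs).
 * Scalar sketch, for reference only:
 *
 *     int sum = 0;
 *     for (i = 0; i < 16; i++)
 *         for (j = 0; j < 16; j++)
 *             sum += pix[i * line_size + j] * pix[i * line_size + j];
 */
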
static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
        "pxor %%mm0,%%mm0\n"     /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"     /* mm7 holds the sum */
        "movq (%0),%%mm1\n"      /* mm1 = pix1[0][0-7] */
        "movq (%1),%%mm2\n"      /* mm2 = pix2[0][0-7] */
        "movq (%0,%3),%%mm3\n"   /* mm3 = pix1[1][0-7] */
        "movq (%1,%3),%%mm4\n"   /* mm4 = pix2[1][0-7] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /* OR the results to get absolute difference */
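        /* i.e. the branch-free byte absolute difference: a saturating
           unsigned subtract yields 0 when a <= b, so
           (a -sat b) | (b -sat a) equals |a - b| for each byte. */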
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"
        /* now convert to 16-bit vectors so we can square them */
        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm3 now spread over (mm3,mm4) */
        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"
        "lea (%0,%3,2), %0\n"    /* pix1 += 2*line_size */
        "lea (%1,%3,2), %1\n"    /* pix2 += 2*line_size */
        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"
        "psrlq $32, %%mm7\n"     /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)

static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
        "pxor %%mm0,%%mm0\n"     /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"     /* mm7 holds the sum */
        "movq (%0),%%mm1\n"      /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n"      /* mm2 = pix2[0-7] */
        "movq 8(%0),%%mm3\n"     /* mm3 = pix1[8-15] */
        "movq 8(%1),%%mm4\n"     /* mm4 = pix2[8-15] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /* OR the results to get absolute difference */
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"
        /* now convert to 16-bit vectors so we can square them */
        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm3 now spread over (mm3,mm4) */
        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"
        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"
        "psrlq $32, %%mm7\n"     /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
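
/* Both sse8 and sse16 above compute a plain sum of squared differences;
 * scalar sketch of the 16-pixel-wide variant (reference only):
 *
 *     int sum = 0;
 *     for (y = 0; y < h; y++)
 *         for (x = 0; x < 16; x++) {
 *             int d = pix1[y * line_size + x] - pix2[y * line_size + x];
 *             sum += d * d;
 *         }
 */
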
static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
        "movq %%mm0, %%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "movq %%mm4, %%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "movq %%mm0, %%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"
        "movq %%mm4, %%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "g" (h-2)
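
/* hf_noise8 above (and hf_noise16 below) estimate the amount of
 * high-frequency content by accumulating absolute differences of
 * neighbouring horizontal pixel gradients across successive rows; the
 * exact arithmetic is what the asm implements. The nsse comparators
 * further down use the change in this score between two pictures. */
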
static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "g" (h-2)
    return tmp + hf_noise8_mmx(pix+8, line_size, h);

static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    if(c) score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
    else  score1 = sse16_mmx(c, pix1, pix2, line_size, h);
    score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);
    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;

static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1= sse8_mmx(c, pix1, pix2, line_size, h);
    int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);
    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;
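
/* The nsse ("noise preserving SSE") comparators above combine the plain SSE
 * score with the change in estimated high-frequency content, so a candidate
 * that smooths away texture/noise pays a penalty scaled by
 * avctx->nsse_weight (a weight of 8 is used when no context is available). */
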
static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    assert(((uintptr_t)pix & 7) == 0);
    assert((line_size & 7) == 0);
#define SUM(in0, in1, out0, out1) \
    "movq (%0), %%mm2\n"\
    "movq 8(%0), %%mm3\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "paddw %%mm6,%%mm0\n"
        "paddw %%mm6,%%mm0\n"
        : "+r" (pix), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
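
/* vsad_intra16 above sums absolute differences between vertically adjacent
 * rows of a single picture; rough scalar sketch (reference only):
 *
 *     int sum = 0;
 *     for (y = 0; y < h - 1; y++)
 *         for (x = 0; x < 16; x++)
 *             sum += FFABS(pix[y * line_size + x] - pix[(y + 1) * line_size + x]);
 */
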
static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    assert(((uintptr_t)pix & 7) == 0);
    assert((line_size & 7) == 0);
#define SUM(in0, in1, out0, out1) \
    "movq (%0), " #out0 "\n"\
    "movq 8(%0), " #out1 "\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        : "+r" (pix), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)

static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    assert(((uintptr_t)pix1 & 7) == 0);
    assert(((uintptr_t)pix2 & 7) == 0);
    assert((line_size & 7) == 0);
#define SUM(in0, in1, out0, out1) \
    "movq (%1)," #out0 "\n"\
    "movq 8(%0),%%mm3\n"\
    "movq 8(%1)," #out1 "\n"\
    "psubb " #out0 ", %%mm2\n"\
    "psubb " #out1 ", %%mm3\n"\
    "pxor %%mm7, %%mm2\n"\
    "pxor %%mm7, %%mm3\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "paddw %%mm6,%%mm0\n"
        "paddw %%mm6,%%mm0\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
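
/* vsad16 above is the inter variant: it forms the per-pixel difference
 * pix1 - pix2 (the asm biases the byte differences by 0x80 so the unsigned
 * saturation tricks still apply) and then sums absolute differences of that
 * signal between adjacent rows. Rough scalar sketch (reference only):
 *
 *     int sum = 0;
 *     for (y = 0; y < h - 1; y++) {
 *         for (x = 0; x < 16; x++)
 *             sum += FFABS(pix1[x] - pix2[x] - pix1[x + line_size] + pix2[x + line_size]);
 *         pix1 += line_size;
 *         pix2 += line_size;
 *     }
 */
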
static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    assert(((uintptr_t)pix1 & 7) == 0);
    assert(((uintptr_t)pix2 & 7) == 0);
    assert((line_size & 7) == 0);
#define SUM(in0, in1, out0, out1) \
    "movq (%0)," #out0 "\n"\
    "movq 8(%0)," #out1 "\n"\
    "movq 8(%1),%%mm3\n"\
    "psubb %%mm2, " #out0 "\n"\
    "psubb %%mm3, " #out1 "\n"\
    "pxor %%mm7, " #out0 "\n"\
    "pxor %%mm7, " #out1 "\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)

static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
        "movq (%2, %0), %%mm0 \n\t"
        "movq (%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%3, %0) \n\t"
        "movq 8(%2, %0), %%mm0 \n\t"
        "movq 8(%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%3, %0) \n\t"
        : "r"(src1), "r"(src2), "r"(dst), "r"((x86_reg)w-15)
        dst[i+0] = src1[i+0]-src2[i+0];

static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top){
        "movq -1(%1, %0), %%mm0 \n\t" // LT
        "movq (%1, %0), %%mm1 \n\t" // T
        "movq -1(%2, %0), %%mm2 \n\t" // L
        "movq (%2, %0), %%mm3 \n\t" // X
        "movq %%mm2, %%mm4 \n\t" // L
        "psubb %%mm0, %%mm2 \n\t"
        "paddb %%mm1, %%mm2 \n\t" // L + T - LT
        "movq %%mm4, %%mm5 \n\t" // L
        "pmaxub %%mm1, %%mm4 \n\t" // max(T, L)
        "pminub %%mm5, %%mm1 \n\t" // min(T, L)
        "pminub %%mm2, %%mm4 \n\t"
        "pmaxub %%mm1, %%mm4 \n\t"
        "psubb %%mm4, %%mm3 \n\t" // dst - pred
        "movq %%mm3, (%3, %0) \n\t"
        : "r"(src1), "r"(src2), "r"(dst), "r"((x86_reg)w)
    dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);
    *left_top= src1[w-1];
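
/* The function above is the encoder side of the HuffYUV median predictor:
 * for each byte, pred = mid_pred(L, T, L + T - LT) with L = left, T = top
 * and LT = top-left neighbour, and src2 - pred is stored. Per-sample
 * scalar sketch (reference only):
 *
 *     int pred = mid_pred(l, t, l + t - lt);
 *     dst[i]   = src2[i] - pred;
 */
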
#define MMABS_MMX(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "pcmpgtw " #a ", " #z " \n\t"\
    "pxor " #z ", " #a " \n\t"\
    "psubw " #z ", " #a " \n\t"
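
/* MMABS_MMX computes |x| for signed words without pmaxsw: z becomes the sign
 * mask via pcmpgtw (all ones where x < 0), and (x ^ z) - z is the usual
 * two's-complement absolute value. The MMX2 and SSSE3 variants below use
 * pmaxsw and pabsw instead. */
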
#define MMABS_MMX2(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "psubw " #a ", " #z " \n\t"\
    "pmaxsw " #z ", " #a " \n\t"

#define MMABS_SSSE3(a,z)\
    "pabsw " #a ", " #a " \n\t"

#define MMABS_SUM(a,z, sum)\
    "paddusw " #a ", " #sum " \n\t"

/* FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can reach
 * about 100k on extreme inputs. That is very unlikely to occur in natural video,
 * and it is even less likely that no alternative MVs/modes with a lower cost exist. */

#define HSUM_MMX(a, t, dst)\
    "movq "#a", "#t" \n\t"\
    "psrlq $32, "#a" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movq "#a", "#t" \n\t"\
    "psrlq $16, "#a" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\

#define HSUM_MMX2(a, t, dst)\
    "pshufw $0x0E, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshufw $0x01, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\

#define HSUM_SSE2(a, t, dst)\
    "movhlps "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshuflw $0x0E, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshuflw $0x01, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\

#define DCT_SAD4(m,mm,o)\
    "mov"#m" "#o"+ 0(%1), "#mm"2 \n\t"\
    "mov"#m" "#o"+16(%1), "#mm"3 \n\t"\
    "mov"#m" "#o"+32(%1), "#mm"4 \n\t"\
    "mov"#m" "#o"+48(%1), "#mm"5 \n\t"\
    MMABS_SUM(mm##2, mm##6, mm##0)\
    MMABS_SUM(mm##3, mm##7, mm##1)\
    MMABS_SUM(mm##4, mm##6, mm##0)\
    MMABS_SUM(mm##5, mm##7, mm##1)\
    "pxor %%mm0, %%mm0 \n\t"\
    "pxor %%mm1, %%mm1 \n\t"\
    DCT_SAD4(q, %%mm, 0)\
    DCT_SAD4(q, %%mm, 8)\
    DCT_SAD4(q, %%mm, 64)\
    DCT_SAD4(q, %%mm, 72)\
    "paddusw %%mm1, %%mm0 \n\t"\
    HSUM(%%mm0, %%mm1, %0)

#define DCT_SAD_SSE2\
    "pxor %%xmm0, %%xmm0 \n\t"\
    "pxor %%xmm1, %%xmm1 \n\t"\
    DCT_SAD4(dqa, %%xmm, 0)\
    DCT_SAD4(dqa, %%xmm, 64)\
    "paddusw %%xmm1, %%xmm0 \n\t"\
    HSUM(%%xmm0, %%xmm1, %0)

#define DCT_SAD_FUNC(cpu) \
static int sum_abs_dctelem_##cpu(DCTELEM *block){\

#define DCT_SAD DCT_SAD_MMX
#define HSUM(a,t,dst) HSUM_MMX(a,t,dst)
#define MMABS(a,z) MMABS_MMX(a,z)

#define HSUM(a,t,dst) HSUM_MMX2(a,t,dst)
#define MMABS(a,z) MMABS_MMX2(a,z)

#define DCT_SAD DCT_SAD_SSE2
#define HSUM(a,t,dst) HSUM_SSE2(a,t,dst)

#if HAVE_SSSE3_INLINE
#define MMABS(a,z) MMABS_SSSE3(a,z)

static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int size){
        "pxor %%mm4, %%mm4 \n"
        "movq (%2,%0), %%mm2 \n"
        "movq (%3,%0,2), %%mm0 \n"
        "movq 8(%3,%0,2), %%mm1 \n"
        "punpckhbw %%mm2, %%mm3 \n"
        "punpcklbw %%mm2, %%mm2 \n"
        "psraw $8, %%mm3 \n"
        "psraw $8, %%mm2 \n"
        "psubw %%mm3, %%mm1 \n"
        "psubw %%mm2, %%mm0 \n"
        "pmaddwd %%mm1, %%mm1 \n"
        "pmaddwd %%mm0, %%mm0 \n"
        "paddd %%mm1, %%mm4 \n"
        "paddd %%mm0, %%mm4 \n"
        "movq %%mm4, %%mm3 \n"
        "psrlq $32, %%mm3 \n"
        "paddd %%mm3, %%mm4 \n"
        :"r"(pix1), "r"(pix2)
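
/* ssd_int8_vs_int16 above returns the sum of squared differences between an
 * int8_t array and an int16_t array; rough scalar sketch (reference only):
 *
 *     int sum = 0;
 *     for (i = 0; i < size; i++) {
 *         int d = pix1[i] - pix2[i];
 *         sum += d * d;
 *     }
 */
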
#define PHADDD(a, t)\
    "movq "#a", "#t" \n\t"\
    "psrlq $32, "#a" \n\t"\
    "paddd "#t", "#a" \n\t"

/*
   pmulhw:   dst[0-15] = (src[0-15]*dst[0-15])[16-31]
   pmulhrw:  dst[0-15] = (src[0-15]*dst[0-15] + 0x8000)[16-31]
   pmulhrsw: dst[0-15] = (src[0-15]*dst[0-15] + 0x4000)[15-30]
 */
#define PMULHRW(x, y, s, o)\
    "pmulhw " #s ", "#x " \n\t"\
    "pmulhw " #s ", "#y " \n\t"\
    "paddw " #o ", "#x " \n\t"\
    "paddw " #o ", "#y " \n\t"\
    "psraw $1, "#x " \n\t"\
    "psraw $1, "#y " \n\t"

#define DEF(x) x ## _mmx
#define SET_RND MOVQ_WONE
#define SCALE_OFFSET 1

#include "dsputil_qns_template.c"

#define DEF(x) x ## _3dnow
#define SCALE_OFFSET 0
#define PMULHRW(x, y, s, o)\
    "pmulhrw " #s ", "#x " \n\t"\
    "pmulhrw " #s ", "#y " \n\t"

#include "dsputil_qns_template.c"

#if HAVE_SSSE3_INLINE
#define DEF(x) x ## _ssse3
#define SCALE_OFFSET -1
#define PHADDD(a, t)\
    "pshufw $0x0E, "#a", "#t" \n\t"\
    "paddd "#t", "#a" \n\t" /* faster than phaddd on core2 */
#define PMULHRW(x, y, s, o)\
    "pmulhrsw " #s ", "#x " \n\t"\
    "pmulhrsw " #s ", "#y " \n\t"

#include "dsputil_qns_template.c"

#endif /* HAVE_SSSE3_INLINE */

#endif /* HAVE_INLINE_ASM */

int ff_sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h);

#define hadamard_func(cpu) \
int ff_hadamard8_diff_##cpu (void *s, uint8_t *src1, uint8_t *src2, \
                             int stride, int h); \
int ff_hadamard8_diff16_##cpu(void *s, uint8_t *src1, uint8_t *src2, \

hadamard_func(ssse3)

void ff_dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
    int mm_flags = av_get_cpu_flags();
    int bit_depth = avctx->bits_per_raw_sample;

    if (mm_flags & AV_CPU_FLAG_MMX) {
        const int dct_algo = avctx->dct_algo;
        if (avctx->bits_per_raw_sample <= 8 &&
            (dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX)) {
            if(mm_flags & AV_CPU_FLAG_SSE2){
                c->fdct = ff_fdct_sse2;
            } else if (mm_flags & AV_CPU_FLAG_MMXEXT) {
                c->fdct = ff_fdct_mmx2;
                c->fdct = ff_fdct_mmx;

        c->get_pixels = get_pixels_mmx;
        c->diff_pixels = diff_pixels_mmx;
        c->pix_sum = pix_sum16_mmx;
        c->diff_bytes= diff_bytes_mmx;
        c->sum_abs_dctelem= sum_abs_dctelem_mmx;
        c->pix_norm1 = pix_norm1_mmx;
        c->sse[0] = sse16_mmx;
        c->sse[1] = sse8_mmx;
        c->vsad[4]= vsad_intra16_mmx;
        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->vsad[0] = vsad16_mmx;
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->try_8x8basis= try_8x8basis_mmx;
        c->add_8x8basis= add_8x8basis_mmx;
        c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;

        if (mm_flags & AV_CPU_FLAG_MMXEXT) {
            c->sum_abs_dctelem= sum_abs_dctelem_mmx2;
            c->vsad[4]= vsad_intra16_mmx2;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->vsad[0] = vsad16_mmx2;
            c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;

        if(mm_flags & AV_CPU_FLAG_SSE2){
            c->get_pixels = get_pixels_sse2;
            c->sum_abs_dctelem= sum_abs_dctelem_sse2;

#if HAVE_SSSE3_INLINE
        if(mm_flags & AV_CPU_FLAG_SSSE3){
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->try_8x8basis= try_8x8basis_ssse3;
            c->add_8x8basis= add_8x8basis_ssse3;
            c->sum_abs_dctelem= sum_abs_dctelem_ssse3;

        if(mm_flags & AV_CPU_FLAG_3DNOW){
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->try_8x8basis= try_8x8basis_3dnow;
            c->add_8x8basis= add_8x8basis_3dnow;
#endif /* HAVE_INLINE_ASM */

    if (EXTERNAL_MMX(mm_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmx;

        if (EXTERNAL_MMXEXT(mm_flags)) {
            c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx2;
            c->hadamard8_diff[1] = ff_hadamard8_diff_mmx2;

        if (EXTERNAL_SSE2(mm_flags)) {
            c->sse[0] = ff_sse16_sse2;

#if HAVE_ALIGNED_STACK
            c->hadamard8_diff[0] = ff_hadamard8_diff16_sse2;
            c->hadamard8_diff[1] = ff_hadamard8_diff_sse2;

        if (EXTERNAL_SSSE3(mm_flags) && HAVE_ALIGNED_STACK) {
            c->hadamard8_diff[0] = ff_hadamard8_diff16_ssse3;
            c->hadamard8_diff[1] = ff_hadamard8_diff_ssse3;

    ff_dsputil_init_pix_mmx(c, avctx);