/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/mathops.h"
#include "dsputil_mmx.h"
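/* get_pixels_mmx(): read an 8x8 block of unsigned bytes and store it as
 * 16-bit DCTELEMs, two rows per loop iteration.  A scalar sketch of the
 * operation (for illustration only):
 *
 *     for (int y = 0; y < 8; y++)
 *         for (int x = 0; x < 8; x++)
 *             block[y * 8 + x] = pixels[y * line_size + x];
 */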
static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
        "mov $-128, %%"REG_a" \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%0, %2), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "movq %%mm0, (%1, %%"REG_a") \n\t"
        "movq %%mm1, 8(%1, %%"REG_a") \n\t"
        "movq %%mm2, 16(%1, %%"REG_a") \n\t"
        "movq %%mm3, 24(%1, %%"REG_a") \n\t"
        "add $32, %%"REG_a" \n\t"
        : "r" (block+64), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*2)
static void get_pixels_sse2(DCTELEM *block, const uint8_t *pixels, int line_size)
        "pxor %%xmm4, %%xmm4 \n\t"
        "movq (%0), %%xmm0 \n\t"
        "movq (%0, %2), %%xmm1 \n\t"
        "movq (%0, %2,2), %%xmm2 \n\t"
        "movq (%0, %3), %%xmm3 \n\t"
        "lea (%0,%2,4), %0 \n\t"
        "punpcklbw %%xmm4, %%xmm0 \n\t"
        "punpcklbw %%xmm4, %%xmm1 \n\t"
        "punpcklbw %%xmm4, %%xmm2 \n\t"
        "punpcklbw %%xmm4, %%xmm3 \n\t"
        "movdqa %%xmm0, (%1) \n\t"
        "movdqa %%xmm1, 16(%1) \n\t"
        "movdqa %%xmm2, 32(%1) \n\t"
        "movdqa %%xmm3, 48(%1) \n\t"
        "movq (%0), %%xmm0 \n\t"
        "movq (%0, %2), %%xmm1 \n\t"
        "movq (%0, %2,2), %%xmm2 \n\t"
        "movq (%0, %3), %%xmm3 \n\t"
        "punpcklbw %%xmm4, %%xmm0 \n\t"
        "punpcklbw %%xmm4, %%xmm1 \n\t"
        "punpcklbw %%xmm4, %%xmm2 \n\t"
        "punpcklbw %%xmm4, %%xmm3 \n\t"
        "movdqa %%xmm0, 64(%1) \n\t"
        "movdqa %%xmm1, 80(%1) \n\t"
        "movdqa %%xmm2, 96(%1) \n\t"
        "movdqa %%xmm3, 112(%1) \n\t"
        : "r" (block), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3)
static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
        "pxor %%mm7, %%mm7 \n\t"
        "mov $-128, %%"REG_a" \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "psubw %%mm2, %%mm0 \n\t"
        "psubw %%mm3, %%mm1 \n\t"
        "movq %%mm0, (%2, %%"REG_a") \n\t"
        "movq %%mm1, 8(%2, %%"REG_a") \n\t"
        "add $16, %%"REG_a" \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" ((x86_reg)stride)
static int pix_sum16_mmx(uint8_t * pix, int line_size){
    x86_reg index= -line_size*h;
        "pxor %%mm7, %%mm7 \n\t"
        "pxor %%mm6, %%mm6 \n\t"
        "movq (%2, %1), %%mm0 \n\t"
        "movq (%2, %1), %%mm1 \n\t"
        "movq 8(%2, %1), %%mm2 \n\t"
        "movq 8(%2, %1), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "paddw %%mm2, %%mm3 \n\t"
        "paddw %%mm1, %%mm3 \n\t"
        "paddw %%mm3, %%mm6 \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $32, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $16, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movd %%mm6, %0 \n\t"
        "andl $0xFFFF, %0 \n\t"
        : "=&r" (sum), "+r" (index)
        : "r" (pix - index), "r" ((x86_reg)line_size)
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
        "movq (%0),%%mm2\n" /* mm2 = pix[0-7] */
        "movq 8(%0),%%mm3\n" /* mm3 = pix[8-15] */
        "movq %%mm2,%%mm1\n" /* mm1 = mm2 = pix[0-7] */
        "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
        "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */
        "movq %%mm3,%%mm4\n" /* mm4 = mm3 = pix[8-15] */
        "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
        "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */
183 "pmaddwd %%mm1,%%mm1\n" /* mm1 = (pix0^2+pix1^2,pix2^2+pix3^2) */
184 "pmaddwd %%mm2,%%mm2\n" /* mm2 = (pix4^2+pix5^2,pix6^2+pix7^2) */
186 "pmaddwd %%mm3,%%mm3\n"
187 "pmaddwd %%mm4,%%mm4\n"
189 "paddd %%mm1,%%mm2\n" /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
190 pix2^2+pix3^2+pix6^2+pix7^2) */
191 "paddd %%mm3,%%mm4\n"
192 "paddd %%mm2,%%mm7\n"
195 "paddd %%mm4,%%mm7\n"
200 "psrlq $32, %%mm7\n" /* shift hi dword to lo */
201 "paddd %%mm7,%%mm1\n"
203 : "+r" (pix), "=r"(tmp) : "r" ((x86_reg)line_size) : "%ecx" );
static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
        "pxor %%mm0,%%mm0\n" /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
        "movq (%0),%%mm1\n" /* mm1 = pix1[0][0-7] */
        "movq (%1),%%mm2\n" /* mm2 = pix2[0][0-7] */
        "movq (%0,%3),%%mm3\n" /* mm3 = pix1[1][0-7] */
        "movq (%1,%3),%%mm4\n" /* mm4 = pix2[1][0-7] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /* OR the results to get absolute difference */
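        /* The absolute difference of two unsigned bytes is formed without a
         * dedicated instruction: |a - b| == sat_sub(a, b) | sat_sub(b, a),
         * where sat_sub() is psubusb (subtraction saturating at 0).
         * Scalar sketch: d = (a > b ? a - b : 0) | (b > a ? b - a : 0); */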
225 "psubusb %%mm2,%%mm1\n"
226 "psubusb %%mm4,%%mm3\n"
227 "psubusb %%mm5,%%mm2\n"
228 "psubusb %%mm6,%%mm4\n"
233 /* now convert to 16-bit vectors so we can square them */
237 "punpckhbw %%mm0,%%mm2\n"
238 "punpckhbw %%mm0,%%mm4\n"
239 "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
240 "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
242 "pmaddwd %%mm2,%%mm2\n"
243 "pmaddwd %%mm4,%%mm4\n"
244 "pmaddwd %%mm1,%%mm1\n"
245 "pmaddwd %%mm3,%%mm3\n"
247 "lea (%0,%3,2), %0\n" /* pix1 += 2*line_size */
248 "lea (%1,%3,2), %1\n" /* pix2 += 2*line_size */
250 "paddd %%mm2,%%mm1\n"
251 "paddd %%mm4,%%mm3\n"
252 "paddd %%mm1,%%mm7\n"
253 "paddd %%mm3,%%mm7\n"
259 "psrlq $32, %%mm7\n" /* shift hi dword to lo */
260 "paddd %%mm7,%%mm1\n"
262 : "+r" (pix1), "+r" (pix2), "=r"(tmp)
263 : "r" ((x86_reg)line_size) , "m" (h)
static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
        "pxor %%mm0,%%mm0\n" /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
        "movq (%0),%%mm1\n" /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n" /* mm2 = pix2[0-7] */
        "movq 8(%0),%%mm3\n" /* mm3 = pix1[8-15] */
        "movq 8(%1),%%mm4\n" /* mm4 = pix2[8-15] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /* OR the results to get absolute difference */
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"
        /* now convert to 16-bit vectors so we can square them */
        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
300 "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
302 "pmaddwd %%mm2,%%mm2\n"
303 "pmaddwd %%mm4,%%mm4\n"
304 "pmaddwd %%mm1,%%mm1\n"
305 "pmaddwd %%mm3,%%mm3\n"
310 "paddd %%mm2,%%mm1\n"
311 "paddd %%mm4,%%mm3\n"
312 "paddd %%mm1,%%mm7\n"
313 "paddd %%mm3,%%mm7\n"
319 "psrlq $32, %%mm7\n" /* shift hi dword to lo */
320 "paddd %%mm7,%%mm1\n"
322 : "+r" (pix1), "+r" (pix2), "=r"(tmp)
323 : "r" ((x86_reg)line_size) , "m" (h)
static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
        "movq %%mm0, %%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "movq %%mm4, %%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "movq %%mm0, %%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"
        "movq %%mm4, %%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "g" (h-2)

static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "g" (h-2)
    return tmp + hf_noise8_mmx(pix+8, line_size, h);
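/* nsse16_mmx()/nsse8_mmx(): noise preserving sum of squared differences,
 *
 *     score = sse(pix1, pix2) + nsse_weight * |hf_noise(pix1) - hf_noise(pix2)|
 *
 * so a block that loses (or gains) high-frequency detail is penalised even
 * when its plain SSE is small; a fixed weight of 8 is used when no
 * MpegEncContext is available. */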
static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    if(c) score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
    else  score1 = sse16_mmx(c, pix1, pix2, line_size, h);
    score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);
    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;

static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1= sse8_mmx(c, pix1, pix2, line_size, h);
    int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);
    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;
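/* vsad_intra16_mmx(): sum of absolute differences between vertically
 * adjacent rows of a 16-pixel-wide block; a cheap measure of vertical
 * activity.  The _mmx2 variant below does the same with psadbw. */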
static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), %%mm2\n"\
    "movq 8(%0), %%mm3\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "paddw %%mm6,%%mm0\n"
        "paddw %%mm6,%%mm0\n"
        : "+r" (pix), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)

static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), " #out0 "\n"\
    "movq 8(%0), " #out1 "\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        : "+r" (pix), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)

static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%1)," #out0 "\n"\
    "movq 8(%0),%%mm3\n"\
    "movq 8(%1)," #out1 "\n"\
    "psubb " #out0 ", %%mm2\n"\
    "psubb " #out1 ", %%mm3\n"\
    "pxor %%mm7, %%mm2\n"\
    "pxor %%mm7, %%mm3\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

        "pcmpeqw %%mm7,%%mm7\n"
        "packsswb %%mm7, %%mm7\n"
739 "psubb %%mm2, %%mm0\n"
740 "psubb %%mm3, %%mm1\n"
741 "pxor %%mm7, %%mm0\n"
742 "pxor %%mm7, %%mm1\n"
746 SUM(%%mm4, %%mm5, %%mm0, %%mm1)
748 SUM(%%mm0, %%mm1, %%mm4, %%mm5)
755 "paddw %%mm6,%%mm0\n"
758 "paddw %%mm6,%%mm0\n"
760 : "+r" (pix1), "+r" (pix2), "=r"(tmp)
761 : "r" ((x86_reg)line_size) , "m" (h)
767 static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
770 assert( (((int)pix1) & 7) == 0);
771 assert( (((int)pix2) & 7) == 0);
772 assert((line_size &7) ==0);
774 #define SUM(in0, in1, out0, out1) \
775 "movq (%0)," #out0 "\n"\
777 "movq 8(%0)," #out1 "\n"\
778 "movq 8(%1),%%mm3\n"\
781 "psubb %%mm2, " #out0 "\n"\
782 "psubb %%mm3, " #out1 "\n"\
783 "pxor %%mm7, " #out0 "\n"\
784 "pxor %%mm7, " #out1 "\n"\
785 "psadbw " #out0 ", " #in0 "\n"\
786 "psadbw " #out1 ", " #in1 "\n"\
787 "paddw " #in1 ", " #in0 "\n"\
788 "paddw " #in0 ", %%mm6\n"
793 "pcmpeqw %%mm7,%%mm7\n"
795 "packsswb %%mm7, %%mm7\n"
802 "psubb %%mm2, %%mm0\n"
803 "psubb %%mm3, %%mm1\n"
804 "pxor %%mm7, %%mm0\n"
805 "pxor %%mm7, %%mm1\n"
809 SUM(%%mm4, %%mm5, %%mm0, %%mm1)
811 SUM(%%mm0, %%mm1, %%mm4, %%mm5)
817 : "+r" (pix1), "+r" (pix2), "=r"(tmp)
818 : "r" ((x86_reg)line_size) , "m" (h)
static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
        "movq (%2, %0), %%mm0 \n\t"
        "movq (%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%3, %0) \n\t"
        "movq 8(%2, %0), %%mm0 \n\t"
        "movq 8(%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%3, %0) \n\t"
        : "r"(src1), "r"(src2), "r"(dst), "r"((x86_reg)w-15)
        dst[i+0] = src1[i+0]-src2[i+0];
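/* sub_hfyu_median_prediction_mmx2(): HuffYUV median prediction,
 *
 *     pred = median(L, T, L + T - LT),  dst = src2 - pred
 *
 * done 8 pixels at a time.  The median of three is built from min/max only,
 *     median(a, b, c) = max(min(a, b), min(max(a, b), c)),
 * which is what the pminub/pmaxub sequence below implements; the first pixel
 * of the row is handled in scalar code with mid_pred(). */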
static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top){
        "movq -1(%1, %0), %%mm0 \n\t" // LT
        "movq (%1, %0), %%mm1 \n\t" // T
        "movq -1(%2, %0), %%mm2 \n\t" // L
        "movq (%2, %0), %%mm3 \n\t" // X
        "movq %%mm2, %%mm4 \n\t" // L
        "psubb %%mm0, %%mm2 \n\t"
        "paddb %%mm1, %%mm2 \n\t" // L + T - LT
        "movq %%mm4, %%mm5 \n\t" // L
        "pmaxub %%mm1, %%mm4 \n\t" // max(T, L)
        "pminub %%mm5, %%mm1 \n\t" // min(T, L)
        "pminub %%mm2, %%mm4 \n\t"
        "pmaxub %%mm1, %%mm4 \n\t"
        "psubb %%mm4, %%mm3 \n\t" // dst - pred
        "movq %%mm3, (%3, %0) \n\t"
        : "r"(src1), "r"(src2), "r"(dst), "r"((x86_reg)w)

    dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);

    *left_top= src1[w-1];

#define MMABS_MMX(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "pcmpgtw " #a ", " #z " \n\t"\
    "pxor " #z ", " #a " \n\t"\
    "psubw " #z ", " #a " \n\t"
#define MMABS_MMX2(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "psubw " #a ", " #z " \n\t"\
    "pmaxsw " #z ", " #a " \n\t"

#define MMABS_SSSE3(a,z)\
    "pabsw " #a ", " #a " \n\t"

#define MMABS_SUM(a,z, sum)\
    "paddusw " #a ", " #sum " \n\t"

/* FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can get up to
 * about 100k on extreme inputs. But that's very unlikely to occur in natural video,
 * and it's even more unlikely to not have any alternative mvs/modes with lower cost. */
#define HSUM_MMX(a, t, dst)\
    "movq "#a", "#t" \n\t"\
    "psrlq $32, "#a" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movq "#a", "#t" \n\t"\
    "psrlq $16, "#a" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\

#define HSUM_MMX2(a, t, dst)\
    "pshufw $0x0E, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshufw $0x01, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\

#define HSUM_SSE2(a, t, dst)\
    "movhlps "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshuflw $0x0E, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshuflw $0x01, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\
#define DCT_SAD4(m,mm,o)\
    "mov"#m" "#o"+ 0(%1), "#mm"2 \n\t"\
    "mov"#m" "#o"+16(%1), "#mm"3 \n\t"\
    "mov"#m" "#o"+32(%1), "#mm"4 \n\t"\
    "mov"#m" "#o"+48(%1), "#mm"5 \n\t"\
    MMABS_SUM(mm##2, mm##6, mm##0)\
    MMABS_SUM(mm##3, mm##7, mm##1)\
    MMABS_SUM(mm##4, mm##6, mm##0)\
    MMABS_SUM(mm##5, mm##7, mm##1)\
939 "pxor %%mm0, %%mm0 \n\t"\
940 "pxor %%mm1, %%mm1 \n\t"\
941 DCT_SAD4(q, %%mm, 0)\
942 DCT_SAD4(q, %%mm, 8)\
943 DCT_SAD4(q, %%mm, 64)\
944 DCT_SAD4(q, %%mm, 72)\
945 "paddusw %%mm1, %%mm0 \n\t"\
946 HSUM(%%mm0, %%mm1, %0)
#define DCT_SAD_SSE2\
    "pxor %%xmm0, %%xmm0 \n\t"\
    "pxor %%xmm1, %%xmm1 \n\t"\
    DCT_SAD4(dqa, %%xmm, 0)\
    DCT_SAD4(dqa, %%xmm, 64)\
    "paddusw %%xmm1, %%xmm0 \n\t"\
    HSUM(%%xmm0, %%xmm1, %0)

#define DCT_SAD_FUNC(cpu) \
static int sum_abs_dctelem_##cpu(DCTELEM *block){\

#define DCT_SAD DCT_SAD_MMX
#define HSUM(a,t,dst) HSUM_MMX(a,t,dst)
#define MMABS(a,z) MMABS_MMX(a,z)

#define HSUM(a,t,dst) HSUM_MMX2(a,t,dst)
#define MMABS(a,z) MMABS_MMX2(a,z)

#define DCT_SAD DCT_SAD_SSE2
#define HSUM(a,t,dst) HSUM_SSE2(a,t,dst)

#if HAVE_SSSE3_INLINE
#define MMABS(a,z) MMABS_SSSE3(a,z)
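/* ssd_int8_vs_int16_mmx(): sum of squared differences between an int8_t
 * array and an int16_t array, eight elements per iteration; the int8 values
 * are sign-extended to 16 bits (punpck + psraw) before subtracting. */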
static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int size){
        "pxor %%mm4, %%mm4 \n"
        "movq (%2,%0), %%mm2 \n"
        "movq (%3,%0,2), %%mm0 \n"
        "movq 8(%3,%0,2), %%mm1 \n"
        "punpckhbw %%mm2, %%mm3 \n"
        "punpcklbw %%mm2, %%mm2 \n"
        "psraw $8, %%mm3 \n"
        "psraw $8, %%mm2 \n"
        "psubw %%mm3, %%mm1 \n"
        "psubw %%mm2, %%mm0 \n"
        "pmaddwd %%mm1, %%mm1 \n"
        "pmaddwd %%mm0, %%mm0 \n"
        "paddd %%mm1, %%mm4 \n"
        "paddd %%mm0, %%mm4 \n"
        "movq %%mm4, %%mm3 \n"
        "psrlq $32, %%mm3 \n"
        "paddd %%mm3, %%mm4 \n"
        :"r"(pix1), "r"(pix2)

#define PHADDD(a, t)\
    "movq "#a", "#t" \n\t"\
    "psrlq $32, "#a" \n\t"\
    "paddd "#t", "#a" \n\t"
/*
   pmulhw: dst[0-15]=(src[0-15]*dst[0-15])[16-31]
   pmulhrw: dst[0-15]=(src[0-15]*dst[0-15] + 0x8000)[16-31]
   pmulhrsw: dst[0-15]=(src[0-15]*dst[0-15] + 0x4000)[15-30]
 */
#define PMULHRW(x, y, s, o)\
    "pmulhw " #s ", "#x " \n\t"\
    "pmulhw " #s ", "#y " \n\t"\
    "paddw " #o ", "#x " \n\t"\
    "paddw " #o ", "#y " \n\t"\
    "psraw $1, "#x " \n\t"\
    "psraw $1, "#y " \n\t"
#define DEF(x) x ## _mmx
#define SET_RND MOVQ_WONE
#define SCALE_OFFSET 1

#include "dsputil_mmx_qns_template.c"

#define DEF(x) x ## _3dnow
#define SCALE_OFFSET 0
#define PMULHRW(x, y, s, o)\
    "pmulhrw " #s ", "#x " \n\t"\
    "pmulhrw " #s ", "#y " \n\t"

#include "dsputil_mmx_qns_template.c"

#if HAVE_SSSE3_INLINE
#define DEF(x) x ## _ssse3
#define SCALE_OFFSET -1
#define PHADDD(a, t)\
    "pshufw $0x0E, "#a", "#t" \n\t"\
    "paddd "#t", "#a" \n\t" /* faster than phaddd on core2 */
#define PMULHRW(x, y, s, o)\
    "pmulhrsw " #s ", "#x " \n\t"\
    "pmulhrsw " #s ", "#y " \n\t"

#include "dsputil_mmx_qns_template.c"

#endif /* HAVE_SSSE3_INLINE */

#endif /* HAVE_INLINE_ASM */
int ff_sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h);

#define hadamard_func(cpu) \
int ff_hadamard8_diff_##cpu (void *s, uint8_t *src1, uint8_t *src2, \
                             int stride, int h); \
int ff_hadamard8_diff16_##cpu(void *s, uint8_t *src1, uint8_t *src2, \
                              int stride, int h);

hadamard_func(mmx)
hadamard_func(mmx2)
hadamard_func(sse2)
hadamard_func(ssse3)
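/* ff_dsputilenc_init_mmx(): install the MMX/MMXEXT/SSE2/SSSE3 versions above
 * into the DSPContext according to the detected CPU flags; functions that
 * are not bit-exact with the C reference are only used when
 * CODEC_FLAG_BITEXACT is not set. */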
void ff_dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
    int mm_flags = av_get_cpu_flags();
    int bit_depth = avctx->bits_per_raw_sample;

    if (mm_flags & AV_CPU_FLAG_MMX) {
        const int dct_algo = avctx->dct_algo;
        if (avctx->bits_per_raw_sample <= 8 &&
            (dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX)) {
            if(mm_flags & AV_CPU_FLAG_SSE2){
                c->fdct = ff_fdct_sse2;
            } else if (mm_flags & AV_CPU_FLAG_MMXEXT) {
                c->fdct = ff_fdct_mmx2;
                c->fdct = ff_fdct_mmx;

            c->get_pixels = get_pixels_mmx;
        c->diff_pixels = diff_pixels_mmx;
        c->pix_sum = pix_sum16_mmx;
        c->diff_bytes= diff_bytes_mmx;
        c->sum_abs_dctelem= sum_abs_dctelem_mmx;
        c->pix_norm1 = pix_norm1_mmx;
        c->sse[0] = sse16_mmx;
        c->sse[1] = sse8_mmx;
        c->vsad[4]= vsad_intra16_mmx;
        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->vsad[0] = vsad16_mmx;
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->try_8x8basis= try_8x8basis_mmx;
            c->add_8x8basis= add_8x8basis_mmx;
        c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;

        if (mm_flags & AV_CPU_FLAG_MMXEXT) {
            c->sum_abs_dctelem= sum_abs_dctelem_mmx2;
            c->vsad[4]= vsad_intra16_mmx2;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->vsad[0] = vsad16_mmx2;
            c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;

        if(mm_flags & AV_CPU_FLAG_SSE2){
            c->get_pixels = get_pixels_sse2;
            c->sum_abs_dctelem= sum_abs_dctelem_sse2;

#if HAVE_SSSE3_INLINE
        if(mm_flags & AV_CPU_FLAG_SSSE3){
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->try_8x8basis= try_8x8basis_ssse3;
                c->add_8x8basis= add_8x8basis_ssse3;
            c->sum_abs_dctelem= sum_abs_dctelem_ssse3;

        if(mm_flags & AV_CPU_FLAG_3DNOW){
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->try_8x8basis= try_8x8basis_3dnow;
                c->add_8x8basis= add_8x8basis_3dnow;
#endif /* HAVE_INLINE_ASM */

    if (mm_flags & AV_CPU_FLAG_MMX) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmx;

    if (mm_flags & AV_CPU_FLAG_MMXEXT) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx2;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmx2;

    if (mm_flags & AV_CPU_FLAG_SSE2){
        c->sse[0] = ff_sse16_sse2;
#if HAVE_ALIGNED_STACK
        c->hadamard8_diff[0] = ff_hadamard8_diff16_sse2;
        c->hadamard8_diff[1] = ff_hadamard8_diff_sse2;

#if HAVE_SSSE3 && HAVE_ALIGNED_STACK
    if (mm_flags & AV_CPU_FLAG_SSSE3) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_ssse3;
        c->hadamard8_diff[1] = ff_hadamard8_diff_ssse3;
#endif /* HAVE_YASM */

    ff_dsputil_init_pix_mmx(c, avctx);