/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/mathops.h"
#include "dsputil_mmx.h"
void ff_get_pixels_mmx(int16_t *block, const uint8_t *pixels, int line_size);
void ff_get_pixels_sse2(int16_t *block, const uint8_t *pixels, int line_size);
void ff_diff_pixels_mmx(int16_t *block, const uint8_t *s1, const uint8_t *s2, int stride);
int ff_pix_sum16_mmx(uint8_t * pix, int line_size);
int ff_pix_norm1_mmx(uint8_t *pix, int line_size);
static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
        "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
        "movq (%0),%%mm1\n"       /* mm1 = pix1[0][0-7] */
        "movq (%1),%%mm2\n"       /* mm2 = pix2[0][0-7] */
        "movq (%0,%3),%%mm3\n"    /* mm3 = pix1[1][0-7] */
        "movq (%1,%3),%%mm4\n"    /* mm4 = pix2[1][0-7] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /* OR the results to get absolute difference */
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"
        /* now convert to 16-bit vectors so we can square them */
        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm3 now spread over (mm3,mm4) */
        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"
        "lea (%0,%3,2), %0\n"     /* pix1 += 2*line_size */
        "lea (%1,%3,2), %1\n"     /* pix2 += 2*line_size */
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
        "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
        "movq (%0),%%mm1\n"       /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n"       /* mm2 = pix2[0-7] */
        "movq 8(%0),%%mm3\n"      /* mm3 = pix1[8-15] */
        "movq 8(%1),%%mm4\n"      /* mm4 = pix2[8-15] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /* OR the results to get absolute difference */
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"
        /* now convert to 16-bit vectors so we can square them */
        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm3 now spread over (mm3,mm4) */
        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"
        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
        "movq %%mm0, %%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "movq %%mm4, %%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "movq %%mm0, %%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"
        "movq %%mm4, %%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "g" (h-2)
static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "g" (h-2)
    return tmp + hf_noise8_mmx(pix1+8, line_size, h);
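
/* What the hf_noise* helpers measure (reference sketch only, not compiled;
 * compare the scalar nsse reference in dsputil.c): the amount of
 * high-frequency content, as the sum of absolute second-order differences
 * between horizontally and vertically neighbouring pixels.
 *
 *     int noise = 0;
 *     for (int y = 0; y < h - 1; y++) {
 *         for (int x = 0; x < 8; x++)            // 16 for hf_noise16
 *             noise += FFABS( pix[x]             - pix[x + 1]
 *                           - pix[x + line_size] + pix[x + 1 + line_size]);
 *         pix += line_size;
 *     }
 *     return noise;
 *
 * (Loop bounds are approximate; the asm handles the first rows and the block
 * edges slightly differently.)
 */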
static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    if(c) score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
    else  score1 = sse16_mmx(c, pix1, pix2, line_size, h);
    score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);

    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;

static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1= sse8_mmx(c, pix1, pix2, line_size, h);
    int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);

    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;
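
/* nsse ("noise preserving SSE") therefore scores a block as
 *     sse(pix1, pix2) + |hf_noise(pix1) - hf_noise(pix2)| * nsse_weight
 * so that candidates which smooth away (or invent) high-frequency detail are
 * penalised even when their plain SSE is small; without a context the weight
 * falls back to 8, as in the else branches above. */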
static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    av_assert2( (((int)pix) & 7) == 0);
    av_assert2((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
        "movq (%0), %%mm2\n"\
        "movq 8(%0), %%mm3\n"\
        "movq %%mm2, " #out0 "\n"\
        "movq %%mm3, " #out1 "\n"\
        "psubusb " #in0 ", %%mm2\n"\
        "psubusb " #in1 ", %%mm3\n"\
        "psubusb " #out0 ", " #in0 "\n"\
        "psubusb " #out1 ", " #in1 "\n"\
        "por %%mm2, " #in0 "\n"\
        "por %%mm3, " #in1 "\n"\
        "movq " #in0 ", %%mm2\n"\
        "movq " #in1 ", %%mm3\n"\
        "punpcklbw %%mm7, " #in0 "\n"\
        "punpcklbw %%mm7, " #in1 "\n"\
        "punpckhbw %%mm7, %%mm2\n"\
        "punpckhbw %%mm7, %%mm3\n"\
        "paddw " #in1 ", " #in0 "\n"\
        "paddw %%mm3, %%mm2\n"\
        "paddw %%mm2, " #in0 "\n"\
        "paddw " #in0 ", %%mm6\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)

        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "paddw %%mm6,%%mm0\n"
        "paddw %%mm6,%%mm0\n"
        : "+r" (pix), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
static int vsad_intra16_mmxext(void *v, uint8_t *pix, uint8_t *dummy,
                               int line_size, int h)
    av_assert2( (((int)pix) & 7) == 0);
    av_assert2((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
        "movq (%0), " #out0 "\n"\
        "movq 8(%0), " #out1 "\n"\
        "psadbw " #out0 ", " #in0 "\n"\
        "psadbw " #out1 ", " #in1 "\n"\
        "paddw " #in1 ", " #in0 "\n"\
        "paddw " #in0 ", %%mm6\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)

        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        : "+r" (pix), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    av_assert2( (((int)pix1) & 7) == 0);
    av_assert2( (((int)pix2) & 7) == 0);
    av_assert2((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
        "movq (%1)," #out0 "\n"\
        "movq 8(%0),%%mm3\n"\
        "movq 8(%1)," #out1 "\n"\
        "psubb " #out0 ", %%mm2\n"\
        "psubb " #out1 ", %%mm3\n"\
        "pxor %%mm7, %%mm2\n"\
        "pxor %%mm7, %%mm3\n"\
        "movq %%mm2, " #out0 "\n"\
        "movq %%mm3, " #out1 "\n"\
        "psubusb " #in0 ", %%mm2\n"\
        "psubusb " #in1 ", %%mm3\n"\
        "psubusb " #out0 ", " #in0 "\n"\
        "psubusb " #out1 ", " #in1 "\n"\
        "por %%mm2, " #in0 "\n"\
        "por %%mm3, " #in1 "\n"\
        "movq " #in0 ", %%mm2\n"\
        "movq " #in1 ", %%mm3\n"\
        "punpcklbw %%mm7, " #in0 "\n"\
        "punpcklbw %%mm7, " #in1 "\n"\
        "punpckhbw %%mm7, %%mm2\n"\
        "punpckhbw %%mm7, %%mm3\n"\
        "paddw " #in1 ", " #in0 "\n"\
        "paddw %%mm3, %%mm2\n"\
        "paddw %%mm2, " #in0 "\n"\
        "paddw " #in0 ", %%mm6\n"

        "pcmpeqw %%mm7,%%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)

        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "paddw %%mm6,%%mm0\n"
        "paddw %%mm6,%%mm0\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
static int vsad16_mmxext(void *v, uint8_t *pix1, uint8_t *pix2,
                         int line_size, int h)
    av_assert2( (((int)pix1) & 7) == 0);
    av_assert2( (((int)pix2) & 7) == 0);
    av_assert2((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
        "movq (%0)," #out0 "\n"\
        "movq 8(%0)," #out1 "\n"\
        "movq 8(%1),%%mm3\n"\
        "psubb %%mm2, " #out0 "\n"\
        "psubb %%mm3, " #out1 "\n"\
        "pxor %%mm7, " #out0 "\n"\
        "pxor %%mm7, " #out1 "\n"\
        "psadbw " #out0 ", " #in0 "\n"\
        "psadbw " #out1 ", " #in1 "\n"\
        "paddw " #in1 ", " #in0 "\n"\
        "paddw " #in0 ", %%mm6\n"

        "pcmpeqw %%mm7,%%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)

        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
static void diff_bytes_mmx(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w){
        "movq (%2, %0), %%mm0 \n\t"
        "movq (%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%3, %0) \n\t"
        "movq 8(%2, %0), %%mm0 \n\t"
        "movq 8(%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%3, %0) \n\t"
        : "r"(src1), "r"(src2), "r"(dst), "r"((x86_reg)w-15)
        dst[i+0] = src1[i+0]-src2[i+0];
static void sub_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *src1,
                                              const uint8_t *src2, int w,
                                              int *left, int *left_top)
        "movq (%1, %0), %%mm0 \n\t" // LT
        "psllq $8, %%mm0 \n\t"
        "movq (%1, %0), %%mm1 \n\t" // T
        "movq -1(%2, %0), %%mm2 \n\t" // L
        "movq (%2, %0), %%mm3 \n\t" // X
        "movq %%mm2, %%mm4 \n\t" // L
        "psubb %%mm0, %%mm2 \n\t"
        "paddb %%mm1, %%mm2 \n\t" // L + T - LT
        "movq %%mm4, %%mm5 \n\t" // L
        "pmaxub %%mm1, %%mm4 \n\t" // max(T, L)
        "pminub %%mm5, %%mm1 \n\t" // min(T, L)
        "pminub %%mm2, %%mm4 \n\t"
        "pmaxub %%mm1, %%mm4 \n\t"
        "psubb %%mm4, %%mm3 \n\t" // dst - pred
        "movq %%mm3, (%3, %0) \n\t"
        "movq -1(%1, %0), %%mm0 \n\t" // LT
        : "r"(src1), "r"(src2), "r"(dst), "r"((x86_reg)w)
    dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);
    *left_top= src1[w-1];
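
/* Reference sketch of the median prediction being subtracted (compare the
 * scalar fallback in dsputil.c): for each byte the predictor is the median of
 * left (L), top (T) and L + T - LT, and the output is the residual.
 *
 *     l = *left; lt = *left_top;
 *     for (i = 0; i < w; i++) {
 *         const int pred = mid_pred(l, src1[i], (l + src1[i] - lt) & 0xFF);
 *         lt = src1[i];
 *         l  = src2[i];
 *         dst[i] = l - pred;
 *     }
 *
 * The asm realises the median as clip(L + T - LT, min(L, T), max(L, T)) with
 * pminub/pmaxub; the first element and the carried left/left_top values are
 * handled in plain C around the asm, as above.
 */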
#define MMABS_MMX(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "pcmpgtw " #a ", " #z " \n\t"\
    "pxor " #z ", " #a " \n\t"\
    "psubw " #z ", " #a " \n\t"

#define MMABS_MMXEXT(a, z) \
    "pxor " #z ", " #z " \n\t"\
    "psubw " #a ", " #z " \n\t"\
    "pmaxsw " #z ", " #a " \n\t"

#define MMABS_SSSE3(a,z)\
    "pabsw " #a ", " #a " \n\t"
#define MMABS_SUM(a,z, sum)\
    "paddusw " #a ", " #sum " \n\t"
/* FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can get up to
 * about 100k on extreme inputs. But that's very unlikely to occur in natural video,
 * and it's even more unlikely to not have any alternative mvs/modes with lower cost. */
#define HSUM_MMX(a, t, dst)\
    "movq "#a", "#t" \n\t"\
    "psrlq $32, "#a" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movq "#a", "#t" \n\t"\
    "psrlq $16, "#a" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\

#define HSUM_MMXEXT(a, t, dst) \
    "pshufw $0x0E, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshufw $0x01, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\

#define HSUM_SSE2(a, t, dst)\
    "movhlps "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshuflw $0x0E, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshuflw $0x01, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\
#define DCT_SAD4(m,mm,o)\
    "mov"#m" "#o"+ 0(%1), "#mm"2 \n\t"\
    "mov"#m" "#o"+16(%1), "#mm"3 \n\t"\
    "mov"#m" "#o"+32(%1), "#mm"4 \n\t"\
    "mov"#m" "#o"+48(%1), "#mm"5 \n\t"\
    MMABS_SUM(mm##2, mm##6, mm##0)\
    MMABS_SUM(mm##3, mm##7, mm##1)\
    MMABS_SUM(mm##4, mm##6, mm##0)\
    MMABS_SUM(mm##5, mm##7, mm##1)\

#define DCT_SAD_MMX\
    "pxor %%mm0, %%mm0 \n\t"\
    "pxor %%mm1, %%mm1 \n\t"\
    DCT_SAD4(q, %%mm, 0)\
    DCT_SAD4(q, %%mm, 8)\
    DCT_SAD4(q, %%mm, 64)\
    DCT_SAD4(q, %%mm, 72)\
    "paddusw %%mm1, %%mm0 \n\t"\
    HSUM(%%mm0, %%mm1, %0)
#define DCT_SAD_SSE2\
    "pxor %%xmm0, %%xmm0 \n\t"\
    "pxor %%xmm1, %%xmm1 \n\t"\
    DCT_SAD4(dqa, %%xmm, 0)\
    DCT_SAD4(dqa, %%xmm, 64)\
    "paddusw %%xmm1, %%xmm0 \n\t"\
    HSUM(%%xmm0, %%xmm1, %0)
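
/* Scalar equivalent of the sum_abs_dctelem_* functions generated below
 * (reference only, ignoring the 16-bit saturation mentioned in the FIXME
 * above):
 *
 *     int sum = 0;
 *     for (int i = 0; i < 64; i++)
 *         sum += FFABS(block[i]);
 *     return sum;
 */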
#define DCT_SAD_FUNC(cpu) \
static int sum_abs_dctelem_##cpu(int16_t *block){\

#define DCT_SAD DCT_SAD_MMX
#define HSUM(a,t,dst) HSUM_MMX(a,t,dst)
#define MMABS(a,z) MMABS_MMX(a,z)

#define HSUM(a,t,dst) HSUM_MMXEXT(a,t,dst)
#define MMABS(a,z) MMABS_MMXEXT(a,z)

#define DCT_SAD DCT_SAD_SSE2
#define HSUM(a,t,dst) HSUM_SSE2(a,t,dst)
#if HAVE_SSSE3_INLINE
#define MMABS(a,z) MMABS_SSSE3(a,z)

static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int size){
        "pxor %%mm4, %%mm4 \n"
        "movq (%2,%0), %%mm2 \n"
        "movq (%3,%0,2), %%mm0 \n"
        "movq 8(%3,%0,2), %%mm1 \n"
        "punpckhbw %%mm2, %%mm3 \n"
        "punpcklbw %%mm2, %%mm2 \n"
        "psubw %%mm3, %%mm1 \n"
        "psubw %%mm2, %%mm0 \n"
        "pmaddwd %%mm1, %%mm1 \n"
        "pmaddwd %%mm0, %%mm0 \n"
        "paddd %%mm1, %%mm4 \n"
        "paddd %%mm0, %%mm4 \n"
        "movq %%mm4, %%mm3 \n"
        "psrlq $32, %%mm3 \n"
        "paddd %%mm3, %%mm4 \n"
        :"r"(pix1), "r"(pix2)
#define PHADDD(a, t)\
    "movq "#a", "#t" \n\t"\
    "psrlq $32, "#a" \n\t"\
    "paddd "#t", "#a" \n\t"
/*
   pmulhw:   dst[0-15] = (src[0-15] * dst[0-15])[16-31]
   pmulhrw:  dst[0-15] = (src[0-15] * dst[0-15] + 0x8000)[16-31]
   pmulhrsw: dst[0-15] = (src[0-15] * dst[0-15] + 0x4000)[15-30]
*/
#define PMULHRW(x, y, s, o)\
    "pmulhw " #s ", "#x " \n\t"\
    "pmulhw " #s ", "#y " \n\t"\
    "paddw " #o ", "#x " \n\t"\
    "paddw " #o ", "#y " \n\t"\
    "psraw $1, "#x " \n\t"\
    "psraw $1, "#y " \n\t"
#define DEF(x) x ## _mmx
#define SET_RND MOVQ_WONE
#define SCALE_OFFSET 1

#include "dsputil_qns_template.c"
#define DEF(x) x ## _3dnow
#define SCALE_OFFSET 0
#define PMULHRW(x, y, s, o)\
    "pmulhrw " #s ", "#x " \n\t"\
    "pmulhrw " #s ", "#y " \n\t"

#include "dsputil_qns_template.c"
#if HAVE_SSSE3_INLINE
#define DEF(x) x ## _ssse3
#define SCALE_OFFSET -1
#define PHADDD(a, t)\
    "pshufw $0x0E, "#a", "#t" \n\t"\
    "paddd "#t", "#a" \n\t" /* faster than phaddd on core2 */
#define PMULHRW(x, y, s, o)\
    "pmulhrsw " #s ", "#x " \n\t"\
    "pmulhrsw " #s ", "#y " \n\t"

#include "dsputil_qns_template.c"

#endif /* HAVE_SSSE3_INLINE */
#endif /* HAVE_INLINE_ASM */

int ff_sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h);

#define hadamard_func(cpu) \
int ff_hadamard8_diff_##cpu (void *s, uint8_t *src1, uint8_t *src2, \
                             int stride, int h); \
int ff_hadamard8_diff16_##cpu(void *s, uint8_t *src1, uint8_t *src2, \

hadamard_func(mmxext)
void ff_dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
    int mm_flags = av_get_cpu_flags();
    int bit_depth = avctx->bits_per_raw_sample;

    if (EXTERNAL_MMX(mm_flags)) {
        c->get_pixels = ff_get_pixels_mmx;
        c->diff_pixels = ff_diff_pixels_mmx;
        c->pix_sum = ff_pix_sum16_mmx;
        c->pix_norm1 = ff_pix_norm1_mmx;

    if (EXTERNAL_SSE2(mm_flags))
        c->get_pixels = ff_get_pixels_sse2;
#endif /* HAVE_YASM */

    if (mm_flags & AV_CPU_FLAG_MMX) {
        const int dct_algo = avctx->dct_algo;
        if (avctx->bits_per_raw_sample <= 8 &&
            (dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX)) {
            if(mm_flags & AV_CPU_FLAG_SSE2){
                c->fdct = ff_fdct_sse2;
            } else if (mm_flags & AV_CPU_FLAG_MMXEXT) {
                c->fdct = ff_fdct_mmxext;
                c->fdct = ff_fdct_mmx;
        c->diff_bytes= diff_bytes_mmx;
        c->sum_abs_dctelem= sum_abs_dctelem_mmx;

        c->sse[0] = sse16_mmx;
        c->sse[1] = sse8_mmx;
        c->vsad[4]= vsad_intra16_mmx;

        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->vsad[0] = vsad16_mmx;

        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->try_8x8basis= try_8x8basis_mmx;
        c->add_8x8basis= add_8x8basis_mmx;

        c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;

        if (mm_flags & AV_CPU_FLAG_MMXEXT) {
            c->sum_abs_dctelem = sum_abs_dctelem_mmxext;
            c->vsad[4] = vsad_intra16_mmxext;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->vsad[0] = vsad16_mmxext;

            c->sub_hfyu_median_prediction = sub_hfyu_median_prediction_mmxext;

        if(mm_flags & AV_CPU_FLAG_SSE2){
            c->sum_abs_dctelem= sum_abs_dctelem_sse2;

#if HAVE_SSSE3_INLINE
        if(mm_flags & AV_CPU_FLAG_SSSE3){
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->try_8x8basis= try_8x8basis_ssse3;
            c->add_8x8basis= add_8x8basis_ssse3;
            c->sum_abs_dctelem= sum_abs_dctelem_ssse3;

        if(mm_flags & AV_CPU_FLAG_3DNOW){
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->try_8x8basis= try_8x8basis_3dnow;
            c->add_8x8basis= add_8x8basis_3dnow;
#endif /* HAVE_INLINE_ASM */
    if (EXTERNAL_MMX(mm_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmx;

    if (EXTERNAL_MMXEXT(mm_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmxext;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmxext;

    if (EXTERNAL_SSE2(mm_flags)) {
        c->sse[0] = ff_sse16_sse2;

#if HAVE_ALIGNED_STACK
        c->hadamard8_diff[0] = ff_hadamard8_diff16_sse2;
        c->hadamard8_diff[1] = ff_hadamard8_diff_sse2;

    if (EXTERNAL_SSSE3(mm_flags) && HAVE_ALIGNED_STACK) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_ssse3;
        c->hadamard8_diff[1] = ff_hadamard8_diff_ssse3;

    ff_dsputil_init_pix_mmx(c, avctx);