/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */
#include "../dsputil.h"
#include "../simple_idct.h"
#include "../mpegvideo.h"

extern const uint8_t ff_h263_loop_filter_strength[32];
extern void ff_idct_xvid_mmx(short *block);
extern void ff_idct_xvid_mmx2(short *block);

int mm_flags; /* multimedia extension flags */
/* pixel operations */
static const uint64_t mm_bone attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
static const uint64_t mm_wone attribute_used __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
static const uint64_t mm_wtwo attribute_used __attribute__ ((aligned(8))) = 0x0002000200020002ULL;

static const uint64_t ff_pw_20 attribute_used __attribute__ ((aligned(8))) = 0x0014001400140014ULL;
static const uint64_t ff_pw_3  attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
static const uint64_t ff_pw_4  attribute_used __attribute__ ((aligned(8))) = 0x0004000400040004ULL;
static const uint64_t ff_pw_5  attribute_used __attribute__ ((aligned(8))) = 0x0005000500050005ULL;
static const uint64_t ff_pw_8  attribute_used __attribute__ ((aligned(8))) = 0x0008000800080008ULL;
static const uint64_t ff_pw_16 attribute_used __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
static const uint64_t ff_pw_32 attribute_used __attribute__ ((aligned(8))) = 0x0020002000200020ULL;
static const uint64_t ff_pw_64 attribute_used __attribute__ ((aligned(8))) = 0x0040004000400040ULL;
static const uint64_t ff_pw_15 attribute_used __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;

static const uint64_t ff_pb_3F attribute_used __attribute__ ((aligned(8))) = 0x3F3F3F3F3F3F3F3FULL;
static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;
#define JUMPALIGN() __asm __volatile (".balign 8"::)
#define MOVQ_ZERO(regd) __asm __volatile ("pxor %%" #regd ", %%" #regd ::)
#define MOVQ_WONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)
#ifndef PIC
#define MOVQ_BONE(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone))
#define MOVQ_WTWO(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo))
#else
// for shared libraries it is better to build these constants in registers
// instead of loading them from memory
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)
#endif

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "paddb " #regb ", " #regr " \n\t"
#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr,  regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pand " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "por " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"
/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
/* on Athlons, PAVGUSB is preferred */
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB
/***********************************/

#ifdef CONFIG_ENCODERS
static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    asm volatile(
        "mov $-128, %%"REG_a" \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        ".balign 16 \n\t"
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%0, %2), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "movq %%mm0, (%1, %%"REG_a") \n\t"
        "movq %%mm1, 8(%1, %%"REG_a") \n\t"
        "movq %%mm2, 16(%1, %%"REG_a") \n\t"
        "movq %%mm3, 24(%1, %%"REG_a") \n\t"
        "add %3, %0 \n\t"
        "add $32, %%"REG_a" \n\t"
        "js 1b \n\t"
        : "+r" (pixels)
        : "r" (block+64), "r" ((long)line_size), "r" ((long)line_size*2)
        : "%"REG_a
    );
}
static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    asm volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "mov $-128, %%"REG_a" \n\t"
        ".balign 16 \n\t"
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "psubw %%mm2, %%mm0 \n\t"
        "psubw %%mm3, %%mm1 \n\t"
        "movq %%mm0, (%2, %%"REG_a") \n\t"
        "movq %%mm1, 8(%2, %%"REG_a") \n\t"
        "add %3, %0 \n\t"
        "add %3, %1 \n\t"
        "add $16, %%"REG_a" \n\t"
        "js 1b \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" ((long)stride)
        : "%"REG_a
    );
}
#endif //CONFIG_ENCODERS
void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    __asm __volatile(
        "movq %3, %%mm0 \n\t"
        "movq 8%3, %%mm1 \n\t"
        "movq 16%3, %%mm2 \n\t"
        "movq 24%3, %%mm3 \n\t"
        "movq 32%3, %%mm4 \n\t"
        "movq 40%3, %%mm5 \n\t"
        "movq 48%3, %%mm6 \n\t"
        "movq 56%3, %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p += 32;

    // if this were an exact copy of the code above, the compiler
    // would generate some very strange code, thus using "r"
    __asm __volatile(
        "movq (%3), %%mm0 \n\t"
        "movq 8(%3), %%mm1 \n\t"
        "movq 16(%3), %%mm2 \n\t"
        "movq 24(%3), %%mm3 \n\t"
        "movq 32(%3), %%mm4 \n\t"
        "movq 40(%3), %%mm5 \n\t"
        "movq 48(%3), %%mm6 \n\t"
        "movq 56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "r"(p)
        :"memory");
}
static DECLARE_ALIGNED_8(const unsigned char, vector128[8]) =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };
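
/* Explanatory note: packsswb saturates the 16-bit coefficients to the signed
 * range [-128,127]; adding the 0x80 bytes of vector128 (paddb) then maps the
 * result into the unsigned range [0,255] expected in the pixel buffer. */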
void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i;

    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        block += 8;
        paddb_r2r(mm1, mm0);
        movq_r2m(mm0, *pixels);
        pixels += line_size;
    }
}
void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm __volatile(
            "movq (%2), %%mm0 \n\t"
            "movq 8(%2), %%mm1 \n\t"
            "movq 16(%2), %%mm2 \n\t"
            "movq 24(%2), %%mm3 \n\t"
            "movq %0, %%mm4 \n\t"
            "movq %1, %%mm6 \n\t"
            "movq %%mm4, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0 \n\t"
            "paddsw %%mm5, %%mm1 \n\t"
            "movq %%mm6, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2 \n\t"
            "paddsw %%mm5, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %0 \n\t"
            "movq %%mm2, %1 \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ".balign 8 \n\t"
        "1: \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)
        : "%"REG_a, "memory"
    );
}
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ".balign 8 \n\t"
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)
        : "%"REG_a, "memory"
    );
}
static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ".balign 8 \n\t"
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)
        : "%"REG_a, "memory"
    );
}
static void clear_blocks_mmx(DCTELEM *blocks)
{
    __asm __volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "mov $-128*6, %%"REG_a" \n\t"
        "1: \n\t"
        "movq %%mm7, (%0, %%"REG_a") \n\t"
        "movq %%mm7, 8(%0, %%"REG_a") \n\t"
        "movq %%mm7, 16(%0, %%"REG_a") \n\t"
        "movq %%mm7, 24(%0, %%"REG_a") \n\t"
        "add $32, %%"REG_a" \n\t"
        "js 1b \n\t"
        : : "r" (((uint8_t *)blocks)+128*6)
        : "%"REG_a
    );
}
#ifdef CONFIG_ENCODERS
static int pix_sum16_mmx(uint8_t * pix, int line_size){
    const int h=16;
    int sum;
    long index= -line_size*h;

    __asm __volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "pxor %%mm6, %%mm6 \n\t"
        "1: \n\t"
        "movq (%2, %1), %%mm0 \n\t"
        "movq (%2, %1), %%mm1 \n\t"
        "movq 8(%2, %1), %%mm2 \n\t"
        "movq 8(%2, %1), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "paddw %%mm2, %%mm3 \n\t"
        "paddw %%mm1, %%mm3 \n\t"
        "paddw %%mm3, %%mm6 \n\t"
        "add %3, %1 \n\t"
        " js 1b \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $32, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $16, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movd %%mm6, %0 \n\t"
        "andl $0xFFFF, %0 \n\t"
        : "=&r" (sum), "+r" (index)
        : "r" (pix - index), "r" ((long)line_size)
    );

    return sum;
}
#endif //CONFIG_ENCODERS
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    long i=0;
    asm volatile(
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "cmp %3, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((long)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}
#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t"\
    "movq %0, %%mm0 \n\t"\
    "movq %0, %%mm1 \n\t"\
    "movq %3, %%mm2 \n\t"\
    "movq %3, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "psubw %%mm2, %%mm0 \n\t"\
    "psubw %%mm3, %%mm1 \n\t"\
    "movq %1, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "movq %2, %%mm5 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm7, %%mm5 \n\t"\
    "psubw %%mm2, %%mm4 \n\t"\
    "psubw %%mm3, %%mm5 \n\t"\
    "psllw $2, %%mm4 \n\t"\
    "psllw $2, %%mm5 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpgtw %%mm4, %%mm6 \n\t"\
    "pcmpgtw %%mm5, %%mm7 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "pxor %%mm7, %%mm5 \n\t"\
    "psubw %%mm6, %%mm4 \n\t"\
    "psubw %%mm7, %%mm5 \n\t"\
    "psrlw $3, %%mm4 \n\t"\
    "psrlw $3, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm4 \n\t"\
    "packsswb %%mm7, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %4, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "psubusb %%mm4, %%mm2 \n\t"\
    "movq %%mm2, %%mm3 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "psubb %%mm3, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm3 \n\t"\
    "psubusb %%mm2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm2 \n\t"\
    "packsswb %%mm1, %%mm0 \n\t"\
    "pcmpgtb %%mm0, %%mm7 \n\t"\
    "pxor %%mm7, %%mm0 \n\t"\
    "psubb %%mm7, %%mm0 \n\t"\
    "movq %%mm0, %%mm1 \n\t"\
    "psubusb %%mm2, %%mm0 \n\t"\
    "psubb %%mm0, %%mm1 \n\t"\
    "pand %5, %%mm1 \n\t"\
    "psrlw $2, %%mm1 \n\t"\
    "pxor %%mm7, %%mm1 \n\t"\
    "psubb %%mm7, %%mm1 \n\t"\
    "movq %0, %%mm5 \n\t"\
    "movq %3, %%mm6 \n\t"\
    "psubb %%mm1, %%mm5 \n\t"\
    "paddb %%mm1, %%mm6 \n\t"
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];

    asm volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %0 \n\t"
        "movq %%mm6, %3 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
}
static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd %4, %%mm0 \n\t"
        "movd %5, %%mm1 \n\t"
        "movd %6, %%mm2 \n\t"
        "movd %7, %%mm3 \n\t"
        "punpcklbw %%mm1, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "punpcklwd %%mm2, %%mm0 \n\t"
        "punpckhwd %%mm2, %%mm1 \n\t"
        "movd %%mm0, %0 \n\t"
        "punpckhdq %%mm0, %%mm0 \n\t"
        "movd %%mm0, %1 \n\t"
        "movd %%mm1, %2 \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, %3 \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        : "m" (*(uint32_t*)(src + 0*src_stride)),
          "m" (*(uint32_t*)(src + 1*src_stride)),
          "m" (*(uint32_t*)(src + 2*src_stride)),
          "m" (*(uint32_t*)(src + 3*src_stride))
    );
}
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];
    uint64_t temp[4] __attribute__ ((aligned(8)));
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    asm volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    asm volatile(
        "movq %%mm5, %%mm1 \n\t"
        "movq %%mm4, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm5 \n\t"
        "punpcklbw %%mm6, %%mm4 \n\t"
        "punpckhbw %%mm3, %%mm1 \n\t"
        "punpckhbw %%mm6, %%mm0 \n\t"
        "movq %%mm5, %%mm3 \n\t"
        "movq %%mm1, %%mm6 \n\t"
        "punpcklwd %%mm4, %%mm5 \n\t"
        "punpcklwd %%mm0, %%mm1 \n\t"
        "punpckhwd %%mm4, %%mm3 \n\t"
        "punpckhwd %%mm0, %%mm6 \n\t"
        "movd %%mm5, (%0) \n\t"
        "punpckhdq %%mm5, %%mm5 \n\t"
        "movd %%mm5, (%0,%2) \n\t"
        "movd %%mm3, (%0,%2,2) \n\t"
        "punpckhdq %%mm3, %%mm3 \n\t"
        "movd %%mm3, (%0,%3) \n\t"
        "movd %%mm1, (%1) \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, (%1,%2) \n\t"
        "movd %%mm6, (%1,%2,2) \n\t"
        "punpckhdq %%mm6, %%mm6 \n\t"
        "movd %%mm6, (%1,%3) \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((long) stride ),
           "r" ((long)(3*stride))
    );
}
#ifdef CONFIG_ENCODERS
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
    int tmp;
    asm volatile (
        "movl $16,%%ecx\n"
        "pxor %%mm0,%%mm0\n"
        "pxor %%mm7,%%mm7\n"
        "1:\n"
        "movq (%0),%%mm2\n"       /* mm2 = pix[0-7] */
        "movq 8(%0),%%mm3\n"      /* mm3 = pix[8-15] */

        "movq %%mm2,%%mm1\n"      /* mm1 = mm2 = pix[0-7] */

        "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
        "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */

        "movq %%mm3,%%mm4\n"      /* mm4 = mm3 = pix[8-15] */
        "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
        "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */

        "pmaddwd %%mm1,%%mm1\n"   /* mm1 = (pix0^2+pix1^2,pix2^2+pix3^2) */
        "pmaddwd %%mm2,%%mm2\n"   /* mm2 = (pix4^2+pix5^2,pix6^2+pix7^2) */

        "pmaddwd %%mm3,%%mm3\n"
        "pmaddwd %%mm4,%%mm4\n"

        "paddd %%mm1,%%mm2\n"     /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
                                            pix2^2+pix3^2+pix6^2+pix7^2) */
        "paddd %%mm3,%%mm4\n"
        "paddd %%mm2,%%mm7\n"

        "add %2, %0\n"
        "paddd %%mm4,%%mm7\n"
        "dec %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%1\n"
        : "+r" (pix), "=r"(tmp) : "r" ((long)line_size) : "%ecx" );
    return tmp;
}
static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %4,%%ecx\n"
        "shr $1,%%ecx\n"
        "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n"       /* mm1 = pix1[0][0-7] */
        "movq (%1),%%mm2\n"       /* mm2 = pix2[0][0-7] */
        "movq (%0,%3),%%mm3\n"    /* mm3 = pix1[1][0-7] */
        "movq (%1,%3),%%mm4\n"    /* mm4 = pix2[1][0-7] */

        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /*       OR the results to get absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"

        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"

        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"

        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */

        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"

        "lea (%0,%3,2), %0\n"     /* pix1 += 2*line_size */
        "lea (%1,%3,2), %1\n"     /* pix2 += 2*line_size */

        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"

        "decl %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n"       /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n"       /* mm2 = pix2[0-7] */
        "movq 8(%0),%%mm3\n"      /* mm3 = pix1[8-15] */
        "movq 8(%1),%%mm4\n"      /* mm4 = pix2[8-15] */

        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /*       OR the results to get absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"

        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"

        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"

        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */

        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"

        "add %3,%0\n"
        "add %3,%1\n"

        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"

        "decl %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
static int sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    asm volatile (
        "shr $1,%2\n"
        "pxor %%xmm0,%%xmm0\n"    /* xmm0 = 0 */
        "pxor %%xmm7,%%xmm7\n"    /* xmm7 holds the sum */
        "1:\n"
        "movdqu (%0),%%xmm1\n"    /* xmm1 = pix1[0][0-15] */
        "movdqu (%1),%%xmm2\n"    /* xmm2 = pix2[0][0-15] */
        "movdqu (%0,%4),%%xmm3\n" /* xmm3 = pix1[1][0-15] */
        "movdqu (%1,%4),%%xmm4\n" /* xmm4 = pix2[1][0-15] */

        /* todo: xmm1-xmm2, xmm3-xmm4 */
        /* algo: subtract xmm1 from xmm2 with saturation and vice versa */
        /*       OR the results to get absolute difference */
        "movdqa %%xmm1,%%xmm5\n"
        "movdqa %%xmm3,%%xmm6\n"
        "psubusb %%xmm2,%%xmm1\n"
        "psubusb %%xmm4,%%xmm3\n"
        "psubusb %%xmm5,%%xmm2\n"
        "psubusb %%xmm6,%%xmm4\n"

        "por %%xmm1,%%xmm2\n"
        "por %%xmm3,%%xmm4\n"

        /* now convert to 16-bit vectors so we can square them */
        "movdqa %%xmm2,%%xmm1\n"
        "movdqa %%xmm4,%%xmm3\n"

        "punpckhbw %%xmm0,%%xmm2\n"
        "punpckhbw %%xmm0,%%xmm4\n"
        "punpcklbw %%xmm0,%%xmm1\n" /* xmm1 now spread over (xmm1,xmm2) */
        "punpcklbw %%xmm0,%%xmm3\n" /* xmm4 now spread over (xmm3,xmm4) */

        "pmaddwd %%xmm2,%%xmm2\n"
        "pmaddwd %%xmm4,%%xmm4\n"
        "pmaddwd %%xmm1,%%xmm1\n"
        "pmaddwd %%xmm3,%%xmm3\n"

        "lea (%0,%4,2), %0\n"     /* pix1 += 2*line_size */
        "lea (%1,%4,2), %1\n"     /* pix2 += 2*line_size */

        "paddd %%xmm2,%%xmm1\n"
        "paddd %%xmm4,%%xmm3\n"
        "paddd %%xmm1,%%xmm7\n"
        "paddd %%xmm3,%%xmm7\n"

        "decl %2\n"
        "jnz 1b\n"

        "movdqa %%xmm7,%%xmm1\n"
        "psrldq $8, %%xmm7\n"     /* shift hi qword to lo */
        "paddd %%xmm1,%%xmm7\n"
        "movdqa %%xmm7,%%xmm1\n"
        "psrldq $4, %%xmm7\n"     /* shift hi dword to lo */
        "paddd %%xmm1,%%xmm7\n"
        "movd %%xmm7,%3\n"
        : "+r" (pix1), "+r" (pix2), "+r"(h), "=r"(tmp)
        : "r" ((long)line_size));
    return tmp;
}
static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"

        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "1:\n"

        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((long)line_size) , "g" (h-2)
        : "%ecx");
    return tmp;
}
static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
    uint8_t * pix= pix1;
    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"

        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "1:\n"

        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((long)line_size) , "g" (h-2)
        : "%ecx");
    return tmp + hf_noise8_mmx(pix+8, line_size, h);
}
static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1, score2;

    if(c) score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
    else  score1 = sse16_mmx(c, pix1, pix2, line_size, h);
    score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);

    if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
    else  return score1 + ABS(score2)*8;
}
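
/* Explanatory note: NSSE ("noise preserving SSE") biases plain SSE by the
 * difference in high-frequency energy between source and reconstruction,
 * so a candidate that smooths texture away gets penalized:
 *   score = sse + nsse_weight * |hf_noise(pix1) - hf_noise(pix2)| */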
static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1= sse8_mmx(c, pix1, pix2, line_size, h);
    int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);

    if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
    else  return score1 + ABS(score2)*8;
}
static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), %%mm2\n"\
    "movq 8(%0), %%mm3\n"\
    "add %2,%0\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"
1219 "pxor %%mm6,%%mm6\n"
1220 "pxor %%mm7,%%mm7\n"
1222 "movq 8(%0),%%mm1\n"
1225 SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1228 SUM(%%mm4, %%mm5, %%mm0, %%mm1)
1230 SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1235 "movq %%mm6,%%mm0\n"
1236 "psrlq $32, %%mm6\n"
1237 "paddw %%mm6,%%mm0\n"
1238 "movq %%mm0,%%mm6\n"
1239 "psrlq $16, %%mm0\n"
1240 "paddw %%mm6,%%mm0\n"
1242 : "+r" (pix), "=r"(tmp)
1243 : "r" ((long)line_size) , "m" (h)
1245 return tmp & 0xFFFF;
static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), " #out0 "\n"\
    "movq 8(%0), " #out1 "\n"\
    "add %2,%0\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "add %2,%0\n"
        "subl $2, %%ecx\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)

        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movd %%mm6,%1\n"
        : "+r" (pix), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM
static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0),%%mm2\n"\
    "movq (%1)," #out0 "\n"\
    "movq 8(%0),%%mm3\n"\
    "movq 8(%1)," #out1 "\n"\
    "add %3,%0\n"\
    "add %3,%1\n"\
    "psubb " #out0 ", %%mm2\n"\
    "psubb " #out1 ", %%mm3\n"\
    "pxor %%mm7, %%mm2\n"\
    "pxor %%mm7, %%mm3\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "subl $2, %%ecx\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)

        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
        : "%ecx");
    return tmp & 0x7FFF;
}
#undef SUM
static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0)," #out0 "\n"\
    "movq (%1),%%mm2\n"\
    "movq 8(%0)," #out1 "\n"\
    "movq 8(%1),%%mm3\n"\
    "add %3,%0\n"\
    "add %3,%1\n"\
    "psubb %%mm2, " #out0 "\n"\
    "psubb %%mm3, " #out1 "\n"\
    "pxor %%mm7, " #out0 "\n"\
    "pxor %%mm7, " #out1 "\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "subl $2, %%ecx\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)

        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movd %%mm6,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM
static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    long i=0;
    asm volatile(
        "1: \n\t"
        "movq (%2, %0), %%mm0 \n\t"
        "movq (%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%3, %0) \n\t"
        "movq 8(%2, %0), %%mm0 \n\t"
        "movq 8(%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%3, %0) \n\t"
        "add $16, %0 \n\t"
        "cmp %4, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"((long)w-15)
    );
    for(; i<w; i++)
        dst[i+0] = src1[i+0]-src2[i+0];
}
static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
    long i=0;
    uint8_t l, lt;

    asm volatile(
        "1: \n\t"
        "movq -1(%1, %0), %%mm0 \n\t" // LT
        "movq (%1, %0), %%mm1 \n\t" // T
        "movq -1(%2, %0), %%mm2 \n\t" // L
        "movq (%2, %0), %%mm3 \n\t" // X
        "movq %%mm2, %%mm4 \n\t" // L
        "psubb %%mm0, %%mm2 \n\t"
        "paddb %%mm1, %%mm2 \n\t" // L + T - LT
        "movq %%mm4, %%mm5 \n\t" // L
        "pmaxub %%mm1, %%mm4 \n\t" // max(T, L)
        "pminub %%mm5, %%mm1 \n\t" // min(T, L)
        "pminub %%mm2, %%mm4 \n\t"
        "pmaxub %%mm1, %%mm4 \n\t"
        "psubb %%mm4, %%mm3 \n\t" // dst - pred
        "movq %%mm3, (%3, %0) \n\t"
        "add $8, %0 \n\t"
        "cmp %4, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"((long)w)
    );

    l= *left;
    lt= *left_top;

    dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);

    *left_top= src1[w-1];
    *left    = src2[w-1];
}
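
/* Explanatory note: the median predictor is mid_pred(L, T, L+T-LT). The asm
 * computes it branchlessly by clamping L+T-LT into [min(L,T), max(L,T)] with
 * pminub/pmaxub; the median of three values is exactly the gradient estimate
 * clipped to that interval. */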
#define LBUTTERFLY2(a1,b1,a2,b2)\
    "paddw " #b1 ", " #a1 " \n\t"\
    "paddw " #b2 ", " #a2 " \n\t"\
    "paddw " #b1 ", " #b1 " \n\t"\
    "paddw " #b2 ", " #b2 " \n\t"\
    "psubw " #a1 ", " #b1 " \n\t"\
    "psubw " #a2 ", " #b2 " \n\t"
#define HADAMARD48\
    LBUTTERFLY2(%%mm0, %%mm1, %%mm2, %%mm3)\
    LBUTTERFLY2(%%mm4, %%mm5, %%mm6, %%mm7)\
    LBUTTERFLY2(%%mm0, %%mm2, %%mm1, %%mm3)\
    LBUTTERFLY2(%%mm4, %%mm6, %%mm5, %%mm7)\
    LBUTTERFLY2(%%mm0, %%mm4, %%mm1, %%mm5)\
    LBUTTERFLY2(%%mm2, %%mm6, %%mm3, %%mm7)\

#define MMABS(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "pcmpgtw " #a ", " #z " \n\t"\
    "pxor " #z ", " #a " \n\t"\
    "psubw " #z ", " #a " \n\t"

#define MMABS_SUM(a,z, sum)\
    "pxor " #z ", " #z " \n\t"\
    "pcmpgtw " #a ", " #z " \n\t"\
    "pxor " #z ", " #a " \n\t"\
    "psubw " #z ", " #a " \n\t"\
    "paddusw " #a ", " #sum " \n\t"

#define MMABS_MMX2(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "psubw " #a ", " #z " \n\t"\
    "pmaxsw " #z ", " #a " \n\t"

#define MMABS_SUM_MMX2(a,z, sum)\
    "pxor " #z ", " #z " \n\t"\
    "psubw " #a ", " #z " \n\t"\
    "pmaxsw " #z ", " #a " \n\t"\
    "paddusw " #a ", " #sum " \n\t"
#define SBUTTERFLY(a,b,t,n)\
    "movq " #a ", " #t " \n\t" /* abcd */\
    "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
    "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\

#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */
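
/* Explanatory note: TRANSPOSE4 transposes a 4x4 matrix of 16-bit words held
 * in four MMX registers using two levels of interleaves; as the per-step
 * comments show, the result rows end up in a, d, t, c (not a, b, c, d),
 * which is why the callers below shuffle their STORE4 arguments. */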
#define LOAD4(o, a, b, c, d)\
    "movq "#o"(%1), " #a " \n\t"\
    "movq "#o"+16(%1), " #b " \n\t"\
    "movq "#o"+32(%1), " #c " \n\t"\
    "movq "#o"+48(%1), " #d " \n\t"

#define STORE4(o, a, b, c, d)\
    "movq "#a", "#o"(%1) \n\t"\
    "movq "#b", "#o"+16(%1) \n\t"\
    "movq "#c", "#o"+32(%1) \n\t"\
    "movq "#d", "#o"+48(%1) \n\t"\
static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    DECLARE_ALIGNED_8(uint64_t, temp[16]);
    int sum=0;

    assert(h==8);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

    asm volatile(
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 112(%1) \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 112(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)

        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 120(%1) \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 120(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5 \n\t"//FIXME remove
        "movq %%mm6, %%mm7 \n\t"
        "movq %%mm0, %%mm6 \n\t"
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove

        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, 64(%1) \n\t"
        MMABS(%%mm0, %%mm7)
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1) \n\t"

        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, (%1) \n\t"
        MMABS(%%mm0, %%mm7)
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1 \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)

        "movq %%mm0, %%mm1 \n\t"
        "psrlq $32, %%mm0 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "psrlq $16, %%mm0 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "movd %%mm0, %0 \n\t"

        : "=r" (sum)
        : "r"(temp)
        : "memory"
    );
    return sum&0xFFFF;
}
static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    DECLARE_ALIGNED_8(uint64_t, temp[16]);
    int sum=0;

    assert(h==8);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

    asm volatile(
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 112(%1) \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 112(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)

        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 120(%1) \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 120(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5 \n\t"//FIXME remove
        "movq %%mm6, %%mm7 \n\t"
        "movq %%mm0, %%mm6 \n\t"
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove

        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, 64(%1) \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1) \n\t"

        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, (%1) \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1 \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)

        "pshufw $0x0E, %%mm0, %%mm1 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "pshufw $0x01, %%mm0, %%mm1 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "movd %%mm0, %0 \n\t"

        : "=r" (sum)
        : "r"(temp)
        : "memory"
    );
    return sum&0xFFFF;
}

WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
#endif //CONFIG_ENCODERS
#define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
#define put_no_rnd_pixels16_mmx(a,b,c,d) put_pixels16_mmx(a,b,c,d)
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
    "movq "#in7", " #m3 " \n\t" /* d */\
    "movq "#in0", %%mm5 \n\t" /* D */\
    "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
    "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5 \n\t" /* C */\
    "movq "#in2", %%mm6 \n\t" /* B */\
    "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
    "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4 \n\t" /* x2 */\
    "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    OP(%%mm5, out, %%mm7, d)
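
/* Explanatory note: QPEL_V_LOW evaluates one output of the MPEG-4
 * quarter-pel FIR filter (taps 20, -6, 3, -1 applied to pairwise sums):
 *   out = (20*x1 - 6*x2 + 3*x3 - x4 + rounder) >> 5
 * saturated to a byte with packuswb before OP writes it out. */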
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %6, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        "movq %%mm0, %5 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2 \n\t" /* b */\
        "paddw %%mm5, %%mm3 \n\t" /* c */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm4 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "movq %5, %%mm1 \n\t"\
        "packuswb %%mm3, %%mm1 \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
        "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5 \n\t" /* b */\
        "paddw %%mm4, %%mm0 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2 \n\t" /* d */\
        "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6 \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
        "paddw %%mm5, %%mm3 \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4 \n\t" /* c */\
        "paddw %%mm2, %%mm5 \n\t" /* d */\
        "paddw %%mm6, %%mm6 \n\t" /* 2b */\
        "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4 \n\t"\
        "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4 \n\t"\
        "packuswb %%mm4, %%mm0 \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((long)srcStride), "S"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        asm volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0 \n\t"\
            "movq 24(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %6, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movd 5(%0), %%mm5 \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm2 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3 \n\t" /* c */\
        "paddw %%mm5, %%mm4 \n\t" /* d */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "packuswb %%mm3, %%mm0 \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "S"((long)srcStride), "D"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        asm volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "movq 8(%0), %%mm2 \n\t"\
        "movq 8(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 17*8(%1) \n\t"\
        "movq %%mm2, 2*17*8(%1) \n\t"\
        "movq %%mm3, 3*17*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((long)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count= 4;\
\
    /*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
        "add $136, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(long)dstStride)\
        : "memory"\
    );\
}\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 9*8(%1)            \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((long)srcStride)\
        : "memory"\
    );\
    temp_ptr= temp;\
    count= 2;\
\
    /*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7              \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
        "add $72, %0                    \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(long)dstStride)\
        : "memory"\
    );\
}\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_mmx(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_mmx(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}
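/* mcXY naming above: X and Y are the quarter-pel phases in x and y
 * (0 = full pel, 2 = half pel, 1/3 = quarter pel). Pure phase-2 cases call
 * one lowpass directly; all other fractional cases build intermediate
 * planes (half, halfH, halfHV) in stack buffers and blend them with the
 * pixels*_l2 averaging helpers, e.g. mc11 averages the h-filtered plane
 * with the hv-filtered one. */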
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgusb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
static void just_return() { return; }
#define SET_QPEL_FUNC(postfix1, postfix2) \
    c->put_ ## postfix1 = put_ ## postfix2;\
    c->put_no_rnd_ ## postfix1 = put_no_rnd_ ## postfix2;\
    c->avg_ ## postfix1 = avg_ ## postfix2;
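/* Example expansion (illustrative):
 *   SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2)
 * sets
 *   c->put_qpel_pixels_tab[0][ 5]        = put_qpel16_mc11_mmx2;
 *   c->put_no_rnd_qpel_pixels_tab[0][ 5] = put_no_rnd_qpel16_mc11_mmx2;
 *   c->avg_qpel_pixels_tab[0][ 5]        = avg_qpel16_mc11_mmx2;
 */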
static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
    long i=0;

    assert(ABS(scale) < 256);
    scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
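    /* pmulhw yields (a*b)>>16, so after this shift pmulhw(basis[i], scale)
     * equals basis[i]*scale >> (BASIS_SHIFT - RECON_SHIFT) with one extra
     * LSB of precision; the paddw/psraw $1 pair in the loop below rounds
     * that bit away instead of truncating. */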
2413 "pcmpeqw %%mm6, %%mm6 \n\t" // -1w
2414 "psrlw $15, %%mm6 \n\t" // 1w
2415 "pxor %%mm7, %%mm7 \n\t"
2416 "movd %4, %%mm5 \n\t"
2417 "punpcklwd %%mm5, %%mm5 \n\t"
2418 "punpcklwd %%mm5, %%mm5 \n\t"
2420 "movq (%1, %0), %%mm0 \n\t"
2421 "movq 8(%1, %0), %%mm1 \n\t"
2422 "pmulhw %%mm5, %%mm0 \n\t"
2423 "pmulhw %%mm5, %%mm1 \n\t"
2424 "paddw %%mm6, %%mm0 \n\t"
2425 "paddw %%mm6, %%mm1 \n\t"
2426 "psraw $1, %%mm0 \n\t"
2427 "psraw $1, %%mm1 \n\t"
2428 "paddw (%2, %0), %%mm0 \n\t"
2429 "paddw 8(%2, %0), %%mm1 \n\t"
2430 "psraw $6, %%mm0 \n\t"
2431 "psraw $6, %%mm1 \n\t"
2432 "pmullw (%3, %0), %%mm0 \n\t"
2433 "pmullw 8(%3, %0), %%mm1 \n\t"
2434 "pmaddwd %%mm0, %%mm0 \n\t"
2435 "pmaddwd %%mm1, %%mm1 \n\t"
2436 "paddd %%mm1, %%mm0 \n\t"
2437 "psrld $4, %%mm0 \n\t"
2438 "paddd %%mm0, %%mm7 \n\t"
2440 "cmp $128, %0 \n\t" //FIXME optimize & bench
2442 "movq %%mm7, %%mm6 \n\t"
2443 "psrlq $32, %%mm7 \n\t"
2444 "paddd %%mm6, %%mm7 \n\t"
2445 "psrld $2, %%mm7 \n\t"
2446 "movd %%mm7, %0 \n\t"
2449 : "r"(basis), "r"(rem), "r"(weight), "g"(scale)
static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){
    long i=0;

    if(ABS(scale) < 256){
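        /* the MMX path multiplies with pmulhw, which keeps only the high
         * 16 bits of the product; |scale| >= 256 would overflow that, so
         * large scales take the exact scalar fallback below. */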
        scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
        asm volatile(
            "pcmpeqw %%mm6, %%mm6       \n\t" // -1w
            "psrlw $15, %%mm6           \n\t" //  1w
            "movd %3, %%mm5             \n\t"
            "punpcklwd %%mm5, %%mm5     \n\t"
            "punpcklwd %%mm5, %%mm5     \n\t"
            "1:                         \n\t"
            "movq (%1, %0), %%mm0       \n\t"
            "movq 8(%1, %0), %%mm1      \n\t"
            "pmulhw %%mm5, %%mm0        \n\t"
            "pmulhw %%mm5, %%mm1        \n\t"
            "paddw %%mm6, %%mm0         \n\t"
            "paddw %%mm6, %%mm1         \n\t"
            "psraw $1, %%mm0            \n\t"
            "psraw $1, %%mm1            \n\t"
            "paddw (%2, %0), %%mm0      \n\t"
            "paddw 8(%2, %0), %%mm1     \n\t"
            "movq %%mm0, (%2, %0)       \n\t"
            "movq %%mm1, 8(%2, %0)      \n\t"
            "add $16, %0                \n\t"
            "cmp $128, %0               \n\t" //FIXME optimize & bench
            " jb 1b                     \n\t"
            : "+r" (i)
            : "r"(basis), "r"(rem), "g"(scale)
        );
    }else{
        for(i=0; i<8*8; i++){
            rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
        }
    }
}
#include "h264dsp_mmx.c"
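/* included as a .c file on purpose: the H.264 DSP routines reuse the static
 * rounder/bias constants and helper macros defined earlier in this
 * translation unit */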
/* external functions, from idct_mmx.c */
void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);

void ff_vp3_idct_sse2(int16_t *input_data);
void ff_vp3_idct_mmx(int16_t *data);
void ff_vp3_dsp_init_mmx(void);
/* XXX: those functions should be suppressed ASAP when all IDCTs are
   converted to add/put_pixels functions */
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_vp3_idct_sse2(block);
    put_signed_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_vp3_idct_sse2(block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_vp3_idct_mmx(block);
    put_signed_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_vp3_idct_mmx(block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
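/* All the wrappers above share one shape: run the IDCT in place on the
 * 16-bit coefficient block, then either store the clamped result (put,
 * intra blocks) or add it to the existing prediction (add, inter blocks).
 * The VP3 variants use the signed store, which applies a +128 bias while
 * clamping. */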
#ifdef CONFIG_SNOW_ENCODER
extern void ff_snow_horizontal_compose97i_sse2(DWTELEM *b, int width);
extern void ff_snow_horizontal_compose97i_mmx(DWTELEM *b, int width);
extern void ff_snow_vertical_compose97i_sse2(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width);
extern void ff_snow_vertical_compose97i_mmx(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width);
#endif
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    mm_flags = mm_support();

    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & FF_MM_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }
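    /* e.g. dsp_mask = FF_MM_FORCE | MM_MMXEXT force-enables the MMX2 paths
     * even if mm_support() did not report them; without FF_MM_FORCE the
     * listed flags are force-disabled instead. */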
#if 0
    av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
    if (mm_flags & MM_MMX)
        av_log(avctx, AV_LOG_INFO, " mmx");
    if (mm_flags & MM_MMXEXT)
        av_log(avctx, AV_LOG_INFO, " mmxext");
    if (mm_flags & MM_3DNOW)
        av_log(avctx, AV_LOG_INFO, " 3dnow");
    if (mm_flags & MM_SSE)
        av_log(avctx, AV_LOG_INFO, " sse");
    if (mm_flags & MM_SSE2)
        av_log(avctx, AV_LOG_INFO, " sse2");
    av_log(avctx, AV_LOG_INFO, "\n");
#endif
    if (mm_flags & MM_MMX) {
        const int idct_algo= avctx->idct_algo;

#ifdef CONFIG_ENCODERS
        const int dct_algo = avctx->dct_algo;
        if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
            if(mm_flags & MM_SSE2){
                c->fdct = ff_fdct_sse2;
            }else if(mm_flags & MM_MMXEXT){
                c->fdct = ff_fdct_mmx2;
            }else{
                c->fdct = ff_fdct_mmx;
            }
        }
#endif //CONFIG_ENCODERS
        if(avctx->lowres==0){
            if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
                c->idct_put= ff_simple_idct_put_mmx;
                c->idct_add= ff_simple_idct_add_mmx;
                c->idct    = ff_simple_idct_mmx;
                c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
                if(mm_flags & MM_MMXEXT){
                    c->idct_put= ff_libmpeg2mmx2_idct_put;
                    c->idct_add= ff_libmpeg2mmx2_idct_add;
                    c->idct    = ff_mmxext_idct;
                }else{
                    c->idct_put= ff_libmpeg2mmx_idct_put;
                    c->idct_add= ff_libmpeg2mmx_idct_add;
                    c->idct    = ff_mmx_idct;
                }
                c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_VP3){
                if(mm_flags & MM_SSE2){
                    c->idct_put= ff_vp3_idct_put_sse2;
                    c->idct_add= ff_vp3_idct_add_sse2;
                    c->idct    = ff_vp3_idct_sse2;
                    c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
                }else{
                    ff_vp3_dsp_init_mmx();
                    c->idct_put= ff_vp3_idct_put_mmx;
                    c->idct_add= ff_vp3_idct_add_mmx;
                    c->idct    = ff_vp3_idct_mmx;
                    c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
                }
            }else if(idct_algo==FF_IDCT_XVIDMMX){
                if(mm_flags & MM_MMXEXT){
                    c->idct_put= ff_idct_xvid_mmx2_put;
                    c->idct_add= ff_idct_xvid_mmx2_add;
                    c->idct    = ff_idct_xvid_mmx2;
                }else{
                    c->idct_put= ff_idct_xvid_mmx_put;
                    c->idct_add= ff_idct_xvid_mmx_add;
                    c->idct    = ff_idct_xvid_mmx;
                }
            }
        }
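        /* each IDCT is paired with the coefficient permutation its scantables
         * expect (FF_SIMPLE_IDCT_PERM, FF_LIBMPEG2_IDCT_PERM, transposed for
         * the VP3 SSE2 path, ...); setting c->idct without the matching
         * idct_permutation_type would silently decode garbage, which is why
         * the two are always assigned together. */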
#ifdef CONFIG_ENCODERS
        c->get_pixels = get_pixels_mmx;
        c->diff_pixels = diff_pixels_mmx;
#endif //CONFIG_ENCODERS
        c->put_pixels_clamped = put_pixels_clamped_mmx;
        c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
        c->add_pixels_clamped = add_pixels_clamped_mmx;
        c->clear_blocks = clear_blocks_mmx;
#ifdef CONFIG_ENCODERS
        c->pix_sum = pix_sum16_mmx;
#endif //CONFIG_ENCODERS

        c->put_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
        c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
        c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;

        c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
        c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx;

        c->avg_pixels_tab[0][0] = avg_pixels16_mmx;
        c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx;
        c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx;
        c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;

        c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx;
        c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx;
        c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx;
        c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx;

        c->put_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_pixels_tab[1][1] = put_pixels8_x2_mmx;
        c->put_pixels_tab[1][2] = put_pixels8_y2_mmx;
        c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx;

        c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
        c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
        c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx;

        c->avg_pixels_tab[1][0] = avg_pixels8_mmx;
        c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx;
        c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
        c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;

        c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx;
        c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx;
        c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx;
        c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx;

        c->add_bytes= add_bytes_mmx;
#ifdef CONFIG_ENCODERS
        c->diff_bytes= diff_bytes_mmx;

        c->hadamard8_diff[0]= hadamard8_diff16_mmx;
        c->hadamard8_diff[1]= hadamard8_diff_mmx;

        c->pix_norm1 = pix_norm1_mmx;
        c->sse[0] = (mm_flags & MM_SSE2) ? sse16_sse2 : sse16_mmx;
        c->sse[1] = sse8_mmx;
        c->vsad[4]= vsad_intra16_mmx;

        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->vsad[0] = vsad16_mmx;
        }

        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->try_8x8basis= try_8x8basis_mmx;
        }
        c->add_8x8basis= add_8x8basis_mmx;
#endif //CONFIG_ENCODERS

        c->h263_v_loop_filter= h263_v_loop_filter_mmx;
        c->h263_h_loop_filter= h263_h_loop_filter_mmx;
        c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx;
        c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;

        c->h264_idct_dc_add=
        c->h264_idct_add= ff_h264_idct_add_mmx;
        c->h264_idct8_dc_add=
        c->h264_idct8_add= ff_h264_idct8_add_mmx;

        if (mm_flags & MM_MMXEXT) {
            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

#ifdef CONFIG_ENCODERS
            c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
            c->hadamard8_diff[1]= hadamard8_diff_mmx2;
            c->vsad[4]= vsad_intra16_mmx2;
#endif //CONFIG_ENCODERS

            c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
            c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
#ifdef CONFIG_ENCODERS
                c->vsad[0] = vsad16_mmx2;
#endif //CONFIG_ENCODERS
            }

            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2)
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_mmx2; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_mmx2; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_mmx2; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_mmx2; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_mmx2; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_mmx2; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_mmx2
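/* dspfunc(put_h264_qpel, 0, 16) fills all 16 quarter-pel positions of
 * c->put_h264_qpel_pixels_tab[0][] with the put_h264_qpel16_mcXY_mmx2
 * functions; the mcXY digits are the x/y quarter-pel phase, as with the
 * MPEG-4 qpel table above. */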
            dspfunc(put_h264_qpel, 0, 16);
            dspfunc(put_h264_qpel, 1, 8);
            dspfunc(put_h264_qpel, 2, 4);
            dspfunc(avg_h264_qpel, 0, 16);
            dspfunc(avg_h264_qpel, 1, 8);
            dspfunc(avg_h264_qpel, 2, 4);
#undef dspfunc

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
            c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
            c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
            c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
            c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
            c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
            c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;

            c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
            c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
            c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
            c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
            c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
            c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
            c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
            c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

            c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
            c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
            c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
            c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
            c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
            c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
            c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
            c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;

#ifdef CONFIG_ENCODERS
            c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
#endif //CONFIG_ENCODERS
        } else if (mm_flags & MM_3DNOW) {
            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_3dnow)
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_3dnow; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_3dnow; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_3dnow; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_3dnow; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_3dnow; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_3dnow; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_3dnow
            dspfunc(put_h264_qpel, 0, 16);
            dspfunc(put_h264_qpel, 1, 8);
            dspfunc(put_h264_qpel, 2, 4);
            dspfunc(avg_h264_qpel, 0, 16);
            dspfunc(avg_h264_qpel, 1, 8);
            dspfunc(avg_h264_qpel, 2, 4);

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
        }

#ifdef CONFIG_SNOW_ENCODER
        if(mm_flags & MM_SSE2){
            c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
            c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
        }
        else{
            c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
            c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
        }
#endif
    }

#ifdef CONFIG_ENCODERS
    dsputil_init_pix_mmx(c, avctx);
#endif //CONFIG_ENCODERS
#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;