/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"
/* pixel operations */
DECLARE_ALIGNED(8,  const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED(8,  const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_4  ) = {0x0004000400040004ULL, 0x0004000400040004ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_9  ) = {0x0009000900090009ULL, 0x0009000900090009ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_18 ) = {0x0012001200120012ULL, 0x0012001200120012ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_27 ) = {0x001B001B001B001BULL, 0x001B001B001B001BULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53 ) = 0x0035003500350035ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_63 ) = {0x003F003F003F003FULL, 0x003F003F003F003FULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_1  ) = {0x0101010101010101ULL, 0x0101010101010101ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_3  ) = {0x0303030303030303ULL, 0x0303030303030303ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_4  ) = {0x0404040404040404ULL, 0x0404040404040404ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_80 ) = {0x8080808080808080ULL, 0x8080808080808080ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_F8 ) = {0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_FE ) = {0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL};

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };
#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)
#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)
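// pcmpeqd fills regd with all ones (0xff in every byte) and paddb doubles each
// byte mod 256, leaving 0xfe in every byte: the rounding mask used by the
// averaging macros below, built without a memory load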
#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for shared libraries it is better to access constants this way
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)
#endif
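// Under PIC an "m"(ff_bone) operand would need a GOT-relative address, so the
// constants are synthesized in registers instead: pcmpeqd yields all ones,
// psrlw $15 turns each word into 0x0001, then packuswb gives 0x01 bytes (BONE)
// or psllw $1 gives 0x0002 words (WTWO)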
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"
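// in effect: floor((a+b)/2) == (a & b) + (((a ^ b) & 0xfe) >> 1) per byte;
// the 0xfe mask keeps the 64-bit shift from leaking bits between byte lanes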
#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"
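// rounding-up variant: ceil((a+b)/2) == (a | b) - (((a ^ b) & 0xfe) >> 1)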
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pand " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"
#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "por  " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"
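// the paired variants interleave two independent averages so the second
// pair's instructions can issue while the first pair's dependency chain retires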
/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG
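// dsputil_mmx_rnd_template.c is included once per flavour; DEF, SET_RND,
// PAVGBP and PAVGB are (re)defined before each inclusion so a single template
// expands to the rounding and non-rounding variants in this file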
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx
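// a full-pel copy involves no averaging, so rounding is irrelevant and the
// no_rnd puts alias the plain ones; likewise MMX2/3DNow! cannot speed up a
// plain load/store, so they reuse the MMX copies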
/***********************************/

void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    __asm__ volatile(
        "movq   %3, %%mm0           \n\t"
        "movq   8%3, %%mm1          \n\t"
        "movq   16%3, %%mm2         \n\t"
        "movq   24%3, %%mm3         \n\t"
        "movq   32%3, %%mm4         \n\t"
        "movq   40%3, %%mm5         \n\t"
        "movq   48%3, %%mm6         \n\t"
        "movq   56%3, %%mm7         \n\t"
        "packuswb %%mm1, %%mm0      \n\t"
        "packuswb %%mm3, %%mm2      \n\t"
        "packuswb %%mm5, %%mm4      \n\t"
        "packuswb %%mm7, %%mm6      \n\t"
        "movq   %%mm0, (%0)         \n\t"
        "movq   %%mm2, (%0, %1)     \n\t"
        "movq   %%mm4, (%0, %1, 2)  \n\t"
        "movq   %%mm6, (%0, %2)     \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p += 32;
    // if this were an exact copy of the code above, the compiler
    // would generate some very strange code, thus using "r"
    __asm__ volatile(
        "movq   (%3), %%mm0         \n\t"
        "movq   8(%3), %%mm1        \n\t"
        "movq   16(%3), %%mm2       \n\t"
        "movq   24(%3), %%mm3       \n\t"
        "movq   32(%3), %%mm4       \n\t"
        "movq   40(%3), %%mm5       \n\t"
        "movq   48(%3), %%mm6       \n\t"
        "movq   56(%3), %%mm7       \n\t"
        "packuswb %%mm1, %%mm0      \n\t"
        "packuswb %%mm3, %%mm2      \n\t"
        "packuswb %%mm5, %%mm4      \n\t"
        "packuswb %%mm7, %%mm6      \n\t"
        "movq   %%mm0, (%0)         \n\t"
        "movq   %%mm2, (%0, %1)     \n\t"
        "movq   %%mm4, (%0, %1, 2)  \n\t"
        "movq   %%mm6, (%0, %2)     \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
        :"memory");
}
DECLARE_ASM_CONST(8, uint8_t, ff_vector128)[8] =
    { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };
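// packsswb leaves the signed IDCT output in [-128,127]; adding 0x80 to every
// byte flips the sign bit and maps it onto the unsigned [0,255] pixel range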
#define put_signed_pixels_clamped_mmx_half(off) \
    "movq    "#off"(%2), %%mm1      \n\t"\
    "movq 16+"#off"(%2), %%mm2      \n\t"\
    "movq 32+"#off"(%2), %%mm3      \n\t"\
    "movq 48+"#off"(%2), %%mm4      \n\t"\
    "packsswb  8+"#off"(%2), %%mm1  \n\t"\
    "packsswb 24+"#off"(%2), %%mm2  \n\t"\
    "packsswb 40+"#off"(%2), %%mm3  \n\t"\
    "packsswb 56+"#off"(%2), %%mm4  \n\t"\
    "paddb %%mm0, %%mm1             \n\t"\
    "paddb %%mm0, %%mm2             \n\t"\
    "paddb %%mm0, %%mm3             \n\t"\
    "paddb %%mm0, %%mm4             \n\t"\
    "movq %%mm1, (%0)               \n\t"\
    "movq %%mm2, (%0, %3)           \n\t"\
    "movq %%mm3, (%0, %3, 2)        \n\t"\
    "movq %%mm4, (%0, %1)           \n\t"
void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_vector128)", %%mm0 \n\t"
        "lea (%3, %3, 2), %1                \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea (%0, %3, 4), %0                \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        :"+&r" (pixels), "=&r" (line_skip3)
        :"r" (block), "r"(line_skip)
        :"memory");
}
void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
            "movq   (%2), %%mm0     \n\t"
            "movq   8(%2), %%mm1    \n\t"
            "movq   16(%2), %%mm2   \n\t"
            "movq   24(%2), %%mm3   \n\t"
            "movq   %0, %%mm4       \n\t"
            "movq   %1, %%mm6       \n\t"
            "movq   %%mm4, %%mm5    \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0    \n\t"
            "paddsw %%mm5, %%mm1    \n\t"
            "movq   %%mm6, %%mm5    \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2    \n\t"
            "paddsw %%mm5, %%mm3    \n\t"
            "packuswb %%mm1, %%mm0  \n\t"
            "packuswb %%mm3, %%mm2  \n\t"
            "movq   %%mm0, %0       \n\t"
            "movq   %%mm2, %1       \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a"    \n\t"
        "1:                         \n\t"
        "movd (%1), %%mm0           \n\t"
        "movd (%1, %3), %%mm1       \n\t"
        "movd %%mm0, (%2)           \n\t"
        "movd %%mm1, (%2, %3)       \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "movd (%1), %%mm0           \n\t"
        "movd (%1, %3), %%mm1       \n\t"
        "movd %%mm0, (%2)           \n\t"
        "movd %%mm1, (%2, %3)       \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "subl $4, %0                \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a"    \n\t"
        "1:                         \n\t"
        "movq (%1), %%mm0           \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "movq (%1), %%mm0           \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "subl $4, %0                \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}
static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a"    \n\t"
        "1:                         \n\t"
        "movq (%1), %%mm0           \n\t"
        "movq 8(%1), %%mm4          \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq 8(%1, %3), %%mm5      \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm4, 8(%2)          \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "movq %%mm5, 8(%2, %3)      \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "movq (%1), %%mm0           \n\t"
        "movq 8(%1), %%mm4          \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq 8(%1, %3), %%mm5      \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm4, 8(%2)          \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "movq %%mm5, 8(%2, %3)      \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "subl $4, %0                \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}
static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1:                         \n\t"
        "movdqu (%1), %%xmm0        \n\t"
        "movdqu (%1,%3), %%xmm1     \n\t"
        "movdqu (%1,%3,2), %%xmm2   \n\t"
        "movdqu (%1,%4), %%xmm3     \n\t"
        "movdqa %%xmm0, (%2)        \n\t"
        "movdqa %%xmm1, (%2,%3)     \n\t"
        "movdqa %%xmm2, (%2,%3,2)   \n\t"
        "movdqa %%xmm3, (%2,%4)     \n\t"
        "subl $4, %0                \n\t"
        "lea (%1,%3,4), %1          \n\t"
        "lea (%2,%3,4), %2          \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}
static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1:                         \n\t"
        "movdqu (%1), %%xmm0        \n\t"
        "movdqu (%1,%3), %%xmm1     \n\t"
        "movdqu (%1,%3,2), %%xmm2   \n\t"
        "movdqu (%1,%4), %%xmm3     \n\t"
        "pavgb (%2), %%xmm0         \n\t"
        "pavgb (%2,%3), %%xmm1      \n\t"
        "pavgb (%2,%3,2), %%xmm2    \n\t"
        "pavgb (%2,%4), %%xmm3      \n\t"
        "movdqa %%xmm0, (%2)        \n\t"
        "movdqa %%xmm1, (%2,%3)     \n\t"
        "movdqa %%xmm2, (%2,%3,2)   \n\t"
        "movdqa %%xmm3, (%2,%4)     \n\t"
        "subl $4, %0                \n\t"
        "lea (%1,%3,4), %1          \n\t"
        "lea (%2,%3,4), %2          \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}
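// both SSE2 routines pair unaligned loads (movdqu) from the motion-compensated
// source with aligned stores (movdqa): the destination block is assumed to be
// 16-byte aligned, while src, offset by the motion vector, generally is not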
#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "mov  %1, %%"REG_a"             \n\t"\
        "1:                             \n\t"\
        "movq %%mm7, (%0, %%"REG_a")    \n\t"\
        "movq %%mm7, 8(%0, %%"REG_a")   \n\t"\
        "movq %%mm7, 16(%0, %%"REG_a")  \n\t"\
        "movq %%mm7, 24(%0, %%"REG_a")  \n\t"\
        "add $32, %%"REG_a"             \n\t"\
        "js 1b                          \n\t"\
        : : "r" (((uint8_t *)blocks)+128*n),\
            "i" (-128*n)\
        : "%"REG_a\
    );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)
static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps  %%xmm0, %%xmm0      \n"
        "movaps %%xmm0,    (%0)     \n"
        "movaps %%xmm0,  16(%0)     \n"
        "movaps %%xmm0,  32(%0)     \n"
        "movaps %%xmm0,  48(%0)     \n"
        "movaps %%xmm0,  64(%0)     \n"
        "movaps %%xmm0,  80(%0)     \n"
        "movaps %%xmm0,  96(%0)     \n"
        "movaps %%xmm0, 112(%0)     \n"
        :: "r"(block)
        : "memory"
    );
}
static void clear_blocks_sse(DCTELEM *blocks)
{
    __asm__ volatile(
        "xorps  %%xmm0, %%xmm0              \n"
        "mov     %1, %%"REG_a"              \n"
        "1:                                 \n"
        "movaps %%xmm0,    (%0, %%"REG_a")  \n"
        "movaps %%xmm0,  16(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  32(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  48(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  64(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  80(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  96(%0, %%"REG_a")  \n"
        "movaps %%xmm0, 112(%0, %%"REG_a")  \n"
        "add $128, %%"REG_a"                \n"
        "js 1b                              \n"
        : : "r" (((uint8_t *)blocks)+128*6),
            "i" (-128*6)
        : "%"REG_a
    );
}
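// each 8x8 block of 16-bit coefficients occupies 128 bytes; indexing from
// -128*6 up toward zero lets a single add+js pair drive the loop over all
// six blocks of a macroblock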
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                     \n\t"
        "1:                         \n\t"
        "movq  (%1, %0), %%mm0      \n\t"
        "movq  (%2, %0), %%mm1      \n\t"
        "paddb %%mm0, %%mm1         \n\t"
        "movq %%mm1, (%2, %0)       \n\t"
        "movq 8(%1, %0), %%mm0      \n\t"
        "movq 8(%2, %0), %%mm1      \n\t"
        "paddb %%mm0, %%mm1         \n\t"
        "movq %%mm1, 8(%2, %0)      \n\t"
        "add $16, %0                \n\t"
        "2:                         \n\t"
        "cmp %3, %0                 \n\t"
        " js 1b                     \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}
static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                     \n\t"
        "1:                         \n\t"
        "movq  (%2, %0), %%mm0      \n\t"
        "movq 8(%2, %0), %%mm1      \n\t"
        "paddb (%3, %0), %%mm0      \n\t"
        "paddb 8(%3, %0), %%mm1     \n\t"
        "movq %%mm0, (%1, %0)       \n\t"
        "movq %%mm1, 8(%1, %0)      \n\t"
        "add $16, %0                \n\t"
        "2:                         \n\t"
        "cmp %4, %0                 \n\t"
        " js 1b                     \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}
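// the asm path handles 16 bytes per iteration; comparing against w-15 stops
// it before a partial group, and the scalar loop finishes the remaining
// (up to 15) bytes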
#if HAVE_7REGS && HAVE_TEN_OPERANDS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top) {
    x86_reg w2 = -w;
    x86_reg x;
    int l = *left & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile(
        "mov    %7, %3      \n"
        "1:                 \n"
        "movzx (%3,%4), %2  \n"
        "mov    %2, %k3     \n"
        "sub   %b1, %b3     \n"
        "add   %b0, %b3     \n" // t + l - tl
        "mov    %2, %1      \n" // next iteration's tl = t
        "cmp    %0, %2      \n"
        "cmovg  %0, %2      \n" // min(t, l)
        "cmovg  %1, %0      \n" // max(t, l)
        "cmp   %k3, %0      \n"
        "cmovg %k3, %0      \n" // min(max(t, l), t + l - tl)
        "mov    %7, %3      \n"
        "cmp    %2, %0      \n"
        "cmovl  %2, %0      \n" // median = mid of t, l, t + l - tl
        "add (%6,%4), %b0   \n"
        "mov %b0, (%5,%4)   \n"
        "inc    %4          \n"
        "jl 1b              \n"
        :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        :"r"(dst+w), "r"(diff+w), "rm"(top+w)
    );
    *left = l;
    *left_top = tl;
}
#endif
#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7          \n\t"\
    "movq %0, %%mm0             \n\t"\
    "movq %0, %%mm1             \n\t"\
    "movq %3, %%mm2             \n\t"\
    "movq %3, %%mm3             \n\t"\
    "punpcklbw %%mm7, %%mm0     \n\t"\
    "punpckhbw %%mm7, %%mm1     \n\t"\
    "punpcklbw %%mm7, %%mm2     \n\t"\
    "punpckhbw %%mm7, %%mm3     \n\t"\
    "psubw %%mm2, %%mm0         \n\t"\
    "psubw %%mm3, %%mm1         \n\t"\
    "movq %1, %%mm2             \n\t"\
    "movq %1, %%mm3             \n\t"\
    "movq %2, %%mm4             \n\t"\
    "movq %2, %%mm5             \n\t"\
    "punpcklbw %%mm7, %%mm2     \n\t"\
    "punpckhbw %%mm7, %%mm3     \n\t"\
    "punpcklbw %%mm7, %%mm4     \n\t"\
    "punpckhbw %%mm7, %%mm5     \n\t"\
    "psubw %%mm2, %%mm4         \n\t"\
    "psubw %%mm3, %%mm5         \n\t"\
    "psllw $2, %%mm4            \n\t"\
    "psllw $2, %%mm5            \n\t"\
    "paddw %%mm0, %%mm4         \n\t"\
    "paddw %%mm1, %%mm5         \n\t"\
    "pxor %%mm6, %%mm6          \n\t"\
    "pcmpgtw %%mm4, %%mm6       \n\t"\
    "pcmpgtw %%mm5, %%mm7       \n\t"\
    "pxor %%mm6, %%mm4          \n\t"\
    "pxor %%mm7, %%mm5          \n\t"\
    "psubw %%mm6, %%mm4         \n\t"\
    "psubw %%mm7, %%mm5         \n\t"\
    "psrlw $3, %%mm4            \n\t"\
    "psrlw $3, %%mm5            \n\t"\
    "packuswb %%mm5, %%mm4      \n\t"\
    "packsswb %%mm7, %%mm6      \n\t"\
    "pxor %%mm7, %%mm7          \n\t"\
    "movd %4, %%mm2             \n\t"\
    "punpcklbw %%mm2, %%mm2     \n\t"\
    "punpcklbw %%mm2, %%mm2     \n\t"\
    "punpcklbw %%mm2, %%mm2     \n\t"\
    "psubusb %%mm4, %%mm2       \n\t"\
    "movq %%mm2, %%mm3          \n\t"\
    "psubusb %%mm4, %%mm3       \n\t"\
    "psubb %%mm3, %%mm2         \n\t"\
    "movq %1, %%mm3             \n\t"\
    "movq %2, %%mm4             \n\t"\
    "pxor %%mm6, %%mm3          \n\t"\
    "pxor %%mm6, %%mm4          \n\t"\
    "paddusb %%mm2, %%mm3       \n\t"\
    "psubusb %%mm2, %%mm4       \n\t"\
    "pxor %%mm6, %%mm3          \n\t"\
    "pxor %%mm6, %%mm4          \n\t"\
    "paddusb %%mm2, %%mm2       \n\t"\
    "packsswb %%mm1, %%mm0      \n\t"\
    "pcmpgtb %%mm0, %%mm7       \n\t"\
    "pxor %%mm7, %%mm0          \n\t"\
    "psubb %%mm7, %%mm0         \n\t"\
    "movq %%mm0, %%mm1          \n\t"\
    "psubusb %%mm2, %%mm0       \n\t"\
    "psubb %%mm0, %%mm1         \n\t"\
    "pand %5, %%mm1             \n\t"\
    "psrlw $2, %%mm1            \n\t"\
    "pxor %%mm7, %%mm1          \n\t"\
    "psubb %%mm7, %%mm1         \n\t"\
    "movq %0, %%mm5             \n\t"\
    "movq %3, %%mm6             \n\t"\
    "psubb %%mm1, %%mm5         \n\t"\
    "paddb %%mm1, %%mm6         \n\t"
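// Roughly: the macro above computes the H.263 deblocking delta
// d = (p0 - p3 + 4*(p2 - p1)) / 8 for eight pixels at once, soft-limits it
// against 2*strength (operand %4) with the double-psubusb trick, and leaves
// the filtered rows in mm5 (p0), mm3 (p1), mm4 (p2) and mm6 (p3)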
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    __asm__ volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1     \n\t"
        "movq %%mm4, %2     \n\t"
        "movq %%mm5, %0     \n\t"
        "movq %%mm6, %3     \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp)[4];
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    __asm__ volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    __asm__ volatile(
        "movq %%mm5, %%mm1          \n\t"
        "movq %%mm4, %%mm0          \n\t"
        "punpcklbw %%mm3, %%mm5     \n\t"
        "punpcklbw %%mm6, %%mm4     \n\t"
        "punpckhbw %%mm3, %%mm1     \n\t"
        "punpckhbw %%mm6, %%mm0     \n\t"
        "movq %%mm5, %%mm3          \n\t"
        "movq %%mm1, %%mm6          \n\t"
        "punpcklwd %%mm4, %%mm5     \n\t"
        "punpcklwd %%mm0, %%mm1     \n\t"
        "punpckhwd %%mm4, %%mm3     \n\t"
        "punpckhwd %%mm0, %%mm6     \n\t"
        "movd %%mm5, (%0)           \n\t"
        "punpckhdq %%mm5, %%mm5     \n\t"
        "movd %%mm5, (%0,%2)        \n\t"
        "movd %%mm3, (%0,%2,2)      \n\t"
        "punpckhdq %%mm3, %%mm3     \n\t"
        "movd %%mm3, (%0,%3)        \n\t"
        "movd %%mm1, (%1)           \n\t"
        "punpckhdq %%mm1, %%mm1     \n\t"
        "movd %%mm1, (%1,%2)        \n\t"
        "movd %%mm6, (%1,%2,2)      \n\t"
        "punpckhdq %%mm6, %%mm6     \n\t"
        "movd %%mm6, (%1,%3)        \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg) stride ),
           "r" ((x86_reg)(3*stride))
        : "memory"
    );
    }
}
/* Draw the edges of width 'w' of an image of size width x height.
   This MMX version can only handle w==8 || w==16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
            "1:                         \n\t"
            "movd (%0), %%mm0           \n\t"
            "punpcklbw %%mm0, %%mm0     \n\t"
            "punpcklwd %%mm0, %%mm0     \n\t"
            "punpckldq %%mm0, %%mm0     \n\t"
            "movq %%mm0, -8(%0)         \n\t"
            "movq -8(%0, %2), %%mm1     \n\t"
            "punpckhbw %%mm1, %%mm1     \n\t"
            "punpckhwd %%mm1, %%mm1     \n\t"
            "punpckhdq %%mm1, %%mm1     \n\t"
            "movq %%mm1, (%0, %2)       \n\t"
            "add %1, %0                 \n\t"
            "cmp %3, %0                 \n\t"
            " jb 1b                     \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        __asm__ volatile(
            "1:                         \n\t"
            "movd (%0), %%mm0           \n\t"
            "punpcklbw %%mm0, %%mm0     \n\t"
            "punpcklwd %%mm0, %%mm0     \n\t"
            "punpckldq %%mm0, %%mm0     \n\t"
            "movq %%mm0, -8(%0)         \n\t"
            "movq %%mm0, -16(%0)        \n\t"
            "movq -8(%0, %2), %%mm1     \n\t"
            "punpckhbw %%mm1, %%mm1     \n\t"
            "punpckhwd %%mm1, %%mm1     \n\t"
            "punpckhdq %%mm1, %%mm1     \n\t"
            "movq %%mm1, (%0, %2)       \n\t"
            "movq %%mm1, 8(%0, %2)      \n\t"
            "add %1, %0                 \n\t"
            "cmp %3, %0                 \n\t"
            " jb 1b                     \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        __asm__ volatile(
            "1:                         \n\t"
            "movq (%1, %0), %%mm0       \n\t"
            "movq %%mm0, (%0)           \n\t"
            "movq %%mm0, (%0, %2)       \n\t"
            "movq %%mm0, (%0, %2, 2)    \n\t"
            "movq %%mm0, (%0, %3)       \n\t"
            "add $8, %0                 \n\t"
            "cmp %4, %0                 \n\t"
            " jb 1b                     \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        __asm__ volatile(
            "1:                         \n\t"
            "movq (%1, %0), %%mm0       \n\t"
            "movq %%mm0, (%0)           \n\t"
            "movq %%mm0, (%0, %2)       \n\t"
            "movq %%mm0, (%0, %2, 2)    \n\t"
            "movq %%mm0, (%0, %3)       \n\t"
            "add $8, %0                 \n\t"
            "cmp %4, %0                 \n\t"
            " jb 1b                     \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
        );
    }
}
#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    __asm__ volatile(\
        "pxor      %%mm7, %%mm7 \n"\
        "movd    (%1,%0), %%mm0 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add       %4, %0 \n"\
        "1: \n"\
        "movq      %%mm1, %%mm2 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "movq      %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq      %%mm2, %%mm4 \n"\
        "psubw     %%mm1, %%mm3 \n"\
        "psubw     %%mm0, %%mm4 \n"\
        "movq      %%mm3, %%mm5 \n"\
        "paddw     %%mm4, %%mm5 \n"\
        abs3\
        "movq      %%mm4, %%mm6 \n"\
        "pminsw    %%mm5, %%mm6 \n"\
        "pcmpgtw   %%mm6, %%mm3 \n"\
        "pcmpgtw   %%mm5, %%mm4 \n"\
        "movq      %%mm4, %%mm6 \n"\
        "pand      %%mm3, %%mm4 \n"\
        "pandn     %%mm3, %%mm6 \n"\
        "pandn     %%mm0, %%mm3 \n"\
        "movd    (%3,%0), %%mm0 \n"\
        "pand      %%mm1, %%mm6 \n"\
        "pand      %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq      %6,    %%mm5 \n"\
        "paddw     %%mm6, %%mm0 \n"\
        "paddw     %%mm2, %%mm3 \n"\
        "paddw     %%mm3, %%mm0 \n"\
        "pand      %%mm5, %%mm0 \n"\
        "movq      %%mm0, %%mm3 \n"\
        "packuswb  %%mm3, %%mm3 \n"\
        "movd      %%mm3, (%1,%0) \n"\
        "add       %4, %0 \n"\
        "cmp       %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}
#define ABS3_MMX2\
        "psubw     %%mm5, %%mm7 \n"\
        "pmaxsw    %%mm7, %%mm5 \n"\
        "pxor      %%mm6, %%mm6 \n"\
        "pxor      %%mm7, %%mm7 \n"\
        "psubw     %%mm3, %%mm6 \n"\
        "psubw     %%mm4, %%mm7 \n"\
        "pmaxsw    %%mm6, %%mm3 \n"\
        "pmaxsw    %%mm7, %%mm4 \n"\
        "pxor      %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
        "pabsw     %%mm3, %%mm3 \n"\
        "pabsw     %%mm4, %%mm4 \n"\
        "pabsw     %%mm5, %%mm5 \n"
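// MMX2 has no packed absolute value, so |x| is computed as max(x, -x) using a
// zeroed register, psubw and pmaxsw; SSSE3's pabsw does the same in one
// instruction per value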
PAETH(mmx2, ABS3_MMX2)
#if HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 "             \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4     \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4              \n\t" /* 20x1 */\
    "movq "#in7", " #m3 "               \n\t" /* d */\
    "movq "#in0", %%mm5                 \n\t" /* D */\
    "paddw " #m3 ", %%mm5               \n\t" /* x4 */\
    "psubw %%mm5, %%mm4                 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5                 \n\t" /* C */\
    "movq "#in2", %%mm6                 \n\t" /* B */\
    "paddw " #m6 ", %%mm5               \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6               \n\t" /* x2 */\
    "paddw %%mm6, %%mm6                 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5                 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5    \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4              \n\t" /* 20x1 - x4 + rnd */\
    "paddw %%mm4, %%mm5                 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5                    \n\t"\
    "packuswb %%mm5, %%mm5              \n\t"\
    OP(%%mm5, out, %%mm7, d)
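// one column step of the MPEG-4 half-pel filter (-1, 3, -6, 20, 20, -6, 3, -1):
// m3..m6 and in0..in7 hold the symmetric pair sums x1..x4, the result is
// (20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5, then packed and stored via OP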
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %6, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        "movq %%mm0, %5                   \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
        \
        "movq 5(%0), %%mm0                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6                \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0                  \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5                 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2               \n\t" /* b */\
        "paddw %%mm5, %%mm3               \n\t" /* c */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2                \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6                 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6           \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm4               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3               \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "movq %5, %%mm1                   \n\t"\
        "packuswb %%mm3, %%mm1            \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
        \
        "movq 9(%0), %%mm1                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3                \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1                  \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4                 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1           \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4           \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5               \n\t" /* b */\
        "paddw %%mm4, %%mm0               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm0               \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5                \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3                 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0  \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3           \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2               \n\t" /* d */\
        "psubw %%mm2, %%mm0               \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2                \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5           \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6               \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
        \
        "paddw %%mm5, %%mm3               \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4       \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4               \n\t" /* c */\
        "paddw %%mm2, %%mm5               \n\t" /* d */\
        "paddw %%mm6, %%mm6               \n\t" /* 2b */\
        "psubw %%mm6, %%mm4               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4  \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4                  \n\t"\
        "paddw %%mm3, %%mm4               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4                  \n\t"\
        "packuswb %%mm4, %%mm0            \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
        \
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        __asm__ volatile(\
            "movq (%0), %%mm0           \n\t"\
            "movq 8(%0), %%mm1          \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0         \n\t"\
            "movq 24(%0), %%mm1         \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %5, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
        \
        "movd 5(%0), %%mm5                \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm2               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6       \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3               \n\t" /* c */\
        "paddw %%mm5, %%mm4               \n\t" /* d */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "packuswb %%mm3, %%mm0            \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
        \
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        __asm__ volatile(\
            "movq (%0), %%mm0           \n\t"\
            "movq 8(%0), %%mm1          \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}

#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "movq 8(%0), %%mm2              \n\t"\
        "movq 8(%0), %%mm3              \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 17*8(%1)           \n\t"\
        "movq %%mm2, 2*17*8(%1)         \n\t"\
        "movq %%mm3, 3*17*8(%1)         \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count= 4;\
\
/*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7            \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        \
        "add $136, %0                   \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 9*8(%1)            \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count= 2;\
\
/*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7            \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
        \
        "add $72, %0                    \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"

QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
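// each expansion plugs in its store op: PUT_OP is a plain mov, while the AVG
// ops read the destination back and average with pavgb/pavgusb before storing;
// the ff_pw_16 vs ff_pw_15 rounder selects rounding vs truncation of the
// final >> 5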
/***********************************/
/* bilinear qpel: not compliant with any spec, only for -lavdopts fast */

#define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
}

#define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
}

#define QPEL_2TAP(OPNAME, SIZE, MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
    OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
    OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
    OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
}\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
}\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0,        1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1,       -1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0,        stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride,  -stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0,        stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1,        stride, -1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride,  -stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\

QPEL_2TAP(put_, 16, mmx2)
QPEL_2TAP(avg_, 16, mmx2)
QPEL_2TAP(put_,  8, mmx2)
QPEL_2TAP(avg_,  8, mmx2)
QPEL_2TAP(put_, 16, 3dnow)
QPEL_2TAP(avg_, 16, 3dnow)
QPEL_2TAP(put_,  8, 3dnow)
QPEL_2TAP(avg_,  8, 3dnow)
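// the 2-tap "qpel" code approximates quarter-pel positions either with a
// half-pel call (QPEL_2TAP_XY) or a 3-point linear blend (QPEL_2TAP_L3);
// it matches no spec, hence the -lavdopts fast restriction above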
static void just_return(void) { return; }
static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
    const int w = 8;
    const int ix = ox>>(16+shift);
    const int iy = oy>>(16+shift);
    const int oxs = ox>>4;
    const int oys = oy>>4;
    const int dxxs = dxx>>4;
    const int dxys = dxy>>4;
    const int dyxs = dyx>>4;
    const int dyys = dyy>>4;
    const uint16_t r4[4] = {r,r,r,r};
    const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
    const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
    const uint64_t shift2 = 2*shift;
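    // ox/oy and the four deltas are 16.16 fixed point; the >>4 above reduces
    // them to 16-bit values with 12 fractional bits so four x positions fit in
    // one MMX register, and psrlw $12 below extracts the per-pixel subpel
    // weights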
1677 uint8_t edge_buf[(h+1)*stride];
1680 const int dxw = (dxx-(1<<(16+shift)))*(w-1);
1681 const int dyh = (dyy-(1<<(16+shift)))*(h-1);
1682 const int dxh = dxy*(h-1);
1683 const int dyw = dyx*(w-1);
1684 if( // non-constant fullpel offset (3% of blocks)
1685 ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
1686 (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
1687 // uses more than 16 bits of subpel mv (only at huge resolution)
1688 || (dxx|dxy|dyx|dyy)&15 )
1690 //FIXME could still use mmx for some of the rows
1691 ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
1695 src += ix + iy*stride;
1696 if( (unsigned)ix >= width-w ||
1697 (unsigned)iy >= height-h )
1698 {
1699 ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
1700 src = edge_buf;
1701 }
1703 __asm__ volatile(
1704 "movd %0, %%mm6 \n\t"
1705 "pxor %%mm7, %%mm7 \n\t"
1706 "punpcklwd %%mm6, %%mm6 \n\t"
1707 "punpcklwd %%mm6, %%mm6 \n\t"
1711 for(x=0; x<w; x+=4){
1712 uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
1713 oxs - dxys + dxxs*(x+1),
1714 oxs - dxys + dxxs*(x+2),
1715 oxs - dxys + dxxs*(x+3) };
1716 uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
1717 oys - dyys + dyxs*(x+1),
1718 oys - dyys + dyxs*(x+2),
1719 oys - dyys + dyxs*(x+3) };
1721 for(y=0; y<h; y++){
1722 __asm__ volatile(
1723 "movq %0, %%mm4 \n\t"
1724 "movq %1, %%mm5 \n\t"
1725 "paddw %2, %%mm4 \n\t"
1726 "paddw %3, %%mm5 \n\t"
1727 "movq %%mm4, %0 \n\t"
1728 "movq %%mm5, %1 \n\t"
1729 "psrlw $12, %%mm4 \n\t"
1730 "psrlw $12, %%mm5 \n\t"
1731 : "+m"(*dx4), "+m"(*dy4)
1732 : "m"(*dxy4), "m"(*dyy4)
1736 "movq %%mm6, %%mm2 \n\t"
1737 "movq %%mm6, %%mm1 \n\t"
1738 "psubw %%mm4, %%mm2 \n\t"
1739 "psubw %%mm5, %%mm1 \n\t"
1740 "movq %%mm2, %%mm0 \n\t"
1741 "movq %%mm4, %%mm3 \n\t"
1742 "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
1743 "pmullw %%mm5, %%mm3 \n\t" // dx*dy
1744 "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
1745 "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
1747 "movd %4, %%mm5 \n\t"
1748 "movd %3, %%mm4 \n\t"
1749 "punpcklbw %%mm7, %%mm5 \n\t"
1750 "punpcklbw %%mm7, %%mm4 \n\t"
1751 "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
1752 "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
1754 "movd %2, %%mm5 \n\t"
1755 "movd %1, %%mm4 \n\t"
1756 "punpcklbw %%mm7, %%mm5 \n\t"
1757 "punpcklbw %%mm7, %%mm4 \n\t"
1758 "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
1759 "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
1760 "paddw %5, %%mm1 \n\t"
1761 "paddw %%mm3, %%mm2 \n\t"
1762 "paddw %%mm1, %%mm0 \n\t"
1763 "paddw %%mm2, %%mm0 \n\t"
1765 "psrlw %6, %%mm0 \n\t"
1766 "packuswb %%mm0, %%mm0 \n\t"
1767 "movd %%mm0, %0 \n\t"
1769 : "=m"(dst[x+y*stride])
1770 : "m"(src[0]), "m"(src[1]),
1771 "m"(src[stride]), "m"(src[stride+1]),
1772 "m"(*r4), "m"(shift2)
1780 #define PREFETCH(name, op) \
1781 static void name(void *mem, int stride, int h){\
1782 const uint8_t *p= mem;\
1783 do{\
1784 __asm__ volatile(#op" %0" :: "m"(*p));\
1785 p+= stride;\
1786 }while(--h);\
1787 }
1788 PREFETCH(prefetch_mmx2, prefetcht0)
1789 PREFETCH(prefetch_3dnow, prefetch)
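/* Usage sketch: the generated helpers hint one cache line per row, e.g.
   prefetch_mmx2(ptr, stride, 4) touches ptr, ptr+stride, ... ptr+3*stride. */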
1792 #include "h264_qpel_mmx.c"
1794 void ff_put_h264_chroma_mc8_mmx_rnd (uint8_t *dst, uint8_t *src,
1795 int stride, int h, int x, int y);
1796 void ff_put_vc1_chroma_mc8_mmx_nornd (uint8_t *dst, uint8_t *src,
1797 int stride, int h, int x, int y);
1798 void ff_put_rv40_chroma_mc8_mmx (uint8_t *dst, uint8_t *src,
1799 int stride, int h, int x, int y);
1800 void ff_avg_h264_chroma_mc8_mmx2_rnd (uint8_t *dst, uint8_t *src,
1801 int stride, int h, int x, int y);
1802 void ff_avg_vc1_chroma_mc8_mmx2_nornd (uint8_t *dst, uint8_t *src,
1803 int stride, int h, int x, int y);
1804 void ff_avg_rv40_chroma_mc8_mmx2 (uint8_t *dst, uint8_t *src,
1805 int stride, int h, int x, int y);
1806 void ff_avg_h264_chroma_mc8_3dnow_rnd (uint8_t *dst, uint8_t *src,
1807 int stride, int h, int x, int y);
1808 void ff_avg_vc1_chroma_mc8_3dnow_nornd(uint8_t *dst, uint8_t *src,
1809 int stride, int h, int x, int y);
1810 void ff_avg_rv40_chroma_mc8_3dnow (uint8_t *dst, uint8_t *src,
1811 int stride, int h, int x, int y);
1813 void ff_put_h264_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
1814 int stride, int h, int x, int y);
1815 void ff_put_rv40_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
1816 int stride, int h, int x, int y);
1817 void ff_avg_h264_chroma_mc4_mmx2 (uint8_t *dst, uint8_t *src,
1818 int stride, int h, int x, int y);
1819 void ff_avg_rv40_chroma_mc4_mmx2 (uint8_t *dst, uint8_t *src,
1820 int stride, int h, int x, int y);
1821 void ff_avg_h264_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
1822 int stride, int h, int x, int y);
1823 void ff_avg_rv40_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
1824 int stride, int h, int x, int y);
1826 void ff_put_h264_chroma_mc2_mmx2 (uint8_t *dst, uint8_t *src,
1827 int stride, int h, int x, int y);
1828 void ff_avg_h264_chroma_mc2_mmx2 (uint8_t *dst, uint8_t *src,
1829 int stride, int h, int x, int y);
1831 void ff_put_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
1832 int stride, int h, int x, int y);
1833 void ff_put_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst, uint8_t *src,
1834 int stride, int h, int x, int y);
1835 void ff_put_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
1836 int stride, int h, int x, int y);
1838 void ff_avg_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
1839 int stride, int h, int x, int y);
1840 void ff_avg_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst, uint8_t *src,
1841 int stride, int h, int x, int y);
1842 void ff_avg_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
1843 int stride, int h, int x, int y);
1846 /* CAVS specific */
1847 void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1848 put_pixels8_mmx(dst, src, stride, 8);
1849 }
1850 void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1851 avg_pixels8_mmx(dst, src, stride, 8);
1852 }
1853 void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1854 put_pixels16_mmx(dst, src, stride, 16);
1855 }
1856 void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1857 avg_pixels16_mmx(dst, src, stride, 16);
1858 }
1860 /* VC1 specific */
1861 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
1862 put_pixels8_mmx(dst, src, stride, 8);
1863 }
1864 void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
1865 avg_pixels8_mmx2(dst, src, stride, 8);
1866 }
1868 /* XXX: those functions should be suppressed ASAP when all IDCTs are
1869 converted */
1870 #if CONFIG_GPL
1871 static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
1872 {
1873 ff_mmx_idct (block);
1874 ff_put_pixels_clamped_mmx(block, dest, line_size);
1875 }
1876 static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
1877 {
1878 ff_mmx_idct (block);
1879 ff_add_pixels_clamped_mmx(block, dest, line_size);
1880 }
1881 static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
1882 {
1883 ff_mmxext_idct (block);
1884 ff_put_pixels_clamped_mmx(block, dest, line_size);
1885 }
1886 static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
1887 {
1888 ff_mmxext_idct (block);
1889 ff_add_pixels_clamped_mmx(block, dest, line_size);
1890 }
1891 #endif
1892 static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
1893 {
1894 ff_idct_xvid_mmx (block);
1895 ff_put_pixels_clamped_mmx(block, dest, line_size);
1896 }
1897 static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
1898 {
1899 ff_idct_xvid_mmx (block);
1900 ff_add_pixels_clamped_mmx(block, dest, line_size);
1901 }
1902 static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
1903 {
1904 ff_idct_xvid_mmx2 (block);
1905 ff_put_pixels_clamped_mmx(block, dest, line_size);
1906 }
1907 static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
1908 {
1909 ff_idct_xvid_mmx2 (block);
1910 ff_add_pixels_clamped_mmx(block, dest, line_size);
1911 }
1913 static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
1914 {
1915 int i;
1916 __asm__ volatile("pxor %%mm7, %%mm7":);
1917 for(i=0; i<blocksize; i+=2) {
1918 __asm__ volatile(
1919 "movq %0, %%mm0 \n\t"
1920 "movq %1, %%mm1 \n\t"
1921 "movq %%mm0, %%mm2 \n\t"
1922 "movq %%mm1, %%mm3 \n\t"
1923 "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
1924 "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
1925 "pslld $31, %%mm2 \n\t" // keep only the sign bit
1926 "pxor %%mm2, %%mm1 \n\t"
1927 "movq %%mm3, %%mm4 \n\t"
1928 "pand %%mm1, %%mm3 \n\t"
1929 "pandn %%mm1, %%mm4 \n\t"
1930 "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
1931 "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
1932 "movq %%mm3, %1 \n\t"
1933 "movq %%mm0, %0 \n\t"
1934 :"+m"(mag[i]), "+m"(ang[i])
1938 __asm__ volatile("femms");
1940 static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
1941 {
1942 int i;
1944 __asm__ volatile(
1945 "movaps %0, %%xmm5 \n\t"
1946 ::"m"(ff_pdw_80000000[0])
1947 );
1948 for(i=0; i<blocksize; i+=4) {
1949 __asm__ volatile(
1950 "movaps %0, %%xmm0 \n\t"
1951 "movaps %1, %%xmm1 \n\t"
1952 "xorps %%xmm2, %%xmm2 \n\t"
1953 "xorps %%xmm3, %%xmm3 \n\t"
1954 "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
1955 "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
1956 "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit
1957 "xorps %%xmm2, %%xmm1 \n\t"
1958 "movaps %%xmm3, %%xmm4 \n\t"
1959 "andps %%xmm1, %%xmm3 \n\t"
1960 "andnps %%xmm1, %%xmm4 \n\t"
1961 "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
1962 "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
1963 "movaps %%xmm3, %1 \n\t"
1964 "movaps %%xmm0, %0 \n\t"
1965 :"+m"(mag[i]), "+m"(ang[i])
1971 #define IF1(x) x
1972 #define IF0(x)
1974 #define MIX5(mono,stereo)\
1975 __asm__ volatile(\
1976 "movss 0(%2), %%xmm5 \n"\
1977 "movss 8(%2), %%xmm6 \n"\
1978 "movss 24(%2), %%xmm7 \n"\
1979 "shufps $0, %%xmm5, %%xmm5 \n"\
1980 "shufps $0, %%xmm6, %%xmm6 \n"\
1981 "shufps $0, %%xmm7, %%xmm7 \n"\
1983 "movaps (%0,%1), %%xmm0 \n"\
1984 "movaps 0x400(%0,%1), %%xmm1 \n"\
1985 "movaps 0x800(%0,%1), %%xmm2 \n"\
1986 "movaps 0xc00(%0,%1), %%xmm3 \n"\
1987 "movaps 0x1000(%0,%1), %%xmm4 \n"\
1988 "mulps %%xmm5, %%xmm0 \n"\
1989 "mulps %%xmm6, %%xmm1 \n"\
1990 "mulps %%xmm5, %%xmm2 \n"\
1991 "mulps %%xmm7, %%xmm3 \n"\
1992 "mulps %%xmm7, %%xmm4 \n"\
1993 stereo("addps %%xmm1, %%xmm0 \n")\
1994 "addps %%xmm1, %%xmm2 \n"\
1995 "addps %%xmm3, %%xmm0 \n"\
1996 "addps %%xmm4, %%xmm2 \n"\
1997 mono("addps %%xmm2, %%xmm0 \n")\
1998 "movaps %%xmm0, (%0,%1) \n"\
1999 stereo("movaps %%xmm2, 0x400(%0,%1) \n")\
2003 :"r"(samples[0]+len), "r"(matrix)\
2007 #define MIX_MISC(stereo)\
2008 __asm__ volatile(\
2009 "1: \n"\
2010 "movaps (%3,%0), %%xmm0 \n"\
2011 stereo("movaps %%xmm0, %%xmm1 \n")\
2012 "mulps %%xmm6, %%xmm0 \n"\
2013 stereo("mulps %%xmm7, %%xmm1 \n")\
2014 "lea 1024(%3,%0), %1 \n"\
2017 "movaps (%1), %%xmm2 \n"\
2018 stereo("movaps %%xmm2, %%xmm3 \n")\
2019 "mulps (%4,%2), %%xmm2 \n"\
2020 stereo("mulps 16(%4,%2), %%xmm3 \n")\
2021 "addps %%xmm2, %%xmm0 \n"\
2022 stereo("addps %%xmm3, %%xmm1 \n")\
2026 "movaps %%xmm0, (%3,%0) \n"\
2027 stereo("movaps %%xmm1, 1024(%3,%0) \n")\
2030 :"+&r"(i), "=&r"(j), "=&r"(k)\
2031 :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
2035 static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
2036 {
2037 int (*matrix_cmp)[2] = (int(*)[2])matrix;
2038 intptr_t i,j,k;
2040 i = -len*sizeof(float);
2041 if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
2042 MIX5(IF0,IF1);
2043 } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
2044 MIX5(IF1,IF0);
2045 } else {
2046 DECLARE_ALIGNED(16, float, matrix_simd)[in_ch][2][4];
2047 j = 2*in_ch*sizeof(float);
2048 __asm__ volatile(
2049 "1: \n"
2050 "sub $8, %0 \n"
2051 "movss (%2,%0), %%xmm6 \n"
2052 "movss 4(%2,%0), %%xmm7 \n"
2053 "shufps $0, %%xmm6, %%xmm6 \n"
2054 "shufps $0, %%xmm7, %%xmm7 \n"
2055 "movaps %%xmm6, (%1,%0,4) \n"
2056 "movaps %%xmm7, 16(%1,%0,4) \n"
2059 :"r"(matrix_simd), "r"(matrix)
2070 static void vector_fmul_3dnow(float *dst, const float *src, int len){
2071 x86_reg i = (len-4)*4;
2072 __asm__ volatile(
2073 "1: \n\t"
2074 "movq (%1,%0), %%mm0 \n\t"
2075 "movq 8(%1,%0), %%mm1 \n\t"
2076 "pfmul (%2,%0), %%mm0 \n\t"
2077 "pfmul 8(%2,%0), %%mm1 \n\t"
2078 "movq %%mm0, (%1,%0) \n\t"
2079 "movq %%mm1, 8(%1,%0) \n\t"
2088 static void vector_fmul_sse(float *dst, const float *src, int len){
2089 x86_reg i = (len-8)*4;
2090 __asm__ volatile(
2091 "1: \n\t"
2092 "movaps (%1,%0), %%xmm0 \n\t"
2093 "movaps 16(%1,%0), %%xmm1 \n\t"
2094 "mulps (%2,%0), %%xmm0 \n\t"
2095 "mulps 16(%2,%0), %%xmm1 \n\t"
2096 "movaps %%xmm0, (%1,%0) \n\t"
2097 "movaps %%xmm1, 16(%1,%0) \n\t"
2106 static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
2107 x86_reg i = len*4-16;
2108 __asm__ volatile(
2109 "1: \n\t"
2110 "pswapd 8(%1), %%mm0 \n\t"
2111 "pswapd (%1), %%mm1 \n\t"
2112 "pfmul (%3,%0), %%mm0 \n\t"
2113 "pfmul 8(%3,%0), %%mm1 \n\t"
2114 "movq %%mm0, (%2,%0) \n\t"
2115 "movq %%mm1, 8(%2,%0) \n\t"
2119 :"+r"(i), "+r"(src1)
2120 :"r"(dst), "r"(src0)
2122 __asm__ volatile("femms");
2124 static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
2125 x86_reg i = len*4-32;
2126 __asm__ volatile(
2127 "1: \n\t"
2128 "movaps 16(%1), %%xmm0 \n\t"
2129 "movaps (%1), %%xmm1 \n\t"
2130 "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
2131 "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
2132 "mulps (%3,%0), %%xmm0 \n\t"
2133 "mulps 16(%3,%0), %%xmm1 \n\t"
2134 "movaps %%xmm0, (%2,%0) \n\t"
2135 "movaps %%xmm1, 16(%2,%0) \n\t"
2139 :"+r"(i), "+r"(src1)
2140 :"r"(dst), "r"(src0)
2144 static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
2145 const float *src2, int len){
2146 x86_reg i = (len-4)*4;
2147 __asm__ volatile(
2148 "1: \n\t"
2149 "movq (%2,%0), %%mm0 \n\t"
2150 "movq 8(%2,%0), %%mm1 \n\t"
2151 "pfmul (%3,%0), %%mm0 \n\t"
2152 "pfmul 8(%3,%0), %%mm1 \n\t"
2153 "pfadd (%4,%0), %%mm0 \n\t"
2154 "pfadd 8(%4,%0), %%mm1 \n\t"
2155 "movq %%mm0, (%1,%0) \n\t"
2156 "movq %%mm1, 8(%1,%0) \n\t"
2160 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
2163 __asm__ volatile("femms");
2165 static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
2166 const float *src2, int len){
2167 x86_reg i = (len-8)*4;
2168 __asm__ volatile(
2169 "1: \n\t"
2170 "movaps (%2,%0), %%xmm0 \n\t"
2171 "movaps 16(%2,%0), %%xmm1 \n\t"
2172 "mulps (%3,%0), %%xmm0 \n\t"
2173 "mulps 16(%3,%0), %%xmm1 \n\t"
2174 "addps (%4,%0), %%xmm0 \n\t"
2175 "addps 16(%4,%0), %%xmm1 \n\t"
2176 "movaps %%xmm0, (%1,%0) \n\t"
2177 "movaps %%xmm1, 16(%1,%0) \n\t"
2181 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
2186 static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
2187 const float *win, float add_bias, int len){
2188 if(add_bias == 0){
2189 x86_reg i = -len*4;
2191 x86_reg j = len*4-8;
2192 __asm__ volatile(
2193 "1: \n"
2194 "pswapd (%5,%1), %%mm1 \n"
2195 "movq (%5,%0), %%mm0 \n"
2196 "pswapd (%4,%1), %%mm5 \n"
2197 "movq (%3,%0), %%mm4 \n"
2198 "movq %%mm0, %%mm2 \n"
2199 "movq %%mm1, %%mm3 \n"
2200 "pfmul %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
2201 "pfmul %%mm5, %%mm3 \n" // src1[ j]*win[len+j]
2202 "pfmul %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
2203 "pfmul %%mm5, %%mm0 \n" // src1[ j]*win[len+i]
2204 "pfadd %%mm3, %%mm2 \n"
2205 "pfsub %%mm0, %%mm1 \n"
2206 "pswapd %%mm2, %%mm2 \n"
2207 "movq %%mm1, (%2,%0) \n"
2208 "movq %%mm2, (%2,%1) \n"
2214 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
2218 ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
2221 static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
2222 const float *win, float add_bias, int len){
2223 if(add_bias == 0){
2224 x86_reg i = -len*4;
2226 x86_reg j = len*4-16;
2227 __asm__ volatile(
2228 "1: \n"
2229 "movaps (%5,%1), %%xmm1 \n"
2230 "movaps (%5,%0), %%xmm0 \n"
2231 "movaps (%4,%1), %%xmm5 \n"
2232 "movaps (%3,%0), %%xmm4 \n"
2233 "shufps $0x1b, %%xmm1, %%xmm1 \n"
2234 "shufps $0x1b, %%xmm5, %%xmm5 \n"
2235 "movaps %%xmm0, %%xmm2 \n"
2236 "movaps %%xmm1, %%xmm3 \n"
2237 "mulps %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
2238 "mulps %%xmm5, %%xmm3 \n" // src1[ j]*win[len+j]
2239 "mulps %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
2240 "mulps %%xmm5, %%xmm0 \n" // src1[ j]*win[len+i]
2241 "addps %%xmm3, %%xmm2 \n"
2242 "subps %%xmm0, %%xmm1 \n"
2243 "shufps $0x1b, %%xmm2, %%xmm2 \n"
2244 "movaps %%xmm1, (%2,%0) \n"
2245 "movaps %%xmm2, (%2,%1) \n"
2250 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
2254 ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
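/* Scalar model of the windowed overlap-add above (matches the C fallback
   ff_vector_fmul_window_c; the SIMD paths only cover add_bias==0): */
#if 0
static void vector_fmul_window_sketch(float *dst, const float *src0,
                                      const float *src1, const float *win,
                                      float add_bias, int len)
{
    int i, j;
    dst += len; win += len; src0 += len;   /* same biasing as the constraints */
    for (i = -len, j = len-1; i < 0; i++, j--) {
        float s0 = src0[i], s1 = src1[j];
        dst[i] = s0*win[j] - s1*win[i] + add_bias;
        dst[j] = s0*win[i] + s1*win[j] + add_bias;
    }
}
#endif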
2257 static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
2258 {
2259 x86_reg i = -4*len;
2260 __asm__ volatile(
2261 "movss %3, %%xmm4 \n"
2262 "shufps $0, %%xmm4, %%xmm4 \n"
2263 "1: \n"
2264 "cvtpi2ps (%2,%0), %%xmm0 \n"
2265 "cvtpi2ps 8(%2,%0), %%xmm1 \n"
2266 "cvtpi2ps 16(%2,%0), %%xmm2 \n"
2267 "cvtpi2ps 24(%2,%0), %%xmm3 \n"
2268 "movlhps %%xmm1, %%xmm0 \n"
2269 "movlhps %%xmm3, %%xmm2 \n"
2270 "mulps %%xmm4, %%xmm0 \n"
2271 "mulps %%xmm4, %%xmm2 \n"
2272 "movaps %%xmm0, (%1,%0) \n"
2273 "movaps %%xmm2, 16(%1,%0) \n"
2277 :"r"(dst+len), "r"(src+len), "m"(mul)
2281 static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
2282 {
2283 x86_reg i = -4*len;
2284 __asm__ volatile(
2285 "movss %3, %%xmm4 \n"
2286 "shufps $0, %%xmm4, %%xmm4 \n"
2287 "1: \n"
2288 "cvtdq2ps (%2,%0), %%xmm0 \n"
2289 "cvtdq2ps 16(%2,%0), %%xmm1 \n"
2290 "mulps %%xmm4, %%xmm0 \n"
2291 "mulps %%xmm4, %%xmm1 \n"
2292 "movaps %%xmm0, (%1,%0) \n"
2293 "movaps %%xmm1, 16(%1,%0) \n"
2297 :"r"(dst+len), "r"(src+len), "m"(mul)
2301 static void vector_clipf_sse(float *dst, const float *src, float min, float max,
2302 int len)
2303 {
2304 x86_reg i = (len-16)*4;
2305 __asm__ volatile(
2306 "movss %3, %%xmm4 \n"
2307 "movss %4, %%xmm5 \n"
2308 "shufps $0, %%xmm4, %%xmm4 \n"
2309 "shufps $0, %%xmm5, %%xmm5 \n"
2310 "1: \n\t"
2311 "movaps (%2,%0), %%xmm0 \n\t" // 3/1 on intel
2312 "movaps 16(%2,%0), %%xmm1 \n\t"
2313 "movaps 32(%2,%0), %%xmm2 \n\t"
2314 "movaps 48(%2,%0), %%xmm3 \n\t"
2315 "maxps %%xmm4, %%xmm0 \n\t"
2316 "maxps %%xmm4, %%xmm1 \n\t"
2317 "maxps %%xmm4, %%xmm2 \n\t"
2318 "maxps %%xmm4, %%xmm3 \n\t"
2319 "minps %%xmm5, %%xmm0 \n\t"
2320 "minps %%xmm5, %%xmm1 \n\t"
2321 "minps %%xmm5, %%xmm2 \n\t"
2322 "minps %%xmm5, %%xmm3 \n\t"
2323 "movaps %%xmm0, (%1,%0) \n\t"
2324 "movaps %%xmm1, 16(%1,%0) \n\t"
2325 "movaps %%xmm2, 32(%1,%0) \n\t"
2326 "movaps %%xmm3, 48(%1,%0) \n\t"
2330 :"r"(dst), "r"(src), "m"(min), "m"(max)
2335 static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
2336 x86_reg reglen = len;
2337 // not bit-exact: pf2id uses different rounding than C and SSE
2338 __asm__ volatile(
2339 "add %0 , %0 \n\t"
2340 "lea (%2,%0,2) , %2 \n\t"
2341 "add %0 , %1 \n\t"
2342 "neg %0 \n\t"
2343 "1: \n\t"
2344 "pf2id (%2,%0,2) , %%mm0 \n\t"
2345 "pf2id 8(%2,%0,2) , %%mm1 \n\t"
2346 "pf2id 16(%2,%0,2) , %%mm2 \n\t"
2347 "pf2id 24(%2,%0,2) , %%mm3 \n\t"
2348 "packssdw %%mm1 , %%mm0 \n\t"
2349 "packssdw %%mm3 , %%mm2 \n\t"
2350 "movq %%mm0 , (%1,%0) \n\t"
2351 "movq %%mm2 , 8(%1,%0) \n\t"
2355 :"+r"(reglen), "+r"(dst), "+r"(src)
2358 static void float_to_int16_sse(int16_t *dst, const float *src, long len){
2359 x86_reg reglen = len;
2360 __asm__ volatile(
2361 "add %0 , %0 \n\t"
2362 "lea (%2,%0,2) , %2 \n\t"
2363 "add %0 , %1 \n\t"
2364 "neg %0 \n\t"
2365 "1: \n\t"
2366 "cvtps2pi (%2,%0,2) , %%mm0 \n\t"
2367 "cvtps2pi 8(%2,%0,2) , %%mm1 \n\t"
2368 "cvtps2pi 16(%2,%0,2) , %%mm2 \n\t"
2369 "cvtps2pi 24(%2,%0,2) , %%mm3 \n\t"
2370 "packssdw %%mm1 , %%mm0 \n\t"
2371 "packssdw %%mm3 , %%mm2 \n\t"
2372 "movq %%mm0 , (%1,%0) \n\t"
2373 "movq %%mm2 , 8(%1,%0) \n\t"
2377 :"+r"(reglen), "+r"(dst), "+r"(src)
2381 static void float_to_int16_sse2(int16_t *dst, const float *src, long len){
2382 x86_reg reglen = len;
2383 __asm__ volatile(
2384 "add %0 , %0 \n\t"
2385 "lea (%2,%0,2) , %2 \n\t"
2386 "add %0 , %1 \n\t"
2387 "neg %0 \n\t"
2388 "1: \n\t"
2389 "cvtps2dq (%2,%0,2) , %%xmm0 \n\t"
2390 "cvtps2dq 16(%2,%0,2) , %%xmm1 \n\t"
2391 "packssdw %%xmm1 , %%xmm0 \n\t"
2392 "movdqa %%xmm0 , (%1,%0) \n\t"
2395 :"+r"(reglen), "+r"(dst), "+r"(src)
2399 void ff_vp3_idct_mmx(int16_t *input_data);
2400 void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block);
2401 void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block);
2403 void ff_vp3_idct_dc_add_mmx2(uint8_t *dest, int line_size, const DCTELEM *block);
2405 void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
2406 void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
2408 void ff_vp3_idct_sse2(int16_t *input_data);
2409 void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block);
2410 void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block);
2412 void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
2413 void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
2414 void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len);
2415 int32_t ff_scalarproduct_int16_mmx2(const int16_t *v1, const int16_t *v2, int order, int shift);
2416 int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2, int order, int shift);
2417 int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
2418 int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
2419 int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
2420 void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top);
2421 int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, int w, int left);
2422 int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, int left);
2424 #else
2425 #define ff_float_to_int16_interleave6_sse(a,b,c) float_to_int16_interleave_misc_sse(a,b,c,6)
2426 #define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
2427 #define ff_float_to_int16_interleave6_3dn2(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
2428 #endif
2429 #define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse
2431 #define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
2432 /* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2*/\
2433 static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
2434 DECLARE_ALIGNED(16, int16_t, tmp)[len];\
2435 int i,j,c;\
2436 for(c=0; c<channels; c++){\
2437 float_to_int16_##cpu(tmp, src[c], len);\
2438 for(i=0, j=c; i<len; i++, j+=channels)\
2439 dst[j] = tmp[i];\
2440 }\
2441 }\
2443 static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\
2444 if(channels==1)\
2445 float_to_int16_##cpu(dst, src[0], len);\
2446 else if(channels==2){\
2447 x86_reg reglen = len; \
2448 const float *src0 = src[0];\
2449 const float *src1 = src[1];\
2450 __asm__ volatile(\
2451 "shl $2, %0 \n"\
2452 "add %0, %1 \n"\
2453 "add %0, %2 \n"\
2454 "add %0, %3 \n"\
2455 "neg %0 \n"\
2456 body\
2457 :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\
2458 );\
2459 }else if(channels==6){\
2460 ff_float_to_int16_interleave6_##cpu(dst, src, len);\
2461 }else\
2462 float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\
2463 }
2465 FLOAT_TO_INT16_INTERLEAVE(3dnow,
2466 "1: \n"
2467 "pf2id (%2,%0), %%mm0 \n"
2468 "pf2id 8(%2,%0), %%mm1 \n"
2469 "pf2id (%3,%0), %%mm2 \n"
2470 "pf2id 8(%3,%0), %%mm3 \n"
2471 "packssdw %%mm1, %%mm0 \n"
2472 "packssdw %%mm3, %%mm2 \n"
2473 "movq %%mm0, %%mm1 \n"
2474 "punpcklwd %%mm2, %%mm0 \n"
2475 "punpckhwd %%mm2, %%mm1 \n"
2476 "movq %%mm0, (%1,%0)\n"
2477 "movq %%mm1, 8(%1,%0)\n"
2483 FLOAT_TO_INT16_INTERLEAVE(sse,
2484 "1: \n"
2485 "cvtps2pi (%2,%0), %%mm0 \n"
2486 "cvtps2pi 8(%2,%0), %%mm1 \n"
2487 "cvtps2pi (%3,%0), %%mm2 \n"
2488 "cvtps2pi 8(%3,%0), %%mm3 \n"
2489 "packssdw %%mm1, %%mm0 \n"
2490 "packssdw %%mm3, %%mm2 \n"
2491 "movq %%mm0, %%mm1 \n"
2492 "punpcklwd %%mm2, %%mm0 \n"
2493 "punpckhwd %%mm2, %%mm1 \n"
2494 "movq %%mm0, (%1,%0)\n"
2495 "movq %%mm1, 8(%1,%0)\n"
2501 FLOAT_TO_INT16_INTERLEAVE(sse2,
2502 "1: \n"
2503 "cvtps2dq (%2,%0), %%xmm0 \n"
2504 "cvtps2dq (%3,%0), %%xmm1 \n"
2505 "packssdw %%xmm1, %%xmm0 \n"
2506 "movhlps %%xmm0, %%xmm1 \n"
2507 "punpcklwd %%xmm1, %%xmm0 \n"
2508 "movdqa %%xmm0, (%1,%0) \n"
2513 static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){
2514 if(channels==6)
2515 ff_float_to_int16_interleave6_3dn2(dst, src, len);
2516 else
2517 float_to_int16_interleave_3dnow(dst, src, len, channels);
2518 }
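/* Contract shared by all interleave variants above (sketch):

       for (c = 0; c < channels; c++)
           for (i = 0; i < len; i++)
               dst[i*channels + c] = convert(src[c][i]);

   the channels==2 asm bodies do this with punpcklwd/punpckhwd (movhlps +
   punpcklwd for sse2); channels==6 goes to the yasm helpers. */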
2520 float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
2522 void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
2523 {
2524 int mm_flags = mm_support();
2526 if (avctx->dsp_mask) {
2527 if (avctx->dsp_mask & FF_MM_FORCE)
2528 mm_flags |= (avctx->dsp_mask & 0xffff);
2529 else
2530 mm_flags &= ~(avctx->dsp_mask & 0xffff);
2531 }
2534 av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
2535 if (mm_flags & FF_MM_MMX)
2536 av_log(avctx, AV_LOG_INFO, " mmx");
2537 if (mm_flags & FF_MM_MMX2)
2538 av_log(avctx, AV_LOG_INFO, " mmx2");
2539 if (mm_flags & FF_MM_3DNOW)
2540 av_log(avctx, AV_LOG_INFO, " 3dnow");
2541 if (mm_flags & FF_MM_SSE)
2542 av_log(avctx, AV_LOG_INFO, " sse");
2543 if (mm_flags & FF_MM_SSE2)
2544 av_log(avctx, AV_LOG_INFO, " sse2");
2545 av_log(avctx, AV_LOG_INFO, "\n");
2548 if (mm_flags & FF_MM_MMX) {
2549 const int idct_algo= avctx->idct_algo;
2551 if(avctx->lowres==0){
2552 if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
2553 c->idct_put= ff_simple_idct_put_mmx;
2554 c->idct_add= ff_simple_idct_add_mmx;
2555 c->idct = ff_simple_idct_mmx;
2556 c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
2557 #if CONFIG_GPL
2558 }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
2559 if(mm_flags & FF_MM_MMX2){
2560 c->idct_put= ff_libmpeg2mmx2_idct_put;
2561 c->idct_add= ff_libmpeg2mmx2_idct_add;
2562 c->idct = ff_mmxext_idct;
2563 }else{
2564 c->idct_put= ff_libmpeg2mmx_idct_put;
2565 c->idct_add= ff_libmpeg2mmx_idct_add;
2566 c->idct = ff_mmx_idct;
2567 }
2568 c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
2569 #endif
2570 }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) &&
2571 idct_algo==FF_IDCT_VP3 && HAVE_YASM){
2572 if(mm_flags & FF_MM_SSE2){
2573 c->idct_put= ff_vp3_idct_put_sse2;
2574 c->idct_add= ff_vp3_idct_add_sse2;
2575 c->idct = ff_vp3_idct_sse2;
2576 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
2577 }else{
2578 c->idct_put= ff_vp3_idct_put_mmx;
2579 c->idct_add= ff_vp3_idct_add_mmx;
2580 c->idct = ff_vp3_idct_mmx;
2581 c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
2582 }
2583 }else if(idct_algo==FF_IDCT_CAVS){
2584 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
2585 }else if(idct_algo==FF_IDCT_XVIDMMX){
2586 if(mm_flags & FF_MM_SSE2){
2587 c->idct_put= ff_idct_xvid_sse2_put;
2588 c->idct_add= ff_idct_xvid_sse2_add;
2589 c->idct = ff_idct_xvid_sse2;
2590 c->idct_permutation_type= FF_SSE2_IDCT_PERM;
2591 }else if(mm_flags & FF_MM_MMX2){
2592 c->idct_put= ff_idct_xvid_mmx2_put;
2593 c->idct_add= ff_idct_xvid_mmx2_add;
2594 c->idct = ff_idct_xvid_mmx2;
2595 }else{
2596 c->idct_put= ff_idct_xvid_mmx_put;
2597 c->idct_add= ff_idct_xvid_mmx_add;
2598 c->idct = ff_idct_xvid_mmx;
2599 }
2600 }
2601 }
2603 c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
2604 c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
2605 c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
2606 c->clear_block = clear_block_mmx;
2607 c->clear_blocks = clear_blocks_mmx;
2608 if ((mm_flags & FF_MM_SSE) &&
2609 !(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
2610 /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
2611 c->clear_block = clear_block_sse;
2612 c->clear_blocks = clear_blocks_sse;
2613 }
2615 #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
2616 c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
2617 c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
2618 c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
2619 c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
2621 SET_HPEL_FUNCS(put, 0, 16, mmx);
2622 SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
2623 SET_HPEL_FUNCS(avg, 0, 16, mmx);
2624 SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
2625 SET_HPEL_FUNCS(put, 1, 8, mmx);
2626 SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
2627 SET_HPEL_FUNCS(avg, 1, 8, mmx);
2628 SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
2630 c->gmc= gmc_mmx;
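/* One expansion for reference: SET_HPEL_FUNCS(put, 0, 16, mmx) assigns
   put_pixels16_mmx, put_pixels16_x2_mmx, put_pixels16_y2_mmx and
   put_pixels16_xy2_mmx to c->put_pixels_tab[0][0..3]. */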
2632 c->add_bytes= add_bytes_mmx;
2633 c->add_bytes_l2= add_bytes_l2_mmx;
2635 c->draw_edges = draw_edges_mmx;
2637 if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
2638 c->h263_v_loop_filter= h263_v_loop_filter_mmx;
2639 c->h263_h_loop_filter= h263_h_loop_filter_mmx;
2640 }
2642 #if HAVE_YASM
2643 c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_mmx_rnd;
2644 c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_mmx;
2645 c->put_no_rnd_vc1_chroma_pixels_tab[0]= ff_put_vc1_chroma_mc8_mmx_nornd;
2647 c->put_rv40_chroma_pixels_tab[0]= ff_put_rv40_chroma_mc8_mmx;
2648 c->put_rv40_chroma_pixels_tab[1]= ff_put_rv40_chroma_mc4_mmx;
2649 #endif
2651 if (mm_flags & FF_MM_MMX2) {
2652 c->prefetch = prefetch_mmx2;
2654 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
2655 c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
2657 c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
2658 c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
2659 c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
2661 c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
2662 c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
2664 c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
2665 c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
2666 c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
2668 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
2669 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
2670 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
2671 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
2672 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
2673 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
2674 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
2676 if (CONFIG_VP3_DECODER && HAVE_YASM) {
2677 c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
2678 c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
2679 }
2680 }
2681 if (CONFIG_VP3_DECODER && HAVE_YASM) {
2682 c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
2683 }
2685 if (CONFIG_VP3_DECODER
2686 && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
2687 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
2688 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2;
2689 }
2691 #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
2692 c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
2693 c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
2694 c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
2695 c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
2696 c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
2697 c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
2698 c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
2699 c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
2700 c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
2701 c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
2702 c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
2703 c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
2704 c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
2705 c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
2706 c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
2707 c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU
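/* e.g. SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2) fills all 16 quarter-pel
   positions of c->put_qpel_pixels_tab[0], from [0] = put_qpel16_mc00_mmx2
   through [15] = put_qpel16_mc33_mmx2 (index = x + y*4). */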
2709 SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
2710 SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
2711 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
2712 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
2713 SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
2714 SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);
2716 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
2717 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
2718 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
2719 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
2720 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
2721 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);
2723 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
2724 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
2725 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
2726 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);
2728 #if HAVE_YASM
2729 c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_mmx2;
2730 c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_mmx2;
2732 c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_mmx2_nornd;
2734 c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_mmx2_rnd;
2735 c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_mmx2;
2736 c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_mmx2;
2737 c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_mmx2;
2739 c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
2740 #endif
2741 #if HAVE_7REGS && HAVE_TEN_OPERANDS
2742 if( mm_flags&FF_MM_3DNOW )
2743 c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
2744 #endif
2746 if (CONFIG_VC1_DECODER)
2747 ff_vc1dsp_init_mmx(c, avctx);
2749 c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
2750 } else if (mm_flags & FF_MM_3DNOW) {
2751 c->prefetch = prefetch_3dnow;
2753 c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
2754 c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
2756 c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
2757 c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
2758 c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
2760 c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
2761 c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
2763 c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
2764 c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
2765 c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
2767 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
2768 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
2769 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
2770 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
2771 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
2772 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
2773 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
2774 }
2776 if (CONFIG_VP3_DECODER
2777 && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
2778 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
2779 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
2780 }
2782 SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
2783 SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
2784 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
2785 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
2786 SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
2787 SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);
2789 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
2790 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
2791 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
2792 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
2793 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
2794 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);
2796 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
2797 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
2798 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
2799 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);
2801 #if HAVE_YASM
2802 c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_3dnow_rnd;
2803 c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_3dnow;
2805 c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_3dnow_nornd;
2807 c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_3dnow;
2808 c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_3dnow;
2809 #endif
2810 }
2813 #define H264_QPEL_FUNCS(x, y, CPU)\
2814 c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
2815 c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
2816 c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
2817 c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
2818 if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){
2819 // these functions are slower than mmx on AMD, but faster on Intel
2820 c->put_pixels_tab[0][0] = put_pixels16_sse2;
2821 c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
2822 H264_QPEL_FUNCS(0, 0, sse2);
2823 }
2824 if(mm_flags & FF_MM_SSE2){
2825 H264_QPEL_FUNCS(0, 1, sse2);
2826 H264_QPEL_FUNCS(0, 2, sse2);
2827 H264_QPEL_FUNCS(0, 3, sse2);
2828 H264_QPEL_FUNCS(1, 1, sse2);
2829 H264_QPEL_FUNCS(1, 2, sse2);
2830 H264_QPEL_FUNCS(1, 3, sse2);
2831 H264_QPEL_FUNCS(2, 1, sse2);
2832 H264_QPEL_FUNCS(2, 2, sse2);
2833 H264_QPEL_FUNCS(2, 3, sse2);
2834 H264_QPEL_FUNCS(3, 1, sse2);
2835 H264_QPEL_FUNCS(3, 2, sse2);
2836 H264_QPEL_FUNCS(3, 3, sse2);
2837 }
2839 if(mm_flags & FF_MM_SSSE3){
2840 H264_QPEL_FUNCS(1, 0, ssse3);
2841 H264_QPEL_FUNCS(1, 1, ssse3);
2842 H264_QPEL_FUNCS(1, 2, ssse3);
2843 H264_QPEL_FUNCS(1, 3, ssse3);
2844 H264_QPEL_FUNCS(2, 0, ssse3);
2845 H264_QPEL_FUNCS(2, 1, ssse3);
2846 H264_QPEL_FUNCS(2, 2, ssse3);
2847 H264_QPEL_FUNCS(2, 3, ssse3);
2848 H264_QPEL_FUNCS(3, 0, ssse3);
2849 H264_QPEL_FUNCS(3, 1, ssse3);
2850 H264_QPEL_FUNCS(3, 2, ssse3);
2851 H264_QPEL_FUNCS(3, 3, ssse3);
2852 c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
2853 #if HAVE_YASM
2854 c->put_no_rnd_vc1_chroma_pixels_tab[0]= ff_put_vc1_chroma_mc8_ssse3_nornd;
2855 c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_ssse3_nornd;
2856 c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_ssse3_rnd;
2857 c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_ssse3_rnd;
2858 c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_ssse3;
2859 c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_ssse3;
2860 c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
2861 if (mm_flags & FF_MM_SSE4) // not really sse4, just slow on Conroe
2862 c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
2863 #endif
2864 }
2867 if(mm_flags & FF_MM_3DNOW){
2868 c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
2869 c->vector_fmul = vector_fmul_3dnow;
2870 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
2871 c->float_to_int16 = float_to_int16_3dnow;
2872 c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
2873 }
2874 }
2875 if(mm_flags & FF_MM_3DNOWEXT){
2876 c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
2877 c->vector_fmul_window = vector_fmul_window_3dnow2;
2878 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
2879 c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
2880 }
2881 }
2882 if(mm_flags & FF_MM_MMX2){
2883 #if HAVE_YASM
2884 c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
2885 c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
2886 #endif
2887 }
2888 if(mm_flags & FF_MM_SSE){
2889 c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
2890 c->ac3_downmix = ac3_downmix_sse;
2891 c->vector_fmul = vector_fmul_sse;
2892 c->vector_fmul_reverse = vector_fmul_reverse_sse;
2893 c->vector_fmul_add = vector_fmul_add_sse;
2894 c->vector_fmul_window = vector_fmul_window_sse;
2895 c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
2896 c->vector_clipf = vector_clipf_sse;
2897 c->float_to_int16 = float_to_int16_sse;
2898 c->float_to_int16_interleave = float_to_int16_interleave_sse;
2899 #if HAVE_YASM
2900 c->scalarproduct_float = ff_scalarproduct_float_sse;
2901 #endif
2902 }
2903 if(mm_flags & FF_MM_3DNOW)
2904 c->vector_fmul_add = vector_fmul_add_3dnow; // faster than sse
2905 if(mm_flags & FF_MM_SSE2){
2906 c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
2907 c->float_to_int16 = float_to_int16_sse2;
2908 c->float_to_int16_interleave = float_to_int16_interleave_sse2;
2909 #if HAVE_YASM
2910 c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
2911 c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
2912 #endif
2913 }
2914 if((mm_flags & FF_MM_SSSE3) && !(mm_flags & (FF_MM_SSE42|FF_MM_3DNOW)) && HAVE_YASM) // cachesplit
2915 c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
2916 }
2918 if (CONFIG_ENCODERS)
2919 dsputilenc_init_mmx(c, avctx);
2921 #if 0
2922 // for speed testing
2923 get_pixels = just_return;
2924 put_pixels_clamped = just_return;
2925 add_pixels_clamped = just_return;
2927 pix_abs16x16 = just_return;
2928 pix_abs16x16_x2 = just_return;
2929 pix_abs16x16_y2 = just_return;
2930 pix_abs16x16_xy2 = just_return;
2932 put_pixels_tab[0] = just_return;
2933 put_pixels_tab[1] = just_return;
2934 put_pixels_tab[2] = just_return;
2935 put_pixels_tab[3] = just_return;
2937 put_no_rnd_pixels_tab[0] = just_return;
2938 put_no_rnd_pixels_tab[1] = just_return;
2939 put_no_rnd_pixels_tab[2] = just_return;
2940 put_no_rnd_pixels_tab[3] = just_return;
2942 avg_pixels_tab[0] = just_return;
2943 avg_pixels_tab[1] = just_return;
2944 avg_pixels_tab[2] = just_return;
2945 avg_pixels_tab[3] = just_return;
2947 avg_no_rnd_pixels_tab[0] = just_return;
2948 avg_no_rnd_pixels_tab[1] = just_return;
2949 avg_no_rnd_pixels_tab[2] = just_return;
2950 avg_no_rnd_pixels_tab[3] = just_return;
2952 //av_fdct = just_return;
2953 //ff_idct = just_return;
2954 #endif