/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h263.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "vp3dsp_mmx.h"
#include "vp3dsp_sse2.h"
#include "vp6dsp_mmx.h"
#include "vp6dsp_sse2.h"
#include "idct_xvid.h"

int mm_flags; /* multimedia extension flags */
/* pixel operations */
DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000[2]) =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_4  ) = 0x0004000400040004ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1  ) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3  ) = 0x0303030303030303ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED_16(const double, ff_pd_1[2]) = { 1.0, 1.0 };
DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };
#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for shared libraries it is better to access constants this way
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)
#endif
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pand " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "por " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"
/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx
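
/* Reference sketch (not built): every put_pixels* alias above is a plain
   block copy; rounding only matters for the interpolating (x2/y2/xy2)
   variants, so the rounded and no-rounding full-pel versions can share
   one implementation. Plain-C restatement (assumes <string.h> memcpy): */
#if 0
static void put_pixels8_c_ref(uint8_t *block, const uint8_t *pixels,
                              int line_size, int h)
{
    int i;
    for (i = 0; i < h; i++) {       /* one 8-byte row per iteration */
        memcpy(block, pixels, 8);
        block  += line_size;
        pixels += line_size;
    }
}
#endif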
/***********************************/
/* standard MMX */

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile(
            "movq %3, %%mm0 \n\t"
            "movq 8%3, %%mm1 \n\t"
            "movq 16%3, %%mm2 \n\t"
            "movq 24%3, %%mm3 \n\t"
            "movq 32%3, %%mm4 \n\t"
            "movq 40%3, %%mm5 \n\t"
            "movq 48%3, %%mm6 \n\t"
            "movq 56%3, %%mm7 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "packuswb %%mm5, %%mm4 \n\t"
            "packuswb %%mm7, %%mm6 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm2, (%0, %1) \n\t"
            "movq %%mm4, (%0, %1, 2) \n\t"
            "movq %%mm6, (%0, %2) \n\t"
            ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
            :"memory");
    pix += line_size*4;
    p += 32;
    // if this were an exact copy of the code above, the compiler
    // would generate some very strange code
    // thus using "r"
    __asm__ volatile(
            "movq (%3), %%mm0 \n\t"
            "movq 8(%3), %%mm1 \n\t"
            "movq 16(%3), %%mm2 \n\t"
            "movq 24(%3), %%mm3 \n\t"
            "movq 32(%3), %%mm4 \n\t"
            "movq 40(%3), %%mm5 \n\t"
            "movq 48(%3), %%mm6 \n\t"
            "movq 56(%3), %%mm7 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "packuswb %%mm5, %%mm4 \n\t"
            "packuswb %%mm7, %%mm6 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm2, (%0, %1) \n\t"
            "movq %%mm4, (%0, %1, 2) \n\t"
            "movq %%mm6, (%0, %2) \n\t"
            ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
            :"memory");
}
DECLARE_ASM_CONST(8, uint8_t, ff_vector128[8]) =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

#define put_signed_pixels_clamped_mmx_half(off) \
    "movq "#off"(%2), %%mm1 \n\t"\
    "movq 16+"#off"(%2), %%mm2 \n\t"\
    "movq 32+"#off"(%2), %%mm3 \n\t"\
    "movq 48+"#off"(%2), %%mm4 \n\t"\
    "packsswb 8+"#off"(%2), %%mm1 \n\t"\
    "packsswb 24+"#off"(%2), %%mm2 \n\t"\
    "packsswb 40+"#off"(%2), %%mm3 \n\t"\
    "packsswb 56+"#off"(%2), %%mm4 \n\t"\
    "paddb %%mm0, %%mm1 \n\t"\
    "paddb %%mm0, %%mm2 \n\t"\
    "paddb %%mm0, %%mm3 \n\t"\
    "paddb %%mm0, %%mm4 \n\t"\
    "movq %%mm1, (%0) \n\t"\
    "movq %%mm2, (%0, %3) \n\t"\
    "movq %%mm3, (%0, %3, 2) \n\t"\
    "movq %%mm4, (%0, %1) \n\t"
void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
            "movq "MANGLE(ff_vector128)", %%mm0 \n\t"
            "lea (%3, %3, 2), %1 \n\t"
            put_signed_pixels_clamped_mmx_half(0)
            "lea (%0, %3, 4), %0 \n\t"
            put_signed_pixels_clamped_mmx_half(64)
            :"+&r" (pixels), "=&r" (line_skip3)
            :"r" (block), "r"(line_skip)
            :"memory");
}
void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
                "movq (%2), %%mm0 \n\t"
                "movq 8(%2), %%mm1 \n\t"
                "movq 16(%2), %%mm2 \n\t"
                "movq 24(%2), %%mm3 \n\t"
                "movq %0, %%mm4 \n\t"
                "movq %1, %%mm6 \n\t"
                "movq %%mm4, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm4, %%mm0 \n\t"
                "paddsw %%mm5, %%mm1 \n\t"
                "movq %%mm6, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm6 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm6, %%mm2 \n\t"
                "paddsw %%mm5, %%mm3 \n\t"
                "packuswb %%mm1, %%mm0 \n\t"
                "packuswb %%mm3, %%mm2 \n\t"
                "movq %%mm0, %0 \n\t"
                "movq %%mm2, %1 \n\t"
                :"+m"(*pix), "+m"(*(pix+line_size))
                :"r"(p)
                :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a" \n\t"
         ASMALIGN(3)
         "1: \n\t"
         "movd (%1), %%mm0 \n\t"
         "movd (%1, %3), %%mm1 \n\t"
         "movd %%mm0, (%2) \n\t"
         "movd %%mm1, (%2, %3) \n\t"
         "add %%"REG_a", %1 \n\t"
         "add %%"REG_a", %2 \n\t"
         "movd (%1), %%mm0 \n\t"
         "movd (%1, %3), %%mm1 \n\t"
         "movd %%mm0, (%2) \n\t"
         "movd %%mm1, (%2, %3) \n\t"
         "add %%"REG_a", %1 \n\t"
         "add %%"REG_a", %2 \n\t"
         "subl $4, %0 \n\t"
         "jnz 1b \n\t"
         : "+g"(h), "+r" (pixels), "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a" \n\t"
         ASMALIGN(3)
         "1: \n\t"
         "movq (%1), %%mm0 \n\t"
         "movq (%1, %3), %%mm1 \n\t"
         "movq %%mm0, (%2) \n\t"
         "movq %%mm1, (%2, %3) \n\t"
         "add %%"REG_a", %1 \n\t"
         "add %%"REG_a", %2 \n\t"
         "movq (%1), %%mm0 \n\t"
         "movq (%1, %3), %%mm1 \n\t"
         "movq %%mm0, (%2) \n\t"
         "movq %%mm1, (%2, %3) \n\t"
         "add %%"REG_a", %1 \n\t"
         "add %%"REG_a", %2 \n\t"
         "subl $4, %0 \n\t"
         "jnz 1b \n\t"
         : "+g"(h), "+r" (pixels), "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}
static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a" \n\t"
         ASMALIGN(3)
         "1: \n\t"
         "movq (%1), %%mm0 \n\t"
         "movq 8(%1), %%mm4 \n\t"
         "movq (%1, %3), %%mm1 \n\t"
         "movq 8(%1, %3), %%mm5 \n\t"
         "movq %%mm0, (%2) \n\t"
         "movq %%mm4, 8(%2) \n\t"
         "movq %%mm1, (%2, %3) \n\t"
         "movq %%mm5, 8(%2, %3) \n\t"
         "add %%"REG_a", %1 \n\t"
         "add %%"REG_a", %2 \n\t"
         "movq (%1), %%mm0 \n\t"
         "movq 8(%1), %%mm4 \n\t"
         "movq (%1, %3), %%mm1 \n\t"
         "movq 8(%1, %3), %%mm5 \n\t"
         "movq %%mm0, (%2) \n\t"
         "movq %%mm4, 8(%2) \n\t"
         "movq %%mm1, (%2, %3) \n\t"
         "movq %%mm5, 8(%2, %3) \n\t"
         "add %%"REG_a", %1 \n\t"
         "add %%"REG_a", %2 \n\t"
         "subl $4, %0 \n\t"
         "jnz 1b \n\t"
         : "+g"(h), "+r" (pixels), "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}
static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
    );
}
static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "pavgb (%2), %%xmm0 \n\t"
        "pavgb (%2,%3), %%xmm1 \n\t"
        "pavgb (%2,%3,2), %%xmm2 \n\t"
        "pavgb (%2,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
    );
}
#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
                "pxor %%mm7, %%mm7 \n\t"\
                "mov %1, %%"REG_a" \n\t"\
                "1: \n\t"\
                "movq %%mm7, (%0, %%"REG_a") \n\t"\
                "movq %%mm7, 8(%0, %%"REG_a") \n\t"\
                "movq %%mm7, 16(%0, %%"REG_a") \n\t"\
                "movq %%mm7, 24(%0, %%"REG_a") \n\t"\
                "add $32, %%"REG_a" \n\t"\
                " js 1b \n\t"\
                : : "r" (((uint8_t *)blocks)+128*n),\
                    "i" (-128*n)\
                : "%"REG_a\
        );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)
static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps %%xmm0, %%xmm0 \n"
        "movaps %%xmm0, (%0) \n"
        "movaps %%xmm0, 16(%0) \n"
        "movaps %%xmm0, 32(%0) \n"
        "movaps %%xmm0, 48(%0) \n"
        "movaps %%xmm0, 64(%0) \n"
        "movaps %%xmm0, 80(%0) \n"
        "movaps %%xmm0, 96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
    );
}
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %3, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}
static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%2, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb (%3, %0), %%mm0 \n\t"
        "paddb 8(%3, %0), %%mm1 \n\t"
        "movq %%mm0, (%1, %0) \n\t"
        "movq %%mm1, 8(%1, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %4, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}
#if HAVE_7REGS && HAVE_TEN_OPERANDS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, uint8_t *top, uint8_t *diff, int w, int *left, int *left_top) {
    x86_reg w2 = -w;
    x86_reg x;
    int l = *left & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile(
        "mov %7, %3 \n"
        "1: \n"
        "movzx (%3,%4), %2 \n"
        "mov %2, %k3 \n"
        "sub %b1, %b3 \n"
        "add %b0, %b3 \n"
        "mov %2, %1 \n"
        "cmp %0, %2 \n"
        "cmovg %0, %2 \n"
        "cmovg %1, %0 \n"
        "cmp %k3, %0 \n"
        "cmovg %k3, %0 \n"
        "mov %7, %3 \n"
        "cmp %2, %0 \n"
        "cmovl %2, %0 \n"
        "add (%6,%4), %b0 \n"
        "mov %b0, (%5,%4) \n"
        "inc %4 \n"
        "jl 1b \n"
        :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        :"r"(dst+w), "r"(diff+w), "rm"(top+w)
    );
    *left = l;
    *left_top = tl;
}
#endif
#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t"\
    "movq %0, %%mm0 \n\t"\
    "movq %0, %%mm1 \n\t"\
    "movq %3, %%mm2 \n\t"\
    "movq %3, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "psubw %%mm2, %%mm0 \n\t"\
    "psubw %%mm3, %%mm1 \n\t"\
    "movq %1, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "movq %2, %%mm5 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm7, %%mm5 \n\t"\
    "psubw %%mm2, %%mm4 \n\t"\
    "psubw %%mm3, %%mm5 \n\t"\
    "psllw $2, %%mm4 \n\t"\
    "psllw $2, %%mm5 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpgtw %%mm4, %%mm6 \n\t"\
    "pcmpgtw %%mm5, %%mm7 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "pxor %%mm7, %%mm5 \n\t"\
    "psubw %%mm6, %%mm4 \n\t"\
    "psubw %%mm7, %%mm5 \n\t"\
    "psrlw $3, %%mm4 \n\t"\
    "psrlw $3, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm4 \n\t"\
    "packsswb %%mm7, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %4, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "psubusb %%mm4, %%mm2 \n\t"\
    "movq %%mm2, %%mm3 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "psubb %%mm3, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm3 \n\t"\
    "psubusb %%mm2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm2 \n\t"\
    "packsswb %%mm1, %%mm0 \n\t"\
    "pcmpgtb %%mm0, %%mm7 \n\t"\
    "pxor %%mm7, %%mm0 \n\t"\
    "psubb %%mm7, %%mm0 \n\t"\
    "movq %%mm0, %%mm1 \n\t"\
    "psubusb %%mm2, %%mm0 \n\t"\
    "psubb %%mm0, %%mm1 \n\t"\
    "pand %5, %%mm1 \n\t"\
    "psrlw $2, %%mm1 \n\t"\
    "pxor %%mm7, %%mm1 \n\t"\
    "psubb %%mm7, %%mm1 \n\t"\
    "movq %0, %%mm5 \n\t"\
    "movq %3, %%mm6 \n\t"\
    "psubb %%mm1, %%mm5 \n\t"\
    "paddb %%mm1, %%mm6 \n\t"
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    __asm__ volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %0 \n\t"
        "movq %%mm6, %3 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}
static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    __asm__ volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd %4, %%mm0 \n\t"
        "movd %5, %%mm1 \n\t"
        "movd %6, %%mm2 \n\t"
        "movd %7, %%mm3 \n\t"
        "punpcklbw %%mm1, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "punpcklwd %%mm2, %%mm0 \n\t"
        "punpckhwd %%mm2, %%mm1 \n\t"
        "movd %%mm0, %0 \n\t"
        "punpckhdq %%mm0, %%mm0 \n\t"
        "movd %%mm0, %1 \n\t"
        "movd %%mm1, %2 \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, %3 \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        : "m" (*(uint32_t*)(src + 0*src_stride)),
          "m" (*(uint32_t*)(src + 1*src_stride)),
          "m" (*(uint32_t*)(src + 2*src_stride)),
          "m" (*(uint32_t*)(src + 3*src_stride))
    );
}
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp[4]);
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp , src , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    __asm__ volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    __asm__ volatile(
        "movq %%mm5, %%mm1 \n\t"
        "movq %%mm4, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm5 \n\t"
        "punpcklbw %%mm6, %%mm4 \n\t"
        "punpckhbw %%mm3, %%mm1 \n\t"
        "punpckhbw %%mm6, %%mm0 \n\t"
        "movq %%mm5, %%mm3 \n\t"
        "movq %%mm1, %%mm6 \n\t"
        "punpcklwd %%mm4, %%mm5 \n\t"
        "punpcklwd %%mm0, %%mm1 \n\t"
        "punpckhwd %%mm4, %%mm3 \n\t"
        "punpckhwd %%mm0, %%mm6 \n\t"
        "movd %%mm5, (%0) \n\t"
        "punpckhdq %%mm5, %%mm5 \n\t"
        "movd %%mm5, (%0,%2) \n\t"
        "movd %%mm3, (%0,%2,2) \n\t"
        "punpckhdq %%mm3, %%mm3 \n\t"
        "movd %%mm3, (%0,%3) \n\t"
        "movd %%mm1, (%1) \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, (%1,%2) \n\t"
        "movd %%mm6, (%1,%2,2) \n\t"
        "punpckhdq %%mm6, %%mm6 \n\t"
        "movd %%mm6, (%1,%3) \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg) stride ),
           "r" ((x86_reg)(3*stride))
        : "memory"
    );
    }
}
/* draw the edges of width 'w' of an image of size width, height;
   this MMX version can only handle w==8 || w==16 */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
                "1: \n\t"
                "movd (%0), %%mm0 \n\t"
                "punpcklbw %%mm0, %%mm0 \n\t"
                "punpcklwd %%mm0, %%mm0 \n\t"
                "punpckldq %%mm0, %%mm0 \n\t"
                "movq %%mm0, -8(%0) \n\t"
                "movq -8(%0, %2), %%mm1 \n\t"
                "punpckhbw %%mm1, %%mm1 \n\t"
                "punpckhwd %%mm1, %%mm1 \n\t"
                "punpckhdq %%mm1, %%mm1 \n\t"
                "movq %%mm1, (%0, %2) \n\t"
                "add %1, %0 \n\t"
                "cmp %3, %0 \n\t"
                " jb 1b \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
                );
    }
815 "movd (%0), %%mm0 \n\t"
816 "punpcklbw %%mm0, %%mm0 \n\t"
817 "punpcklwd %%mm0, %%mm0 \n\t"
818 "punpckldq %%mm0, %%mm0 \n\t"
819 "movq %%mm0, -8(%0) \n\t"
820 "movq %%mm0, -16(%0) \n\t"
821 "movq -8(%0, %2), %%mm1 \n\t"
822 "punpckhbw %%mm1, %%mm1 \n\t"
823 "punpckhwd %%mm1, %%mm1 \n\t"
824 "punpckhdq %%mm1, %%mm1 \n\t"
825 "movq %%mm1, (%0, %2) \n\t"
826 "movq %%mm1, 8(%0, %2) \n\t"
831 : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        __asm__ volatile(
                "1: \n\t"
                "movq (%1, %0), %%mm0 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm0, (%0, %2) \n\t"
                "movq %%mm0, (%0, %2, 2) \n\t"
                "movq %%mm0, (%0, %3) \n\t"
                "add $8, %0 \n\t"
                "cmp %4, %0 \n\t"
                " jb 1b \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
                );
        ptr= last_line + (i + 1) * wrap - w;
        __asm__ volatile(
                "1: \n\t"
                "movq (%1, %0), %%mm0 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm0, (%0, %2) \n\t"
                "movq %%mm0, (%0, %2, 2) \n\t"
                "movq %%mm0, (%0, %3) \n\t"
                "add $8, %0 \n\t"
                "cmp %4, %0 \n\t"
                " jb 1b \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
                );
    }
}
#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n"\
        "movd (%1,%0), %%mm0 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add %4, %0 \n"\
        "1: \n"\
        "movq %%mm1, %%mm2 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "movq %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq %%mm2, %%mm4 \n"\
        "psubw %%mm1, %%mm3 \n"\
        "psubw %%mm0, %%mm4 \n"\
        "movq %%mm3, %%mm5 \n"\
        "paddw %%mm4, %%mm5 \n"\
        abs3\
        "movq %%mm4, %%mm6 \n"\
        "pminsw %%mm5, %%mm6 \n"\
        "pcmpgtw %%mm6, %%mm3 \n"\
        "pcmpgtw %%mm5, %%mm4 \n"\
        "movq %%mm4, %%mm6 \n"\
        "pand %%mm3, %%mm4 \n"\
        "pandn %%mm3, %%mm6 \n"\
        "pandn %%mm0, %%mm3 \n"\
        "movd (%3,%0), %%mm0 \n"\
        "pand %%mm1, %%mm6 \n"\
        "pand %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq %6, %%mm5 \n"\
        "paddw %%mm6, %%mm0 \n"\
        "paddw %%mm2, %%mm3 \n"\
        "paddw %%mm3, %%mm0 \n"\
        "pand %%mm5, %%mm0 \n"\
        "movq %%mm0, %%mm3 \n"\
        "packuswb %%mm3, %%mm3 \n"\
        "movd %%mm3, (%1,%0) \n"\
        "add %4, %0 \n"\
        "cmp %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}
922 "psubw %%mm5, %%mm7 \n"\
923 "pmaxsw %%mm7, %%mm5 \n"\
924 "pxor %%mm6, %%mm6 \n"\
925 "pxor %%mm7, %%mm7 \n"\
926 "psubw %%mm3, %%mm6 \n"\
927 "psubw %%mm4, %%mm7 \n"\
928 "pmaxsw %%mm6, %%mm3 \n"\
929 "pmaxsw %%mm7, %%mm4 \n"\
930 "pxor %%mm7, %%mm7 \n"
933 "pabsw %%mm3, %%mm3 \n"\
934 "pabsw %%mm4, %%mm4 \n"\
935 "pabsw %%mm5, %%mm5 \n"
937 PAETH(mmx2, ABS3_MMX2)
939 PAETH(ssse3, ABS3_SSSE3)
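
/* Reference sketch (not built): the PNG Paeth predictor selects whichever
   of left (a), up (b) and up-left (c) is closest to p = a + b - c, and the
   asm above adds the transmitted residual to that prediction: */
#if 0
static uint8_t paeth_predict_c(int a, int b, int c)
{
    int p  = a + b - c;
    int pa = FFABS(p - a), pb = FFABS(p - b), pc = FFABS(p - c);
    if (pa <= pb && pa <= pc)
        return a;
    return pb <= pc ? b : c;
}
#endif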
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
    "movq "#in7", " #m3 " \n\t" /* d */\
    "movq "#in0", %%mm5 \n\t" /* D */\
    "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
    "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5 \n\t" /* C */\
    "movq "#in2", %%mm6 \n\t" /* B */\
    "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
    "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4 \n\t" /* 20x1 - x4 + rnd */\
    "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    OP(%%mm5, out, %%mm7, d)
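
/* Reference sketch (not built): QPEL_V_LOW evaluates one output sample of
   the MPEG-4 half-pel lowpass filter with symmetric taps (20,-6,3,-1)/32,
   where x1..x4 are sums of the sample pairs at increasing distance from
   the interpolation point: */
#if 0
static inline uint8_t qpel_lowpass_tap_c(const uint8_t *s, int stride, int rnd)
{
    int x1 = s[0]         + s[1*stride];
    int x2 = s[-1*stride] + s[2*stride];
    int x3 = s[-2*stride] + s[3*stride];
    int x4 = s[-3*stride] + s[4*stride];
    return av_clip_uint8((20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5);
}
#endif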
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %6, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        "movq %%mm0, %5 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2 \n\t" /* b */\
        "paddw %%mm5, %%mm3 \n\t" /* c */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm4 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "movq %5, %%mm1 \n\t"\
        "packuswb %%mm3, %%mm1 \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
        "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5 \n\t" /* b */\
        "paddw %%mm4, %%mm0 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2 \n\t" /* d */\
        "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6 \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
        "paddw %%mm5, %%mm3 \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4 \n\t" /* c */\
        "paddw %%mm2, %%mm5 \n\t" /* d */\
        "paddw %%mm6, %%mm6 \n\t" /* 2b */\
        "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4 \n\t"\
        "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4 \n\t"\
        "packuswb %%mm4, %%mm0 \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0 \n\t"\
            "movq 24(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %5, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movd 5(%0), %%mm5 \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm2 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3 \n\t" /* c */\
        "paddw %%mm5, %%mm4 \n\t" /* d */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "packuswb %%mm3, %%mm0 \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "movq 8(%0), %%mm2 \n\t"\
        "movq 8(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 17*8(%1) \n\t"\
        "movq %%mm2, 2*17*8(%1) \n\t"\
        "movq %%mm3, 3*17*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count= 4;\
\
/*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
        "add $136, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 9*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count= 2;\
\
/*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
\
        "add $72, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgusb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"

QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
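
/* Illustration (not built): the _mcXY suffix encodes the quarter-pel
   offset, X horizontal and Y vertical in fourths of a pixel; mc00 is the
   full-pel copy. A caller picks one of the 16 variants from a hypothetical
   table indexed as tab[4*dy + dx]: */
#if 0
static void call_qpel(qpel_mc_func *tab, uint8_t *dst, uint8_t *src,
                      int stride, int dx, int dy)
{
    tab[4*dy + dx](dst, src, stride); /* e.g. dx=1, dy=3 -> the _mc13 variant */
}
#endif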
/***********************************/
/* bilinear qpel: not compliant with any spec, only for -lavdopts fast */

#define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
}
#define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
}

#define QPEL_2TAP(OPNAME, SIZE, MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
    OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
    OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
    OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
}\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
}\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\
QPEL_2TAP(put_, 16, mmx2)
QPEL_2TAP(avg_, 16, mmx2)
QPEL_2TAP(put_, 8, mmx2)
QPEL_2TAP(avg_, 8, mmx2)
QPEL_2TAP(put_, 16, 3dnow)
QPEL_2TAP(avg_, 16, 3dnow)
QPEL_2TAP(put_, 8, 3dnow)
QPEL_2TAP(avg_, 8, 3dnow)
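
/* Illustration (not built): the "2tap" scheme trades the long qpel filter
   for a cheap weighted average of the two nearest pixels. A 3:1 weighting
   for a quarter-pel position is shown as an assumption for orientation,
   not as the exact kernel used by the _l3_ helpers: */
#if 0
static inline uint8_t bilin_qpel_c(uint8_t a, uint8_t b)
{
    return (3*a + b + 2) >> 2; /* a quarter of the way from a towards b */
}
#endif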
static void just_return(void) { return; }
static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
    const int w = 8;
    const int ix = ox>>(16+shift);
    const int iy = oy>>(16+shift);
    const int oxs = ox>>4;
    const int oys = oy>>4;
    const int dxxs = dxx>>4;
    const int dxys = dxy>>4;
    const int dyxs = dyx>>4;
    const int dyys = dyy>>4;
    const uint16_t r4[4] = {r,r,r,r};
    const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
    const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
    const uint64_t shift2 = 2*shift;
    uint8_t edge_buf[(h+1)*stride];
    int x, y;

    const int dxw = (dxx-(1<<(16+shift)))*(w-1);
    const int dyh = (dyy-(1<<(16+shift)))*(h-1);
    const int dxh = dxy*(h-1);
    const int dyw = dyx*(w-1);
    if( // non-constant fullpel offset (3% of blocks)
        ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
         (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx|dxy|dyx|dyy)&15 )
    {
        //FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
        return;
    }

    src += ix + iy*stride;
    if( (unsigned)ix >= width-w ||
        (unsigned)iy >= height-h )
    {
        ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
        src= edge_buf;
    }
1702 "movd %0, %%mm6 \n\t"
1703 "pxor %%mm7, %%mm7 \n\t"
1704 "punpcklwd %%mm6, %%mm6 \n\t"
1705 "punpcklwd %%mm6, %%mm6 \n\t"
1709 for(x=0; x<w; x+=4){
1710 uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
1711 oxs - dxys + dxxs*(x+1),
1712 oxs - dxys + dxxs*(x+2),
1713 oxs - dxys + dxxs*(x+3) };
1714 uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
1715 oys - dyys + dyxs*(x+1),
1716 oys - dyys + dyxs*(x+2),
1717 oys - dyys + dyxs*(x+3) };
1721 "movq %0, %%mm4 \n\t"
1722 "movq %1, %%mm5 \n\t"
1723 "paddw %2, %%mm4 \n\t"
1724 "paddw %3, %%mm5 \n\t"
1725 "movq %%mm4, %0 \n\t"
1726 "movq %%mm5, %1 \n\t"
1727 "psrlw $12, %%mm4 \n\t"
1728 "psrlw $12, %%mm5 \n\t"
1729 : "+m"(*dx4), "+m"(*dy4)
1730 : "m"(*dxy4), "m"(*dyy4)
1734 "movq %%mm6, %%mm2 \n\t"
1735 "movq %%mm6, %%mm1 \n\t"
1736 "psubw %%mm4, %%mm2 \n\t"
1737 "psubw %%mm5, %%mm1 \n\t"
1738 "movq %%mm2, %%mm0 \n\t"
1739 "movq %%mm4, %%mm3 \n\t"
1740 "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
1741 "pmullw %%mm5, %%mm3 \n\t" // dx*dy
1742 "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
1743 "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
1745 "movd %4, %%mm5 \n\t"
1746 "movd %3, %%mm4 \n\t"
1747 "punpcklbw %%mm7, %%mm5 \n\t"
1748 "punpcklbw %%mm7, %%mm4 \n\t"
1749 "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
1750 "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
1752 "movd %2, %%mm5 \n\t"
1753 "movd %1, %%mm4 \n\t"
1754 "punpcklbw %%mm7, %%mm5 \n\t"
1755 "punpcklbw %%mm7, %%mm4 \n\t"
1756 "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
1757 "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
1758 "paddw %5, %%mm1 \n\t"
1759 "paddw %%mm3, %%mm2 \n\t"
1760 "paddw %%mm1, %%mm0 \n\t"
1761 "paddw %%mm2, %%mm0 \n\t"
1763 "psrlw %6, %%mm0 \n\t"
1764 "packuswb %%mm0, %%mm0 \n\t"
1765 "movd %%mm0, %0 \n\t"
1767 : "=m"(dst[x+y*stride])
1768 : "m"(src[0]), "m"(src[1]),
1769 "m"(src[stride]), "m"(src[stride+1]),
1770 "m"(*r4), "m"(shift2)
#define PREFETCH(name, op) \
static void name(void *mem, int stride, int h){\
    const uint8_t *p= mem;\
    do{\
        __asm__ volatile(#op" %0" :: "m"(*p));\
        p+= stride;\
    }while(--h);\
}
PREFETCH(prefetch_mmx2,  prefetcht0)
PREFETCH(prefetch_3dnow, prefetch)
#undef PREFETCH

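/* Each expansion issues one prefetch hint per row: prefetcht0 is the SSE
 * hint that pulls data into all cache levels, while the 3DNow! build uses
 * AMD's plain "prefetch" opcode (available since the K6-2). */
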
#include "h264dsp_mmx.c"
#include "rv40dsp_mmx.c"

/* CAVS specific */
void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx);
void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx);

void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels8_mmx(dst, src, stride, 8);
}
void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels16_mmx(dst, src, stride, 16);
}
void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels16_mmx(dst, src, stride, 16);
}

/* VC1 specific */
void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx);

void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    put_pixels8_mmx(dst, src, stride, 8);
}

/* external functions, from idct_mmx.c */
void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);

/* XXX: these functions should be removed as soon as all IDCTs are
   converted */
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}

static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}

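/* All of these wrappers follow the same pattern: run the chosen IDCT on the
 * coefficient block in place, then either overwrite the destination with the
 * clamped result (_put) or sum the residual into it (_add). */
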
static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
{
    int i;
    __asm__ volatile("pxor %%mm7, %%mm7":);
    for(i=0; i<blocksize; i+=2) {
        __asm__ volatile(
            "movq    %0,     %%mm0 \n\t"
            "movq    %1,     %%mm1 \n\t"
            "movq    %%mm0,  %%mm2 \n\t"
            "movq    %%mm1,  %%mm3 \n\t"
            "pfcmpge %%mm7,  %%mm2 \n\t" // m <= 0.0
            "pfcmpge %%mm7,  %%mm3 \n\t" // a <= 0.0
            "pslld   $31,    %%mm2 \n\t" // keep only the sign bit
            "pxor    %%mm2,  %%mm1 \n\t"
            "movq    %%mm3,  %%mm4 \n\t"
            "pand    %%mm1,  %%mm3 \n\t"
            "pandn   %%mm1,  %%mm4 \n\t"
            "pfadd   %%mm0,  %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "pfsub   %%mm4,  %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movq    %%mm3,  %1    \n\t"
            "movq    %%mm0,  %0    \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
    __asm__ volatile("femms");
}

static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
{
    int i;

    __asm__ volatile(
        "movaps  %0,     %%xmm5 \n\t"
        ::"m"(ff_pdw_80000000[0])
    );
    for(i=0; i<blocksize; i+=4) {
        __asm__ volatile(
            "movaps  %0,     %%xmm0 \n\t"
            "movaps  %1,     %%xmm1 \n\t"
            "xorps   %%xmm2, %%xmm2 \n\t"
            "xorps   %%xmm3, %%xmm3 \n\t"
            "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
            "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
            "andps   %%xmm5, %%xmm2 \n\t" // keep only the sign bit
            "xorps   %%xmm2, %%xmm1 \n\t"
            "movaps  %%xmm3, %%xmm4 \n\t"
            "andps   %%xmm1, %%xmm3 \n\t"
            "andnps  %%xmm1, %%xmm4 \n\t"
            "addps   %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "subps   %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movaps  %%xmm3, %1     \n\t"
            "movaps  %%xmm0, %0     \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
}

#define IF1(x) x
#define IF0(x)

#define MIX5(mono,stereo)\
    __asm__ volatile(\
        "movss          0(%2), %%xmm5 \n"\
        "movss          8(%2), %%xmm6 \n"\
        "movss         24(%2), %%xmm7 \n"\
        "shufps    $0, %%xmm5, %%xmm5 \n"\
        "shufps    $0, %%xmm6, %%xmm6 \n"\
        "shufps    $0, %%xmm7, %%xmm7 \n"\
        "1: \n"\
        "movaps       (%0,%1), %%xmm0 \n"\
        "movaps  0x400(%0,%1), %%xmm1 \n"\
        "movaps  0x800(%0,%1), %%xmm2 \n"\
        "movaps  0xc00(%0,%1), %%xmm3 \n"\
        "movaps 0x1000(%0,%1), %%xmm4 \n"\
        "mulps         %%xmm5, %%xmm0 \n"\
        "mulps         %%xmm6, %%xmm1 \n"\
        "mulps         %%xmm5, %%xmm2 \n"\
        "mulps         %%xmm7, %%xmm3 \n"\
        "mulps         %%xmm7, %%xmm4 \n"\
        stereo("addps  %%xmm1, %%xmm0 \n")\
        "addps         %%xmm1, %%xmm2 \n"\
        "addps         %%xmm3, %%xmm0 \n"\
        "addps         %%xmm4, %%xmm2 \n"\
        mono("addps    %%xmm2, %%xmm0 \n")\
        "movaps %%xmm0,       (%0,%1) \n"\
        stereo("movaps %%xmm2, 0x400(%0,%1) \n")\
        "add $16, %0 \n"\
        "jl 1b \n"\
        :"+&r"(i)\
        :"r"(samples[0]+len), "r"(matrix)\
        :"memory"\
    );

#define MIX_MISC(stereo)\
    __asm__ volatile(\
        "1: \n"\
        "movaps      (%3,%0), %%xmm0 \n"\
        stereo("movaps %%xmm0, %%xmm1 \n")\
        "mulps        %%xmm6, %%xmm0 \n"\
        stereo("mulps %%xmm7, %%xmm1 \n")\
        "lea     1024(%3,%0), %1 \n"\
        "mov              %5, %2 \n"\
        "2: \n"\
        "movaps         (%1), %%xmm2 \n"\
        stereo("movaps %%xmm2, %%xmm3 \n")\
        "mulps       (%4,%2), %%xmm2 \n"\
        stereo("mulps 16(%4,%2), %%xmm3 \n")\
        "addps        %%xmm2, %%xmm0 \n"\
        stereo("addps %%xmm3, %%xmm1 \n")\
        "add $4096, %1 \n"\
        "add $16, %2 \n"\
        "jl 2b \n"\
        "movaps %%xmm0,     (%3,%0) \n"\
        stereo("movaps %%xmm1, 1024(%3,%0) \n")\
        "add $16, %0 \n"\
        "jl 1b \n"\
        :"+&r"(i), "=&r"(j), "=&r"(k)\
        :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
        :"memory"\
    );

static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
{
    int (*matrix_cmp)[2] = (int(*)[2])matrix;
    intptr_t i,j,k;

    i = -len*sizeof(float);
    if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
        MIX5(IF0,IF1);
    } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
        MIX5(IF1,IF0);
    } else {
        DECLARE_ALIGNED_16(float, matrix_simd[in_ch][2][4]);
        j = 2*in_ch*sizeof(float);
        __asm__ volatile(
            "1: \n"
            "sub $8, %0 \n"
            "movss     (%2,%0), %%xmm6 \n"
            "movss    4(%2,%0), %%xmm7 \n"
            "shufps $0, %%xmm6, %%xmm6 \n"
            "shufps $0, %%xmm7, %%xmm7 \n"
            "movaps %%xmm6,   (%1,%0,4) \n"
            "movaps %%xmm7, 16(%1,%0,4) \n"
            "jg 1b \n"
            :"+&r"(j)
            :"r"(matrix_simd), "r"(matrix)
            :"memory"
        );
        if(out_ch == 2) {
            MIX_MISC(IF1);
        } else {
            MIX_MISC(IF0);
        }
    }
}

static void vector_fmul_3dnow(float *dst, const float *src, int len){
    x86_reg i = (len-4)*4;
    __asm__ volatile(
        "1: \n\t"
        "movq     (%1,%0), %%mm0 \n\t"
        "movq    8(%1,%0), %%mm1 \n\t"
        "pfmul    (%2,%0), %%mm0 \n\t"
        "pfmul   8(%2,%0), %%mm1 \n\t"
        "movq    %%mm0,  (%1,%0) \n\t"
        "movq    %%mm1, 8(%1,%0) \n\t"
        "sub $16, %0 \n\t"
        "jge 1b \n\t"
        "femms \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}

static void vector_fmul_sse(float *dst, const float *src, int len){
    x86_reg i = (len-8)*4;
    __asm__ volatile(
        "1: \n\t"
        "movaps    (%1,%0), %%xmm0 \n\t"
        "movaps  16(%1,%0), %%xmm1 \n\t"
        "mulps     (%2,%0), %%xmm0 \n\t"
        "mulps   16(%2,%0), %%xmm1 \n\t"
        "movaps  %%xmm0,   (%1,%0) \n\t"
        "movaps  %%xmm1, 16(%1,%0) \n\t"
        "sub $32, %0 \n\t"
        "jge 1b \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}

static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-16;
    __asm__ volatile(
        "1: \n\t"
        "pswapd   8(%1), %%mm0 \n\t"
        "pswapd    (%1), %%mm1 \n\t"
        "pfmul  (%3,%0), %%mm0 \n\t"
        "pfmul 8(%3,%0), %%mm1 \n\t"
        "movq  %%mm0,  (%2,%0) \n\t"
        "movq  %%mm1, 8(%2,%0) \n\t"
        "add   $16, %1 \n\t"
        "sub   $16, %0 \n\t"
        "jge   1b \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
    __asm__ volatile("femms");
}

static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-32;
    __asm__ volatile(
        "1: \n\t"
        "movaps        16(%1), %%xmm0 \n\t"
        "movaps          (%1), %%xmm1 \n\t"
        "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
        "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
        "mulps        (%3,%0), %%xmm0 \n\t"
        "mulps      16(%3,%0), %%xmm1 \n\t"
        "movaps  %%xmm0,   (%2,%0) \n\t"
        "movaps  %%xmm1, 16(%2,%0) \n\t"
        "add $32, %1 \n\t"
        "sub $32, %0 \n\t"
        "jge 1b \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
}

static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1,
                                      const float *src2, int src3, int len, int step){
    x86_reg i = (len-4)*4;
    if(step == 2 && src3 == 0){
        dst += (len-4)*2;
        __asm__ volatile(
            "1: \n\t"
            "movq    (%2,%0), %%mm0 \n\t"
            "movq   8(%2,%0), %%mm1 \n\t"
            "pfmul   (%3,%0), %%mm0 \n\t"
            "pfmul  8(%3,%0), %%mm1 \n\t"
            "pfadd   (%4,%0), %%mm0 \n\t"
            "pfadd  8(%4,%0), %%mm1 \n\t"
            "movd  %%mm0,   (%1) \n\t"
            "movd  %%mm1, 16(%1) \n\t"
            "psrlq $32,   %%mm0  \n\t"
            "psrlq $32,   %%mm1  \n\t"
            "movd  %%mm0,  8(%1) \n\t"
            "movd  %%mm1, 24(%1) \n\t"
            "sub  $32, %1 \n\t"
            "sub  $16, %0 \n\t"
            "jge  1b \n\t"
            :"+r"(i), "+r"(dst)
            :"r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else if(step == 1 && src3 == 0){
        __asm__ volatile(
            "1: \n\t"
            "movq    (%2,%0), %%mm0 \n\t"
            "movq   8(%2,%0), %%mm1 \n\t"
            "pfmul   (%3,%0), %%mm0 \n\t"
            "pfmul  8(%3,%0), %%mm1 \n\t"
            "pfadd   (%4,%0), %%mm0 \n\t"
            "pfadd  8(%4,%0), %%mm1 \n\t"
            "movq  %%mm0,   (%1,%0) \n\t"
            "movq  %%mm1,  8(%1,%0) \n\t"
            "sub  $16, %0 \n\t"
            "jge  1b \n\t"
            :"+r"(i)
            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else
        ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
    __asm__ volatile("femms");
}

static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1,
                                    const float *src2, int src3, int len, int step){
    x86_reg i = (len-8)*4;
    if(step == 2 && src3 == 0){
        dst += (len-8)*2;
        __asm__ volatile(
            "1: \n\t"
            "movaps   (%2,%0), %%xmm0 \n\t"
            "movaps 16(%2,%0), %%xmm1 \n\t"
            "mulps    (%3,%0), %%xmm0 \n\t"
            "mulps  16(%3,%0), %%xmm1 \n\t"
            "addps    (%4,%0), %%xmm0 \n\t"
            "addps  16(%4,%0), %%xmm1 \n\t"
            "movss  %%xmm0,   (%1) \n\t"
            "movss  %%xmm1, 32(%1) \n\t"
            "movhlps %%xmm0, %%xmm2 \n\t"
            "movhlps %%xmm1, %%xmm3 \n\t"
            "movss  %%xmm2, 16(%1) \n\t"
            "movss  %%xmm3, 48(%1) \n\t"
            "shufps $0xb1, %%xmm0, %%xmm0 \n\t"
            "shufps $0xb1, %%xmm1, %%xmm1 \n\t"
            "movss  %%xmm0,  8(%1) \n\t"
            "movss  %%xmm1, 40(%1) \n\t"
            "movhlps %%xmm0, %%xmm2 \n\t"
            "movhlps %%xmm1, %%xmm3 \n\t"
            "movss  %%xmm2, 24(%1) \n\t"
            "movss  %%xmm3, 56(%1) \n\t"
            "sub  $64, %1 \n\t"
            "sub  $32, %0 \n\t"
            "jge  1b \n\t"
            :"+r"(i), "+r"(dst)
            :"r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else if(step == 1 && src3 == 0){
        __asm__ volatile(
            "1: \n\t"
            "movaps   (%2,%0), %%xmm0 \n\t"
            "movaps 16(%2,%0), %%xmm1 \n\t"
            "mulps    (%3,%0), %%xmm0 \n\t"
            "mulps  16(%3,%0), %%xmm1 \n\t"
            "addps    (%4,%0), %%xmm0 \n\t"
            "addps  16(%4,%0), %%xmm1 \n\t"
            "movaps %%xmm0,   (%1,%0) \n\t"
            "movaps %%xmm1, 16(%1,%0) \n\t"
            "sub  $32, %0 \n\t"
            "jge  1b \n\t"
            :"+r"(i)
            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else
        ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
}

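/* Scalar equivalent of the two fast paths above:
 *
 *     for (i = 0; i < len; i++)
 *         dst[i*step] = src0[i]*src1[i] + src2[i];
 *
 * Only step 1 or 2 with src3 == 0 is handled in asm; anything else falls
 * back to ff_vector_fmul_add_add_c. The step==2 path needs the scattered
 * movss/movd stores because the destination is interleaved. */
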
static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
                                      const float *win, float add_bias, int len){
#if HAVE_6REGS
    if(add_bias == 0){
        x86_reg i = -len*4;
        x86_reg j = len*4-8;
        __asm__ volatile(
            "1: \n"
            "pswapd  (%5,%1), %%mm1 \n"
            "movq    (%5,%0), %%mm0 \n"
            "pswapd  (%4,%1), %%mm5 \n"
            "movq    (%3,%0), %%mm4 \n"
            "movq      %%mm0, %%mm2 \n"
            "movq      %%mm1, %%mm3 \n"
            "pfmul     %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
            "pfmul     %%mm5, %%mm3 \n" // src1[ j]*win[len+j]
            "pfmul     %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
            "pfmul     %%mm5, %%mm0 \n" // src1[ j]*win[len+i]
            "pfadd     %%mm3, %%mm2 \n"
            "pfsub     %%mm0, %%mm1 \n"
            "pswapd    %%mm2, %%mm2 \n"
            "movq      %%mm1, (%2,%0) \n"
            "movq      %%mm2, (%2,%1) \n"
            "sub $8, %1 \n"
            "add $8, %0 \n"
            "jl 1b \n"
            "femms \n"
            :"+r"(i), "+r"(j)
            :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
        );
    }else
#endif
        ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
}

static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
                                   const float *win, float add_bias, int len){
#if HAVE_6REGS
    if(add_bias == 0){
        x86_reg i = -len*4;
        x86_reg j = len*4-16;
        __asm__ volatile(
            "1: \n"
            "movaps      (%5,%1), %%xmm1 \n"
            "movaps      (%5,%0), %%xmm0 \n"
            "movaps      (%4,%1), %%xmm5 \n"
            "movaps      (%3,%0), %%xmm4 \n"
            "shufps $0x1b, %%xmm1, %%xmm1 \n"
            "shufps $0x1b, %%xmm5, %%xmm5 \n"
            "movaps       %%xmm0, %%xmm2 \n"
            "movaps       %%xmm1, %%xmm3 \n"
            "mulps        %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
            "mulps        %%xmm5, %%xmm3 \n" // src1[ j]*win[len+j]
            "mulps        %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
            "mulps        %%xmm5, %%xmm0 \n" // src1[ j]*win[len+i]
            "addps        %%xmm3, %%xmm2 \n"
            "subps        %%xmm0, %%xmm1 \n"
            "shufps $0x1b, %%xmm2, %%xmm2 \n"
            "movaps       %%xmm1, (%2,%0) \n"
            "movaps       %%xmm2, (%2,%1) \n"
            "sub $16, %1 \n"
            "add $16, %0 \n"
            "jl 1b \n"
            :"+r"(i), "+r"(j)
            :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
        );
    }else
#endif
        ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
}

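/* Both window functions implement the MDCT overlap-add window. The scalar
 * reference (ff_vector_fmul_window_c) computes, with dst, src0 and win
 * offset by len and src1 unchanged, roughly:
 *
 *     for (i = -len, j = len-1; i < 0; i++, j--) {
 *         float s0 = src0[i], s1 = src1[j], wi = win[i], wj = win[j];
 *         dst[i] = s0*wj - s1*wi + add_bias;
 *         dst[j] = s0*wi + s1*wj + add_bias;
 *     }
 *
 * The asm paths only handle add_bias == 0 and need six free GP registers,
 * hence the HAVE_6REGS guard and the C fallback. */
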
static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;
    __asm__ volatile(
        "movss        %3, %%xmm4 \n"
        "shufps $0, %%xmm4, %%xmm4 \n"
        "1: \n"
        "cvtpi2ps   (%2,%0), %%xmm0 \n"
        "cvtpi2ps  8(%2,%0), %%xmm1 \n"
        "cvtpi2ps 16(%2,%0), %%xmm2 \n"
        "cvtpi2ps 24(%2,%0), %%xmm3 \n"
        "movlhps  %%xmm1,    %%xmm0 \n"
        "movlhps  %%xmm3,    %%xmm2 \n"
        "mulps    %%xmm4,    %%xmm0 \n"
        "mulps    %%xmm4,    %%xmm2 \n"
        "movaps   %%xmm0,   (%1,%0) \n"
        "movaps   %%xmm2, 16(%1,%0) \n"
        "add $32, %0 \n"
        "jl 1b \n"
        :"+r"(i)
        :"r"(dst+len), "r"(src+len), "m"(mul)
    );
}

static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;
    __asm__ volatile(
        "movss        %3, %%xmm4 \n"
        "shufps $0, %%xmm4, %%xmm4 \n"
        "1: \n"
        "cvtdq2ps   (%2,%0), %%xmm0 \n"
        "cvtdq2ps 16(%2,%0), %%xmm1 \n"
        "mulps    %%xmm4,    %%xmm0 \n"
        "mulps    %%xmm4,    %%xmm1 \n"
        "movaps   %%xmm0,   (%1,%0) \n"
        "movaps   %%xmm1, 16(%1,%0) \n"
        "add $32, %0 \n"
        "jl 1b \n"
        :"+r"(i)
        :"r"(dst+len), "r"(src+len), "m"(mul)
    );
}

static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    // not bit-exact: pf2id uses different rounding than C and SSE
    __asm__ volatile(
        "add        %0          , %0        \n\t"
        "lea         (%2,%0,2)  , %2        \n\t"
        "add        %0          , %1        \n\t"
        "neg        %0                      \n\t"
        "1:                                 \n\t"
        "pf2id       (%2,%0,2)  , %%mm0     \n\t"
        "pf2id      8(%2,%0,2)  , %%mm1     \n\t"
        "pf2id     16(%2,%0,2)  , %%mm2     \n\t"
        "pf2id     24(%2,%0,2)  , %%mm3     \n\t"
        "packssdw   %%mm1       , %%mm0     \n\t"
        "packssdw   %%mm3       , %%mm2     \n\t"
        "movq       %%mm0       ,  (%1,%0)  \n\t"
        "movq       %%mm2       , 8(%1,%0)  \n\t"
        "add        $16         , %0        \n\t"
        "js 1b                              \n\t"
        "femms                              \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}

static void float_to_int16_sse(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    __asm__ volatile(
        "add        %0          , %0        \n\t"
        "lea         (%2,%0,2)  , %2        \n\t"
        "add        %0          , %1        \n\t"
        "neg        %0                      \n\t"
        "1:                                 \n\t"
        "cvtps2pi    (%2,%0,2)  , %%mm0     \n\t"
        "cvtps2pi   8(%2,%0,2)  , %%mm1     \n\t"
        "cvtps2pi  16(%2,%0,2)  , %%mm2     \n\t"
        "cvtps2pi  24(%2,%0,2)  , %%mm3     \n\t"
        "packssdw   %%mm1       , %%mm0     \n\t"
        "packssdw   %%mm3       , %%mm2     \n\t"
        "movq       %%mm0       ,  (%1,%0)  \n\t"
        "movq       %%mm2       , 8(%1,%0)  \n\t"
        "add        $16         , %0        \n\t"
        "js 1b                              \n\t"
        "emms                               \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}

static void float_to_int16_sse2(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    __asm__ volatile(
        "add        %0          , %0        \n\t"
        "lea         (%2,%0,2)  , %2        \n\t"
        "add        %0          , %1        \n\t"
        "neg        %0                      \n\t"
        "1:                                 \n\t"
        "cvtps2dq    (%2,%0,2)  , %%xmm0    \n\t"
        "cvtps2dq  16(%2,%0,2)  , %%xmm1    \n\t"
        "packssdw   %%xmm1      , %%xmm0    \n\t"
        "movdqa     %%xmm0      , (%1,%0)   \n\t"
        "add        $16         , %0        \n\t"
        "js 1b                              \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}

#if HAVE_YASM
void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len);
void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *top, uint8_t *diff, int w, int *left, int *left_top);
void ff_x264_deblock_v_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
void ff_x264_deblock_h_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
void ff_x264_deblock_v8_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
void ff_x264_deblock_h_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);

#if ARCH_X86_32
static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta)
{
    ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta);
    ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta);
}
#endif

void ff_x264_deblock_v_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
void ff_x264_deblock_h_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
#else
#define ff_float_to_int16_interleave6_sse(a,b,c)   float_to_int16_interleave_misc_sse(a,b,c,6)
#define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
#define ff_float_to_int16_interleave6_3dn2(a,b,c)  float_to_int16_interleave_misc_3dnow(a,b,c,6)
#endif
#define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse

#define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
/* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2*/\
static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
    DECLARE_ALIGNED_16(int16_t, tmp[len]);\
    int i,j,c;\
    for(c=0; c<channels; c++){\
        float_to_int16_##cpu(tmp, src[c], len);\
        for(i=0, j=c; i<len; i++, j+=channels)\
            dst[j] = tmp[i];\
    }\
}\
\
static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\
    if(channels==1)\
        float_to_int16_##cpu(dst, src[0], len);\
    else if(channels==2){\
        x86_reg reglen = len; \
        const float *src0 = src[0];\
        const float *src1 = src[1];\
        __asm__ volatile(\
            "shl $2, %0 \n"\
            "add %0, %1 \n"\
            "add %0, %2 \n"\
            "add %0, %3 \n"\
            "neg %0 \n"\
            body\
            :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\
        );\
    }else if(channels==6){\
        ff_float_to_int16_interleave6_##cpu(dst, src, len);\
    }else\
        float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\
}

FLOAT_TO_INT16_INTERLEAVE(3dnow,
    "1:                         \n"
    "pf2id     (%2,%0), %%mm0   \n"
    "pf2id    8(%2,%0), %%mm1   \n"
    "pf2id     (%3,%0), %%mm2   \n"
    "pf2id    8(%3,%0), %%mm3   \n"
    "packssdw    %%mm1, %%mm0   \n"
    "packssdw    %%mm3, %%mm2   \n"
    "movq        %%mm0, %%mm1   \n"
    "punpcklwd   %%mm2, %%mm0   \n"
    "punpckhwd   %%mm2, %%mm1   \n"
    "movq        %%mm0,  (%1,%0)\n"
    "movq        %%mm1, 8(%1,%0)\n"
    "add $16, %0                \n"
    "js 1b                      \n"
    "femms                      \n"
)

FLOAT_TO_INT16_INTERLEAVE(sse,
    "1:                         \n"
    "cvtps2pi  (%2,%0), %%mm0   \n"
    "cvtps2pi 8(%2,%0), %%mm1   \n"
    "cvtps2pi  (%3,%0), %%mm2   \n"
    "cvtps2pi 8(%3,%0), %%mm3   \n"
    "packssdw    %%mm1, %%mm0   \n"
    "packssdw    %%mm3, %%mm2   \n"
    "movq        %%mm0, %%mm1   \n"
    "punpcklwd   %%mm2, %%mm0   \n"
    "punpckhwd   %%mm2, %%mm1   \n"
    "movq        %%mm0,  (%1,%0)\n"
    "movq        %%mm1, 8(%1,%0)\n"
    "add $16, %0                \n"
    "js 1b                      \n"
    "emms                       \n"
)

FLOAT_TO_INT16_INTERLEAVE(sse2,
    "1:                         \n"
    "cvtps2dq  (%2,%0), %%xmm0  \n"
    "cvtps2dq  (%3,%0), %%xmm1  \n"
    "packssdw   %%xmm1, %%xmm0  \n"
    "movhlps    %%xmm0, %%xmm1  \n"
    "punpcklwd  %%xmm1, %%xmm0  \n"
    "movdqa     %%xmm0, (%1,%0) \n"
    "add $16, %0                \n"
    "js 1b                      \n"
)

static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){
    if(channels==6)
        ff_float_to_int16_interleave6_3dn2(dst, src, len);
    else
        float_to_int16_interleave_3dnow(dst, src, len, channels);
}

void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width);
void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width);
void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                   int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                  int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);

static void add_int16_sse2(int16_t * v1, int16_t * v2, int order)
{
    x86_reg o = -(order << 1);
    v1 += order;
    v2 += order;
    __asm__ volatile(
        "1:                           \n\t"
        "movdqu   (%1,%2),   %%xmm0   \n\t"
        "movdqu 16(%1,%2),   %%xmm1   \n\t"
        "paddw    (%0,%2),   %%xmm0   \n\t"
        "paddw  16(%0,%2),   %%xmm1   \n\t"
        "movdqa   %%xmm0,    (%0,%2)  \n\t"
        "movdqa   %%xmm1,  16(%0,%2)  \n\t"
        "add      $32,       %2       \n\t"
        "js       1b                  \n\t"
        : "+r"(v1), "+r"(v2), "+r"(o)
    );
}

static void sub_int16_sse2(int16_t * v1, int16_t * v2, int order)
{
    x86_reg o = -(order << 1);
    v1 += order;
    v2 += order;
    __asm__ volatile(
        "1:                           \n\t"
        "movdqa   (%0,%2),   %%xmm0   \n\t"
        "movdqa 16(%0,%2),   %%xmm2   \n\t"
        "movdqu   (%1,%2),   %%xmm1   \n\t"
        "movdqu 16(%1,%2),   %%xmm3   \n\t"
        "psubw    %%xmm1,    %%xmm0   \n\t"
        "psubw    %%xmm3,    %%xmm2   \n\t"
        "movdqa   %%xmm0,    (%0,%2)  \n\t"
        "movdqa   %%xmm2,  16(%0,%2)  \n\t"
        "add      $32,       %2       \n\t"
        "js       1b                  \n\t"
        : "+r"(v1), "+r"(v2), "+r"(o)
    );
}

static int32_t scalarproduct_int16_sse2(int16_t * v1, int16_t * v2, int order, int shift)
{
    int res = 0;
    DECLARE_ALIGNED_16(xmm_reg, sh);
    x86_reg o = -(order << 1);

    v1 += order;
    v2 += order;
    sh.a = shift;
    __asm__ volatile(
        "pxor      %%xmm7,  %%xmm7        \n\t"
        "1:                               \n\t"
        "movdqu    (%0,%3), %%xmm0        \n\t"
        "movdqu  16(%0,%3), %%xmm1        \n\t"
        "pmaddwd   (%1,%3), %%xmm0        \n\t"
        "pmaddwd 16(%1,%3), %%xmm1        \n\t"
        "paddd     %%xmm0,  %%xmm7        \n\t"
        "paddd     %%xmm1,  %%xmm7        \n\t"
        "add       $32,     %3            \n\t"
        "js        1b                     \n\t"
        "movhlps   %%xmm7,  %%xmm2        \n\t"
        "paddd     %%xmm2,  %%xmm7        \n\t"
        "psrad     %4,      %%xmm7        \n\t"
        "pshuflw   $0x4E,   %%xmm7,%%xmm2 \n\t"
        "paddd     %%xmm2,  %%xmm7        \n\t"
        "movd      %%xmm7,  %2            \n\t"
        : "+r"(v1), "+r"(v2), "=r"(res), "+r"(o)
        : "m"(sh)
    );
    return res;
}

void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    mm_flags = mm_support();

    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & FF_MM_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }

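    /* With FF_MM_FORCE set, dsp_mask selects flags to force on; without it,
     * the masked flags are forced off. Either way this overrides the CPU
     * detection done by mm_support(). */
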
#if 0
    av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
    if (mm_flags & FF_MM_MMX)
        av_log(avctx, AV_LOG_INFO, " mmx");
    if (mm_flags & FF_MM_MMX2)
        av_log(avctx, AV_LOG_INFO, " mmx2");
    if (mm_flags & FF_MM_3DNOW)
        av_log(avctx, AV_LOG_INFO, " 3dnow");
    if (mm_flags & FF_MM_SSE)
        av_log(avctx, AV_LOG_INFO, " sse");
    if (mm_flags & FF_MM_SSE2)
        av_log(avctx, AV_LOG_INFO, " sse2");
    av_log(avctx, AV_LOG_INFO, "\n");
#endif

    if (mm_flags & FF_MM_MMX) {
        const int idct_algo= avctx->idct_algo;

        if(avctx->lowres==0){
            if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
                c->idct_put= ff_simple_idct_put_mmx;
                c->idct_add= ff_simple_idct_add_mmx;
                c->idct    = ff_simple_idct_mmx;
                c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
#if CONFIG_GPL
            }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
                if(mm_flags & FF_MM_MMX2){
                    c->idct_put= ff_libmpeg2mmx2_idct_put;
                    c->idct_add= ff_libmpeg2mmx2_idct_add;
                    c->idct    = ff_mmxext_idct;
                }else{
                    c->idct_put= ff_libmpeg2mmx_idct_put;
                    c->idct_add= ff_libmpeg2mmx_idct_add;
                    c->idct    = ff_mmx_idct;
                }
                c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
#endif
            }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER || CONFIG_THEORA_DECODER) &&
                     idct_algo==FF_IDCT_VP3){
                if(mm_flags & FF_MM_SSE2){
                    c->idct_put= ff_vp3_idct_put_sse2;
                    c->idct_add= ff_vp3_idct_add_sse2;
                    c->idct    = ff_vp3_idct_sse2;
                    c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
                }else{
                    c->idct_put= ff_vp3_idct_put_mmx;
                    c->idct_add= ff_vp3_idct_add_mmx;
                    c->idct    = ff_vp3_idct_mmx;
                    c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
                }
            }else if(idct_algo==FF_IDCT_CAVS){
                c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_XVIDMMX){
                if(mm_flags & FF_MM_SSE2){
                    c->idct_put= ff_idct_xvid_sse2_put;
                    c->idct_add= ff_idct_xvid_sse2_add;
                    c->idct    = ff_idct_xvid_sse2;
                    c->idct_permutation_type= FF_SSE2_IDCT_PERM;
                }else if(mm_flags & FF_MM_MMX2){
                    c->idct_put= ff_idct_xvid_mmx2_put;
                    c->idct_add= ff_idct_xvid_mmx2_add;
                    c->idct    = ff_idct_xvid_mmx2;
                }else{
                    c->idct_put= ff_idct_xvid_mmx_put;
                    c->idct_add= ff_idct_xvid_mmx_add;
                    c->idct    = ff_idct_xvid_mmx;
                }
            }
        }

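        /* idct_permutation_type tells the rest of the codec in which order
         * the chosen IDCT expects its input coefficients, so scan tables
         * can be permuted once at init time instead of per block. */
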
        c->put_pixels_clamped = put_pixels_clamped_mmx;
        c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
        c->add_pixels_clamped = add_pixels_clamped_mmx;
        c->clear_block  = clear_block_mmx;
        c->clear_blocks = clear_blocks_mmx;
        if (mm_flags & FF_MM_SSE)
            c->clear_block = clear_block_sse;

#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
        c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _     ## CPU; \
        c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_  ## CPU; \
        c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_  ## CPU; \
        c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU

        SET_HPEL_FUNCS(put, 0, 16, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(avg, 0, 16, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(put, 1, 8, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
        SET_HPEL_FUNCS(avg, 1, 8, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);

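        /* pixels_tab[IDX][k]: IDX selects the block width (0 = 16 pixels,
         * 1 = 8), k the half-pel position (0 = aligned, 1 = x+1/2,
         * 2 = y+1/2, 3 = both). */
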
        c->gmc= gmc_mmx;

        c->add_bytes= add_bytes_mmx;
        c->add_bytes_l2= add_bytes_l2_mmx;

        c->draw_edges = draw_edges_mmx;

        if (CONFIG_ANY_H263) {
            c->h263_v_loop_filter= h263_v_loop_filter_mmx;
            c->h263_h_loop_filter= h263_h_loop_filter_mmx;
        }
        c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd;
        c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
        c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_nornd;

        c->put_rv40_chroma_pixels_tab[0]= put_rv40_chroma_mc8_mmx;
        c->put_rv40_chroma_pixels_tab[1]= put_rv40_chroma_mc4_mmx;

        c->h264_idct_dc_add=
        c->h264_idct_add= ff_h264_idct_add_mmx;
        c->h264_idct8_dc_add=
        c->h264_idct8_add= ff_h264_idct8_add_mmx;

        c->h264_idct_add16     = ff_h264_idct_add16_mmx;
        c->h264_idct8_add4     = ff_h264_idct8_add4_mmx;
        c->h264_idct_add8      = ff_h264_idct_add8_mmx;
        c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;

        if (CONFIG_VP6_DECODER) {
            c->vp6_filter_diag4 = ff_vp6_filter_diag4_mmx;
        }

        if (mm_flags & FF_MM_MMX2) {
            c->prefetch = prefetch_mmx2;

            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

            c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
            c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
            c->h264_idct_add16     = ff_h264_idct_add16_mmx2;
            c->h264_idct8_add4     = ff_h264_idct8_add4_mmx2;
            c->h264_idct_add8      = ff_h264_idct_add8_mmx2;
            c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;

                if (CONFIG_VP3_DECODER || CONFIG_THEORA_DECODER) {
                    c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
                    c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
                }
            }

#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
            c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU

            SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);

            c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_mmx2;
            c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_mmx2;

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
            c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
            c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;
            c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
            c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
            c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
            c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
            c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
            c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
            c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;

            c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
            c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
            c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
            c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
            c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
            c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
            c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
            c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

            c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
            c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
            c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
            c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
            c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
            c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
            c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
            c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;

#if HAVE_YASM
            c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
#endif
#if HAVE_7REGS && HAVE_TEN_OPERANDS
            if( mm_flags&FF_MM_3DNOW )
                c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif

            if (CONFIG_CAVS_DECODER)
                ff_cavsdsp_init_mmx2(c, avctx);

            if (CONFIG_VC1_DECODER || CONFIG_WMV3_DECODER)
                ff_vc1dsp_init_mmx(c, avctx);

            c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
        } else if (mm_flags & FF_MM_3DNOW) {
            c->prefetch = prefetch_3dnow;

            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;

            c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_3dnow;
            c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_3dnow;

            if (CONFIG_CAVS_DECODER)
                ff_cavsdsp_init_3dnow(c, avctx);
        }

#define H264_QPEL_FUNCS(x, y, CPU)\
            c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
            c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
        if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){
            // these functions are slower than mmx on AMD, but faster on Intel
/* FIXME works in most codecs, but crashes svq1 due to unaligned chroma
            c->put_pixels_tab[0][0] = put_pixels16_sse2;
            c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
*/
            H264_QPEL_FUNCS(0, 0, sse2);
        }
        if(mm_flags & FF_MM_SSE2){
            c->h264_idct8_add = ff_h264_idct8_add_sse2;
            c->h264_idct8_add4= ff_h264_idct8_add4_sse2;

            H264_QPEL_FUNCS(0, 1, sse2);
            H264_QPEL_FUNCS(0, 2, sse2);
            H264_QPEL_FUNCS(0, 3, sse2);
            H264_QPEL_FUNCS(1, 1, sse2);
            H264_QPEL_FUNCS(1, 2, sse2);
            H264_QPEL_FUNCS(1, 3, sse2);
            H264_QPEL_FUNCS(2, 1, sse2);
            H264_QPEL_FUNCS(2, 2, sse2);
            H264_QPEL_FUNCS(2, 3, sse2);
            H264_QPEL_FUNCS(3, 1, sse2);
            H264_QPEL_FUNCS(3, 2, sse2);
            H264_QPEL_FUNCS(3, 3, sse2);

            if (CONFIG_VP6_DECODER) {
                c->vp6_filter_diag4 = ff_vp6_filter_diag4_sse2;
            }
        }

#if HAVE_SSSE3
        if(mm_flags & FF_MM_SSSE3){
            H264_QPEL_FUNCS(1, 0, ssse3);
            H264_QPEL_FUNCS(1, 1, ssse3);
            H264_QPEL_FUNCS(1, 2, ssse3);
            H264_QPEL_FUNCS(1, 3, ssse3);
            H264_QPEL_FUNCS(2, 0, ssse3);
            H264_QPEL_FUNCS(2, 1, ssse3);
            H264_QPEL_FUNCS(2, 2, ssse3);
            H264_QPEL_FUNCS(2, 3, ssse3);
            H264_QPEL_FUNCS(3, 0, ssse3);
            H264_QPEL_FUNCS(3, 1, ssse3);
            H264_QPEL_FUNCS(3, 2, ssse3);
            H264_QPEL_FUNCS(3, 3, ssse3);
            c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_nornd;
            c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd;
            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd;
            c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3;
            c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
        }
#endif

#if CONFIG_GPL && HAVE_YASM
        if (mm_flags & FF_MM_MMX2){
#if ARCH_X86_32
            c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext;
            c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext;
#endif
            if( mm_flags&FF_MM_SSE2 ){
#if ARCH_X86_64 || !defined(__ICC) || __ICC > 1100
                c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2;
                c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2;
                c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2;
                c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2;
#endif
                c->h264_idct_add16 = ff_h264_idct_add16_sse2;
                c->h264_idct_add8  = ff_h264_idct_add8_sse2;
                c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
            }
        }
#endif

#if CONFIG_SNOW_DECODER
        if(mm_flags & FF_MM_SSE2 & 0){ /* the "& 0" keeps the SSE2 path disabled */
            c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
#if HAVE_7REGS
            c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
#endif
            c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
        }
        else{
            if(mm_flags & FF_MM_MMX2){
                c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
#if HAVE_7REGS
                c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
#endif
            }
            c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
        }
#endif

        if(mm_flags & FF_MM_3DNOW){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
            c->vector_fmul = vector_fmul_3dnow;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16 = float_to_int16_3dnow;
                c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
            }
        }
        if(mm_flags & FF_MM_3DNOWEXT){
            c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
            c->vector_fmul_window = vector_fmul_window_3dnow2;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
            }
        }
        if(mm_flags & FF_MM_SSE){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
            c->ac3_downmix = ac3_downmix_sse;
            c->vector_fmul = vector_fmul_sse;
            c->vector_fmul_reverse = vector_fmul_reverse_sse;
            c->vector_fmul_add_add = vector_fmul_add_add_sse;
            c->vector_fmul_window = vector_fmul_window_sse;
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
            c->float_to_int16 = float_to_int16_sse;
            c->float_to_int16_interleave = float_to_int16_interleave_sse;
        }
        if(mm_flags & FF_MM_3DNOW)
            c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
        if(mm_flags & FF_MM_SSE2){
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
            c->float_to_int16 = float_to_int16_sse2;
            c->float_to_int16_interleave = float_to_int16_interleave_sse2;
            c->add_int16 = add_int16_sse2;
            c->sub_int16 = sub_int16_sse2;
            c->scalarproduct_int16 = scalarproduct_int16_sse2;
        }
    }

    if (CONFIG_ENCODERS)
        dsputilenc_init_mmx(c, avctx);

#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;