/* libavcodec/x86/dsputil_mmx.c */
1 /*
2  * MMX optimized DSP utils
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 #include "libavutil/attributes.h"
26 #include "libavutil/cpu.h"
27 #include "libavutil/x86/asm.h"
28 #include "libavcodec/dsputil.h"
29 #include "libavcodec/h264dsp.h"
30 #include "libavcodec/mpegvideo.h"
31 #include "libavcodec/simple_idct.h"
32 #include "dsputil_mmx.h"
33 #include "idct_xvid.h"
34
35 //#undef NDEBUG
36 //#include <assert.h>
37
/* pixel operations */
/* ff_pw_N: packed 16-bit words, each holding the constant N (four words in a
 * uint64_t, eight in an xmm_reg); used as filter coefficients and rounding
 * biases by the MMX/SSE2 assembly. */
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15)   =   0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_17)   = { 0x0011001100110011ULL, 0x0011001100110011ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20)   =   0x0014001400140014ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42)   =   0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53)   =   0x0035003500350035ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96)   =   0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128)  =   0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255)  =   0x00ff00ff00ff00ffULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_512)  = { 0x0200020002000200ULL, 0x0200020002000200ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };

/* ff_pb_N: eight packed bytes, each holding the constant N (mask values). */
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F)   =   0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC)   =   0xFCFCFCFCFCFCFCFCULL;

/* ff_pd_N: two packed double-precision values equal to N. */
DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };
55
56
57 #if HAVE_YASM
/* Prototypes for external (yasm-assembled) two-source ("l2") pixel ops over
 * 8- or 16-pixel-wide blocks of height h; bodies live in the .asm files. */
void ff_put_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1,
                                     uint8_t *src2, int dstStride,
                                     int src1Stride, int h);
void ff_avg_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_avg_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                                      int dstStride, int src1Stride, int h);
71
/* Copy a 16-pixel-wide block of height h: a 16-wide copy is just two
 * independent 8-wide copies, delegated to the assembly routine. */
static void ff_put_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
                                   ptrdiff_t line_size, int h)
{
    int x;

    for (x = 0; x < 16; x += 8)
        ff_put_pixels8_mmxext(block + x, pixels + x, line_size, h);
}
78
/* Prototypes for the external (yasm) MPEG-4 quarter-pel lowpass filters:
 * horizontal variants take an explicit height h, vertical variants operate
 * on a fixed-height block. */
void ff_put_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride,
                                                 int h);
void ff_put_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                        int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                        int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride,
                                                int h);
void ff_put_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_avg_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride);
void ff_put_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                        int dstStride, int srcStride);
void ff_avg_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                        int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride);
/* A full-pel copy involves no interpolation and hence no rounding, so the
 * "no-rnd" put is simply the plain put. */
#define ff_put_no_rnd_pixels16_mmxext ff_put_pixels16_mmxext
#define ff_put_no_rnd_pixels8_mmxext ff_put_pixels8_mmxext
107 #endif /* HAVE_YASM */
108
109
110 #if HAVE_INLINE_ASM
111
/* Align the next instruction (a loop head) to an 8-byte boundary. */
#define JUMPALIGN()     __asm__ volatile (".p2align 3"::)
/* Zero the given MMX register. */
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%"#regd", %%"#regd ::)

/* Load 0xfefefefefefefefe into regd without touching memory:
 * pcmpeqd sets every byte to 0xff, paddb doubles each byte to 0xfe. */
#define MOVQ_BFE(regd)                                  \
    __asm__ volatile (                                  \
        "pcmpeqd %%"#regd", %%"#regd"   \n\t"           \
        "paddb   %%"#regd", %%"#regd"   \n\t" ::)

#ifndef PIC
/* Non-PIC build: load the byte-one / word-two constants from memory. */
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_wtwo))
#else
// for shared library it's better to use this way for accessing constants
// pcmpeqd -> -1
/* regd = 0x0101010101010101: all-ones -> words of 1 (psrlw 15) -> bytes
 * of 1 (packuswb of the register with itself). */
#define MOVQ_BONE(regd)                                 \
    __asm__ volatile (                                  \
        "pcmpeqd  %%"#regd", %%"#regd"  \n\t"           \
        "psrlw          $15, %%"#regd"  \n\t"           \
        "packuswb %%"#regd", %%"#regd"  \n\t" ::)

/* regd = 0x0002000200020002: all-ones -> words of 1 -> words of 2. */
#define MOVQ_WTWO(regd)                                 \
    __asm__ volatile (                                  \
        "pcmpeqd %%"#regd", %%"#regd"   \n\t"           \
        "psrlw         $15, %%"#regd"   \n\t"           \
        "psllw          $1, %%"#regd"   \n\t"::)

#endif

// using regr as temporary and for the output result
// first argument is unmodifed and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
/* Per-byte rounded average: regr = (a | b) - (((a ^ b) & 0xfe) >> 1),
 * which equals (a + b + 1) >> 1 without overflowing a byte. */
#define PAVGB_MMX(rega, regb, regr, regfe)                       \
    "movq   "#rega", "#regr"            \n\t"                    \
    "por    "#regb", "#regr"            \n\t"                    \
    "pxor   "#rega", "#regb"            \n\t"                    \
    "pand  "#regfe", "#regb"            \n\t"                    \
    "psrlq       $1, "#regb"            \n\t"                    \
    "psubb  "#regb", "#regr"            \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
/* Same rounded-average identity as PAVGB_MMX applied to two register
 * pairs at once: regr = avg(rega, regb), regp = avg(regc, regd). */
#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp)           \
    "movq  "#rega", "#regr"             \n\t"                    \
    "movq  "#regc", "#regp"             \n\t"                    \
    "por   "#regb", "#regr"             \n\t"                    \
    "por   "#regd", "#regp"             \n\t"                    \
    "pxor  "#rega", "#regb"             \n\t"                    \
    "pxor  "#regc", "#regd"             \n\t"                    \
    "pand    %%mm6, "#regb"             \n\t"                    \
    "pand    %%mm6, "#regd"             \n\t"                    \
    "psrlq      $1, "#regd"             \n\t"                    \
    "psrlq      $1, "#regb"             \n\t"                    \
    "psubb "#regb", "#regr"             \n\t"                    \
    "psubb "#regd", "#regp"             \n\t"
165
/***********************************/
/* MMX rounding */

/* Instantiate dsputil_rnd_template.c for plain MMX: generated names get the
 * "_mmx" suffix, SET_RND loads a packed-word constant of 2 (MOVQ_WTWO) as
 * the rounding bias, and averaging maps onto the PAVGB*_MMX macros above. */
#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)
#define OP_AVG(a, b, c, e)              PAVGB_MMX(a, b, c, e)

#include "dsputil_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG
182
183 #endif /* HAVE_INLINE_ASM */
184
185
186 #if HAVE_YASM
187
188 /***********************************/
189 /* MMXEXT specific */
190
//FIXME the following could be optimized too ...
/* Average a 16-pixel-wide block into dst as two independent 8-wide halves,
 * each handled by the assembly routine. */
static void ff_avg_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
                                   int line_size, int h)
{
    int x;

    for (x = 0; x < 16; x += 8)
        ff_avg_pixels8_mmxext(block + x, pixels + x, line_size, h);
}
198
199 #endif /* HAVE_YASM */
200
201
202 #if HAVE_INLINE_ASM
203 /***********************************/
204 /* standard MMX */
205
/* Store 64 16-bit values from block[] as an 8x8 region of bytes:
 * packuswb saturates each value to the unsigned range 0..255, which
 * performs the clamping.  Each asm statement handles four rows
 * (32 coefficients). */
void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;

    /* read the pixels */
    p   = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile (
        "movq      (%3), %%mm0          \n\t"
        "movq     8(%3), %%mm1          \n\t"
        "movq    16(%3), %%mm2          \n\t"
        "movq    24(%3), %%mm3          \n\t"
        "movq    32(%3), %%mm4          \n\t"
        "movq    40(%3), %%mm5          \n\t"
        "movq    48(%3), %%mm6          \n\t"
        "movq    56(%3), %%mm7          \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "packuswb %%mm3, %%mm2          \n\t"
        "packuswb %%mm5, %%mm4          \n\t"
        "packuswb %%mm7, %%mm6          \n\t"
        "movq     %%mm0, (%0)           \n\t"
        "movq     %%mm2, (%0, %1)       \n\t"
        "movq     %%mm4, (%0, %1, 2)    \n\t"
        "movq     %%mm6, (%0, %2)       \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
           "r"(p)
        : "memory");
    /* advance to the second set of four rows */
    pix += line_size * 4;
    p   += 32;

    // if here would be an exact copy of the code above
    // compiler would generate some very strange code
    // thus using "r"
    __asm__ volatile (
        "movq       (%3), %%mm0         \n\t"
        "movq      8(%3), %%mm1         \n\t"
        "movq     16(%3), %%mm2         \n\t"
        "movq     24(%3), %%mm3         \n\t"
        "movq     32(%3), %%mm4         \n\t"
        "movq     40(%3), %%mm5         \n\t"
        "movq     48(%3), %%mm6         \n\t"
        "movq     56(%3), %%mm7         \n\t"
        "packuswb  %%mm1, %%mm0         \n\t"
        "packuswb  %%mm3, %%mm2         \n\t"
        "packuswb  %%mm5, %%mm4         \n\t"
        "packuswb  %%mm7, %%mm6         \n\t"
        "movq      %%mm0, (%0)          \n\t"
        "movq      %%mm2, (%0, %1)      \n\t"
        "movq      %%mm4, (%0, %1, 2)   \n\t"
        "movq      %%mm6, (%0, %2)      \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
        : "memory");
}
262
/* Store one half (four rows, 32 coefficients at byte offset 'off') of a
 * signed-clamped block: packsswb saturates the 16-bit values to signed
 * bytes (-128..127), then adding %%mm0 -- which the caller loads with
 * ff_pb_80, i.e. 0x80 in every byte -- biases them into 0..255.
 * Operands: %0 = dst row pointer, %1 = 3 * line_skip, %2 = block,
 * %3 = line_skip (see ff_put_signed_pixels_clamped_mmx below). */
#define put_signed_pixels_clamped_mmx_half(off)             \
    "movq          "#off"(%2), %%mm1        \n\t"           \
    "movq     16 + "#off"(%2), %%mm2        \n\t"           \
    "movq     32 + "#off"(%2), %%mm3        \n\t"           \
    "movq     48 + "#off"(%2), %%mm4        \n\t"           \
    "packsswb  8 + "#off"(%2), %%mm1        \n\t"           \
    "packsswb 24 + "#off"(%2), %%mm2        \n\t"           \
    "packsswb 40 + "#off"(%2), %%mm3        \n\t"           \
    "packsswb 56 + "#off"(%2), %%mm4        \n\t"           \
    "paddb              %%mm0, %%mm1        \n\t"           \
    "paddb              %%mm0, %%mm2        \n\t"           \
    "paddb              %%mm0, %%mm3        \n\t"           \
    "paddb              %%mm0, %%mm4        \n\t"           \
    "movq               %%mm1, (%0)         \n\t"           \
    "movq               %%mm2, (%0, %3)     \n\t"           \
    "movq               %%mm3, (%0, %3, 2)  \n\t"           \
    "movq               %%mm4, (%0, %1)     \n\t"
280
/* Store block[] as an 8x8 region of bytes, clamping each value to
 * -128..127 and adding 128 (via the ff_pb_80 bias in %%mm0) so the
 * result lands in 0..255.  line_skip3 (= 3 * line_size) is computed by
 * the lea and used as the fourth-row offset inside the macro. */
void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0     \n\t"
        "lea         (%3, %3, 2), %1        \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea         (%0, %3, 4), %0        \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        : "+&r"(pixels), "=&r"(line_skip3)
        : "r"(block), "r"(line_skip)
        : "memory");
}
297
/* Add 64 16-bit values from block[] to an existing 8x8 region of pixels
 * with clamping: pixels are widened to 16 bits against the zero in %%mm7,
 * summed with paddsw (signed saturation), then packed back with packuswb
 * (unsigned saturation to 0..255).  Two rows per iteration, four
 * iterations. */
void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p   = block;
    pix = pixels;
    MOVQ_ZERO(mm7);             /* %%mm7 = 0, used for byte unpacking */
    i = 4;
    do {
        __asm__ volatile (
            "movq        (%2), %%mm0    \n\t"
            "movq       8(%2), %%mm1    \n\t"
            "movq      16(%2), %%mm2    \n\t"
            "movq      24(%2), %%mm3    \n\t"
            "movq          %0, %%mm4    \n\t"
            "movq          %1, %%mm6    \n\t"
            "movq       %%mm4, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm4    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm4, %%mm0    \n\t"
            "paddsw     %%mm5, %%mm1    \n\t"
            "movq       %%mm6, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm6    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm6, %%mm2    \n\t"
            "paddsw     %%mm5, %%mm3    \n\t"
            "packuswb   %%mm1, %%mm0    \n\t"
            "packuswb   %%mm3, %%mm2    \n\t"
            "movq       %%mm0, %0       \n\t"
            "movq       %%mm2, %1       \n\t"
            : "+m"(*pix), "+m"(*(pix + line_size))
            : "r"(p)
            : "memory");
        pix += line_size * 2;   /* next pair of rows */
        p   += 16;              /* 16 coefficients consumed */
    } while (--i);
}
339
/* Copy an 8-pixel-wide block of height h from pixels to block.
 * Four rows are copied per loop iteration ("subl $4"), so h is assumed
 * to be a multiple of 4. */
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
                            ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"      \n\t"   /* REG_a = 2 * line_size */
        ".p2align     3                 \n\t"
        "1:                             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq     %%mm0, (%2)           \n\t"
        "movq     %%mm1, (%2, %3)       \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq     %%mm0, (%2)           \n\t"
        "movq     %%mm1, (%2, %3)       \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "subl        $4, %0             \n\t"
        "jnz         1b                 \n\t"
        : "+g"(h), "+r"(pixels),  "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}
366
/* Copy a 16-pixel-wide block of height h: each row is two movq loads and
 * stores (offsets 0 and 8).  Four rows per loop iteration, so h is
 * assumed to be a multiple of 4. */
static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"      \n\t"   /* REG_a = 2 * line_size */
        ".p2align     3                 \n\t"
        "1:                             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq 8(%1    ), %%mm4          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq 8(%1, %3), %%mm5          \n\t"
        "movq     %%mm0,  (%2)          \n\t"
        "movq     %%mm4, 8(%2)          \n\t"
        "movq     %%mm1,  (%2, %3)      \n\t"
        "movq     %%mm5, 8(%2, %3)      \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq 8(%1    ), %%mm4          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq 8(%1, %3), %%mm5          \n\t"
        "movq     %%mm0,  (%2)          \n\t"
        "movq     %%mm4, 8(%2)          \n\t"
        "movq     %%mm1,  (%2, %3)      \n\t"
        "movq     %%mm5, 8(%2, %3)      \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "subl        $4, %0             \n\t"
        "jnz         1b                 \n\t"
        : "+g"(h), "+r"(pixels),  "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}
401
402 #define CLEAR_BLOCKS(name, n)                           \
403 static void name(int16_t *blocks)                       \
404 {                                                       \
405     __asm__ volatile (                                  \
406         "pxor %%mm7, %%mm7              \n\t"           \
407         "mov     %1,        %%"REG_a"   \n\t"           \
408         "1:                             \n\t"           \
409         "movq %%mm7,   (%0, %%"REG_a")  \n\t"           \
410         "movq %%mm7,  8(%0, %%"REG_a")  \n\t"           \
411         "movq %%mm7, 16(%0, %%"REG_a")  \n\t"           \
412         "movq %%mm7, 24(%0, %%"REG_a")  \n\t"           \
413         "add    $32, %%"REG_a"          \n\t"           \
414         "js      1b                     \n\t"           \
415         :: "r"(((uint8_t *)blocks) + 128 * n),          \
416            "i"(-128 * n)                                \
417         : "%"REG_a                                      \
418         );                                              \
419 }
420 CLEAR_BLOCKS(clear_blocks_mmx, 6)
421 CLEAR_BLOCKS(clear_block_mmx, 1)
422
/* Zero one 64-coefficient (128-byte) block with eight 16-byte SSE
 * stores.  movaps requires 'block' to be 16-byte aligned. */
static void clear_block_sse(int16_t *block)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0          \n"
        "movaps %%xmm0,    (%0)         \n"
        "movaps %%xmm0,  16(%0)         \n"
        "movaps %%xmm0,  32(%0)         \n"
        "movaps %%xmm0,  48(%0)         \n"
        "movaps %%xmm0,  64(%0)         \n"
        "movaps %%xmm0,  80(%0)         \n"
        "movaps %%xmm0,  96(%0)         \n"
        "movaps %%xmm0, 112(%0)         \n"
        :: "r"(block)
        : "memory"
    );
}
439
440 static void clear_blocks_sse(int16_t *blocks)
441 {
442     __asm__ volatile (
443         "xorps  %%xmm0, %%xmm0              \n"
444         "mov        %1,         %%"REG_a"   \n"
445         "1:                                 \n"
446         "movaps %%xmm0,    (%0, %%"REG_a")  \n"
447         "movaps %%xmm0,  16(%0, %%"REG_a")  \n"
448         "movaps %%xmm0,  32(%0, %%"REG_a")  \n"
449         "movaps %%xmm0,  48(%0, %%"REG_a")  \n"
450         "movaps %%xmm0,  64(%0, %%"REG_a")  \n"
451         "movaps %%xmm0,  80(%0, %%"REG_a")  \n"
452         "movaps %%xmm0,  96(%0, %%"REG_a")  \n"
453         "movaps %%xmm0, 112(%0, %%"REG_a")  \n"
454         "add      $128,         %%"REG_a"   \n"
455         "js         1b                      \n"
456         :: "r"(((uint8_t *)blocks) + 128 * 6),
457            "i"(-128 * 6)
458         : "%"REG_a
459     );
460 }
461
462 static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
463 {
464     x86_reg i = 0;
465     __asm__ volatile (
466         "jmp          2f                \n\t"
467         "1:                             \n\t"
468         "movq   (%1, %0), %%mm0         \n\t"
469         "movq   (%2, %0), %%mm1         \n\t"
470         "paddb     %%mm0, %%mm1         \n\t"
471         "movq      %%mm1, (%2, %0)      \n\t"
472         "movq  8(%1, %0), %%mm0         \n\t"
473         "movq  8(%2, %0), %%mm1         \n\t"
474         "paddb     %%mm0, %%mm1         \n\t"
475         "movq      %%mm1, 8(%2, %0)     \n\t"
476         "add         $16, %0            \n\t"
477         "2:                             \n\t"
478         "cmp          %3, %0            \n\t"
479         "js           1b                \n\t"
480         : "+r"(i)
481         : "r"(src), "r"(dst), "r"((x86_reg)w - 15)
482     );
483     for ( ; i < w; i++)
484         dst[i + 0] += src[i + 0];
485 }
486
487 #if HAVE_7REGS
488 static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
489                                             const uint8_t *diff, int w,
490                                             int *left, int *left_top)
491 {
492     x86_reg w2 = -w;
493     x86_reg x;
494     int l  = *left     & 0xff;
495     int tl = *left_top & 0xff;
496     int t;
497     __asm__ volatile (
498         "mov          %7, %3            \n"
499         "1:                             \n"
500         "movzbl (%3, %4), %2            \n"
501         "mov          %2, %k3           \n"
502         "sub         %b1, %b3           \n"
503         "add         %b0, %b3           \n"
504         "mov          %2, %1            \n"
505         "cmp          %0, %2            \n"
506         "cmovg        %0, %2            \n"
507         "cmovg        %1, %0            \n"
508         "cmp         %k3, %0            \n"
509         "cmovg       %k3, %0            \n"
510         "mov          %7, %3            \n"
511         "cmp          %2, %0            \n"
512         "cmovl        %2, %0            \n"
513         "add    (%6, %4), %b0           \n"
514         "mov         %b0, (%5, %4)      \n"
515         "inc          %4                \n"
516         "jl           1b                \n"
517         : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
518         : "r"(dst + w), "r"(diff + w), "rm"(top + w)
519     );
520     *left     = l;
521     *left_top = tl;
522 }
523 #endif
524
/* Draw the edges of width 'w' of an image of size width, height
 * this MMX version can only handle w == 8 || w == 16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                           int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if (w == 8) {
        /* For every row: broadcast the first byte (punpck* chain) into the
         * 8 bytes left of the row, and the last byte into the 8 bytes to
         * its right.
         * NOTE(review): these asms write memory with no "memory" clobber
         * and no memory output operand -- relies on the compiler not
         * reordering around them; confirm/fix alongside the other asm
         * blocks in this file. */
        __asm__ volatile (
            "1:                             \n\t"
            "movd            (%0), %%mm0    \n\t"
            "punpcklbw      %%mm0, %%mm0    \n\t"
            "punpcklwd      %%mm0, %%mm0    \n\t"
            "punpckldq      %%mm0, %%mm0    \n\t"
            "movq           %%mm0, -8(%0)   \n\t"
            "movq      -8(%0, %2), %%mm1    \n\t"
            "punpckhbw      %%mm1, %%mm1    \n\t"
            "punpckhwd      %%mm1, %%mm1    \n\t"
            "punpckhdq      %%mm1, %%mm1    \n\t"
            "movq           %%mm1, (%0, %2) \n\t"
            "add               %1, %0       \n\t"
            "cmp               %3, %0       \n\t"
            "jb                1b           \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    } else {
        /* Same as above but extending by 16 bytes on each side (two movq
         * stores per side). */
        __asm__ volatile (
            "1:                                 \n\t"
            "movd            (%0), %%mm0        \n\t"
            "punpcklbw      %%mm0, %%mm0        \n\t"
            "punpcklwd      %%mm0, %%mm0        \n\t"
            "punpckldq      %%mm0, %%mm0        \n\t"
            "movq           %%mm0, -8(%0)       \n\t"
            "movq           %%mm0, -16(%0)      \n\t"
            "movq      -8(%0, %2), %%mm1        \n\t"
            "punpckhbw      %%mm1, %%mm1        \n\t"
            "punpckhwd      %%mm1, %%mm1        \n\t"
            "punpckhdq      %%mm1, %%mm1        \n\t"
            "movq           %%mm1,  (%0, %2)    \n\t"
            "movq           %%mm1, 8(%0, %2)    \n\t"
            "add               %1, %0           \n\t"
            "cmp               %3, %0           \n\t"
            "jb                1b               \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides & EDGE_TOP) {
        for (i = 0; i < h; i += 4) {
            ptr = buf - (i + 1) * wrap - w;
            /* %1 + %0 resolves to buf - w, i.e. the first image row
             * including its left/right extension; replicate it into the
             * four edge rows above (negative strides in %2/%3). */
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
                  "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
                );
        }
    }

    if (sides & EDGE_BOTTOM) {
        for (i = 0; i < h; i += 4) {
            ptr = last_line + (i + 1) * wrap - w;
            /* Mirror of the EDGE_TOP case: replicate the extended last
             * image row into the four edge rows below (positive strides). */
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)last_line - (x86_reg)ptr - w),
                  "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
                  "r"(ptr + width + 2 * w)
                );
        }
    }
}
620 #endif /* HAVE_INLINE_ASM */
621
622
623 #if HAVE_YASM
624 #define QPEL_OP(OPNAME, ROUNDER, RND, MMX)                              \
625 static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src,   \
626                                           ptrdiff_t stride)             \
627 {                                                                       \
628     ff_ ## OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);              \
629 }                                                                       \
630                                                                         \
631 static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src,    \
632                                          ptrdiff_t stride)              \
633 {                                                                       \
634     uint64_t temp[8];                                                   \
635     uint8_t * const half = (uint8_t*)temp;                              \
636     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
637                                                    stride, 8);          \
638     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
639                                         stride, stride, 8);             \
640 }                                                                       \
641                                                                         \
642 static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src,    \
643                                          ptrdiff_t stride)              \
644 {                                                                       \
645     ff_ ## OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride,    \
646                                                    stride, 8);          \
647 }                                                                       \
648                                                                         \
649 static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src,    \
650                                          ptrdiff_t stride)              \
651 {                                                                       \
652     uint64_t temp[8];                                                   \
653     uint8_t * const half = (uint8_t*)temp;                              \
654     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
655                                                    stride, 8);          \
656     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride,     \
657                                         stride, 8);                     \
658 }                                                                       \
659                                                                         \
660 static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src,    \
661                                          ptrdiff_t stride)              \
662 {                                                                       \
663     uint64_t temp[8];                                                   \
664     uint8_t * const half = (uint8_t*)temp;                              \
665     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
666                                                    8, stride);          \
667     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
668                                         stride, stride, 8);             \
669 }                                                                       \
670                                                                         \
671 static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src,    \
672                                          ptrdiff_t stride)              \
673 {                                                                       \
674     ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src,            \
675                                                    stride, stride);     \
676 }                                                                       \
677                                                                         \
678 static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src,    \
679                                          ptrdiff_t stride)              \
680 {                                                                       \
681     uint64_t temp[8];                                                   \
682     uint8_t * const half = (uint8_t*)temp;                              \
683     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
684                                                    8, stride);          \
685     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride,\
686                                         stride, 8);                     \
687 }                                                                       \
688                                                                         \
689 static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src,    \
690                                          ptrdiff_t stride)              \
691 {                                                                       \
692     uint64_t half[8 + 9];                                               \
693     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
694     uint8_t * const halfHV = ((uint8_t*)half);                          \
695     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
696                                                    stride, 9);          \
697     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
698                                         stride, 9);                     \
699     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
700     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
701                                         stride, 8, 8);                  \
702 }                                                                       \
703                                                                         \
704 static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src,    \
705                                          ptrdiff_t stride)              \
706 {                                                                       \
707     uint64_t half[8 + 9];                                               \
708     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
709     uint8_t * const halfHV = ((uint8_t*)half);                          \
710     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
711                                                    stride, 9);          \
712     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
713                                         stride, 9);                     \
714     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
715     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
716                                         stride, 8, 8);                  \
717 }                                                                       \
718                                                                         \
719 static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src,    \
720                                          ptrdiff_t stride)              \
721 {                                                                       \
722     uint64_t half[8 + 9];                                               \
723     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
724     uint8_t * const halfHV = ((uint8_t*)half);                          \
725     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
726                                                    stride, 9);          \
727     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
728                                         stride, 9);                     \
729     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
730     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
731                                         stride, 8, 8);                  \
732 }                                                                       \
733                                                                         \
734 static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src,    \
735                                          ptrdiff_t stride)              \
736 {                                                                       \
737     uint64_t half[8 + 9];                                               \
738     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
739     uint8_t * const halfHV = ((uint8_t*)half);                          \
740     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
741                                                    stride, 9);          \
742     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
743                                         stride, 9);                     \
744     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
745     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
746                                         stride, 8, 8);                  \
747 }                                                                       \
748                                                                         \
749 static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src,    \
750                                          ptrdiff_t stride)              \
751 {                                                                       \
752     uint64_t half[8 + 9];                                               \
753     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
754     uint8_t * const halfHV = ((uint8_t*)half);                          \
755     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
756                                                    stride, 9);          \
757     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
758     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
759                                         stride, 8, 8);                  \
760 }                                                                       \
761                                                                         \
762 static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src,    \
763                                          ptrdiff_t stride)              \
764 {                                                                       \
765     uint64_t half[8 + 9];                                               \
766     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
767     uint8_t * const halfHV = ((uint8_t*)half);                          \
768     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
769                                                    stride, 9);          \
770     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
771     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
772                                         stride, 8, 8);                  \
773 }                                                                       \
774                                                                         \
775 static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src,    \
776                                          ptrdiff_t stride)              \
777 {                                                                       \
778     uint64_t half[8 + 9];                                               \
779     uint8_t * const halfH = ((uint8_t*)half);                           \
780     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
781                                                    stride, 9);          \
782     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH,              \
783                                         8, stride, 9);                  \
784     ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
785                                                    stride, 8);          \
786 }                                                                       \
787                                                                         \
788 static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src,    \
789                                          ptrdiff_t stride)              \
790 {                                                                       \
791     uint64_t half[8 + 9];                                               \
792     uint8_t * const halfH = ((uint8_t*)half);                           \
793     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
794                                                    stride, 9);          \
795     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
796                                         stride, 9);                     \
797     ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
798                                                    stride, 8);          \
799 }                                                                       \
800                                                                         \
801 static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src,    \
802                                          ptrdiff_t stride)              \
803 {                                                                       \
804     uint64_t half[9];                                                   \
805     uint8_t * const halfH = ((uint8_t*)half);                           \
806     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
807                                                    stride, 9);          \
808     ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
809                                                    stride, 8);          \
810 }                                                                       \
811                                                                         \
812 static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src,  \
813                                            ptrdiff_t stride)            \
814 {                                                                       \
815     ff_ ## OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);            \
816 }                                                                       \
817                                                                         \
818 static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src,   \
819                                           ptrdiff_t stride)             \
820 {                                                                       \
821     uint64_t temp[32];                                                  \
822     uint8_t * const half = (uint8_t*)temp;                              \
823     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
824                                                     stride, 16);        \
825     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
826                                          stride, 16);                   \
827 }                                                                       \
828                                                                         \
829 static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src,   \
830                                           ptrdiff_t stride)             \
831 {                                                                       \
832     ff_ ## OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src,           \
833                                                     stride, stride, 16);\
834 }                                                                       \
835                                                                         \
836 static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src,   \
837                                           ptrdiff_t stride)             \
838 {                                                                       \
839     uint64_t temp[32];                                                  \
840     uint8_t * const half = (uint8_t*)temp;                              \
841     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
842                                                     stride, 16);        \
843     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half,            \
844                                          stride, stride, 16);           \
845 }                                                                       \
846                                                                         \
847 static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src,   \
848                                           ptrdiff_t stride)             \
849 {                                                                       \
850     uint64_t temp[32];                                                  \
851     uint8_t * const half = (uint8_t*)temp;                              \
852     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
853                                                     stride);            \
854     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
855                                          stride, 16);                   \
856 }                                                                       \
857                                                                         \
858 static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src,   \
859                                           ptrdiff_t stride)             \
860 {                                                                       \
861     ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src,           \
862                                                     stride, stride);    \
863 }                                                                       \
864                                                                         \
865 static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src,   \
866                                           ptrdiff_t stride)             \
867 {                                                                       \
868     uint64_t temp[32];                                                  \
869     uint8_t * const half = (uint8_t*)temp;                              \
870     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
871                                                     stride);            \
872     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half,         \
873                                          stride, stride, 16);           \
874 }                                                                       \
875                                                                         \
876 static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src,   \
877                                           ptrdiff_t stride)             \
878 {                                                                       \
879     uint64_t half[16 * 2 + 17 * 2];                                     \
880     uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
881     uint8_t * const halfHV = ((uint8_t*)half);                          \
882     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
883                                                     stride, 17);        \
884     ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
885                                          stride, 17);                   \
886     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
887                                                     16, 16);            \
888     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
889                                          stride, 16, 16);               \
890 }                                                                       \
891                                                                         \
892 static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src,   \
893                                           ptrdiff_t stride)             \
894 {                                                                       \
895     uint64_t half[16 * 2 + 17 * 2];                                     \
896     uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
897     uint8_t * const halfHV = ((uint8_t*)half);                          \
898     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
899                                                     stride, 17);        \
900     ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
901                                          stride, 17);                   \
902     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
903                                                     16, 16);            \
904     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
905                                          stride, 16, 16);               \
906 }                                                                       \
907                                                                         \
908 static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src,   \
909                                           ptrdiff_t stride)             \
910 {                                                                       \
911     uint64_t half[16 * 2 + 17 * 2];                                     \
912     uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
913     uint8_t * const halfHV = ((uint8_t*)half);                          \
914     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
915                                                     stride, 17);        \
916     ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
917                                          stride, 17);                   \
918     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
919                                                     16, 16);            \
920     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
921                                          stride, 16, 16);               \
922 }                                                                       \
923                                                                         \
924 static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src,   \
925                                           ptrdiff_t stride)             \
926 {                                                                       \
927     uint64_t half[16 * 2 + 17 * 2];                                     \
928     uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
929     uint8_t * const halfHV = ((uint8_t*)half);                          \
930     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
931                                                     stride, 17);        \
932     ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
933                                          stride, 17);                   \
934     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
935                                                     16, 16);            \
936     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
937                                          stride, 16, 16);               \
938 }                                                                       \
939                                                                         \
940 static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src,   \
941                                           ptrdiff_t stride)             \
942 {                                                                       \
943     uint64_t half[16 * 2 + 17 * 2];                                     \
944     uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
945     uint8_t * const halfHV = ((uint8_t*)half);                          \
946     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
947                                                     stride, 17);        \
948     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
949                                                     16, 16);            \
950     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
951                                          stride, 16, 16);               \
952 }                                                                       \
953                                                                         \
954 static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src,   \
955                                           ptrdiff_t stride)             \
956 {                                                                       \
957     uint64_t half[16 * 2 + 17 * 2];                                     \
958     uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
959     uint8_t * const halfHV = ((uint8_t*)half);                          \
960     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
961                                                     stride, 17);        \
962     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
963                                                     16, 16);            \
964     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
965                                          stride, 16, 16);               \
966 }                                                                       \
967                                                                         \
968 static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src,   \
969                                           ptrdiff_t stride)             \
970 {                                                                       \
971     uint64_t half[17 * 2];                                              \
972     uint8_t * const halfH = ((uint8_t*)half);                           \
973     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
974                                                     stride, 17);        \
975     ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
976                                          stride, 17);                   \
977     ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
978                                                     stride, 16);        \
979 }                                                                       \
980                                                                         \
981 static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src,   \
982                                           ptrdiff_t stride)             \
983 {                                                                       \
984     uint64_t half[17 * 2];                                              \
985     uint8_t * const halfH = ((uint8_t*)half);                           \
986     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
987                                                     stride, 17);        \
988     ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
989                                          stride, 17);                   \
990     ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
991                                                     stride, 16);        \
992 }                                                                       \
993                                                                         \
994 static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src,   \
995                                           ptrdiff_t stride)             \
996 {                                                                       \
997     uint64_t half[17 * 2];                                              \
998     uint8_t * const halfH = ((uint8_t*)half);                           \
999     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
1000                                                     stride, 17);        \
1001     ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
1002                                                     stride, 16);        \
1003 }
1004
/* Instantiate the qpel motion-compensation function families defined by the
 * QPEL_OP macro above, all for MMXEXT: rounding "put", rounding "avg", and
 * no-rounding "put" (the second argument selects the rounding constant used
 * by the macro — see its definition). */
QPEL_OP(put_,          ff_pw_16, _,        mmxext)
QPEL_OP(avg_,          ff_pw_16, _,        mmxext)
QPEL_OP(put_no_rnd_,   ff_pw_15, _no_rnd_, mmxext)
1008 #endif /* HAVE_YASM */
1009
1010
1011 #if HAVE_INLINE_ASM
/* RV40-specific: the (3,3) quarter-pel position is served by the plain xy2
 * averaging kernel (presumably the half-pel-center interpolator; defined
 * elsewhere in this file). */
void ff_put_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
  put_pixels8_xy2_mmx(dst, src, stride, 8);
}
/* RV40 (3,3) qpel position, 16x16 put variant — delegates to the xy2 kernel. */
void ff_put_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
  put_pixels16_xy2_mmx(dst, src, stride, 16);
}
/* RV40 (3,3) qpel position, 8x8 averaging variant — delegates to the xy2 kernel. */
void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
  avg_pixels8_xy2_mmx(dst, src, stride, 8);
}
/* RV40 (3,3) qpel position, 16x16 averaging variant — delegates to the xy2 kernel. */
void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
  avg_pixels16_xy2_mmx(dst, src, stride, 16);
}
1028
/* Global motion compensation of one 8-pixel-wide block (w = 8) using MMX.
 *
 * (ox, oy) is the source start position and (dxx, dxy, dyx, dyy) are the
 * per-pixel position deltas of an affine transform, in fixed point with
 * (16 + shift) fractional bits (derived from the >> (16 + shift) fullpel
 * extraction below); r is the rounding constant added before the final
 * downshift by 2 * shift.  Cases this fast path cannot handle — fullpel
 * offset varying inside the block, more than 16 bits of subpel precision,
 * or a source block not fully inside width x height — fall back to the
 * C implementation ff_gmc_c(). */
static void gmc_mmx(uint8_t *dst, uint8_t *src,
                    int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy,
                    int shift, int r, int width, int height)
{
    const int w    = 8;                 /* block width handled by this path */
    const int ix   = ox  >> (16 + shift);   /* fullpel part of start position */
    const int iy   = oy  >> (16 + shift);
    const int oxs  = ox  >> 4;          /* reduce to 12 + shift fractional bits */
    const int oys  = oy  >> 4;
    const int dxxs = dxx >> 4;
    const int dxys = dxy >> 4;
    const int dyxs = dyx >> 4;
    const int dyys = dyy >> 4;
    const uint16_t r4[4]   = { r, r, r, r };            /* rounding constant x4 */
    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys }; /* per-row x step x4 */
    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys }; /* per-row y step x4 */
    const uint64_t shift2 = 2 * shift;  /* shift count for the final psrlw */
    int x, y;

    /* Position spread across the block, used to detect a fullpel offset that
     * is not constant over the whole 8 x h block. */
    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
    const int dxh = dxy * (h - 1);
    const int dyw = dyx * (w - 1);
    if ( // non-constant fullpel offset (3% of blocks)
        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx | dxy | dyx | dyy) & 15 ||
        (unsigned)ix >= width  - w ||
        (unsigned)iy >= height - h) {
        // FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
                 shift, r, width, height);
        return;
    }

    /* Constant fullpel offset for the whole block. */
    src += ix + iy * stride;

    /* mm6 = s = 1 << shift broadcast to four words; mm7 = 0 for unpacking
     * source bytes to words. */
    __asm__ volatile (
        "movd         %0, %%mm6         \n\t"
        "pxor      %%mm7, %%mm7         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        :: "r"(1<<shift)
    );

    /* Process the block as two 4-pixel-wide columns. */
    for (x = 0; x < w; x += 4) {
        /* Subpel positions of the 4 pixels, pre-decremented by one row step
         * so the first iteration's add below lands on row 0. */
        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
                            oxs - dxys + dxxs * (x + 1),
                            oxs - dxys + dxxs * (x + 2),
                            oxs - dxys + dxxs * (x + 3) };
        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
                            oys - dyys + dyxs * (x + 1),
                            oys - dyys + dyxs * (x + 2),
                            oys - dyys + dyxs * (x + 3) };

        for (y = 0; y < h; y++) {
            /* Advance the positions by one row and extract the subpel
             * fractions into mm4 (dx) and mm5 (dy); after psrlw $12 the
             * values are scaled so that s = 1 << shift is one full pixel. */
            __asm__ volatile (
                "movq      %0, %%mm4    \n\t"
                "movq      %1, %%mm5    \n\t"
                "paddw     %2, %%mm4    \n\t"
                "paddw     %3, %%mm5    \n\t"
                "movq   %%mm4, %0       \n\t"
                "movq   %%mm5, %1       \n\t"
                "psrlw    $12, %%mm4    \n\t"
                "psrlw    $12, %%mm5    \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            /* Bilinear blend of the four fullpel neighbours:
             *   dst = (src[0,0]*(s-dx)*(s-dy) + src[1,0]*dx*(s-dy) +
             *          src[0,1]*(s-dx)*dy     + src[1,1]*dx*dy + r)
             *         >> (2 * shift) */
            __asm__ volatile (
                "movq      %%mm6, %%mm2 \n\t"
                "movq      %%mm6, %%mm1 \n\t"
                "psubw     %%mm4, %%mm2 \n\t"
                "psubw     %%mm5, %%mm1 \n\t"
                "movq      %%mm2, %%mm0 \n\t"
                "movq      %%mm4, %%mm3 \n\t"
                "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
                "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
                "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
                "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)

                "movd         %4, %%mm5 \n\t"
                "movd         %3, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
                "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy

                "movd         %2, %%mm5 \n\t"
                "movd         %1, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
                "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
                "paddw        %5, %%mm1 \n\t"
                "paddw     %%mm3, %%mm2 \n\t"
                "paddw     %%mm1, %%mm0 \n\t"
                "paddw     %%mm2, %%mm0 \n\t"

                "psrlw        %6, %%mm0 \n\t"
                "packuswb  %%mm0, %%mm0 \n\t"
                "movd      %%mm0, %0    \n\t"

                : "=m"(dst[x + y * stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride + 1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        /* Rewind to the top of the next 4-pixel-wide column. */
        src += 4 - h * stride;
    }
}
1144
1145 /* CAVS-specific */
/* CAVS-specific: the (0,0) qpel position is a plain 8x8 fullpel copy. */
void ff_put_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels8_mmx(dst, src, stride, 8);
}
1150
/* CAVS (0,0) qpel position, 8x8 averaging variant. */
void ff_avg_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels8_mmx(dst, src, stride, 8);
}
1155
/* CAVS qpel mc00 for 16x16 blocks: plain copy via the generic MMX routine. */
void ff_put_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels16_mmx(dst, src, stride, 16);
}
1160
/* CAVS qpel mc00 for 16x16 blocks, averaging variant. */
void ff_avg_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels16_mmx(dst, src, stride, 16);
}
1165
1166 /* VC-1-specific */
/* VC-1 mspel mc00 (no subpel shift): an 8x8 copy, so the rounding mode is
 * irrelevant and the rnd parameter is deliberately ignored; it exists only
 * to match the VC-1 mspel function-pointer signature. */
void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src,
                               ptrdiff_t stride, int rnd)
{
    put_pixels8_mmx(dst, src, stride, 8);
}
1172
/* Clamp each float in src[0..len-1] to [min, max] and store into dst,
 * using SSE maxps/minps on four XMM registers (16 floats = 64 bytes) per
 * iteration, walking from the end of the buffers backwards to offset 0.
 *
 * NOTE(review): the start offset (len - 16) * 4 and the fixed 64-byte step
 * imply len is assumed to be a multiple of 16 and >= 16 — confirm against
 * callers.  movaps requires src and dst to be 16-byte aligned. */
static void vector_clipf_sse(float *dst, const float *src,
                             float min, float max, int len)
{
    /* Byte offset of the last 16-float group; counts down to 0. */
    x86_reg i = (len - 16) * 4;
    __asm__ volatile (
        /* Broadcast min into xmm4 and max into xmm5. */
        "movss          %3, %%xmm4      \n\t"
        "movss          %4, %%xmm5      \n\t"
        "shufps $0, %%xmm4, %%xmm4      \n\t"
        "shufps $0, %%xmm5, %%xmm5      \n\t"
        "1:                             \n\t"
        "movaps   (%2, %0), %%xmm0      \n\t" // 3/1 on intel
        "movaps 16(%2, %0), %%xmm1      \n\t"
        "movaps 32(%2, %0), %%xmm2      \n\t"
        "movaps 48(%2, %0), %%xmm3      \n\t"
        /* max(x, min) then min(x, max) == clamp to [min, max]. */
        "maxps      %%xmm4, %%xmm0      \n\t"
        "maxps      %%xmm4, %%xmm1      \n\t"
        "maxps      %%xmm4, %%xmm2      \n\t"
        "maxps      %%xmm4, %%xmm3      \n\t"
        "minps      %%xmm5, %%xmm0      \n\t"
        "minps      %%xmm5, %%xmm1      \n\t"
        "minps      %%xmm5, %%xmm2      \n\t"
        "minps      %%xmm5, %%xmm3      \n\t"
        "movaps     %%xmm0,   (%1, %0)  \n\t"
        "movaps     %%xmm1, 16(%1, %0)  \n\t"
        "movaps     %%xmm2, 32(%1, %0)  \n\t"
        "movaps     %%xmm3, 48(%1, %0)  \n\t"
        "sub           $64, %0          \n\t"
        "jge            1b              \n\t"
        : "+&r"(i)
        : "r"(dst), "r"(src), "m"(min), "m"(max)
        : "memory"
    );
}
1206
1207 #endif /* HAVE_INLINE_ASM */
1208
1209 void ff_h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale);
1210 void ff_h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale);
1211
1212 int32_t ff_scalarproduct_int16_mmxext(const int16_t *v1, const int16_t *v2,
1213                                       int order);
1214 int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2,
1215                                     int order);
1216 int32_t ff_scalarproduct_and_madd_int16_mmxext(int16_t *v1, const int16_t *v2,
1217                                                const int16_t *v3,
1218                                                int order, int mul);
1219 int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
1220                                              const int16_t *v3,
1221                                              int order, int mul);
1222 int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
1223                                               const int16_t *v3,
1224                                               int order, int mul);
1225
1226 void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
1227                                         const int16_t *window, unsigned int len);
1228 void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
1229                                       const int16_t *window, unsigned int len);
1230 void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
1231                                   const int16_t *window, unsigned int len);
1232 void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
1233                                 const int16_t *window, unsigned int len);
1234 void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
1235                                  const int16_t *window, unsigned int len);
1236 void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
1237                                       const int16_t *window, unsigned int len);
1238
1239 void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
1240 void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);
1241
1242 void ff_add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top,
1243                                           const uint8_t *diff, int w,
1244                                           int *left, int *left_top);
1245 int  ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src,
1246                                        int w, int left);
1247 int  ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,
1248                                       int w, int left);
1249
1250 void ff_vector_clip_int32_mmx     (int32_t *dst, const int32_t *src,
1251                                    int32_t min, int32_t max, unsigned int len);
1252 void ff_vector_clip_int32_sse2    (int32_t *dst, const int32_t *src,
1253                                    int32_t min, int32_t max, unsigned int len);
1254 void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
1255                                    int32_t min, int32_t max, unsigned int len);
1256 void ff_vector_clip_int32_sse4    (int32_t *dst, const int32_t *src,
1257                                    int32_t min, int32_t max, unsigned int len);
1258
/* Fill all 16 quarter-pel motion-compensation function pointers of
 * c->PFX_pixels_tab[IDX] with the CPU-specific implementations named
 * PREFIX ## PFX ## SIZE ## _mcXY_ ## CPU, where X/Y are the horizontal and
 * vertical quarter-pel offsets (tab index = Y * 4 + X; mc00 is the
 * full-pel position). */
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX)                          \
    do {                                                                     \
    c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
    } while (0)
1278
1279 static av_cold void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx,
1280                                      int mm_flags)
1281 {
1282     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
1283
1284 #if HAVE_INLINE_ASM
1285     c->put_pixels_clamped        = ff_put_pixels_clamped_mmx;
1286     c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
1287     c->add_pixels_clamped        = ff_add_pixels_clamped_mmx;
1288
1289     if (!high_bit_depth) {
1290         c->clear_block  = clear_block_mmx;
1291         c->clear_blocks = clear_blocks_mmx;
1292         c->draw_edges   = draw_edges_mmx;
1293
1294         switch (avctx->idct_algo) {
1295         case FF_IDCT_AUTO:
1296         case FF_IDCT_SIMPLEMMX:
1297             c->idct_put              = ff_simple_idct_put_mmx;
1298             c->idct_add              = ff_simple_idct_add_mmx;
1299             c->idct                  = ff_simple_idct_mmx;
1300             c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
1301             break;
1302         case FF_IDCT_XVIDMMX:
1303             c->idct_put              = ff_idct_xvid_mmx_put;
1304             c->idct_add              = ff_idct_xvid_mmx_add;
1305             c->idct                  = ff_idct_xvid_mmx;
1306             break;
1307         }
1308     }
1309
1310     c->gmc = gmc_mmx;
1311
1312     c->add_bytes = add_bytes_mmx;
1313 #endif /* HAVE_INLINE_ASM */
1314
1315 #if HAVE_YASM
1316     if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
1317         c->h263_v_loop_filter = ff_h263_v_loop_filter_mmx;
1318         c->h263_h_loop_filter = ff_h263_h_loop_filter_mmx;
1319     }
1320
1321     c->vector_clip_int32 = ff_vector_clip_int32_mmx;
1322 #endif
1323
1324 }
1325
1326 static av_cold void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx,
1327                                         int mm_flags)
1328 {
1329     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
1330
1331 #if HAVE_YASM
1332     SET_QPEL_FUNCS(avg_qpel,        0, 16, mmxext, );
1333     SET_QPEL_FUNCS(avg_qpel,        1,  8, mmxext, );
1334
1335     SET_QPEL_FUNCS(put_qpel,        0, 16, mmxext, );
1336     SET_QPEL_FUNCS(put_qpel,        1,  8, mmxext, );
1337     SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, );
1338     SET_QPEL_FUNCS(put_no_rnd_qpel, 1,  8, mmxext, );
1339 #endif /* HAVE_YASM */
1340
1341 #if HAVE_INLINE_ASM
1342     if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
1343         c->idct_put = ff_idct_xvid_mmxext_put;
1344         c->idct_add = ff_idct_xvid_mmxext_add;
1345         c->idct     = ff_idct_xvid_mmxext;
1346     }
1347 #endif /* HAVE_INLINE_ASM */
1348
1349 #if HAVE_MMXEXT_EXTERNAL
1350     /* slower than cmov version on AMD */
1351     if (!(mm_flags & AV_CPU_FLAG_3DNOW))
1352         c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmxext;
1353
1354     c->scalarproduct_int16          = ff_scalarproduct_int16_mmxext;
1355     c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmxext;
1356
1357     if (avctx->flags & CODEC_FLAG_BITEXACT) {
1358         c->apply_window_int16 = ff_apply_window_int16_mmxext;
1359     } else {
1360         c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
1361     }
1362 #endif /* HAVE_MMXEXT_EXTERNAL */
1363 }
1364
1365 static av_cold void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx,
1366                                      int mm_flags)
1367 {
1368 #if HAVE_INLINE_ASM
1369     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
1370
1371     if (!high_bit_depth) {
1372         if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)) {
1373             /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
1374             c->clear_block  = clear_block_sse;
1375             c->clear_blocks = clear_blocks_sse;
1376         }
1377     }
1378
1379     c->vector_clipf = vector_clipf_sse;
1380 #endif /* HAVE_INLINE_ASM */
1381 }
1382
1383 static av_cold void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
1384                                       int mm_flags)
1385 {
1386     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
1387
1388 #if HAVE_SSE2_INLINE
1389     if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
1390         c->idct_put              = ff_idct_xvid_sse2_put;
1391         c->idct_add              = ff_idct_xvid_sse2_add;
1392         c->idct                  = ff_idct_xvid_sse2;
1393         c->idct_permutation_type = FF_SSE2_IDCT_PERM;
1394     }
1395 #endif /* HAVE_SSE2_INLINE */
1396
1397 #if HAVE_SSE2_EXTERNAL
1398     c->scalarproduct_int16          = ff_scalarproduct_int16_sse2;
1399     c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
1400     if (mm_flags & AV_CPU_FLAG_ATOM) {
1401         c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
1402     } else {
1403         c->vector_clip_int32 = ff_vector_clip_int32_sse2;
1404     }
1405     if (avctx->flags & CODEC_FLAG_BITEXACT) {
1406         c->apply_window_int16 = ff_apply_window_int16_sse2;
1407     } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
1408         c->apply_window_int16 = ff_apply_window_int16_round_sse2;
1409     }
1410     c->bswap_buf = ff_bswap32_buf_sse2;
1411 #endif /* HAVE_SSE2_EXTERNAL */
1412 }
1413
1414 static av_cold void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
1415                                        int mm_flags)
1416 {
1417 #if HAVE_SSSE3_EXTERNAL
1418     c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
1419     if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
1420         c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
1421
1422     if (mm_flags & AV_CPU_FLAG_ATOM)
1423         c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
1424     else
1425         c->apply_window_int16 = ff_apply_window_int16_ssse3;
1426     if (!(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW))) // cachesplit
1427         c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
1428     c->bswap_buf = ff_bswap32_buf_ssse3;
1429 #endif /* HAVE_SSSE3_EXTERNAL */
1430 }
1431
/* Install SSE4 implementations into the DSPContext.  Only the int32 clip
 * routine has an SSE4 variant; avctx and mm_flags are unused but keep the
 * signature uniform with the other per-CPU-level init functions. */
static av_cold void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
                                      int mm_flags)
{
#if HAVE_SSE4_EXTERNAL
    c->vector_clip_int32 = ff_vector_clip_int32_sse4;
#endif /* HAVE_SSE4_EXTERNAL */
}
1439
1440 av_cold void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
1441 {
1442     int mm_flags = av_get_cpu_flags();
1443
1444 #if HAVE_7REGS && HAVE_INLINE_ASM
1445     if (mm_flags & AV_CPU_FLAG_CMOV)
1446         c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
1447 #endif
1448
1449     if (mm_flags & AV_CPU_FLAG_MMX)
1450         dsputil_init_mmx(c, avctx, mm_flags);
1451
1452     if (mm_flags & AV_CPU_FLAG_MMXEXT)
1453         dsputil_init_mmxext(c, avctx, mm_flags);
1454
1455     if (mm_flags & AV_CPU_FLAG_SSE)
1456         dsputil_init_sse(c, avctx, mm_flags);
1457
1458     if (mm_flags & AV_CPU_FLAG_SSE2)
1459         dsputil_init_sse2(c, avctx, mm_flags);
1460
1461     if (mm_flags & AV_CPU_FLAG_SSSE3)
1462         dsputil_init_ssse3(c, avctx, mm_flags);
1463
1464     if (mm_flags & AV_CPU_FLAG_SSE4)
1465         dsputil_init_sse4(c, avctx, mm_flags);
1466
1467     if (CONFIG_ENCODERS)
1468         ff_dsputilenc_init_mmx(c, avctx);
1469 }