1 /*
2  * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of Libav.
5  *
6  * Libav is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * Libav is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with Libav; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 #include "swscale_template.h"
22
23 #undef REAL_MOVNTQ
24 #undef MOVNTQ
25 #undef PREFETCH
26
27 #if COMPILE_TEMPLATE_MMX2
28 #define PREFETCH "prefetchnta"
29 #else
30 #define PREFETCH  " # nop"
31 #endif
32
33 #if COMPILE_TEMPLATE_MMX2
34 #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
35 #else
36 #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
37 #endif
38 #define MOVNTQ(a,b)  REAL_MOVNTQ(a,b)
39
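/* Vertical multi-tap filter to an 8-bit plane: walks the {source pointer,
 * coefficient} list stored at "offset" inside the context (terminated by a
 * NULL source pointer), accumulates the pmulhw products on top of the
 * rounder, then shifts right by 3, packs to unsigned bytes and stores
 * 8 output pixels per iteration. */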
40 #define YSCALEYUV2YV12X(x, offset, dest, width) \
41     __asm__ volatile(\
42         "xor                          %%"REG_a", %%"REG_a"  \n\t"\
43         "movq             "VROUNDER_OFFSET"(%0), %%mm3      \n\t"\
44         "movq                             %%mm3, %%mm4      \n\t"\
45         "lea                     " offset "(%0), %%"REG_d"  \n\t"\
46         "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
47         ".p2align                             4             \n\t" /* FIXME Unroll? */\
48         "1:                                                 \n\t"\
49         "movq                      8(%%"REG_d"), %%mm0      \n\t" /* filterCoeff */\
50         "movq   "  x "(%%"REG_S", %%"REG_a", 2), %%mm2      \n\t" /* srcData */\
51         "movq 8+"  x "(%%"REG_S", %%"REG_a", 2), %%mm5      \n\t" /* srcData */\
52         "add                                $16, %%"REG_d"  \n\t"\
53         "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
54         "test                         %%"REG_S", %%"REG_S"  \n\t"\
55         "pmulhw                           %%mm0, %%mm2      \n\t"\
56         "pmulhw                           %%mm0, %%mm5      \n\t"\
57         "paddw                            %%mm2, %%mm3      \n\t"\
58         "paddw                            %%mm5, %%mm4      \n\t"\
59         " jnz                                1b             \n\t"\
60         "psraw                               $3, %%mm3      \n\t"\
61         "psraw                               $3, %%mm4      \n\t"\
62         "packuswb                         %%mm4, %%mm3      \n\t"\
63         MOVNTQ(%%mm3, (%1, %%REGa))\
64         "add                                 $8, %%"REG_a"  \n\t"\
65         "cmp                                 %2, %%"REG_a"  \n\t"\
66         "movq             "VROUNDER_OFFSET"(%0), %%mm3      \n\t"\
67         "movq                             %%mm3, %%mm4      \n\t"\
68         "lea                     " offset "(%0), %%"REG_d"  \n\t"\
69         "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
70         "jb                                  1b             \n\t"\
71         :: "r" (&c->redDither),\
72         "r" (dest), "g" ((x86_reg)width)\
73         : "%"REG_a, "%"REG_d, "%"REG_S\
74     );
75
76 static inline void RENAME(yuv2yuvX)(SwsContext *c, const int16_t *lumFilter,
77                                     const int16_t **lumSrc, int lumFilterSize,
78                                     const int16_t *chrFilter, const int16_t **chrSrc,
79                                     int chrFilterSize, const int16_t **alpSrc,
80                                     uint8_t *dest, uint8_t *uDest, uint8_t *vDest,
81                                     uint8_t *aDest, long dstW, long chrDstW)
82 {
83     if (uDest) {
84         YSCALEYUV2YV12X(   "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
85         YSCALEYUV2YV12X(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
86     }
87     if (CONFIG_SWSCALE_ALPHA && aDest) {
88         YSCALEYUV2YV12X(   "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
89     }
90
91     YSCALEYUV2YV12X("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
92 }
93
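/* Bit-accurate variant of the above: reads the coefficient list in pairs
 * (APCK_PTR2/APCK_COEF/APCK_SIZE layout), interleaves two source rows with
 * punpcklwd/punpckhwd and accumulates pmaddwd products in 32 bits before
 * rounding, shifting and packing. */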
94 #define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
95     __asm__ volatile(\
96         "lea                     " offset "(%0), %%"REG_d"  \n\t"\
97         "xor                          %%"REG_a", %%"REG_a"  \n\t"\
98         "pxor                             %%mm4, %%mm4      \n\t"\
99         "pxor                             %%mm5, %%mm5      \n\t"\
100         "pxor                             %%mm6, %%mm6      \n\t"\
101         "pxor                             %%mm7, %%mm7      \n\t"\
102         "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
103         ".p2align                             4             \n\t"\
104         "1:                                                 \n\t"\
105         "movq   "  x "(%%"REG_S", %%"REG_a", 2), %%mm0      \n\t" /* srcData */\
106         "movq 8+"  x "(%%"REG_S", %%"REG_a", 2), %%mm2      \n\t" /* srcData */\
107         "mov        "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S"  \n\t"\
108         "movq   "  x "(%%"REG_S", %%"REG_a", 2), %%mm1      \n\t" /* srcData */\
109         "movq                             %%mm0, %%mm3      \n\t"\
110         "punpcklwd                        %%mm1, %%mm0      \n\t"\
111         "punpckhwd                        %%mm1, %%mm3      \n\t"\
112         "movq       "STR(APCK_COEF)"(%%"REG_d"), %%mm1      \n\t" /* filterCoeff */\
113         "pmaddwd                          %%mm1, %%mm0      \n\t"\
114         "pmaddwd                          %%mm1, %%mm3      \n\t"\
115         "paddd                            %%mm0, %%mm4      \n\t"\
116         "paddd                            %%mm3, %%mm5      \n\t"\
117         "movq 8+"  x "(%%"REG_S", %%"REG_a", 2), %%mm3      \n\t" /* srcData */\
118         "mov        "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S"  \n\t"\
119         "add                  $"STR(APCK_SIZE)", %%"REG_d"  \n\t"\
120         "test                         %%"REG_S", %%"REG_S"  \n\t"\
121         "movq                             %%mm2, %%mm0      \n\t"\
122         "punpcklwd                        %%mm3, %%mm2      \n\t"\
123         "punpckhwd                        %%mm3, %%mm0      \n\t"\
124         "pmaddwd                          %%mm1, %%mm2      \n\t"\
125         "pmaddwd                          %%mm1, %%mm0      \n\t"\
126         "paddd                            %%mm2, %%mm6      \n\t"\
127         "paddd                            %%mm0, %%mm7      \n\t"\
128         " jnz                                1b             \n\t"\
129         "psrad                              $16, %%mm4      \n\t"\
130         "psrad                              $16, %%mm5      \n\t"\
131         "psrad                              $16, %%mm6      \n\t"\
132         "psrad                              $16, %%mm7      \n\t"\
133         "movq             "VROUNDER_OFFSET"(%0), %%mm0      \n\t"\
134         "packssdw                         %%mm5, %%mm4      \n\t"\
135         "packssdw                         %%mm7, %%mm6      \n\t"\
136         "paddw                            %%mm0, %%mm4      \n\t"\
137         "paddw                            %%mm0, %%mm6      \n\t"\
138         "psraw                               $3, %%mm4      \n\t"\
139         "psraw                               $3, %%mm6      \n\t"\
140         "packuswb                         %%mm6, %%mm4      \n\t"\
141         MOVNTQ(%%mm4, (%1, %%REGa))\
142         "add                                 $8, %%"REG_a"  \n\t"\
143         "cmp                                 %2, %%"REG_a"  \n\t"\
144         "lea                     " offset "(%0), %%"REG_d"  \n\t"\
145         "pxor                             %%mm4, %%mm4      \n\t"\
146         "pxor                             %%mm5, %%mm5      \n\t"\
147         "pxor                             %%mm6, %%mm6      \n\t"\
148         "pxor                             %%mm7, %%mm7      \n\t"\
149         "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
150         "jb                                  1b             \n\t"\
151         :: "r" (&c->redDither),\
152         "r" (dest), "g" ((x86_reg)width)\
153         : "%"REG_a, "%"REG_d, "%"REG_S\
154     );
155
156 static inline void RENAME(yuv2yuvX_ar)(SwsContext *c, const int16_t *lumFilter,
157                                        const int16_t **lumSrc, int lumFilterSize,
158                                        const int16_t *chrFilter, const int16_t **chrSrc,
159                                        int chrFilterSize, const int16_t **alpSrc,
160                                        uint8_t *dest, uint8_t *uDest, uint8_t *vDest,
161                                        uint8_t *aDest, long dstW, long chrDstW)
162 {
163     if (uDest) {
164         YSCALEYUV2YV12X_ACCURATE(   "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
165         YSCALEYUV2YV12X_ACCURATE(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
166     }
167     if (CONFIG_SWSCALE_ALPHA && aDest) {
168         YSCALEYUV2YV12X_ACCURATE(   "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
169     }
170
171     YSCALEYUV2YV12X_ACCURATE("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
172 }
173
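/* Unscaled single-source plane output: shifts the 16-bit intermediates
   right by 7 and packs them; no rounding is applied. */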
174 #define YSCALEYUV2YV121 \
175     "mov %2, %%"REG_a"                    \n\t"\
176     ".p2align               4             \n\t" /* FIXME Unroll? */\
177     "1:                                   \n\t"\
178     "movq  (%0, %%"REG_a", 2), %%mm0      \n\t"\
179     "movq 8(%0, %%"REG_a", 2), %%mm1      \n\t"\
180     "psraw                 $7, %%mm0      \n\t"\
181     "psraw                 $7, %%mm1      \n\t"\
182     "packuswb           %%mm1, %%mm0      \n\t"\
183     MOVNTQ(%%mm0, (%1, %%REGa))\
184     "add                   $8, %%"REG_a"  \n\t"\
185     "jnc                   1b             \n\t"
186
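/* The source/destination pointers below point at the end of each line and
   the asm runs a negative index up to zero, so the loop can terminate on
   the carry flag ("jnc") without an extra compare. */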
187 static inline void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc,
188                                     const int16_t *chrSrc, const int16_t *alpSrc,
189                                     uint8_t *dest, uint8_t *uDest, uint8_t *vDest,
190                                     uint8_t *aDest, long dstW, long chrDstW)
191 {
192     long p= 4;
193     const uint8_t *src[4]= { alpSrc + dstW, lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW };
194     uint8_t *dst[4]= { aDest, dest, uDest, vDest };
195     x86_reg counter[4]= { dstW, dstW, chrDstW, chrDstW };
196
197     while (p--) {
198         if (dst[p]) {
199             __asm__ volatile(
200                YSCALEYUV2YV121
201                :: "r" (src[p]), "r" (dst[p] + counter[p]),
202                   "g" (-counter[p])
203                : "%"REG_a
204             );
205         }
206     }
207 }
208
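/* Same as YSCALEYUV2YV121, but adds a rounding constant of 64 (built with
   pcmpeqw/psrlw/psllw) before the shift right by 7. */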
209 #define YSCALEYUV2YV121_ACCURATE \
210     "mov %2, %%"REG_a"                    \n\t"\
211     "pcmpeqw %%mm7, %%mm7                 \n\t"\
212     "psrlw                 $15, %%mm7     \n\t"\
213     "psllw                  $6, %%mm7     \n\t"\
214     ".p2align                4            \n\t" /* FIXME Unroll? */\
215     "1:                                   \n\t"\
216     "movq  (%0, %%"REG_a", 2), %%mm0      \n\t"\
217     "movq 8(%0, %%"REG_a", 2), %%mm1      \n\t"\
218     "paddsw             %%mm7, %%mm0      \n\t"\
219     "paddsw             %%mm7, %%mm1      \n\t"\
220     "psraw                 $7, %%mm0      \n\t"\
221     "psraw                 $7, %%mm1      \n\t"\
222     "packuswb           %%mm1, %%mm0      \n\t"\
223     MOVNTQ(%%mm0, (%1, %%REGa))\
224     "add                   $8, %%"REG_a"  \n\t"\
225     "jnc                   1b             \n\t"
226
227 static inline void RENAME(yuv2yuv1_ar)(SwsContext *c, const int16_t *lumSrc,
228                                        const int16_t *chrSrc, const int16_t *alpSrc,
229                                        uint8_t *dest, uint8_t *uDest, uint8_t *vDest,
230                                        uint8_t *aDest, long dstW, long chrDstW)
231 {
232     long p= 4;
233     const uint8_t *src[4]= { alpSrc + dstW, lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW };
234     uint8_t *dst[4]= { aDest, dest, uDest, vDest };
235     x86_reg counter[4]= { dstW, dstW, chrDstW, chrDstW };
236
237     while (p--) {
238         if (dst[p]) {
239             __asm__ volatile(
240                 YSCALEYUV2YV121_ACCURATE
241                 :: "r" (src[p]), "r" (dst[p] + counter[p]),
242                    "g" (-counter[p])
243                 : "%"REG_a
244             );
245         }
246     }
247 }
248
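/* Building blocks for the packed-output vertical scalers: the outer loop
 * (label 1) walks the output pixels, the inner loops (label 2) walk the
 * filter taps.  The _UV part leaves the filtered U in %%mm3 and V in
 * %%mm4; the _YA part accumulates two groups of four luma (or alpha)
 * samples into dst1/dst2. */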
249 #define YSCALEYUV2PACKEDX_UV \
250     __asm__ volatile(\
251         "xor                   %%"REG_a", %%"REG_a"     \n\t"\
252         ".p2align                      4                \n\t"\
253         "nop                                            \n\t"\
254         "1:                                             \n\t"\
255         "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"     \n\t"\
256         "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
257         "movq      "VROUNDER_OFFSET"(%0), %%mm3         \n\t"\
258         "movq                      %%mm3, %%mm4         \n\t"\
259         ".p2align                      4                \n\t"\
260         "2:                                             \n\t"\
261         "movq               8(%%"REG_d"), %%mm0         \n\t" /* filterCoeff */\
262         "movq     (%%"REG_S", %%"REG_a"), %%mm2         \n\t" /* UsrcData */\
263         "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm5         \n\t" /* VsrcData */\
264         "add                         $16, %%"REG_d"     \n\t"\
265         "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
266         "pmulhw                    %%mm0, %%mm2         \n\t"\
267         "pmulhw                    %%mm0, %%mm5         \n\t"\
268         "paddw                     %%mm2, %%mm3         \n\t"\
269         "paddw                     %%mm5, %%mm4         \n\t"\
270         "test                  %%"REG_S", %%"REG_S"     \n\t"\
271         " jnz                         2b                \n\t"\
272
273 #define YSCALEYUV2PACKEDX_YA(offset,coeff,src1,src2,dst1,dst2) \
274     "lea                "offset"(%0), %%"REG_d"     \n\t"\
275     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
276     "movq      "VROUNDER_OFFSET"(%0), "#dst1"       \n\t"\
277     "movq                    "#dst1", "#dst2"       \n\t"\
278     ".p2align                      4                \n\t"\
279     "2:                                             \n\t"\
280     "movq               8(%%"REG_d"), "#coeff"      \n\t" /* filterCoeff */\
281     "movq  (%%"REG_S", %%"REG_a", 2), "#src1"       \n\t" /* Y1srcData */\
282     "movq 8(%%"REG_S", %%"REG_a", 2), "#src2"       \n\t" /* Y2srcData */\
283     "add                         $16, %%"REG_d"            \n\t"\
284     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
285     "pmulhw                 "#coeff", "#src1"       \n\t"\
286     "pmulhw                 "#coeff", "#src2"       \n\t"\
287     "paddw                   "#src1", "#dst1"       \n\t"\
288     "paddw                   "#src2", "#dst2"       \n\t"\
289     "test                  %%"REG_S", %%"REG_S"     \n\t"\
290     " jnz                         2b                \n\t"\
291
292 #define YSCALEYUV2PACKEDX \
293     YSCALEYUV2PACKEDX_UV \
294     YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET,%%mm0,%%mm2,%%mm5,%%mm1,%%mm7) \
295
296 #define YSCALEYUV2PACKEDX_END                     \
297         :: "r" (&c->redDither),                   \
298             "m" (dummy), "m" (dummy), "m" (dummy),\
299             "r" (dest), "m" (dstW_reg)            \
300         : "%"REG_a, "%"REG_d, "%"REG_S            \
301     );
302
303 #define YSCALEYUV2PACKEDX_ACCURATE_UV \
304     __asm__ volatile(\
305         "xor %%"REG_a", %%"REG_a"                       \n\t"\
306         ".p2align                      4                \n\t"\
307         "nop                                            \n\t"\
308         "1:                                             \n\t"\
309         "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"     \n\t"\
310         "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
311         "pxor                      %%mm4, %%mm4         \n\t"\
312         "pxor                      %%mm5, %%mm5         \n\t"\
313         "pxor                      %%mm6, %%mm6         \n\t"\
314         "pxor                      %%mm7, %%mm7         \n\t"\
315         ".p2align                      4                \n\t"\
316         "2:                                             \n\t"\
317         "movq     (%%"REG_S", %%"REG_a"), %%mm0         \n\t" /* UsrcData */\
318         "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm2         \n\t" /* VsrcData */\
319         "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S"     \n\t"\
320         "movq     (%%"REG_S", %%"REG_a"), %%mm1         \n\t" /* UsrcData */\
321         "movq                      %%mm0, %%mm3         \n\t"\
322         "punpcklwd                 %%mm1, %%mm0         \n\t"\
323         "punpckhwd                 %%mm1, %%mm3         \n\t"\
324         "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1         \n\t" /* filterCoeff */\
325         "pmaddwd                   %%mm1, %%mm0         \n\t"\
326         "pmaddwd                   %%mm1, %%mm3         \n\t"\
327         "paddd                     %%mm0, %%mm4         \n\t"\
328         "paddd                     %%mm3, %%mm5         \n\t"\
329         "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm3         \n\t" /* VsrcData */\
330         "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S"     \n\t"\
331         "add           $"STR(APCK_SIZE)", %%"REG_d"     \n\t"\
332         "test                  %%"REG_S", %%"REG_S"     \n\t"\
333         "movq                      %%mm2, %%mm0         \n\t"\
334         "punpcklwd                 %%mm3, %%mm2         \n\t"\
335         "punpckhwd                 %%mm3, %%mm0         \n\t"\
336         "pmaddwd                   %%mm1, %%mm2         \n\t"\
337         "pmaddwd                   %%mm1, %%mm0         \n\t"\
338         "paddd                     %%mm2, %%mm6         \n\t"\
339         "paddd                     %%mm0, %%mm7         \n\t"\
340         " jnz                         2b                \n\t"\
341         "psrad                       $16, %%mm4         \n\t"\
342         "psrad                       $16, %%mm5         \n\t"\
343         "psrad                       $16, %%mm6         \n\t"\
344         "psrad                       $16, %%mm7         \n\t"\
345         "movq      "VROUNDER_OFFSET"(%0), %%mm0         \n\t"\
346         "packssdw                  %%mm5, %%mm4         \n\t"\
347         "packssdw                  %%mm7, %%mm6         \n\t"\
348         "paddw                     %%mm0, %%mm4         \n\t"\
349         "paddw                     %%mm0, %%mm6         \n\t"\
350         "movq                      %%mm4, "U_TEMP"(%0)  \n\t"\
351         "movq                      %%mm6, "V_TEMP"(%0)  \n\t"\
352
353 #define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \
354     "lea                "offset"(%0), %%"REG_d"     \n\t"\
355     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
356     "pxor                      %%mm1, %%mm1         \n\t"\
357     "pxor                      %%mm5, %%mm5         \n\t"\
358     "pxor                      %%mm7, %%mm7         \n\t"\
359     "pxor                      %%mm6, %%mm6         \n\t"\
360     ".p2align                      4                \n\t"\
361     "2:                                             \n\t"\
362     "movq  (%%"REG_S", %%"REG_a", 2), %%mm0         \n\t" /* Y1srcData */\
363     "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2         \n\t" /* Y2srcData */\
364     "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S"     \n\t"\
365     "movq  (%%"REG_S", %%"REG_a", 2), %%mm4         \n\t" /* Y1srcData */\
366     "movq                      %%mm0, %%mm3         \n\t"\
367     "punpcklwd                 %%mm4, %%mm0         \n\t"\
368     "punpckhwd                 %%mm4, %%mm3         \n\t"\
369     "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4         \n\t" /* filterCoeff */\
370     "pmaddwd                   %%mm4, %%mm0         \n\t"\
371     "pmaddwd                   %%mm4, %%mm3         \n\t"\
372     "paddd                     %%mm0, %%mm1         \n\t"\
373     "paddd                     %%mm3, %%mm5         \n\t"\
374     "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3         \n\t" /* Y2srcData */\
375     "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S"     \n\t"\
376     "add           $"STR(APCK_SIZE)", %%"REG_d"     \n\t"\
377     "test                  %%"REG_S", %%"REG_S"     \n\t"\
378     "movq                      %%mm2, %%mm0         \n\t"\
379     "punpcklwd                 %%mm3, %%mm2         \n\t"\
380     "punpckhwd                 %%mm3, %%mm0         \n\t"\
381     "pmaddwd                   %%mm4, %%mm2         \n\t"\
382     "pmaddwd                   %%mm4, %%mm0         \n\t"\
383     "paddd                     %%mm2, %%mm7         \n\t"\
384     "paddd                     %%mm0, %%mm6         \n\t"\
385     " jnz                         2b                \n\t"\
386     "psrad                       $16, %%mm1         \n\t"\
387     "psrad                       $16, %%mm5         \n\t"\
388     "psrad                       $16, %%mm7         \n\t"\
389     "psrad                       $16, %%mm6         \n\t"\
390     "movq      "VROUNDER_OFFSET"(%0), %%mm0         \n\t"\
391     "packssdw                  %%mm5, %%mm1         \n\t"\
392     "packssdw                  %%mm6, %%mm7         \n\t"\
393     "paddw                     %%mm0, %%mm1         \n\t"\
394     "paddw                     %%mm0, %%mm7         \n\t"\
395     "movq               "U_TEMP"(%0), %%mm3         \n\t"\
396     "movq               "V_TEMP"(%0), %%mm4         \n\t"\
397
398 #define YSCALEYUV2PACKEDX_ACCURATE \
399     YSCALEYUV2PACKEDX_ACCURATE_UV \
400     YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)
401
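/* Convert the filtered Y (%%mm1/%%mm7), U (%%mm3) and V (%%mm4) words to
 * RGB using the coefficient tables stored in the context (Y_COEFF,
 * UB/UG/VG/VR_COEFF, Y/U/V_OFFSET).  On exit %%mm2 holds 8 blue, %%mm4
 * 8 green and %%mm5 8 red bytes. */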
402 #define YSCALEYUV2RGBX \
403     "psubw  "U_OFFSET"(%0), %%mm3       \n\t" /* (U-128)8*/\
404     "psubw  "V_OFFSET"(%0), %%mm4       \n\t" /* (V-128)8*/\
405     "movq            %%mm3, %%mm2       \n\t" /* (U-128)8*/\
406     "movq            %%mm4, %%mm5       \n\t" /* (V-128)8*/\
407     "pmulhw "UG_COEFF"(%0), %%mm3       \n\t"\
408     "pmulhw "VG_COEFF"(%0), %%mm4       \n\t"\
409     /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
410     "pmulhw "UB_COEFF"(%0), %%mm2       \n\t"\
411     "pmulhw "VR_COEFF"(%0), %%mm5       \n\t"\
412     "psubw  "Y_OFFSET"(%0), %%mm1       \n\t" /* 8(Y-16)*/\
413     "psubw  "Y_OFFSET"(%0), %%mm7       \n\t" /* 8(Y-16)*/\
414     "pmulhw  "Y_COEFF"(%0), %%mm1       \n\t"\
415     "pmulhw  "Y_COEFF"(%0), %%mm7       \n\t"\
416     /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
417     "paddw           %%mm3, %%mm4       \n\t"\
418     "movq            %%mm2, %%mm0       \n\t"\
419     "movq            %%mm5, %%mm6       \n\t"\
420     "movq            %%mm4, %%mm3       \n\t"\
421     "punpcklwd       %%mm2, %%mm2       \n\t"\
422     "punpcklwd       %%mm5, %%mm5       \n\t"\
423     "punpcklwd       %%mm4, %%mm4       \n\t"\
424     "paddw           %%mm1, %%mm2       \n\t"\
425     "paddw           %%mm1, %%mm5       \n\t"\
426     "paddw           %%mm1, %%mm4       \n\t"\
427     "punpckhwd       %%mm0, %%mm0       \n\t"\
428     "punpckhwd       %%mm6, %%mm6       \n\t"\
429     "punpckhwd       %%mm3, %%mm3       \n\t"\
430     "paddw           %%mm7, %%mm0       \n\t"\
431     "paddw           %%mm7, %%mm6       \n\t"\
432     "paddw           %%mm7, %%mm3       \n\t"\
433     /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
434     "packuswb        %%mm0, %%mm2       \n\t"\
435     "packuswb        %%mm6, %%mm5       \n\t"\
436     "packuswb        %%mm3, %%mm4       \n\t"\
437
438 #define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
439     "movq       "#b", "#q2"     \n\t" /* B */\
440     "movq       "#r", "#t"      \n\t" /* R */\
441     "punpcklbw  "#g", "#b"      \n\t" /* GBGBGBGB 0 */\
442     "punpcklbw  "#a", "#r"      \n\t" /* ARARARAR 0 */\
443     "punpckhbw  "#g", "#q2"     \n\t" /* GBGBGBGB 2 */\
444     "punpckhbw  "#a", "#t"      \n\t" /* ARARARAR 2 */\
445     "movq       "#b", "#q0"     \n\t" /* GBGBGBGB 0 */\
446     "movq      "#q2", "#q3"     \n\t" /* GBGBGBGB 2 */\
447     "punpcklwd  "#r", "#q0"     \n\t" /* ARGBARGB 0 */\
448     "punpckhwd  "#r", "#b"      \n\t" /* ARGBARGB 1 */\
449     "punpcklwd  "#t", "#q2"     \n\t" /* ARGBARGB 2 */\
450     "punpckhwd  "#t", "#q3"     \n\t" /* ARGBARGB 3 */\
451 \
452     MOVNTQ(   q0,   (dst, index, 4))\
453     MOVNTQ(    b,  8(dst, index, 4))\
454     MOVNTQ(   q2, 16(dst, index, 4))\
455     MOVNTQ(   q3, 24(dst, index, 4))\
456 \
457     "add      $8, "#index"      \n\t"\
458     "cmp "#dstw", "#index"      \n\t"\
459     " jb      1b                \n\t"
460 #define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)  REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)
461
462 static inline void RENAME(yuv2rgb32_X_ar)(SwsContext *c, const int16_t *lumFilter,
463                                           const int16_t **lumSrc, int lumFilterSize,
464                                           const int16_t *chrFilter, const int16_t **chrSrc,
465                                           int chrFilterSize, const int16_t **alpSrc,
466                                           uint8_t *dest, long dstW, long dstY)
467 {
468     x86_reg dummy=0;
469     x86_reg dstW_reg = dstW;
470
471     if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
472         YSCALEYUV2PACKEDX_ACCURATE
473         YSCALEYUV2RGBX
474         "movq                      %%mm2, "U_TEMP"(%0)  \n\t"
475         "movq                      %%mm4, "V_TEMP"(%0)  \n\t"
476         "movq                      %%mm5, "Y_TEMP"(%0)  \n\t"
477         YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET)
478         "movq               "Y_TEMP"(%0), %%mm5         \n\t"
479         "psraw                        $3, %%mm1         \n\t"
480         "psraw                        $3, %%mm7         \n\t"
481         "packuswb                  %%mm7, %%mm1         \n\t"
482         WRITEBGR32(%4, %5, %%REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6)
483         YSCALEYUV2PACKEDX_END
484     } else {
485         YSCALEYUV2PACKEDX_ACCURATE
486         YSCALEYUV2RGBX
487         "pcmpeqd %%mm7, %%mm7 \n\t"
488         WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
489         YSCALEYUV2PACKEDX_END
490     }
491 }
492
493 static inline void RENAME(yuv2rgb32_X)(SwsContext *c, const int16_t *lumFilter,
494                                        const int16_t **lumSrc, int lumFilterSize,
495                                        const int16_t *chrFilter, const int16_t **chrSrc,
496                                        int chrFilterSize, const int16_t **alpSrc,
497                                        uint8_t *dest, long dstW, long dstY)
498 {
499     x86_reg dummy=0;
500     x86_reg dstW_reg = dstW;
501
502     if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
503         YSCALEYUV2PACKEDX
504         YSCALEYUV2RGBX
505         YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
506         "psraw                        $3, %%mm1         \n\t"
507         "psraw                        $3, %%mm7         \n\t"
508         "packuswb                  %%mm7, %%mm1         \n\t"
509         WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
510         YSCALEYUV2PACKEDX_END
511     } else {
512         YSCALEYUV2PACKEDX
513         YSCALEYUV2RGBX
514         "pcmpeqd %%mm7, %%mm7 \n\t"
515         WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
516         YSCALEYUV2PACKEDX_END
517     }
518 }
519
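/* Pack the B/G/R bytes produced by YSCALEYUV2RGBX into 16-bit 5-6-5
   pixels: mask to 5/6/5 bits, interleave and store 8 pixels (16 bytes)
   per iteration.  WRITERGB15 below is the 5-5-5 analogue. */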
520 #define REAL_WRITERGB16(dst, dstw, index) \
521     "pand "MANGLE(bF8)", %%mm2  \n\t" /* B */\
522     "pand "MANGLE(bFC)", %%mm4  \n\t" /* G */\
523     "pand "MANGLE(bF8)", %%mm5  \n\t" /* R */\
524     "psrlq           $3, %%mm2  \n\t"\
525 \
526     "movq         %%mm2, %%mm1  \n\t"\
527     "movq         %%mm4, %%mm3  \n\t"\
528 \
529     "punpcklbw    %%mm7, %%mm3  \n\t"\
530     "punpcklbw    %%mm5, %%mm2  \n\t"\
531     "punpckhbw    %%mm7, %%mm4  \n\t"\
532     "punpckhbw    %%mm5, %%mm1  \n\t"\
533 \
534     "psllq           $3, %%mm3  \n\t"\
535     "psllq           $3, %%mm4  \n\t"\
536 \
537     "por          %%mm3, %%mm2  \n\t"\
538     "por          %%mm4, %%mm1  \n\t"\
539 \
540     MOVNTQ(%%mm2,  (dst, index, 2))\
541     MOVNTQ(%%mm1, 8(dst, index, 2))\
542 \
543     "add             $8, "#index"   \n\t"\
544     "cmp        "#dstw", "#index"   \n\t"\
545     " jb             1b             \n\t"
546 #define WRITERGB16(dst, dstw, index)  REAL_WRITERGB16(dst, dstw, index)
547
548 static inline void RENAME(yuv2rgb565_X_ar)(SwsContext *c, const int16_t *lumFilter,
549                                            const int16_t **lumSrc, int lumFilterSize,
550                                            const int16_t *chrFilter, const int16_t **chrSrc,
551                                            int chrFilterSize, const int16_t **alpSrc,
552                                            uint8_t *dest, long dstW, long dstY)
553 {
554     x86_reg dummy=0;
555     x86_reg dstW_reg = dstW;
556
557     YSCALEYUV2PACKEDX_ACCURATE
558     YSCALEYUV2RGBX
559     "pxor %%mm7, %%mm7 \n\t"
560     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
561 #ifdef DITHER1XBPP
562     "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
563     "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
564     "paddusb "RED_DITHER"(%0), %%mm5\n\t"
565 #endif
566     WRITERGB16(%4, %5, %%REGa)
567     YSCALEYUV2PACKEDX_END
568 }
569
570 static inline void RENAME(yuv2rgb565_X)(SwsContext *c, const int16_t *lumFilter,
571                                         const int16_t **lumSrc, int lumFilterSize,
572                                         const int16_t *chrFilter, const int16_t **chrSrc,
573                                         int chrFilterSize, const int16_t **alpSrc,
574                                         uint8_t *dest, long dstW, long dstY)
575 {
576     x86_reg dummy=0;
577     x86_reg dstW_reg = dstW;
578
579     YSCALEYUV2PACKEDX
580     YSCALEYUV2RGBX
581     "pxor %%mm7, %%mm7 \n\t"
582     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
583 #ifdef DITHER1XBPP
584     "paddusb "BLUE_DITHER"(%0), %%mm2  \n\t"
585     "paddusb "GREEN_DITHER"(%0), %%mm4  \n\t"
586     "paddusb "RED_DITHER"(%0), %%mm5  \n\t"
587 #endif
588     WRITERGB16(%4, %5, %%REGa)
589     YSCALEYUV2PACKEDX_END
590 }
591
592 #define REAL_WRITERGB15(dst, dstw, index) \
593     "pand "MANGLE(bF8)", %%mm2  \n\t" /* B */\
594     "pand "MANGLE(bF8)", %%mm4  \n\t" /* G */\
595     "pand "MANGLE(bF8)", %%mm5  \n\t" /* R */\
596     "psrlq           $3, %%mm2  \n\t"\
597     "psrlq           $1, %%mm5  \n\t"\
598 \
599     "movq         %%mm2, %%mm1  \n\t"\
600     "movq         %%mm4, %%mm3  \n\t"\
601 \
602     "punpcklbw    %%mm7, %%mm3  \n\t"\
603     "punpcklbw    %%mm5, %%mm2  \n\t"\
604     "punpckhbw    %%mm7, %%mm4  \n\t"\
605     "punpckhbw    %%mm5, %%mm1  \n\t"\
606 \
607     "psllq           $2, %%mm3  \n\t"\
608     "psllq           $2, %%mm4  \n\t"\
609 \
610     "por          %%mm3, %%mm2  \n\t"\
611     "por          %%mm4, %%mm1  \n\t"\
612 \
613     MOVNTQ(%%mm2,  (dst, index, 2))\
614     MOVNTQ(%%mm1, 8(dst, index, 2))\
615 \
616     "add             $8, "#index"   \n\t"\
617     "cmp        "#dstw", "#index"   \n\t"\
618     " jb             1b             \n\t"
619 #define WRITERGB15(dst, dstw, index)  REAL_WRITERGB15(dst, dstw, index)
620
621 static inline void RENAME(yuv2rgb555_X_ar)(SwsContext *c, const int16_t *lumFilter,
622                                            const int16_t **lumSrc, int lumFilterSize,
623                                            const int16_t *chrFilter, const int16_t **chrSrc,
624                                            int chrFilterSize, const int16_t **alpSrc,
625                                            uint8_t *dest, long dstW, long dstY)
626 {
627     x86_reg dummy=0;
628     x86_reg dstW_reg = dstW;
629
630     YSCALEYUV2PACKEDX_ACCURATE
631     YSCALEYUV2RGBX
632     "pxor %%mm7, %%mm7 \n\t"
633     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
634 #ifdef DITHER1XBPP
635     "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
636     "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
637     "paddusb "RED_DITHER"(%0), %%mm5\n\t"
638 #endif
639     WRITERGB15(%4, %5, %%REGa)
640     YSCALEYUV2PACKEDX_END
641 }
642
643 static inline void RENAME(yuv2rgb555_X)(SwsContext *c, const int16_t *lumFilter,
644                                         const int16_t **lumSrc, int lumFilterSize,
645                                         const int16_t *chrFilter, const int16_t **chrSrc,
646                                         int chrFilterSize, const int16_t **alpSrc,
647                                         uint8_t *dest, long dstW, long dstY)
648 {
649     x86_reg dummy=0;
650     x86_reg dstW_reg = dstW;
651
652     YSCALEYUV2PACKEDX
653     YSCALEYUV2RGBX
654     "pxor %%mm7, %%mm7 \n\t"
655     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
656 #ifdef DITHER1XBPP
657     "paddusb "BLUE_DITHER"(%0), %%mm2  \n\t"
658     "paddusb "GREEN_DITHER"(%0), %%mm4  \n\t"
659     "paddusb "RED_DITHER"(%0), %%mm5  \n\t"
660 #endif
661     WRITERGB15(%4, %5, %%REGa)
662     YSCALEYUV2PACKEDX_END
663 }
664
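/* 24-bit output: WRITEBGR24MMX interleaves the B/G/R bytes into three
 * quadwords with punpck/psllq/por, WRITEBGR24MMX2 does the same job with
 * pshufw and the ff_M24A/B/C masks; WRITEBGR24 selects one of the two
 * depending on COMPILE_TEMPLATE_MMX2. */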
665 #define WRITEBGR24MMX(dst, dstw, index) \
666     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
667     "movq      %%mm2, %%mm1     \n\t" /* B */\
668     "movq      %%mm5, %%mm6     \n\t" /* R */\
669     "punpcklbw %%mm4, %%mm2     \n\t" /* GBGBGBGB 0 */\
670     "punpcklbw %%mm7, %%mm5     \n\t" /* 0R0R0R0R 0 */\
671     "punpckhbw %%mm4, %%mm1     \n\t" /* GBGBGBGB 2 */\
672     "punpckhbw %%mm7, %%mm6     \n\t" /* 0R0R0R0R 2 */\
673     "movq      %%mm2, %%mm0     \n\t" /* GBGBGBGB 0 */\
674     "movq      %%mm1, %%mm3     \n\t" /* GBGBGBGB 2 */\
675     "punpcklwd %%mm5, %%mm0     \n\t" /* 0RGB0RGB 0 */\
676     "punpckhwd %%mm5, %%mm2     \n\t" /* 0RGB0RGB 1 */\
677     "punpcklwd %%mm6, %%mm1     \n\t" /* 0RGB0RGB 2 */\
678     "punpckhwd %%mm6, %%mm3     \n\t" /* 0RGB0RGB 3 */\
679 \
680     "movq      %%mm0, %%mm4     \n\t" /* 0RGB0RGB 0 */\
681     "movq      %%mm2, %%mm6     \n\t" /* 0RGB0RGB 1 */\
682     "movq      %%mm1, %%mm5     \n\t" /* 0RGB0RGB 2 */\
683     "movq      %%mm3, %%mm7     \n\t" /* 0RGB0RGB 3 */\
684 \
685     "psllq       $40, %%mm0     \n\t" /* RGB00000 0 */\
686     "psllq       $40, %%mm2     \n\t" /* RGB00000 1 */\
687     "psllq       $40, %%mm1     \n\t" /* RGB00000 2 */\
688     "psllq       $40, %%mm3     \n\t" /* RGB00000 3 */\
689 \
690     "punpckhdq %%mm4, %%mm0     \n\t" /* 0RGBRGB0 0 */\
691     "punpckhdq %%mm6, %%mm2     \n\t" /* 0RGBRGB0 1 */\
692     "punpckhdq %%mm5, %%mm1     \n\t" /* 0RGBRGB0 2 */\
693     "punpckhdq %%mm7, %%mm3     \n\t" /* 0RGBRGB0 3 */\
694 \
695     "psrlq        $8, %%mm0     \n\t" /* 00RGBRGB 0 */\
696     "movq      %%mm2, %%mm6     \n\t" /* 0RGBRGB0 1 */\
697     "psllq       $40, %%mm2     \n\t" /* GB000000 1 */\
698     "por       %%mm2, %%mm0     \n\t" /* GBRGBRGB 0 */\
699     MOVNTQ(%%mm0, (dst))\
700 \
701     "psrlq       $24, %%mm6     \n\t" /* 0000RGBR 1 */\
702     "movq      %%mm1, %%mm5     \n\t" /* 0RGBRGB0 2 */\
703     "psllq       $24, %%mm1     \n\t" /* BRGB0000 2 */\
704     "por       %%mm1, %%mm6     \n\t" /* BRGBRGBR 1 */\
705     MOVNTQ(%%mm6, 8(dst))\
706 \
707     "psrlq       $40, %%mm5     \n\t" /* 000000RG 2 */\
708     "psllq        $8, %%mm3     \n\t" /* RGBRGB00 3 */\
709     "por       %%mm3, %%mm5     \n\t" /* RGBRGBRG 2 */\
710     MOVNTQ(%%mm5, 16(dst))\
711 \
712     "add         $24, "#dst"    \n\t"\
713 \
714     "add          $8, "#index"  \n\t"\
715     "cmp     "#dstw", "#index"  \n\t"\
716     " jb          1b            \n\t"
717
718 #define WRITEBGR24MMX2(dst, dstw, index) \
719     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
720     "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
721     "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
722     "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2  B1 B0 B1 B0 */\
723     "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2  G1 G0 G1 G0 */\
724     "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0  R1 R0 R1 R0 */\
725 \
726     "pand   %%mm0, %%mm1        \n\t" /*    B2        B1       B0 */\
727     "pand   %%mm0, %%mm3        \n\t" /*    G2        G1       G0 */\
728     "pand   %%mm7, %%mm6        \n\t" /*       R1        R0       */\
729 \
730     "psllq     $8, %%mm3        \n\t" /* G2        G1       G0    */\
731     "por    %%mm1, %%mm6        \n\t"\
732     "por    %%mm3, %%mm6        \n\t"\
733     MOVNTQ(%%mm6, (dst))\
734 \
735     "psrlq     $8, %%mm4        \n\t" /* 00 G7 G6 G5  G4 G3 G2 G1 */\
736     "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4  B3 B2 B3 B2 */\
737     "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3  G4 G3 G4 G3 */\
738     "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4  R3 R2 R3 R2 */\
739 \
740     "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5       B4        B3    */\
741     "pand   %%mm7, %%mm3        \n\t" /*       G4        G3       */\
742     "pand   %%mm0, %%mm6        \n\t" /*    R4        R3       R2 */\
743 \
744     "por    %%mm1, %%mm3        \n\t" /* B5    G4 B4     G3 B3    */\
745     "por    %%mm3, %%mm6        \n\t"\
746     MOVNTQ(%%mm6, 8(dst))\
747 \
748     "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6  B7 B6 B7 B6 */\
749     "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7  G6 G5 G6 G5 */\
750     "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6  R5 R4 R5 R4 */\
751 \
752     "pand   %%mm7, %%mm1        \n\t" /*       B7        B6       */\
753     "pand   %%mm0, %%mm3        \n\t" /*    G7        G6       G5 */\
754     "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7       R6        R5    */\
755 \
756     "por    %%mm1, %%mm3        \n\t"\
757     "por    %%mm3, %%mm6        \n\t"\
758     MOVNTQ(%%mm6, 16(dst))\
759 \
760     "add      $24, "#dst"       \n\t"\
761 \
762     "add       $8, "#index"     \n\t"\
763     "cmp  "#dstw", "#index"     \n\t"\
764     " jb       1b               \n\t"
765
766 #if COMPILE_TEMPLATE_MMX2
767 #undef WRITEBGR24
768 #define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX2(dst, dstw, index)
769 #else
770 #undef WRITEBGR24
771 #define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX(dst, dstw, index)
772 #endif
773
774 static inline void RENAME(yuv2bgr24_X_ar)(SwsContext *c, const int16_t *lumFilter,
775                                           const int16_t **lumSrc, int lumFilterSize,
776                                           const int16_t *chrFilter, const int16_t **chrSrc,
777                                           int chrFilterSize, const int16_t **alpSrc,
778                                           uint8_t *dest, long dstW, long dstY)
779 {
780     x86_reg dummy=0;
781     x86_reg dstW_reg = dstW;
782
783     YSCALEYUV2PACKEDX_ACCURATE
784     YSCALEYUV2RGBX
785     "pxor %%mm7, %%mm7 \n\t"
786     "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
787     "add %4, %%"REG_c"                        \n\t"
788     WRITEBGR24(%%REGc, %5, %%REGa)
789     :: "r" (&c->redDither),
790        "m" (dummy), "m" (dummy), "m" (dummy),
791        "r" (dest), "m" (dstW_reg)
792     : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
793     );
794 }
795
796 static inline void RENAME(yuv2bgr24_X)(SwsContext *c, const int16_t *lumFilter,
797                                        const int16_t **lumSrc, int lumFilterSize,
798                                        const int16_t *chrFilter, const int16_t **chrSrc,
799                                        int chrFilterSize, const int16_t **alpSrc,
800                                        uint8_t *dest, long dstW, long dstY)
801 {
802     x86_reg dummy=0;
803     x86_reg dstW_reg = dstW;
804
805     YSCALEYUV2PACKEDX
806     YSCALEYUV2RGBX
807     "pxor                    %%mm7, %%mm7       \n\t"
808     "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"   \n\t" //FIXME optimize
809     "add                        %4, %%"REG_c"   \n\t"
810     WRITEBGR24(%%REGc, %5, %%REGa)
811     :: "r" (&c->redDither),
812        "m" (dummy), "m" (dummy), "m" (dummy),
813        "r" (dest),  "m" (dstW_reg)
814     : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
815     );
816 }
817
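/* Interleave the filtered Y (%%mm1/%%mm7), U (%%mm3) and V (%%mm4) into
   YUYV order and store 8 pixels (16 bytes) per iteration. */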
818 #define REAL_WRITEYUY2(dst, dstw, index) \
819     "packuswb  %%mm3, %%mm3     \n\t"\
820     "packuswb  %%mm4, %%mm4     \n\t"\
821     "packuswb  %%mm7, %%mm1     \n\t"\
822     "punpcklbw %%mm4, %%mm3     \n\t"\
823     "movq      %%mm1, %%mm7     \n\t"\
824     "punpcklbw %%mm3, %%mm1     \n\t"\
825     "punpckhbw %%mm3, %%mm7     \n\t"\
826 \
827     MOVNTQ(%%mm1, (dst, index, 2))\
828     MOVNTQ(%%mm7, 8(dst, index, 2))\
829 \
830     "add          $8, "#index"  \n\t"\
831     "cmp     "#dstw", "#index"  \n\t"\
832     " jb          1b            \n\t"
833 #define WRITEYUY2(dst, dstw, index)  REAL_WRITEYUY2(dst, dstw, index)
834
835 static inline void RENAME(yuv2yuyv422_X_ar)(SwsContext *c, const int16_t *lumFilter,
836                                             const int16_t **lumSrc, int lumFilterSize,
837                                             const int16_t *chrFilter, const int16_t **chrSrc,
838                                             int chrFilterSize, const int16_t **alpSrc,
839                                             uint8_t *dest, long dstW, long dstY)
840 {
841     x86_reg dummy=0;
842     x86_reg dstW_reg = dstW;
843
844     YSCALEYUV2PACKEDX_ACCURATE
845     /* mm1=Y1, %%mm7=Y2, %%mm3=U, %%mm4=V */
846     "psraw $3, %%mm3    \n\t"
847     "psraw $3, %%mm4    \n\t"
848     "psraw $3, %%mm1    \n\t"
849     "psraw $3, %%mm7    \n\t"
850     WRITEYUY2(%4, %5, %%REGa)
851     YSCALEYUV2PACKEDX_END
852 }
853
854 static inline void RENAME(yuv2yuyv422_X)(SwsContext *c, const int16_t *lumFilter,
855                                          const int16_t **lumSrc, int lumFilterSize,
856                                          const int16_t *chrFilter, const int16_t **chrSrc,
857                                          int chrFilterSize, const int16_t **alpSrc,
858                                          uint8_t *dest, long dstW, long dstY)
859 {
860     x86_reg dummy=0;
861     x86_reg dstW_reg = dstW;
862
863     YSCALEYUV2PACKEDX
864     /* mm1=Y1, %%mm7=Y2, %%mm3=U, %%mm4=V */
865     "psraw $3, %%mm3    \n\t"
866     "psraw $3, %%mm4    \n\t"
867     "psraw $3, %%mm1    \n\t"
868     "psraw $3, %%mm7    \n\t"
869     WRITEYUY2(%4, %5, %%REGa)
870     YSCALEYUV2PACKEDX_END
871 }
872
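/* Two-line vertical bilinear interpolation used by the yuv2*_2 functions
 * below: blends buf0/buf1 and uvbuf0/uvbuf1 with the weights stored at
 * LUM/CHR_MMX_FILTER_OFFSET+8 in the context, then applies the same RGB
 * coefficient math as YSCALEYUV2RGBX. */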
873 #define REAL_YSCALEYUV2RGB_UV(index, c) \
874     "xor            "#index", "#index"  \n\t"\
875     ".p2align              4            \n\t"\
876     "1:                                 \n\t"\
877     "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
878     "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
879     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5     \n\t" /* uvbuf0[eax+2048]*/\
880     "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4     \n\t" /* uvbuf1[eax+2048]*/\
881     "psubw             %%mm3, %%mm2     \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
882     "psubw             %%mm4, %%mm5     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
883     "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0    \n\t"\
884     "pmulhw            %%mm0, %%mm2     \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
885     "pmulhw            %%mm0, %%mm5     \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
886     "psraw                $4, %%mm3     \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
887     "psraw                $4, %%mm4     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
888     "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
889     "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
890     "psubw  "U_OFFSET"("#c"), %%mm3     \n\t" /* (U-128)8*/\
891     "psubw  "V_OFFSET"("#c"), %%mm4     \n\t" /* (V-128)8*/\
892     "movq              %%mm3, %%mm2     \n\t" /* (U-128)8*/\
893     "movq              %%mm4, %%mm5     \n\t" /* (V-128)8*/\
894     "pmulhw "UG_COEFF"("#c"), %%mm3     \n\t"\
895     "pmulhw "VG_COEFF"("#c"), %%mm4     \n\t"\
896     /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
897
898 #define REAL_YSCALEYUV2RGB_YA(index, c, b1, b2) \
899     "movq  ("#b1", "#index", 2), %%mm0     \n\t" /*buf0[eax]*/\
900     "movq  ("#b2", "#index", 2), %%mm1     \n\t" /*buf1[eax]*/\
901     "movq 8("#b1", "#index", 2), %%mm6     \n\t" /*buf0[eax]*/\
902     "movq 8("#b2", "#index", 2), %%mm7     \n\t" /*buf1[eax]*/\
903     "psubw             %%mm1, %%mm0     \n\t" /* buf0[eax] - buf1[eax]*/\
904     "psubw             %%mm7, %%mm6     \n\t" /* buf0[eax] - buf1[eax]*/\
905     "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
906     "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
907     "psraw                $4, %%mm1     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
908     "psraw                $4, %%mm7     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
909     "paddw             %%mm0, %%mm1     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
910     "paddw             %%mm6, %%mm7     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
911
912 #define REAL_YSCALEYUV2RGB_COEFF(c) \
913     "pmulhw "UB_COEFF"("#c"), %%mm2     \n\t"\
914     "pmulhw "VR_COEFF"("#c"), %%mm5     \n\t"\
915     "psubw  "Y_OFFSET"("#c"), %%mm1     \n\t" /* 8(Y-16)*/\
916     "psubw  "Y_OFFSET"("#c"), %%mm7     \n\t" /* 8(Y-16)*/\
917     "pmulhw  "Y_COEFF"("#c"), %%mm1     \n\t"\
918     "pmulhw  "Y_COEFF"("#c"), %%mm7     \n\t"\
919     /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
920     "paddw             %%mm3, %%mm4     \n\t"\
921     "movq              %%mm2, %%mm0     \n\t"\
922     "movq              %%mm5, %%mm6     \n\t"\
923     "movq              %%mm4, %%mm3     \n\t"\
924     "punpcklwd         %%mm2, %%mm2     \n\t"\
925     "punpcklwd         %%mm5, %%mm5     \n\t"\
926     "punpcklwd         %%mm4, %%mm4     \n\t"\
927     "paddw             %%mm1, %%mm2     \n\t"\
928     "paddw             %%mm1, %%mm5     \n\t"\
929     "paddw             %%mm1, %%mm4     \n\t"\
930     "punpckhwd         %%mm0, %%mm0     \n\t"\
931     "punpckhwd         %%mm6, %%mm6     \n\t"\
932     "punpckhwd         %%mm3, %%mm3     \n\t"\
933     "paddw             %%mm7, %%mm0     \n\t"\
934     "paddw             %%mm7, %%mm6     \n\t"\
935     "paddw             %%mm7, %%mm3     \n\t"\
936     /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
937     "packuswb          %%mm0, %%mm2     \n\t"\
938     "packuswb          %%mm6, %%mm5     \n\t"\
939     "packuswb          %%mm3, %%mm4     \n\t"\
940
941 #define YSCALEYUV2RGB_YA(index, c, b1, b2) REAL_YSCALEYUV2RGB_YA(index, c, b1, b2)
942
943 #define YSCALEYUV2RGB(index, c) \
944     REAL_YSCALEYUV2RGB_UV(index, c) \
945     REAL_YSCALEYUV2RGB_YA(index, c, %0, %1) \
946     REAL_YSCALEYUV2RGB_COEFF(c)
947
948 /**
949  * vertical bilinear scale YV12 to RGB
950  */
951 static inline void RENAME(yuv2rgb32_2)(SwsContext *c, const uint16_t *buf0,
952                                        const uint16_t *buf1, const uint16_t *uvbuf0,
953                                        const uint16_t *uvbuf1, const uint16_t *abuf0,
954                                        const uint16_t *abuf1, uint8_t *dest,
955                                        int dstW, int yalpha, int uvalpha, int y)
956 {
957     if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
958 #if ARCH_X86_64
959         __asm__ volatile(
960             YSCALEYUV2RGB(%%r8, %5)
961             YSCALEYUV2RGB_YA(%%r8, %5, %6, %7)
962             "psraw                  $3, %%mm1       \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
963             "psraw                  $3, %%mm7       \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
964             "packuswb            %%mm7, %%mm1       \n\t"
965             WRITEBGR32(%4, 8280(%5), %%r8, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
966             :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "r" (dest),
967                "a" (&c->redDither),
968                "r" (abuf0), "r" (abuf1)
969             : "%r8"
970         );
971 #else
972         *(const uint16_t **)(&c->u_temp)=abuf0;
973         *(const uint16_t **)(&c->v_temp)=abuf1;
974         __asm__ volatile(
975             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
976             "mov        %4, %%"REG_b"               \n\t"
977             "push %%"REG_BP"                        \n\t"
978             YSCALEYUV2RGB(%%REGBP, %5)
979             "push                   %0              \n\t"
980             "push                   %1              \n\t"
981             "mov          "U_TEMP"(%5), %0          \n\t"
982             "mov          "V_TEMP"(%5), %1          \n\t"
983             YSCALEYUV2RGB_YA(%%REGBP, %5, %0, %1)
984             "psraw                  $3, %%mm1       \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
985             "psraw                  $3, %%mm7       \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
986             "packuswb            %%mm7, %%mm1       \n\t"
987             "pop                    %1              \n\t"
988             "pop                    %0              \n\t"
989             WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
990             "pop %%"REG_BP"                         \n\t"
991             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
992             :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
993                "a" (&c->redDither)
994         );
995 #endif
996     } else {
997         __asm__ volatile(
998             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
999             "mov        %4, %%"REG_b"               \n\t"
1000             "push %%"REG_BP"                        \n\t"
1001             YSCALEYUV2RGB(%%REGBP, %5)
1002             "pcmpeqd %%mm7, %%mm7                   \n\t"
1003             WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1004             "pop %%"REG_BP"                         \n\t"
1005             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1006             :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1007                "a" (&c->redDither)
1008         );
1009     }
1010 }
1011
1012 static inline void RENAME(yuv2bgr24_2)(SwsContext *c, const uint16_t *buf0,
1013                                        const uint16_t *buf1, const uint16_t *uvbuf0,
1014                                        const uint16_t *uvbuf1, const uint16_t *abuf0,
1015                                        const uint16_t *abuf1, uint8_t *dest,
1016                                        int dstW, int yalpha, int uvalpha, int y)
1017 {
1018     //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1019     __asm__ volatile(
1020         "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1021         "mov        %4, %%"REG_b"               \n\t"
1022         "push %%"REG_BP"                        \n\t"
1023         YSCALEYUV2RGB(%%REGBP, %5)
1024         "pxor    %%mm7, %%mm7                   \n\t"
1025         WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1026         "pop %%"REG_BP"                         \n\t"
1027         "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1028         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1029            "a" (&c->redDither)
1030     );
1031 }
1032
1033 static inline void RENAME(yuv2rgb555_2)(SwsContext *c, const uint16_t *buf0,
1034                                         const uint16_t *buf1, const uint16_t *uvbuf0,
1035                                         const uint16_t *uvbuf1, const uint16_t *abuf0,
1036                                         const uint16_t *abuf1, uint8_t *dest,
1037                                         int dstW, int yalpha, int uvalpha, int y)
1038 {
1039     //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1040     __asm__ volatile(
1041         "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1042         "mov        %4, %%"REG_b"               \n\t"
1043         "push %%"REG_BP"                        \n\t"
1044         YSCALEYUV2RGB(%%REGBP, %5)
1045         "pxor    %%mm7, %%mm7                   \n\t"
1046         /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1047 #ifdef DITHER1XBPP
1048         "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
1049         "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
1050         "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
1051 #endif
1052         WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1053         "pop %%"REG_BP"                         \n\t"
1054         "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1055         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1056            "a" (&c->redDither)
1057     );
1058 }
1059
1060 static inline void RENAME(yuv2rgb565_2)(SwsContext *c, const uint16_t *buf0,
1061                                         const uint16_t *buf1, const uint16_t *uvbuf0,
1062                                         const uint16_t *uvbuf1, const uint16_t *abuf0,
1063                                         const uint16_t *abuf1, uint8_t *dest,
1064                                         int dstW, int yalpha, int uvalpha, int y)
1065 {
1066     //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1067     __asm__ volatile(
1068         "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1069         "mov        %4, %%"REG_b"               \n\t"
1070         "push %%"REG_BP"                        \n\t"
1071         YSCALEYUV2RGB(%%REGBP, %5)
1072         "pxor    %%mm7, %%mm7                   \n\t"
1073         /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1074 #ifdef DITHER1XBPP
1075         "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
1076         "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
1077         "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
1078 #endif
1079         WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1080         "pop %%"REG_BP"                         \n\t"
1081         "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1082         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1083            "a" (&c->redDither)
1084     );
1085 }
1086
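/* Bilinear blend for packed YUV output: the stored luma/chroma blend
   coefficients are first shifted right by 3, and the blended values are
   shifted right by 7 (instead of 4) so they are already in the 8-bit
   range expected by WRITEYUY2. */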
1087 #define REAL_YSCALEYUV2PACKED(index, c) \
1088     "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0              \n\t"\
1089     "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1              \n\t"\
1090     "psraw                $3, %%mm0                           \n\t"\
1091     "psraw                $3, %%mm1                           \n\t"\
1092     "movq              %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
1093     "movq              %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
1094     "xor            "#index", "#index"                        \n\t"\
1095     ".p2align              4            \n\t"\
1096     "1:                                 \n\t"\
1097     "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
1098     "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
1099     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5     \n\t" /* uvbuf0[eax+2048]*/\
1100     "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4     \n\t" /* uvbuf1[eax+2048]*/\
1101     "psubw             %%mm3, %%mm2     \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
1102     "psubw             %%mm4, %%mm5     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
1103     "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0    \n\t"\
1104     "pmulhw            %%mm0, %%mm2     \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
1105     "pmulhw            %%mm0, %%mm5     \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
1106     "psraw                $7, %%mm3     \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>7*/\
1107     "psraw                $7, %%mm4     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>7*/\
1108     "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
1109     "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
1110     "movq  (%0, "#index", 2), %%mm0     \n\t" /*buf0[eax]*/\
1111     "movq  (%1, "#index", 2), %%mm1     \n\t" /*buf1[eax]*/\
1112     "movq 8(%0, "#index", 2), %%mm6     \n\t" /*buf0[eax]*/\
1113     "movq 8(%1, "#index", 2), %%mm7     \n\t" /*buf1[eax]*/\
1114     "psubw             %%mm1, %%mm0     \n\t" /* buf0[eax] - buf1[eax]*/\
1115     "psubw             %%mm7, %%mm6     \n\t" /* buf0[eax] - buf1[eax]*/\
1116     "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
1117     "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
1118     "psraw                $7, %%mm1     \n\t" /* buf1[eax] >>7*/\
1119     "psraw                $7, %%mm7     \n\t" /* buf1[eax+4] >>7*/\
1120     "paddw             %%mm0, %%mm1     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
1121     "paddw             %%mm6, %%mm7     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
1122
1123 #define YSCALEYUV2PACKED(index, c)  REAL_YSCALEYUV2PACKED(index, c)
1124
1125 static inline void RENAME(yuv2yuyv422_2)(SwsContext *c, const uint16_t *buf0,
1126                                          const uint16_t *buf1, const uint16_t *uvbuf0,
1127                                          const uint16_t *uvbuf1, const uint16_t *abuf0,
1128                                          const uint16_t *abuf1, uint8_t *dest,
1129                                          int dstW, int yalpha, int uvalpha, int y)
1130 {
1131     //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1132     __asm__ volatile(
1133         "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1134         "mov        %4, %%"REG_b"               \n\t"
1135         "push %%"REG_BP"                        \n\t"
1136         YSCALEYUV2PACKED(%%REGBP, %5)
1137         WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1138         "pop %%"REG_BP"                         \n\t"
1139         "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1140         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1141            "a" (&c->redDither)
1142     );
1143 }
1144
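/* YSCALEYUV2RGB1 converts a single already-scaled line (buf0/uvbuf0 only,
 * no vertical interpolation) to B, G, R words using the per-context
 * coefficients.  Conceptually (a sketch; the real code stays in the MMX
 * fixed-point domain of the *_COEFF/*_OFFSET constants):
 *
 *     B = y_coeff*(Y-16) + ub*(U-128)
 *     G = y_coeff*(Y-16) + ug*(U-128) + vg*(V-128)
 *     R = y_coeff*(Y-16) + vr*(V-128)
 *
 * followed by packing the results to unsigned bytes. */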
1145 #define REAL_YSCALEYUV2RGB1(index, c) \
1146     "xor            "#index", "#index"  \n\t"\
1147     ".p2align              4            \n\t"\
1148     "1:                                 \n\t"\
1149     "movq     (%2, "#index"), %%mm3     \n\t" /* uvbuf0[eax]*/\
1150     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4     \n\t" /* uvbuf0[eax+2048]*/\
1151     "psraw                $4, %%mm3     \n\t" /* uvbuf0[eax] >>4*/\
1152     "psraw                $4, %%mm4     \n\t" /* uvbuf0[eax+2048] >>4*/\
1153     "psubw  "U_OFFSET"("#c"), %%mm3     \n\t" /* (U-128)8*/\
1154     "psubw  "V_OFFSET"("#c"), %%mm4     \n\t" /* (V-128)8*/\
1155     "movq              %%mm3, %%mm2     \n\t" /* (U-128)8*/\
1156     "movq              %%mm4, %%mm5     \n\t" /* (V-128)8*/\
1157     "pmulhw "UG_COEFF"("#c"), %%mm3     \n\t"\
1158     "pmulhw "VG_COEFF"("#c"), %%mm4     \n\t"\
1159     /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
1160     "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
1161     "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
1162     "psraw                $4, %%mm1     \n\t" /* buf0[eax] >>4*/\
1163     "psraw                $4, %%mm7     \n\t" /* buf0[eax+4] >>4*/\
1164     "pmulhw "UB_COEFF"("#c"), %%mm2     \n\t"\
1165     "pmulhw "VR_COEFF"("#c"), %%mm5     \n\t"\
1166     "psubw  "Y_OFFSET"("#c"), %%mm1     \n\t" /* 8(Y-16)*/\
1167     "psubw  "Y_OFFSET"("#c"), %%mm7     \n\t" /* 8(Y-16)*/\
1168     "pmulhw  "Y_COEFF"("#c"), %%mm1     \n\t"\
1169     "pmulhw  "Y_COEFF"("#c"), %%mm7     \n\t"\
1170     /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
1171     "paddw             %%mm3, %%mm4     \n\t"\
1172     "movq              %%mm2, %%mm0     \n\t"\
1173     "movq              %%mm5, %%mm6     \n\t"\
1174     "movq              %%mm4, %%mm3     \n\t"\
1175     "punpcklwd         %%mm2, %%mm2     \n\t"\
1176     "punpcklwd         %%mm5, %%mm5     \n\t"\
1177     "punpcklwd         %%mm4, %%mm4     \n\t"\
1178     "paddw             %%mm1, %%mm2     \n\t"\
1179     "paddw             %%mm1, %%mm5     \n\t"\
1180     "paddw             %%mm1, %%mm4     \n\t"\
1181     "punpckhwd         %%mm0, %%mm0     \n\t"\
1182     "punpckhwd         %%mm6, %%mm6     \n\t"\
1183     "punpckhwd         %%mm3, %%mm3     \n\t"\
1184     "paddw             %%mm7, %%mm0     \n\t"\
1185     "paddw             %%mm7, %%mm6     \n\t"\
1186     "paddw             %%mm7, %%mm3     \n\t"\
1187     /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
1188     "packuswb          %%mm0, %%mm2     \n\t"\
1189     "packuswb          %%mm6, %%mm5     \n\t"\
1190     "packuswb          %%mm3, %%mm4     \n\t"\
1191
1192 #define YSCALEYUV2RGB1(index, c)  REAL_YSCALEYUV2RGB1(index, c)
1193
1194 // do vertical chrominance interpolation
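// (unlike YSCALEYUV2RGB1 this reads both uvbuf0 and uvbuf1 and averages
//  them, roughly U = (uvbuf0[i] + uvbuf1[i]) >> 1 before the usual
//  offset/scaling; luma still comes from buf0 alone)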
1195 #define REAL_YSCALEYUV2RGB1b(index, c) \
1196     "xor            "#index", "#index"  \n\t"\
1197     ".p2align              4            \n\t"\
1198     "1:                                 \n\t"\
1199     "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
1200     "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
1201     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5     \n\t" /* uvbuf0[eax+2048]*/\
1202     "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4     \n\t" /* uvbuf1[eax+2048]*/\
1203     "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
1204     "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
1205     "psrlw                $5, %%mm3     \n\t" /*FIXME might overflow*/\
1206     "psrlw                $5, %%mm4     \n\t" /*FIXME might overflow*/\
1207     "psubw  "U_OFFSET"("#c"), %%mm3     \n\t" /* (U-128)8*/\
1208     "psubw  "V_OFFSET"("#c"), %%mm4     \n\t" /* (V-128)8*/\
1209     "movq              %%mm3, %%mm2     \n\t" /* (U-128)8*/\
1210     "movq              %%mm4, %%mm5     \n\t" /* (V-128)8*/\
1211     "pmulhw "UG_COEFF"("#c"), %%mm3     \n\t"\
1212     "pmulhw "VG_COEFF"("#c"), %%mm4     \n\t"\
1213     /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
1214     "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
1215     "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
1216     "psraw                $4, %%mm1     \n\t" /* buf0[eax] >>4*/\
1217     "psraw                $4, %%mm7     \n\t" /* buf0[eax+4] >>4*/\
1218     "pmulhw "UB_COEFF"("#c"), %%mm2     \n\t"\
1219     "pmulhw "VR_COEFF"("#c"), %%mm5     \n\t"\
1220     "psubw  "Y_OFFSET"("#c"), %%mm1     \n\t" /* 8(Y-16)*/\
1221     "psubw  "Y_OFFSET"("#c"), %%mm7     \n\t" /* 8(Y-16)*/\
1222     "pmulhw  "Y_COEFF"("#c"), %%mm1     \n\t"\
1223     "pmulhw  "Y_COEFF"("#c"), %%mm7     \n\t"\
1224     /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
1225     "paddw             %%mm3, %%mm4     \n\t"\
1226     "movq              %%mm2, %%mm0     \n\t"\
1227     "movq              %%mm5, %%mm6     \n\t"\
1228     "movq              %%mm4, %%mm3     \n\t"\
1229     "punpcklwd         %%mm2, %%mm2     \n\t"\
1230     "punpcklwd         %%mm5, %%mm5     \n\t"\
1231     "punpcklwd         %%mm4, %%mm4     \n\t"\
1232     "paddw             %%mm1, %%mm2     \n\t"\
1233     "paddw             %%mm1, %%mm5     \n\t"\
1234     "paddw             %%mm1, %%mm4     \n\t"\
1235     "punpckhwd         %%mm0, %%mm0     \n\t"\
1236     "punpckhwd         %%mm6, %%mm6     \n\t"\
1237     "punpckhwd         %%mm3, %%mm3     \n\t"\
1238     "paddw             %%mm7, %%mm0     \n\t"\
1239     "paddw             %%mm7, %%mm6     \n\t"\
1240     "paddw             %%mm7, %%mm3     \n\t"\
1241     /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
1242     "packuswb          %%mm0, %%mm2     \n\t"\
1243     "packuswb          %%mm6, %%mm5     \n\t"\
1244     "packuswb          %%mm3, %%mm4     \n\t"\
1245
1246 #define YSCALEYUV2RGB1b(index, c)  REAL_YSCALEYUV2RGB1b(index, c)
1247
1248 #define REAL_YSCALEYUV2RGB1_ALPHA(index) \
1249     "movq  (%1, "#index", 2), %%mm7     \n\t" /* abuf0[index  ]     */\
1250     "movq 8(%1, "#index", 2), %%mm1     \n\t" /* abuf0[index+4]     */\
1251     "psraw                $7, %%mm7     \n\t" /* abuf0[index  ] >>7 */\
1252     "psraw                $7, %%mm1     \n\t" /* abuf0[index+4] >>7 */\
1253     "packuswb          %%mm1, %%mm7     \n\t"
1254 #define YSCALEYUV2RGB1_ALPHA(index) REAL_YSCALEYUV2RGB1_ALPHA(index)
1255
1256 /**
1257  * YV12 to RGB without scaling or interpolating
1258  */
1259 static inline void RENAME(yuv2rgb32_1)(SwsContext *c, const uint16_t *buf0,
1260                                        const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1261                                        const uint16_t *abuf0, uint8_t *dest,
1262                                        int dstW, int uvalpha, enum PixelFormat dstFormat,
1263                                        int flags, int y)
1264 {
1265     const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1266
1267     if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
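        /* The cheap path here (and in the other *_1 functions below) feeds
         * only uvbuf0 to the chroma math; the uvalpha >= 2048 path uses the
         * ...1b macros, which average uvbuf0 and uvbuf1 instead.  Luma is
         * always taken from buf0, as no vertical scaling is done here. */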
1268         if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1269             __asm__ volatile(
1270                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1271                 "mov        %4, %%"REG_b"               \n\t"
1272                 "push %%"REG_BP"                        \n\t"
1273                 YSCALEYUV2RGB1(%%REGBP, %5)
1274                 YSCALEYUV2RGB1_ALPHA(%%REGBP)
1275                 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1276                 "pop %%"REG_BP"                         \n\t"
1277                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1278                 :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1279                    "a" (&c->redDither)
1280             );
1281         } else {
1282             __asm__ volatile(
1283                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1284                 "mov        %4, %%"REG_b"               \n\t"
1285                 "push %%"REG_BP"                        \n\t"
1286                 YSCALEYUV2RGB1(%%REGBP, %5)
1287                 "pcmpeqd %%mm7, %%mm7                   \n\t"
1288                 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1289                 "pop %%"REG_BP"                         \n\t"
1290                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1291                 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1292                    "a" (&c->redDither)
1293             );
1294         }
1295     } else {
1296         if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1297             __asm__ volatile(
1298                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1299                 "mov        %4, %%"REG_b"               \n\t"
1300                 "push %%"REG_BP"                        \n\t"
1301                 YSCALEYUV2RGB1b(%%REGBP, %5)
1302                 YSCALEYUV2RGB1_ALPHA(%%REGBP)
1303                 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1304                 "pop %%"REG_BP"                         \n\t"
1305                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1306                 :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1307                    "a" (&c->redDither)
1308             );
1309         } else {
1310             __asm__ volatile(
1311                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1312                 "mov        %4, %%"REG_b"               \n\t"
1313                 "push %%"REG_BP"                        \n\t"
1314                 YSCALEYUV2RGB1b(%%REGBP, %5)
1315                 "pcmpeqd %%mm7, %%mm7                   \n\t"
1316                 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1317                 "pop %%"REG_BP"                         \n\t"
1318                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1319                 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1320                    "a" (&c->redDither)
1321             );
1322         }
1323     }
1324 }
1325
1326 static inline void RENAME(yuv2bgr24_1)(SwsContext *c, const uint16_t *buf0,
1327                                        const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1328                                        const uint16_t *abuf0, uint8_t *dest,
1329                                        int dstW, int uvalpha, enum PixelFormat dstFormat,
1330                                        int flags, int y)
1331 {
1332     const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1333
1334     if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1335         __asm__ volatile(
1336             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1337             "mov        %4, %%"REG_b"               \n\t"
1338             "push %%"REG_BP"                        \n\t"
1339             YSCALEYUV2RGB1(%%REGBP, %5)
1340             "pxor    %%mm7, %%mm7                   \n\t"
1341             WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1342             "pop %%"REG_BP"                         \n\t"
1343             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1344             :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1345                "a" (&c->redDither)
1346         );
1347     } else {
1348         __asm__ volatile(
1349             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1350             "mov        %4, %%"REG_b"               \n\t"
1351             "push %%"REG_BP"                        \n\t"
1352             YSCALEYUV2RGB1b(%%REGBP, %5)
1353             "pxor    %%mm7, %%mm7                   \n\t"
1354             WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1355             "pop %%"REG_BP"                         \n\t"
1356             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1357             :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1358                "a" (&c->redDither)
1359         );
1360     }
1361 }
1362
1363 static inline void RENAME(yuv2rgb555_1)(SwsContext *c, const uint16_t *buf0,
1364                                         const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1365                                         const uint16_t *abuf0, uint8_t *dest,
1366                                         int dstW, int uvalpha, enum PixelFormat dstFormat,
1367                                         int flags, int y)
1368 {
1369     const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1370
1371     if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1372         __asm__ volatile(
1373             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1374             "mov        %4, %%"REG_b"               \n\t"
1375             "push %%"REG_BP"                        \n\t"
1376             YSCALEYUV2RGB1(%%REGBP, %5)
1377             "pxor    %%mm7, %%mm7                   \n\t"
1378             /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1379 #ifdef DITHER1XBPP
1380             "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
1381             "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
1382             "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
1383 #endif
1384             WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1385             "pop %%"REG_BP"                         \n\t"
1386             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1387             :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1388                "a" (&c->redDither)
1389         );
1390     } else {
1391         __asm__ volatile(
1392             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1393             "mov        %4, %%"REG_b"               \n\t"
1394             "push %%"REG_BP"                        \n\t"
1395             YSCALEYUV2RGB1b(%%REGBP, %5)
1396             "pxor    %%mm7, %%mm7                   \n\t"
1397             /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1398 #ifdef DITHER1XBPP
1399             "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
1400             "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
1401             "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
1402 #endif
1403             WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1404             "pop %%"REG_BP"                         \n\t"
1405             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1406             :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1407                "a" (&c->redDither)
1408         );
1409     }
1410 }
1411
1412 static inline void RENAME(yuv2rgb565_1)(SwsContext *c, const uint16_t *buf0,
1413                                         const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1414                                         const uint16_t *abuf0, uint8_t *dest,
1415                                         int dstW, int uvalpha, enum PixelFormat dstFormat,
1416                                         int flags, int y)
1417 {
1418     const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1419
1420     if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1421         __asm__ volatile(
1422             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1423             "mov        %4, %%"REG_b"               \n\t"
1424             "push %%"REG_BP"                        \n\t"
1425             YSCALEYUV2RGB1(%%REGBP, %5)
1426             "pxor    %%mm7, %%mm7                   \n\t"
1427             /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1428 #ifdef DITHER1XBPP
1429             "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
1430             "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
1431             "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
1432 #endif
1433             WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1434             "pop %%"REG_BP"                         \n\t"
1435             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1436             :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1437                "a" (&c->redDither)
1438         );
1439     } else {
1440         __asm__ volatile(
1441             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1442             "mov        %4, %%"REG_b"               \n\t"
1443             "push %%"REG_BP"                        \n\t"
1444             YSCALEYUV2RGB1b(%%REGBP, %5)
1445             "pxor    %%mm7, %%mm7                   \n\t"
1446             /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1447 #ifdef DITHER1XBPP
1448             "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
1449             "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
1450             "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
1451 #endif
1452             WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1453             "pop %%"REG_BP"                         \n\t"
1454             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1455             :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1456                "a" (&c->redDither)
1457         );
1458     }
1459 }
1460
1461 #define REAL_YSCALEYUV2PACKED1(index, c) \
1462     "xor            "#index", "#index"  \n\t"\
1463     ".p2align              4            \n\t"\
1464     "1:                                 \n\t"\
1465     "movq     (%2, "#index"), %%mm3     \n\t" /* uvbuf0[eax]*/\
1466     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4     \n\t" /* uvbuf0[eax+2048]*/\
1467     "psraw                $7, %%mm3     \n\t" \
1468     "psraw                $7, %%mm4     \n\t" \
1469     "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
1470     "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
1471     "psraw                $7, %%mm1     \n\t" \
1472     "psraw                $7, %%mm7     \n\t" \
1473
1474 #define YSCALEYUV2PACKED1(index, c)  REAL_YSCALEYUV2PACKED1(index, c)
1475
1476 #define REAL_YSCALEYUV2PACKED1b(index, c) \
1477     "xor "#index", "#index"             \n\t"\
1478     ".p2align              4            \n\t"\
1479     "1:                                 \n\t"\
1480     "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
1481     "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
1482     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5     \n\t" /* uvbuf0[eax+2048]*/\
1483     "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4     \n\t" /* uvbuf1[eax+2048]*/\
1484     "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
1485     "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
1486     "psrlw                $8, %%mm3     \n\t" \
1487     "psrlw                $8, %%mm4     \n\t" \
1488     "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
1489     "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
1490     "psraw                $7, %%mm1     \n\t" \
1491     "psraw                $7, %%mm7     \n\t"
1492 #define YSCALEYUV2PACKED1b(index, c)  REAL_YSCALEYUV2PACKED1b(index, c)
1493
1494 static inline void RENAME(yuv2yuyv422_1)(SwsContext *c, const uint16_t *buf0,
1495                                          const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1496                                          const uint16_t *abuf0, uint8_t *dest,
1497                                          int dstW, int uvalpha, enum PixelFormat dstFormat,
1498                                          int flags, int y)
1499 {
1500     const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1501
1502     if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1503         __asm__ volatile(
1504             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1505             "mov        %4, %%"REG_b"               \n\t"
1506             "push %%"REG_BP"                        \n\t"
1507             YSCALEYUV2PACKED1(%%REGBP, %5)
1508             WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1509             "pop %%"REG_BP"                         \n\t"
1510             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1511             :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1512                "a" (&c->redDither)
1513         );
1514     } else {
1515         __asm__ volatile(
1516             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1517             "mov        %4, %%"REG_b"               \n\t"
1518             "push %%"REG_BP"                        \n\t"
1519             YSCALEYUV2PACKED1b(%%REGBP, %5)
1520             WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1521             "pop %%"REG_BP"                         \n\t"
1522             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1523             :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1524                "a" (&c->redDither)
1525         );
1526     }
1527 }
1528
1529 #if !COMPILE_TEMPLATE_MMX2
1530 //FIXME yuy2* can read up to 7 samples too much
1531
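/* The input unpackers below convert packed 8-bit input (YUY2, UYVY,
 * little/big-endian 16-bit samples, NV12/NV21) into planar 8-bit Y and U/V.
 * They share two idioms: "pand bm01010101" keeps the even byte of each word
 * while "psrlw $8" keeps the odd one, and the counter in REG_a starts at
 * -width and counts up towards zero, so the end-of-row test is a plain js. */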
1532 static inline void RENAME(yuy2ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1533 {
1534     __asm__ volatile(
1535         "movq "MANGLE(bm01010101)", %%mm2           \n\t"
1536         "mov                    %0, %%"REG_a"       \n\t"
1537         "1:                                         \n\t"
1538         "movq    (%1, %%"REG_a",2), %%mm0           \n\t"
1539         "movq   8(%1, %%"REG_a",2), %%mm1           \n\t"
1540         "pand                %%mm2, %%mm0           \n\t"
1541         "pand                %%mm2, %%mm1           \n\t"
1542         "packuswb            %%mm1, %%mm0           \n\t"
1543         "movq                %%mm0, (%2, %%"REG_a") \n\t"
1544         "add                    $8, %%"REG_a"       \n\t"
1545         " js                    1b                  \n\t"
1546         : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
1547         : "%"REG_a
1548     );
1549 }
1550
1551 static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1552 {
1553     __asm__ volatile(
1554         "movq "MANGLE(bm01010101)", %%mm4           \n\t"
1555         "mov                    %0, %%"REG_a"       \n\t"
1556         "1:                                         \n\t"
1557         "movq    (%1, %%"REG_a",4), %%mm0           \n\t"
1558         "movq   8(%1, %%"REG_a",4), %%mm1           \n\t"
1559         "psrlw                  $8, %%mm0           \n\t"
1560         "psrlw                  $8, %%mm1           \n\t"
1561         "packuswb            %%mm1, %%mm0           \n\t"
1562         "movq                %%mm0, %%mm1           \n\t"
1563         "psrlw                  $8, %%mm0           \n\t"
1564         "pand                %%mm4, %%mm1           \n\t"
1565         "packuswb            %%mm0, %%mm0           \n\t"
1566         "packuswb            %%mm1, %%mm1           \n\t"
1567         "movd                %%mm0, (%3, %%"REG_a") \n\t"
1568         "movd                %%mm1, (%2, %%"REG_a") \n\t"
1569         "add                    $4, %%"REG_a"       \n\t"
1570         " js                    1b                  \n\t"
1571         : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
1572         : "%"REG_a
1573     );
1574     assert(src1 == src2);
1575 }
1576
1577 static inline void RENAME(LEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1578 {
1579     __asm__ volatile(
1580         "mov                    %0, %%"REG_a"       \n\t"
1581         "1:                                         \n\t"
1582         "movq    (%1, %%"REG_a",2), %%mm0           \n\t"
1583         "movq   8(%1, %%"REG_a",2), %%mm1           \n\t"
1584         "movq    (%2, %%"REG_a",2), %%mm2           \n\t"
1585         "movq   8(%2, %%"REG_a",2), %%mm3           \n\t"
1586         "psrlw                  $8, %%mm0           \n\t"
1587         "psrlw                  $8, %%mm1           \n\t"
1588         "psrlw                  $8, %%mm2           \n\t"
1589         "psrlw                  $8, %%mm3           \n\t"
1590         "packuswb            %%mm1, %%mm0           \n\t"
1591         "packuswb            %%mm3, %%mm2           \n\t"
1592         "movq                %%mm0, (%3, %%"REG_a") \n\t"
1593         "movq                %%mm2, (%4, %%"REG_a") \n\t"
1594         "add                    $8, %%"REG_a"       \n\t"
1595         " js                    1b                  \n\t"
1596         : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width)
1597         : "%"REG_a
1598     );
1599 }
1600
1601 /* This is almost identical to the previous code, and exists only because
1602  * yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses. */
1603 static inline void RENAME(uyvyToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1604 {
1605     __asm__ volatile(
1606         "mov                  %0, %%"REG_a"         \n\t"
1607         "1:                                         \n\t"
1608         "movq  (%1, %%"REG_a",2), %%mm0             \n\t"
1609         "movq 8(%1, %%"REG_a",2), %%mm1             \n\t"
1610         "psrlw                $8, %%mm0             \n\t"
1611         "psrlw                $8, %%mm1             \n\t"
1612         "packuswb          %%mm1, %%mm0             \n\t"
1613         "movq              %%mm0, (%2, %%"REG_a")   \n\t"
1614         "add                  $8, %%"REG_a"         \n\t"
1615         " js                  1b                    \n\t"
1616         : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
1617         : "%"REG_a
1618     );
1619 }
1620
1621 static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1622 {
1623     __asm__ volatile(
1624         "movq "MANGLE(bm01010101)", %%mm4           \n\t"
1625         "mov                    %0, %%"REG_a"       \n\t"
1626         "1:                                         \n\t"
1627         "movq    (%1, %%"REG_a",4), %%mm0           \n\t"
1628         "movq   8(%1, %%"REG_a",4), %%mm1           \n\t"
1629         "pand                %%mm4, %%mm0           \n\t"
1630         "pand                %%mm4, %%mm1           \n\t"
1631         "packuswb            %%mm1, %%mm0           \n\t"
1632         "movq                %%mm0, %%mm1           \n\t"
1633         "psrlw                  $8, %%mm0           \n\t"
1634         "pand                %%mm4, %%mm1           \n\t"
1635         "packuswb            %%mm0, %%mm0           \n\t"
1636         "packuswb            %%mm1, %%mm1           \n\t"
1637         "movd                %%mm0, (%3, %%"REG_a") \n\t"
1638         "movd                %%mm1, (%2, %%"REG_a") \n\t"
1639         "add                    $4, %%"REG_a"       \n\t"
1640         " js                    1b                  \n\t"
1641         : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
1642         : "%"REG_a
1643     );
1644     assert(src1 == src2);
1645 }
1646
1647 static inline void RENAME(BEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1648 {
1649     __asm__ volatile(
1650         "movq "MANGLE(bm01010101)", %%mm4           \n\t"
1651         "mov                    %0, %%"REG_a"       \n\t"
1652         "1:                                         \n\t"
1653         "movq    (%1, %%"REG_a",2), %%mm0           \n\t"
1654         "movq   8(%1, %%"REG_a",2), %%mm1           \n\t"
1655         "movq    (%2, %%"REG_a",2), %%mm2           \n\t"
1656         "movq   8(%2, %%"REG_a",2), %%mm3           \n\t"
1657         "pand                %%mm4, %%mm0           \n\t"
1658         "pand                %%mm4, %%mm1           \n\t"
1659         "pand                %%mm4, %%mm2           \n\t"
1660         "pand                %%mm4, %%mm3           \n\t"
1661         "packuswb            %%mm1, %%mm0           \n\t"
1662         "packuswb            %%mm3, %%mm2           \n\t"
1663         "movq                %%mm0, (%3, %%"REG_a") \n\t"
1664         "movq                %%mm2, (%4, %%"REG_a") \n\t"
1665         "add                    $8, %%"REG_a"       \n\t"
1666         " js                    1b                  \n\t"
1667         : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width)
1668         : "%"REG_a
1669     );
1670 }
1671
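/* Deinterleave the NV12/NV21 chroma plane: the even bytes of each word go
 * to dst1 and the odd bytes to dst2; nv12ToUV/nv21ToUV below only differ in
 * which destination receives U and which receives V. */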
1672 static inline void RENAME(nvXXtoUV)(uint8_t *dst1, uint8_t *dst2,
1673                                     const uint8_t *src, long width)
1674 {
1675     __asm__ volatile(
1676         "movq "MANGLE(bm01010101)", %%mm4           \n\t"
1677         "mov                    %0, %%"REG_a"       \n\t"
1678         "1:                                         \n\t"
1679         "movq    (%1, %%"REG_a",2), %%mm0           \n\t"
1680         "movq   8(%1, %%"REG_a",2), %%mm1           \n\t"
1681         "movq                %%mm0, %%mm2           \n\t"
1682         "movq                %%mm1, %%mm3           \n\t"
1683         "pand                %%mm4, %%mm0           \n\t"
1684         "pand                %%mm4, %%mm1           \n\t"
1685         "psrlw                  $8, %%mm2           \n\t"
1686         "psrlw                  $8, %%mm3           \n\t"
1687         "packuswb            %%mm1, %%mm0           \n\t"
1688         "packuswb            %%mm3, %%mm2           \n\t"
1689         "movq                %%mm0, (%2, %%"REG_a") \n\t"
1690         "movq                %%mm2, (%3, %%"REG_a") \n\t"
1691         "add                    $8, %%"REG_a"       \n\t"
1692         " js                    1b                  \n\t"
1693         : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst1+width), "r" (dst2+width)
1694         : "%"REG_a
1695     );
1696 }
1697
1698 static inline void RENAME(nv12ToUV)(uint8_t *dstU, uint8_t *dstV,
1699                                     const uint8_t *src1, const uint8_t *src2,
1700                                     long width, uint32_t *unused)
1701 {
1702     RENAME(nvXXtoUV)(dstU, dstV, src1, width);
1703 }
1704
1705 static inline void RENAME(nv21ToUV)(uint8_t *dstU, uint8_t *dstV,
1706                                     const uint8_t *src1, const uint8_t *src2,
1707                                     long width, uint32_t *unused)
1708 {
1709     RENAME(nvXXtoUV)(dstV, dstU, src1, width);
1710 }
1711 #endif /* !COMPILE_TEMPLATE_MMX2 */
1712
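/* 24-bit BGR/RGB to luma: mm5/mm6 get the format-specific coefficients,
 * then each iteration consumes 12 bytes (4 pixels) and computes, roughly,
 *     Y = (cb*B + cg*G + cr*R + ff_bgr24toYOffset) >> 15
 * per pixel with pmaddwd on the unpacked bytes (a sketch -- the exact
 * coefficient layout is in the ff_bgr24toY*Coeff tables). */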
1713 static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, const uint8_t *src, long width, enum PixelFormat srcFormat)
1714 {
1715
1716     if (srcFormat == PIX_FMT_BGR24) {
1717         __asm__ volatile(
1718             "movq  "MANGLE(ff_bgr24toY1Coeff)", %%mm5       \n\t"
1719             "movq  "MANGLE(ff_bgr24toY2Coeff)", %%mm6       \n\t"
1720             :
1721         );
1722     } else {
1723         __asm__ volatile(
1724             "movq  "MANGLE(ff_rgb24toY1Coeff)", %%mm5       \n\t"
1725             "movq  "MANGLE(ff_rgb24toY2Coeff)", %%mm6       \n\t"
1726             :
1727         );
1728     }
1729
1730     __asm__ volatile(
1731         "movq  "MANGLE(ff_bgr24toYOffset)", %%mm4   \n\t"
1732         "mov                        %2, %%"REG_a"   \n\t"
1733         "pxor                    %%mm7, %%mm7       \n\t"
1734         "1:                                         \n\t"
1735         PREFETCH"               64(%0)              \n\t"
1736         "movd                     (%0), %%mm0       \n\t"
1737         "movd                    2(%0), %%mm1       \n\t"
1738         "movd                    6(%0), %%mm2       \n\t"
1739         "movd                    8(%0), %%mm3       \n\t"
1740         "add                       $12, %0          \n\t"
1741         "punpcklbw               %%mm7, %%mm0       \n\t"
1742         "punpcklbw               %%mm7, %%mm1       \n\t"
1743         "punpcklbw               %%mm7, %%mm2       \n\t"
1744         "punpcklbw               %%mm7, %%mm3       \n\t"
1745         "pmaddwd                 %%mm5, %%mm0       \n\t"
1746         "pmaddwd                 %%mm6, %%mm1       \n\t"
1747         "pmaddwd                 %%mm5, %%mm2       \n\t"
1748         "pmaddwd                 %%mm6, %%mm3       \n\t"
1749         "paddd                   %%mm1, %%mm0       \n\t"
1750         "paddd                   %%mm3, %%mm2       \n\t"
1751         "paddd                   %%mm4, %%mm0       \n\t"
1752         "paddd                   %%mm4, %%mm2       \n\t"
1753         "psrad                     $15, %%mm0       \n\t"
1754         "psrad                     $15, %%mm2       \n\t"
1755         "packssdw                %%mm2, %%mm0       \n\t"
1756         "packuswb                %%mm0, %%mm0       \n\t"
1757         "movd                %%mm0, (%1, %%"REG_a") \n\t"
1758         "add                        $4, %%"REG_a"   \n\t"
1759         " js                        1b              \n\t"
1760     : "+r" (src)
1761     : "r" (dst+width), "g" ((x86_reg)-width)
1762     : "%"REG_a
1763     );
1764 }
1765
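/* Chroma counterpart of the above: the coefficient table passed in %4
 * (ff_bgr24toUV[srcFormat == PIX_FMT_RGB24]) holds the U and V coefficients
 * back to back, so the same routine serves both BGR24 and RGB24 input. */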
1766 static inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, long width, enum PixelFormat srcFormat)
1767 {
1768     __asm__ volatile(
1769         "movq                    24(%4), %%mm6       \n\t"
1770         "mov                        %3, %%"REG_a"   \n\t"
1771         "pxor                    %%mm7, %%mm7       \n\t"
1772         "1:                                         \n\t"
1773         PREFETCH"               64(%0)              \n\t"
1774         "movd                     (%0), %%mm0       \n\t"
1775         "movd                    2(%0), %%mm1       \n\t"
1776         "punpcklbw               %%mm7, %%mm0       \n\t"
1777         "punpcklbw               %%mm7, %%mm1       \n\t"
1778         "movq                    %%mm0, %%mm2       \n\t"
1779         "movq                    %%mm1, %%mm3       \n\t"
1780         "pmaddwd                  (%4), %%mm0       \n\t"
1781         "pmaddwd                 8(%4), %%mm1       \n\t"
1782         "pmaddwd                16(%4), %%mm2       \n\t"
1783         "pmaddwd                 %%mm6, %%mm3       \n\t"
1784         "paddd                   %%mm1, %%mm0       \n\t"
1785         "paddd                   %%mm3, %%mm2       \n\t"
1786
1787         "movd                    6(%0), %%mm1       \n\t"
1788         "movd                    8(%0), %%mm3       \n\t"
1789         "add                       $12, %0          \n\t"
1790         "punpcklbw               %%mm7, %%mm1       \n\t"
1791         "punpcklbw               %%mm7, %%mm3       \n\t"
1792         "movq                    %%mm1, %%mm4       \n\t"
1793         "movq                    %%mm3, %%mm5       \n\t"
1794         "pmaddwd                  (%4), %%mm1       \n\t"
1795         "pmaddwd                 8(%4), %%mm3       \n\t"
1796         "pmaddwd                16(%4), %%mm4       \n\t"
1797         "pmaddwd                 %%mm6, %%mm5       \n\t"
1798         "paddd                   %%mm3, %%mm1       \n\t"
1799         "paddd                   %%mm5, %%mm4       \n\t"
1800
1801         "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3       \n\t"
1802         "paddd                   %%mm3, %%mm0       \n\t"
1803         "paddd                   %%mm3, %%mm2       \n\t"
1804         "paddd                   %%mm3, %%mm1       \n\t"
1805         "paddd                   %%mm3, %%mm4       \n\t"
1806         "psrad                     $15, %%mm0       \n\t"
1807         "psrad                     $15, %%mm2       \n\t"
1808         "psrad                     $15, %%mm1       \n\t"
1809         "psrad                     $15, %%mm4       \n\t"
1810         "packssdw                %%mm1, %%mm0       \n\t"
1811         "packssdw                %%mm4, %%mm2       \n\t"
1812         "packuswb                %%mm0, %%mm0       \n\t"
1813         "packuswb                %%mm2, %%mm2       \n\t"
1814         "movd                %%mm0, (%1, %%"REG_a") \n\t"
1815         "movd                %%mm2, (%2, %%"REG_a") \n\t"
1816         "add                        $4, %%"REG_a"   \n\t"
1817         " js                        1b              \n\t"
1818     : "+r" (src)
1819     : "r" (dstU+width), "r" (dstV+width), "g" ((x86_reg)-width), "r"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24])
1820     : "%"REG_a
1821     );
1822 }
1823
1824 static inline void RENAME(bgr24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1825 {
1826     RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
1827 }
1828
1829 static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1830 {
1831     RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_BGR24);
1832     assert(src1 == src2);
1833 }
1834
1835 static inline void RENAME(rgb24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1836 {
1837     RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24);
1838 }
1839
1840 static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1841 {
1842     assert(src1 == src2);
1843     RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24);
1844 }
1845
1846 #if !COMPILE_TEMPLATE_MMX2
1847 // bilinear / bicubic scaling
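/* Horizontal scaling: every output sample is an FIR filter of 'filterSize'
 * taps applied at source position filterPos[i].  A rough scalar reference
 * (ignoring the int16 saturation that the asm gets from packssdw) is:
 *
 *     for (i = 0; i < dstW; i++) {
 *         int j, val = 0;
 *         for (j = 0; j < filterSize; j++)
 *             val += src[filterPos[i] + j] * filter[i * filterSize + j];
 *         dst[i] = val >> 7;
 *     }
 *
 * The asm below has unrolled cases for filterSize 4 and 8 and a generic
 * inner loop for anything larger. */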
1848 static inline void RENAME(hScale)(int16_t *dst, int dstW, const uint8_t *src, int srcW, int xInc,
1849                                   const int16_t *filter, const int16_t *filterPos, long filterSize)
1850 {
1851     assert(filterSize % 4 == 0 && filterSize>0);
1852     if (filterSize==4) { // Always true for upscaling, sometimes for down, too.
1853         x86_reg counter= -2*dstW;
1854         filter-= counter*2;
1855         filterPos-= counter/2;
1856         dst-= counter/2;
1857         __asm__ volatile(
1858 #if defined(PIC)
1859             "push            %%"REG_b"              \n\t"
1860 #endif
1861             "pxor                %%mm7, %%mm7       \n\t"
1862             "push           %%"REG_BP"              \n\t" // we use 7 regs here ...
1863             "mov             %%"REG_a", %%"REG_BP"  \n\t"
1864             ".p2align                4              \n\t"
1865             "1:                                     \n\t"
1866             "movzwl   (%2, %%"REG_BP"), %%eax       \n\t"
1867             "movzwl  2(%2, %%"REG_BP"), %%ebx       \n\t"
1868             "movq  (%1, %%"REG_BP", 4), %%mm1       \n\t"
1869             "movq 8(%1, %%"REG_BP", 4), %%mm3       \n\t"
1870             "movd      (%3, %%"REG_a"), %%mm0       \n\t"
1871             "movd      (%3, %%"REG_b"), %%mm2       \n\t"
1872             "punpcklbw           %%mm7, %%mm0       \n\t"
1873             "punpcklbw           %%mm7, %%mm2       \n\t"
1874             "pmaddwd             %%mm1, %%mm0       \n\t"
1875             "pmaddwd             %%mm2, %%mm3       \n\t"
1876             "movq                %%mm0, %%mm4       \n\t"
1877             "punpckldq           %%mm3, %%mm0       \n\t"
1878             "punpckhdq           %%mm3, %%mm4       \n\t"
1879             "paddd               %%mm4, %%mm0       \n\t"
1880             "psrad                  $7, %%mm0       \n\t"
1881             "packssdw            %%mm0, %%mm0       \n\t"
1882             "movd                %%mm0, (%4, %%"REG_BP")    \n\t"
1883             "add                    $4, %%"REG_BP"  \n\t"
1884             " jnc                   1b              \n\t"
1885
1886             "pop            %%"REG_BP"              \n\t"
1887 #if defined(PIC)
1888             "pop             %%"REG_b"              \n\t"
1889 #endif
1890             : "+a" (counter)
1891             : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
1892 #if !defined(PIC)
1893             : "%"REG_b
1894 #endif
1895         );
1896     } else if (filterSize==8) {
1897         x86_reg counter= -2*dstW;
1898         filter-= counter*4;
1899         filterPos-= counter/2;
1900         dst-= counter/2;
1901         __asm__ volatile(
1902 #if defined(PIC)
1903             "push             %%"REG_b"             \n\t"
1904 #endif
1905             "pxor                 %%mm7, %%mm7      \n\t"
1906             "push            %%"REG_BP"             \n\t" // we use 7 regs here ...
1907             "mov              %%"REG_a", %%"REG_BP" \n\t"
1908             ".p2align                 4             \n\t"
1909             "1:                                     \n\t"
1910             "movzwl    (%2, %%"REG_BP"), %%eax      \n\t"
1911             "movzwl   2(%2, %%"REG_BP"), %%ebx      \n\t"
1912             "movq   (%1, %%"REG_BP", 8), %%mm1      \n\t"
1913             "movq 16(%1, %%"REG_BP", 8), %%mm3      \n\t"
1914             "movd       (%3, %%"REG_a"), %%mm0      \n\t"
1915             "movd       (%3, %%"REG_b"), %%mm2      \n\t"
1916             "punpcklbw            %%mm7, %%mm0      \n\t"
1917             "punpcklbw            %%mm7, %%mm2      \n\t"
1918             "pmaddwd              %%mm1, %%mm0      \n\t"
1919             "pmaddwd              %%mm2, %%mm3      \n\t"
1920
1921             "movq  8(%1, %%"REG_BP", 8), %%mm1      \n\t"
1922             "movq 24(%1, %%"REG_BP", 8), %%mm5      \n\t"
1923             "movd      4(%3, %%"REG_a"), %%mm4      \n\t"
1924             "movd      4(%3, %%"REG_b"), %%mm2      \n\t"
1925             "punpcklbw            %%mm7, %%mm4      \n\t"
1926             "punpcklbw            %%mm7, %%mm2      \n\t"
1927             "pmaddwd              %%mm1, %%mm4      \n\t"
1928             "pmaddwd              %%mm2, %%mm5      \n\t"
1929             "paddd                %%mm4, %%mm0      \n\t"
1930             "paddd                %%mm5, %%mm3      \n\t"
1931             "movq                 %%mm0, %%mm4      \n\t"
1932             "punpckldq            %%mm3, %%mm0      \n\t"
1933             "punpckhdq            %%mm3, %%mm4      \n\t"
1934             "paddd                %%mm4, %%mm0      \n\t"
1935             "psrad                   $7, %%mm0      \n\t"
1936             "packssdw             %%mm0, %%mm0      \n\t"
1937             "movd                 %%mm0, (%4, %%"REG_BP")   \n\t"
1938             "add                     $4, %%"REG_BP" \n\t"
1939             " jnc                    1b             \n\t"
1940
1941             "pop             %%"REG_BP"             \n\t"
1942 #if defined(PIC)
1943             "pop              %%"REG_b"             \n\t"
1944 #endif
1945             : "+a" (counter)
1946             : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
1947 #if !defined(PIC)
1948             : "%"REG_b
1949 #endif
1950         );
1951     } else {
1952         const uint8_t *offset = src+filterSize;
1953         x86_reg counter= -2*dstW;
1954         //filter-= counter*filterSize/2;
1955         filterPos-= counter/2;
1956         dst-= counter/2;
1957         __asm__ volatile(
1958             "pxor                  %%mm7, %%mm7     \n\t"
1959             ".p2align                  4            \n\t"
1960             "1:                                     \n\t"
1961             "mov                      %2, %%"REG_c" \n\t"
1962             "movzwl      (%%"REG_c", %0), %%eax     \n\t"
1963             "movzwl     2(%%"REG_c", %0), %%edx     \n\t"
1964             "mov                      %5, %%"REG_c" \n\t"
1965             "pxor                  %%mm4, %%mm4     \n\t"
1966             "pxor                  %%mm5, %%mm5     \n\t"
1967             "2:                                     \n\t"
1968             "movq                   (%1), %%mm1     \n\t"
1969             "movq               (%1, %6), %%mm3     \n\t"
1970             "movd (%%"REG_c", %%"REG_a"), %%mm0     \n\t"
1971             "movd (%%"REG_c", %%"REG_d"), %%mm2     \n\t"
1972             "punpcklbw             %%mm7, %%mm0     \n\t"
1973             "punpcklbw             %%mm7, %%mm2     \n\t"
1974             "pmaddwd               %%mm1, %%mm0     \n\t"
1975             "pmaddwd               %%mm2, %%mm3     \n\t"
1976             "paddd                 %%mm3, %%mm5     \n\t"
1977             "paddd                 %%mm0, %%mm4     \n\t"
1978             "add                      $8, %1        \n\t"
1979             "add                      $4, %%"REG_c" \n\t"
1980             "cmp                      %4, %%"REG_c" \n\t"
1981             " jb                      2b            \n\t"
1982             "add                      %6, %1        \n\t"
1983             "movq                  %%mm4, %%mm0     \n\t"
1984             "punpckldq             %%mm5, %%mm4     \n\t"
1985             "punpckhdq             %%mm5, %%mm0     \n\t"
1986             "paddd                 %%mm0, %%mm4     \n\t"
1987             "psrad                    $7, %%mm4     \n\t"
1988             "packssdw              %%mm4, %%mm4     \n\t"
1989             "mov                      %3, %%"REG_a" \n\t"
1990             "movd                  %%mm4, (%%"REG_a", %0)   \n\t"
1991             "add                      $4, %0        \n\t"
1992             " jnc                     1b            \n\t"
1993
1994             : "+r" (counter), "+r" (filter)
1995             : "m" (filterPos), "m" (dst), "m"(offset),
1996             "m" (src), "r" ((x86_reg)filterSize*2)
1997             : "%"REG_a, "%"REG_c, "%"REG_d
1998         );
1999     }
2000 }
2001 #endif /* !COMPILE_TEMPLATE_MMX2 */
2002
2003 #if COMPILE_TEMPLATE_MMX2
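/* hyscale_fast/hcscale_fast do not run a filter loop in C: they repeatedly
 * "call" into the MMX2 scaler code that swscale generated at init time into
 * c->lumMmx2FilterCode / c->chrMmx2FilterCode (the CALL_MMX2_FILTER_CODE
 * blocks), and then pad the right edge of the line by hand afterwards. */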
2004 static inline void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst,
2005                                         long dstWidth, const uint8_t *src, int srcW,
2006                                         int xInc)
2007 {
2008     int32_t *filterPos = c->hLumFilterPos;
2009     int16_t *filter    = c->hLumFilter;
2010     int     canMMX2BeUsed  = c->canMMX2BeUsed;
2011     void    *mmx2FilterCode= c->lumMmx2FilterCode;
2012     int i;
2013 #if defined(PIC)
2014     DECLARE_ALIGNED(8, uint64_t, ebxsave);
2015 #endif
2016
2017     __asm__ volatile(
2018 #if defined(PIC)
2019         "mov               %%"REG_b", %5        \n\t"
2020 #endif
2021         "pxor                  %%mm7, %%mm7     \n\t"
2022         "mov                      %0, %%"REG_c" \n\t"
2023         "mov                      %1, %%"REG_D" \n\t"
2024         "mov                      %2, %%"REG_d" \n\t"
2025         "mov                      %3, %%"REG_b" \n\t"
2026         "xor               %%"REG_a", %%"REG_a" \n\t" // i
2027         PREFETCH"        (%%"REG_c")            \n\t"
2028         PREFETCH"      32(%%"REG_c")            \n\t"
2029         PREFETCH"      64(%%"REG_c")            \n\t"
2030
2031 #if ARCH_X86_64
2032 #define CALL_MMX2_FILTER_CODE \
2033         "movl            (%%"REG_b"), %%esi     \n\t"\
2034         "call                    *%4            \n\t"\
2035         "movl (%%"REG_b", %%"REG_a"), %%esi     \n\t"\
2036         "add               %%"REG_S", %%"REG_c" \n\t"\
2037         "add               %%"REG_a", %%"REG_D" \n\t"\
2038         "xor               %%"REG_a", %%"REG_a" \n\t"\
2039
2040 #else
2041 #define CALL_MMX2_FILTER_CODE \
2042         "movl (%%"REG_b"), %%esi        \n\t"\
2043         "call         *%4                       \n\t"\
2044         "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
2045         "add               %%"REG_a", %%"REG_D" \n\t"\
2046         "xor               %%"REG_a", %%"REG_a" \n\t"\
2047
2048 #endif /* ARCH_X86_64 */
2049
2050         CALL_MMX2_FILTER_CODE
2051         CALL_MMX2_FILTER_CODE
2052         CALL_MMX2_FILTER_CODE
2053         CALL_MMX2_FILTER_CODE
2054         CALL_MMX2_FILTER_CODE
2055         CALL_MMX2_FILTER_CODE
2056         CALL_MMX2_FILTER_CODE
2057         CALL_MMX2_FILTER_CODE
2058
2059 #if defined(PIC)
2060         "mov                      %5, %%"REG_b" \n\t"
2061 #endif
2062         :: "m" (src), "m" (dst), "m" (filter), "m" (filterPos),
2063            "m" (mmx2FilterCode)
2064 #if defined(PIC)
2065           ,"m" (ebxsave)
2066 #endif
2067         : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
2068 #if !defined(PIC)
2069          ,"%"REG_b
2070 #endif
2071     );
2072
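    /* Replicate the last source pixel for output positions whose source
     * position lands at or beyond srcW-1; *128 brings the 8-bit value up to
     * the 15-bit range of the intermediate dst[] format. */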
2073     for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
2074         dst[i] = src[srcW-1]*128;
2075 }
2076
2077 static inline void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst,
2078                                         long dstWidth, const uint8_t *src1,
2079                                         const uint8_t *src2, int srcW, int xInc)
2080 {
2081     int32_t *filterPos = c->hChrFilterPos;
2082     int16_t *filter    = c->hChrFilter;
2083     int     canMMX2BeUsed  = c->canMMX2BeUsed;
2084     void    *mmx2FilterCode= c->chrMmx2FilterCode;
2085     int i;
2086 #if defined(PIC)
2087     DECLARE_ALIGNED(8, uint64_t, ebxsave);
2088 #endif
2089
2090     __asm__ volatile(
2091 #if defined(PIC)
2092         "mov          %%"REG_b", %6         \n\t"
2093 #endif
2094         "pxor             %%mm7, %%mm7      \n\t"
2095         "mov                 %0, %%"REG_c"  \n\t"
2096         "mov                 %1, %%"REG_D"  \n\t"
2097         "mov                 %2, %%"REG_d"  \n\t"
2098         "mov                 %3, %%"REG_b"  \n\t"
2099         "xor          %%"REG_a", %%"REG_a"  \n\t" // i
2100         PREFETCH"   (%%"REG_c")             \n\t"
2101         PREFETCH" 32(%%"REG_c")             \n\t"
2102         PREFETCH" 64(%%"REG_c")             \n\t"
2103
2104         CALL_MMX2_FILTER_CODE
2105         CALL_MMX2_FILTER_CODE
2106         CALL_MMX2_FILTER_CODE
2107         CALL_MMX2_FILTER_CODE
2108         "xor          %%"REG_a", %%"REG_a"  \n\t" // i
2109         "mov                 %5, %%"REG_c"  \n\t" // src
2110         "mov                 %1, %%"REG_D"  \n\t" // buf1
2111         "add              $"AV_STRINGIFY(VOF)", %%"REG_D"  \n\t"
2112         PREFETCH"   (%%"REG_c")             \n\t"
2113         PREFETCH" 32(%%"REG_c")             \n\t"
2114         PREFETCH" 64(%%"REG_c")             \n\t"
2115
2116         CALL_MMX2_FILTER_CODE
2117         CALL_MMX2_FILTER_CODE
2118         CALL_MMX2_FILTER_CODE
2119         CALL_MMX2_FILTER_CODE
2120
2121 #if defined(PIC)
2122         "mov %6, %%"REG_b"    \n\t"
2123 #endif
2124         :: "m" (src1), "m" (dst), "m" (filter), "m" (filterPos),
2125            "m" (mmx2FilterCode), "m" (src2)
2126 #if defined(PIC)
2127           ,"m" (ebxsave)
2128 #endif
2129         : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
2130 #if !defined(PIC)
2131          ,"%"REG_b
2132 #endif
2133     );
2134
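    /* Same right-edge padding as in hyscale_fast, done for both chroma
     * planes (the second plane starts VOFW entries into dst[]). */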
2135     for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) {
2136         dst[i] = src1[srcW-1]*128;
2137         dst[i+VOFW] = src2[srcW-1]*128;
2138     }
2139 }
2140 #endif /* COMPILE_TEMPLATE_MMX2 */
2141
2142 #if !COMPILE_TEMPLATE_MMX2
2143 static void updateMMXDitherTables(SwsContext *c, int dstY, int lumBufIndex, int chrBufIndex,
2144                                   int lastInLumBuf, int lastInChrBuf)
2145 {
2146     const int dstH= c->dstH;
2147     const int flags= c->flags;
2148     int16_t **lumPixBuf= c->lumPixBuf;
2149     int16_t **chrPixBuf= c->chrPixBuf;
2150     int16_t **alpPixBuf= c->alpPixBuf;
2151     const int vLumBufSize= c->vLumBufSize;
2152     const int vChrBufSize= c->vChrBufSize;
2153     int16_t *vLumFilterPos= c->vLumFilterPos;
2154     int16_t *vChrFilterPos= c->vChrFilterPos;
2155     int16_t *vLumFilter= c->vLumFilter;
2156     int16_t *vChrFilter= c->vChrFilter;
2157     int32_t *lumMmxFilter= c->lumMmxFilter;
2158     int32_t *chrMmxFilter= c->chrMmxFilter;
2159     int32_t av_unused *alpMmxFilter= c->alpMmxFilter;
2160     const int vLumFilterSize= c->vLumFilterSize;
2161     const int vChrFilterSize= c->vChrFilterSize;
2162     const int chrDstY= dstY>>c->chrDstVSubSample;
2163     const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
2164     const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
2165
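    /* Pick the per-line dither tables: RGB555 has only 5 bits of green, so
     * green also uses the coarser 8-level table there, while RGB565's 6-bit
     * green gets by with the 4-level one. */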
2166     c->blueDither= ff_dither8[dstY&1];
2167     if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555)
2168         c->greenDither= ff_dither8[dstY&1];
2169     else
2170         c->greenDither= ff_dither4[dstY&1];
2171     c->redDither= ff_dither8[(dstY+1)&1];
2172     if (dstY < dstH - 2) {
2173         const int16_t **lumSrcPtr= (const int16_t **) lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
2174         const int16_t **chrSrcPtr= (const int16_t **) chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
2175         const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
2176         int i;
2177         if (flags & SWS_ACCURATE_RND) {
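            /* Accurate-rounding layout: per pair of taps, store the two
             * source-row pointers plus both coefficients packed into one
             * dword (tap i in the low half, tap i+1 in the high half),
             * following the APCK_* offsets. */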
2178             int s= APCK_SIZE / 8;
2179             for (i=0; i<vLumFilterSize; i+=2) {
2180                 *(const void**)&lumMmxFilter[s*i              ]= lumSrcPtr[i  ];
2181                 *(const void**)&lumMmxFilter[s*i+APCK_PTR2/4  ]= lumSrcPtr[i+(vLumFilterSize>1)];
2182                 lumMmxFilter[s*i+APCK_COEF/4  ]=
2183                 lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i    ]
2184                            + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
2185                 if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
2186                     *(const void**)&alpMmxFilter[s*i              ]= alpSrcPtr[i  ];
2187                     *(const void**)&alpMmxFilter[s*i+APCK_PTR2/4  ]= alpSrcPtr[i+(vLumFilterSize>1)];
2188                     alpMmxFilter[s*i+APCK_COEF/4  ]=
2189                     alpMmxFilter[s*i+APCK_COEF/4+1]= lumMmxFilter[s*i+APCK_COEF/4  ];
2190                 }
2191             }
2192             for (i=0; i<vChrFilterSize; i+=2) {
2193                 *(const void**)&chrMmxFilter[s*i              ]= chrSrcPtr[i  ];
2194                 *(const void**)&chrMmxFilter[s*i+APCK_PTR2/4  ]= chrSrcPtr[i+(vChrFilterSize>1)];
2195                 chrMmxFilter[s*i+APCK_COEF/4  ]=
2196                 chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i    ]
2197                            + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
2198             }
2199         } else {
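            /* Default layout: four 32-bit slots per tap -
             *   [4*i+0]  low 32 bits of the source-line pointer
             *   [4*i+1]  high 32 bits of the pointer (0 on 32-bit builds)
             *   [4*i+2]  \ 16-bit coefficient replicated into both halves
             *   [4*i+3]  /  (coeff * 0x10001) */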
2200             for (i=0; i<vLumFilterSize; i++) {
2201                 lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
2202                 lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32;
2203                 lumMmxFilter[4*i+2]=
2204                 lumMmxFilter[4*i+3]=
2205                     ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
2206                 if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
2207                     alpMmxFilter[4*i+0]= (int32_t)alpSrcPtr[i];
2208                     alpMmxFilter[4*i+1]= (uint64_t)alpSrcPtr[i] >> 32;
2209                     alpMmxFilter[4*i+2]=
2210                     alpMmxFilter[4*i+3]= lumMmxFilter[4*i+2];
2211                 }
2212             }
2213             for (i=0; i<vChrFilterSize; i++) {
2214                 chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
2215                 chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32;
2216                 chrMmxFilter[4*i+2]=
2217                 chrMmxFilter[4*i+3]=
2218                     ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
2219             }
2220         }
2221     }
2222 }
2223 #endif /* !COMPILE_TEMPLATE_MMX2 */
2224
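/* sws_init_swScale() fills in the function pointers that the generic swscale
 * code will call for this template (plain MMX or MMX2): the vertical scalers
 * (yuv2yuv1, yuv2yuvX, yuv2packedX and the 1-/2-tap packed variants), the
 * horizontal scaler, and the converters that unpack the input format into
 * planar YV12-style data. */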
2225 static void RENAME(sws_init_swScale)(SwsContext *c)
2226 {
2227     enum PixelFormat srcFormat = c->srcFormat;
2228
2229     if (!(c->flags & SWS_BITEXACT)) {
2230         if (c->flags & SWS_ACCURATE_RND) {
2231             c->yuv2yuv1     = RENAME(yuv2yuv1_ar    );
2232             c->yuv2yuvX     = RENAME(yuv2yuvX_ar    );
2233             switch (c->dstFormat) {
2234             case PIX_FMT_RGB32:   c->yuv2packedX = RENAME(yuv2rgb32_X_ar);   break;
2235             case PIX_FMT_BGR24:   c->yuv2packedX = RENAME(yuv2bgr24_X_ar);   break;
2236             case PIX_FMT_RGB555:  c->yuv2packedX = RENAME(yuv2rgb555_X_ar);  break;
2237             case PIX_FMT_RGB565:  c->yuv2packedX = RENAME(yuv2rgb565_X_ar);  break;
2238             case PIX_FMT_YUYV422: c->yuv2packedX = RENAME(yuv2yuyv422_X_ar); break;
2239             default: break;
2240             }
2241         } else {
2242             c->yuv2yuv1     = RENAME(yuv2yuv1    );
2243             c->yuv2yuvX     = RENAME(yuv2yuvX    );
2244             switch (c->dstFormat) {
2245             case PIX_FMT_RGB32:   c->yuv2packedX = RENAME(yuv2rgb32_X);   break;
2246             case PIX_FMT_BGR24:   c->yuv2packedX = RENAME(yuv2bgr24_X);   break;
2247             case PIX_FMT_RGB555:  c->yuv2packedX = RENAME(yuv2rgb555_X);  break;
2248             case PIX_FMT_RGB565:  c->yuv2packedX = RENAME(yuv2rgb565_X);  break;
2249             case PIX_FMT_YUYV422: c->yuv2packedX = RENAME(yuv2yuyv422_X); break;
2250             default: break;
2251             }
2252         }
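        /* yuv2packed1/yuv2packed2 are the special cases for vertical filters
         * with one respectively two taps; unlike yuv2packedX above they have
         * no separate accurate-rounding variants. */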
2253         switch (c->dstFormat) {
2254         case PIX_FMT_RGB32:
2255                 c->yuv2packed1 = RENAME(yuv2rgb32_1);
2256                 c->yuv2packed2 = RENAME(yuv2rgb32_2);
2257                 break;
2258         case PIX_FMT_BGR24:
2259                 c->yuv2packed1 = RENAME(yuv2bgr24_1);
2260                 c->yuv2packed2 = RENAME(yuv2bgr24_2);
2261                 break;
2262         case PIX_FMT_RGB555:
2263                 c->yuv2packed1 = RENAME(yuv2rgb555_1);
2264                 c->yuv2packed2 = RENAME(yuv2rgb555_2);
2265                 break;
2266         case PIX_FMT_RGB565:
2267                 c->yuv2packed1 = RENAME(yuv2rgb565_1);
2268                 c->yuv2packed2 = RENAME(yuv2rgb565_2);
2269                 break;
2270         case PIX_FMT_YUYV422:
2271                 c->yuv2packed1 = RENAME(yuv2yuyv422_1);
2272                 c->yuv2packed2 = RENAME(yuv2yuyv422_2);
2273                 break;
2274         default:
2275                 break;
2276         }
2277     }
2278
2279 #if !COMPILE_TEMPLATE_MMX2
2280     c->hScale       = RENAME(hScale      );
2281 #endif /* !COMPILE_TEMPLATE_MMX2 */
2282
2283     // If the MMX2 fast-bilinear scaler cannot be used, fall back to the plain MMX hScale() above (it is faster than the x86 ASM one).
2284 #if COMPILE_TEMPLATE_MMX2
2285     if (c->flags & SWS_FAST_BILINEAR && c->canMMX2BeUsed)
2286     {
2287         c->hyscale_fast = RENAME(hyscale_fast);
2288         c->hcscale_fast = RENAME(hcscale_fast);
2289     } else {
2290 #endif /* COMPILE_TEMPLATE_MMX2 */
2291         c->hyscale_fast = NULL;
2292         c->hcscale_fast = NULL;
2293 #if COMPILE_TEMPLATE_MMX2
2294     }
2295 #endif /* COMPILE_TEMPLATE_MMX2 */
2296
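    /* Input unpacking: chrToYV12 converts the source chroma into the planar
     * layout the scaler works on.  yuy2ToUV/uyvyToUV deinterleave packed YUV,
     * nv12ToUV/nv21ToUV split the interleaved chroma plane, and BEToUV/LEToUV
     * handle the byte order of 16-bit planar input. */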
2297 #if !COMPILE_TEMPLATE_MMX2
2298     switch(srcFormat) {
2299         case PIX_FMT_YUYV422  : c->chrToYV12 = RENAME(yuy2ToUV); break;
2300         case PIX_FMT_UYVY422  : c->chrToYV12 = RENAME(uyvyToUV); break;
2301         case PIX_FMT_NV12     : c->chrToYV12 = RENAME(nv12ToUV); break;
2302         case PIX_FMT_NV21     : c->chrToYV12 = RENAME(nv21ToUV); break;
2303         case PIX_FMT_YUV420P16BE:
2304         case PIX_FMT_YUV422P16BE:
2305         case PIX_FMT_YUV444P16BE: c->chrToYV12 = RENAME(BEToUV); break;
2306         case PIX_FMT_YUV420P16LE:
2307         case PIX_FMT_YUV422P16LE:
2308         case PIX_FMT_YUV444P16LE: c->chrToYV12 = RENAME(LEToUV); break;
2309         default: break;
2310     }
2311 #endif /* !COMPILE_TEMPLATE_MMX2 */
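    /* For packed RGB input, full-resolution chroma extraction is only hooked
     * up when the source chroma is not horizontally subsampled. */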
2312     if (!c->chrSrcHSubSample) {
2313         switch(srcFormat) {
2314         case PIX_FMT_BGR24  : c->chrToYV12 = RENAME(bgr24ToUV); break;
2315         case PIX_FMT_RGB24  : c->chrToYV12 = RENAME(rgb24ToUV); break;
2316         default: break;
2317         }
2318     }
2319
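    /* lumToYV12 extracts the luma plane.  yuy2ToY and uyvyToY pick every
     * second byte (even respectively odd positions), which is why they also
     * cover the 16-bit planar, gray16 and Y400A cases: the wanted byte sits
     * in the same position for those formats. */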
2320     switch (srcFormat) {
2321 #if !COMPILE_TEMPLATE_MMX2
2322     case PIX_FMT_YUYV422  :
2323     case PIX_FMT_YUV420P16BE:
2324     case PIX_FMT_YUV422P16BE:
2325     case PIX_FMT_YUV444P16BE:
2326     case PIX_FMT_Y400A    :
2327     case PIX_FMT_GRAY16BE : c->lumToYV12 = RENAME(yuy2ToY); break;
2328     case PIX_FMT_UYVY422  :
2329     case PIX_FMT_YUV420P16LE:
2330     case PIX_FMT_YUV422P16LE:
2331     case PIX_FMT_YUV444P16LE:
2332     case PIX_FMT_GRAY16LE : c->lumToYV12 = RENAME(uyvyToY); break;
2333 #endif /* !COMPILE_TEMPLATE_MMX2 */
2334     case PIX_FMT_BGR24    : c->lumToYV12 = RENAME(bgr24ToY); break;
2335     case PIX_FMT_RGB24    : c->lumToYV12 = RENAME(rgb24ToY); break;
2336     default: break;
2337     }
2338 #if !COMPILE_TEMPLATE_MMX2
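    /* Alpha extraction, only relevant when an alpha buffer is in use; Y400A
     * is the only packed-alpha input handled here and reuses the
     * every-other-byte extractor. */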
2339     if (c->alpPixBuf) {
2340         switch (srcFormat) {
2341         case PIX_FMT_Y400A  : c->alpToYV12 = RENAME(yuy2ToY); break;
2342         default: break;
2343         }
2344     }
2345 #endif /* !COMPILE_TEMPLATE_MMX2 */
2346 }