
// Software scaling and colorspace conversion routines for MPlayer

// Original C implementation by A'rpi/ESP-team <arpi@thot.banki.hu>
// current version mostly by Michael Niedermayer (michaelni@gmx.at)
// the parts written by michael are under GNU GPL

#include <inttypes.h>
#include <string.h>
#include "../config.h"
#include "swscale.h"
#include "../mmx_defs.h"
#undef MOVNTQ

//#undef HAVE_MMX2
//#undef HAVE_MMX
//#undef ARCH_X86
#define DITHER1XBPP
int fullUVIpol=0;
//disables the unscaled height version
int allwaysIpol=0;

#define RET 0xC3 //near return opcode
/*
NOTES

known BUGS with known cause (no bugreports please!, but patches are welcome :) )
horizontal MMX2 scaler reads 1-7 samples too many (might cause a sig11)

Supported output formats: BGR15 BGR16 BGR24 BGR32
BGR15 & BGR16 MMX versions support dithering
Special versions: fast Y 1:1 scaling (no interpolation in y direction)

TODO
more intelligent misalignment avoidance for the horizontal scaler
*/

#define ABS(a) ((a) > 0 ? (a) : (-(a)))
#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))

#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif

#ifdef HAVE_MMX2
#define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif


#ifdef HAVE_MMX
static uint64_t __attribute__((aligned(8))) yCoeff=    0x2568256825682568LL;
static uint64_t __attribute__((aligned(8))) vrCoeff=   0x3343334333433343LL;
static uint64_t __attribute__((aligned(8))) ubCoeff=   0x40cf40cf40cf40cfLL;
static uint64_t __attribute__((aligned(8))) vgCoeff=   0xE5E2E5E2E5E2E5E2LL;
static uint64_t __attribute__((aligned(8))) ugCoeff=   0xF36EF36EF36EF36ELL;
static uint64_t __attribute__((aligned(8))) w400=      0x0400040004000400LL;
static uint64_t __attribute__((aligned(8))) w80=       0x0080008000800080LL;
static uint64_t __attribute__((aligned(8))) w10=       0x0010001000100010LL;
static uint64_t __attribute__((aligned(8))) bm00001111=0x00000000FFFFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm00000111=0x0000000000FFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm11111000=0xFFFFFFFFFF000000LL;

static uint64_t __attribute__((aligned(8))) b16Dither= 0x0004000400040004LL;
static uint64_t __attribute__((aligned(8))) b16Dither1=0x0004000400040004LL;
static uint64_t __attribute__((aligned(8))) b16Dither2=0x0602060206020602LL;
static uint64_t __attribute__((aligned(8))) g16Dither= 0x0002000200020002LL;
static uint64_t __attribute__((aligned(8))) g16Dither1=0x0002000200020002LL;
static uint64_t __attribute__((aligned(8))) g16Dither2=0x0301030103010301LL;

static uint64_t __attribute__((aligned(8))) b16Mask=   0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g16Mask=   0x07E007E007E007E0LL;
static uint64_t __attribute__((aligned(8))) r16Mask=   0xF800F800F800F800LL;
static uint64_t __attribute__((aligned(8))) b15Mask=   0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g15Mask=   0x03E003E003E003E0LL;
static uint64_t __attribute__((aligned(8))) r15Mask=   0x7C007C007C007C00LL;

static uint64_t __attribute__((aligned(8))) temp0;
static uint64_t __attribute__((aligned(8))) asm_yalpha1;
static uint64_t __attribute__((aligned(8))) asm_uvalpha1;
#endif

// temporary storage for 4 yuv lines:
// 16bit for now (mmx likes it more compact)
#ifdef HAVE_MMX
static uint16_t __attribute__((aligned(8))) pix_buf_y[4][2048];
static uint16_t __attribute__((aligned(8))) pix_buf_uv[2][2048*2];
#else
static uint16_t pix_buf_y[4][2048];
static uint16_t pix_buf_uv[2][2048*2];
#endif
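
// Fixed-point layout of these buffers (a sketch of the convention used here):
// the horizontal scalers below store each sample multiplied by ~128 (7
// fractional bits), and the vertical blend applies 12 bit alpha factors
// (0..4095), which is why the C paths shift the blended products right by
// 19 (= 7+12) to recover 8 bit samples.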

// clipping helper table for C implementations:
static unsigned char clip_table[768];
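// clip_table is filled in elsewhere at init time; a 768-entry table of this
// shape is conventionally built so that clip_table[x+256] clamps x to 0..255,
// roughly (an illustrative sketch, not the original init code):
//	for(i=0; i<768; i++)
//		clip_table[i]= i<256 ? 0 : (i>511 ? 255 : i-256);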

// yuv->rgb conversion tables:
static    int yuvtab_2568[256];
static    int yuvtab_3343[256];
static    int yuvtab_0c92[256];
static    int yuvtab_1a1e[256];
static    int yuvtab_40cf[256];
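// The table names encode their BT.601 coefficients in hex, scaled by 2^13
// (hence the >>13 after summing the terms in the C paths):
//   0x2568/8192 ~= 1.164 (Y gain),  0x3343/8192 ~= 1.60 (V->R),
//   0x0c92/8192 ~= 0.39 (U->G),     0x1a1e/8192 ~= 0.82 (V->G),
//   0x40cf/8192 ~= 2.02 (U->B).
// The MMX constants above hold the same factors per 16 bit word; vgCoeff
// (0xE5E2) and ugCoeff (0xF36E) are the negated V->G and U->G values.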


static uint8_t funnyYCode[10000];
static uint8_t funnyUVCode[10000];
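// funnyYCode/funnyUVCode hold horizontal-scaler machine code generated at
// run time (terminated with the RET opcode defined above); the MMX2 paths in
// hyscale()/hcscale() below jump into it via "call funnyYCode" and
// "call funnyUVCode".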

static int canMMX2BeUsed=0;

#define FULL_YSCALEYUV2RGB \
		"pxor %%mm7, %%mm7              \n\t"\
		"movd %6, %%mm6                 \n\t" /*yalpha1*/\
		"punpcklwd %%mm6, %%mm6         \n\t"\
		"punpcklwd %%mm6, %%mm6         \n\t"\
		"movd %7, %%mm5                 \n\t" /*uvalpha1*/\
		"punpcklwd %%mm5, %%mm5         \n\t"\
		"punpcklwd %%mm5, %%mm5         \n\t"\
		"xorl %%eax, %%eax              \n\t"\
		"1:                             \n\t"\
		"movq (%0, %%eax, 2), %%mm0     \n\t" /*buf0[eax]*/\
		"movq (%1, %%eax, 2), %%mm1     \n\t" /*buf1[eax]*/\
		"movq (%2, %%eax,2), %%mm2      \n\t" /* uvbuf0[eax]*/\
		"movq (%3, %%eax,2), %%mm3      \n\t" /* uvbuf1[eax]*/\
		"psubw %%mm1, %%mm0             \n\t" /* buf0[eax] - buf1[eax]*/\
		"psubw %%mm3, %%mm2             \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
		"pmulhw %%mm6, %%mm0            \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
		"pmulhw %%mm5, %%mm2            \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
		"psraw $4, %%mm1                \n\t" /* buf1[eax] >>4*/\
		"movq 4096(%2, %%eax,2), %%mm4  \n\t" /* uvbuf0[eax+2048]*/\
		"psraw $4, %%mm3                \n\t" /* uvbuf1[eax] >>4*/\
		"paddw %%mm0, %%mm1             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
		"movq 4096(%3, %%eax,2), %%mm0  \n\t" /* uvbuf1[eax+2048]*/\
		"paddw %%mm2, %%mm3             \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
		"psubw %%mm0, %%mm4             \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
		"psubw w80, %%mm1               \n\t" /* 8(Y-16)*/\
		"psubw w400, %%mm3              \n\t" /* 8(U-128)*/\
		"pmulhw yCoeff, %%mm1           \n\t"\
\
\
		"pmulhw %%mm5, %%mm4            \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
		"movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
		"pmulhw ubCoeff, %%mm3          \n\t"\
		"psraw $4, %%mm0                \n\t" /* uvbuf1[eax+2048] >>4*/\
		"pmulhw ugCoeff, %%mm2          \n\t"\
		"paddw %%mm4, %%mm0             \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
		"psubw w400, %%mm0              \n\t" /* (V-128)8*/\
\
\
		"movq %%mm0, %%mm4              \n\t" /* (V-128)8*/\
		"pmulhw vrCoeff, %%mm0          \n\t"\
		"pmulhw vgCoeff, %%mm4          \n\t"\
		"paddw %%mm1, %%mm3             \n\t" /* B*/\
		"paddw %%mm1, %%mm0             \n\t" /* R*/\
		"packuswb %%mm3, %%mm3          \n\t"\
\
		"packuswb %%mm0, %%mm0          \n\t"\
		"paddw %%mm4, %%mm2             \n\t"\
		"paddw %%mm2, %%mm1             \n\t" /* G*/\
\
		"packuswb %%mm1, %%mm1          \n\t"

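/* Scalar sketch of what FULL_YSCALEYUV2RGB computes per pixel (cf. the C
   fallback in yuv2rgbX() below): Y/U/V are blended from two lines with the
   12 bit alphas, then converted with the BT.601 factors listed above:
	Y= (buf0[i]*yalpha1        + buf1[i]*yalpha        ) >>19;
	U= (uvbuf0[i]*uvalpha1     + uvbuf1[i]*uvalpha     ) >>19;
	V= (uvbuf0[i+2048]*uvalpha1+ uvbuf1[i+2048]*uvalpha) >>19;
	B= clip(1.164*(Y-16) + 2.02*(U-128));
	G= clip(1.164*(Y-16) - 0.39*(U-128) - 0.82*(V-128));
	R= clip(1.164*(Y-16) + 1.60*(V-128));
*/
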
#define YSCALEYUV2RGB \
		"movd %6, %%mm6                 \n\t" /*yalpha1*/\
		"punpcklwd %%mm6, %%mm6         \n\t"\
		"punpcklwd %%mm6, %%mm6         \n\t"\
		"movq %%mm6, asm_yalpha1        \n\t"\
		"movd %7, %%mm5                 \n\t" /*uvalpha1*/\
		"punpcklwd %%mm5, %%mm5         \n\t"\
		"punpcklwd %%mm5, %%mm5         \n\t"\
		"movq %%mm5, asm_uvalpha1       \n\t"\
		"xorl %%eax, %%eax              \n\t"\
		"1:                             \n\t"\
		"movq (%2, %%eax), %%mm2        \n\t" /* uvbuf0[eax]*/\
		"movq (%3, %%eax), %%mm3        \n\t" /* uvbuf1[eax]*/\
		"movq 4096(%2, %%eax), %%mm5    \n\t" /* uvbuf0[eax+2048]*/\
		"movq 4096(%3, %%eax), %%mm4    \n\t" /* uvbuf1[eax+2048]*/\
		"psubw %%mm3, %%mm2             \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
		"psubw %%mm4, %%mm5             \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
		"movq asm_uvalpha1, %%mm0       \n\t"\
		"pmulhw %%mm0, %%mm2            \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
		"pmulhw %%mm0, %%mm5            \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
		"psraw $4, %%mm3                \n\t" /* uvbuf1[eax] >>4*/\
		"psraw $4, %%mm4                \n\t" /* uvbuf1[eax+2048] >>4*/\
		"paddw %%mm2, %%mm3             \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
		"paddw %%mm5, %%mm4             \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
		"psubw w400, %%mm3              \n\t" /* (U-128)8*/\
		"psubw w400, %%mm4              \n\t" /* (V-128)8*/\
		"movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
		"movq %%mm4, %%mm5              \n\t" /* (V-128)8*/\
		"pmulhw ugCoeff, %%mm3          \n\t"\
		"pmulhw vgCoeff, %%mm4          \n\t"\
	/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
		"movq (%0, %%eax, 2), %%mm0     \n\t" /*buf0[eax]*/\
		"movq (%1, %%eax, 2), %%mm1     \n\t" /*buf1[eax]*/\
		"movq 8(%0, %%eax, 2), %%mm6    \n\t" /*buf0[eax+4]*/\
		"movq 8(%1, %%eax, 2), %%mm7    \n\t" /*buf1[eax+4]*/\
		"psubw %%mm1, %%mm0             \n\t" /* buf0[eax] - buf1[eax]*/\
		"psubw %%mm7, %%mm6             \n\t" /* buf0[eax+4] - buf1[eax+4]*/\
		"pmulhw asm_yalpha1, %%mm0      \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
		"pmulhw asm_yalpha1, %%mm6      \n\t" /* (buf0[eax+4] - buf1[eax+4])yalpha1>>16*/\
		"psraw $4, %%mm1                \n\t" /* buf1[eax] >>4*/\
		"psraw $4, %%mm7                \n\t" /* buf1[eax+4] >>4*/\
		"paddw %%mm0, %%mm1             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
		"paddw %%mm6, %%mm7             \n\t" /* buf0[eax+4]yalpha1 + buf1[eax+4](1-yalpha1) >>16*/\
		"pmulhw ubCoeff, %%mm2          \n\t"\
		"pmulhw vrCoeff, %%mm5          \n\t"\
		"psubw w80, %%mm1               \n\t" /* 8(Y-16)*/\
		"psubw w80, %%mm7               \n\t" /* 8(Y-16)*/\
		"pmulhw yCoeff, %%mm1           \n\t"\
		"pmulhw yCoeff, %%mm7           \n\t"\
	/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
		"paddw %%mm3, %%mm4             \n\t"\
		"movq %%mm2, %%mm0              \n\t"\
		"movq %%mm5, %%mm6              \n\t"\
		"movq %%mm4, %%mm3              \n\t"\
		"punpcklwd %%mm2, %%mm2         \n\t"\
		"punpcklwd %%mm5, %%mm5         \n\t"\
		"punpcklwd %%mm4, %%mm4         \n\t"\
		"paddw %%mm1, %%mm2             \n\t"\
		"paddw %%mm1, %%mm5             \n\t"\
		"paddw %%mm1, %%mm4             \n\t"\
		"punpckhwd %%mm0, %%mm0         \n\t"\
		"punpckhwd %%mm6, %%mm6         \n\t"\
		"punpckhwd %%mm3, %%mm3         \n\t"\
		"paddw %%mm7, %%mm0             \n\t"\
		"paddw %%mm7, %%mm6             \n\t"\
		"paddw %%mm7, %%mm3             \n\t"\
		/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
		"packuswb %%mm0, %%mm2          \n\t"\
		"packuswb %%mm6, %%mm5          \n\t"\
		"packuswb %%mm3, %%mm4          \n\t"\
		"pxor %%mm7, %%mm7              \n\t"

#define YSCALEYUV2RGB1 \
		"xorl %%eax, %%eax              \n\t"\
		"1:                             \n\t"\
		"movq (%2, %%eax), %%mm3        \n\t" /* uvbuf0[eax]*/\
		"movq 4096(%2, %%eax), %%mm4    \n\t" /* uvbuf0[eax+2048]*/\
		"psraw $4, %%mm3                \n\t" /* uvbuf0[eax] >>4*/\
		"psraw $4, %%mm4                \n\t" /* uvbuf0[eax+2048] >>4*/\
		"psubw w400, %%mm3              \n\t" /* (U-128)8*/\
		"psubw w400, %%mm4              \n\t" /* (V-128)8*/\
		"movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
		"movq %%mm4, %%mm5              \n\t" /* (V-128)8*/\
		"pmulhw ugCoeff, %%mm3          \n\t"\
		"pmulhw vgCoeff, %%mm4          \n\t"\
	/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
		"movq (%1, %%eax, 2), %%mm1     \n\t" /*buf0[eax]*/\
		"movq 8(%1, %%eax, 2), %%mm7    \n\t" /*buf0[eax+4]*/\
		"psraw $4, %%mm1                \n\t" /* buf0[eax] >>4*/\
		"psraw $4, %%mm7                \n\t" /* buf0[eax+4] >>4*/\
		"pmulhw ubCoeff, %%mm2          \n\t"\
		"pmulhw vrCoeff, %%mm5          \n\t"\
		"psubw w80, %%mm1               \n\t" /* 8(Y-16)*/\
		"psubw w80, %%mm7               \n\t" /* 8(Y-16)*/\
		"pmulhw yCoeff, %%mm1           \n\t"\
		"pmulhw yCoeff, %%mm7           \n\t"\
	/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
		"paddw %%mm3, %%mm4             \n\t"\
		"movq %%mm2, %%mm0              \n\t"\
		"movq %%mm5, %%mm6              \n\t"\
		"movq %%mm4, %%mm3              \n\t"\
		"punpcklwd %%mm2, %%mm2         \n\t"\
		"punpcklwd %%mm5, %%mm5         \n\t"\
		"punpcklwd %%mm4, %%mm4         \n\t"\
		"paddw %%mm1, %%mm2             \n\t"\
		"paddw %%mm1, %%mm5             \n\t"\
		"paddw %%mm1, %%mm4             \n\t"\
		"punpckhwd %%mm0, %%mm0         \n\t"\
		"punpckhwd %%mm6, %%mm6         \n\t"\
		"punpckhwd %%mm3, %%mm3         \n\t"\
		"paddw %%mm7, %%mm0             \n\t"\
		"paddw %%mm7, %%mm6             \n\t"\
		"paddw %%mm7, %%mm3             \n\t"\
		/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
		"packuswb %%mm0, %%mm2          \n\t"\
		"packuswb %%mm6, %%mm5          \n\t"\
		"packuswb %%mm3, %%mm4          \n\t"\
		"pxor %%mm7, %%mm7              \n\t"

#define WRITEBGR32 \
		/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
			"movq %%mm2, %%mm1              \n\t" /* B */\
			"movq %%mm5, %%mm6              \n\t" /* R */\
			"punpcklbw %%mm4, %%mm2         \n\t" /* GBGBGBGB 0 */\
			"punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R 0 */\
			"punpckhbw %%mm4, %%mm1         \n\t" /* GBGBGBGB 2 */\
			"punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R 2 */\
			"movq %%mm2, %%mm0              \n\t" /* GBGBGBGB 0 */\
			"movq %%mm1, %%mm3              \n\t" /* GBGBGBGB 2 */\
			"punpcklwd %%mm5, %%mm0         \n\t" /* 0RGB0RGB 0 */\
			"punpckhwd %%mm5, %%mm2         \n\t" /* 0RGB0RGB 1 */\
			"punpcklwd %%mm6, %%mm1         \n\t" /* 0RGB0RGB 2 */\
			"punpckhwd %%mm6, %%mm3         \n\t" /* 0RGB0RGB 3 */\
\
			MOVNTQ(%%mm0, (%4, %%eax, 4))\
			MOVNTQ(%%mm2, 8(%4, %%eax, 4))\
			MOVNTQ(%%mm1, 16(%4, %%eax, 4))\
			MOVNTQ(%%mm3, 24(%4, %%eax, 4))\
\
			"addl $8, %%eax                 \n\t"\
			"cmpl %5, %%eax                 \n\t"\
			" jb 1b                         \n\t"

#define WRITEBGR16 \
			"movq %%mm2, %%mm1              \n\t" /* B */\
			"movq %%mm4, %%mm3              \n\t" /* G */\
			"movq %%mm5, %%mm6              \n\t" /* R */\
\
			"punpcklbw %%mm7, %%mm3         \n\t" /* 0G0G0G0G */\
			"punpcklbw %%mm7, %%mm2         \n\t" /* 0B0B0B0B */\
			"punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R */\
\
			"psrlw $3, %%mm2                \n\t"\
			"psllw $3, %%mm3                \n\t"\
			"psllw $8, %%mm5                \n\t"\
\
			"pand g16Mask, %%mm3            \n\t"\
			"pand r16Mask, %%mm5            \n\t"\
\
			"por %%mm3, %%mm2               \n\t"\
			"por %%mm5, %%mm2               \n\t"\
\
			"punpckhbw %%mm7, %%mm4         \n\t" /* 0G0G0G0G */\
			"punpckhbw %%mm7, %%mm1         \n\t" /* 0B0B0B0B */\
			"punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R */\
\
			"psrlw $3, %%mm1                \n\t"\
			"psllw $3, %%mm4                \n\t"\
			"psllw $8, %%mm6                \n\t"\
\
			"pand g16Mask, %%mm4            \n\t"\
			"pand r16Mask, %%mm6            \n\t"\
\
			"por %%mm4, %%mm1               \n\t"\
			"por %%mm6, %%mm1               \n\t"\
\
			MOVNTQ(%%mm2, (%4, %%eax, 2))\
			MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
\
			"addl $8, %%eax                 \n\t"\
			"cmpl %5, %%eax                 \n\t"\
			" jb 1b                         \n\t"
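/* WRITEBGR16 packs 8 bit B,G,R into 5-6-5 words; the scalar equivalent of
   the shift/mask sequence above (matching the C fallbacks below) is simply:
	uint16_t px= (b>>3) | ((g<<3)&0x07E0) | ((r<<8)&0xF800);
*/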

#define WRITEBGR15 \
			"movq %%mm2, %%mm1              \n\t" /* B */\
			"movq %%mm4, %%mm3              \n\t" /* G */\
			"movq %%mm5, %%mm6              \n\t" /* R */\
\
			"punpcklbw %%mm7, %%mm3         \n\t" /* 0G0G0G0G */\
			"punpcklbw %%mm7, %%mm2         \n\t" /* 0B0B0B0B */\
			"punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R */\
\
			"psrlw $3, %%mm2                \n\t"\
			"psllw $2, %%mm3                \n\t"\
			"psllw $7, %%mm5                \n\t"\
\
			"pand g15Mask, %%mm3            \n\t"\
			"pand r15Mask, %%mm5            \n\t"\
\
			"por %%mm3, %%mm2               \n\t"\
			"por %%mm5, %%mm2               \n\t"\
\
			"punpckhbw %%mm7, %%mm4         \n\t" /* 0G0G0G0G */\
			"punpckhbw %%mm7, %%mm1         \n\t" /* 0B0B0B0B */\
			"punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R */\
\
			"psrlw $3, %%mm1                \n\t"\
			"psllw $2, %%mm4                \n\t"\
			"psllw $7, %%mm6                \n\t"\
\
			"pand g15Mask, %%mm4            \n\t"\
			"pand r15Mask, %%mm6            \n\t"\
\
			"por %%mm4, %%mm1               \n\t"\
			"por %%mm6, %%mm1               \n\t"\
\
			MOVNTQ(%%mm2, (%4, %%eax, 2))\
			MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
\
			"addl $8, %%eax                 \n\t"\
			"cmpl %5, %%eax                 \n\t"\
			" jb 1b                         \n\t"
// FIXME find a faster way to shuffle it to BGR24
#define WRITEBGR24 \
		/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
			"movq %%mm2, %%mm1              \n\t" /* B */\
			"movq %%mm5, %%mm6              \n\t" /* R */\
			"punpcklbw %%mm4, %%mm2         \n\t" /* GBGBGBGB 0 */\
			"punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R 0 */\
			"punpckhbw %%mm4, %%mm1         \n\t" /* GBGBGBGB 2 */\
			"punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R 2 */\
			"movq %%mm2, %%mm0              \n\t" /* GBGBGBGB 0 */\
			"movq %%mm1, %%mm3              \n\t" /* GBGBGBGB 2 */\
			"punpcklwd %%mm5, %%mm0         \n\t" /* 0RGB0RGB 0 */\
			"punpckhwd %%mm5, %%mm2         \n\t" /* 0RGB0RGB 1 */\
			"punpcklwd %%mm6, %%mm1         \n\t" /* 0RGB0RGB 2 */\
			"punpckhwd %%mm6, %%mm3         \n\t" /* 0RGB0RGB 3 */\
\
			"movq %%mm0, %%mm4              \n\t" /* 0RGB0RGB 0 */\
			"psrlq $8, %%mm0                \n\t" /* 00RGB0RG 0 */\
			"pand bm00000111, %%mm4         \n\t" /* 00000RGB 0 */\
			"pand bm11111000, %%mm0         \n\t" /* 00RGB000 0.5 */\
			"por %%mm4, %%mm0               \n\t" /* 00RGBRGB 0 */\
			"movq %%mm2, %%mm4              \n\t" /* 0RGB0RGB 1 */\
			"psllq $48, %%mm2               \n\t" /* GB000000 1 */\
			"por %%mm2, %%mm0               \n\t" /* GBRGBRGB 0 */\
\
			"movq %%mm4, %%mm2              \n\t" /* 0RGB0RGB 1 */\
			"psrld $16, %%mm4               \n\t" /* 000R000R 1 */\
			"psrlq $24, %%mm2               \n\t" /* 0000RGB0 1.5 */\
			"por %%mm4, %%mm2               \n\t" /* 000RRGBR 1 */\
			"pand bm00001111, %%mm2         \n\t" /* 0000RGBR 1 */\
			"movq %%mm1, %%mm4              \n\t" /* 0RGB0RGB 2 */\
			"psrlq $8, %%mm1                \n\t" /* 00RGB0RG 2 */\
			"pand bm00000111, %%mm4         \n\t" /* 00000RGB 2 */\
			"pand bm11111000, %%mm1         \n\t" /* 00RGB000 2.5 */\
			"por %%mm4, %%mm1               \n\t" /* 00RGBRGB 2 */\
			"movq %%mm1, %%mm4              \n\t" /* 00RGBRGB 2 */\
			"psllq $32, %%mm1               \n\t" /* BRGB0000 2 */\
			"por %%mm1, %%mm2               \n\t" /* BRGBRGBR 1 */\
\
			"psrlq $32, %%mm4               \n\t" /* 000000RG 2.5 */\
			"movq %%mm3, %%mm5              \n\t" /* 0RGB0RGB 3 */\
			"psrlq $8, %%mm3                \n\t" /* 00RGB0RG 3 */\
			"pand bm00000111, %%mm5         \n\t" /* 00000RGB 3 */\
			"pand bm11111000, %%mm3         \n\t" /* 00RGB000 3.5 */\
			"por %%mm5, %%mm3               \n\t" /* 00RGBRGB 3 */\
			"psllq $16, %%mm3               \n\t" /* RGBRGB00 3 */\
			"por %%mm4, %%mm3               \n\t" /* RGBRGBRG 2.5 */\
\
			"leal (%%eax, %%eax, 2), %%ebx  \n\t"\
			MOVNTQ(%%mm0, (%4, %%ebx))\
			MOVNTQ(%%mm2, 8(%4, %%ebx))\
			MOVNTQ(%%mm3, 16(%4, %%ebx))\
\
			"addl $8, %%eax                 \n\t"\
			"cmpl %5, %%eax                 \n\t"\
			" jb 1b                         \n\t"


static inline void yuv2yuv(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                           uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstw, int yalpha, int uvalpha)
{
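	// yalpha/uvalpha are 12 bit blend factors, so x^4095 == 4095-x here:
	// yalpha1/uvalpha1 are the complementary (1-alpha) weights in fixed point.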
	int yalpha1=yalpha^4095;
	int uvalpha1=uvalpha^4095;
	int i;

	asm volatile ("\n\t"::: "memory");

	for(i=0;i<dstw;i++)
	{
		((uint8_t*)dest)[0] = (buf0[i]*yalpha1+buf1[i]*yalpha)>>19;
		dest++;
	}

	if(uvalpha != -1)
	{
		for(i=0; i<dstw/2; i++)
		{
			((uint8_t*)uDest)[0] = (uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19;
			((uint8_t*)vDest)[0] = (uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19;
			uDest++;
			vDest++;
		}
	}
}

/**
 * vertical scale YV12 to RGB
 */
static inline void yuv2rgbX(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                            uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
{
	int yalpha1=yalpha^4095;
	int uvalpha1=uvalpha^4095;
	int i;

	if(fullUVIpol)
	{

#ifdef HAVE_MMX
		if(dstbpp == 32)
		{
			asm volatile(


FULL_YSCALEYUV2RGB
			"punpcklbw %%mm1, %%mm3         \n\t" // BGBGBGBG
			"punpcklbw %%mm7, %%mm0         \n\t" // R0R0R0R0

			"movq %%mm3, %%mm1              \n\t"
			"punpcklwd %%mm0, %%mm3         \n\t" // BGR0BGR0
			"punpckhwd %%mm0, %%mm1         \n\t" // BGR0BGR0

			MOVNTQ(%%mm3, (%4, %%eax, 4))
			MOVNTQ(%%mm1, 8(%4, %%eax, 4))

			"addl $4, %%eax                 \n\t"
			"cmpl %5, %%eax                 \n\t"
			" jb 1b                         \n\t"


			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
			"m" (yalpha1), "m" (uvalpha1)
			: "%eax"
			);
		}
		else if(dstbpp==24)
		{
			asm volatile(

FULL_YSCALEYUV2RGB

								// lsb ... msb
			"punpcklbw %%mm1, %%mm3         \n\t" // BGBGBGBG
			"punpcklbw %%mm7, %%mm0         \n\t" // R0R0R0R0

			"movq %%mm3, %%mm1              \n\t"
			"punpcklwd %%mm0, %%mm3         \n\t" // BGR0BGR0
			"punpckhwd %%mm0, %%mm1         \n\t" // BGR0BGR0

			"movq %%mm3, %%mm2              \n\t" // BGR0BGR0
			"psrlq $8, %%mm3                \n\t" // GR0BGR00
			"pand bm00000111, %%mm2         \n\t" // BGR00000
			"pand bm11111000, %%mm3         \n\t" // 000BGR00
			"por %%mm2, %%mm3               \n\t" // BGRBGR00
			"movq %%mm1, %%mm2              \n\t"
			"psllq $48, %%mm1               \n\t" // 000000BG
			"por %%mm1, %%mm3               \n\t" // BGRBGRBG

			"movq %%mm2, %%mm1              \n\t" // BGR0BGR0
			"psrld $16, %%mm2               \n\t" // R000R000
			"psrlq $24, %%mm1               \n\t" // 0BGR0000
			"por %%mm2, %%mm1               \n\t" // RBGRR000

			"movl %4, %%ebx                 \n\t"
			"addl %%eax, %%ebx              \n\t"

#ifdef HAVE_MMX2
			//FIXME Alignment
			"movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
			"movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
#else
			"movd %%mm3, (%%ebx, %%eax, 2)  \n\t"
			"psrlq $32, %%mm3               \n\t"
			"movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
			"movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
#endif
			"addl $4, %%eax                 \n\t"
			"cmpl %5, %%eax                 \n\t"
			" jb 1b                         \n\t"

			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstw),
			"m" (yalpha1), "m" (uvalpha1)
			: "%eax", "%ebx"
			);
		}
		else if(dstbpp==15)
		{
			asm volatile(

FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
			"paddusb b16Dither, %%mm1       \n\t"
			"paddusb b16Dither, %%mm0       \n\t"
			"paddusb b16Dither, %%mm3       \n\t"
#endif
			"punpcklbw %%mm7, %%mm1         \n\t" // 0G0G0G0G
			"punpcklbw %%mm7, %%mm3         \n\t" // 0B0B0B0B
			"punpcklbw %%mm7, %%mm0         \n\t" // 0R0R0R0R

			"psrlw $3, %%mm3                \n\t"
			"psllw $2, %%mm1                \n\t"
			"psllw $7, %%mm0                \n\t"
			"pand g15Mask, %%mm1            \n\t"
			"pand r15Mask, %%mm0            \n\t"

			"por %%mm3, %%mm1               \n\t"
			"por %%mm1, %%mm0               \n\t"

			MOVNTQ(%%mm0, (%4, %%eax, 2))

			"addl $4, %%eax                 \n\t"
			"cmpl %5, %%eax                 \n\t"
			" jb 1b                         \n\t"

			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
			"m" (yalpha1), "m" (uvalpha1)
			: "%eax"
			);
		}
		else if(dstbpp==16)
		{
			asm volatile(

FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
			"paddusb g16Dither, %%mm1       \n\t"
			"paddusb b16Dither, %%mm0       \n\t"
			"paddusb b16Dither, %%mm3       \n\t"
#endif
			"punpcklbw %%mm7, %%mm1         \n\t" // 0G0G0G0G
			"punpcklbw %%mm7, %%mm3         \n\t" // 0B0B0B0B
			"punpcklbw %%mm7, %%mm0         \n\t" // 0R0R0R0R

			"psrlw $3, %%mm3                \n\t"
			"psllw $3, %%mm1                \n\t"
			"psllw $8, %%mm0                \n\t"
			"pand g16Mask, %%mm1            \n\t"
			"pand r16Mask, %%mm0            \n\t"

			"por %%mm3, %%mm1               \n\t"
			"por %%mm1, %%mm0               \n\t"

			MOVNTQ(%%mm0, (%4, %%eax, 2))

			"addl $4, %%eax                 \n\t"
			"cmpl %5, %%eax                 \n\t"
			" jb 1b                         \n\t"

			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
			"m" (yalpha1), "m" (uvalpha1)
			: "%eax"
			);
		}
#else
		asm volatile ("\n\t"::: "memory");

		if(dstbpp==32 || dstbpp==24)
		{
			for(i=0;i<dstw;i++){
				// vertical linear interpolation && yuv2rgb in a single step:
				int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
				int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
				int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
				dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
				dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
				dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
				dest+=dstbpp>>3;
			}
		}
		else if(dstbpp==16)
		{
			for(i=0;i<dstw;i++){
				// vertical linear interpolation && yuv2rgb in a single step:
				int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
				int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
				int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);

				((uint16_t*)dest)[0] =
					(clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
					((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<3)&0x07E0) |
					((clip_table[(Y + yuvtab_3343[V]) >>13]<<8)&0xF800);
				dest+=2;
			}
		}
		else if(dstbpp==15)
		{
			for(i=0;i<dstw;i++){
				// vertical linear interpolation && yuv2rgb in a single step:
				int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
				int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
				int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);

				((uint16_t*)dest)[0] =
					(clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
					((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<2)&0x03E0) |
					((clip_table[(Y + yuvtab_3343[V]) >>13]<<7)&0x7C00);
				dest+=2;
			}
		}
#endif
	}//FULL_UV_IPOL
	else
	{
#ifdef HAVE_MMX
		if(dstbpp == 32)
		{
			asm volatile(
				YSCALEYUV2RGB
				WRITEBGR32

			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
			"m" (yalpha1), "m" (uvalpha1)
			: "%eax"
			);
		}
		else if(dstbpp==24)
		{
			asm volatile(
				YSCALEYUV2RGB
				WRITEBGR24

			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
			"m" (yalpha1), "m" (uvalpha1)
			: "%eax", "%ebx"
			);
		}
		else if(dstbpp==15)
		{
			asm volatile(
				YSCALEYUV2RGB
		/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
				"paddusb b16Dither, %%mm2       \n\t"
				"paddusb b16Dither, %%mm4       \n\t"
				"paddusb b16Dither, %%mm5       \n\t"
#endif

				WRITEBGR15

			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
			"m" (yalpha1), "m" (uvalpha1)
			: "%eax"
			);
		}
		else if(dstbpp==16)
		{
			asm volatile(
				YSCALEYUV2RGB
		/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
				"paddusb g16Dither, %%mm2       \n\t"
				"paddusb b16Dither, %%mm4       \n\t"
				"paddusb b16Dither, %%mm5       \n\t"
#endif

				WRITEBGR16

			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
			"m" (yalpha1), "m" (uvalpha1)
			: "%eax"
			);
		}
#else
//FIXME unroll C loop and don't recalculate UV
		asm volatile ("\n\t"::: "memory");

		if(dstbpp==32 || dstbpp==24)
		{
			for(i=0;i<dstw;i++){
				// vertical linear interpolation && yuv2rgb in a single step:
				int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
				int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
				int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);
				dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
				dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
				dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
				dest+=dstbpp>>3;
			}
		}
		else if(dstbpp==16)
		{
			for(i=0;i<dstw;i++){
				// vertical linear interpolation && yuv2rgb in a single step:
				int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
				int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
				int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

				((uint16_t*)dest)[0] =
					(clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
					((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<3)&0x07E0) |
					((clip_table[(Y + yuvtab_3343[V]) >>13]<<8)&0xF800);
				dest+=2;
			}
		}
		else if(dstbpp==15)
		{
			for(i=0;i<dstw;i++){
				// vertical linear interpolation && yuv2rgb in a single step:
				int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
				int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
				int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

				((uint16_t*)dest)[0] =
					(clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
					((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<2)&0x03E0) |
					((clip_table[(Y + yuvtab_3343[V]) >>13]<<7)&0x7C00);
				dest+=2;
			}
		}
#endif
	} //!FULL_UV_IPOL
}

/**
 * YV12 to RGB without scaling or interpolating
 */
static inline void yuv2rgb1(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                            uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
{
	int yalpha1=yalpha^4095;
	int uvalpha1=uvalpha^4095;
	int i;
	if(fullUVIpol || allwaysIpol)
	{
		yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
		return;
	}
#ifdef HAVE_MMX
		if(dstbpp == 32)
		{
			asm volatile(
				YSCALEYUV2RGB1
				WRITEBGR32
			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
			"m" (yalpha1), "m" (uvalpha1)
			: "%eax"
			);
		}
		else if(dstbpp==24)
		{
			asm volatile(
				YSCALEYUV2RGB1
				WRITEBGR24
			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
			"m" (yalpha1), "m" (uvalpha1)
			: "%eax", "%ebx"
			);
		}
		else if(dstbpp==15)
		{
			asm volatile(
				YSCALEYUV2RGB1
		/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
				"paddusb b16Dither, %%mm2       \n\t"
				"paddusb b16Dither, %%mm4       \n\t"
				"paddusb b16Dither, %%mm5       \n\t"
#endif
				WRITEBGR15
			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
			"m" (yalpha1), "m" (uvalpha1)
			: "%eax"
			);
		}
		else if(dstbpp==16)
		{
			asm volatile(
				YSCALEYUV2RGB1
		/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
				"paddusb g16Dither, %%mm2       \n\t"
				"paddusb b16Dither, %%mm4       \n\t"
				"paddusb b16Dither, %%mm5       \n\t"
#endif

				WRITEBGR16
			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
			"m" (yalpha1), "m" (uvalpha1)
			: "%eax"
			);
		}
#else
//FIXME unroll C loop and don't recalculate UV
		asm volatile ("\n\t"::: "memory");

		if(dstbpp==32 || dstbpp==24)
		{
			for(i=0;i<dstw;i++){
				// vertical linear interpolation && yuv2rgb in a single step:
				int Y=yuvtab_2568[buf0[i]>>7];
				int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
				int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);
				dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
				dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
				dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
				dest+=dstbpp>>3;
			}
		}
		else if(dstbpp==16)
		{
			for(i=0;i<dstw;i++){
				// vertical linear interpolation && yuv2rgb in a single step:
				int Y=yuvtab_2568[buf0[i]>>7];
				int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
				int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

				((uint16_t*)dest)[0] =
					(clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
					((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<3)&0x07E0) |
					((clip_table[(Y + yuvtab_3343[V]) >>13]<<8)&0xF800);
				dest+=2;
			}
		}
		else if(dstbpp==15)
		{
			for(i=0;i<dstw;i++){
				// vertical linear interpolation && yuv2rgb in a single step:
				int Y=yuvtab_2568[buf0[i]>>7];
				int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
				int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

				((uint16_t*)dest)[0] =
					(clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
					((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<2)&0x03E0) |
					((clip_table[(Y + yuvtab_3343[V]) >>13]<<7)&0x7C00);
				dest+=2;
			}
		}
#endif
}


static inline void hyscale(uint16_t *dst, int dstWidth, uint8_t *src, int srcWidth, int xInc)
{
	int i;
	unsigned int xpos=0;
	// *** horizontal scale Y line to temp buffer
#ifdef ARCH_X86

#ifdef HAVE_MMX2
	if(canMMX2BeUsed)
	{
		asm volatile(
			"pxor %%mm7, %%mm7              \n\t"
			"pxor %%mm2, %%mm2              \n\t" // 2*xalpha
			"movd %5, %%mm6                 \n\t" // xInc&0xFFFF
			"punpcklwd %%mm6, %%mm6         \n\t"
			"punpcklwd %%mm6, %%mm6         \n\t"
			"movq %%mm6, %%mm2              \n\t"
			"psllq $16, %%mm2               \n\t"
			"paddw %%mm6, %%mm2             \n\t"
			"psllq $16, %%mm2               \n\t"
			"paddw %%mm6, %%mm2             \n\t"
			"psllq $16, %%mm2               \n\t" //0,t,2t,3t               t=xInc&0xFFFF
			"movq %%mm2, temp0              \n\t"
			"movd %4, %%mm6                 \n\t" //(xInc*4)&0xFFFF
			"punpcklwd %%mm6, %%mm6         \n\t"
			"punpcklwd %%mm6, %%mm6         \n\t"
			"xorl %%eax, %%eax              \n\t" // i
			"movl %0, %%esi                 \n\t" // src
			"movl %1, %%edi                 \n\t" // buf1
			"movl %3, %%edx                 \n\t" // (xInc*4)>>16
			"xorl %%ecx, %%ecx              \n\t"
			"xorl %%ebx, %%ebx              \n\t"
			"movw %4, %%bx                  \n\t" // (xInc*4)&0xFFFF

#define FUNNY_Y_CODE \
			PREFETCH" 1024(%%esi)           \n\t"\
			PREFETCH" 1056(%%esi)           \n\t"\
			PREFETCH" 1088(%%esi)           \n\t"\
			"call funnyYCode                \n\t"\
			"movq temp0, %%mm2              \n\t"\
			"xorl %%ecx, %%ecx              \n\t"

FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE

			:: "m" (src), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
			"m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF)
			: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
		);
		for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth-1; i--) dst[i] = src[srcWidth-1]*128;
	}
	else
	{
#endif
	// no MMX2, just normal asm ...
	asm volatile(
		"xorl %%eax, %%eax              \n\t" // i
		"xorl %%ebx, %%ebx              \n\t" // xx
		"xorl %%ecx, %%ecx              \n\t" // 2*xalpha
		"1:                             \n\t"
		"movzbl  (%0, %%ebx), %%edi     \n\t" //src[xx]
		"movzbl 1(%0, %%ebx), %%esi     \n\t" //src[xx+1]
		"subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
		"imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
		"shll $16, %%edi                \n\t"
		"addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
		"movl %1, %%edi                 \n\t"
		"shrl $9, %%esi                 \n\t"
		"movw %%si, (%%edi, %%eax, 2)   \n\t"
		"addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFFFF
		"adcl %3, %%ebx                 \n\t" //xx+= xInc>>16 + carry

		"movzbl (%0, %%ebx), %%edi      \n\t" //src[xx]
		"movzbl 1(%0, %%ebx), %%esi     \n\t" //src[xx+1]
		"subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
		"imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
		"shll $16, %%edi                \n\t"
		"addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
		"movl %1, %%edi                 \n\t"
		"shrl $9, %%esi                 \n\t"
		"movw %%si, 2(%%edi, %%eax, 2)  \n\t"
		"addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFFFF
		"adcl %3, %%ebx                 \n\t" //xx+= xInc>>16 + carry


		"addl $2, %%eax                 \n\t"
		"cmpl %2, %%eax                 \n\t"
		" jb 1b                         \n\t"


		:: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
		: "%eax", "%ebx", "%ecx", "%edi", "%esi"
		);
#ifdef HAVE_MMX2
	} //if MMX2 can't be used
#endif
#else
	for(i=0;i<dstWidth;i++){
		register unsigned int xx=xpos>>16;
		register unsigned int xalpha=(xpos&0xFFFF)>>9;
		dst[i]=(src[xx]*(xalpha^127)+src[xx+1]*xalpha);
		xpos+=xInc;
	}
#endif
}
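
// hyscale walks the source in 16.16 fixed point: xx=xpos>>16 selects the
// sample pair and xalpha=(xpos&0xFFFF)>>9 is a 7 bit blend weight. E.g. a
// 2x upscale has xInc=0x8000, so xalpha alternates 0,64,0,64,... and every
// second output sample is (nearly) the average of two neighbouring source
// samples.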


inline static void hcscale(uint16_t *dst, int dstWidth,
				uint8_t *src1, uint8_t *src2, int srcWidth, int xInc)
{
	int xpos=0;
	int i;
#ifdef ARCH_X86
#ifdef HAVE_MMX2
	if(canMMX2BeUsed)
	{
		asm volatile(
		"pxor %%mm7, %%mm7              \n\t"
		"pxor %%mm2, %%mm2              \n\t" // 2*xalpha
		"movd %5, %%mm6                 \n\t" // xInc&0xFFFF
		"punpcklwd %%mm6, %%mm6         \n\t"
		"punpcklwd %%mm6, %%mm6         \n\t"
		"movq %%mm6, %%mm2              \n\t"
		"psllq $16, %%mm2               \n\t"
		"paddw %%mm6, %%mm2             \n\t"
		"psllq $16, %%mm2               \n\t"
		"paddw %%mm6, %%mm2             \n\t"
		"psllq $16, %%mm2               \n\t" //0,t,2t,3t               t=xInc&0xFFFF
		"movq %%mm2, temp0              \n\t"
		"movd %4, %%mm6                 \n\t" //(xInc*4)&0xFFFF
		"punpcklwd %%mm6, %%mm6         \n\t"
		"punpcklwd %%mm6, %%mm6         \n\t"
		"xorl %%eax, %%eax              \n\t" // i
		"movl %0, %%esi                 \n\t" // src
		"movl %1, %%edi                 \n\t" // buf1
		"movl %3, %%edx                 \n\t" // (xInc*4)>>16
		"xorl %%ecx, %%ecx              \n\t"
		"xorl %%ebx, %%ebx              \n\t"
		"movw %4, %%bx                  \n\t" // (xInc*4)&0xFFFF

#define FUNNYUVCODE \
			PREFETCH" 1024(%%esi)           \n\t"\
			PREFETCH" 1056(%%esi)           \n\t"\
			PREFETCH" 1088(%%esi)           \n\t"\
			"call funnyUVCode               \n\t"\
			"movq temp0, %%mm2              \n\t"\
			"xorl %%ecx, %%ecx              \n\t"

FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE

FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE


		"xorl %%eax, %%eax              \n\t" // i
		"movl %6, %%esi                 \n\t" // src
		"movl %1, %%edi                 \n\t" // buf1
		"addl $4096, %%edi              \n\t"

FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE

FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE

		:: "m" (src1), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
		  "m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF), "m" (src2)
		: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
	);
		for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth/2-1; i--)
		{
			dst[i] = src1[srcWidth/2-1]*128;
			dst[i+2048] = src2[srcWidth/2-1]*128;
		}
	}
	else
	{
#endif
1101         asm volatile(
1102                 "xorl %%eax, %%eax              \n\t" // i
1103                 "xorl %%ebx, %%ebx              \n\t" // xx
1104                 "xorl %%ecx, %%ecx              \n\t" // 2*xalpha
1105                 "1:                             \n\t"
1106                 "movl %0, %%esi                 \n\t"
1107                 "movzbl  (%%esi, %%ebx), %%edi  \n\t" //src[xx]
1108                 "movzbl 1(%%esi, %%ebx), %%esi  \n\t" //src[xx+1]
1109                 "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
1110                 "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
1111                 "shll $16, %%edi                \n\t"
1112                 "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1113                 "movl %1, %%edi                 \n\t"
1114                 "shrl $9, %%esi                 \n\t"
1115                 "movw %%si, (%%edi, %%eax, 2)   \n\t"
1116
1117                 "movzbl  (%5, %%ebx), %%edi     \n\t" //src[xx]
1118                 "movzbl 1(%5, %%ebx), %%esi     \n\t" //src[xx+1]
1119                 "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
1120                 "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
1121                 "shll $16, %%edi                \n\t"
1122                 "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1123                 "movl %1, %%edi                 \n\t"
1124                 "shrl $9, %%esi                 \n\t"
1125                 "movw %%si, 4096(%%edi, %%eax, 2)\n\t"
1126
1127                 "addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFF
1128                 "adcl %3, %%ebx                 \n\t" //xx+= xInc>>8 + carry
1129                 "addl $1, %%eax                 \n\t"
1130                 "cmpl %2, %%eax                 \n\t"
1131                 " jb 1b                         \n\t"
1132
1133                 :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF),
1134                 "r" (src2)
1135                 : "%eax", "%ebx", "%ecx", "%edi", "%esi"
1136                 );
1137 #ifdef HAVE_MMX2
1138         } //if MMX2 cant be used
1139 #endif
1140 #else
1141       for(i=0;i<dstWidth;i++){
1142           register unsigned int xx=xpos>>16;
1143           register unsigned int xalpha=(xpos&0xFFFF)>>9;
1144           dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
1145           dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
1146           xpos+=xInc;
1147       }
1148 #endif
1149 }
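
/* hcscale packs both chroma planes into one buffer: U lands in
   dst[0..dstWidth-1] and V in dst[2048..2048+dstWidth-1] (the 4096 in the
   asm above is the same offset in bytes).  A caller-side sketch of reading
   one horizontally scaled pair, with uvbuf/i as illustrative names:

        uint16_t u= uvbuf[i];         // U sample, scaled by 128
        uint16_t v= uvbuf[i+2048];    // V sample, scaled by 128
*/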


// *** bilinear scaling and yuv->rgb or yuv->yuv conversion of yv12 slices:
// *** Note: this is called multiple times while decoding a frame, the first time with y==0
// *** Designed to upscale, but may work for downscale too.
// s_xinc = (src_width << 16) / dst_width
// s_yinc = (src_height << 16) / dst_height
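// e.g. a 320 -> 640 upscale gives s_xinc = (320<<16)/640 = 0x8000: the source
// position advances by half a source pixel per destination pixel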
void SwScale_YV12slice(unsigned char* srcptr[],int stride[], int y, int h,
                             uint8_t* dstptr[], int dststride, int dstw, int dstbpp,
                             unsigned int s_xinc,unsigned int s_yinc){

// scaling factors:
//static int s_yinc=(vo_dga_src_height<<16)/vo_dga_vp_height;
//static int s_xinc=(vo_dga_src_width<<8)/vo_dga_vp_width;

unsigned int s_xinc2;

static int s_srcypos; // points to the dst pixel's center in the source (0 is the center of pixel 0,0 in src)
static int s_ypos;

// last horizontally interpolated lines, used to avoid unnecessary calculations
static int s_last_ypos;
static int s_last_y1pos;

static int static_dstw;

#ifdef HAVE_MMX2
// used to detect a horizontal size change
static int old_dstw= -1;
static int old_s_xinc= -1;
#endif

int srcWidth= (dstw*s_xinc + 0x8000)>>16;
int dstUVw= fullUVIpol ? dstw : dstw/2;


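// the generated MMX2 scaler can only upscale (s_xinc <= 1.0 in 16.16 fixed
// point) and needs the output width to be a multiple of 32 and the input
// width a multiple of 16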
#ifdef HAVE_MMX2
canMMX2BeUsed= (s_xinc <= 0x10000 && (dstw&31)==0 && (srcWidth&15)==0) ? 1 : 0;
#endif

// match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
// n-2 is the last chrominance sample available
// FIXME this is not perfect, but no one should notice the difference; the more correct variant
// would be like the vertical one, but that would require special code for the
// first and last pixel
if(canMMX2BeUsed)       s_xinc+= 20;
else                    s_xinc = ((srcWidth-2)<<16)/(dstw-2) - 20;

if(fullUVIpol && dstbpp!=12)    s_xinc2= s_xinc>>1;
else                            s_xinc2= s_xinc;
  // force calculation of the horizontal interpolation of the first line

  if(y==0){
        s_last_ypos=-99;
        s_last_y1pos=-99;
        s_srcypos= s_yinc/2 - 0x8000;
        s_ypos=0;
#ifdef HAVE_MMX2
// can't downscale !!!
        if((old_s_xinc != s_xinc || old_dstw!=dstw) && canMMX2BeUsed)
        {
                uint8_t *fragment;
                int imm8OfPShufW1;
                int imm8OfPShufW2;
                int fragmentLength;

                int xpos, xx, xalpha, i;

                old_s_xinc= s_xinc;
                old_dstw= dstw;

                static_dstw= dstw;

                // create an optimized horizontal scaling routine

                //code fragment

                asm volatile(
                        "jmp 9f                         \n\t"
                // Begin
                        "0:                             \n\t"
                        "movq (%%esi), %%mm0            \n\t" //FIXME Alignment
                        "movq %%mm0, %%mm1              \n\t"
                        "psrlq $8, %%mm0                \n\t"
                        "punpcklbw %%mm7, %%mm1 \n\t"
                        "movq %%mm2, %%mm3              \n\t"
                        "punpcklbw %%mm7, %%mm0 \n\t"
                        "addw %%bx, %%cx                \n\t" //2*xalpha += (4*s_xinc)&0xFFFF
                        "pshufw $0xFF, %%mm1, %%mm1     \n\t"
                        "1:                             \n\t"
                        "adcl %%edx, %%esi              \n\t" //xx+= (4*s_xinc)>>16 + carry
                        "pshufw $0xFF, %%mm0, %%mm0     \n\t"
                        "2:                             \n\t"
                        "psrlw $9, %%mm3                \n\t"
                        "psubw %%mm1, %%mm0             \n\t"
                        "pmullw %%mm3, %%mm0            \n\t"
                        "paddw %%mm6, %%mm2             \n\t" // 2*alpha += xpos&0xFFFF
                        "psllw $7, %%mm1                \n\t"
                        "paddw %%mm1, %%mm0             \n\t"

                        "movq %%mm0, (%%edi, %%eax)     \n\t"

                        "addl $8, %%eax                 \n\t"
                // End
                        "9:                             \n\t"
//              "int $3\n\t"
                        "leal 0b, %0                    \n\t"
                        "leal 1b, %1                    \n\t"
                        "leal 2b, %2                    \n\t"
                        "decl %1                        \n\t"
                        "decl %2                        \n\t"
                        "subl %0, %1                    \n\t"
                        "subl %0, %2                    \n\t"
                        "leal 9b, %3                    \n\t"
                        "subl %0, %3                    \n\t"
                        :"=r" (fragment), "=r" (imm8OfPShufW1), "=r" (imm8OfPShufW2),
                         "=r" (fragmentLength)
                );
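                // the leal/subl block above computes, at run time, the start
                // address of the fragment (label 0), the offsets of the two
                // pshufw immediate bytes (the byte just before labels 1 and 2,
                // hence the decl) and the total fragment length (label 9)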

                xpos= 0; //s_xinc/2 - 0x8000; // difference between pixel centers

                /* choose xinc so that all 8 parts fit exactly
                   Note: we cannot use just 1 part because it would not fit in the code cache */
//              s_xinc2_diff= -((((s_xinc2*(dstw/8))&0xFFFF))/(dstw/8))-10;
//              s_xinc_diff= -((((s_xinc*(dstw/8))&0xFFFF))/(dstw/8));
#ifdef ALT_ERROR
//              s_xinc2_diff+= ((0x10000/(dstw/8)));
#endif
//              s_xinc_diff= s_xinc2_diff*2;

//              s_xinc2+= s_xinc2_diff;
//              s_xinc+= s_xinc_diff;

//              old_s_xinc= s_xinc;

                for(i=0; i<dstw/8; i++)
                {
                        int xx=xpos>>16;

                        if((i&3) == 0)
                        {
                                int a=0;
                                int b=((xpos+s_xinc)>>16) - xx;
                                int c=((xpos+s_xinc*2)>>16) - xx;
                                int d=((xpos+s_xinc*3)>>16) - xx;

                                memcpy(funnyYCode + fragmentLength*i/4, fragment, fragmentLength);

                                funnyYCode[fragmentLength*i/4 + imm8OfPShufW1]=
                                funnyYCode[fragmentLength*i/4 + imm8OfPShufW2]=
                                        a | (b<<2) | (c<<4) | (d<<6);

                                // if we don't need to read 8 bytes then don't :), reduces the chance of
                                // crossing a cache line
                                if(d<3) funnyYCode[fragmentLength*i/4 + 1]= 0x6E;

                                funnyYCode[fragmentLength*(i+4)/4]= RET;
                        }
                        xpos+=s_xinc;
                }
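                /* e.g. for a 2x upscale (s_xinc=0x8000) the first fragment's
                   deltas are a=0, b=0, c=1, d=1, so both pshufw immediates
                   become 0 | 0<<2 | 1<<4 | 1<<6 = 0x50; patching the opcode
                   byte from 0x6F (movq) to 0x6E (movd) when d<3 makes the
                   fragment load only 4 source bytes instead of 8 */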

                xpos= 0; //s_xinc2/2 - 0x10000; // difference between centers of chroma samples
                for(i=0; i<dstUVw/8; i++)
                {
                        int xx=xpos>>16;

                        if((i&3) == 0)
                        {
                                int a=0;
                                int b=((xpos+s_xinc2)>>16) - xx;
                                int c=((xpos+s_xinc2*2)>>16) - xx;
                                int d=((xpos+s_xinc2*3)>>16) - xx;

                                memcpy(funnyUVCode + fragmentLength*i/4, fragment, fragmentLength);

                                funnyUVCode[fragmentLength*i/4 + imm8OfPShufW1]=
                                funnyUVCode[fragmentLength*i/4 + imm8OfPShufW2]=
                                        a | (b<<2) | (c<<4) | (d<<6);

                                // if we don't need to read 8 bytes then don't :), reduces the chance of
                                // crossing a cache line
                                if(d<3) funnyUVCode[fragmentLength*i/4 + 1]= 0x6E;

                                funnyUVCode[fragmentLength*(i+4)/4]= RET;
                        }
                        xpos+=s_xinc2;
                }
//              funnyCode[0]= RET;
        }

#endif // HAVE_MMX2
  } // reset counters

  while(1){
    unsigned char *dest =dstptr[0]+dststride*s_ypos;
    unsigned char *uDest=dstptr[1]+(dststride>>1)*(s_ypos>>1);
    unsigned char *vDest=dstptr[2]+(dststride>>1)*(s_ypos>>1);

    int y0=(s_srcypos + 0xFFFF)>>16;  // first luminance source line number below the dst line
        // points to the dst pixel's center in the source (0 is the center of pixel 0,0 in src)
    int srcuvpos= dstbpp==12 ?  s_srcypos + s_yinc/2 - 0x8000 :
                                s_srcypos - 0x8000;
    int y1=(srcuvpos + 0x1FFFF)>>17; // first chrominance source line number below the dst line
    int yalpha=((s_srcypos-1)&0xFFFF)>>4;
    int uvalpha=((srcuvpos-1)&0x1FFFF)>>5;
    uint16_t *buf0=pix_buf_y[y0&1];             // top line of the interpolated slice
    uint16_t *buf1=pix_buf_y[((y0+1)&1)];       // bottom line of the interpolated slice
    uint16_t *uvbuf0=pix_buf_uv[y1&1];          // top line of the interpolated slice
    uint16_t *uvbuf1=pix_buf_uv[(y1+1)&1];      // bottom line of the interpolated slice
    int i;
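    /* vertical positions are 16.16 fixed point (the chroma fraction is 17
       bits, since chroma has half the vertical resolution): the integer part
       picks the bracketing source lines and the top 12 bits of the fraction
       become yalpha/uvalpha, the weight of the lower line in the yuv2*
       blends below - roughly
           dst = (buf0*(4096-yalpha) + buf1*yalpha) >> 12,
       ignoring the exact rounding */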

    if(y0>=y+h) break; // FIXME wrong, skips last lines, but they are duplicates anyway

    if((y0&1) && dstbpp==12) uvalpha=-1; // there is no alpha if there is no line

    s_ypos++; s_srcypos+=s_yinc;

    //only interpolate the src line horizontally if we didn't do it already
        if(s_last_ypos!=y0)
        {
                unsigned char *src;
                // skip if first line has been horizontally scaled already
                if(s_last_ypos != y0-1)
                {
                        // check if first line is before any available src lines
                        if(y0-1 < y)    src=srcptr[0]+(0     )*stride[0];
                        else            src=srcptr[0]+(y0-y-1)*stride[0];

                        hyscale(buf0, dstw, src, srcWidth, s_xinc);
                }
                // check if second line is after any available src lines
                if(y0-y >= h)   src=srcptr[0]+(h-1)*stride[0];
                else            src=srcptr[0]+(y0-y)*stride[0];

                // the min() is required to avoid reusing lines which were not available
                s_last_ypos= MIN(y0, y+h-1);
                hyscale(buf1, dstw, src, srcWidth, s_xinc);
        }
//      printf("%d %d %d %d\n", y, y1, s_last_y1pos, h);
      // *** horizontal scale U and V lines to temp buffer
        if(s_last_y1pos!=y1)
        {
                uint8_t *src1, *src2;
                // skip if first line has been horizontally scaled already
                if(s_last_y1pos != y1-1)
                {
                        // check if first line is before any available src lines
                        if(y1-y/2-1 < 0)
                        {
                                src1= srcptr[1]+(0)*stride[1];
                                src2= srcptr[2]+(0)*stride[2];
                        }else{
                                src1= srcptr[1]+(y1-y/2-1)*stride[1];
                                src2= srcptr[2]+(y1-y/2-1)*stride[2];
                        }
                        hcscale(uvbuf0, dstUVw, src1, src2, srcWidth, s_xinc2);
                }

                // check if second line is after any available src lines
                if(y1 - y/2 >= h/2)
                {
                        src1= srcptr[1]+(h/2-1)*stride[1];
                        src2= srcptr[2]+(h/2-1)*stride[2];
                }else{
                        src1= srcptr[1]+(y1-y/2)*stride[1];
                        src2= srcptr[2]+(y1-y/2)*stride[2];
                }
                hcscale(uvbuf1, dstUVw, src1, src2, srcWidth, s_xinc2);

                // the min() is required to avoid reusing lines which were not available
                s_last_y1pos= MIN(y1, y/2+h/2-1);
        }
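        // pick the cheapest vertical path: blend straight into the YV12
        // planes, a no-interpolation RGB path when s_yinc is within 10/65536
        // of 1:1, or the general interpolating RGB path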
        if(dstbpp==12) //YV12
                yuv2yuv(buf0, buf1, uvbuf0, uvbuf1, dest, uDest, vDest, dstw, yalpha, uvalpha);
        else if(ABS(s_yinc - 0x10000) < 10)
                yuv2rgb1(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
        else
                yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);

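        // cycle the ordered-dither rows so successive output lines use
        // different offsets (used by the dithering BGR15/16 MMX paths)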
#ifdef HAVE_MMX
        b16Dither= b16Dither1;
        b16Dither1= b16Dither2;
        b16Dither2= b16Dither;

        g16Dither= g16Dither1;
        g16Dither1= g16Dither2;
        g16Dither2= g16Dither;
#endif
  }

#ifdef HAVE_MMX
        __asm __volatile(SFENCE:::"memory");
        __asm __volatile(EMMS);
#endif
}
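
/* A hedged usage sketch (illustrative, not from this file): upscaling one
   whole 320x240 YV12 frame to 640x480 BGR32 in a single slice.
   SwScale_Init() must have been called once beforehand; the chroma entries
   of dstp are only dereferenced for YV12 (dstbpp==12) output.

void example_scale_frame(unsigned char *src[3], int srcStride[3],
                         uint8_t *rgbOut, int rgbStride)
{
        unsigned int xinc= (320<<16)/640;   // 16.16 source step per dst pixel
        unsigned int yinc= (240<<16)/480;
        uint8_t *dstp[3]= {rgbOut, NULL, NULL};

        SwScale_YV12slice(src, srcStride, 0, 240, dstp, rgbStride,
                          640, 32, xinc, yinc);
}
*/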


void SwScale_Init(){
    // generating tables:
    int i;
    for(i=0;i<256;i++){
        clip_table[i]=0;
        clip_table[i+256]=i;
        clip_table[i+512]=255;
        yuvtab_2568[i]=(0x2568*(i-16))+(256<<13);
        yuvtab_3343[i]=0x3343*(i-128);
        yuvtab_0c92[i]=-0x0c92*(i-128);
        yuvtab_1a1e[i]=-0x1a1e*(i-128);
        yuvtab_40cf[i]=0x40cf*(i-128);
    }
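
    /* the yuvtab_* tables hold the YUV->RGB matrix in signed fixed point
       scaled by 2^13 (0x2568/8192 = ~1.17 is the luma range expansion,
       0x3343/8192 = ~1.60 the V->R coefficient), with (256<<13) pre-biasing
       the luma term so that after >>13 the sum directly indexes clip_table,
       whose 256-entry offset saturates the result to 0..255.  A hedged
       sketch of how a red sample could be formed (names illustrative):

        // r = clip( ~1.17*(y-16) + ~1.60*(v-128) )
        uint8_t r= clip_table[ (yuvtab_2568[y] + yuvtab_3343[v]) >> 13 ];
    */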

}