
// Software scaling and colorspace conversion routines for MPlayer

// Original C implementation by A'rpi/ESP-team <arpi@thot.banki.hu>
// current version mostly by Michael Niedermayer (michaelni@gmx.at)
// the parts written by Michael are under GNU GPL

#include <inttypes.h>
#include <string.h>
#include "../config.h"
#include "swscale.h"

//#undef HAVE_MMX2
//#undef HAVE_MMX
//#undef ARCH_X86
#define DITHER1XBPP
int fullUVIpol=0;
// disables the unscaled height version
int allwaysIpol=0;

#define RET 0xC3 //near return opcode
/*
NOTES

known BUGS with known cause (no bug reports please, but patches are welcome :) )
the horizontal MMX2 scaler reads 1-7 samples too much (might cause a sig11)

Supported output formats: BGR15, BGR16, BGR24, BGR32
the BGR15 & BGR16 MMX versions support dithering
Special versions: fast Y 1:1 scaling (no interpolation in y direction)

TODO
more intelligent misalignment avoidance for the horizontal scaler
*/

#define ABS(a) ((a) > 0 ? (a) : (-(a)))
#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))

#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif

#ifdef HAVE_MMX2
#define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif


#ifdef HAVE_MMX
static uint64_t __attribute__((aligned(8))) yCoeff=    0x2568256825682568LL;
static uint64_t __attribute__((aligned(8))) vrCoeff=   0x3343334333433343LL;
static uint64_t __attribute__((aligned(8))) ubCoeff=   0x40cf40cf40cf40cfLL;
static uint64_t __attribute__((aligned(8))) vgCoeff=   0xE5E2E5E2E5E2E5E2LL;
static uint64_t __attribute__((aligned(8))) ugCoeff=   0xF36EF36EF36EF36ELL;
static uint64_t __attribute__((aligned(8))) w400=      0x0400040004000400LL;
static uint64_t __attribute__((aligned(8))) w80=       0x0080008000800080LL;
static uint64_t __attribute__((aligned(8))) w10=       0x0010001000100010LL;
static uint64_t __attribute__((aligned(8))) bm00001111=0x00000000FFFFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm00000111=0x0000000000FFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm11111000=0xFFFFFFFFFF000000LL;

static uint64_t __attribute__((aligned(8))) b16Dither= 0x0004000400040004LL;
static uint64_t __attribute__((aligned(8))) b16Dither1=0x0004000400040004LL;
static uint64_t __attribute__((aligned(8))) b16Dither2=0x0602060206020602LL;
static uint64_t __attribute__((aligned(8))) g16Dither= 0x0002000200020002LL;
static uint64_t __attribute__((aligned(8))) g16Dither1=0x0002000200020002LL;
static uint64_t __attribute__((aligned(8))) g16Dither2=0x0301030103010301LL;

static uint64_t __attribute__((aligned(8))) b16Mask=   0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g16Mask=   0x07E007E007E007E0LL;
static uint64_t __attribute__((aligned(8))) r16Mask=   0xF800F800F800F800LL;
static uint64_t __attribute__((aligned(8))) b15Mask=   0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g15Mask=   0x03E003E003E003E0LL;
static uint64_t __attribute__((aligned(8))) r15Mask=   0x7C007C007C007C00LL;

static uint64_t __attribute__((aligned(8))) temp0;
static uint64_t __attribute__((aligned(8))) asm_yalpha1;
static uint64_t __attribute__((aligned(8))) asm_uvalpha1;
#endif

// temporary storage for 4 yuv lines:
// 16bit for now (mmx likes it more compact)
#ifdef HAVE_MMX
static uint16_t __attribute__((aligned(8))) pix_buf_y[4][2048];
static uint16_t __attribute__((aligned(8))) pix_buf_uv[2][2048*2];
#else
static uint16_t pix_buf_y[4][2048];
static uint16_t pix_buf_uv[2][2048*2];
#endif

// clipping helper table for C implementations:
static unsigned char clip_table[768];

// yuv->rgb conversion tables:
static    int yuvtab_2568[256];
static    int yuvtab_3343[256];
static    int yuvtab_0c92[256];
static    int yuvtab_1a1e[256];
static    int yuvtab_40cf[256];

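// Hedged sketch (not part of this excerpt): how clip_table and the
// yuvtab_* fixed-point tables are presumably filled; the real
// initialization lives in SwScale_Init() further down in this file.
// The constants mirror the table names (0x2568 ~ 1.164*8192 for the Y
// gain, 0x3343 ~ 1.596*8192 for V->R, 0x40cf ~ 2.018*8192 for U->B);
// the (256<<13) bias on Y makes >>13 results land in clip_table's
// identity band.
static void example_init_tables(void)
{
        int i;
        for(i=0; i<256; i++)
        {
                clip_table[i]= 0;       // clamp underflow to 0
                clip_table[i+256]= i;   // identity band
                clip_table[i+512]= 255; // clamp overflow to 255
                yuvtab_2568[i]= (0x2568*(i-16)) + (256<<13);
                yuvtab_3343[i]= 0x3343*(i-128);
                yuvtab_0c92[i]= -0x0c92*(i-128);
                yuvtab_1a1e[i]= -0x1a1e*(i-128);
                yuvtab_40cf[i]= 0x40cf*(i-128);
        }
}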
static uint8_t funnyYCode[10000];
static uint8_t funnyUVCode[10000];

static int canMMX2BeUsed=0;

#define FULL_YSCALEYUV2RGB \
                "pxor %%mm7, %%mm7              \n\t"\
                "movd %6, %%mm6                 \n\t" /*yalpha1*/\
                "punpcklwd %%mm6, %%mm6         \n\t"\
                "punpcklwd %%mm6, %%mm6         \n\t"\
                "movd %7, %%mm5                 \n\t" /*uvalpha1*/\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "xorl %%eax, %%eax              \n\t"\
                "1:                             \n\t"\
                "movq (%0, %%eax, 2), %%mm0     \n\t" /*buf0[eax]*/\
                "movq (%1, %%eax, 2), %%mm1     \n\t" /*buf1[eax]*/\
                "movq (%2, %%eax,2), %%mm2      \n\t" /* uvbuf0[eax]*/\
                "movq (%3, %%eax,2), %%mm3      \n\t" /* uvbuf1[eax]*/\
                "psubw %%mm1, %%mm0             \n\t" /* buf0[eax] - buf1[eax]*/\
                "psubw %%mm3, %%mm2             \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
                "pmulhw %%mm6, %%mm0            \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
                "pmulhw %%mm5, %%mm2            \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
                "psraw $4, %%mm1                \n\t" /* buf0[eax] - buf1[eax] >>4*/\
                "movq 4096(%2, %%eax,2), %%mm4  \n\t" /* uvbuf0[eax+2048]*/\
                "psraw $4, %%mm3                \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
                "paddw %%mm0, %%mm1             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
                "movq 4096(%3, %%eax,2), %%mm0  \n\t" /* uvbuf1[eax+2048]*/\
                "paddw %%mm2, %%mm3             \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
                "psubw %%mm0, %%mm4             \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
                "psubw w80, %%mm1               \n\t" /* 8(Y-16)*/\
                "psubw w400, %%mm3              \n\t" /* 8(U-128)*/\
                "pmulhw yCoeff, %%mm1           \n\t"\
\
\
                "pmulhw %%mm5, %%mm4            \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
                "movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
                "pmulhw ubCoeff, %%mm3          \n\t"\
                "psraw $4, %%mm0                \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
                "pmulhw ugCoeff, %%mm2          \n\t"\
                "paddw %%mm4, %%mm0             \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
                "psubw w400, %%mm0              \n\t" /* (V-128)8*/\
\
\
                "movq %%mm0, %%mm4              \n\t" /* (V-128)8*/\
                "pmulhw vrCoeff, %%mm0          \n\t"\
                "pmulhw vgCoeff, %%mm4          \n\t"\
                "paddw %%mm1, %%mm3             \n\t" /* B*/\
                "paddw %%mm1, %%mm0             \n\t" /* R*/\
                "packuswb %%mm3, %%mm3          \n\t"\
\
                "packuswb %%mm0, %%mm0          \n\t"\
                "paddw %%mm4, %%mm2             \n\t"\
                "paddw %%mm2, %%mm1             \n\t" /* G*/\
\
                "packuswb %%mm1, %%mm1          \n\t"

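// Hedged C equivalent (not in the original source) of the per-sample
// vertical blend the macros above and below compute: pmulhw keeps the
// high 16 bits of the product and psraw $4 pre-scales the second tap,
// so the result is approximately a0*w + a1*(1-w) with w = alpha1/65536.
static inline int example_vblend(int a0, int a1, int alpha1)
{
        return (a1>>4) + (((a0-a1)*alpha1)>>16);
}
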
#define YSCALEYUV2RGB \
                "movd %6, %%mm6                 \n\t" /*yalpha1*/\
                "punpcklwd %%mm6, %%mm6         \n\t"\
                "punpcklwd %%mm6, %%mm6         \n\t"\
                "movq %%mm6, asm_yalpha1        \n\t"\
                "movd %7, %%mm5                 \n\t" /*uvalpha1*/\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "movq %%mm5, asm_uvalpha1       \n\t"\
                "xorl %%eax, %%eax              \n\t"\
                "1:                             \n\t"\
                "movq (%2, %%eax), %%mm2        \n\t" /* uvbuf0[eax]*/\
                "movq (%3, %%eax), %%mm3        \n\t" /* uvbuf1[eax]*/\
                "movq 4096(%2, %%eax), %%mm5    \n\t" /* uvbuf0[eax+2048]*/\
                "movq 4096(%3, %%eax), %%mm4    \n\t" /* uvbuf1[eax+2048]*/\
                "psubw %%mm3, %%mm2             \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
                "psubw %%mm4, %%mm5             \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
                "movq asm_uvalpha1, %%mm0       \n\t"\
                "pmulhw %%mm0, %%mm2            \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
                "pmulhw %%mm0, %%mm5            \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
                "psraw $4, %%mm3                \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
                "psraw $4, %%mm4                \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
                "paddw %%mm2, %%mm3             \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
                "paddw %%mm5, %%mm4             \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
                "psubw w400, %%mm3              \n\t" /* (U-128)8*/\
                "psubw w400, %%mm4              \n\t" /* (V-128)8*/\
                "movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
                "movq %%mm4, %%mm5              \n\t" /* (V-128)8*/\
                "pmulhw ugCoeff, %%mm3          \n\t"\
                "pmulhw vgCoeff, %%mm4          \n\t"\
        /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
                "movq (%0, %%eax, 2), %%mm0     \n\t" /*buf0[eax]*/\
                "movq (%1, %%eax, 2), %%mm1     \n\t" /*buf1[eax]*/\
                "movq 8(%0, %%eax, 2), %%mm6    \n\t" /*buf0[eax]*/\
                "movq 8(%1, %%eax, 2), %%mm7    \n\t" /*buf1[eax]*/\
                "psubw %%mm1, %%mm0             \n\t" /* buf0[eax] - buf1[eax]*/\
                "psubw %%mm7, %%mm6             \n\t" /* buf0[eax] - buf1[eax]*/\
                "pmulhw asm_yalpha1, %%mm0      \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
                "pmulhw asm_yalpha1, %%mm6      \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
                "psraw $4, %%mm1                \n\t" /* buf0[eax] - buf1[eax] >>4*/\
                "psraw $4, %%mm7                \n\t" /* buf0[eax] - buf1[eax] >>4*/\
                "paddw %%mm0, %%mm1             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
                "paddw %%mm6, %%mm7             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
                "pmulhw ubCoeff, %%mm2          \n\t"\
                "pmulhw vrCoeff, %%mm5          \n\t"\
                "psubw w80, %%mm1               \n\t" /* 8(Y-16)*/\
                "psubw w80, %%mm7               \n\t" /* 8(Y-16)*/\
                "pmulhw yCoeff, %%mm1           \n\t"\
                "pmulhw yCoeff, %%mm7           \n\t"\
        /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
                "paddw %%mm3, %%mm4             \n\t"\
                "movq %%mm2, %%mm0              \n\t"\
                "movq %%mm5, %%mm6              \n\t"\
                "movq %%mm4, %%mm3              \n\t"\
                "punpcklwd %%mm2, %%mm2         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm4, %%mm4         \n\t"\
                "paddw %%mm1, %%mm2             \n\t"\
                "paddw %%mm1, %%mm5             \n\t"\
                "paddw %%mm1, %%mm4             \n\t"\
                "punpckhwd %%mm0, %%mm0         \n\t"\
                "punpckhwd %%mm6, %%mm6         \n\t"\
                "punpckhwd %%mm3, %%mm3         \n\t"\
                "paddw %%mm7, %%mm0             \n\t"\
                "paddw %%mm7, %%mm6             \n\t"\
                "paddw %%mm7, %%mm3             \n\t"\
                /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
                "packuswb %%mm0, %%mm2          \n\t"\
                "packuswb %%mm6, %%mm5          \n\t"\
                "packuswb %%mm3, %%mm4          \n\t"\
                "pxor %%mm7, %%mm7              \n\t"

#define YSCALEYUV2RGB1 \
                "xorl %%eax, %%eax              \n\t"\
                "1:                             \n\t"\
                "movq (%2, %%eax), %%mm3        \n\t" /* uvbuf0[eax]*/\
                "movq 4096(%2, %%eax), %%mm4    \n\t" /* uvbuf0[eax+2048]*/\
                "psraw $4, %%mm3                \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
                "psraw $4, %%mm4                \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
                "psubw w400, %%mm3              \n\t" /* (U-128)8*/\
                "psubw w400, %%mm4              \n\t" /* (V-128)8*/\
                "movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
                "movq %%mm4, %%mm5              \n\t" /* (V-128)8*/\
                "pmulhw ugCoeff, %%mm3          \n\t"\
                "pmulhw vgCoeff, %%mm4          \n\t"\
        /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
                "movq (%1, %%eax, 2), %%mm1     \n\t" /*buf0[eax]*/\
                "movq 8(%1, %%eax, 2), %%mm7    \n\t" /*buf0[eax]*/\
                "psraw $4, %%mm1                \n\t" /* buf0[eax] - buf1[eax] >>4*/\
                "psraw $4, %%mm7                \n\t" /* buf0[eax] - buf1[eax] >>4*/\
                "pmulhw ubCoeff, %%mm2          \n\t"\
                "pmulhw vrCoeff, %%mm5          \n\t"\
                "psubw w80, %%mm1               \n\t" /* 8(Y-16)*/\
                "psubw w80, %%mm7               \n\t" /* 8(Y-16)*/\
                "pmulhw yCoeff, %%mm1           \n\t"\
                "pmulhw yCoeff, %%mm7           \n\t"\
        /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
                "paddw %%mm3, %%mm4             \n\t"\
                "movq %%mm2, %%mm0              \n\t"\
                "movq %%mm5, %%mm6              \n\t"\
                "movq %%mm4, %%mm3              \n\t"\
                "punpcklwd %%mm2, %%mm2         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm4, %%mm4         \n\t"\
                "paddw %%mm1, %%mm2             \n\t"\
                "paddw %%mm1, %%mm5             \n\t"\
                "paddw %%mm1, %%mm4             \n\t"\
                "punpckhwd %%mm0, %%mm0         \n\t"\
                "punpckhwd %%mm6, %%mm6         \n\t"\
                "punpckhwd %%mm3, %%mm3         \n\t"\
                "paddw %%mm7, %%mm0             \n\t"\
                "paddw %%mm7, %%mm6             \n\t"\
                "paddw %%mm7, %%mm3             \n\t"\
                /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
                "packuswb %%mm0, %%mm2          \n\t"\
                "packuswb %%mm6, %%mm5          \n\t"\
                "packuswb %%mm3, %%mm4          \n\t"\
                "pxor %%mm7, %%mm7              \n\t"

#define WRITEBGR32 \
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
                        "movq %%mm2, %%mm1              \n\t" /* B */\
                        "movq %%mm5, %%mm6              \n\t" /* R */\
                        "punpcklbw %%mm4, %%mm2         \n\t" /* GBGBGBGB 0 */\
                        "punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R 0 */\
                        "punpckhbw %%mm4, %%mm1         \n\t" /* GBGBGBGB 2 */\
                        "punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R 2 */\
                        "movq %%mm2, %%mm0              \n\t" /* GBGBGBGB 0 */\
                        "movq %%mm1, %%mm3              \n\t" /* GBGBGBGB 2 */\
                        "punpcklwd %%mm5, %%mm0         \n\t" /* 0RGB0RGB 0 */\
                        "punpckhwd %%mm5, %%mm2         \n\t" /* 0RGB0RGB 1 */\
                        "punpcklwd %%mm6, %%mm1         \n\t" /* 0RGB0RGB 2 */\
                        "punpckhwd %%mm6, %%mm3         \n\t" /* 0RGB0RGB 3 */\
\
                        MOVNTQ(%%mm0, (%4, %%eax, 4))\
                        MOVNTQ(%%mm2, 8(%4, %%eax, 4))\
                        MOVNTQ(%%mm1, 16(%4, %%eax, 4))\
                        MOVNTQ(%%mm3, 24(%4, %%eax, 4))\
\
                        "addl $8, %%eax                 \n\t"\
                        "cmpl %5, %%eax                 \n\t"\
                        " jb 1b                         \n\t"

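// Hedged scalar sketch (not in the original): the punpck cascade in
// WRITEBGR32 above produces little-endian 0RGB dwords, i.e. the bytes
// land in memory as B,G,R,0 per pixel.  One store, written out in C:
static inline void example_store_bgr32(uint8_t *dest, int i,
                                       uint8_t r, uint8_t g, uint8_t b)
{
        ((uint32_t*)dest)[i]= b | ((uint32_t)g<<8) | ((uint32_t)r<<16); // top byte stays 0
}
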
#define WRITEBGR16 \
                        "movq %%mm2, %%mm1              \n\t" /* B */\
                        "movq %%mm4, %%mm3              \n\t" /* G */\
                        "movq %%mm5, %%mm6              \n\t" /* R */\
\
                        "punpcklbw %%mm7, %%mm3         \n\t" /* 0G0G0G0G */\
                        "punpcklbw %%mm7, %%mm2         \n\t" /* 0B0B0B0B */\
                        "punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R */\
\
                        "psrlw $3, %%mm2                \n\t"\
                        "psllw $3, %%mm3                \n\t"\
                        "psllw $8, %%mm5                \n\t"\
\
                        "pand g16Mask, %%mm3            \n\t"\
                        "pand r16Mask, %%mm5            \n\t"\
\
                        "por %%mm3, %%mm2               \n\t"\
                        "por %%mm5, %%mm2               \n\t"\
\
                        "punpckhbw %%mm7, %%mm4         \n\t" /* 0G0G0G0G */\
                        "punpckhbw %%mm7, %%mm1         \n\t" /* 0B0B0B0B */\
                        "punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R */\
\
                        "psrlw $3, %%mm1                \n\t"\
                        "psllw $3, %%mm4                \n\t"\
                        "psllw $8, %%mm6                \n\t"\
\
                        "pand g16Mask, %%mm4            \n\t"\
                        "pand r16Mask, %%mm6            \n\t"\
\
                        "por %%mm4, %%mm1               \n\t"\
                        "por %%mm6, %%mm1               \n\t"\
\
                        MOVNTQ(%%mm2, (%4, %%eax, 2))\
                        MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
\
                        "addl $8, %%eax                 \n\t"\
                        "cmpl %5, %%eax                 \n\t"\
                        " jb 1b                         \n\t"

#define WRITEBGR15 \
                        "movq %%mm2, %%mm1              \n\t" /* B */\
                        "movq %%mm4, %%mm3              \n\t" /* G */\
                        "movq %%mm5, %%mm6              \n\t" /* R */\
\
                        "punpcklbw %%mm7, %%mm3         \n\t" /* 0G0G0G0G */\
                        "punpcklbw %%mm7, %%mm2         \n\t" /* 0B0B0B0B */\
                        "punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R */\
\
                        "psrlw $3, %%mm2                \n\t"\
                        "psllw $2, %%mm3                \n\t"\
                        "psllw $7, %%mm5                \n\t"\
\
                        "pand g15Mask, %%mm3            \n\t"\
                        "pand r15Mask, %%mm5            \n\t"\
\
                        "por %%mm3, %%mm2               \n\t"\
                        "por %%mm5, %%mm2               \n\t"\
\
                        "punpckhbw %%mm7, %%mm4         \n\t" /* 0G0G0G0G */\
                        "punpckhbw %%mm7, %%mm1         \n\t" /* 0B0B0B0B */\
                        "punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R */\
\
                        "psrlw $3, %%mm1                \n\t"\
                        "psllw $2, %%mm4                \n\t"\
                        "psllw $7, %%mm6                \n\t"\
\
                        "pand g15Mask, %%mm4            \n\t"\
                        "pand r15Mask, %%mm6            \n\t"\
\
                        "por %%mm4, %%mm1               \n\t"\
                        "por %%mm6, %%mm1               \n\t"\
\
                        MOVNTQ(%%mm2, (%4, %%eax, 2))\
                        MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
\
                        "addl $8, %%eax                 \n\t"\
                        "cmpl %5, %%eax                 \n\t"\
                        " jb 1b                         \n\t"
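
// Hedged scalar equivalents (not in the original) of the channel packing
// done by WRITEBGR16/WRITEBGR15 above: B>>3 fills the low bits, G and R
// are shifted and masked exactly as the g16Mask/r16Mask (565) and
// g15Mask/r15Mask (555) constants do.
static inline uint16_t example_pack565(uint8_t r, uint8_t g, uint8_t b)
{
        return (uint16_t)((b>>3) | ((g<<3)&0x07E0) | ((r<<8)&0xF800));
}

static inline uint16_t example_pack555(uint8_t r, uint8_t g, uint8_t b)
{
        return (uint16_t)((b>>3) | ((g<<2)&0x03E0) | ((r<<7)&0x7C00));
}
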
// FIXME find a faster way to shuffle it to BGR24
#define WRITEBGR24 \
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
                        "movq %%mm2, %%mm1              \n\t" /* B */\
                        "movq %%mm5, %%mm6              \n\t" /* R */\
                        "punpcklbw %%mm4, %%mm2         \n\t" /* GBGBGBGB 0 */\
                        "punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R 0 */\
                        "punpckhbw %%mm4, %%mm1         \n\t" /* GBGBGBGB 2 */\
                        "punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R 2 */\
                        "movq %%mm2, %%mm0              \n\t" /* GBGBGBGB 0 */\
                        "movq %%mm1, %%mm3              \n\t" /* GBGBGBGB 2 */\
                        "punpcklwd %%mm5, %%mm0         \n\t" /* 0RGB0RGB 0 */\
                        "punpckhwd %%mm5, %%mm2         \n\t" /* 0RGB0RGB 1 */\
                        "punpcklwd %%mm6, %%mm1         \n\t" /* 0RGB0RGB 2 */\
                        "punpckhwd %%mm6, %%mm3         \n\t" /* 0RGB0RGB 3 */\
\
                        "movq %%mm0, %%mm4              \n\t" /* 0RGB0RGB 0 */\
                        "psrlq $8, %%mm0                \n\t" /* 00RGB0RG 0 */\
                        "pand bm00000111, %%mm4         \n\t" /* 00000RGB 0 */\
                        "pand bm11111000, %%mm0         \n\t" /* 00RGB000 0.5 */\
                        "por %%mm4, %%mm0               \n\t" /* 00RGBRGB 0 */\
                        "movq %%mm2, %%mm4              \n\t" /* 0RGB0RGB 1 */\
                        "psllq $48, %%mm2               \n\t" /* GB000000 1 */\
                        "por %%mm2, %%mm0               \n\t" /* GBRGBRGB 0 */\
\
                        "movq %%mm4, %%mm2              \n\t" /* 0RGB0RGB 1 */\
                        "psrld $16, %%mm4               \n\t" /* 000R000R 1 */\
                        "psrlq $24, %%mm2               \n\t" /* 0000RGB0 1.5 */\
                        "por %%mm4, %%mm2               \n\t" /* 000RRGBR 1 */\
                        "pand bm00001111, %%mm2         \n\t" /* 0000RGBR 1 */\
                        "movq %%mm1, %%mm4              \n\t" /* 0RGB0RGB 2 */\
                        "psrlq $8, %%mm1                \n\t" /* 00RGB0RG 2 */\
                        "pand bm00000111, %%mm4         \n\t" /* 00000RGB 2 */\
                        "pand bm11111000, %%mm1         \n\t" /* 00RGB000 2.5 */\
                        "por %%mm4, %%mm1               \n\t" /* 00RGBRGB 2 */\
                        "movq %%mm1, %%mm4              \n\t" /* 00RGBRGB 2 */\
                        "psllq $32, %%mm1               \n\t" /* BRGB0000 2 */\
                        "por %%mm1, %%mm2               \n\t" /* BRGBRGBR 1 */\
\
                        "psrlq $32, %%mm4               \n\t" /* 000000RG 2.5 */\
                        "movq %%mm3, %%mm5              \n\t" /* 0RGB0RGB 3 */\
                        "psrlq $8, %%mm3                \n\t" /* 00RGB0RG 3 */\
                        "pand bm00000111, %%mm5         \n\t" /* 00000RGB 3 */\
                        "pand bm11111000, %%mm3         \n\t" /* 00RGB000 3.5 */\
                        "por %%mm5, %%mm3               \n\t" /* 00RGBRGB 3 */\
                        "psllq $16, %%mm3               \n\t" /* RGBRGB00 3 */\
                        "por %%mm4, %%mm3               \n\t" /* RGBRGBRG 2.5 */\
\
                        "leal (%%eax, %%eax, 2), %%ebx  \n\t"\
                        MOVNTQ(%%mm0, (%4, %%ebx))\
                        MOVNTQ(%%mm2, 8(%4, %%ebx))\
                        MOVNTQ(%%mm3, 16(%4, %%ebx))\
\
                        "addl $8, %%eax                 \n\t"\
                        "cmpl %5, %%eax                 \n\t"\
                        " jb 1b                         \n\t"

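// Hedged scalar sketch (not in the original) of what the WRITEBGR24
// shuffle above achieves: three bytes per pixel, B first, no pad byte,
// matching the C fallback below (dest[0]=B, dest[1]=G, dest[2]=R).
static inline void example_store_bgr24(uint8_t *dest, int i,
                                       uint8_t r, uint8_t g, uint8_t b)
{
        dest[3*i+0]= b;
        dest[3*i+1]= g;
        dest[3*i+2]= r;
}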

/**
 * vertically scale YV12 and convert to RGB
 */
static inline void yuv2rgbX(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                            uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
{
        int yalpha1=yalpha^4095;
        int uvalpha1=uvalpha^4095;
        int i;

        if(fullUVIpol)
        {

#ifdef HAVE_MMX
                if(dstbpp == 32)
                {
                        asm volatile(


FULL_YSCALEYUV2RGB
                        "punpcklbw %%mm1, %%mm3         \n\t" // BGBGBGBG
                        "punpcklbw %%mm7, %%mm0         \n\t" // R0R0R0R0

                        "movq %%mm3, %%mm1              \n\t"
                        "punpcklwd %%mm0, %%mm3         \n\t" // BGR0BGR0
                        "punpckhwd %%mm0, %%mm1         \n\t" // BGR0BGR0

                        MOVNTQ(%%mm3, (%4, %%eax, 4))
                        MOVNTQ(%%mm1, 8(%4, %%eax, 4))

                        "addl $4, %%eax                 \n\t"
                        "cmpl %5, %%eax                 \n\t"
                        " jb 1b                         \n\t"


                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==24)
                {
                        asm volatile(

FULL_YSCALEYUV2RGB

                                                                // lsb ... msb
                        "punpcklbw %%mm1, %%mm3         \n\t" // BGBGBGBG
                        "punpcklbw %%mm7, %%mm0         \n\t" // R0R0R0R0

                        "movq %%mm3, %%mm1              \n\t"
                        "punpcklwd %%mm0, %%mm3         \n\t" // BGR0BGR0
                        "punpckhwd %%mm0, %%mm1         \n\t" // BGR0BGR0

                        "movq %%mm3, %%mm2              \n\t" // BGR0BGR0
                        "psrlq $8, %%mm3                \n\t" // GR0BGR00
                        "pand bm00000111, %%mm2         \n\t" // BGR00000
                        "pand bm11111000, %%mm3         \n\t" // 000BGR00
                        "por %%mm2, %%mm3               \n\t" // BGRBGR00
                        "movq %%mm1, %%mm2              \n\t"
                        "psllq $48, %%mm1               \n\t" // 000000BG
                        "por %%mm1, %%mm3               \n\t" // BGRBGRBG

                        "movq %%mm2, %%mm1              \n\t" // BGR0BGR0
                        "psrld $16, %%mm2               \n\t" // R000R000
                        "psrlq $24, %%mm1               \n\t" // 0BGR0000
                        "por %%mm2, %%mm1               \n\t" // RBGRR000

                        "movl %4, %%ebx                 \n\t"
                        "addl %%eax, %%ebx              \n\t"

#ifdef HAVE_MMX2
                        //FIXME Alignment
                        "movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
                        "movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
#else
                        "movd %%mm3, (%%ebx, %%eax, 2)  \n\t"
                        "psrlq $32, %%mm3               \n\t"
                        "movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
                        "movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
#endif
                        "addl $4, %%eax                 \n\t"
                        "cmpl %5, %%eax                 \n\t"
                        " jb 1b                         \n\t"

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax", "%ebx"
                        );
                }
                else if(dstbpp==15)
                {
                        asm volatile(

FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
                        "paddusb b16Dither, %%mm1       \n\t"
                        "paddusb b16Dither, %%mm0       \n\t"
                        "paddusb b16Dither, %%mm3       \n\t"
#endif
                        "punpcklbw %%mm7, %%mm1         \n\t" // 0G0G0G0G
                        "punpcklbw %%mm7, %%mm3         \n\t" // 0B0B0B0B
                        "punpcklbw %%mm7, %%mm0         \n\t" // 0R0R0R0R

                        "psrlw $3, %%mm3                \n\t"
                        "psllw $2, %%mm1                \n\t"
                        "psllw $7, %%mm0                \n\t"
                        "pand g15Mask, %%mm1            \n\t"
                        "pand r15Mask, %%mm0            \n\t"

                        "por %%mm3, %%mm1               \n\t"
                        "por %%mm1, %%mm0               \n\t"

                        MOVNTQ(%%mm0, (%4, %%eax, 2))

                        "addl $4, %%eax                 \n\t"
                        "cmpl %5, %%eax                 \n\t"
                        " jb 1b                         \n\t"

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==16)
                {
                        asm volatile(

FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
                        "paddusb g16Dither, %%mm1       \n\t"
                        "paddusb b16Dither, %%mm0       \n\t"
                        "paddusb b16Dither, %%mm3       \n\t"
#endif
                        "punpcklbw %%mm7, %%mm1         \n\t" // 0G0G0G0G
                        "punpcklbw %%mm7, %%mm3         \n\t" // 0B0B0B0B
                        "punpcklbw %%mm7, %%mm0         \n\t" // 0R0R0R0R

                        "psrlw $3, %%mm3                \n\t"
                        "psllw $3, %%mm1                \n\t"
                        "psllw $8, %%mm0                \n\t"
                        "pand g16Mask, %%mm1            \n\t"
                        "pand r16Mask, %%mm0            \n\t"

                        "por %%mm3, %%mm1               \n\t"
                        "por %%mm1, %%mm0               \n\t"

                        MOVNTQ(%%mm0, (%4, %%eax, 2))

                        "addl $4, %%eax                 \n\t"
                        "cmpl %5, %%eax                 \n\t"
                        " jb 1b                         \n\t"

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
#else
                if(dstbpp==32 || dstbpp==24)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
                                int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
                                dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
                                dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
                                dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
                                dest+=dstbpp>>3;
                        }
                }
                else if(dstbpp==16)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
                                int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);

                                ((uint16_t*)dest)[0] =
                                        (clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
                                        ((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<3)&0x07E0) |
                                        ((clip_table[(Y + yuvtab_3343[V]) >>13]<<8)&0xF800);
                                dest+=2;
                        }
                }
                else if(dstbpp==15)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
                                int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);

                                ((uint16_t*)dest)[0] =
                                        (clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
                                        ((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<2)&0x03E0) |
                                        ((clip_table[(Y + yuvtab_3343[V]) >>13]<<7)&0x7C00);
                                dest+=2;
                        }
                }
#endif
        } //FULL_UV_IPOL
        else
        {
#ifdef HAVE_MMX
                if(dstbpp == 32)
                {
                        asm volatile(
                                YSCALEYUV2RGB
                                WRITEBGR32

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==24)
                {
                        asm volatile(
                                YSCALEYUV2RGB
                                WRITEBGR24

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax", "%ebx"
                        );
                }
                else if(dstbpp==15)
                {
                        asm volatile(
                                YSCALEYUV2RGB
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                                "paddusb b16Dither, %%mm2       \n\t"
                                "paddusb b16Dither, %%mm4       \n\t"
                                "paddusb b16Dither, %%mm5       \n\t"
#endif

                                WRITEBGR15

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==16)
                {
                        asm volatile(
                                YSCALEYUV2RGB
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                                "paddusb g16Dither, %%mm2       \n\t"
                                "paddusb b16Dither, %%mm4       \n\t"
                                "paddusb b16Dither, %%mm5       \n\t"
#endif

                                WRITEBGR16

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
#else
//FIXME unroll C loop and don't recalculate UV
                if(dstbpp==32 || dstbpp==24)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
                                int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);
                                dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
                                dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
                                dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
                                dest+=dstbpp>>3;
                        }
                }
                else if(dstbpp==16)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
                                int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

                                ((uint16_t*)dest)[0] =
                                        (clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
                                        ((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<3)&0x07E0) |
                                        ((clip_table[(Y + yuvtab_3343[V]) >>13]<<8)&0xF800);
                                dest+=2;
                        }
                }
                else if(dstbpp==15)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
                                int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

                                ((uint16_t*)dest)[0] =
                                        (clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
                                        ((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<2)&0x03E0) |
                                        ((clip_table[(Y + yuvtab_3343[V]) >>13]<<7)&0x7C00);
                                dest+=2;
                        }
                }
#endif
        } //!FULL_UV_IPOL
}
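
// Hedged usage sketch (not in the original; the real caller is the slice
// function further down): blending two horizontally prescaled lines from
// the pix_buf ring buffers into one BGR32 destination line.  'yfrac' and
// 'uvfrac' are illustrative names for the 12-bit vertical positions.
static void example_output_line(uint8_t *dest, int dstw, int yfrac, int uvfrac)
{
        yuv2rgbX(pix_buf_y[0], pix_buf_y[1],
                 pix_buf_uv[0], pix_buf_uv[1],
                 dest, dstw, yfrac, uvfrac, 32);
}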

/**
 * YV12 to RGB without scaling or interpolating
 */
static inline void yuv2rgb1(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                            uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
{
        int yalpha1=yalpha^4095;
        int uvalpha1=uvalpha^4095;
        int i;
        if(fullUVIpol || allwaysIpol)
        {
                yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
                return;
        }
#ifdef HAVE_MMX
                if(dstbpp == 32)
                {
                        asm volatile(
                                YSCALEYUV2RGB1
                                WRITEBGR32
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==24)
                {
                        asm volatile(
                                YSCALEYUV2RGB1
                                WRITEBGR24
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax", "%ebx"
                        );
                }
                else if(dstbpp==15)
                {
                        asm volatile(
                                YSCALEYUV2RGB1
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                                "paddusb b16Dither, %%mm2       \n\t"
                                "paddusb b16Dither, %%mm4       \n\t"
                                "paddusb b16Dither, %%mm5       \n\t"
#endif
                                WRITEBGR15
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==16)
                {
                        asm volatile(
                                YSCALEYUV2RGB1
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                                "paddusb g16Dither, %%mm2       \n\t"
                                "paddusb b16Dither, %%mm4       \n\t"
                                "paddusb b16Dither, %%mm5       \n\t"
#endif

                                WRITEBGR16
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
#else
//FIXME unroll C loop and don't recalculate UV
                if(dstbpp==32 || dstbpp==24)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[buf0[i]>>7];
                                int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
                                int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);
                                dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
                                dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
                                dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
                                dest+=dstbpp>>3;
                        }
                }
                else if(dstbpp==16)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[buf0[i]>>7];
                                int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
                                int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

                                ((uint16_t*)dest)[0] =
                                        (clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
                                        ((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<3)&0x07E0) |
                                        ((clip_table[(Y + yuvtab_3343[V]) >>13]<<8)&0xF800);
                                dest+=2;
                        }
                }
                else if(dstbpp==15)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[buf0[i]>>7];
                                int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
                                int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

                                ((uint16_t*)dest)[0] =
                                        (clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
                                        ((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<2)&0x03E0) |
                                        ((clip_table[(Y + yuvtab_3343[V]) >>13]<<7)&0x7C00);
                                dest+=2;
                        }
                }
#endif
}
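
// Hedged note (not in the original): the horizontal scalers below store
// samples scaled by 128 (7 fractional bits), which is why the unscaled
// luma path above uses buf0[i]>>7 to recover a plain 8-bit Y value.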

static inline void hyscale(uint16_t *dst, int dstWidth, uint8_t *src, int srcWidth, int xInc)
{
        int i;
        unsigned int xpos=0;
        // *** horizontal scale Y line to temp buffer
#ifdef ARCH_X86

#ifdef HAVE_MMX2
        if(canMMX2BeUsed)
        {
                asm volatile(
                        "pxor %%mm7, %%mm7              \n\t"
                        "pxor %%mm2, %%mm2              \n\t" // 2*xalpha
                        "movd %5, %%mm6                 \n\t" // xInc&0xFFFF
                        "punpcklwd %%mm6, %%mm6         \n\t"
                        "punpcklwd %%mm6, %%mm6         \n\t"
                        "movq %%mm6, %%mm2              \n\t"
                        "psllq $16, %%mm2               \n\t"
                        "paddw %%mm6, %%mm2             \n\t"
                        "psllq $16, %%mm2               \n\t"
                        "paddw %%mm6, %%mm2             \n\t"
                        "psllq $16, %%mm2               \n\t" //0,t,2t,3t               t=xInc&0xFFFF
                        "movq %%mm2, temp0              \n\t"
                        "movd %4, %%mm6                 \n\t" //(xInc*4)&0xFFFF
                        "punpcklwd %%mm6, %%mm6         \n\t"
                        "punpcklwd %%mm6, %%mm6         \n\t"
                        "xorl %%eax, %%eax              \n\t" // i
                        "movl %0, %%esi                 \n\t" // src
                        "movl %1, %%edi                 \n\t" // buf1
                        "movl %3, %%edx                 \n\t" // (xInc*4)>>16
                        "xorl %%ecx, %%ecx              \n\t"
                        "xorl %%ebx, %%ebx              \n\t"
                        "movw %4, %%bx                  \n\t" // (xInc*4)&0xFFFF
#ifdef HAVE_MMX2
#define FUNNY_Y_CODE \
                        "prefetchnta 1024(%%esi)        \n\t"\
                        "prefetchnta 1056(%%esi)        \n\t"\
                        "prefetchnta 1088(%%esi)        \n\t"\
                        "call funnyYCode                \n\t"\
                        "movq temp0, %%mm2              \n\t"\
                        "xorl %%ecx, %%ecx              \n\t"
#else
#define FUNNY_Y_CODE \
                        "call funnyYCode                \n\t"\
                        "movq temp0, %%mm2              \n\t"\
                        "xorl %%ecx, %%ecx              \n\t"
#endif
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE

                        :: "m" (src), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
                        "m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF)
                        : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
                );
                for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth-1; i--) dst[i] = src[srcWidth-1]*128;
        }
        else
        {
#endif
        // no MMX2; plain x86 asm version
        asm volatile(
                "xorl %%eax, %%eax              \n\t" // i
                "xorl %%ebx, %%ebx              \n\t" // xx
                "xorl %%ecx, %%ecx              \n\t" // 2*xalpha
                "1:                             \n\t"
                "movzbl  (%0, %%ebx), %%edi     \n\t" //src[xx]
                "movzbl 1(%0, %%ebx), %%esi     \n\t" //src[xx+1]
                "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
                "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
                "shll $16, %%edi                \n\t"
                "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
                "movl %1, %%edi                 \n\t"
                "shrl $9, %%esi                 \n\t"
                "movw %%si, (%%edi, %%eax, 2)   \n\t"
                "addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFFFF
                "adcl %3, %%ebx                 \n\t" //xx+= xInc>>16 + carry

                "movzbl (%0, %%ebx), %%edi      \n\t" //src[xx]
                "movzbl 1(%0, %%ebx), %%esi     \n\t" //src[xx+1]
                "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
                "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
                "shll $16, %%edi                \n\t"
                "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
                "movl %1, %%edi                 \n\t"
                "shrl $9, %%esi                 \n\t"
                "movw %%si, 2(%%edi, %%eax, 2)  \n\t"
                "addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFFFF
                "adcl %3, %%ebx                 \n\t" //xx+= xInc>>16 + carry


                "addl $2, %%eax                 \n\t"
                "cmpl %2, %%eax                 \n\t"
                " jb 1b                         \n\t"


                :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
                : "%eax", "%ebx", "%ecx", "%edi", "%esi"
                );
#ifdef HAVE_MMX2
        } //if MMX2 can't be used
#endif
#else
        for(i=0;i<dstWidth;i++){
                register unsigned int xx=xpos>>16;
                register unsigned int xalpha=(xpos&0xFFFF)>>9;
                dst[i]=(src[xx]*(xalpha^127)+src[xx+1]*xalpha);
                xpos+=xInc;
        }
#endif
}
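
// Hedged worked example (not in the original): for srcWidth=320 and
// dstWidth=640 the caller passes xInc = (320<<16)/640 = 0x8000, i.e. 0.5
// in 16.16 fixed point.  Per output pixel i the C fallback above computes
//   xx     = xpos>>16          (integer source position)
//   xalpha = (xpos&0xFFFF)>>9  (7-bit blend weight, 0..127)
// and dst[i] = src[xx]*(127-xalpha) + src[xx+1]*xalpha, a two-tap filter
// whose output carries the 7 fractional bits mentioned above.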
990
991 inline static void hcscale(uint16_t *dst, int dstWidth,
992                                 uint8_t *src1, uint8_t *src2, int srcWidth, int xInc)
993 {
994         int xpos=0;
995         int i;
996 #ifdef ARCH_X86
997 #ifdef HAVE_MMX2
998         if(canMMX2BeUsed)
999         {
1000                 asm volatile(
1001                 "pxor %%mm7, %%mm7              \n\t"
1002                 "pxor %%mm2, %%mm2              \n\t" // 2*xalpha
1003                 "movd %5, %%mm6                 \n\t" // xInc&0xFFFF
1004                 "punpcklwd %%mm6, %%mm6         \n\t"
1005                 "punpcklwd %%mm6, %%mm6         \n\t"
1006                 "movq %%mm6, %%mm2              \n\t"
1007                 "psllq $16, %%mm2               \n\t"
1008                 "paddw %%mm6, %%mm2             \n\t"
1009                 "psllq $16, %%mm2               \n\t"
1010                 "paddw %%mm6, %%mm2             \n\t"
1011                 "psllq $16, %%mm2               \n\t" //0,t,2t,3t               t=xInc&0xFFFF
1012                 "movq %%mm2, temp0              \n\t"
1013                 "movd %4, %%mm6                 \n\t" //(xInc*4)&0xFFFF
1014                 "punpcklwd %%mm6, %%mm6         \n\t"
1015                 "punpcklwd %%mm6, %%mm6         \n\t"
1016                 "xorl %%eax, %%eax              \n\t" // i
1017                 "movl %0, %%esi                 \n\t" // src
1018                 "movl %1, %%edi                 \n\t" // buf1
1019                 "movl %3, %%edx                 \n\t" // (xInc*4)>>16
1020                 "xorl %%ecx, %%ecx              \n\t"
1021                 "xorl %%ebx, %%ebx              \n\t"
1022                 "movw %4, %%bx                  \n\t" // (xInc*4)&0xFFFF
1023
1024 #ifdef HAVE_MMX2
1025 #define FUNNYUVCODE \
1026                         "prefetchnta 1024(%%esi)        \n\t"\
1027                         "prefetchnta 1056(%%esi)        \n\t"\
1028                         "prefetchnta 1088(%%esi)        \n\t"\
1029                         "call funnyUVCode               \n\t"\
1030                         "movq temp0, %%mm2              \n\t"\
1031                         "xorl %%ecx, %%ecx              \n\t"
1032 #else
1033 #define FUNNYUVCODE \
1034                         "call funnyUVCode               \n\t"\
1035                         "movq temp0, %%mm2              \n\t"\
1036                         "xorl %%ecx, %%ecx              \n\t"
1037 #endif
1038
1039 FUNNYUVCODE
1040 FUNNYUVCODE
1041 FUNNYUVCODE
1042 FUNNYUVCODE
1043
1044 FUNNYUVCODE
1045 FUNNYUVCODE
1046 FUNNYUVCODE
1047 FUNNYUVCODE
1048
1049
1050                 "xorl %%eax, %%eax              \n\t" // i
1051                 "movl %6, %%esi                 \n\t" // src
1052                 "movl %1, %%edi                 \n\t" // buf1
1053                 "addl $4096, %%edi              \n\t"
1054
1055 FUNNYUVCODE
1056 FUNNYUVCODE
1057 FUNNYUVCODE
1058 FUNNYUVCODE
1059
1060 FUNNYUVCODE
1061 FUNNYUVCODE
1062 FUNNYUVCODE
1063 FUNNYUVCODE
1064
1065                 :: "m" (src1), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
1066                   "m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF), "m" (src2)
1067                 : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
1068         );
1069                 for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth/2-1; i--)
1070                 {
1071                         dst[i] = src1[srcWidth/2-1]*128;
1072                         dst[i+2048] = src2[srcWidth/2-1]*128;
1073                 }
1074         }
1075         else
1076         {
1077 #endif
1078         asm volatile(
1079                 "xorl %%eax, %%eax              \n\t" // i
1080                 "xorl %%ebx, %%ebx              \n\t" // xx
1081                 "xorl %%ecx, %%ecx              \n\t" // 2*xalpha
1082                 "1:                             \n\t"
1083                 "movl %0, %%esi                 \n\t"
1084                 "movzbl  (%%esi, %%ebx), %%edi  \n\t" //src[xx]
1085                 "movzbl 1(%%esi, %%ebx), %%esi  \n\t" //src[xx+1]
1086                 "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
1087                 "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
1088                 "shll $16, %%edi                \n\t"
1089                 "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1090                 "movl %1, %%edi                 \n\t"
1091                 "shrl $9, %%esi                 \n\t"
1092                 "movw %%si, (%%edi, %%eax, 2)   \n\t"
1093
1094                 "movzbl  (%5, %%ebx), %%edi     \n\t" //src[xx]
1095                 "movzbl 1(%5, %%ebx), %%esi     \n\t" //src[xx+1]
1096                 "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
1097                 "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
1098                 "shll $16, %%edi                \n\t"
1099                 "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1100                 "movl %1, %%edi                 \n\t"
1101                 "shrl $9, %%esi                 \n\t"
1102                 "movw %%si, 4096(%%edi, %%eax, 2)\n\t"
1103
                "addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFFFF
                "adcl %3, %%ebx                 \n\t" //xx+= xInc>>16 + carry
1106                 "addl $1, %%eax                 \n\t"
1107                 "cmpl %2, %%eax                 \n\t"
1108                 " jb 1b                         \n\t"
1109
1110                 :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF),
1111                 "r" (src2)
1112                 : "%eax", "%ebx", "%ecx", "%edi", "%esi"
1113                 );
1114 #ifdef HAVE_MMX2
        } //if MMX2 can't be used
1116 #endif
1117 #else
      for(i=0;i<dstWidth;i++){
          register unsigned int xx=xpos>>16;             // integer part of the 16.16 position
          register unsigned int xalpha=(xpos&0xFFFF)>>9; // 7-bit fractional weight
          // xalpha is at most 127, so xalpha^127 == 127-xalpha
          dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
          dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
          xpos+=xInc;
      }
1125 #endif
1126 }
1127
1128
1129 // *** bilinear scaling and yuv->rgb conversion of yv12 slices:
1130 // *** Note: it's called multiple times while decoding a frame, first time y==0
1131 // *** Designed to upscale, but may work for downscale too.
1132 // s_xinc = (src_width << 16) / dst_width
1133 // s_yinc = (src_height << 16) / dst_height
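// e.g. (illustrative numbers only): src_width=320, dst_width=640 gives
// s_xinc = (320<<16)/640 = 0x8000, i.e. the 16.16 source position advances
// half a pixel per dst pixel; its high 16 bits pick the sample pair and its
// low 16 bits are the interpolation weight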
1134 void SwScale_YV12slice_brg24(unsigned char* srcptr[],int stride[], int y, int h,
1135                              unsigned char* dstptr, int dststride, int dstw, int dstbpp,
1136                              unsigned int s_xinc,unsigned int s_yinc){
1137
1138 // scaling factors:
1139 //static int s_yinc=(vo_dga_src_height<<16)/vo_dga_vp_height;
1140 //static int s_xinc=(vo_dga_src_width<<8)/vo_dga_vp_width;
1141
1142 unsigned int s_xinc2;
1143
static int s_srcypos; // points to the dst pixel's center in the source (0 is the center of pixel 0,0 in src)
1145 static int s_ypos;
1146
// last horizontally interpolated lines, used to avoid unnecessary calculations
1148 static int s_last_ypos;
1149 static int s_last_y1pos;
1150
1151 static int static_dstw;
1152
1153 #ifdef HAVE_MMX2
1154 // used to detect a horizontal size change
1155 static int old_dstw= -1;
1156 static int old_s_xinc= -1;
1157 #endif
1158
1159 int srcWidth= (dstw*s_xinc + 0x8000)>>16;
1160 int dstUVw= fullUVIpol ? dstw : dstw/2;
1161
1162
1163 #ifdef HAVE_MMX2
1164 canMMX2BeUsed= (s_xinc <= 0x10000 && (dstw&31)==0 && (srcWidth&15)==0) ? 1 : 0;
1165 #endif
1166
// match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
// n-2 is the last chrominance sample available
// FIXME this is not perfect, but no one should notice the difference; the more correct
// variant would be like the vertical one, but that would require some special code for
// the first and last pixel
1172 if(canMMX2BeUsed)       s_xinc+= 20;
1173 else                    s_xinc = ((srcWidth-2)<<16)/(dstw-2) - 20;
1174
1175 if(fullUVIpol)  s_xinc2= s_xinc>>1;
1176 else            s_xinc2= s_xinc;
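// the chroma planes have half the horizontal resolution, so scaling them to
// the full dst width (fullUVIpol) needs half the luma increment, while
// scaling to dstw/2 reuses it unchanged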
1177   // force calculation of the horizontal interpolation of the first line
1178
1179   if(y==0){
1180         s_last_ypos=-99;
1181         s_last_y1pos=-99;
1182         s_srcypos= s_yinc/2 - 0x8000;
1183         s_ypos=0;
1184 #ifdef HAVE_MMX2
// can't downscale !!!
1186         if((old_s_xinc != s_xinc || old_dstw!=dstw) && canMMX2BeUsed)
1187         {
1188                 uint8_t *fragment;
1189                 int imm8OfPShufW1;
1190                 int imm8OfPShufW2;
1191                 int fragmentLength;
1192
1193                 int xpos, xx, xalpha, i;
1194
1195                 old_s_xinc= s_xinc;
1196                 old_dstw= dstw;
1197
1198                 static_dstw= dstw;
1199
1200                 // create an optimized horizontal scaling routine
1201
1202                 //code fragment
1203
1204                 asm volatile(
1205                         "jmp 9f                         \n\t"
1206                 // Begin
1207                         "0:                             \n\t"
1208                         "movq (%%esi), %%mm0            \n\t" //FIXME Alignment
1209                         "movq %%mm0, %%mm1              \n\t"
1210                         "psrlq $8, %%mm0                \n\t"
1211                         "punpcklbw %%mm7, %%mm1 \n\t"
1212                         "movq %%mm2, %%mm3              \n\t"
1213                         "punpcklbw %%mm7, %%mm0 \n\t"
1214                         "addw %%bx, %%cx                \n\t" //2*xalpha += (4*s_xinc)&0xFFFF
1215                         "pshufw $0xFF, %%mm1, %%mm1     \n\t"
1216                         "1:                             \n\t"
1217                         "adcl %%edx, %%esi              \n\t" //xx+= (4*s_xinc)>>16 + carry
1218                         "pshufw $0xFF, %%mm0, %%mm0     \n\t"
1219                         "2:                             \n\t"
1220                         "psrlw $9, %%mm3                \n\t"
1221                         "psubw %%mm1, %%mm0             \n\t"
1222                         "pmullw %%mm3, %%mm0            \n\t"
1223                         "paddw %%mm6, %%mm2             \n\t" // 2*alpha += xpos&0xFFFF
1224                         "psllw $7, %%mm1                \n\t"
1225                         "paddw %%mm1, %%mm0             \n\t"
1226
1227                         "movq %%mm0, (%%edi, %%eax)     \n\t"
1228
1229                         "addl $8, %%eax                 \n\t"
1230                 // End
1231                         "9:                             \n\t"
1232 //              "int $3\n\t"
1233                         "leal 0b, %0                    \n\t"
1234                         "leal 1b, %1                    \n\t"
1235                         "leal 2b, %2                    \n\t"
1236                         "decl %1                        \n\t"
1237                         "decl %2                        \n\t"
1238                         "subl %0, %1                    \n\t"
1239                         "subl %0, %2                    \n\t"
1240                         "leal 9b, %3                    \n\t"
1241                         "subl %0, %3                    \n\t"
1242                         :"=r" (fragment), "=r" (imm8OfPShufW1), "=r" (imm8OfPShufW2),
1243                          "=r" (fragmentLength)
1244                 );
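                // the asm above never executes the fragment here: it jumps
                // straight to label 9 and only takes the addresses of the
                // labels, yielding the template start, the byte offsets of
                // the two pshufw immediates (patched per position below) and
                // the template length for the memcpy()s into funny*Code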
1245
1246                 xpos= 0; //s_xinc/2 - 0x8000; // difference between pixel centers
1247
1248                 /* choose xinc so that all 8 parts fit exactly
1249                    Note: we cannot use just 1 part because it would not fit in the code cache */
1250 //              s_xinc2_diff= -((((s_xinc2*(dstw/8))&0xFFFF))/(dstw/8))-10;
1251 //              s_xinc_diff= -((((s_xinc*(dstw/8))&0xFFFF))/(dstw/8));
1252 #ifdef ALT_ERROR
1253 //              s_xinc2_diff+= ((0x10000/(dstw/8)));
1254 #endif
1255 //              s_xinc_diff= s_xinc2_diff*2;
1256
1257 //              s_xinc2+= s_xinc2_diff;
1258 //              s_xinc+= s_xinc_diff;
1259
1260 //              old_s_xinc= s_xinc;
1261
1262                 for(i=0; i<dstw/8; i++)
1263                 {
1264                         int xx=xpos>>16;
1265
1266                         if((i&3) == 0)
1267                         {
1268                                 int a=0;
1269                                 int b=((xpos+s_xinc)>>16) - xx;
1270                                 int c=((xpos+s_xinc*2)>>16) - xx;
1271                                 int d=((xpos+s_xinc*3)>>16) - xx;
1272
1273                                 memcpy(funnyYCode + fragmentLength*i/4, fragment, fragmentLength);
1274
1275                                 funnyYCode[fragmentLength*i/4 + imm8OfPShufW1]=
1276                                 funnyYCode[fragmentLength*i/4 + imm8OfPShufW2]=
1277                                         a | (b<<2) | (c<<4) | (d<<6);
1278
                                // if we don't need to read 8 bytes then don't :)
                                // this reduces the chance of crossing a cache line
1281                                 if(d<3) funnyYCode[fragmentLength*i/4 + 1]= 0x6E;
1282
1283                                 funnyYCode[fragmentLength*(i+4)/4]= RET;
1284                         }
1285                         xpos+=s_xinc;
1286                 }
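                // the packed value built above (and again for chroma below) is a
                // pshufw source-word selector: a,b,c,d are the per-pixel source
                // steps, so e.g. a=0,b=1,c=1,d=2 packs to 0x94 = 10 01 01 00b and
                // picks input words 0,1,1,2 for four output pixels
                // (illustrative numbers, not from a real scaling ratio)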
1287
1288                 xpos= 0; //s_xinc2/2 - 0x10000; // difference between centers of chrom samples
1289                 for(i=0; i<dstUVw/8; i++)
1290                 {
1291                         int xx=xpos>>16;
1292
1293                         if((i&3) == 0)
1294                         {
1295                                 int a=0;
1296                                 int b=((xpos+s_xinc2)>>16) - xx;
1297                                 int c=((xpos+s_xinc2*2)>>16) - xx;
1298                                 int d=((xpos+s_xinc2*3)>>16) - xx;
1299
1300                                 memcpy(funnyUVCode + fragmentLength*i/4, fragment, fragmentLength);
1301
1302                                 funnyUVCode[fragmentLength*i/4 + imm8OfPShufW1]=
1303                                 funnyUVCode[fragmentLength*i/4 + imm8OfPShufW2]=
1304                                         a | (b<<2) | (c<<4) | (d<<6);
1305
                                // if we don't need to read 8 bytes then don't :)
                                // this reduces the chance of crossing a cache line
1308                                 if(d<3) funnyUVCode[fragmentLength*i/4 + 1]= 0x6E;
1309
1310                                 funnyUVCode[fragmentLength*(i+4)/4]= RET;
1311                         }
1312                         xpos+=s_xinc2;
1313                 }
1314 //              funnyCode[0]= RET;
1315         }
1316
1317 #endif // HAVE_MMX2
1318   } // reset counters
1319
1320   while(1){
1321     unsigned char *dest=dstptr+dststride*s_ypos;
1322     int y0=(s_srcypos + 0xFFFF)>>16;  // first luminance source line number below the dst line
        // points to the dst pixel's center in the source (0 is the center of pixel 0,0 in src)
1324     int srcuvpos= s_srcypos + s_yinc/2 - 0x8000;
1325     int y1=(srcuvpos + 0x1FFFF)>>17; // first chrominance source line number below the dst line
1326     int yalpha=((s_srcypos-1)&0xFFFF)>>4;
1327     int uvalpha=((srcuvpos-1)&0x1FFFF)>>5;
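    // y0 is the ceiling of the 16.16 luma source position (y1 likewise for
    // chroma, at half vertical resolution); yalpha and uvalpha are the
    // 12-bit fractional weights of the lower source line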
1328     uint16_t *buf0=pix_buf_y[y0&1];             // top line of the interpolated slice
1329     uint16_t *buf1=pix_buf_y[((y0+1)&1)];       // bottom line of the interpolated slice
1330     uint16_t *uvbuf0=pix_buf_uv[y1&1];          // top line of the interpolated slice
1331     uint16_t *uvbuf1=pix_buf_uv[(y1+1)&1];      // bottom line of the interpolated slice
1332     int i;
1333
    if(y0>=y+h) break; // FIXME wrong, skips last lines, but they are duplicates anyway
1335
    // if this is after the last line then use only the last src line
1337  /*   if(y0>=y+h)
1338     {
1339         buf1= buf0;
1340         s_last_ypos=y0;
1341     }
1342     if(y1>=(y+h)/2)
1343     {
1344         uvbuf1= uvbuf0;
1345         s_last_y1pos=y1;
1346     }
1347 */
1348
1349     s_ypos++; s_srcypos+=s_yinc;
1350
    //only interpolate the src line horizontally if we didn't do it already
1352         if(s_last_ypos!=y0)
1353         {
1354                 unsigned char *src;
                // skip if first line has been horiz scaled already
1356                 if(s_last_ypos != y0-1)
1357                 {
1358                         // check if first line is before any available src lines
1359                         if(y0-1 < y)    src=srcptr[0]+(0     )*stride[0];
1360                         else            src=srcptr[0]+(y0-y-1)*stride[0];
1361
1362                         hyscale(buf0, dstw, src, srcWidth, s_xinc);
1363                 }
1364                 // check if second line is after any available src lines
1365                 if(y0-y >= h)   src=srcptr[0]+(h-1)*stride[0];
1366                 else            src=srcptr[0]+(y0-y)*stride[0];
1367
                // the min() is required to avoid reusing lines which were not available
1369                 s_last_ypos= MIN(y0, y+h-1);
1370                 hyscale(buf1, dstw, src, srcWidth, s_xinc);
1371         }
1372 //      printf("%d %d %d %d\n", y, y1, s_last_y1pos, h);
1373       // *** horizontal scale U and V lines to temp buffer
1374         if(s_last_y1pos!=y1)
1375         {
1376                 uint8_t *src1, *src2;
                // skip if first line has been horiz scaled already
1378                 if(s_last_y1pos != y1-1)
1379                 {
1380                         // check if first line is before any available src lines
1381                         if(y1-y/2-1 < 0)
1382                         {
1383                                 src1= srcptr[1]+(0)*stride[1];
1384                                 src2= srcptr[2]+(0)*stride[2];
1385                         }else{
1386                                 src1= srcptr[1]+(y1-y/2-1)*stride[1];
1387                                 src2= srcptr[2]+(y1-y/2-1)*stride[2];
1388                         }
1389                         hcscale(uvbuf0, dstUVw, src1, src2, srcWidth, s_xinc2);
1390                 }
1391
1392                 // check if second line is after any available src lines
1393                 if(y1 - y/2 >= h/2)
1394                 {
1395                         src1= srcptr[1]+(h/2-1)*stride[1];
1396                         src2= srcptr[2]+(h/2-1)*stride[2];
1397                 }else{
1398                         src1= srcptr[1]+(y1-y/2)*stride[1];
1399                         src2= srcptr[2]+(y1-y/2)*stride[2];
1400                 }
1401                 hcscale(uvbuf1, dstUVw, src1, src2, srcWidth, s_xinc2);
1402
                // the min() is required to avoid reusing lines which were not available
1404                 s_last_y1pos= MIN(y1, y/2+h/2-1);
1405         }
1406
1407
1408         if(ABS(s_yinc - 0x10000) < 10)
1409                 yuv2rgb1(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
1410         else
1411                 yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
1412
1413 #ifdef HAVE_MMX
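        // rotate the two ordered-dither patterns so that consecutive output
        // lines dither differently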
1414         b16Dither= b16Dither1;
1415         b16Dither1= b16Dither2;
1416         b16Dither2= b16Dither;
1417
1418         g16Dither= g16Dither1;
1419         g16Dither1= g16Dither2;
1420         g16Dither2= g16Dither;
1421 #endif
1422   }
1423
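        // clear the MMX/3DNow! state so that FPU code run afterwards is safe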
1424 #ifdef HAVE_3DNOW
1425         asm volatile("femms");
1426 #elif defined (HAVE_MMX)
1427         asm volatile("emms");
1428 #endif
1429 }
1430
1431
1432 void SwScale_Init(){
1433     // generating tables:
1434     int i;
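    // note: clip_table carries a +256 guard band on each side (see below), so
    // indexing it with a biased 13-bit-shifted yuvtab sum clamps to 0..255
    // without branches; the yuvtab_* constants look like the usual BT.601
    // coefficients in 13-bit fixed point (e.g. 0x3343/8192 ~= 1.60 for V->R)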
1435     for(i=0;i<256;i++){
1436         clip_table[i]=0;
1437         clip_table[i+256]=i;
1438         clip_table[i+512]=255;
1439         yuvtab_2568[i]=(0x2568*(i-16))+(256<<13);
1440         yuvtab_3343[i]=0x3343*(i-128);
1441         yuvtab_0c92[i]=-0x0c92*(i-128);
1442         yuvtab_1a1e[i]=-0x1a1e*(i-128);
1443         yuvtab_40cf[i]=0x40cf*(i-128);
1444     }
1445
1446 }