
// Software scaling and colorspace conversion routines for MPlayer

// Original C implementation by A'rpi/ESP-team <arpi@thot.banki.hu>
// current version mostly by Michael Niedermayer (michaelni@gmx.at)
// the parts written by michael are under GNU GPL

#include <inttypes.h>
#include <string.h>
#include "../config.h"
#include "swscale.h"
#include "../mmx_defs.h"
#undef MOVNTQ

//#undef HAVE_MMX2
//#undef HAVE_MMX
//#undef ARCH_X86
#define DITHER1XBPP
int fullUVIpol=0;
//disables the unscaled height version
int allwaysIpol=0;

#define RET 0xC3 //near return opcode
/*
NOTES

known BUGS with known cause (no bug reports please, but patches are welcome :) )
horizontal MMX2 scaler reads 1-7 samples too many (might cause a sig11)

Supported output formats: BGR15 BGR16 BGR24 BGR32
BGR15 & BGR16 MMX versions support dithering
Special versions: fast Y 1:1 scaling (no interpolation in y direction)

TODO
more intelligent misalignment avoidance for the horizontal scaler
bicubic scaler
*/
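/*
 A minimal mitigation sketch for the overread noted above (an assumption, not
 part of this file): pad each source line allocation so the horizontal MMX2
 scaler's up-to-7-byte overread stays inside the buffer, e.g.

	uint8_t *line= malloc(srcWidth + 8); // 8 spare bytes cover the overread

 The scaler itself is unchanged; only the allocation grows.
*/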

#define ABS(a) ((a) > 0 ? (a) : (-(a)))
#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))

#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif
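// Both variants compute the same rounded byte-wise average, so PAVGB(a,b)
// leaves b[i] = (a[i] + b[i] + 1) >> 1 for each of the 8 packed bytes.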

#ifdef HAVE_MMX2
#define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif
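// movntq stores the quadword without polluting the cache (useful here since
// the output picture is written once and not read back); plain movq is the
// fallback. Non-temporal stores are normally flushed with an sfence before
// the buffer is consumed elsewhere (an MMX2-only concern).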


#ifdef HAVE_MMX
static uint64_t __attribute__((aligned(8))) yCoeff=    0x2568256825682568LL;
static uint64_t __attribute__((aligned(8))) vrCoeff=   0x3343334333433343LL;
static uint64_t __attribute__((aligned(8))) ubCoeff=   0x40cf40cf40cf40cfLL;
static uint64_t __attribute__((aligned(8))) vgCoeff=   0xE5E2E5E2E5E2E5E2LL;
static uint64_t __attribute__((aligned(8))) ugCoeff=   0xF36EF36EF36EF36ELL;
static uint64_t __attribute__((aligned(8))) w400=      0x0400040004000400LL;
static uint64_t __attribute__((aligned(8))) w80=       0x0080008000800080LL;
static uint64_t __attribute__((aligned(8))) w10=       0x0010001000100010LL;
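/*
 The coefficients above are one 16-bit word replicated four times and are
 applied with pmulhw, which keeps the upper 16 bits of the product. Since
 luma reaches the multiply scaled as 8*(Y-16), the effective factor is
 coeff/8192; a worked example (a sketch of the intent, not code from this
 file):

	yCoeff: 0x2568 = 9576, and 9576/8192 ~= 1.169 ~= 255.0/219,
	the BT.601 luma range expansion

 w80 = 8*16 and w400 = 8*128 are the matching bias values subtracted before
 the multiplies.
*/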
static uint64_t __attribute__((aligned(8))) bm00001111=0x00000000FFFFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm00000111=0x0000000000FFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm11111000=0xFFFFFFFFFF000000LL;

static uint64_t __attribute__((aligned(8))) b16Dither= 0x0004000400040004LL;
static uint64_t __attribute__((aligned(8))) b16Dither1=0x0004000400040004LL;
static uint64_t __attribute__((aligned(8))) b16Dither2=0x0602060206020602LL;
static uint64_t __attribute__((aligned(8))) g16Dither= 0x0002000200020002LL;
static uint64_t __attribute__((aligned(8))) g16Dither1=0x0002000200020002LL;
static uint64_t __attribute__((aligned(8))) g16Dither2=0x0301030103010301LL;

static uint64_t __attribute__((aligned(8))) b16Mask=   0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g16Mask=   0x07E007E007E007E0LL;
static uint64_t __attribute__((aligned(8))) r16Mask=   0xF800F800F800F800LL;
static uint64_t __attribute__((aligned(8))) b15Mask=   0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g15Mask=   0x03E003E003E003E0LL;
static uint64_t __attribute__((aligned(8))) r15Mask=   0x7C007C007C007C00LL;

static uint64_t __attribute__((aligned(8))) temp0;
static uint64_t __attribute__((aligned(8))) asm_yalpha1;
static uint64_t __attribute__((aligned(8))) asm_uvalpha1;
#endif

// temporary storage for 4 yuv lines:
// 16bit for now (mmx likes it more compact)
#ifdef HAVE_MMX
static uint16_t __attribute__((aligned(8))) pix_buf_y[4][2048];
static uint16_t __attribute__((aligned(8))) pix_buf_uv[2][2048*2];
#else
static uint16_t pix_buf_y[4][2048];
static uint16_t pix_buf_uv[2][2048*2];
#endif

// clipping helper table for C implementations:
static unsigned char clip_table[768];
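/*
 The table is presumably filled elsewhere with a clamp biased by 256, roughly
 (an assumption; the init code lives outside this template):

	for(i=0; i<768; i++)
		clip_table[i]= i < 256 ? 0 : i > 511 ? 255 : i - 256;

 so that clip_table[x + 256] == x clamped to 0..255.
*/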

// yuv->rgb conversion tables:
static    int yuvtab_2568[256];
static    int yuvtab_3343[256];
static    int yuvtab_0c92[256];
static    int yuvtab_1a1e[256];
static    int yuvtab_40cf[256];
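/*
 The table names encode their fixed-point factors; they are presumably filled
 elsewhere along the lines of (an assumption, signs and biases shown for
 illustration only):

	yuvtab_2568[i]= 0x2568*(i - 16);   // luma scale
	yuvtab_3343[i]= 0x3343*(i - 128);  // V -> R contribution
	yuvtab_40cf[i]= 0x40cf*(i - 128);  // U -> B contribution
	// yuvtab_0c92 / yuvtab_1a1e carry the (negative) U/V -> G terms

 The >>13 in the C paths below matches this 13-bit fixed-point scale.
*/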


static uint8_t funnyYCode[10000];
static uint8_t funnyUVCode[10000];
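// Buffers for the horizontal scaling code that the MMX2 path generates at
// run time and then jumps into; the RET define above (opcode 0xC3) is what
// terminates these generated snippets.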

static int canMMX2BeUsed=0;

#define FULL_YSCALEYUV2RGB \
                "pxor %%mm7, %%mm7              \n\t"\
                "movd %6, %%mm6                 \n\t" /*yalpha1*/\
                "punpcklwd %%mm6, %%mm6         \n\t"\
                "punpcklwd %%mm6, %%mm6         \n\t"\
                "movd %7, %%mm5                 \n\t" /*uvalpha1*/\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "xorl %%eax, %%eax              \n\t"\
                "1:                             \n\t"\
                "movq (%0, %%eax, 2), %%mm0     \n\t" /*buf0[eax]*/\
                "movq (%1, %%eax, 2), %%mm1     \n\t" /*buf1[eax]*/\
                "movq (%2, %%eax,2), %%mm2      \n\t" /* uvbuf0[eax]*/\
                "movq (%3, %%eax,2), %%mm3      \n\t" /* uvbuf1[eax]*/\
                "psubw %%mm1, %%mm0             \n\t" /* buf0[eax] - buf1[eax]*/\
                "psubw %%mm3, %%mm2             \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
                "pmulhw %%mm6, %%mm0            \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
                "pmulhw %%mm5, %%mm2            \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
                "psraw $4, %%mm1                \n\t" /* buf1[eax] >>4*/\
                "movq 4096(%2, %%eax,2), %%mm4  \n\t" /* uvbuf0[eax+2048]*/\
                "psraw $4, %%mm3                \n\t" /* uvbuf1[eax] >>4*/\
                "paddw %%mm0, %%mm1             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
                "movq 4096(%3, %%eax,2), %%mm0  \n\t" /* uvbuf1[eax+2048]*/\
                "paddw %%mm2, %%mm3             \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
                "psubw %%mm0, %%mm4             \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
                "psubw w80, %%mm1               \n\t" /* 8(Y-16)*/\
                "psubw w400, %%mm3              \n\t" /* 8(U-128)*/\
                "pmulhw yCoeff, %%mm1           \n\t"\
\
\
                "pmulhw %%mm5, %%mm4            \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
                "movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
                "pmulhw ubCoeff, %%mm3          \n\t"\
                "psraw $4, %%mm0                \n\t" /* uvbuf1[eax+2048] >>4*/\
                "pmulhw ugCoeff, %%mm2          \n\t"\
                "paddw %%mm4, %%mm0             \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
                "psubw w400, %%mm0              \n\t" /* (V-128)8*/\
\
\
                "movq %%mm0, %%mm4              \n\t" /* (V-128)8*/\
                "pmulhw vrCoeff, %%mm0          \n\t"\
                "pmulhw vgCoeff, %%mm4          \n\t"\
                "paddw %%mm1, %%mm3             \n\t" /* B*/\
                "paddw %%mm1, %%mm0             \n\t" /* R*/\
                "packuswb %%mm3, %%mm3          \n\t"\
\
                "packuswb %%mm0, %%mm0          \n\t"\
                "paddw %%mm4, %%mm2             \n\t"\
                "paddw %%mm2, %%mm1             \n\t" /* G*/\
\
                "packuswb %%mm1, %%mm1          \n\t"

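/*
 Scalar sketch of what the loop body above computes per pixel (assuming the
 line buffers hold samples scaled by 128, as set up by the horizontal scaler):

	Y16= (buf1[i]>>4)   + (((buf0[i]  -buf1[i]  )*yalpha1 )>>16); // ~8*Y
	U16= (uvbuf1[i]>>4) + (((uvbuf0[i]-uvbuf1[i])*uvalpha1)>>16); // ~8*U

 followed by the bias subtraction, the coefficient multiplies and the final
 packuswb clamps to 0..255.
*/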
#define YSCALEYUV2RGB \
                "movd %6, %%mm6                 \n\t" /*yalpha1*/\
                "punpcklwd %%mm6, %%mm6         \n\t"\
                "punpcklwd %%mm6, %%mm6         \n\t"\
                "movq %%mm6, asm_yalpha1        \n\t"\
                "movd %7, %%mm5                 \n\t" /*uvalpha1*/\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "movq %%mm5, asm_uvalpha1       \n\t"\
                "xorl %%eax, %%eax              \n\t"\
                "1:                             \n\t"\
                "movq (%2, %%eax), %%mm2        \n\t" /* uvbuf0[eax]*/\
                "movq (%3, %%eax), %%mm3        \n\t" /* uvbuf1[eax]*/\
                "movq 4096(%2, %%eax), %%mm5    \n\t" /* uvbuf0[eax+2048]*/\
                "movq 4096(%3, %%eax), %%mm4    \n\t" /* uvbuf1[eax+2048]*/\
                "psubw %%mm3, %%mm2             \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
                "psubw %%mm4, %%mm5             \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
                "movq asm_uvalpha1, %%mm0       \n\t"\
                "pmulhw %%mm0, %%mm2            \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
                "pmulhw %%mm0, %%mm5            \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
                "psraw $4, %%mm3                \n\t" /* uvbuf1[eax] >>4*/\
                "psraw $4, %%mm4                \n\t" /* uvbuf1[eax+2048] >>4*/\
                "paddw %%mm2, %%mm3             \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
                "paddw %%mm5, %%mm4             \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
                "psubw w400, %%mm3              \n\t" /* (U-128)8*/\
                "psubw w400, %%mm4              \n\t" /* (V-128)8*/\
                "movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
                "movq %%mm4, %%mm5              \n\t" /* (V-128)8*/\
                "pmulhw ugCoeff, %%mm3          \n\t"\
                "pmulhw vgCoeff, %%mm4          \n\t"\
        /* mm2=(U-128)8, mm3=ug, mm4=vg, mm5=(V-128)8 */\
                "movq (%0, %%eax, 2), %%mm0     \n\t" /*buf0[eax]*/\
                "movq (%1, %%eax, 2), %%mm1     \n\t" /*buf1[eax]*/\
                "movq 8(%0, %%eax, 2), %%mm6    \n\t" /*buf0[eax]*/\
                "movq 8(%1, %%eax, 2), %%mm7    \n\t" /*buf1[eax]*/\
                "psubw %%mm1, %%mm0             \n\t" /* buf0[eax] - buf1[eax]*/\
                "psubw %%mm7, %%mm6             \n\t" /* buf0[eax] - buf1[eax]*/\
                "pmulhw asm_yalpha1, %%mm0      \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
                "pmulhw asm_yalpha1, %%mm6      \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
                "psraw $4, %%mm1                \n\t" /* buf1[eax] >>4*/\
                "psraw $4, %%mm7                \n\t" /* buf1[eax] >>4*/\
                "paddw %%mm0, %%mm1             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
                "paddw %%mm6, %%mm7             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
                "pmulhw ubCoeff, %%mm2          \n\t"\
                "pmulhw vrCoeff, %%mm5          \n\t"\
                "psubw w80, %%mm1               \n\t" /* 8(Y-16)*/\
                "psubw w80, %%mm7               \n\t" /* 8(Y-16)*/\
                "pmulhw yCoeff, %%mm1           \n\t"\
                "pmulhw yCoeff, %%mm7           \n\t"\
        /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg, mm5=vr, mm7=Y2 */\
                "paddw %%mm3, %%mm4             \n\t"\
                "movq %%mm2, %%mm0              \n\t"\
                "movq %%mm5, %%mm6              \n\t"\
                "movq %%mm4, %%mm3              \n\t"\
                "punpcklwd %%mm2, %%mm2         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm4, %%mm4         \n\t"\
                "paddw %%mm1, %%mm2             \n\t"\
                "paddw %%mm1, %%mm5             \n\t"\
                "paddw %%mm1, %%mm4             \n\t"\
                "punpckhwd %%mm0, %%mm0         \n\t"\
                "punpckhwd %%mm6, %%mm6         \n\t"\
                "punpckhwd %%mm3, %%mm3         \n\t"\
                "paddw %%mm7, %%mm0             \n\t"\
                "paddw %%mm7, %%mm6             \n\t"\
                "paddw %%mm7, %%mm3             \n\t"\
                /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
                "packuswb %%mm0, %%mm2          \n\t"\
                "packuswb %%mm6, %%mm5          \n\t"\
                "packuswb %%mm3, %%mm4          \n\t"\
                "pxor %%mm7, %%mm7              \n\t"

#define YSCALEYUV2RGB1 \
                "xorl %%eax, %%eax              \n\t"\
                "1:                             \n\t"\
                "movq (%2, %%eax), %%mm3        \n\t" /* uvbuf0[eax]*/\
                "movq 4096(%2, %%eax), %%mm4    \n\t" /* uvbuf0[eax+2048]*/\
                "psraw $4, %%mm3                \n\t" /* uvbuf0[eax] >>4*/\
                "psraw $4, %%mm4                \n\t" /* uvbuf0[eax+2048] >>4*/\
                "psubw w400, %%mm3              \n\t" /* (U-128)8*/\
                "psubw w400, %%mm4              \n\t" /* (V-128)8*/\
                "movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
                "movq %%mm4, %%mm5              \n\t" /* (V-128)8*/\
                "pmulhw ugCoeff, %%mm3          \n\t"\
                "pmulhw vgCoeff, %%mm4          \n\t"\
        /* mm2=(U-128)8, mm3=ug, mm4=vg, mm5=(V-128)8 */\
                "movq (%0, %%eax, 2), %%mm1     \n\t" /*buf0[eax]*/\
                "movq 8(%0, %%eax, 2), %%mm7    \n\t" /*buf0[eax]*/\
                "psraw $4, %%mm1                \n\t" /* buf0[eax] >>4*/\
                "psraw $4, %%mm7                \n\t" /* buf0[eax] >>4*/\
                "pmulhw ubCoeff, %%mm2          \n\t"\
                "pmulhw vrCoeff, %%mm5          \n\t"\
                "psubw w80, %%mm1               \n\t" /* 8(Y-16)*/\
                "psubw w80, %%mm7               \n\t" /* 8(Y-16)*/\
                "pmulhw yCoeff, %%mm1           \n\t"\
                "pmulhw yCoeff, %%mm7           \n\t"\
        /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg, mm5=vr, mm7=Y2 */\
                "paddw %%mm3, %%mm4             \n\t"\
                "movq %%mm2, %%mm0              \n\t"\
                "movq %%mm5, %%mm6              \n\t"\
                "movq %%mm4, %%mm3              \n\t"\
                "punpcklwd %%mm2, %%mm2         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm4, %%mm4         \n\t"\
                "paddw %%mm1, %%mm2             \n\t"\
                "paddw %%mm1, %%mm5             \n\t"\
                "paddw %%mm1, %%mm4             \n\t"\
                "punpckhwd %%mm0, %%mm0         \n\t"\
                "punpckhwd %%mm6, %%mm6         \n\t"\
                "punpckhwd %%mm3, %%mm3         \n\t"\
                "paddw %%mm7, %%mm0             \n\t"\
                "paddw %%mm7, %%mm6             \n\t"\
                "paddw %%mm7, %%mm3             \n\t"\
                /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
                "packuswb %%mm0, %%mm2          \n\t"\
                "packuswb %%mm6, %%mm5          \n\t"\
                "packuswb %%mm3, %%mm4          \n\t"\
                "pxor %%mm7, %%mm7              \n\t"

// do vertical chrominance interpolation
#define YSCALEYUV2RGB1b \
                "xorl %%eax, %%eax              \n\t"\
                "1:                             \n\t"\
                "movq (%2, %%eax), %%mm2        \n\t" /* uvbuf0[eax]*/\
                "movq (%3, %%eax), %%mm3        \n\t" /* uvbuf1[eax]*/\
                "movq 4096(%2, %%eax), %%mm5    \n\t" /* uvbuf0[eax+2048]*/\
                "movq 4096(%3, %%eax), %%mm4    \n\t" /* uvbuf1[eax+2048]*/\
                "paddw %%mm2, %%mm3             \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
                "paddw %%mm5, %%mm4             \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
                "psrlw $5, %%mm3                \n\t"\
                "psrlw $5, %%mm4                \n\t"\
                "psubw w400, %%mm3              \n\t" /* (U-128)8*/\
                "psubw w400, %%mm4              \n\t" /* (V-128)8*/\
                "movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
                "movq %%mm4, %%mm5              \n\t" /* (V-128)8*/\
                "pmulhw ugCoeff, %%mm3          \n\t"\
                "pmulhw vgCoeff, %%mm4          \n\t"\
        /* mm2=(U-128)8, mm3=ug, mm4=vg, mm5=(V-128)8 */\
                "movq (%0, %%eax, 2), %%mm1     \n\t" /*buf0[eax]*/\
                "movq 8(%0, %%eax, 2), %%mm7    \n\t" /*buf0[eax]*/\
                "psraw $4, %%mm1                \n\t" /* buf0[eax] >>4*/\
                "psraw $4, %%mm7                \n\t" /* buf0[eax] >>4*/\
                "pmulhw ubCoeff, %%mm2          \n\t"\
                "pmulhw vrCoeff, %%mm5          \n\t"\
                "psubw w80, %%mm1               \n\t" /* 8(Y-16)*/\
                "psubw w80, %%mm7               \n\t" /* 8(Y-16)*/\
                "pmulhw yCoeff, %%mm1           \n\t"\
                "pmulhw yCoeff, %%mm7           \n\t"\
        /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg, mm5=vr, mm7=Y2 */\
                "paddw %%mm3, %%mm4             \n\t"\
                "movq %%mm2, %%mm0              \n\t"\
                "movq %%mm5, %%mm6              \n\t"\
                "movq %%mm4, %%mm3              \n\t"\
                "punpcklwd %%mm2, %%mm2         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm4, %%mm4         \n\t"\
                "paddw %%mm1, %%mm2             \n\t"\
                "paddw %%mm1, %%mm5             \n\t"\
                "paddw %%mm1, %%mm4             \n\t"\
                "punpckhwd %%mm0, %%mm0         \n\t"\
                "punpckhwd %%mm6, %%mm6         \n\t"\
                "punpckhwd %%mm3, %%mm3         \n\t"\
                "paddw %%mm7, %%mm0             \n\t"\
                "paddw %%mm7, %%mm6             \n\t"\
                "paddw %%mm7, %%mm3             \n\t"\
                /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
                "packuswb %%mm0, %%mm2          \n\t"\
                "packuswb %%mm6, %%mm5          \n\t"\
                "packuswb %%mm3, %%mm4          \n\t"\
                "pxor %%mm7, %%mm7              \n\t"
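// The "1b" variant has no second luma line to blend with, but it still
// averages the two chroma lines: paddw sums values stored <<7, and psrlw $5
// turns the <<8 sum into the same 8*(U,V) scale the other macros use.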

#define WRITEBGR32 \
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
                        "movq %%mm2, %%mm1              \n\t" /* B */\
                        "movq %%mm5, %%mm6              \n\t" /* R */\
                        "punpcklbw %%mm4, %%mm2         \n\t" /* GBGBGBGB 0 */\
                        "punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R 0 */\
                        "punpckhbw %%mm4, %%mm1         \n\t" /* GBGBGBGB 2 */\
                        "punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R 2 */\
                        "movq %%mm2, %%mm0              \n\t" /* GBGBGBGB 0 */\
                        "movq %%mm1, %%mm3              \n\t" /* GBGBGBGB 2 */\
                        "punpcklwd %%mm5, %%mm0         \n\t" /* 0RGB0RGB 0 */\
                        "punpckhwd %%mm5, %%mm2         \n\t" /* 0RGB0RGB 1 */\
                        "punpcklwd %%mm6, %%mm1         \n\t" /* 0RGB0RGB 2 */\
                        "punpckhwd %%mm6, %%mm3         \n\t" /* 0RGB0RGB 3 */\
\
                        MOVNTQ(%%mm0, (%4, %%eax, 4))\
                        MOVNTQ(%%mm2, 8(%4, %%eax, 4))\
                        MOVNTQ(%%mm1, 16(%4, %%eax, 4))\
                        MOVNTQ(%%mm3, 24(%4, %%eax, 4))\
\
                        "addl $8, %%eax                 \n\t"\
                        "cmpl %5, %%eax                 \n\t"\
                        " jb 1b                         \n\t"

#define WRITEBGR16 \
                        "movq %%mm2, %%mm1              \n\t" /* B */\
                        "movq %%mm4, %%mm3              \n\t" /* G */\
                        "movq %%mm5, %%mm6              \n\t" /* R */\
\
                        "punpcklbw %%mm7, %%mm3         \n\t" /* 0G0G0G0G */\
                        "punpcklbw %%mm7, %%mm2         \n\t" /* 0B0B0B0B */\
                        "punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R */\
\
                        "psrlw $3, %%mm2                \n\t"\
                        "psllw $3, %%mm3                \n\t"\
                        "psllw $8, %%mm5                \n\t"\
\
                        "pand g16Mask, %%mm3            \n\t"\
                        "pand r16Mask, %%mm5            \n\t"\
\
                        "por %%mm3, %%mm2               \n\t"\
                        "por %%mm5, %%mm2               \n\t"\
\
                        "punpckhbw %%mm7, %%mm4         \n\t" /* 0G0G0G0G */\
                        "punpckhbw %%mm7, %%mm1         \n\t" /* 0B0B0B0B */\
                        "punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R */\
\
                        "psrlw $3, %%mm1                \n\t"\
                        "psllw $3, %%mm4                \n\t"\
                        "psllw $8, %%mm6                \n\t"\
\
                        "pand g16Mask, %%mm4            \n\t"\
                        "pand r16Mask, %%mm6            \n\t"\
\
                        "por %%mm4, %%mm1               \n\t"\
                        "por %%mm6, %%mm1               \n\t"\
\
                        MOVNTQ(%%mm2, (%4, %%eax, 2))\
                        MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
\
                        "addl $8, %%eax                 \n\t"\
                        "cmpl %5, %%eax                 \n\t"\
                        " jb 1b                         \n\t"
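/*
 Scalar equivalent of the 16-bit packing above (and, with the $2/$7 shifts of
 WRITEBGR15 below, the 15-bit variant), per pixel with b/g/r in 0..255:

	pix16= (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);   // RGB565
	pix15= (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);   // RGB555

 The pand against g16Mask/r16Mask plays the role of the & masks here.
*/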

#define WRITEBGR15 \
                        "movq %%mm2, %%mm1              \n\t" /* B */\
                        "movq %%mm4, %%mm3              \n\t" /* G */\
                        "movq %%mm5, %%mm6              \n\t" /* R */\
\
                        "punpcklbw %%mm7, %%mm3         \n\t" /* 0G0G0G0G */\
                        "punpcklbw %%mm7, %%mm2         \n\t" /* 0B0B0B0B */\
                        "punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R */\
\
                        "psrlw $3, %%mm2                \n\t"\
                        "psllw $2, %%mm3                \n\t"\
                        "psllw $7, %%mm5                \n\t"\
\
                        "pand g15Mask, %%mm3            \n\t"\
                        "pand r15Mask, %%mm5            \n\t"\
\
                        "por %%mm3, %%mm2               \n\t"\
                        "por %%mm5, %%mm2               \n\t"\
\
                        "punpckhbw %%mm7, %%mm4         \n\t" /* 0G0G0G0G */\
                        "punpckhbw %%mm7, %%mm1         \n\t" /* 0B0B0B0B */\
                        "punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R */\
\
                        "psrlw $3, %%mm1                \n\t"\
                        "psllw $2, %%mm4                \n\t"\
                        "psllw $7, %%mm6                \n\t"\
\
                        "pand g15Mask, %%mm4            \n\t"\
                        "pand r15Mask, %%mm6            \n\t"\
\
                        "por %%mm4, %%mm1               \n\t"\
                        "por %%mm6, %%mm1               \n\t"\
\
                        MOVNTQ(%%mm2, (%4, %%eax, 2))\
                        MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
\
                        "addl $8, %%eax                 \n\t"\
                        "cmpl %5, %%eax                 \n\t"\
                        " jb 1b                         \n\t"
// FIXME find a faster way to shuffle it to BGR24
#define WRITEBGR24 \
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
                        "movq %%mm2, %%mm1              \n\t" /* B */\
                        "movq %%mm5, %%mm6              \n\t" /* R */\
                        "punpcklbw %%mm4, %%mm2         \n\t" /* GBGBGBGB 0 */\
                        "punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R 0 */\
                        "punpckhbw %%mm4, %%mm1         \n\t" /* GBGBGBGB 2 */\
                        "punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R 2 */\
                        "movq %%mm2, %%mm0              \n\t" /* GBGBGBGB 0 */\
                        "movq %%mm1, %%mm3              \n\t" /* GBGBGBGB 2 */\
                        "punpcklwd %%mm5, %%mm0         \n\t" /* 0RGB0RGB 0 */\
                        "punpckhwd %%mm5, %%mm2         \n\t" /* 0RGB0RGB 1 */\
                        "punpcklwd %%mm6, %%mm1         \n\t" /* 0RGB0RGB 2 */\
                        "punpckhwd %%mm6, %%mm3         \n\t" /* 0RGB0RGB 3 */\
\
                        "movq %%mm0, %%mm4              \n\t" /* 0RGB0RGB 0 */\
                        "psrlq $8, %%mm0                \n\t" /* 00RGB0RG 0 */\
                        "pand bm00000111, %%mm4         \n\t" /* 00000RGB 0 */\
                        "pand bm11111000, %%mm0         \n\t" /* 00RGB000 0.5 */\
                        "por %%mm4, %%mm0               \n\t" /* 00RGBRGB 0 */\
                        "movq %%mm2, %%mm4              \n\t" /* 0RGB0RGB 1 */\
                        "psllq $48, %%mm2               \n\t" /* GB000000 1 */\
                        "por %%mm2, %%mm0               \n\t" /* GBRGBRGB 0 */\
\
                        "movq %%mm4, %%mm2              \n\t" /* 0RGB0RGB 1 */\
                        "psrld $16, %%mm4               \n\t" /* 000R000R 1 */\
                        "psrlq $24, %%mm2               \n\t" /* 0000RGB0 1.5 */\
                        "por %%mm4, %%mm2               \n\t" /* 000RRGBR 1 */\
                        "pand bm00001111, %%mm2         \n\t" /* 0000RGBR 1 */\
                        "movq %%mm1, %%mm4              \n\t" /* 0RGB0RGB 2 */\
                        "psrlq $8, %%mm1                \n\t" /* 00RGB0RG 2 */\
                        "pand bm00000111, %%mm4         \n\t" /* 00000RGB 2 */\
                        "pand bm11111000, %%mm1         \n\t" /* 00RGB000 2.5 */\
                        "por %%mm4, %%mm1               \n\t" /* 00RGBRGB 2 */\
                        "movq %%mm1, %%mm4              \n\t" /* 00RGBRGB 2 */\
                        "psllq $32, %%mm1               \n\t" /* BRGB0000 2 */\
                        "por %%mm1, %%mm2               \n\t" /* BRGBRGBR 1 */\
\
                        "psrlq $32, %%mm4               \n\t" /* 000000RG 2.5 */\
                        "movq %%mm3, %%mm5              \n\t" /* 0RGB0RGB 3 */\
                        "psrlq $8, %%mm3                \n\t" /* 00RGB0RG 3 */\
                        "pand bm00000111, %%mm5         \n\t" /* 00000RGB 3 */\
                        "pand bm11111000, %%mm3         \n\t" /* 00RGB000 3.5 */\
                        "por %%mm5, %%mm3               \n\t" /* 00RGBRGB 3 */\
                        "psllq $16, %%mm3               \n\t" /* RGBRGB00 3 */\
                        "por %%mm4, %%mm3               \n\t" /* RGBRGBRG 2.5 */\
\
                        "leal (%%eax, %%eax, 2), %%ebx  \n\t"\
                        MOVNTQ(%%mm0, (%4, %%ebx))\
                        MOVNTQ(%%mm2, 8(%4, %%ebx))\
                        MOVNTQ(%%mm3, 16(%4, %%ebx))\
\
                        "addl $8, %%eax                 \n\t"\
                        "cmpl %5, %%eax                 \n\t"\
                        " jb 1b                         \n\t"


static inline void yuv2yuv(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                           uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstw, int yalpha, int uvalpha)
{
        int yalpha1=yalpha^4095;
        int uvalpha1=uvalpha^4095;
        int i;

        asm volatile ("\n\t"::: "memory");

        for(i=0;i<dstw;i++)
        {
                ((uint8_t*)dest)[i] = (buf0[i]*yalpha1+buf1[i]*yalpha)>>19;
        }

        if(uvalpha != -1)
        {
                for(i=0; i<dstw/2; i++)
                {
                        ((uint8_t*)uDest)[i] = (uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19;
                        ((uint8_t*)vDest)[i] = (uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19;
                }
        }
}
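/*
 The >>19 above combines two fixed-point scales: the line buffers hold
 samples scaled by 128 (<<7) and the alphas are 12-bit weights (0..4095), so
 the products carry 7+12=19 fraction bits. E.g. with yalpha=0 the expression
 degenerates to buf0[i]*4095>>19 ~= buf0[i]>>7, i.e. the plain sample.
*/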

/**
 * vertical scale YV12 to RGB
 */
static inline void yuv2rgbX(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                            uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
{
        int yalpha1=yalpha^4095;
        int uvalpha1=uvalpha^4095;
        int i;

        if(fullUVIpol)
        {

#ifdef HAVE_MMX
                if(dstbpp == 32)
                {
                        asm volatile(


FULL_YSCALEYUV2RGB
                        "punpcklbw %%mm1, %%mm3         \n\t" // BGBGBGBG
                        "punpcklbw %%mm7, %%mm0         \n\t" // R0R0R0R0

                        "movq %%mm3, %%mm1              \n\t"
                        "punpcklwd %%mm0, %%mm3         \n\t" // BGR0BGR0
                        "punpckhwd %%mm0, %%mm1         \n\t" // BGR0BGR0

                        MOVNTQ(%%mm3, (%4, %%eax, 4))
                        MOVNTQ(%%mm1, 8(%4, %%eax, 4))

                        "addl $4, %%eax                 \n\t"
                        "cmpl %5, %%eax                 \n\t"
                        " jb 1b                         \n\t"


                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==24)
                {
                        asm volatile(

FULL_YSCALEYUV2RGB

                                                                // lsb ... msb
                        "punpcklbw %%mm1, %%mm3         \n\t" // BGBGBGBG
                        "punpcklbw %%mm7, %%mm0         \n\t" // R0R0R0R0

                        "movq %%mm3, %%mm1              \n\t"
                        "punpcklwd %%mm0, %%mm3         \n\t" // BGR0BGR0
                        "punpckhwd %%mm0, %%mm1         \n\t" // BGR0BGR0

                        "movq %%mm3, %%mm2              \n\t" // BGR0BGR0
                        "psrlq $8, %%mm3                \n\t" // GR0BGR00
                        "pand bm00000111, %%mm2         \n\t" // BGR00000
                        "pand bm11111000, %%mm3         \n\t" // 000BGR00
                        "por %%mm2, %%mm3               \n\t" // BGRBGR00
                        "movq %%mm1, %%mm2              \n\t"
                        "psllq $48, %%mm1               \n\t" // 000000BG
                        "por %%mm1, %%mm3               \n\t" // BGRBGRBG

                        "movq %%mm2, %%mm1              \n\t" // BGR0BGR0
                        "psrld $16, %%mm2               \n\t" // R000R000
                        "psrlq $24, %%mm1               \n\t" // 0BGR0000
                        "por %%mm2, %%mm1               \n\t" // RBGRR000

                        "movl %4, %%ebx                 \n\t"
                        "addl %%eax, %%ebx              \n\t"

#ifdef HAVE_MMX2
                        //FIXME Alignment
                        "movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
                        "movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
#else
                        "movd %%mm3, (%%ebx, %%eax, 2)  \n\t"
                        "psrlq $32, %%mm3               \n\t"
                        "movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
                        "movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
#endif
                        "addl $4, %%eax                 \n\t"
                        "cmpl %5, %%eax                 \n\t"
                        " jb 1b                         \n\t"

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax", "%ebx"
                        );
                }
                else if(dstbpp==15)
                {
                        asm volatile(

FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
                        "paddusb b16Dither, %%mm1       \n\t"
                        "paddusb b16Dither, %%mm0       \n\t"
                        "paddusb b16Dither, %%mm3       \n\t"
#endif
                        "punpcklbw %%mm7, %%mm1         \n\t" // 0G0G0G0G
                        "punpcklbw %%mm7, %%mm3         \n\t" // 0B0B0B0B
                        "punpcklbw %%mm7, %%mm0         \n\t" // 0R0R0R0R

                        "psrlw $3, %%mm3                \n\t"
                        "psllw $2, %%mm1                \n\t"
                        "psllw $7, %%mm0                \n\t"
                        "pand g15Mask, %%mm1            \n\t"
                        "pand r15Mask, %%mm0            \n\t"

                        "por %%mm3, %%mm1               \n\t"
                        "por %%mm1, %%mm0               \n\t"

                        MOVNTQ(%%mm0, (%4, %%eax, 2))

                        "addl $4, %%eax                 \n\t"
                        "cmpl %5, %%eax                 \n\t"
                        " jb 1b                         \n\t"

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==16)
                {
                        asm volatile(

FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
                        "paddusb g16Dither, %%mm1       \n\t"
                        "paddusb b16Dither, %%mm0       \n\t"
                        "paddusb b16Dither, %%mm3       \n\t"
#endif
                        "punpcklbw %%mm7, %%mm1         \n\t" // 0G0G0G0G
                        "punpcklbw %%mm7, %%mm3         \n\t" // 0B0B0B0B
                        "punpcklbw %%mm7, %%mm0         \n\t" // 0R0R0R0R

                        "psrlw $3, %%mm3                \n\t"
                        "psllw $3, %%mm1                \n\t"
                        "psllw $8, %%mm0                \n\t"
                        "pand g16Mask, %%mm1            \n\t"
                        "pand r16Mask, %%mm0            \n\t"

                        "por %%mm3, %%mm1               \n\t"
                        "por %%mm1, %%mm0               \n\t"

                        MOVNTQ(%%mm0, (%4, %%eax, 2))

                        "addl $4, %%eax                 \n\t"
                        "cmpl %5, %%eax                 \n\t"
                        " jb 1b                         \n\t"

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
#else
                asm volatile ("\n\t"::: "memory");

                if(dstbpp==32 || dstbpp==24)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
                                int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
                                dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
                                dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
                                dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
                                dest+=dstbpp>>3;
                        }
                }
                else if(dstbpp==16)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
                                int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);

                                ((uint16_t*)dest)[i] =
                                        (clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
                                        ((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<3)&0x07E0) |
                                        ((clip_table[(Y + yuvtab_3343[V]) >>13]<<8)&0xF800);
                        }
                }
                else if(dstbpp==15)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
                                int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);

                                ((uint16_t*)dest)[i] =
                                        (clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
                                        ((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<2)&0x03E0) |
                                        ((clip_table[(Y + yuvtab_3343[V]) >>13]<<7)&0x7C00);
                        }
                }
#endif
        }//FULL_UV_IPOL
        else
        {
#ifdef HAVE_MMX
                if(dstbpp == 32)
                {
                        asm volatile(
                                YSCALEYUV2RGB
                                WRITEBGR32

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==24)
                {
                        asm volatile(
                                YSCALEYUV2RGB
                                WRITEBGR24

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax", "%ebx"
                        );
                }
                else if(dstbpp==15)
                {
                        asm volatile(
                                YSCALEYUV2RGB
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                                "paddusb b16Dither, %%mm2       \n\t"
                                "paddusb b16Dither, %%mm4       \n\t"
                                "paddusb b16Dither, %%mm5       \n\t"
#endif

                                WRITEBGR15

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==16)
                {
                        asm volatile(
                                YSCALEYUV2RGB
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                                "paddusb g16Dither, %%mm2       \n\t"
                                "paddusb b16Dither, %%mm4       \n\t"
                                "paddusb b16Dither, %%mm5       \n\t"
#endif

                                WRITEBGR16

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
#else
                asm volatile ("\n\t"::: "memory");

                if(dstbpp==32)
                {
                        for(i=0; i<dstw-1; i+=2){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
                                int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
                                int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

                                int Cb= yuvtab_40cf[U];
                                int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
                                int Cr= yuvtab_3343[V];

                                dest[4*i+0]=clip_table[((Y1 + Cb) >>13)];
                                dest[4*i+1]=clip_table[((Y1 + Cg) >>13)];
                                dest[4*i+2]=clip_table[((Y1 + Cr) >>13)];

                                dest[4*i+4]=clip_table[((Y2 + Cb) >>13)];
                                dest[4*i+5]=clip_table[((Y2 + Cg) >>13)];
                                dest[4*i+6]=clip_table[((Y2 + Cr) >>13)];
                        }
                }
                else if(dstbpp==24)
                {
                        for(i=0; i<dstw-1; i+=2){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
                                int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
                                int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

                                int Cb= yuvtab_40cf[U];
                                int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
                                int Cr= yuvtab_3343[V];

                                dest[0]=clip_table[((Y1 + Cb) >>13)];
                                dest[1]=clip_table[((Y1 + Cg) >>13)];
                                dest[2]=clip_table[((Y1 + Cr) >>13)];

                                dest[3]=clip_table[((Y2 + Cb) >>13)];
                                dest[4]=clip_table[((Y2 + Cg) >>13)];
                                dest[5]=clip_table[((Y2 + Cr) >>13)];
                                dest+=6;
                        }
                }
                else if(dstbpp==16)
                {
                        for(i=0; i<dstw-1; i+=2){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
                                int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
                                int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

                                int Cb= yuvtab_40cf[U];
                                int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
                                int Cr= yuvtab_3343[V];

                                ((uint16_t*)dest)[i] =
                                        (clip_table[(Y1 + Cb) >>13]>>3) |
                                        ((clip_table[(Y1 + Cg) >>13]<<3)&0x07E0) |
                                        ((clip_table[(Y1 + Cr) >>13]<<8)&0xF800);

                                ((uint16_t*)dest)[i+1] =
                                        (clip_table[(Y2 + Cb) >>13]>>3) |
                                        ((clip_table[(Y2 + Cg) >>13]<<3)&0x07E0) |
                                        ((clip_table[(Y2 + Cr) >>13]<<8)&0xF800);
                        }
                }
                else if(dstbpp==15)
                {
                        for(i=0; i<dstw-1; i+=2){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
                                int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
                                int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

                                int Cb= yuvtab_40cf[U];
                                int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
                                int Cr= yuvtab_3343[V];

                                ((uint16_t*)dest)[i] =
                                        (clip_table[(Y1 + Cb) >>13]>>3) |
                                        ((clip_table[(Y1 + Cg) >>13]<<2)&0x03E0) |
                                        ((clip_table[(Y1 + Cr) >>13]<<7)&0x7C00);
                                ((uint16_t*)dest)[i+1] =
                                        (clip_table[(Y2 + Cb) >>13]>>3) |
                                        ((clip_table[(Y2 + Cg) >>13]<<2)&0x03E0) |
                                        ((clip_table[(Y2 + Cr) >>13]<<7)&0x7C00);
                        }
                }
#endif
        } //!FULL_UV_IPOL
}

/**
 * YV12 to RGB without scaling or interpolating
 */
static inline void yuv2rgb1(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                            uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
{
        int yalpha1=yalpha^4095;
        int uvalpha1=uvalpha^4095;
        int i;
        if(fullUVIpol || allwaysIpol)
        {
                yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
                return;
        }
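        // no vertical luma interpolation in this path: just pick the source
        // line nearest to the target position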
900         if( yalpha > 2048 ) buf0 = buf1;
901
902 #ifdef HAVE_MMX
903         if( uvalpha < 2048 ) // note this is not correct (shifts chrominance by 0.5 pixels) but its a bit faster
904         {
905                 if(dstbpp == 32)
906                 {
907                         asm volatile(
908                                 YSCALEYUV2RGB1
909                                 WRITEBGR32
910                         :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
911                         "m" (yalpha1), "m" (uvalpha1)
912                         : "%eax"
913                         );
914                 }
915                 else if(dstbpp==24)
916                 {
917                         asm volatile(
918                                 YSCALEYUV2RGB1
919                                 WRITEBGR24
920                         :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
921                         "m" (yalpha1), "m" (uvalpha1)
922                         : "%eax", "%ebx"
923                         );
924                 }
925                 else if(dstbpp==15)
926                 {
927                         asm volatile(
928                                 YSCALEYUV2RGB1
929                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
930 #ifdef DITHER1XBPP
931                                 "paddusb b16Dither, %%mm2       \n\t"
932                                 "paddusb b16Dither, %%mm4       \n\t"
933                                 "paddusb b16Dither, %%mm5       \n\t"
934 #endif
935                                 WRITEBGR15
936                         :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
937                         "m" (yalpha1), "m" (uvalpha1)
938                         : "%eax"
939                         );
940                 }
941                 else if(dstbpp==16)
942                 {
943                         asm volatile(
944                                 YSCALEYUV2RGB1
945                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
946 #ifdef DITHER1XBPP
947                                 "paddusb g16Dither, %%mm2       \n\t"
948                                 "paddusb b16Dither, %%mm4       \n\t"
949                                 "paddusb b16Dither, %%mm5       \n\t"
950 #endif
951
952                                 WRITEBGR16
953                         :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
954                         "m" (yalpha1), "m" (uvalpha1)
955                         : "%eax"
956                         );
957                 }
958         }
959         else
960         {
961                 if(dstbpp == 32)
962                 {
963                         asm volatile(
964                                 YSCALEYUV2RGB1b
965                                 WRITEBGR32
966                         :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
967                         "m" (yalpha1), "m" (uvalpha1)
968                         : "%eax"
969                         );
970                 }
971                 else if(dstbpp==24)
972                 {
973                         asm volatile(
974                                 YSCALEYUV2RGB1b
975                                 WRITEBGR24
976                         :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
977                         "m" (yalpha1), "m" (uvalpha1)
978                         : "%eax", "%ebx"
979                         );
980                 }
981                 else if(dstbpp==15)
982                 {
983                         asm volatile(
984                                 YSCALEYUV2RGB1b
985                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
986 #ifdef DITHER1XBPP
987                                 "paddusb b16Dither, %%mm2       \n\t"
988                                 "paddusb b16Dither, %%mm4       \n\t"
989                                 "paddusb b16Dither, %%mm5       \n\t"
990 #endif
991                                 WRITEBGR15
992                         :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
993                         "m" (yalpha1), "m" (uvalpha1)
994                         : "%eax"
995                         );
996                 }
997                 else if(dstbpp==16)
998                 {
999                         asm volatile(
1000                                 YSCALEYUV2RGB1b
1001                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1002 #ifdef DITHER1XBPP
1003                                 "paddusb g16Dither, %%mm2       \n\t"
1004                                 "paddusb b16Dither, %%mm4       \n\t"
1005                                 "paddusb b16Dither, %%mm5       \n\t"
1006 #endif
1007
1008                                 WRITEBGR16
1009                         :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
1010                         "m" (yalpha1), "m" (uvalpha1)
1011                         : "%eax"
1012                         );
1013                 }
1014         }
1015 #else
1016 //FIXME write 2 versions (for even & odd lines)
1017         asm volatile ("\n\t"::: "memory");
1018
1019         if(dstbpp==32)
1020         {
1021                 for(i=0; i<dstw-1; i+=2){
1022                         // vertical linear interpolation && yuv2rgb in a single step:
1023                         int Y1=yuvtab_2568[buf0[i]>>7];
1024                         int Y2=yuvtab_2568[buf0[i+1]>>7];
1025                         int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
1026                         int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);
1027
1028                         int Cb= yuvtab_40cf[U];
1029                         int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
1030                         int Cr= yuvtab_3343[V];
1031
1032                         dest[4*i+0]=clip_table[((Y1 + Cb) >>13)];
1033                         dest[4*i+1]=clip_table[((Y1 + Cg) >>13)];
1034                         dest[4*i+2]=clip_table[((Y1 + Cr) >>13)];
1035
1036                         dest[4*i+4]=clip_table[((Y2 + Cb) >>13)];
1037                         dest[4*i+5]=clip_table[((Y2 + Cg) >>13)];
1038                         dest[4*i+6]=clip_table[((Y2 + Cr) >>13)];
1039                 }
1040         }
1041         if(dstbpp==24)
1042         {
1043                 for(i=0; i<dstw-1; i+=2){
1044                         // vertical linear interpolation && yuv2rgb in a single step:
1045                         int Y1=yuvtab_2568[buf0[i]>>7];
1046                         int Y2=yuvtab_2568[buf0[i+1]>>7];
1047                         int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
1048                         int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);
1049
1050                         int Cb= yuvtab_40cf[U];
1051                         int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
1052                         int Cr= yuvtab_3343[V];
1053
1054                         dest[0]=clip_table[((Y1 + Cb) >>13)];
1055                         dest[1]=clip_table[((Y1 + Cg) >>13)];
1056                         dest[2]=clip_table[((Y1 + Cr) >>13)];
1057
1058                         dest[3]=clip_table[((Y2 + Cb) >>13)];
1059                         dest[4]=clip_table[((Y2 + Cg) >>13)];
1060                         dest[5]=clip_table[((Y2 + Cr) >>13)];
1061                         dest+=6;
1062                 }
1063         }
1064         else if(dstbpp==16)
1065         {
1066                 for(i=0; i<dstw-1; i+=2){
1067                         // vertical linear interpolation && yuv2rgb in a single step:
1068                         int Y1=yuvtab_2568[buf0[i]>>7];
1069                         int Y2=yuvtab_2568[buf0[i+1]>>7];
1070                         int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
1071                         int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);
1072
1073                         int Cb= yuvtab_40cf[U];
1074                         int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
1075                         int Cr= yuvtab_3343[V];
1076
1077                         ((uint16_t*)dest)[i] =
1078                                 (clip_table[(Y1 + Cb) >>13]>>3) |
1079                                 ((clip_table[(Y1 + Cg) >>13]<<3)&0x07E0) |
1080                                 ((clip_table[(Y1 + Cr) >>13]<<8)&0xF800);
1081
1082                         ((uint16_t*)dest)[i+1] =
1083                                 (clip_table[(Y2 + Cb) >>13]>>3) |
1084                                 ((clip_table[(Y2 + Cg) >>13]<<3)&0x07E0) |
1085                                 ((clip_table[(Y2 + Cr) >>13]<<8)&0xF800);
1086                 }
1087         }
1088         else if(dstbpp==15)
1089         {
1090                 for(i=0; i<dstw-1; i+=2){
1091                         // vertical linear interpolation && yuv2rgb in a single step:
1092                         int Y1=yuvtab_2568[buf0[i]>>7];
1093                         int Y2=yuvtab_2568[buf0[i+1]>>7];
1094                         int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
1095                         int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);
1096
1097                         int Cb= yuvtab_40cf[U];
1098                         int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
1099                         int Cr= yuvtab_3343[V];
1100
1101                         ((uint16_t*)dest)[i] =
1102                                 (clip_table[(Y1 + Cb) >>13]>>3) |
1103                                 ((clip_table[(Y1 + Cg) >>13]<<2)&0x03E0) |
1104                                 ((clip_table[(Y1 + Cr) >>13]<<7)&0x7C00);
1105                         ((uint16_t*)dest)[i+1] =
1106                                 (clip_table[(Y2 + Cb) >>13]>>3) |
1107                                 ((clip_table[(Y2 + Cg) >>13]<<2)&0x03E0) |
1108                                 ((clip_table[(Y2 + Cr) >>13]<<7)&0x7C00);
1109                 }
1110         }
1111 #endif
1112 }
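
/*
 Note on the table-driven C paths above: yuvtab_2568[Y] is 0x2568*(Y-16) + (256<<13),
 i.e. the luma contribution scaled by 2^13 plus a bias of 256<<13. After a chroma
 term is added and the sum is shifted right by 13, the index equals value+256,
 which lands in the middle (identity) third of clip_table[] (see SwScale_Init),
 so out-of-range values saturate to 0 or 255 without any branches.
 Worked example: Y=16, U=128 -> (0 + (256<<13))>>13 = 256, and clip_table[256] = 0.
*/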
1113
1114
1115 static inline void hyscale(uint16_t *dst, int dstWidth, uint8_t *src, int srcWidth, int xInc)
1116 {
1117         int i;
1118         unsigned int xpos=0;
1119         // *** horizontal scale Y line to temp buffer
1120 #ifdef ARCH_X86
1121 #ifdef HAVE_MMX2
1122         if(canMMX2BeUsed)
1123         {
1124                 asm volatile(
1125                         "pxor %%mm7, %%mm7              \n\t"
1126                         "pxor %%mm2, %%mm2              \n\t" // 2*xalpha
1127                         "movd %5, %%mm6                 \n\t" // xInc&0xFFFF
1128                         "punpcklwd %%mm6, %%mm6         \n\t"
1129                         "punpcklwd %%mm6, %%mm6         \n\t"
1130                         "movq %%mm6, %%mm2              \n\t"
1131                         "psllq $16, %%mm2               \n\t"
1132                         "paddw %%mm6, %%mm2             \n\t"
1133                         "psllq $16, %%mm2               \n\t"
1134                         "paddw %%mm6, %%mm2             \n\t"
1135                         "psllq $16, %%mm2               \n\t" //0,t,2t,3t               t=xInc&0xFFFF
1136                         "movq %%mm2, temp0              \n\t"
1137                         "movd %4, %%mm6                 \n\t" //(xInc*4)&0xFFFF
1138                         "punpcklwd %%mm6, %%mm6         \n\t"
1139                         "punpcklwd %%mm6, %%mm6         \n\t"
1140                         "xorl %%eax, %%eax              \n\t" // i
1141                         "movl %0, %%esi                 \n\t" // src
1142                         "movl %1, %%edi                 \n\t" // dst
1143                         "movl %3, %%edx                 \n\t" // (xInc*4)>>16
1144                         "xorl %%ecx, %%ecx              \n\t"
1145                         "xorl %%ebx, %%ebx              \n\t"
1146                         "movw %4, %%bx                  \n\t" // (xInc*4)&0xFFFF
1147
1148 #define FUNNY_Y_CODE \
1149                         PREFETCH" 1024(%%esi)           \n\t"\
1150                         PREFETCH" 1056(%%esi)           \n\t"\
1151                         PREFETCH" 1088(%%esi)           \n\t"\
1152                         "call funnyYCode                \n\t"\
1153                         "movq temp0, %%mm2              \n\t"\
1154                         "xorl %%ecx, %%ecx              \n\t"
1155
1156 FUNNY_Y_CODE
1157 FUNNY_Y_CODE
1158 FUNNY_Y_CODE
1159 FUNNY_Y_CODE
1160 FUNNY_Y_CODE
1161 FUNNY_Y_CODE
1162 FUNNY_Y_CODE
1163 FUNNY_Y_CODE
1164
1165                         :: "m" (src), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
1166                         "m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF)
1167                         : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
1168                 );
1169                 for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth-1; i--) dst[i] = src[srcWidth-1]*128;
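                // the loop above replicates the last source sample (pre-scaled by 128
                // to match the <<7 output format) for the tail pixels that would
                // otherwise read past the end of src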
1170         }
1171         else
1172         {
1173 #endif
1174         // no MMX2: just normal x86 asm ...
1175         asm volatile(
1176                 "xorl %%eax, %%eax              \n\t" // i
1177                 "xorl %%ebx, %%ebx              \n\t" // xx
1178                 "xorl %%ecx, %%ecx              \n\t" // 2*xalpha
1179                 "1:                             \n\t"
1180                 "movzbl  (%0, %%ebx), %%edi     \n\t" //src[xx]
1181                 "movzbl 1(%0, %%ebx), %%esi     \n\t" //src[xx+1]
1182                 "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
1183                 "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
1184                 "shll $16, %%edi                \n\t"
1185                 "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1186                 "movl %1, %%edi                 \n\t"
1187                 "shrl $9, %%esi                 \n\t"
1188                 "movw %%si, (%%edi, %%eax, 2)   \n\t"
1189                 "addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFFFF
1190                 "adcl %3, %%ebx                 \n\t" //xx+= xInc>>16 + carry
1191
1192                 "movzbl (%0, %%ebx), %%edi      \n\t" //src[xx]
1193                 "movzbl 1(%0, %%ebx), %%esi     \n\t" //src[xx+1]
1194                 "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
1195                 "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
1196                 "shll $16, %%edi                \n\t"
1197                 "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1198                 "movl %1, %%edi                 \n\t"
1199                 "shrl $9, %%esi                 \n\t"
1200                 "movw %%si, 2(%%edi, %%eax, 2)  \n\t"
1201                 "addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFFFF
1202                 "adcl %3, %%ebx                 \n\t" //xx+= xInc>>16 + carry
1203
1204
1205                 "addl $2, %%eax                 \n\t"
1206                 "cmpl %2, %%eax                 \n\t"
1207                 " jb 1b                         \n\t"
1208
1209
1210                 :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
1211                 : "%eax", "%ebx", "%ecx", "%edi", "%esi"
1212                 );
1213 #ifdef HAVE_MMX2
1214         } //if MMX2 can't be used
1215 #endif
1216 #else
1217       for(i=0;i<dstWidth;i++){
1218         register unsigned int xx=xpos>>16;
1219         register unsigned int xalpha=(xpos&0xFFFF)>>9;
1220         dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
1221         xpos+=xInc;
1222       }
1223 #endif
1224 }
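
/*
 hyscale walks the source with a 16.16 fixed-point position (a DDA). A minimal
 scalar sketch of one step, equivalent to the C fallback above:

	unsigned xx     = xpos>>16;          // integer source index
	unsigned xalpha = (xpos&0xFFFF)>>9;  // 7-bit fraction, 0..127
	dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;

 Worked example: upscaling 320 -> 640 gives xInc = (320<<16)/640 = 0x8000, so
 every second output sample has xalpha = 0x8000>>9 = 64, an exact 50/50 blend
 of two neighbouring source samples.
*/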
1225
1226 static inline void hcscale(uint16_t *dst, int dstWidth,
1227                                 uint8_t *src1, uint8_t *src2, int srcWidth, int xInc)
1228 {
1229         int xpos=0;
1230         int i;
1231 #ifdef ARCH_X86
1232 #ifdef HAVE_MMX2
1233         if(canMMX2BeUsed)
1234         {
1235                 asm volatile(
1236                 "pxor %%mm7, %%mm7              \n\t"
1237                 "pxor %%mm2, %%mm2              \n\t" // 2*xalpha
1238                 "movd %5, %%mm6                 \n\t" // xInc&0xFFFF
1239                 "punpcklwd %%mm6, %%mm6         \n\t"
1240                 "punpcklwd %%mm6, %%mm6         \n\t"
1241                 "movq %%mm6, %%mm2              \n\t"
1242                 "psllq $16, %%mm2               \n\t"
1243                 "paddw %%mm6, %%mm2             \n\t"
1244                 "psllq $16, %%mm2               \n\t"
1245                 "paddw %%mm6, %%mm2             \n\t"
1246                 "psllq $16, %%mm2               \n\t" //0,t,2t,3t               t=xInc&0xFFFF
1247                 "movq %%mm2, temp0              \n\t"
1248                 "movd %4, %%mm6                 \n\t" //(xInc*4)&0xFFFF
1249                 "punpcklwd %%mm6, %%mm6         \n\t"
1250                 "punpcklwd %%mm6, %%mm6         \n\t"
1251                 "xorl %%eax, %%eax              \n\t" // i
1252                 "movl %0, %%esi                 \n\t" // src
1253                 "movl %1, %%edi                 \n\t" // dst
1254                 "movl %3, %%edx                 \n\t" // (xInc*4)>>16
1255                 "xorl %%ecx, %%ecx              \n\t"
1256                 "xorl %%ebx, %%ebx              \n\t"
1257                 "movw %4, %%bx                  \n\t" // (xInc*4)&0xFFFF
1258
1259 #define FUNNYUVCODE \
1260                         PREFETCH" 1024(%%esi)           \n\t"\
1261                         PREFETCH" 1056(%%esi)           \n\t"\
1262                         PREFETCH" 1088(%%esi)           \n\t"\
1263                         "call funnyUVCode               \n\t"\
1264                         "movq temp0, %%mm2              \n\t"\
1265                         "xorl %%ecx, %%ecx              \n\t"
1266
1267 FUNNYUVCODE
1268 FUNNYUVCODE
1269 FUNNYUVCODE
1270 FUNNYUVCODE
1271
1272 FUNNYUVCODE
1273 FUNNYUVCODE
1274 FUNNYUVCODE
1275 FUNNYUVCODE
1276                 "xorl %%eax, %%eax              \n\t" // i
1277                 "movl %6, %%esi                 \n\t" // src2
1278                 "movl %1, %%edi                 \n\t" // dst
1279                 "addl $4096, %%edi              \n\t"
1280
1281 FUNNYUVCODE
1282 FUNNYUVCODE
1283 FUNNYUVCODE
1284 FUNNYUVCODE
1285
1286 FUNNYUVCODE
1287 FUNNYUVCODE
1288 FUNNYUVCODE
1289 FUNNYUVCODE
1290
1291                 :: "m" (src1), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
1292                   "m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF), "m" (src2)
1293                 : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
1294         );
1295                 for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth/2-1; i--)
1296                 {
1297                         dst[i] = src1[srcWidth/2-1]*128;
1298                         dst[i+2048] = src2[srcWidth/2-1]*128;
1299                 }
1300         }
1301         else
1302         {
1303 #endif
1304         asm volatile(
1305                 "xorl %%eax, %%eax              \n\t" // i
1306                 "xorl %%ebx, %%ebx              \n\t" // xx
1307                 "xorl %%ecx, %%ecx              \n\t" // 2*xalpha
1308                 "1:                             \n\t"
1309                 "movl %0, %%esi                 \n\t"
1310                 "movzbl  (%%esi, %%ebx), %%edi  \n\t" //src[xx]
1311                 "movzbl 1(%%esi, %%ebx), %%esi  \n\t" //src[xx+1]
1312                 "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
1313                 "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
1314                 "shll $16, %%edi                \n\t"
1315                 "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1316                 "movl %1, %%edi                 \n\t"
1317                 "shrl $9, %%esi                 \n\t"
1318                 "movw %%si, (%%edi, %%eax, 2)   \n\t"
1319
1320                 "movzbl  (%5, %%ebx), %%edi     \n\t" //src[xx]
1321                 "movzbl 1(%5, %%ebx), %%esi     \n\t" //src[xx+1]
1322                 "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
1323                 "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
1324                 "shll $16, %%edi                \n\t"
1325                 "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1326                 "movl %1, %%edi                 \n\t"
1327                 "shrl $9, %%esi                 \n\t"
1328                 "movw %%si, 4096(%%edi, %%eax, 2)\n\t"
1329
1330                 "addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFFFF
1331                 "adcl %3, %%ebx                 \n\t" //xx+= xInc>>16 + carry
1332                 "addl $1, %%eax                 \n\t"
1333                 "cmpl %2, %%eax                 \n\t"
1334                 " jb 1b                         \n\t"
1335
1336                 :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF),
1337                 "r" (src2)
1338                 : "%eax", "%ebx", "%ecx", "%edi", "%esi"
1339                 );
1340 #ifdef HAVE_MMX2
1341         } //if MMX2 can't be used
1342 #endif
1343 #else
1344       for(i=0;i<dstWidth;i++){
1345           register unsigned int xx=xpos>>16;
1346           register unsigned int xalpha=(xpos&0xFFFF)>>9;
1347           dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
1348           dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
1349 /* slower
1350           dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
1351           dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
1352 */
1353           xpos+=xInc;
1354       }
1355 #endif
1356 }
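
/*
 hcscale interpolates both chroma planes in one pass: U goes to dst[i] and V to
 dst[i+2048] (the asm writes at a 4096 byte offset because the entries are
 16 bit). The C fallback weights with (xalpha^127), which for a 7-bit xalpha
 equals 127-xalpha; the weights sum to 127 instead of 128, a tiny precision
 loss traded against the extra shift/add of the exact "slower" variant.
*/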
1357
1358
1359 // *** bilinear scaling and yuv->rgb or yuv->yuv conversion of yv12 slices:
1360 // *** Note: it's called multiple times while decoding a frame, first time y==0
1361 // *** Designed to upscale, but may work for downscale too.
1362 // s_xinc = (src_width << 16) / dst_width
1363 // s_yinc = (src_height << 16) / dst_height
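// e.g. upscaling 320x240 to 640x480: s_xinc = (320<<16)/640 = 0x8000 and
// s_yinc = (240<<16)/480 = 0x8000, i.e. half a source step per output step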
1364 void SwScale_YV12slice(unsigned char* srcptr[],int stride[], int y, int h,
1365                              uint8_t* dstptr[], int dststride, int dstw, int dstbpp,
1366                              unsigned int s_xinc,unsigned int s_yinc){
1367
1368 // scaling factors:
1369 //static int s_yinc=(vo_dga_src_height<<16)/vo_dga_vp_height;
1370 //static int s_xinc=(vo_dga_src_width<<8)/vo_dga_vp_width;
1371
1372 unsigned int s_xinc2;
1373
1374 static int s_srcypos; // points to the dst pixel's center in the source (0 is the center of pixel 0,0 in src)
1375 static int s_ypos;
1376
1377 // last horizontally interpolated lines, used to avoid unnecessary calculations
1378 static int s_last_ypos;
1379 static int s_last_y1pos;
1380
1381 static int static_dstw;
1382
1383 #ifdef HAVE_MMX2
1384 // used to detect a horizontal size change
1385 static int old_dstw= -1;
1386 static int old_s_xinc= -1;
1387 #endif
1388
1389 int srcWidth= (dstw*s_xinc + 0x8000)>>16;
1390 int dstUVw= fullUVIpol ? dstw : dstw/2;
1391
1392
1393 #ifdef HAVE_MMX2
1394 canMMX2BeUsed= (s_xinc <= 0x10000 && (dstw&31)==0 && (srcWidth&15)==0) ? 1 : 0;
1395 #endif
1396
1397 // match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
1398 // n-2 is the last chrominance sample available
1399 // FIXME this is not perfect, but no one should notice the difference; the more correct variant
1400 // would be like the vertical one, but that would require some special code for the
1401 // first and last pixel
1402 if(canMMX2BeUsed)       s_xinc+= 20;
1403 else                    s_xinc = ((srcWidth-2)<<16)/(dstw-2) - 20;
1404
1405 if(fullUVIpol && !(dstbpp==12))         s_xinc2= s_xinc>>1;
1406 else                                    s_xinc2= s_xinc;
1407   // force calculation of the horizontal interpolation of the first line
1408
1409   if(y==0){
1410         s_last_ypos=-99;
1411         s_last_y1pos=-99;
1412         s_srcypos= s_yinc/2 - 0x8000;
1413         s_ypos=0;
1414 #ifdef HAVE_MMX2
1415 // can't downscale!!!
1416         if((old_s_xinc != s_xinc || old_dstw!=dstw) && canMMX2BeUsed)
1417         {
1418                 uint8_t *fragment;
1419                 int imm8OfPShufW1;
1420                 int imm8OfPShufW2;
1421                 int fragmentLength;
1422
1423                 int xpos, xx, xalpha, i;
1424
1425                 old_s_xinc= s_xinc;
1426                 old_dstw= dstw;
1427
1428                 static_dstw= dstw;
1429
1430                 // create an optimized horizontal scaling routine
1431
1432                 //code fragment
1433
1434                 asm volatile(
1435                         "jmp 9f                         \n\t"
1436                 // Begin
1437                         "0:                             \n\t"
1438                         "movq (%%esi), %%mm0            \n\t" //FIXME Alignment
1439                         "movq %%mm0, %%mm1              \n\t"
1440                         "psrlq $8, %%mm0                \n\t"
1441                         "punpcklbw %%mm7, %%mm1 \n\t"
1442                         "movq %%mm2, %%mm3              \n\t"
1443                         "punpcklbw %%mm7, %%mm0 \n\t"
1444                         "addw %%bx, %%cx                \n\t" //2*xalpha += (4*s_xinc)&0xFFFF
1445                         "pshufw $0xFF, %%mm1, %%mm1     \n\t"
1446                         "1:                             \n\t"
1447                         "adcl %%edx, %%esi              \n\t" //xx+= (4*s_xinc)>>16 + carry
1448                         "pshufw $0xFF, %%mm0, %%mm0     \n\t"
1449                         "2:                             \n\t"
1450                         "psrlw $9, %%mm3                \n\t"
1451                         "psubw %%mm1, %%mm0             \n\t"
1452                         "pmullw %%mm3, %%mm0            \n\t"
1453                         "paddw %%mm6, %%mm2             \n\t" // 2*alpha += xpos&0xFFFF
1454                         "psllw $7, %%mm1                \n\t"
1455                         "paddw %%mm1, %%mm0             \n\t"
1456
1457                         "movq %%mm0, (%%edi, %%eax)     \n\t"
1458
1459                         "addl $8, %%eax                 \n\t"
1460                 // End
1461                         "9:                             \n\t"
1462 //              "int $3\n\t"
1463                         "leal 0b, %0                    \n\t"
1464                         "leal 1b, %1                    \n\t"
1465                         "leal 2b, %2                    \n\t"
1466                         "decl %1                        \n\t"
1467                         "decl %2                        \n\t"
1468                         "subl %0, %1                    \n\t"
1469                         "subl %0, %2                    \n\t"
1470                         "leal 9b, %3                    \n\t"
1471                         "subl %0, %3                    \n\t"
1472                         :"=r" (fragment), "=r" (imm8OfPShufW1), "=r" (imm8OfPShufW2),
1473                          "=r" (fragmentLength)
1474                 );
1475
1476                 xpos= 0; //s_xinc/2 - 0x8000; // difference between pixel centers
1477
1478                 /* choose xinc so that all 8 parts fit exactly
1479                    Note: we cannot use just 1 part because it would not fit in the code cache */
1480 //              s_xinc2_diff= -((((s_xinc2*(dstw/8))&0xFFFF))/(dstw/8))-10;
1481 //              s_xinc_diff= -((((s_xinc*(dstw/8))&0xFFFF))/(dstw/8));
1482 #ifdef ALT_ERROR
1483 //              s_xinc2_diff+= ((0x10000/(dstw/8)));
1484 #endif
1485 //              s_xinc_diff= s_xinc2_diff*2;
1486
1487 //              s_xinc2+= s_xinc2_diff;
1488 //              s_xinc+= s_xinc_diff;
1489
1490 //              old_s_xinc= s_xinc;
1491
1492                 for(i=0; i<dstw/8; i++)
1493                 {
1494                         int xx=xpos>>16;
1495
1496                         if((i&3) == 0)
1497                         {
1498                                 int a=0;
1499                                 int b=((xpos+s_xinc)>>16) - xx;
1500                                 int c=((xpos+s_xinc*2)>>16) - xx;
1501                                 int d=((xpos+s_xinc*3)>>16) - xx;
1502
1503                                 memcpy(funnyYCode + fragmentLength*i/4, fragment, fragmentLength);
1504
1505                                 funnyYCode[fragmentLength*i/4 + imm8OfPShufW1]=
1506                                 funnyYCode[fragmentLength*i/4 + imm8OfPShufW2]=
1507                                         a | (b<<2) | (c<<4) | (d<<6);
1508
1509                                 // if we don't need to read 8 bytes then don't :) this reduces the
1510                                 // chance of crossing a cache line (0x6E turns the fragment's leading movq into a movd, a 4-byte load)
1511                                 if(d<3) funnyYCode[fragmentLength*i/4 + 1]= 0x6E;
1512
1513                                 funnyYCode[fragmentLength*(i+4)/4]= RET;
1514                         }
1515                         xpos+=s_xinc;
1516                 }
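                // pshufw's 8-bit immediate encodes one source-word index per 2 bits,
                // so a | (b<<2) | (c<<4) | (d<<6) selects words a,b,c,d; e.g. the
                // offsets b=1, c=1, d=2 give 0 | (1<<2) | (1<<4) | (2<<6) = 0x94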
1517
1518                 xpos= 0; //s_xinc2/2 - 0x10000; // difference between centers of chrom samples
1519                 for(i=0; i<dstUVw/8; i++)
1520                 {
1521                         int xx=xpos>>16;
1522
1523                         if((i&3) == 0)
1524                         {
1525                                 int a=0;
1526                                 int b=((xpos+s_xinc2)>>16) - xx;
1527                                 int c=((xpos+s_xinc2*2)>>16) - xx;
1528                                 int d=((xpos+s_xinc2*3)>>16) - xx;
1529
1530                                 memcpy(funnyUVCode + fragmentLength*i/4, fragment, fragmentLength);
1531
1532                                 funnyUVCode[fragmentLength*i/4 + imm8OfPShufW1]=
1533                                 funnyUVCode[fragmentLength*i/4 + imm8OfPShufW2]=
1534                                         a | (b<<2) | (c<<4) | (d<<6);
1535
1536                                 // if we don't need to read 8 bytes then don't :) this reduces the
1537                                 // chance of crossing a cache line (0x6E turns the fragment's leading movq into a movd, a 4-byte load)
1538                                 if(d<3) funnyUVCode[fragmentLength*i/4 + 1]= 0x6E;
1539
1540                                 funnyUVCode[fragmentLength*(i+4)/4]= RET;
1541                         }
1542                         xpos+=s_xinc2;
1543                 }
1544 //              funnyCode[0]= RET;
1545         }
1546
1547 #endif // HAVE_MMX2
1548   } // reset counters
1549
1550   while(1){
1551     unsigned char *dest =dstptr[0]+dststride*s_ypos;
1552     unsigned char *uDest=dstptr[1]+(dststride>>1)*(s_ypos>>1);
1553     unsigned char *vDest=dstptr[2]+(dststride>>1)*(s_ypos>>1);
1554
1555     int y0=(s_srcypos + 0xFFFF)>>16;  // first luminance source line number below the dst line
1556         // points to the dst pixel's center in the source (0 is the center of pixel 0,0 in src)
1557     int srcuvpos= dstbpp==12 ?  s_srcypos + s_yinc/2 - 0x8000 :
1558                                 s_srcypos - 0x8000;
1559     int y1=(srcuvpos + 0x1FFFF)>>17; // first chrominance source line number below the dst line
1560     int yalpha=((s_srcypos-1)&0xFFFF)>>4;
1561     int uvalpha=((srcuvpos-1)&0x1FFFF)>>5;
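    // yalpha/uvalpha are the fractional vertical positions reduced to 12 bits
    // (0..4095); they weight the bottom line (buf1/uvbuf1) against the top one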
1562     uint16_t *buf0=pix_buf_y[y0&1];             // top line of the interpolated slice
1563     uint16_t *buf1=pix_buf_y[((y0+1)&1)];       // bottom line of the interpolated slice
1564     uint16_t *uvbuf0=pix_buf_uv[y1&1];          // top line of the interpolated slice
1565     uint16_t *uvbuf1=pix_buf_uv[(y1+1)&1];      // bottom line of the interpolated slice
1566     int i;
1567
1568     if(y0>=y+h) break; // FIXME wrong, skips last lines, but they are duplicates anyway
1569
1570     if((y0&1) && dstbpp==12) uvalpha=-1; // no new chroma line for odd lines (YV12 chroma is vertically subsampled)
1571
1572     s_ypos++; s_srcypos+=s_yinc;
1573
1574     //only interpolate the src line horizontally if we didn't do it already
1575         if(s_last_ypos!=y0)
1576         {
1577                 unsigned char *src;
1578                 // skip if first line has been horiz scaled already
1579                 if(s_last_ypos != y0-1)
1580                 {
1581                         // check if first line is before any available src lines
1582                         if(y0-1 < y)    src=srcptr[0]+(0     )*stride[0];
1583                         else            src=srcptr[0]+(y0-y-1)*stride[0];
1584
1585                         hyscale(buf0, dstw, src, srcWidth, s_xinc);
1586                 }
1587                 // check if second line is after any available src lines
1588                 if(y0-y >= h)   src=srcptr[0]+(h-1)*stride[0];
1589                 else            src=srcptr[0]+(y0-y)*stride[0];
1590
1591                 // the MIN() is required to avoid reusing lines which were not available
1592                 s_last_ypos= MIN(y0, y+h-1);
1593                 hyscale(buf1, dstw, src, srcWidth, s_xinc);
1594         }
1595 //      printf("%d %d %d %d\n", y, y1, s_last_y1pos, h);
1596       // *** horizontal scale U and V lines to temp buffer
1597         if(s_last_y1pos!=y1)
1598         {
1599                 uint8_t *src1, *src2;
1600                 // skip if first line has been horiz scaled already
1601                 if(s_last_y1pos != y1-1)
1602                 {
1603                         // check if first line is before any available src lines
1604                         if(y1-y/2-1 < 0)
1605                         {
1606                                 src1= srcptr[1]+(0)*stride[1];
1607                                 src2= srcptr[2]+(0)*stride[2];
1608                         }else{
1609                                 src1= srcptr[1]+(y1-y/2-1)*stride[1];
1610                                 src2= srcptr[2]+(y1-y/2-1)*stride[2];
1611                         }
1612                         hcscale(uvbuf0, dstUVw, src1, src2, srcWidth, s_xinc2);
1613                 }
1614
1615                 // check if second line is after any available src lines
1616                 if(y1 - y/2 >= h/2)
1617                 {
1618                         src1= srcptr[1]+(h/2-1)*stride[1];
1619                         src2= srcptr[2]+(h/2-1)*stride[2];
1620                 }else{
1621                         src1= srcptr[1]+(y1-y/2)*stride[1];
1622                         src2= srcptr[2]+(y1-y/2)*stride[2];
1623                 }
1624                 hcscale(uvbuf1, dstUVw, src1, src2, srcWidth, s_xinc2);
1625
1626                 // the MIN() is required to avoid reusing lines which were not available
1627                 s_last_y1pos= MIN(y1, y/2+h/2-1);
1628         }
1629
1630         if(dstbpp==12) //YV12
1631                 yuv2yuv(buf0, buf1, uvbuf0, uvbuf1, dest, uDest, vDest, dstw, yalpha, uvalpha);
1632         else if(ABS(s_yinc - 0x10000) < 10)
1633                 yuv2rgb1(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
1634         else
1635                 yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
1636
1637 #ifdef HAVE_MMX
1638         b16Dither= b16Dither1;
1639         b16Dither1= b16Dither2;
1640         b16Dither2= b16Dither;
1641
1642         g16Dither= g16Dither1;
1643         g16Dither1= g16Dither2;
1644         g16Dither2= g16Dither;
1645 #endif
1646   }
1647
1648 #ifdef HAVE_MMX
1649         __asm __volatile(SFENCE:::"memory");
1650         __asm __volatile(EMMS:::"memory");
1651 #endif
1652 }
1653
1654
1655 void SwScale_Init(){
1656     // generating tables:
1657     int i;
1658     for(i=0;i<256;i++){
1659         clip_table[i]=0;
1660         clip_table[i+256]=i;
1661         clip_table[i+512]=255;
1662         yuvtab_2568[i]=(0x2568*(i-16))+(256<<13);
1663         yuvtab_3343[i]=0x3343*(i-128);
1664         yuvtab_0c92[i]=-0x0c92*(i-128);
1665         yuvtab_1a1e[i]=-0x1a1e*(i-128);
1666         yuvtab_40cf[i]=0x40cf*(i-128);
1667     }
1668
1669 }
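
#if 0
/* Illustrative usage sketch (not used by the code above; the frame buffers and
   their strides are assumed to exist elsewhere): scale one complete YV12 frame
   to BGR32 in a single slice. */
static void example_scale_frame(unsigned char *src[3], int srcstride[3],
                                int srcW, int srcH,
                                uint8_t *rgbDst, int rgbStride,
                                int dstW, int dstH)
{
	uint8_t *dstptr[3]= {rgbDst, NULL, NULL};      // RGB output uses plane 0 only
	unsigned int xinc= ((unsigned)srcW<<16)/dstW;  // see the s_xinc formula above
	unsigned int yinc= ((unsigned)srcH<<16)/dstH;

	SwScale_Init();                                // build the lookup tables once
	// a real caller may pass the frame in several slices (y and h advancing);
	// here the whole frame is handed over as one slice starting at y==0
	SwScale_YV12slice(src, srcstride, 0, srcH, dstptr, rgbStride, dstW, 32,
	                  xinc, yinc);
}
#endif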