
// Software scaling and colorspace conversion routines for MPlayer

// Original C implementation by A'rpi/ESP-team <arpi@thot.banki.hu>
// Current version mostly by Michael Niedermayer (michaelni@gmx.at)
// The parts written by Michael are under GNU GPL

#include <inttypes.h>
#include <string.h>
#include "../config.h"
#include "swscale.h"
#include "../mmx_defs.h"
#undef MOVNTQ

//#undef HAVE_MMX2
//#undef HAVE_MMX
//#undef ARCH_X86
#define DITHER1XBPP
int fullUVIpol=0;
// disables the unscaled height version
int allwaysIpol=0;

#define RET 0xC3 // near return opcode
/*
NOTES

known BUGS with known cause (no bug reports please, but patches are welcome :) )
horizontal MMX2 scaler reads 1-7 samples too many (might cause a sig11)

Supported output formats: BGR15, BGR16, BGR24, BGR32
BGR15 & BGR16 MMX versions support dithering
Special versions: fast Y 1:1 scaling (no interpolation in y direction)

TODO
more intelligent misalignment avoidance for the horizontal scaler
bicubic scaler
*/

#define ABS(a) ((a) > 0 ? (a) : (-(a)))
#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))

#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif

#ifdef HAVE_MMX2
#define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif


#ifdef HAVE_MMX
static uint64_t __attribute__((aligned(8))) yCoeff=    0x2568256825682568LL;
static uint64_t __attribute__((aligned(8))) vrCoeff=   0x3343334333433343LL;
static uint64_t __attribute__((aligned(8))) ubCoeff=   0x40cf40cf40cf40cfLL;
static uint64_t __attribute__((aligned(8))) vgCoeff=   0xE5E2E5E2E5E2E5E2LL;
static uint64_t __attribute__((aligned(8))) ugCoeff=   0xF36EF36EF36EF36ELL;
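// The five coefficients above appear to be the BT.601 YCbCr->RGB factors in
// signed 13-bit fixed point, word-replicated for pmulhw: 0x2568/8192 ~ 1.164
// (Y scale), 0x3343/8192 ~ 1.596 (V->R), 0x40cf/8192 ~ 2.018 (U->B),
// 0xE5E2 = -0x1A1E ~ -0.813 (V->G), 0xF36E = -0x0C92 ~ -0.391 (U->G).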
static uint64_t __attribute__((aligned(8))) w400=      0x0400040004000400LL;
static uint64_t __attribute__((aligned(8))) w80=       0x0080008000800080LL;
static uint64_t __attribute__((aligned(8))) w10=       0x0010001000100010LL;
static uint64_t __attribute__((aligned(8))) bm00001111=0x00000000FFFFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm00000111=0x0000000000FFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm11111000=0xFFFFFFFFFF000000LL;

static uint64_t __attribute__((aligned(8))) b16Dither= 0x0004000400040004LL;
static uint64_t __attribute__((aligned(8))) b16Dither1=0x0004000400040004LL;
static uint64_t __attribute__((aligned(8))) b16Dither2=0x0602060206020602LL;
static uint64_t __attribute__((aligned(8))) g16Dither= 0x0002000200020002LL;
static uint64_t __attribute__((aligned(8))) g16Dither1=0x0002000200020002LL;
static uint64_t __attribute__((aligned(8))) g16Dither2=0x0301030103010301LL;

static uint64_t __attribute__((aligned(8))) b16Mask=   0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g16Mask=   0x07E007E007E007E0LL;
static uint64_t __attribute__((aligned(8))) r16Mask=   0xF800F800F800F800LL;
static uint64_t __attribute__((aligned(8))) b15Mask=   0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g15Mask=   0x03E003E003E003E0LL;
static uint64_t __attribute__((aligned(8))) r15Mask=   0x7C007C007C007C00LL;

static uint64_t __attribute__((aligned(8))) temp0;
static uint64_t __attribute__((aligned(8))) asm_yalpha1;
static uint64_t __attribute__((aligned(8))) asm_uvalpha1;
#endif

// temporary storage for 4 yuv lines:
// 16bit for now (mmx likes it more compact)
#ifdef HAVE_MMX
static uint16_t __attribute__((aligned(8))) pix_buf_y[4][2048];
static uint16_t __attribute__((aligned(8))) pix_buf_uv[2][2048*2];
#else
static uint16_t pix_buf_y[4][2048];
static uint16_t pix_buf_uv[2][2048*2];
#endif

// clipping helper table for C implementations:
static unsigned char clip_table[768];
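// The 768 entries leave 256 of headroom on each side of the 0..255 range;
// presumably the table is filled by the scaler's init code (not in this
// file) roughly as clip_table[i]= MIN(MAX(i-256, 0), 255), so that
// clip_table[x+256] clamps x to 0..255 without a branch.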

// yuv->rgb conversion tables:
static    int yuvtab_2568[256];
static    int yuvtab_3343[256];
static    int yuvtab_0c92[256];
static    int yuvtab_1a1e[256];
static    int yuvtab_40cf[256];
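// The table names encode the fixed-point coefficients above; judging from
// their use below (summed, then >>13 into clip_table), the init code
// presumably builds them along these lines (a sketch, not the actual code):
//      for(i=0; i<256; i++){
//              yuvtab_2568[i]= 0x2568*(i- 16) + (256<<13); // bias lands in clip_table headroom
//              yuvtab_3343[i]= 0x3343*(i-128);
//              yuvtab_0c92[i]=-0x0c92*(i-128);
//              yuvtab_1a1e[i]=-0x1a1e*(i-128);
//              yuvtab_40cf[i]= 0x40cf*(i-128);
//      }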


static uint8_t funnyYCode[10000];
static uint8_t funnyUVCode[10000];
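// funnyYCode/funnyUVCode are filled at runtime with generated MMX2 scaler
// code (terminated by the RET opcode defined above) and are invoked via
// "call funnyYCode" / "call funnyUVCode" from the horizontal scalers below.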

static int canMMX2BeUsed=0;

#define FULL_YSCALEYUV2RGB \
                "pxor %%mm7, %%mm7              \n\t"\
                "movd %6, %%mm6                 \n\t" /*yalpha1*/\
                "punpcklwd %%mm6, %%mm6         \n\t"\
                "punpcklwd %%mm6, %%mm6         \n\t"\
                "movd %7, %%mm5                 \n\t" /*uvalpha1*/\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "xorl %%eax, %%eax              \n\t"\
                "1:                             \n\t"\
                "movq (%0, %%eax, 2), %%mm0     \n\t" /*buf0[eax]*/\
                "movq (%1, %%eax, 2), %%mm1     \n\t" /*buf1[eax]*/\
                "movq (%2, %%eax,2), %%mm2      \n\t" /* uvbuf0[eax]*/\
                "movq (%3, %%eax,2), %%mm3      \n\t" /* uvbuf1[eax]*/\
                "psubw %%mm1, %%mm0             \n\t" /* buf0[eax] - buf1[eax]*/\
                "psubw %%mm3, %%mm2             \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
                "pmulhw %%mm6, %%mm0            \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
                "pmulhw %%mm5, %%mm2            \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
                "psraw $4, %%mm1                \n\t" /* buf1[eax] >>4*/\
                "movq 4096(%2, %%eax,2), %%mm4  \n\t" /* uvbuf0[eax+2048]*/\
                "psraw $4, %%mm3                \n\t" /* uvbuf1[eax] >>4*/\
                "paddw %%mm0, %%mm1             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
                "movq 4096(%3, %%eax,2), %%mm0  \n\t" /* uvbuf1[eax+2048]*/\
                "paddw %%mm2, %%mm3             \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
                "psubw %%mm0, %%mm4             \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
                "psubw w80, %%mm1               \n\t" /* 8(Y-16)*/\
                "psubw w400, %%mm3              \n\t" /* 8(U-128)*/\
                "pmulhw yCoeff, %%mm1           \n\t"\
\
\
                "pmulhw %%mm5, %%mm4            \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
                "movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
                "pmulhw ubCoeff, %%mm3          \n\t"\
                "psraw $4, %%mm0                \n\t" /* uvbuf1[eax+2048] >>4*/\
                "pmulhw ugCoeff, %%mm2          \n\t"\
                "paddw %%mm4, %%mm0             \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
                "psubw w400, %%mm0              \n\t" /* (V-128)8*/\
\
\
                "movq %%mm0, %%mm4              \n\t" /* (V-128)8*/\
                "pmulhw vrCoeff, %%mm0          \n\t"\
                "pmulhw vgCoeff, %%mm4          \n\t"\
                "paddw %%mm1, %%mm3             \n\t" /* B*/\
                "paddw %%mm1, %%mm0             \n\t" /* R*/\
                "packuswb %%mm3, %%mm3          \n\t"\
\
                "packuswb %%mm0, %%mm0          \n\t"\
                "paddw %%mm4, %%mm2             \n\t"\
                "paddw %%mm2, %%mm1             \n\t" /* G*/\
\
                "packuswb %%mm1, %%mm1          \n\t"
#define YSCALEYUV2RGB \
                "movd %6, %%mm6                 \n\t" /*yalpha1*/\
                "punpcklwd %%mm6, %%mm6         \n\t"\
                "punpcklwd %%mm6, %%mm6         \n\t"\
                "movq %%mm6, asm_yalpha1        \n\t"\
                "movd %7, %%mm5                 \n\t" /*uvalpha1*/\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "movq %%mm5, asm_uvalpha1       \n\t"\
                "xorl %%eax, %%eax              \n\t"\
                "1:                             \n\t"\
                "movq (%2, %%eax), %%mm2        \n\t" /* uvbuf0[eax]*/\
                "movq (%3, %%eax), %%mm3        \n\t" /* uvbuf1[eax]*/\
                "movq 4096(%2, %%eax), %%mm5    \n\t" /* uvbuf0[eax+2048]*/\
                "movq 4096(%3, %%eax), %%mm4    \n\t" /* uvbuf1[eax+2048]*/\
                "psubw %%mm3, %%mm2             \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
                "psubw %%mm4, %%mm5             \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
                "movq asm_uvalpha1, %%mm0       \n\t"\
                "pmulhw %%mm0, %%mm2            \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
                "pmulhw %%mm0, %%mm5            \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
                "psraw $4, %%mm3                \n\t" /* uvbuf1[eax] >>4*/\
                "psraw $4, %%mm4                \n\t" /* uvbuf1[eax+2048] >>4*/\
                "paddw %%mm2, %%mm3             \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
                "paddw %%mm5, %%mm4             \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
                "psubw w400, %%mm3              \n\t" /* (U-128)8*/\
                "psubw w400, %%mm4              \n\t" /* (V-128)8*/\
                "movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
                "movq %%mm4, %%mm5              \n\t" /* (V-128)8*/\
                "pmulhw ugCoeff, %%mm3          \n\t"\
                "pmulhw vgCoeff, %%mm4          \n\t"\
        /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
                "movq (%0, %%eax, 2), %%mm0     \n\t" /*buf0[eax]*/\
                "movq (%1, %%eax, 2), %%mm1     \n\t" /*buf1[eax]*/\
                "movq 8(%0, %%eax, 2), %%mm6    \n\t" /*buf0[eax+4]*/\
                "movq 8(%1, %%eax, 2), %%mm7    \n\t" /*buf1[eax+4]*/\
                "psubw %%mm1, %%mm0             \n\t" /* buf0[eax] - buf1[eax]*/\
                "psubw %%mm7, %%mm6             \n\t" /* buf0[eax+4] - buf1[eax+4]*/\
                "pmulhw asm_yalpha1, %%mm0      \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
                "pmulhw asm_yalpha1, %%mm6      \n\t" /* (buf0[eax+4] - buf1[eax+4])yalpha1>>16*/\
                "psraw $4, %%mm1                \n\t" /* buf1[eax] >>4*/\
                "psraw $4, %%mm7                \n\t" /* buf1[eax+4] >>4*/\
                "paddw %%mm0, %%mm1             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
                "paddw %%mm6, %%mm7             \n\t" /* buf0[eax+4]yalpha1 + buf1[eax+4](1-yalpha1) >>16*/\
                "pmulhw ubCoeff, %%mm2          \n\t"\
                "pmulhw vrCoeff, %%mm5          \n\t"\
                "psubw w80, %%mm1               \n\t" /* 8(Y-16)*/\
                "psubw w80, %%mm7               \n\t" /* 8(Y-16)*/\
                "pmulhw yCoeff, %%mm1           \n\t"\
                "pmulhw yCoeff, %%mm7           \n\t"\
        /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
                "paddw %%mm3, %%mm4             \n\t"\
                "movq %%mm2, %%mm0              \n\t"\
                "movq %%mm5, %%mm6              \n\t"\
                "movq %%mm4, %%mm3              \n\t"\
                "punpcklwd %%mm2, %%mm2         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm4, %%mm4         \n\t"\
                "paddw %%mm1, %%mm2             \n\t"\
                "paddw %%mm1, %%mm5             \n\t"\
                "paddw %%mm1, %%mm4             \n\t"\
                "punpckhwd %%mm0, %%mm0         \n\t"\
                "punpckhwd %%mm6, %%mm6         \n\t"\
                "punpckhwd %%mm3, %%mm3         \n\t"\
                "paddw %%mm7, %%mm0             \n\t"\
                "paddw %%mm7, %%mm6             \n\t"\
                "paddw %%mm7, %%mm3             \n\t"\
                /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
                "packuswb %%mm0, %%mm2          \n\t"\
                "packuswb %%mm6, %%mm5          \n\t"\
                "packuswb %%mm3, %%mm4          \n\t"\
                "pxor %%mm7, %%mm7              \n\t"
#define YSCALEYUV2RGB1 \
                "xorl %%eax, %%eax              \n\t"\
                "1:                             \n\t"\
                "movq (%2, %%eax), %%mm3        \n\t" /* uvbuf0[eax]*/\
                "movq 4096(%2, %%eax), %%mm4    \n\t" /* uvbuf0[eax+2048]*/\
                "psraw $4, %%mm3                \n\t" /* uvbuf0[eax] >>4*/\
                "psraw $4, %%mm4                \n\t" /* uvbuf0[eax+2048] >>4*/\
                "psubw w400, %%mm3              \n\t" /* (U-128)8*/\
                "psubw w400, %%mm4              \n\t" /* (V-128)8*/\
                "movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
                "movq %%mm4, %%mm5              \n\t" /* (V-128)8*/\
                "pmulhw ugCoeff, %%mm3          \n\t"\
                "pmulhw vgCoeff, %%mm4          \n\t"\
        /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
                "movq (%0, %%eax, 2), %%mm1     \n\t" /*buf0[eax]*/\
                "movq 8(%0, %%eax, 2), %%mm7    \n\t" /*buf0[eax+4]*/\
                "psraw $4, %%mm1                \n\t" /* buf0[eax] >>4*/\
                "psraw $4, %%mm7                \n\t" /* buf0[eax+4] >>4*/\
                "pmulhw ubCoeff, %%mm2          \n\t"\
                "pmulhw vrCoeff, %%mm5          \n\t"\
                "psubw w80, %%mm1               \n\t" /* 8(Y-16)*/\
                "psubw w80, %%mm7               \n\t" /* 8(Y-16)*/\
                "pmulhw yCoeff, %%mm1           \n\t"\
                "pmulhw yCoeff, %%mm7           \n\t"\
        /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
                "paddw %%mm3, %%mm4             \n\t"\
                "movq %%mm2, %%mm0              \n\t"\
                "movq %%mm5, %%mm6              \n\t"\
                "movq %%mm4, %%mm3              \n\t"\
                "punpcklwd %%mm2, %%mm2         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm4, %%mm4         \n\t"\
                "paddw %%mm1, %%mm2             \n\t"\
                "paddw %%mm1, %%mm5             \n\t"\
                "paddw %%mm1, %%mm4             \n\t"\
                "punpckhwd %%mm0, %%mm0         \n\t"\
                "punpckhwd %%mm6, %%mm6         \n\t"\
                "punpckhwd %%mm3, %%mm3         \n\t"\
                "paddw %%mm7, %%mm0             \n\t"\
                "paddw %%mm7, %%mm6             \n\t"\
                "paddw %%mm7, %%mm3             \n\t"\
                /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
                "packuswb %%mm0, %%mm2          \n\t"\
                "packuswb %%mm6, %%mm5          \n\t"\
                "packuswb %%mm3, %%mm4          \n\t"\
                "pxor %%mm7, %%mm7              \n\t"
// do vertical chrominance interpolation
#define YSCALEYUV2RGB1b \
                "xorl %%eax, %%eax              \n\t"\
                "1:                             \n\t"\
                "movq (%2, %%eax), %%mm2        \n\t" /* uvbuf0[eax]*/\
                "movq (%3, %%eax), %%mm3        \n\t" /* uvbuf1[eax]*/\
                "movq 4096(%2, %%eax), %%mm5    \n\t" /* uvbuf0[eax+2048]*/\
                "movq 4096(%3, %%eax), %%mm4    \n\t" /* uvbuf1[eax+2048]*/\
                "paddw %%mm2, %%mm3             \n\t"\
                "paddw %%mm5, %%mm4             \n\t"\
                "psraw $5, %%mm3                \n\t" /* (uvbuf0[eax] + uvbuf1[eax]) >>5*/\
                "psraw $5, %%mm4                \n\t" /* (uvbuf0[eax+2048] + uvbuf1[eax+2048]) >>5*/\
                "psubw w400, %%mm3              \n\t" /* (U-128)8*/\
                "psubw w400, %%mm4              \n\t" /* (V-128)8*/\
                "movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
                "movq %%mm4, %%mm5              \n\t" /* (V-128)8*/\
                "pmulhw ugCoeff, %%mm3          \n\t"\
                "pmulhw vgCoeff, %%mm4          \n\t"\
        /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
                "movq (%0, %%eax, 2), %%mm1     \n\t" /*buf0[eax]*/\
                "movq 8(%0, %%eax, 2), %%mm7    \n\t" /*buf0[eax+4]*/\
                "psraw $4, %%mm1                \n\t" /* buf0[eax] >>4*/\
                "psraw $4, %%mm7                \n\t" /* buf0[eax+4] >>4*/\
                "pmulhw ubCoeff, %%mm2          \n\t"\
                "pmulhw vrCoeff, %%mm5          \n\t"\
                "psubw w80, %%mm1               \n\t" /* 8(Y-16)*/\
                "psubw w80, %%mm7               \n\t" /* 8(Y-16)*/\
                "pmulhw yCoeff, %%mm1           \n\t"\
                "pmulhw yCoeff, %%mm7           \n\t"\
        /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
                "paddw %%mm3, %%mm4             \n\t"\
                "movq %%mm2, %%mm0              \n\t"\
                "movq %%mm5, %%mm6              \n\t"\
                "movq %%mm4, %%mm3              \n\t"\
                "punpcklwd %%mm2, %%mm2         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm4, %%mm4         \n\t"\
                "paddw %%mm1, %%mm2             \n\t"\
                "paddw %%mm1, %%mm5             \n\t"\
                "paddw %%mm1, %%mm4             \n\t"\
                "punpckhwd %%mm0, %%mm0         \n\t"\
                "punpckhwd %%mm6, %%mm6         \n\t"\
                "punpckhwd %%mm3, %%mm3         \n\t"\
                "paddw %%mm7, %%mm0             \n\t"\
                "paddw %%mm7, %%mm6             \n\t"\
                "paddw %%mm7, %%mm3             \n\t"\
                /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
                "packuswb %%mm0, %%mm2          \n\t"\
                "packuswb %%mm6, %%mm5          \n\t"\
                "packuswb %%mm3, %%mm4          \n\t"\
                "pxor %%mm7, %%mm7              \n\t"
#define WRITEBGR32 \
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
                        "movq %%mm2, %%mm1              \n\t" /* B */\
                        "movq %%mm5, %%mm6              \n\t" /* R */\
                        "punpcklbw %%mm4, %%mm2         \n\t" /* GBGBGBGB 0 */\
                        "punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R 0 */\
                        "punpckhbw %%mm4, %%mm1         \n\t" /* GBGBGBGB 2 */\
                        "punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R 2 */\
                        "movq %%mm2, %%mm0              \n\t" /* GBGBGBGB 0 */\
                        "movq %%mm1, %%mm3              \n\t" /* GBGBGBGB 2 */\
                        "punpcklwd %%mm5, %%mm0         \n\t" /* 0RGB0RGB 0 */\
                        "punpckhwd %%mm5, %%mm2         \n\t" /* 0RGB0RGB 1 */\
                        "punpcklwd %%mm6, %%mm1         \n\t" /* 0RGB0RGB 2 */\
                        "punpckhwd %%mm6, %%mm3         \n\t" /* 0RGB0RGB 3 */\
\
                        MOVNTQ(%%mm0, (%4, %%eax, 4))\
                        MOVNTQ(%%mm2, 8(%4, %%eax, 4))\
                        MOVNTQ(%%mm1, 16(%4, %%eax, 4))\
                        MOVNTQ(%%mm3, 24(%4, %%eax, 4))\
\
                        "addl $8, %%eax                 \n\t"\
                        "cmpl %5, %%eax                 \n\t"\
                        " jb 1b                         \n\t"
#define WRITEBGR16 \
                        "movq %%mm2, %%mm1              \n\t" /* B */\
                        "movq %%mm4, %%mm3              \n\t" /* G */\
                        "movq %%mm5, %%mm6              \n\t" /* R */\
\
                        "punpcklbw %%mm7, %%mm3         \n\t" /* 0G0G0G0G */\
                        "punpcklbw %%mm7, %%mm2         \n\t" /* 0B0B0B0B */\
                        "punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R */\
\
                        "psrlw $3, %%mm2                \n\t"\
                        "psllw $3, %%mm3                \n\t"\
                        "psllw $8, %%mm5                \n\t"\
\
                        "pand g16Mask, %%mm3            \n\t"\
                        "pand r16Mask, %%mm5            \n\t"\
\
                        "por %%mm3, %%mm2               \n\t"\
                        "por %%mm5, %%mm2               \n\t"\
\
                        "punpckhbw %%mm7, %%mm4         \n\t" /* 0G0G0G0G */\
                        "punpckhbw %%mm7, %%mm1         \n\t" /* 0B0B0B0B */\
                        "punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R */\
\
                        "psrlw $3, %%mm1                \n\t"\
                        "psllw $3, %%mm4                \n\t"\
                        "psllw $8, %%mm6                \n\t"\
\
                        "pand g16Mask, %%mm4            \n\t"\
                        "pand r16Mask, %%mm6            \n\t"\
\
                        "por %%mm4, %%mm1               \n\t"\
                        "por %%mm6, %%mm1               \n\t"\
\
                        MOVNTQ(%%mm2, (%4, %%eax, 2))\
                        MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
\
                        "addl $8, %%eax                 \n\t"\
                        "cmpl %5, %%eax                 \n\t"\
                        " jb 1b                         \n\t"
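
// Scalar equivalent of the RGB565 pack above (illustrative only):
//      uint16_t px= (B>>3) | ((G<<3)&0x07E0) | ((R<<8)&0xF800);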

#define WRITEBGR15 \
                        "movq %%mm2, %%mm1              \n\t" /* B */\
                        "movq %%mm4, %%mm3              \n\t" /* G */\
                        "movq %%mm5, %%mm6              \n\t" /* R */\
\
                        "punpcklbw %%mm7, %%mm3         \n\t" /* 0G0G0G0G */\
                        "punpcklbw %%mm7, %%mm2         \n\t" /* 0B0B0B0B */\
                        "punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R */\
\
                        "psrlw $3, %%mm2                \n\t"\
                        "psllw $2, %%mm3                \n\t"\
                        "psllw $7, %%mm5                \n\t"\
\
                        "pand g15Mask, %%mm3            \n\t"\
                        "pand r15Mask, %%mm5            \n\t"\
\
                        "por %%mm3, %%mm2               \n\t"\
                        "por %%mm5, %%mm2               \n\t"\
\
                        "punpckhbw %%mm7, %%mm4         \n\t" /* 0G0G0G0G */\
                        "punpckhbw %%mm7, %%mm1         \n\t" /* 0B0B0B0B */\
                        "punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R */\
\
                        "psrlw $3, %%mm1                \n\t"\
                        "psllw $2, %%mm4                \n\t"\
                        "psllw $7, %%mm6                \n\t"\
\
                        "pand g15Mask, %%mm4            \n\t"\
                        "pand r15Mask, %%mm6            \n\t"\
\
                        "por %%mm4, %%mm1               \n\t"\
                        "por %%mm6, %%mm1               \n\t"\
\
                        MOVNTQ(%%mm2, (%4, %%eax, 2))\
                        MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
\
                        "addl $8, %%eax                 \n\t"\
                        "cmpl %5, %%eax                 \n\t"\
                        " jb 1b                         \n\t"
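
// Scalar equivalent of the RGB555 pack above (illustrative only):
//      uint16_t px= (B>>3) | ((G<<2)&0x03E0) | ((R<<7)&0x7C00);
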
// FIXME find a faster way to shuffle it to BGR24
#define WRITEBGR24 \
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
                        "movq %%mm2, %%mm1              \n\t" /* B */\
                        "movq %%mm5, %%mm6              \n\t" /* R */\
                        "punpcklbw %%mm4, %%mm2         \n\t" /* GBGBGBGB 0 */\
                        "punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R 0 */\
                        "punpckhbw %%mm4, %%mm1         \n\t" /* GBGBGBGB 2 */\
                        "punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R 2 */\
                        "movq %%mm2, %%mm0              \n\t" /* GBGBGBGB 0 */\
                        "movq %%mm1, %%mm3              \n\t" /* GBGBGBGB 2 */\
                        "punpcklwd %%mm5, %%mm0         \n\t" /* 0RGB0RGB 0 */\
                        "punpckhwd %%mm5, %%mm2         \n\t" /* 0RGB0RGB 1 */\
                        "punpcklwd %%mm6, %%mm1         \n\t" /* 0RGB0RGB 2 */\
                        "punpckhwd %%mm6, %%mm3         \n\t" /* 0RGB0RGB 3 */\
\
                        "movq %%mm0, %%mm4              \n\t" /* 0RGB0RGB 0 */\
                        "psrlq $8, %%mm0                \n\t" /* 00RGB0RG 0 */\
                        "pand bm00000111, %%mm4         \n\t" /* 00000RGB 0 */\
                        "pand bm11111000, %%mm0         \n\t" /* 00RGB000 0.5 */\
                        "por %%mm4, %%mm0               \n\t" /* 00RGBRGB 0 */\
                        "movq %%mm2, %%mm4              \n\t" /* 0RGB0RGB 1 */\
                        "psllq $48, %%mm2               \n\t" /* GB000000 1 */\
                        "por %%mm2, %%mm0               \n\t" /* GBRGBRGB 0 */\
\
                        "movq %%mm4, %%mm2              \n\t" /* 0RGB0RGB 1 */\
                        "psrld $16, %%mm4               \n\t" /* 000R000R 1 */\
                        "psrlq $24, %%mm2               \n\t" /* 0000RGB0 1.5 */\
                        "por %%mm4, %%mm2               \n\t" /* 000RRGBR 1 */\
                        "pand bm00001111, %%mm2         \n\t" /* 0000RGBR 1 */\
                        "movq %%mm1, %%mm4              \n\t" /* 0RGB0RGB 2 */\
                        "psrlq $8, %%mm1                \n\t" /* 00RGB0RG 2 */\
                        "pand bm00000111, %%mm4         \n\t" /* 00000RGB 2 */\
                        "pand bm11111000, %%mm1         \n\t" /* 00RGB000 2.5 */\
                        "por %%mm4, %%mm1               \n\t" /* 00RGBRGB 2 */\
                        "movq %%mm1, %%mm4              \n\t" /* 00RGBRGB 2 */\
                        "psllq $32, %%mm1               \n\t" /* BRGB0000 2 */\
                        "por %%mm1, %%mm2               \n\t" /* BRGBRGBR 1 */\
\
                        "psrlq $32, %%mm4               \n\t" /* 000000RG 2.5 */\
                        "movq %%mm3, %%mm5              \n\t" /* 0RGB0RGB 3 */\
                        "psrlq $8, %%mm3                \n\t" /* 00RGB0RG 3 */\
                        "pand bm00000111, %%mm5         \n\t" /* 00000RGB 3 */\
                        "pand bm11111000, %%mm3         \n\t" /* 00RGB000 3.5 */\
                        "por %%mm5, %%mm3               \n\t" /* 00RGBRGB 3 */\
                        "psllq $16, %%mm3               \n\t" /* RGBRGB00 3 */\
                        "por %%mm4, %%mm3               \n\t" /* RGBRGBRG 2.5 */\
\
                        "leal (%%eax, %%eax, 2), %%ebx  \n\t"\
                        MOVNTQ(%%mm0, (%4, %%ebx))\
                        MOVNTQ(%%mm2, 8(%4, %%ebx))\
                        MOVNTQ(%%mm3, 16(%4, %%ebx))\
\
                        "addl $8, %%eax                 \n\t"\
                        "cmpl %5, %%eax                 \n\t"\
                        " jb 1b                         \n\t"
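
// A scalar equivalent of the shuffle above simply stores 3 bytes per pixel
// in B,G,R order, as the C fallbacks below do: dest[0]=B; dest[1]=G; dest[2]=R;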


static inline void yuv2yuv(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                           uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstw, int yalpha, int uvalpha)
{
        int yalpha1=yalpha^4095;
        int uvalpha1=uvalpha^4095;
        int i;

        asm volatile ("\n\t"::: "memory");

        for(i=0;i<dstw;i++)
        {
                ((uint8_t*)dest)[0] = (buf0[i]*yalpha1+buf1[i]*yalpha)>>19;
                dest++;
        }

        if(uvalpha != -1)
        {
                for(i=0; i<dstw/2; i++)
                {
                        ((uint8_t*)uDest)[0] = (uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19;
                        ((uint8_t*)vDest)[0] = (uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19;
                        uDest++;
                        vDest++;
                }
        }
}
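
// Fixed-point conventions used throughout this file (a summary, not part of
// the original comments): the horizontal scalers store 8-bit samples scaled
// by 128 (<<7) in uint16_t buffers, and the vertical weights yalpha/uvalpha
// are 12 bit, with yalpha^4095 (== 4095-yalpha, a cheap stand-in for
// 4096-yalpha) as the complementary weight. A 15-bit sample times a 12-bit
// weight therefore needs >>19 to get back to 8 bit, e.g. a hypothetical
// scalar helper for one luma sample:
//      static inline uint8_t blendY(uint16_t a, uint16_t b, int yalpha)
//      { return (a*(yalpha^4095) + b*yalpha)>>19; }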

/**
 * vertically interpolate between two source lines and convert YV12 to RGB
 */
static inline void yuv2rgbX(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                            uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
{
        int yalpha1=yalpha^4095;
        int uvalpha1=uvalpha^4095;
        int i;

        if(fullUVIpol)
        {

#ifdef HAVE_MMX
                if(dstbpp == 32)
                {
                        asm volatile(


FULL_YSCALEYUV2RGB
                        "punpcklbw %%mm1, %%mm3         \n\t" // BGBGBGBG
                        "punpcklbw %%mm7, %%mm0         \n\t" // R0R0R0R0

                        "movq %%mm3, %%mm1              \n\t"
                        "punpcklwd %%mm0, %%mm3         \n\t" // BGR0BGR0
                        "punpckhwd %%mm0, %%mm1         \n\t" // BGR0BGR0

                        MOVNTQ(%%mm3, (%4, %%eax, 4))
                        MOVNTQ(%%mm1, 8(%4, %%eax, 4))

                        "addl $4, %%eax                 \n\t"
                        "cmpl %5, %%eax                 \n\t"
                        " jb 1b                         \n\t"


                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==24)
                {
                        asm volatile(

FULL_YSCALEYUV2RGB

                                                                // lsb ... msb
                        "punpcklbw %%mm1, %%mm3         \n\t" // BGBGBGBG
                        "punpcklbw %%mm7, %%mm0         \n\t" // R0R0R0R0

                        "movq %%mm3, %%mm1              \n\t"
                        "punpcklwd %%mm0, %%mm3         \n\t" // BGR0BGR0
                        "punpckhwd %%mm0, %%mm1         \n\t" // BGR0BGR0

                        "movq %%mm3, %%mm2              \n\t" // BGR0BGR0
                        "psrlq $8, %%mm3                \n\t" // GR0BGR00
                        "pand bm00000111, %%mm2         \n\t" // BGR00000
                        "pand bm11111000, %%mm3         \n\t" // 000BGR00
                        "por %%mm2, %%mm3               \n\t" // BGRBGR00
                        "movq %%mm1, %%mm2              \n\t"
                        "psllq $48, %%mm1               \n\t" // 000000BG
                        "por %%mm1, %%mm3               \n\t" // BGRBGRBG

                        "movq %%mm2, %%mm1              \n\t" // BGR0BGR0
                        "psrld $16, %%mm2               \n\t" // R000R000
                        "psrlq $24, %%mm1               \n\t" // 0BGR0000
                        "por %%mm2, %%mm1               \n\t" // RBGRR000

                        "movl %4, %%ebx                 \n\t"
                        "addl %%eax, %%ebx              \n\t"

#ifdef HAVE_MMX2
                        //FIXME Alignment
                        "movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
                        "movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
#else
                        "movd %%mm3, (%%ebx, %%eax, 2)  \n\t"
                        "psrlq $32, %%mm3               \n\t"
                        "movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
                        "movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
#endif
                        "addl $4, %%eax                 \n\t"
                        "cmpl %5, %%eax                 \n\t"
                        " jb 1b                         \n\t"

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax", "%ebx"
                        );
                }
                else if(dstbpp==15)
                {
                        asm volatile(

FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
                        "paddusb b16Dither, %%mm1       \n\t"
                        "paddusb b16Dither, %%mm0       \n\t"
                        "paddusb b16Dither, %%mm3       \n\t"
#endif
                        "punpcklbw %%mm7, %%mm1         \n\t" // 0G0G0G0G
                        "punpcklbw %%mm7, %%mm3         \n\t" // 0B0B0B0B
                        "punpcklbw %%mm7, %%mm0         \n\t" // 0R0R0R0R

                        "psrlw $3, %%mm3                \n\t"
                        "psllw $2, %%mm1                \n\t"
                        "psllw $7, %%mm0                \n\t"
                        "pand g15Mask, %%mm1            \n\t"
                        "pand r15Mask, %%mm0            \n\t"

                        "por %%mm3, %%mm1               \n\t"
                        "por %%mm1, %%mm0               \n\t"

                        MOVNTQ(%%mm0, (%4, %%eax, 2))

                        "addl $4, %%eax                 \n\t"
                        "cmpl %5, %%eax                 \n\t"
                        " jb 1b                         \n\t"

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==16)
                {
                        asm volatile(

FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
                        "paddusb g16Dither, %%mm1       \n\t"
                        "paddusb b16Dither, %%mm0       \n\t"
                        "paddusb b16Dither, %%mm3       \n\t"
#endif
                        "punpcklbw %%mm7, %%mm1         \n\t" // 0G0G0G0G
                        "punpcklbw %%mm7, %%mm3         \n\t" // 0B0B0B0B
                        "punpcklbw %%mm7, %%mm0         \n\t" // 0R0R0R0R

                        "psrlw $3, %%mm3                \n\t"
                        "psllw $3, %%mm1                \n\t"
                        "psllw $8, %%mm0                \n\t"
                        "pand g16Mask, %%mm1            \n\t"
                        "pand r16Mask, %%mm0            \n\t"

                        "por %%mm3, %%mm1               \n\t"
                        "por %%mm1, %%mm0               \n\t"

                        MOVNTQ(%%mm0, (%4, %%eax, 2))

                        "addl $4, %%eax                 \n\t"
                        "cmpl %5, %%eax                 \n\t"
                        " jb 1b                         \n\t"

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
#else
                asm volatile ("\n\t"::: "memory");

                if(dstbpp==32 || dstbpp==24)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
                                int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
                                dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
                                dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
                                dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
                                dest+=dstbpp>>3;
                        }
                }
                else if(dstbpp==16)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
                                int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);

                                ((uint16_t*)dest)[0] =
                                        (clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
                                        ((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<3)&0x07E0) |
                                        ((clip_table[(Y + yuvtab_3343[V]) >>13]<<8)&0xF800);
                                dest+=2;
                        }
                }
                else if(dstbpp==15)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
                                int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);

                                ((uint16_t*)dest)[0] =
                                        (clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
                                        ((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<2)&0x03E0) |
                                        ((clip_table[(Y + yuvtab_3343[V]) >>13]<<7)&0x7C00);
                                dest+=2;
                        }
                }
#endif
        }//FULL_UV_IPOL
        else
        {
#ifdef HAVE_MMX
                if(dstbpp == 32)
                {
                        asm volatile(
                                YSCALEYUV2RGB
                                WRITEBGR32

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==24)
                {
                        asm volatile(
                                YSCALEYUV2RGB
                                WRITEBGR24

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax", "%ebx"
                        );
                }
                else if(dstbpp==15)
                {
                        asm volatile(
                                YSCALEYUV2RGB
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                                "paddusb b16Dither, %%mm2       \n\t"
                                "paddusb b16Dither, %%mm4       \n\t"
                                "paddusb b16Dither, %%mm5       \n\t"
#endif

                                WRITEBGR15

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==16)
                {
                        asm volatile(
                                YSCALEYUV2RGB
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                                "paddusb g16Dither, %%mm2       \n\t"
                                "paddusb b16Dither, %%mm4       \n\t"
                                "paddusb b16Dither, %%mm5       \n\t"
#endif

                                WRITEBGR16

                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
#else
//FIXME unroll C loop and don't recalculate UV
        asm volatile ("\n\t"::: "memory");

                if(dstbpp==32 || dstbpp==24)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
                                int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);
                                dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
                                dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
                                dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
                                dest+=dstbpp>>3;
                        }
                }
                else if(dstbpp==16)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
                                int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

                                ((uint16_t*)dest)[0] =
                                        (clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
                                        ((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<3)&0x07E0) |
                                        ((clip_table[(Y + yuvtab_3343[V]) >>13]<<8)&0xF800);
                                dest+=2;
                        }
                }
                else if(dstbpp==15)
                {
                        for(i=0;i<dstw;i++){
                                // vertical linear interpolation && yuv2rgb in a single step:
                                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                                int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
                                int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

                                ((uint16_t*)dest)[0] =
                                        (clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
                                        ((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<2)&0x03E0) |
                                        ((clip_table[(Y + yuvtab_3343[V]) >>13]<<7)&0x7C00);
                                dest+=2;
                        }
                }
#endif
        } //!FULL_UV_IPOL
}

/**
 * YV12 to RGB without vertical luma scaling or interpolation (fast 1:1 height path)
 */
static inline void yuv2rgb1(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                            uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
{
        int yalpha1=yalpha^4095;
        int uvalpha1=uvalpha^4095;
        int i;
        if(fullUVIpol || allwaysIpol)
        {
                yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
                return;
        }
#ifdef HAVE_MMX
        if( yalpha > 2048 ) buf0 = buf1;
        if( uvalpha < 2048 ) // note: this is not correct (it shifts chrominance by 0.5 pixels) but it's a bit faster
        {
                if(dstbpp == 32)
                {
                        asm volatile(
                                YSCALEYUV2RGB1
                                WRITEBGR32
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==24)
                {
                        asm volatile(
                                YSCALEYUV2RGB1
                                WRITEBGR24
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax", "%ebx"
                        );
                }
                else if(dstbpp==15)
                {
                        asm volatile(
                                YSCALEYUV2RGB1
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                                "paddusb b16Dither, %%mm2       \n\t"
                                "paddusb b16Dither, %%mm4       \n\t"
                                "paddusb b16Dither, %%mm5       \n\t"
#endif
                                WRITEBGR15
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==16)
                {
                        asm volatile(
                                YSCALEYUV2RGB1
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                                "paddusb g16Dither, %%mm2       \n\t"
                                "paddusb b16Dither, %%mm4       \n\t"
                                "paddusb b16Dither, %%mm5       \n\t"
#endif

                                WRITEBGR16
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
        }
        else
        {
                if(dstbpp == 32)
                {
                        asm volatile(
                                YSCALEYUV2RGB1b
                                WRITEBGR32
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==24)
                {
                        asm volatile(
                                YSCALEYUV2RGB1b
                                WRITEBGR24
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax", "%ebx"
                        );
                }
                else if(dstbpp==15)
                {
                        asm volatile(
                                YSCALEYUV2RGB1b
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                                "paddusb b16Dither, %%mm2       \n\t"
                                "paddusb b16Dither, %%mm4       \n\t"
                                "paddusb b16Dither, %%mm5       \n\t"
#endif
                                WRITEBGR15
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
                else if(dstbpp==16)
                {
                        asm volatile(
                                YSCALEYUV2RGB1b
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                                "paddusb g16Dither, %%mm2       \n\t"
                                "paddusb b16Dither, %%mm4       \n\t"
                                "paddusb b16Dither, %%mm5       \n\t"
#endif

                                WRITEBGR16
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
                        "m" (yalpha1), "m" (uvalpha1)
                        : "%eax"
                        );
                }
        }
#else
//FIXME unroll C loop and don't recalculate UV
        asm volatile ("\n\t"::: "memory");

        if(dstbpp==32 || dstbpp==24)
        {
                for(i=0;i<dstw;i++){
                        // vertical linear interpolation && yuv2rgb in a single step:
                        int Y=yuvtab_2568[buf0[i]>>7];
                        int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
                        int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);
                        dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
                        dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
                        dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
                        dest+=dstbpp>>3;
                }
        }
        else if(dstbpp==16)
        {
                for(i=0;i<dstw;i++){
                        // vertical linear interpolation && yuv2rgb in a single step:
                        int Y=yuvtab_2568[buf0[i]>>7];
                        int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
                        int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

                        ((uint16_t*)dest)[0] =
                                (clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
                                ((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<3)&0x07E0) |
                                ((clip_table[(Y + yuvtab_3343[V]) >>13]<<8)&0xF800);
                        dest+=2;
                }
        }
        else if(dstbpp==15)
        {
                for(i=0;i<dstw;i++){
                        // vertical linear interpolation && yuv2rgb in a single step:
                        int Y=yuvtab_2568[buf0[i]>>7];
                        int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
                        int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

                        ((uint16_t*)dest)[0] =
                                (clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
                                ((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<2)&0x03E0) |
                                ((clip_table[(Y + yuvtab_3343[V]) >>13]<<7)&0x7C00);
                        dest+=2;
                }
        }
#endif
}


static inline void hyscale(uint16_t *dst, int dstWidth, uint8_t *src, int srcWidth, int xInc)
{
        int i;
      unsigned int xpos=0;
      // *** horizontal scale Y line to temp buffer
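      // xInc is a 16.16 fixed-point step (source samples per output sample):
      // xpos>>16 selects the source pixel, the fractional part drives the
      // linear blend, and results are stored scaled by 128 (see the C code below).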
#ifdef ARCH_X86
#ifdef HAVE_MMX2
        if(canMMX2BeUsed)
        {
                asm volatile(
                        "pxor %%mm7, %%mm7              \n\t"
                        "pxor %%mm2, %%mm2              \n\t" // 2*xalpha
                        "movd %5, %%mm6                 \n\t" // xInc&0xFFFF
                        "punpcklwd %%mm6, %%mm6         \n\t"
                        "punpcklwd %%mm6, %%mm6         \n\t"
                        "movq %%mm6, %%mm2              \n\t"
                        "psllq $16, %%mm2               \n\t"
                        "paddw %%mm6, %%mm2             \n\t"
                        "psllq $16, %%mm2               \n\t"
                        "paddw %%mm6, %%mm2             \n\t"
                        "psllq $16, %%mm2               \n\t" //0,t,2t,3t               t=xInc&0xFFFF
                        "movq %%mm2, temp0              \n\t"
                        "movd %4, %%mm6                 \n\t" //(xInc*4)&0xFFFF
                        "punpcklwd %%mm6, %%mm6         \n\t"
                        "punpcklwd %%mm6, %%mm6         \n\t"
                        "xorl %%eax, %%eax              \n\t" // i
                        "movl %0, %%esi                 \n\t" // src
                        "movl %1, %%edi                 \n\t" // dst
                        "movl %3, %%edx                 \n\t" // (xInc*4)>>16
                        "xorl %%ecx, %%ecx              \n\t"
                        "xorl %%ebx, %%ebx              \n\t"
                        "movw %4, %%bx                  \n\t" // (xInc*4)&0xFFFF

#define FUNNY_Y_CODE \
                        PREFETCH" 1024(%%esi)           \n\t"\
                        PREFETCH" 1056(%%esi)           \n\t"\
                        PREFETCH" 1088(%%esi)           \n\t"\
                        "call funnyYCode                \n\t"\
                        "movq temp0, %%mm2              \n\t"\
                        "xorl %%ecx, %%ecx              \n\t"

FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE

                        :: "m" (src), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
                        "m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF)
                        : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
                );
                for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth-1; i--) dst[i] = src[srcWidth-1]*128;
        }
        else
        {
#endif
        // no MMX2: plain x86 asm ...
        asm volatile(
                "xorl %%eax, %%eax              \n\t" // i
                "xorl %%ebx, %%ebx              \n\t" // xx
                "xorl %%ecx, %%ecx              \n\t" // 2*xalpha
                "1:                             \n\t"
                "movzbl  (%0, %%ebx), %%edi     \n\t" //src[xx]
                "movzbl 1(%0, %%ebx), %%esi     \n\t" //src[xx+1]
                "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
                "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
                "shll $16, %%edi                \n\t"
                "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
                "movl %1, %%edi                 \n\t"
                "shrl $9, %%esi                 \n\t"
                "movw %%si, (%%edi, %%eax, 2)   \n\t"
                "addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFFFF
                "adcl %3, %%ebx                 \n\t" //xx+= xInc>>16 + carry

                "movzbl (%0, %%ebx), %%edi      \n\t" //src[xx]
                "movzbl 1(%0, %%ebx), %%esi     \n\t" //src[xx+1]
                "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
                "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
                "shll $16, %%edi                \n\t"
                "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
                "movl %1, %%edi                 \n\t"
                "shrl $9, %%esi                 \n\t"
                "movw %%si, 2(%%edi, %%eax, 2)  \n\t"
                "addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFFFF
                "adcl %3, %%ebx                 \n\t" //xx+= xInc>>16 + carry


                "addl $2, %%eax                 \n\t"
                "cmpl %2, %%eax                 \n\t"
                " jb 1b                         \n\t"


                :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
                : "%eax", "%ebx", "%ecx", "%edi", "%esi"
                );
#ifdef HAVE_MMX2
        } //if MMX2 can't be used
#endif
#else
        for(i=0;i<dstWidth;i++){
                register unsigned int xx=xpos>>16;
                register unsigned int xalpha=(xpos&0xFFFF)>>9;
                dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
                xpos+=xInc;
        }
#endif
}

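// hcscale scales one U and one V line in a single pass: U from src1 lands in
// dst[0..dstWidth-1], V from src2 in dst[2048..] (hence the 4096-byte offset
// in the asm below). Note the C fallback weights with xalpha^127, so the two
// weights sum to 127 instead of 128; the exact (and slower) variant is kept
// in the comment inside that loop.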
inline static void hcscale(uint16_t *dst, int dstWidth,
                                uint8_t *src1, uint8_t *src2, int srcWidth, int xInc)
{
        int xpos=0;
        int i;
#ifdef ARCH_X86
#ifdef HAVE_MMX2
        if(canMMX2BeUsed)
        {
                asm volatile(
                "pxor %%mm7, %%mm7              \n\t"
                "pxor %%mm2, %%mm2              \n\t" // 2*xalpha
                "movd %5, %%mm6                 \n\t" // xInc&0xFFFF
                "punpcklwd %%mm6, %%mm6         \n\t"
                "punpcklwd %%mm6, %%mm6         \n\t"
                "movq %%mm6, %%mm2              \n\t"
                "psllq $16, %%mm2               \n\t"
                "paddw %%mm6, %%mm2             \n\t"
                "psllq $16, %%mm2               \n\t"
                "paddw %%mm6, %%mm2             \n\t"
                "psllq $16, %%mm2               \n\t" //0,t,2t,3t               t=xInc&0xFFFF
                "movq %%mm2, temp0              \n\t"
                "movd %4, %%mm6                 \n\t" //(xInc*4)&0xFFFF
                "punpcklwd %%mm6, %%mm6         \n\t"
                "punpcklwd %%mm6, %%mm6         \n\t"
                "xorl %%eax, %%eax              \n\t" // i
                "movl %0, %%esi                 \n\t" // src
                "movl %1, %%edi                 \n\t" // buf1
                "movl %3, %%edx                 \n\t" // (xInc*4)>>16
                "xorl %%ecx, %%ecx              \n\t"
                "xorl %%ebx, %%ebx              \n\t"
                "movw %4, %%bx                  \n\t" // (xInc*4)&0xFFFF

#define FUNNYUVCODE \
                        PREFETCH" 1024(%%esi)           \n\t"\
                        PREFETCH" 1056(%%esi)           \n\t"\
                        PREFETCH" 1088(%%esi)           \n\t"\
                        "call funnyUVCode               \n\t"\
                        "movq temp0, %%mm2              \n\t"\
                        "xorl %%ecx, %%ecx              \n\t"

FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE

FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
                "xorl %%eax, %%eax              \n\t" // i
                "movl %6, %%esi                 \n\t" // src2
                "movl %1, %%edi                 \n\t" // buf1
                "addl $4096, %%edi              \n\t"

FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE

FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE

                :: "m" (src1), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
                  "m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF), "m" (src2)
                : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
        );
                for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth/2-1; i--)
                {
                        dst[i] = src1[srcWidth/2-1]*128;
                        dst[i+2048] = src2[srcWidth/2-1]*128;
                }
        }
        else
        {
#endif
        asm volatile(
                "xorl %%eax, %%eax              \n\t" // i
                "xorl %%ebx, %%ebx              \n\t" // xx
                "xorl %%ecx, %%ecx              \n\t" // 2*xalpha
                "1:                             \n\t"
                "movl %0, %%esi                 \n\t"
                "movzbl  (%%esi, %%ebx), %%edi  \n\t" //src[xx]
                "movzbl 1(%%esi, %%ebx), %%esi  \n\t" //src[xx+1]
                "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
                "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
                "shll $16, %%edi                \n\t"
                "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
                "movl %1, %%edi                 \n\t"
                "shrl $9, %%esi                 \n\t"
                "movw %%si, (%%edi, %%eax, 2)   \n\t"

                "movzbl  (%5, %%ebx), %%edi     \n\t" //src[xx]
                "movzbl 1(%5, %%ebx), %%esi     \n\t" //src[xx+1]
                "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
                "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
                "shll $16, %%edi                \n\t"
                "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
                "movl %1, %%edi                 \n\t"
                "shrl $9, %%esi                 \n\t"
                "movw %%si, 4096(%%edi, %%eax, 2)\n\t"

                "addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFFFF
                "adcl %3, %%ebx                 \n\t" //xx+= xInc>>16 + carry
                "addl $1, %%eax                 \n\t"
                "cmpl %2, %%eax                 \n\t"
                " jb 1b                         \n\t"

                :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF),
                "r" (src2)
                : "%eax", "%ebx", "%ecx", "%edi", "%esi"
                );
#ifdef HAVE_MMX2
        } //if MMX2 can't be used
#endif
#else
        for(i=0;i<dstWidth;i++){
                register unsigned int xx=xpos>>16;
                register unsigned int xalpha=(xpos&0xFFFF)>>9;
                dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
                dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
/* slower
                dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
                dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
*/
                xpos+=xInc;
        }
#endif
}


// *** bilinear scaling and yuv->rgb or yuv->yuv conversion of yv12 slices:
// *** Note: it's called multiple times while decoding a frame, first time y==0
// *** Designed to upscale, but may work for downscale too.
// s_xinc = (src_width << 16) / dst_width
// s_yinc = (src_height << 16) / dst_height
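//
// Example (16.16 fixed point): upscaling a 320-pixel-wide source to 640 gives
// s_xinc = (320<<16)/640 = 0x8000, i.e. the source position advances by half
// a source pixel per destination pixel.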
void SwScale_YV12slice(unsigned char* srcptr[],int stride[], int y, int h,
                             uint8_t* dstptr[], int dststride, int dstw, int dstbpp,
                             unsigned int s_xinc,unsigned int s_yinc){

// scaling factors:
//static int s_yinc=(vo_dga_src_height<<16)/vo_dga_vp_height;
//static int s_xinc=(vo_dga_src_width<<8)/vo_dga_vp_width;

unsigned int s_xinc2;

static int s_srcypos; // points to the dst pixel's center in the source (0 is the center of pixel 0,0 in src)
static int s_ypos;

// last horizontally interpolated lines, used to avoid unnecessary calculations
static int s_last_ypos;
static int s_last_y1pos;

static int static_dstw;

#ifdef HAVE_MMX2
// used to detect a horizontal size change
static int old_dstw= -1;
static int old_s_xinc= -1;
#endif

int srcWidth= (dstw*s_xinc + 0x8000)>>16;
int dstUVw= fullUVIpol ? dstw : dstw/2;


#ifdef HAVE_MMX2
canMMX2BeUsed= (s_xinc <= 0x10000 && (dstw&31)==0 && (srcWidth&15)==0) ? 1 : 0;
#endif

// match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
// n-2 is the last chrominance sample available
// FIXME: this is not perfect, but no one should notice the difference; the more correct
// variant would be like the vertical one, but that would require special code for the
// first and last pixel
if(canMMX2BeUsed)       s_xinc+= 20;
else                    s_xinc = ((srcWidth-2)<<16)/(dstw-2) - 20;

if(fullUVIpol && !(dstbpp==12))         s_xinc2= s_xinc>>1;
else                                    s_xinc2= s_xinc;
  // force calculation of the horizontal interpolation of the first line

  if(y==0){
        s_last_ypos=-99;
        s_last_y1pos=-99;
        s_srcypos= s_yinc/2 - 0x8000;
        s_ypos=0;
#ifdef HAVE_MMX2
// can't downscale !!!
        if((old_s_xinc != s_xinc || old_dstw!=dstw) && canMMX2BeUsed)
        {
                uint8_t *fragment;
                int imm8OfPShufW1;
                int imm8OfPShufW2;
                int fragmentLength;

                int xpos, xx, xalpha, i;

                old_s_xinc= s_xinc;
                old_dstw= dstw;

                static_dstw= dstw;

                // create an optimized horizontal scaling routine

                // code fragment

                asm volatile(
                        "jmp 9f                         \n\t"
                // Begin
                        "0:                             \n\t"
                        "movq (%%esi), %%mm0            \n\t" //FIXME Alignment
                        "movq %%mm0, %%mm1              \n\t"
                        "psrlq $8, %%mm0                \n\t"
                        "punpcklbw %%mm7, %%mm1 \n\t"
                        "movq %%mm2, %%mm3              \n\t"
                        "punpcklbw %%mm7, %%mm0 \n\t"
                        "addw %%bx, %%cx                \n\t" //2*xalpha += (4*s_xinc)&0xFFFF
                        "pshufw $0xFF, %%mm1, %%mm1     \n\t"
                        "1:                             \n\t"
                        "adcl %%edx, %%esi              \n\t" //xx+= (4*s_xinc)>>16 + carry
                        "pshufw $0xFF, %%mm0, %%mm0     \n\t"
                        "2:                             \n\t"
                        "psrlw $9, %%mm3                \n\t"
                        "psubw %%mm1, %%mm0             \n\t"
                        "pmullw %%mm3, %%mm0            \n\t"
                        "paddw %%mm6, %%mm2             \n\t" // 2*alpha += xpos&0xFFFF
                        "psllw $7, %%mm1                \n\t"
                        "paddw %%mm1, %%mm0             \n\t"

                        "movq %%mm0, (%%edi, %%eax)     \n\t"

                        "addl $8, %%eax                 \n\t"
                // End
                        "9:                             \n\t"
//              "int $3\n\t"
                        "leal 0b, %0                    \n\t"
                        "leal 1b, %1                    \n\t"
                        "leal 2b, %2                    \n\t"
                        "decl %1                        \n\t"
                        "decl %2                        \n\t"
                        "subl %0, %1                    \n\t"
                        "subl %0, %2                    \n\t"
                        "leal 9b, %3                    \n\t"
                        "subl %0, %3                    \n\t"
                        :"=r" (fragment), "=r" (imm8OfPShufW1), "=r" (imm8OfPShufW2),
                         "=r" (fragmentLength)
                );

                xpos= 0; //s_xinc/2 - 0x8000; // difference between pixel centers

                /* choose xinc so that all 8 parts fit exactly
                   Note: we cannot use just 1 part because it would not fit in the code cache */
//              s_xinc2_diff= -((((s_xinc2*(dstw/8))&0xFFFF))/(dstw/8))-10;
//              s_xinc_diff= -((((s_xinc*(dstw/8))&0xFFFF))/(dstw/8));
#ifdef ALT_ERROR
//              s_xinc2_diff+= ((0x10000/(dstw/8)));
#endif
//              s_xinc_diff= s_xinc2_diff*2;

//              s_xinc2+= s_xinc2_diff;
//              s_xinc+= s_xinc_diff;

//              old_s_xinc= s_xinc;

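                // How the generated code works (a summary of the patching
                // below): each copied fragment produces four output pixels;
                // b, c and d are the source offsets of pixels 1..3 of the
                // group relative to pixel 0, and a|(b<<2)|(c<<4)|(d<<6) is
                // written over both pshufw immediates so every output word
                // selects its source word. If d<3, byte 1 of the fragment is
                // patched from 0x6F to 0x6E, turning the movq load
                // (0x0F 0x6F) into a movd (0x0F 0x6E) so only 4 source bytes
                // are read. A RET (0xC3) is stored after each fragment; all
                // but the last are overwritten by the next memcpy. One
                // "call funnyYCode" thus produces dstw/8 pixels, and the
                // eight FUNNY_Y_CODE invocations in hyscale cover the whole
                // line (hence canMMX2BeUsed requires dstw to be a multiple
                // of 32).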
                for(i=0; i<dstw/8; i++)
                {
                        int xx=xpos>>16;

                        if((i&3) == 0)
                        {
                                int a=0;
                                int b=((xpos+s_xinc)>>16) - xx;
                                int c=((xpos+s_xinc*2)>>16) - xx;
                                int d=((xpos+s_xinc*3)>>16) - xx;

                                memcpy(funnyYCode + fragmentLength*i/4, fragment, fragmentLength);

                                funnyYCode[fragmentLength*i/4 + imm8OfPShufW1]=
                                funnyYCode[fragmentLength*i/4 + imm8OfPShufW2]=
                                        a | (b<<2) | (c<<4) | (d<<6);

                                // if we don't need to read 8 bytes then don't :), reduces the chance of
                                // crossing a cache line
                                if(d<3) funnyYCode[fragmentLength*i/4 + 1]= 0x6E;

                                funnyYCode[fragmentLength*(i+4)/4]= RET;
                        }
                        xpos+=s_xinc;
                }

                xpos= 0; //s_xinc2/2 - 0x10000; // difference between centers of chroma samples
                for(i=0; i<dstUVw/8; i++)
                {
                        int xx=xpos>>16;

                        if((i&3) == 0)
                        {
                                int a=0;
                                int b=((xpos+s_xinc2)>>16) - xx;
                                int c=((xpos+s_xinc2*2)>>16) - xx;
                                int d=((xpos+s_xinc2*3)>>16) - xx;

                                memcpy(funnyUVCode + fragmentLength*i/4, fragment, fragmentLength);

                                funnyUVCode[fragmentLength*i/4 + imm8OfPShufW1]=
                                funnyUVCode[fragmentLength*i/4 + imm8OfPShufW2]=
                                        a | (b<<2) | (c<<4) | (d<<6);

                                // if we don't need to read 8 bytes then don't :), reduces the chance of
                                // crossing a cache line
                                if(d<3) funnyUVCode[fragmentLength*i/4 + 1]= 0x6E;

                                funnyUVCode[fragmentLength*(i+4)/4]= RET;
                        }
                        xpos+=s_xinc2;
                }
//              funnyCode[0]= RET;
        }

#endif // HAVE_MMX2
  } // reset counters

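  // Main vertical loop: for each output line, y0/y1 are the first luma/chroma
  // source lines below it; buf0/buf1 and uvbuf0/uvbuf1 ping-pong on the parity
  // of y0/y1 so each source line is horizontally scaled at most once, and
  // yalpha/uvalpha blend the two buffered lines vertically.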
  while(1){
    unsigned char *dest =dstptr[0]+dststride*s_ypos;
    unsigned char *uDest=dstptr[1]+(dststride>>1)*(s_ypos>>1);
    unsigned char *vDest=dstptr[2]+(dststride>>1)*(s_ypos>>1);

    int y0=(s_srcypos + 0xFFFF)>>16;  // first luminance source line number below the dst line
        // points to the dst pixel's center in the source (0 is the center of pixel 0,0 in src)
    int srcuvpos= dstbpp==12 ?  s_srcypos + s_yinc/2 - 0x8000 :
                                s_srcypos - 0x8000;
    int y1=(srcuvpos + 0x1FFFF)>>17; // first chrominance source line number below the dst line
    int yalpha=((s_srcypos-1)&0xFFFF)>>4;
    int uvalpha=((srcuvpos-1)&0x1FFFF)>>5;
    uint16_t *buf0=pix_buf_y[y0&1];             // top line of the interpolated slice
    uint16_t *buf1=pix_buf_y[((y0+1)&1)];       // bottom line of the interpolated slice
    uint16_t *uvbuf0=pix_buf_uv[y1&1];          // top line of the interpolated slice
    uint16_t *uvbuf1=pix_buf_uv[(y1+1)&1];      // bottom line of the interpolated slice
    int i;

    if(y0>=y+h) break; // FIXME wrong, skips last lines, but they are duplicates anyway

    if((y0&1) && dstbpp==12) uvalpha=-1; // there is no alpha if there is no line

    s_ypos++; s_srcypos+=s_yinc;

    // only interpolate the src line horizontally if we didn't do it already
        if(s_last_ypos!=y0)
        {
                unsigned char *src;
                // skip if first line has been horizontally scaled already
                if(s_last_ypos != y0-1)
                {
                        // check if first line is before any available src lines
                        if(y0-1 < y)    src=srcptr[0]+(0     )*stride[0];
                        else            src=srcptr[0]+(y0-y-1)*stride[0];

                        hyscale(buf0, dstw, src, srcWidth, s_xinc);
                }
                // check if second line is after any available src lines
                if(y0-y >= h)   src=srcptr[0]+(h-1)*stride[0];
                else            src=srcptr[0]+(y0-y)*stride[0];

                // the MIN() is required to avoid reusing lines which were not available
                s_last_ypos= MIN(y0, y+h-1);
                hyscale(buf1, dstw, src, srcWidth, s_xinc);
        }
//      printf("%d %d %d %d\n", y, y1, s_last_y1pos, h);
      // *** horizontal scale U and V lines to temp buffer
        if(s_last_y1pos!=y1)
        {
                uint8_t *src1, *src2;
                // skip if first line has been horizontally scaled already
                if(s_last_y1pos != y1-1)
                {
                        // check if first line is before any available src lines
                        if(y1-y/2-1 < 0)
                        {
                                src1= srcptr[1]+(0)*stride[1];
                                src2= srcptr[2]+(0)*stride[2];
                        }else{
                                src1= srcptr[1]+(y1-y/2-1)*stride[1];
                                src2= srcptr[2]+(y1-y/2-1)*stride[2];
                        }
                        hcscale(uvbuf0, dstUVw, src1, src2, srcWidth, s_xinc2);
                }

                // check if second line is after any available src lines
                if(y1 - y/2 >= h/2)
                {
                        src1= srcptr[1]+(h/2-1)*stride[1];
                        src2= srcptr[2]+(h/2-1)*stride[2];
                }else{
                        src1= srcptr[1]+(y1-y/2)*stride[1];
                        src2= srcptr[2]+(y1-y/2)*stride[2];
                }
                hcscale(uvbuf1, dstUVw, src1, src2, srcWidth, s_xinc2);

                // the MIN() is required to avoid reusing lines which were not available
                s_last_y1pos= MIN(y1, y/2+h/2-1);
        }

        if(dstbpp==12) //YV12
                yuv2yuv(buf0, buf1, uvbuf0, uvbuf1, dest, uDest, vDest, dstw, yalpha, uvalpha);
        else if(ABS(s_yinc - 0x10000) < 10)
                yuv2rgb1(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
        else
                yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);

#ifdef HAVE_MMX
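        // rotate the dither rows so the next output line uses the other
        // pattern (b16Dither1/b16Dither2 and g16Dither1/g16Dither2 are the
        // two rows defined at the top of this file)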
        b16Dither= b16Dither1;
        b16Dither1= b16Dither2;
        b16Dither2= b16Dither;

        g16Dither= g16Dither1;
        g16Dither1= g16Dither2;
        g16Dither2= g16Dither;
#endif
  }

#ifdef HAVE_MMX
        __asm __volatile(SFENCE:::"memory");
        __asm __volatile(EMMS:::"memory");
#endif
}



void SwScale_Init(){
    // generating tables:
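    // clip_table is indexed with a +256 bias: entries 0..255 clamp to 0,
    // 256..511 pass through, 512..767 clamp to 255. The yuvtab_* constants
    // look like the usual BT.601 YCbCr->RGB factors scaled by 2^13
    // (0x2568/8192 ~= 1.17 luma scale, 0x3343/8192 ~= 1.60 V->R, etc.), and
    // the 256<<13 added to yuvtab_2568 pre-biases Y so the >>13 in the output
    // code lands in clip_table's shifted index range.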
    int i;
    for(i=0;i<256;i++){
        clip_table[i]=0;
        clip_table[i+256]=i;
        clip_table[i+512]=255;
        yuvtab_2568[i]=(0x2568*(i-16))+(256<<13);
        yuvtab_3343[i]=0x3343*(i-128);
        yuvtab_0c92[i]=-0x0c92*(i-128);
        yuvtab_1a1e[i]=-0x1a1e*(i-128);
        yuvtab_40cf[i]=0x40cf*(i-128);
    }

}