// Software scaling and colorspace conversion routines for MPlayer

// Original C implementation by A'rpi/ESP-team <arpi@thot.banki.hu>
// current version mostly by Michael Niedermayer (michaelni@gmx.at)
// the parts written by michael are under GNU GPL

#include "../config.h"
#include "../mmx_defs.h"

//disables the unscaled height version

#define RET 0xC3 //near return opcode

/*
known BUGS with known cause (no bug reports please, but patches are welcome :) )
horizontal MMX2 scaler reads 1-7 samples too many (might cause a sig11)

Supported output formats: BGR15, BGR16, BGR24, BGR32
BGR15 & BGR16 MMX versions support dithering
Special versions: fast Y 1:1 scaling (no interpolation in y direction)

more intelligent misalignment avoidance for the horizontal scaler
*/

#define ABS(a) ((a) > 0 ? (a) : (-(a)))
#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))
#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif

#ifdef HAVE_MMX2
#define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif

static uint64_t __attribute__((aligned(8))) yCoeff= 0x2568256825682568LL;
static uint64_t __attribute__((aligned(8))) vrCoeff= 0x3343334333433343LL;
static uint64_t __attribute__((aligned(8))) ubCoeff= 0x40cf40cf40cf40cfLL;
static uint64_t __attribute__((aligned(8))) vgCoeff= 0xE5E2E5E2E5E2E5E2LL;
static uint64_t __attribute__((aligned(8))) ugCoeff= 0xF36EF36EF36EF36ELL;
static uint64_t __attribute__((aligned(8))) w400= 0x0400040004000400LL;
static uint64_t __attribute__((aligned(8))) w80= 0x0080008000800080LL;
static uint64_t __attribute__((aligned(8))) w10= 0x0010001000100010LL;
static uint64_t __attribute__((aligned(8))) bm00001111=0x00000000FFFFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm00000111=0x0000000000FFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm11111000=0xFFFFFFFFFF000000LL;

static uint64_t __attribute__((aligned(8))) b16Dither= 0x0004000400040004LL;
static uint64_t __attribute__((aligned(8))) b16Dither1=0x0004000400040004LL;
static uint64_t __attribute__((aligned(8))) b16Dither2=0x0602060206020602LL;
static uint64_t __attribute__((aligned(8))) g16Dither= 0x0002000200020002LL;
static uint64_t __attribute__((aligned(8))) g16Dither1=0x0002000200020002LL;
static uint64_t __attribute__((aligned(8))) g16Dither2=0x0301030103010301LL;

static uint64_t __attribute__((aligned(8))) b16Mask= 0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g16Mask= 0x07E007E007E007E0LL;
static uint64_t __attribute__((aligned(8))) r16Mask= 0xF800F800F800F800LL;
static uint64_t __attribute__((aligned(8))) b15Mask= 0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g15Mask= 0x03E003E003E003E0LL;
static uint64_t __attribute__((aligned(8))) r15Mask= 0x7C007C007C007C00LL;
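
/* Layout reminder (illustrative, not part of the build): a 16bpp pixel is
   rrrrrggg gggbbbbb (565) and a 15bpp pixel 0rrrrrgg gggbbbbb (555); the
   masks above pick out one channel in each of the 4 packed words. Packing
   8-bit channels by hand looks like:
       uint16_t pix565 = ((r&0xF8)<<8) | ((g&0xFC)<<3) | ((b&0xF8)>>3);
*/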
static uint64_t __attribute__((aligned(8))) temp0;
static uint64_t __attribute__((aligned(8))) asm_yalpha1;
static uint64_t __attribute__((aligned(8))) asm_uvalpha1;

// temporary storage for 4 yuv lines:
// 16bit for now (mmx likes it more compact)
#ifdef HAVE_MMX
static uint16_t __attribute__((aligned(8))) pix_buf_y[4][2048];
static uint16_t __attribute__((aligned(8))) pix_buf_uv[2][2048*2];
#else
static uint16_t pix_buf_y[4][2048];
static uint16_t pix_buf_uv[2][2048*2];
#endif

// clipping helper table for C implementations:
static unsigned char clip_table[768];

// yuv->rgb conversion tables:
static int yuvtab_2568[256];
static int yuvtab_3343[256];
static int yuvtab_0c92[256];
static int yuvtab_1a1e[256];
static int yuvtab_40cf[256];
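
/* How these tables are meant to be used, a minimal sketch (not part of the
   build; they are filled in SwScale_Init at the bottom of this file): each
   holds coefficient*(x-bias) in 13-bit fixed point, so one pixel converts
   with a few adds and table clips:
       int Y= yuvtab_2568[y];        // ~1.17*(y-16), plus a re-centering bias
       int b= clip_table[(Y + yuvtab_40cf[u])>>13];
       int g= clip_table[(Y + yuvtab_1a1e[v] + yuvtab_0c92[u])>>13];
       int r= clip_table[(Y + yuvtab_3343[v])>>13];
   which is exactly the pattern of the C fallbacks below.
*/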
static uint8_t funnyYCode[10000];
static uint8_t funnyUVCode[10000];

static int canMMX2BeUsed=0;

#define FULL_YSCALEYUV2RGB \
"pxor %%mm7, %%mm7 \n\t"\
"movd %6, %%mm6 \n\t" /*yalpha1*/\
"punpcklwd %%mm6, %%mm6 \n\t"\
"punpcklwd %%mm6, %%mm6 \n\t"\
"movd %7, %%mm5 \n\t" /*uvalpha1*/\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"xorl %%eax, %%eax \n\t"\
"movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
"movq (%2, %%eax,2), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax,2), %%mm3 \n\t" /* uvbuf1[eax]*/\
"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
"pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
"psraw $4, %%mm1 \n\t" /* buf1[eax] >>4*/\
"movq 4096(%2, %%eax,2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
"psraw $4, %%mm3 \n\t" /* uvbuf1[eax] >>4*/\
"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"movq 4096(%3, %%eax,2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
"psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
"psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
"psubw w400, %%mm3 \n\t" /* 8(U-128)*/\
"pmulhw yCoeff, %%mm1 \n\t"\
"pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"pmulhw ubCoeff, %%mm3 \n\t"\
"psraw $4, %%mm0 \n\t" /* uvbuf1[eax+2048] >>4*/\
"pmulhw ugCoeff, %%mm2 \n\t"\
"paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
"psubw w400, %%mm0 \n\t" /* (V-128)8*/\
"movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
"pmulhw vrCoeff, %%mm0 \n\t"\
"pmulhw vgCoeff, %%mm4 \n\t"\
"paddw %%mm1, %%mm3 \n\t" /* B*/\
"paddw %%mm1, %%mm0 \n\t" /* R*/\
"packuswb %%mm3, %%mm3 \n\t"\
"packuswb %%mm0, %%mm0 \n\t"\
"paddw %%mm4, %%mm2 \n\t"\
"paddw %%mm2, %%mm1 \n\t" /* G*/\
"packuswb %%mm1, %%mm1 \n\t"
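
/* What FULL_YSCALEYUV2RGB computes per 16-bit word, a sketch (not part of
   the build; the buffers hold horizontally scaled samples at 128x scale):
       Y = buf1[i]>>4   + ((buf0[i]  -buf1[i])  *yalpha1 >>16);   // 8*luma
       U = uvbuf1[i]>>4 + ((uvbuf0[i]-uvbuf1[i])*uvalpha1>>16) - 8*128;
       V = the same from the second (eax+2048) half of the uv buffers;
       Y = (Y - 8*16)*yCoeff>>16;                 // pmulhw
       B = Y + (U*ubCoeff>>16);  R = Y + (V*vrCoeff>>16);
       G = Y + (U*ugCoeff>>16) + (V*vgCoeff>>16); // ug/vg are negative
   then packuswb saturates each channel to 0..255. */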
#define YSCALEYUV2RGB \
"movd %6, %%mm6 \n\t" /*yalpha1*/\
"punpcklwd %%mm6, %%mm6 \n\t"\
"punpcklwd %%mm6, %%mm6 \n\t"\
"movq %%mm6, asm_yalpha1 \n\t"\
"movd %7, %%mm5 \n\t" /*uvalpha1*/\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"movq %%mm5, asm_uvalpha1 \n\t"\
"xorl %%eax, %%eax \n\t"\
"movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
"movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
"movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
"psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
"movq asm_uvalpha1, %%mm0 \n\t"\
"pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
"pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
"psraw $4, %%mm3 \n\t" /* uvbuf1[eax] >>4*/\
"psraw $4, %%mm4 \n\t" /* uvbuf1[eax+2048] >>4*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
"psubw w400, %%mm3 \n\t" /* (U-128)8*/\
"psubw w400, %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw ugCoeff, %%mm3 \n\t"\
"pmulhw vgCoeff, %%mm4 \n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
"movq 8(%0, %%eax, 2), %%mm6 \n\t" /*buf0[eax+4]*/\
"movq 8(%1, %%eax, 2), %%mm7 \n\t" /*buf1[eax+4]*/\
"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
"psubw %%mm7, %%mm6 \n\t" /* buf0[eax+4] - buf1[eax+4]*/\
"pmulhw asm_yalpha1, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"pmulhw asm_yalpha1, %%mm6 \n\t" /* (buf0[eax+4] - buf1[eax+4])yalpha1>>16*/\
"psraw $4, %%mm1 \n\t" /* buf1[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf1[eax+4] >>4*/\
"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"paddw %%mm6, %%mm7 \n\t" /* buf0[eax+4]yalpha1 + buf1[eax+4](1-yalpha1) >>16*/\
"pmulhw ubCoeff, %%mm2 \n\t"\
"pmulhw vrCoeff, %%mm5 \n\t"\
"psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
"psubw w80, %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw yCoeff, %%mm1 \n\t"\
"pmulhw yCoeff, %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
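
// note: YSCALEYUV2RGB1 below reads only buf0/uvbuf0, i.e. no vertical
// interpolation at all (the 1:1 y-scaling case)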
#define YSCALEYUV2RGB1 \
"xorl %%eax, %%eax \n\t"\
"movq (%2, %%eax), %%mm3 \n\t" /* uvbuf0[eax]*/\
"movq 4096(%2, %%eax), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
"psraw $4, %%mm3 \n\t" /* uvbuf0[eax] >>4*/\
"psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] >>4*/\
"psubw w400, %%mm3 \n\t" /* (U-128)8*/\
"psubw w400, %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw ugCoeff, %%mm3 \n\t"\
"pmulhw vgCoeff, %%mm4 \n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
"movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax+4]*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax+4] >>4*/\
"pmulhw ubCoeff, %%mm2 \n\t"\
"pmulhw vrCoeff, %%mm5 \n\t"\
"psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
"psubw w80, %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw yCoeff, %%mm1 \n\t"\
"pmulhw yCoeff, %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
// do vertical chrominance interpolation
#define YSCALEYUV2RGB1b \
"xorl %%eax, %%eax \n\t"\
"movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
"movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
"movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
"psrlw $5, %%mm3 \n\t"\
"psrlw $5, %%mm4 \n\t"\
"psubw w400, %%mm3 \n\t" /* (U-128)8*/\
"psubw w400, %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw ugCoeff, %%mm3 \n\t"\
"pmulhw vgCoeff, %%mm4 \n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
"movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax+4]*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax+4] >>4*/\
"pmulhw ubCoeff, %%mm2 \n\t"\
"pmulhw vrCoeff, %%mm5 \n\t"\
"psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
"psubw w80, %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw yCoeff, %%mm1 \n\t"\
"pmulhw yCoeff, %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
#define WRITEBGR32 \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm5, %%mm6 \n\t" /* R */\
"punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
"punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
"punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
"punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
"movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
"movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
"punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
"punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
"punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
MOVNTQ(%%mm0, (%4, %%eax, 4))\
MOVNTQ(%%mm2, 8(%4, %%eax, 4))\
MOVNTQ(%%mm1, 16(%4, %%eax, 4))\
MOVNTQ(%%mm3, 24(%4, %%eax, 4))\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"
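
/* Byte order produced above, illustrated (not part of the build): each
   output dword is 0RGB in the register, i.e. B,G,R,0 in little-endian
   memory, matching the C fallback below:
       dest[4*i+0]=B; dest[4*i+1]=G; dest[4*i+2]=R;
*/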
#define WRITEBGR16 \
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm4, %%mm3 \n\t" /* G */\
"movq %%mm5, %%mm6 \n\t" /* R */\
"punpcklbw %%mm7, %%mm3 \n\t" /* 0G0G0G0G */\
"punpcklbw %%mm7, %%mm2 \n\t" /* 0B0B0B0B */\
"punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R */\
"psrlw $3, %%mm2 \n\t"\
"psllw $3, %%mm3 \n\t"\
"psllw $8, %%mm5 \n\t"\
"pand g16Mask, %%mm3 \n\t"\
"pand r16Mask, %%mm5 \n\t"\
"por %%mm3, %%mm2 \n\t"\
"por %%mm5, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm4 \n\t" /* 0G0G0G0G */\
"punpckhbw %%mm7, %%mm1 \n\t" /* 0B0B0B0B */\
"punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R */\
"psrlw $3, %%mm1 \n\t"\
"psllw $3, %%mm4 \n\t"\
"psllw $8, %%mm6 \n\t"\
"pand g16Mask, %%mm4 \n\t"\
"pand r16Mask, %%mm6 \n\t"\
"por %%mm4, %%mm1 \n\t"\
"por %%mm6, %%mm1 \n\t"\
MOVNTQ(%%mm2, (%4, %%eax, 2))\
MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"
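
/* C equivalent of the 565 pack above, a sketch (not part of the build):
       ((uint16_t*)dest)[i]= (B>>3) | ((G<<3)&0x07E0) | ((R<<8)&0xF800);
   the same psrlw/psllw + mask + por idea, done on 4 pixels per register;
   WRITEBGR15 below differs only in the shifts ($2/$7) and the 555 masks. */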
#define WRITEBGR15 \
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm4, %%mm3 \n\t" /* G */\
"movq %%mm5, %%mm6 \n\t" /* R */\
"punpcklbw %%mm7, %%mm3 \n\t" /* 0G0G0G0G */\
"punpcklbw %%mm7, %%mm2 \n\t" /* 0B0B0B0B */\
"punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R */\
"psrlw $3, %%mm2 \n\t"\
"psllw $2, %%mm3 \n\t"\
"psllw $7, %%mm5 \n\t"\
"pand g15Mask, %%mm3 \n\t"\
"pand r15Mask, %%mm5 \n\t"\
"por %%mm3, %%mm2 \n\t"\
"por %%mm5, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm4 \n\t" /* 0G0G0G0G */\
"punpckhbw %%mm7, %%mm1 \n\t" /* 0B0B0B0B */\
"punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R */\
"psrlw $3, %%mm1 \n\t"\
"psllw $2, %%mm4 \n\t"\
"psllw $7, %%mm6 \n\t"\
"pand g15Mask, %%mm4 \n\t"\
"pand r15Mask, %%mm6 \n\t"\
"por %%mm4, %%mm1 \n\t"\
"por %%mm6, %%mm1 \n\t"\
MOVNTQ(%%mm2, (%4, %%eax, 2))\
MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"
// FIXME find a faster way to shuffle it to BGR24
#define WRITEBGR24 \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm5, %%mm6 \n\t" /* R */\
"punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
"punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
"punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
"punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
"movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
"movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
"punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
"punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
"punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
"movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
"psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
"pand bm00000111, %%mm4 \n\t" /* 00000RGB 0 */\
"pand bm11111000, %%mm0 \n\t" /* 00RGB000 0.5 */\
"por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
"movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
"psllq $48, %%mm2 \n\t" /* GB000000 1 */\
"por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
"movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
"psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
"por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
"pand bm00001111, %%mm2 \n\t" /* 0000RGBR 1 */\
"movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
"psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
"pand bm00000111, %%mm4 \n\t" /* 00000RGB 2 */\
"pand bm11111000, %%mm1 \n\t" /* 00RGB000 2.5 */\
"por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
"movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
"psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
"por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
"psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
"movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
"psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
"pand bm00000111, %%mm5 \n\t" /* 00000RGB 3 */\
"pand bm11111000, %%mm3 \n\t" /* 00RGB000 3.5 */\
"por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
"psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
"por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
"leal (%%eax, %%eax, 2), %%ebx \n\t"\
MOVNTQ(%%mm0, (%4, %%ebx))\
MOVNTQ(%%mm2, 8(%4, %%ebx))\
MOVNTQ(%%mm3, 16(%4, %%ebx))\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"
static inline void yuv2yuv(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
			   uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstw, int yalpha, int uvalpha)
int yalpha1=yalpha^4095;
int uvalpha1=uvalpha^4095;
asm volatile ("\n\t"::: "memory");
((uint8_t*)dest)[i] = (buf0[i]*yalpha1+buf1[i]*yalpha)>>19;
for(i=0; i<dstw/2; i++)
((uint8_t*)uDest)[i] = (uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19;
((uint8_t*)vDest)[i] = (uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19;
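
/* The vertical blend used here, illustrated (not part of the build):
   yalpha is a 12-bit fraction between the two source lines and
   yalpha1=yalpha^4095 is (almost exactly) its complement; buf values carry
   7 extra fraction bits from the horizontal scaler, hence the >>19:
       out = (buf0[i]*yalpha1 + buf1[i]*yalpha)>>19;  // 12+7 fraction bits
   e.g. a flat area of luma 100: (12800*2048 + 12800*2047)>>19 = 99. */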
/*
 * vertical scale YV12 to RGB
 */
static inline void yuv2rgbX(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
			    uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
int yalpha1=yalpha^4095;
int uvalpha1=uvalpha^4095;

"punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
"punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
"movq %%mm3, %%mm1 \n\t"
"punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
"punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
MOVNTQ(%%mm3, (%4, %%eax, 4))
MOVNTQ(%%mm1, 8(%4, %%eax, 4))
"addl $4, %%eax \n\t"
"cmpl %5, %%eax \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

"punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
"punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
"movq %%mm3, %%mm1 \n\t"
"punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
"punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
"movq %%mm3, %%mm2 \n\t" // BGR0BGR0
"psrlq $8, %%mm3 \n\t" // GR0BGR00
"pand bm00000111, %%mm2 \n\t" // BGR00000
"pand bm11111000, %%mm3 \n\t" // 000BGR00
"por %%mm2, %%mm3 \n\t" // BGRBGR00
"movq %%mm1, %%mm2 \n\t"
"psllq $48, %%mm1 \n\t" // 000000BG
"por %%mm1, %%mm3 \n\t" // BGRBGRBG
"movq %%mm2, %%mm1 \n\t" // BGR0BGR0
"psrld $16, %%mm2 \n\t" // R000R000
"psrlq $24, %%mm1 \n\t" // 0BGR0000
"por %%mm2, %%mm1 \n\t" // RBGRR000
"movl %4, %%ebx \n\t"
"addl %%eax, %%ebx \n\t"
"movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
"movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
"movd %%mm3, (%%ebx, %%eax, 2) \n\t"
"psrlq $32, %%mm3 \n\t"
"movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
"movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
"addl $4, %%eax \n\t"
"cmpl %5, %%eax \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)
"paddusb b16Dither, %%mm1 \n\t"
"paddusb b16Dither, %%mm0 \n\t"
"paddusb b16Dither, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
"punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
"punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
"psrlw $3, %%mm3 \n\t"
"psllw $2, %%mm1 \n\t"
"psllw $7, %%mm0 \n\t"
"pand g15Mask, %%mm1 \n\t"
"pand r15Mask, %%mm0 \n\t"
"por %%mm3, %%mm1 \n\t"
"por %%mm1, %%mm0 \n\t"
MOVNTQ(%%mm0, (%4, %%eax, 2))
"addl $4, %%eax \n\t"
"cmpl %5, %%eax \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

"paddusb g16Dither, %%mm1 \n\t"
"paddusb b16Dither, %%mm0 \n\t"
"paddusb b16Dither, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
"punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
"punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
"psrlw $3, %%mm3 \n\t"
"psllw $3, %%mm1 \n\t"
"psllw $8, %%mm0 \n\t"
"pand g16Mask, %%mm1 \n\t"
"pand r16Mask, %%mm0 \n\t"
"por %%mm3, %%mm1 \n\t"
"por %%mm1, %%mm0 \n\t"
MOVNTQ(%%mm0, (%4, %%eax, 2))
"addl $4, %%eax \n\t"
"cmpl %5, %%eax \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)
asm volatile ("\n\t"::: "memory");

if(dstbpp==32 || dstbpp==24)
// vertical linear interpolation && yuv2rgb in a single step:
int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];

// vertical linear interpolation && yuv2rgb in a single step:
int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);

((uint16_t*)dest)[i] =
	(clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
	((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<3)&0x07E0) |
	((clip_table[(Y + yuvtab_3343[V]) >>13]<<8)&0xF800);

// vertical linear interpolation && yuv2rgb in a single step:
int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);

((uint16_t*)dest)[i] =
	(clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
	((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<2)&0x03E0) |
	((clip_table[(Y + yuvtab_3343[V]) >>13]<<7)&0x7C00);

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
"paddusb b16Dither, %%mm2 \n\t"
"paddusb b16Dither, %%mm4 \n\t"
"paddusb b16Dither, %%mm5 \n\t"

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
"paddusb g16Dither, %%mm2 \n\t"
"paddusb b16Dither, %%mm4 \n\t"
"paddusb b16Dither, %%mm5 \n\t"

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

asm volatile ("\n\t"::: "memory");
for(i=0; i<dstw-1; i+=2){
	// vertical linear interpolation && yuv2rgb in a single step:
	int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
	int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
	int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
	int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

	int Cb= yuvtab_40cf[U];
	int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
	int Cr= yuvtab_3343[V];

	dest[4*i+0]=clip_table[((Y1 + Cb) >>13)];
	dest[4*i+1]=clip_table[((Y1 + Cg) >>13)];
	dest[4*i+2]=clip_table[((Y1 + Cr) >>13)];

	dest[4*i+4]=clip_table[((Y2 + Cb) >>13)];
	dest[4*i+5]=clip_table[((Y2 + Cg) >>13)];
	dest[4*i+6]=clip_table[((Y2 + Cr) >>13)];

for(i=0; i<dstw-1; i+=2){
	// vertical linear interpolation && yuv2rgb in a single step:
	int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
	int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
	int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
	int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

	int Cb= yuvtab_40cf[U];
	int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
	int Cr= yuvtab_3343[V];

	dest[0]=clip_table[((Y1 + Cb) >>13)];
	dest[1]=clip_table[((Y1 + Cg) >>13)];
	dest[2]=clip_table[((Y1 + Cr) >>13)];

	dest[3]=clip_table[((Y2 + Cb) >>13)];
	dest[4]=clip_table[((Y2 + Cg) >>13)];
	dest[5]=clip_table[((Y2 + Cr) >>13)];

for(i=0; i<dstw-1; i+=2){
	// vertical linear interpolation && yuv2rgb in a single step:
	int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
	int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
	int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
	int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

	int Cb= yuvtab_40cf[U];
	int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
	int Cr= yuvtab_3343[V];

	((uint16_t*)dest)[i] =
		(clip_table[(Y1 + Cb) >>13]>>3) |
		((clip_table[(Y1 + Cg) >>13]<<3)&0x07E0) |
		((clip_table[(Y1 + Cr) >>13]<<8)&0xF800);

	((uint16_t*)dest)[i+1] =
		(clip_table[(Y2 + Cb) >>13]>>3) |
		((clip_table[(Y2 + Cg) >>13]<<3)&0x07E0) |
		((clip_table[(Y2 + Cr) >>13]<<8)&0xF800);

for(i=0; i<dstw-1; i+=2){
	// vertical linear interpolation && yuv2rgb in a single step:
	int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
	int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
	int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
	int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

	int Cb= yuvtab_40cf[U];
	int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
	int Cr= yuvtab_3343[V];

	((uint16_t*)dest)[i] =
		(clip_table[(Y1 + Cb) >>13]>>3) |
		((clip_table[(Y1 + Cg) >>13]<<2)&0x03E0) |
		((clip_table[(Y1 + Cr) >>13]<<7)&0x7C00);
	((uint16_t*)dest)[i+1] =
		(clip_table[(Y2 + Cb) >>13]>>3) |
		((clip_table[(Y2 + Cg) >>13]<<2)&0x03E0) |
		((clip_table[(Y2 + Cr) >>13]<<7)&0x7C00);
/*
 * YV12 to RGB without scaling or interpolating
 */
static inline void yuv2rgb1(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
			    uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
int yalpha1=yalpha^4095;
int uvalpha1=uvalpha^4095;

if(fullUVIpol || allwaysIpol)
yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);

if( yalpha > 2048 ) buf0 = buf1;
if( uvalpha < 2048 ) // note this is not correct (shifts chrominance by 0.5 pixels) but it's a bit faster
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
"paddusb b16Dither, %%mm2 \n\t"
"paddusb b16Dither, %%mm4 \n\t"
"paddusb b16Dither, %%mm5 \n\t"

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
"paddusb g16Dither, %%mm2 \n\t"
"paddusb b16Dither, %%mm4 \n\t"
"paddusb b16Dither, %%mm5 \n\t"

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
"paddusb b16Dither, %%mm2 \n\t"
"paddusb b16Dither, %%mm4 \n\t"
"paddusb b16Dither, %%mm5 \n\t"

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
"paddusb g16Dither, %%mm2 \n\t"
"paddusb b16Dither, %%mm4 \n\t"
"paddusb b16Dither, %%mm5 \n\t"

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

//FIXME write 2 versions (for even & odd lines)
asm volatile ("\n\t"::: "memory");
for(i=0; i<dstw-1; i+=2){
	// vertical linear interpolation && yuv2rgb in a single step:
	int Y1=yuvtab_2568[buf0[i]>>7];
	int Y2=yuvtab_2568[buf0[i+1]>>7];
	int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
	int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

	int Cb= yuvtab_40cf[U];
	int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
	int Cr= yuvtab_3343[V];

	dest[4*i+0]=clip_table[((Y1 + Cb) >>13)];
	dest[4*i+1]=clip_table[((Y1 + Cg) >>13)];
	dest[4*i+2]=clip_table[((Y1 + Cr) >>13)];

	dest[4*i+4]=clip_table[((Y2 + Cb) >>13)];
	dest[4*i+5]=clip_table[((Y2 + Cg) >>13)];
	dest[4*i+6]=clip_table[((Y2 + Cr) >>13)];

for(i=0; i<dstw-1; i+=2){
	// vertical linear interpolation && yuv2rgb in a single step:
	int Y1=yuvtab_2568[buf0[i]>>7];
	int Y2=yuvtab_2568[buf0[i+1]>>7];
	int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
	int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

	int Cb= yuvtab_40cf[U];
	int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
	int Cr= yuvtab_3343[V];

	dest[0]=clip_table[((Y1 + Cb) >>13)];
	dest[1]=clip_table[((Y1 + Cg) >>13)];
	dest[2]=clip_table[((Y1 + Cr) >>13)];

	dest[3]=clip_table[((Y2 + Cb) >>13)];
	dest[4]=clip_table[((Y2 + Cg) >>13)];
	dest[5]=clip_table[((Y2 + Cr) >>13)];

for(i=0; i<dstw-1; i+=2){
	// vertical linear interpolation && yuv2rgb in a single step:
	int Y1=yuvtab_2568[buf0[i]>>7];
	int Y2=yuvtab_2568[buf0[i+1]>>7];
	int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
	int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

	int Cb= yuvtab_40cf[U];
	int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
	int Cr= yuvtab_3343[V];

	((uint16_t*)dest)[i] =
		(clip_table[(Y1 + Cb) >>13]>>3) |
		((clip_table[(Y1 + Cg) >>13]<<3)&0x07E0) |
		((clip_table[(Y1 + Cr) >>13]<<8)&0xF800);

	((uint16_t*)dest)[i+1] =
		(clip_table[(Y2 + Cb) >>13]>>3) |
		((clip_table[(Y2 + Cg) >>13]<<3)&0x07E0) |
		((clip_table[(Y2 + Cr) >>13]<<8)&0xF800);

for(i=0; i<dstw-1; i+=2){
	// vertical linear interpolation && yuv2rgb in a single step:
	int Y1=yuvtab_2568[buf0[i]>>7];
	int Y2=yuvtab_2568[buf0[i+1]>>7];
	int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
	int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);

	int Cb= yuvtab_40cf[U];
	int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
	int Cr= yuvtab_3343[V];

	((uint16_t*)dest)[i] =
		(clip_table[(Y1 + Cb) >>13]>>3) |
		((clip_table[(Y1 + Cg) >>13]<<2)&0x03E0) |
		((clip_table[(Y1 + Cr) >>13]<<7)&0x7C00);
	((uint16_t*)dest)[i+1] =
		(clip_table[(Y2 + Cb) >>13]>>3) |
		((clip_table[(Y2 + Cg) >>13]<<2)&0x03E0) |
		((clip_table[(Y2 + Cr) >>13]<<7)&0x7C00);
static inline void hyscale(uint16_t *dst, int dstWidth, uint8_t *src, int srcWidth, int xInc)
unsigned int xpos=0;
// *** horizontal scale Y line to temp buffer
"pxor %%mm7, %%mm7 \n\t"
"pxor %%mm2, %%mm2 \n\t" // 2*xalpha
"movd %5, %%mm6 \n\t" // xInc&0xFFFF
"punpcklwd %%mm6, %%mm6 \n\t"
"punpcklwd %%mm6, %%mm6 \n\t"
"movq %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t"
"paddw %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t"
"paddw %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t" //0,t,2t,3t t=xInc&0xFFFF
"movq %%mm2, temp0 \n\t"
"movd %4, %%mm6 \n\t" //(xInc*4)&0xFFFF
"punpcklwd %%mm6, %%mm6 \n\t"
"punpcklwd %%mm6, %%mm6 \n\t"
"xorl %%eax, %%eax \n\t" // i
"movl %0, %%esi \n\t" // src
"movl %1, %%edi \n\t" // buf1
"movl %3, %%edx \n\t" // (xInc*4)>>16
"xorl %%ecx, %%ecx \n\t"
"xorl %%ebx, %%ebx \n\t"
"movw %4, %%bx \n\t" // (xInc*4)&0xFFFF

#define FUNNY_Y_CODE \
PREFETCH" 1024(%%esi) \n\t"\
PREFETCH" 1056(%%esi) \n\t"\
PREFETCH" 1088(%%esi) \n\t"\
"call funnyYCode \n\t"\
"movq temp0, %%mm2 \n\t"\
"xorl %%ecx, %%ecx \n\t"

:: "m" (src), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
"m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF)
: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth-1; i--) dst[i] = src[srcWidth-1]*128;
//NO MMX just normal asm ...
"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, (%%edi, %%eax, 2) \n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFFFF
"adcl %3, %%ebx \n\t" //xx+= xInc>>16 + carry

"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, 2(%%edi, %%eax, 2) \n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFFFF
"adcl %3, %%ebx \n\t" //xx+= xInc>>16 + carry

"addl $2, %%eax \n\t"
"cmpl %2, %%eax \n\t"
:: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
: "%eax", "%ebx", "%ecx", "%edi", "%esi"
} //if MMX2 can't be used
for(i=0;i<dstWidth;i++){
	register unsigned int xx=xpos>>16;
	register unsigned int xalpha=(xpos&0xFFFF)>>9;
	dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
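
/* 16.16 fixed-point stepping, illustrated (not part of the build):
   xx=xpos>>16 indexes the source, xalpha=(xpos&0xFFFF)>>9 is a 7-bit
   blend weight, so dst keeps 7 fraction bits for the later vertical blend:
       dst[i]= (src[xx]<<7) + (src[xx+1]-src[xx])*xalpha;
   e.g. 2x upscaling (xInc=0x8000): i=1 gives xx=0, xalpha=64, the exact
   midpoint between src[0] and src[1]. */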
inline static void hcscale(uint16_t *dst, int dstWidth,
			   uint8_t *src1, uint8_t *src2, int srcWidth, int xInc)
"pxor %%mm7, %%mm7 \n\t"
"pxor %%mm2, %%mm2 \n\t" // 2*xalpha
"movd %5, %%mm6 \n\t" // xInc&0xFFFF
"punpcklwd %%mm6, %%mm6 \n\t"
"punpcklwd %%mm6, %%mm6 \n\t"
"movq %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t"
"paddw %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t"
"paddw %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t" //0,t,2t,3t t=xInc&0xFFFF
"movq %%mm2, temp0 \n\t"
"movd %4, %%mm6 \n\t" //(xInc*4)&0xFFFF
"punpcklwd %%mm6, %%mm6 \n\t"
"punpcklwd %%mm6, %%mm6 \n\t"
"xorl %%eax, %%eax \n\t" // i
"movl %0, %%esi \n\t" // src
"movl %1, %%edi \n\t" // buf1
"movl %3, %%edx \n\t" // (xInc*4)>>16
"xorl %%ecx, %%ecx \n\t"
"xorl %%ebx, %%ebx \n\t"
"movw %4, %%bx \n\t" // (xInc*4)&0xFFFF

#define FUNNYUVCODE \
PREFETCH" 1024(%%esi) \n\t"\
PREFETCH" 1056(%%esi) \n\t"\
PREFETCH" 1088(%%esi) \n\t"\
"call funnyUVCode \n\t"\
"movq temp0, %%mm2 \n\t"\
"xorl %%ecx, %%ecx \n\t"

"xorl %%eax, %%eax \n\t" // i
"movl %6, %%esi \n\t" // src
"movl %1, %%edi \n\t" // buf1
"addl $4096, %%edi \n\t"

:: "m" (src1), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
"m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF), "m" (src2)
: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth/2-1; i--)
{
	dst[i] = src1[srcWidth/2-1]*128;
	dst[i+2048] = src2[srcWidth/2-1]*128;
}
"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
"movl %0, %%esi \n\t"
"movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%%esi, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, (%%edi, %%eax, 2) \n\t"

"movzbl (%5, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%5, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, 4096(%%edi, %%eax, 2)\n\t"

"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFFFF
"adcl %3, %%ebx \n\t" //xx+= xInc>>16 + carry
"addl $1, %%eax \n\t"
"cmpl %2, %%eax \n\t"
:: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF),
: "%eax", "%ebx", "%ecx", "%edi", "%esi"
} //if MMX2 can't be used
for(i=0;i<dstWidth;i++){
	register unsigned int xx=xpos>>16;
	register unsigned int xalpha=(xpos&0xFFFF)>>9;
	dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
	dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
	dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
	dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;

// *** bilinear scaling and yuv->rgb or yuv->yuv conversion of yv12 slices:
// *** Note: it's called multiple times while decoding a frame, first time y==0
// *** Designed to upscale, but may work for downscale too.
// s_xinc = (src_width << 16) / dst_width
// s_yinc = (src_height << 16) / dst_height
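// e.g. scaling 720 wide to 1024: s_xinc = (720<<16)/1024 = 0xB400,
// i.e. ~0.70 source pixels per destination pixel (upscaling)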
void SwScale_YV12slice(unsigned char* srcptr[],int stride[], int y, int h,
		       uint8_t* dstptr[], int dststride, int dstw, int dstbpp,
		       unsigned int s_xinc,unsigned int s_yinc){

//static int s_yinc=(vo_dga_src_height<<16)/vo_dga_vp_height;
//static int s_xinc=(vo_dga_src_width<<8)/vo_dga_vp_width;

unsigned int s_xinc2;

static int s_srcypos; // points to the dst pixel's center in the source (0 is the center of pixel 0,0 in src)

// last horizontally interpolated lines, used to avoid unnecessary calculations
static int s_last_ypos;
static int s_last_y1pos;

static int static_dstw;

// used to detect a horizontal size change
static int old_dstw= -1;
static int old_s_xinc= -1;

int srcWidth= (dstw*s_xinc + 0x8000)>>16;
int dstUVw= fullUVIpol ? dstw : dstw/2;

canMMX2BeUsed= (s_xinc <= 0x10000 && (dstw&31)==0 && (srcWidth&15)==0) ? 1 : 0;

// match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
// n-2 is the last chrominance sample available
// FIXME this is not perfect, but no one should notice the difference, the more correct variant
// would be like the vertical one, but that would require some special code for the
// first and last pixel
if(canMMX2BeUsed) s_xinc+= 20;
else s_xinc = ((srcWidth-2)<<16)/(dstw-2) - 20;

if(fullUVIpol && !(dstbpp==12)) s_xinc2= s_xinc>>1;
else s_xinc2= s_xinc;
// force calculation of the horizontal interpolation of the first line

s_srcypos= s_yinc/2 - 0x8000;
// can't downscale !!!
if((old_s_xinc != s_xinc || old_dstw!=dstw) && canMMX2BeUsed)

int xpos, xx, xalpha, i;

// create an optimized horizontal scaling routine

"movq (%%esi), %%mm0 \n\t" //FIXME Alignment
"movq %%mm0, %%mm1 \n\t"
"psrlq $8, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"addw %%bx, %%cx \n\t" //2*xalpha += (4*s_xinc)&0xFFFF
"pshufw $0xFF, %%mm1, %%mm1 \n\t"
"adcl %%edx, %%esi \n\t" //xx+= (4*s_xinc)>>16 + carry
"pshufw $0xFF, %%mm0, %%mm0 \n\t"
"psrlw $9, %%mm3 \n\t"
"psubw %%mm1, %%mm0 \n\t"
"pmullw %%mm3, %%mm0 \n\t"
"paddw %%mm6, %%mm2 \n\t" // 2*alpha += xpos&0xFFFF
"psllw $7, %%mm1 \n\t"
"paddw %%mm1, %%mm0 \n\t"
"movq %%mm0, (%%edi, %%eax) \n\t"
"addl $8, %%eax \n\t"
:"=r" (fragment), "=r" (imm8OfPShufW1), "=r" (imm8OfPShufW2),
"=r" (fragmentLength)

xpos= 0; //s_xinc/2 - 0x8000; // difference between pixel centers

/* choose xinc so that all 8 parts fit exactly
Note: we cannot use just 1 part because it would not fit in the code cache */
// s_xinc2_diff= -((((s_xinc2*(dstw/8))&0xFFFF))/(dstw/8))-10;
// s_xinc_diff= -((((s_xinc*(dstw/8))&0xFFFF))/(dstw/8));
// s_xinc2_diff+= ((0x10000/(dstw/8)));
// s_xinc_diff= s_xinc2_diff*2;
// s_xinc2+= s_xinc2_diff;
// s_xinc+= s_xinc_diff;
// old_s_xinc= s_xinc;

for(i=0; i<dstw/8; i++)
	int b=((xpos+s_xinc)>>16) - xx;
	int c=((xpos+s_xinc*2)>>16) - xx;
	int d=((xpos+s_xinc*3)>>16) - xx;
	memcpy(funnyYCode + fragmentLength*i/4, fragment, fragmentLength);
	funnyYCode[fragmentLength*i/4 + imm8OfPShufW1]=
	funnyYCode[fragmentLength*i/4 + imm8OfPShufW2]=
		a | (b<<2) | (c<<4) | (d<<6);
	// if we don't need to read 8 bytes then don't :), reduces the chance of
	// crossing a cache line
	if(d<3) funnyYCode[fragmentLength*i/4 + 1]= 0x6E;
funnyYCode[fragmentLength*(i+4)/4]= RET;
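
/* pshufw imm8 packing, illustrated (not part of the build): a..d are the
   source-index deltas (each 0..3 when upscaling) of 4 consecutive output
   pixels, packed 2 bits apiece:
       imm8 = a | (b<<2) | (c<<4) | (d<<6);
   e.g. 2x upscaling gives deltas 0,0,1,1 -> imm8 = 0x50. */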
xpos= 0; //s_xinc2/2 - 0x10000; // difference between centers of chroma samples
for(i=0; i<dstUVw/8; i++)
	int b=((xpos+s_xinc2)>>16) - xx;
	int c=((xpos+s_xinc2*2)>>16) - xx;
	int d=((xpos+s_xinc2*3)>>16) - xx;
	memcpy(funnyUVCode + fragmentLength*i/4, fragment, fragmentLength);
	funnyUVCode[fragmentLength*i/4 + imm8OfPShufW1]=
	funnyUVCode[fragmentLength*i/4 + imm8OfPShufW2]=
		a | (b<<2) | (c<<4) | (d<<6);
	// if we don't need to read 8 bytes then don't :), reduces the chance of
	// crossing a cache line
	if(d<3) funnyUVCode[fragmentLength*i/4 + 1]= 0x6E;
funnyUVCode[fragmentLength*(i+4)/4]= RET;

// funnyCode[0]= RET;

unsigned char *dest =dstptr[0]+dststride*s_ypos;
unsigned char *uDest=dstptr[1]+(dststride>>1)*(s_ypos>>1);
unsigned char *vDest=dstptr[2]+(dststride>>1)*(s_ypos>>1);

int y0=(s_srcypos + 0xFFFF)>>16; // first luminance source line number below the dst line
// points to the dst pixel's center in the source (0 is the center of pixel 0,0 in src)
int srcuvpos= dstbpp==12 ? s_srcypos + s_yinc/2 - 0x8000 :
int y1=(srcuvpos + 0x1FFFF)>>17; // first chrominance source line number below the dst line
int yalpha=((s_srcypos-1)&0xFFFF)>>4;
int uvalpha=((srcuvpos-1)&0x1FFFF)>>5;
uint16_t *buf0=pix_buf_y[y0&1]; // top line of the interpolated slice
uint16_t *buf1=pix_buf_y[((y0+1)&1)]; // bottom line of the interpolated slice
uint16_t *uvbuf0=pix_buf_uv[y1&1]; // top line of the interpolated slice
uint16_t *uvbuf1=pix_buf_uv[(y1+1)&1]; // bottom line of the interpolated slice

if(y0>=y+h) break; // FIXME wrong, skips last lines, but they are duplicates anyway
if((y0&1) && dstbpp==12) uvalpha=-1; // there is no alpha if there is no line

s_ypos++; s_srcypos+=s_yinc;

//only interpolate the src line horizontally if we didn't do it already
// skip if first line has been horiz scaled already
if(s_last_ypos != y0-1)
// check if first line is before any available src lines
if(y0-1 < y) src=srcptr[0]+(0 )*stride[0];
else src=srcptr[0]+(y0-y-1)*stride[0];
hyscale(buf0, dstw, src, srcWidth, s_xinc);

// check if second line is after any available src lines
if(y0-y >= h) src=srcptr[0]+(h-1)*stride[0];
else src=srcptr[0]+(y0-y)*stride[0];

// the min() is required to avoid reusing lines which were not available
s_last_ypos= MIN(y0, y+h-1);
hyscale(buf1, dstw, src, srcWidth, s_xinc);

// printf("%d %d %d %d\n", y, y1, s_last_y1pos, h);
// *** horizontal scale U and V lines to temp buffer
if(s_last_y1pos!=y1)
uint8_t *src1, *src2;
// skip if first line has been horiz scaled already
if(s_last_y1pos != y1-1)
// check if first line is before any available src lines
src1= srcptr[1]+(0)*stride[1];
src2= srcptr[2]+(0)*stride[2];
src1= srcptr[1]+(y1-y/2-1)*stride[1];
src2= srcptr[2]+(y1-y/2-1)*stride[2];
hcscale(uvbuf0, dstUVw, src1, src2, srcWidth, s_xinc2);

// check if second line is after any available src lines
src1= srcptr[1]+(h/2-1)*stride[1];
src2= srcptr[2]+(h/2-1)*stride[2];
src1= srcptr[1]+(y1-y/2)*stride[1];
src2= srcptr[2]+(y1-y/2)*stride[2];
hcscale(uvbuf1, dstUVw, src1, src2, srcWidth, s_xinc2);

// the min() is required to avoid reusing lines which were not available
s_last_y1pos= MIN(y1, y/2+h/2-1);

if(dstbpp==12) //YV12
yuv2yuv(buf0, buf1, uvbuf0, uvbuf1, dest, uDest, vDest, dstw, yalpha, uvalpha);
else if(ABS(s_yinc - 0x10000) < 10)
yuv2rgb1(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);

b16Dither= b16Dither1;
b16Dither1= b16Dither2;
b16Dither2= b16Dither;

g16Dither= g16Dither1;
g16Dither1= g16Dither2;
g16Dither2= g16Dither;

__asm __volatile(SFENCE:::"memory");
__asm __volatile(EMMS:::"memory");
void SwScale_Init(){
// generating tables:
clip_table[i+256]=i;
clip_table[i+512]=255;
yuvtab_2568[i]=(0x2568*(i-16))+(256<<13);
yuvtab_3343[i]=0x3343*(i-128);
yuvtab_0c92[i]=-0x0c92*(i-128);
yuvtab_1a1e[i]=-0x1a1e*(i-128);
yuvtab_40cf[i]=0x40cf*(i-128);
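// the hex names are 13-bit fixed-point coefficients, e.g. 0x2568/8192 ~= 1.17
// (the BT.601 luma scale for 16..235 input) and 0x3343/8192 ~= 1.60 (V->R);
// the (256<<13) bias in yuvtab_2568 shifts results into clip_table's
// 768-entry range (256 entries clamping below, 256 in range, 256 above)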