2 // Software scaling and colorspace conversion routines for MPlayer
// Original C implementation by A'rpi/ESP-team <arpi@thot.banki.hu>
5 // current version mostly by Michael Niedermayer (michaelni@gmx.at)
6 // the parts written by michael are under GNU GPL
10 #include "../config.h"
12 #include "../mmx_defs.h"
20 //disables the unscaled height version
23 #define RET 0xC3 //near return opcode
known BUGS with known cause (no bug reports please, but patches are welcome :) )
horizontal MMX2 scaler reads 1-7 samples too many (might cause a sig11)
Supported output formats: BGR15 BGR16 BGR24 BGR32
BGR15 & BGR16 MMX versions support dithering
32 Special versions: fast Y 1:1 scaling (no interpolation in y direction)
more intelligent misalignment avoidance for the horizontal scaler
38 change the distance of the u & v buffer
41 #define ABS(a) ((a) > 0 ? (a) : (-(a)))
42 #define MIN(a,b) ((a) > (b) ? (b) : (a))
43 #define MAX(a,b) ((a) < (b) ? (b) : (a))
46 #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
47 #elif defined (HAVE_3DNOW)
48 #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
52 #define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
54 #define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
59 static uint64_t __attribute__((aligned(8))) yCoeff= 0x2568256825682568LL;
60 static uint64_t __attribute__((aligned(8))) vrCoeff= 0x3343334333433343LL;
61 static uint64_t __attribute__((aligned(8))) ubCoeff= 0x40cf40cf40cf40cfLL;
62 static uint64_t __attribute__((aligned(8))) vgCoeff= 0xE5E2E5E2E5E2E5E2LL;
63 static uint64_t __attribute__((aligned(8))) ugCoeff= 0xF36EF36EF36EF36ELL;
64 static uint64_t __attribute__((aligned(8))) w400= 0x0400040004000400LL;
65 static uint64_t __attribute__((aligned(8))) w80= 0x0080008000800080LL;
66 static uint64_t __attribute__((aligned(8))) w10= 0x0010001000100010LL;
67 static uint64_t __attribute__((aligned(8))) bm00001111=0x00000000FFFFFFFFLL;
68 static uint64_t __attribute__((aligned(8))) bm00000111=0x0000000000FFFFFFLL;
69 static uint64_t __attribute__((aligned(8))) bm11111000=0xFFFFFFFFFF000000LL;
71 static uint64_t __attribute__((aligned(8))) b16Dither= 0x0004000400040004LL;
72 static uint64_t __attribute__((aligned(8))) b16Dither1=0x0004000400040004LL;
73 static uint64_t __attribute__((aligned(8))) b16Dither2=0x0602060206020602LL;
74 static uint64_t __attribute__((aligned(8))) g16Dither= 0x0002000200020002LL;
75 static uint64_t __attribute__((aligned(8))) g16Dither1=0x0002000200020002LL;
76 static uint64_t __attribute__((aligned(8))) g16Dither2=0x0301030103010301LL;
78 static uint64_t __attribute__((aligned(8))) b16Mask= 0x001F001F001F001FLL;
79 static uint64_t __attribute__((aligned(8))) g16Mask= 0x07E007E007E007E0LL;
80 static uint64_t __attribute__((aligned(8))) r16Mask= 0xF800F800F800F800LL;
81 static uint64_t __attribute__((aligned(8))) b15Mask= 0x001F001F001F001FLL;
82 static uint64_t __attribute__((aligned(8))) g15Mask= 0x03E003E003E003E0LL;
83 static uint64_t __attribute__((aligned(8))) r15Mask= 0x7C007C007C007C00LL;
85 static uint64_t __attribute__((aligned(8))) temp0;
86 static uint64_t __attribute__((aligned(8))) asm_yalpha1;
87 static uint64_t __attribute__((aligned(8))) asm_uvalpha1;
90 // temporary storage for 4 yuv lines:
91 // 16bit for now (mmx likes it more compact)
93 static uint16_t __attribute__((aligned(8))) pix_buf_y[4][2048];
94 static uint16_t __attribute__((aligned(8))) pix_buf_uv[2][2048*2];
96 static uint16_t pix_buf_y[4][2048];
97 static uint16_t pix_buf_uv[2][2048*2];
100 // clipping helper table for C implementations:
101 static unsigned char clip_table[768];
103 static unsigned short clip_table16b[768];
104 static unsigned short clip_table16g[768];
105 static unsigned short clip_table16r[768];
106 static unsigned short clip_table15b[768];
107 static unsigned short clip_table15g[768];
108 static unsigned short clip_table15r[768];
110 // yuv->rgb conversion tables:
111 static int yuvtab_2568[256];
112 static int yuvtab_3343[256];
113 static int yuvtab_0c92[256];
114 static int yuvtab_1a1e[256];
115 static int yuvtab_40cf[256];
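/* How the two table sets above cooperate in the scalar C paths (sketch only,
   see SwScale_Init and the per-pixel loops further down): yuvtab_2568[] carries
   a +256<<13 bias, so after the chroma contribution is added and the sum is
   shifted right by 13 the result indexes clip_table[] at an offset of +256;
   entries 256..511 are the identity and the ends saturate, which avoids any
   per-pixel range checks, e.g.

	B = clip_table[(yuvtab_2568[Y] + yuvtab_40cf[U]) >> 13];
*/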
118 static uint8_t funnyYCode[10000];
119 static uint8_t funnyUVCode[10000];
121 static int canMMX2BeUsed=0;
123 #define FULL_YSCALEYUV2RGB \
124 "pxor %%mm7, %%mm7 \n\t"\
125 "movd %6, %%mm6 \n\t" /*yalpha1*/\
126 "punpcklwd %%mm6, %%mm6 \n\t"\
127 "punpcklwd %%mm6, %%mm6 \n\t"\
128 "movd %7, %%mm5 \n\t" /*uvalpha1*/\
129 "punpcklwd %%mm5, %%mm5 \n\t"\
130 "punpcklwd %%mm5, %%mm5 \n\t"\
131 "xorl %%eax, %%eax \n\t"\
133 "movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
134 "movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
135 "movq (%2, %%eax,2), %%mm2 \n\t" /* uvbuf0[eax]*/\
136 "movq (%3, %%eax,2), %%mm3 \n\t" /* uvbuf1[eax]*/\
137 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
138 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
139 "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
140 "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
141 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
142 "movq 4096(%2, %%eax,2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
143 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
144 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
145 "movq 4096(%3, %%eax,2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
147 "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
148 "psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
149 "psubw w400, %%mm3 \n\t" /* 8(U-128)*/\
150 "pmulhw yCoeff, %%mm1 \n\t"\
153 "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
154 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
155 "pmulhw ubCoeff, %%mm3 \n\t"\
156 "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
157 "pmulhw ugCoeff, %%mm2 \n\t"\
"paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
159 "psubw w400, %%mm0 \n\t" /* (V-128)8*/\
162 "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
163 "pmulhw vrCoeff, %%mm0 \n\t"\
164 "pmulhw vgCoeff, %%mm4 \n\t"\
165 "paddw %%mm1, %%mm3 \n\t" /* B*/\
166 "paddw %%mm1, %%mm0 \n\t" /* R*/\
167 "packuswb %%mm3, %%mm3 \n\t"\
169 "packuswb %%mm0, %%mm0 \n\t"\
170 "paddw %%mm4, %%mm2 \n\t"\
171 "paddw %%mm2, %%mm1 \n\t" /* G*/\
173 "packuswb %%mm1, %%mm1 \n\t"
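/* Note on the interpolation trick used by this and the following macros: with
   yalpha1 = 4095 - yalpha (as set up by the callers below),

	(buf1>>4) + (((buf0-buf1)*yalpha1)>>16)  ~=  (buf0*yalpha1 + buf1*yalpha)>>16

   i.e. the psubw/pmulhw/paddw sequence blends the two source lines with a
   single multiply per component (rounding ignored; reference sketch only). */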
175 #define YSCALEYUV2RGB \
176 "movd %6, %%mm6 \n\t" /*yalpha1*/\
177 "punpcklwd %%mm6, %%mm6 \n\t"\
178 "punpcklwd %%mm6, %%mm6 \n\t"\
179 "movq %%mm6, asm_yalpha1 \n\t"\
180 "movd %7, %%mm5 \n\t" /*uvalpha1*/\
181 "punpcklwd %%mm5, %%mm5 \n\t"\
182 "punpcklwd %%mm5, %%mm5 \n\t"\
183 "movq %%mm5, asm_uvalpha1 \n\t"\
184 "xorl %%eax, %%eax \n\t"\
186 "movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
187 "movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
188 "movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
189 "movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
190 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
191 "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
192 "movq asm_uvalpha1, %%mm0 \n\t"\
193 "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
194 "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
195 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
196 "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
199 "psubw w400, %%mm3 \n\t" /* (U-128)8*/\
200 "psubw w400, %%mm4 \n\t" /* (V-128)8*/\
201 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
202 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
203 "pmulhw ugCoeff, %%mm3 \n\t"\
204 "pmulhw vgCoeff, %%mm4 \n\t"\
205 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
206 "movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
207 "movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
208 "movq 8(%0, %%eax, 2), %%mm6 \n\t" /*buf0[eax]*/\
209 "movq 8(%1, %%eax, 2), %%mm7 \n\t" /*buf1[eax]*/\
210 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
211 "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
212 "pmulhw asm_yalpha1, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
213 "pmulhw asm_yalpha1, %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
214 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
215 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
216 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
217 "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
218 "pmulhw ubCoeff, %%mm2 \n\t"\
219 "pmulhw vrCoeff, %%mm5 \n\t"\
220 "psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
221 "psubw w80, %%mm7 \n\t" /* 8(Y-16)*/\
222 "pmulhw yCoeff, %%mm1 \n\t"\
223 "pmulhw yCoeff, %%mm7 \n\t"\
224 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
225 "paddw %%mm3, %%mm4 \n\t"\
226 "movq %%mm2, %%mm0 \n\t"\
227 "movq %%mm5, %%mm6 \n\t"\
228 "movq %%mm4, %%mm3 \n\t"\
229 "punpcklwd %%mm2, %%mm2 \n\t"\
230 "punpcklwd %%mm5, %%mm5 \n\t"\
231 "punpcklwd %%mm4, %%mm4 \n\t"\
232 "paddw %%mm1, %%mm2 \n\t"\
233 "paddw %%mm1, %%mm5 \n\t"\
234 "paddw %%mm1, %%mm4 \n\t"\
235 "punpckhwd %%mm0, %%mm0 \n\t"\
236 "punpckhwd %%mm6, %%mm6 \n\t"\
237 "punpckhwd %%mm3, %%mm3 \n\t"\
238 "paddw %%mm7, %%mm0 \n\t"\
239 "paddw %%mm7, %%mm6 \n\t"\
240 "paddw %%mm7, %%mm3 \n\t"\
241 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
242 "packuswb %%mm0, %%mm2 \n\t"\
243 "packuswb %%mm6, %%mm5 \n\t"\
244 "packuswb %%mm3, %%mm4 \n\t"\
245 "pxor %%mm7, %%mm7 \n\t"
247 #define YSCALEYUV2RGB1 \
248 "xorl %%eax, %%eax \n\t"\
250 "movq (%2, %%eax), %%mm3 \n\t" /* uvbuf0[eax]*/\
251 "movq 4096(%2, %%eax), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
"psraw $4, %%mm3 \n\t" /* uvbuf0[eax] >>4*/\
"psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] >>4*/\
254 "psubw w400, %%mm3 \n\t" /* (U-128)8*/\
255 "psubw w400, %%mm4 \n\t" /* (V-128)8*/\
256 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
257 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
258 "pmulhw ugCoeff, %%mm3 \n\t"\
259 "pmulhw vgCoeff, %%mm4 \n\t"\
260 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
261 "movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
262 "movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax]*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax] >>4*/\
265 "pmulhw ubCoeff, %%mm2 \n\t"\
266 "pmulhw vrCoeff, %%mm5 \n\t"\
267 "psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
268 "psubw w80, %%mm7 \n\t" /* 8(Y-16)*/\
269 "pmulhw yCoeff, %%mm1 \n\t"\
270 "pmulhw yCoeff, %%mm7 \n\t"\
271 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
272 "paddw %%mm3, %%mm4 \n\t"\
273 "movq %%mm2, %%mm0 \n\t"\
274 "movq %%mm5, %%mm6 \n\t"\
275 "movq %%mm4, %%mm3 \n\t"\
276 "punpcklwd %%mm2, %%mm2 \n\t"\
277 "punpcklwd %%mm5, %%mm5 \n\t"\
278 "punpcklwd %%mm4, %%mm4 \n\t"\
279 "paddw %%mm1, %%mm2 \n\t"\
280 "paddw %%mm1, %%mm5 \n\t"\
281 "paddw %%mm1, %%mm4 \n\t"\
282 "punpckhwd %%mm0, %%mm0 \n\t"\
283 "punpckhwd %%mm6, %%mm6 \n\t"\
284 "punpckhwd %%mm3, %%mm3 \n\t"\
285 "paddw %%mm7, %%mm0 \n\t"\
286 "paddw %%mm7, %%mm6 \n\t"\
287 "paddw %%mm7, %%mm3 \n\t"\
288 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
289 "packuswb %%mm0, %%mm2 \n\t"\
290 "packuswb %%mm6, %%mm5 \n\t"\
291 "packuswb %%mm3, %%mm4 \n\t"\
292 "pxor %%mm7, %%mm7 \n\t"
294 // do vertical chrominance interpolation
295 #define YSCALEYUV2RGB1b \
296 "xorl %%eax, %%eax \n\t"\
298 "movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
299 "movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
300 "movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
301 "movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
302 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
303 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
304 "psrlw $5, %%mm3 \n\t"\
305 "psrlw $5, %%mm4 \n\t"\
306 "psubw w400, %%mm3 \n\t" /* (U-128)8*/\
307 "psubw w400, %%mm4 \n\t" /* (V-128)8*/\
308 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
309 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
310 "pmulhw ugCoeff, %%mm3 \n\t"\
311 "pmulhw vgCoeff, %%mm4 \n\t"\
312 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
313 "movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
314 "movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax]*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax] >>4*/\
317 "pmulhw ubCoeff, %%mm2 \n\t"\
318 "pmulhw vrCoeff, %%mm5 \n\t"\
319 "psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
320 "psubw w80, %%mm7 \n\t" /* 8(Y-16)*/\
321 "pmulhw yCoeff, %%mm1 \n\t"\
322 "pmulhw yCoeff, %%mm7 \n\t"\
323 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
324 "paddw %%mm3, %%mm4 \n\t"\
325 "movq %%mm2, %%mm0 \n\t"\
326 "movq %%mm5, %%mm6 \n\t"\
327 "movq %%mm4, %%mm3 \n\t"\
328 "punpcklwd %%mm2, %%mm2 \n\t"\
329 "punpcklwd %%mm5, %%mm5 \n\t"\
330 "punpcklwd %%mm4, %%mm4 \n\t"\
331 "paddw %%mm1, %%mm2 \n\t"\
332 "paddw %%mm1, %%mm5 \n\t"\
333 "paddw %%mm1, %%mm4 \n\t"\
334 "punpckhwd %%mm0, %%mm0 \n\t"\
335 "punpckhwd %%mm6, %%mm6 \n\t"\
336 "punpckhwd %%mm3, %%mm3 \n\t"\
337 "paddw %%mm7, %%mm0 \n\t"\
338 "paddw %%mm7, %%mm6 \n\t"\
339 "paddw %%mm7, %%mm3 \n\t"\
340 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
341 "packuswb %%mm0, %%mm2 \n\t"\
342 "packuswb %%mm6, %%mm5 \n\t"\
343 "packuswb %%mm3, %%mm4 \n\t"\
344 "pxor %%mm7, %%mm7 \n\t"
347 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
348 "movq %%mm2, %%mm1 \n\t" /* B */\
349 "movq %%mm5, %%mm6 \n\t" /* R */\
350 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
351 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
352 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
353 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
354 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
355 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
356 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
357 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
358 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
359 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
361 MOVNTQ(%%mm0, (%4, %%eax, 4))\
362 MOVNTQ(%%mm2, 8(%4, %%eax, 4))\
363 MOVNTQ(%%mm1, 16(%4, %%eax, 4))\
364 MOVNTQ(%%mm3, 24(%4, %%eax, 4))\
366 "addl $8, %%eax \n\t"\
367 "cmpl %5, %%eax \n\t"\
371 "movq %%mm2, %%mm1 \n\t" /* B */\
372 "movq %%mm4, %%mm3 \n\t" /* G */\
373 "movq %%mm5, %%mm6 \n\t" /* R */\
375 "punpcklbw %%mm7, %%mm3 \n\t" /* 0G0G0G0G */\
376 "punpcklbw %%mm7, %%mm2 \n\t" /* 0B0B0B0B */\
377 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R */\
379 "psrlw $3, %%mm2 \n\t"\
380 "psllw $3, %%mm3 \n\t"\
381 "psllw $8, %%mm5 \n\t"\
383 "pand g16Mask, %%mm3 \n\t"\
384 "pand r16Mask, %%mm5 \n\t"\
386 "por %%mm3, %%mm2 \n\t"\
387 "por %%mm5, %%mm2 \n\t"\
389 "punpckhbw %%mm7, %%mm4 \n\t" /* 0G0G0G0G */\
390 "punpckhbw %%mm7, %%mm1 \n\t" /* 0B0B0B0B */\
391 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R */\
393 "psrlw $3, %%mm1 \n\t"\
394 "psllw $3, %%mm4 \n\t"\
395 "psllw $8, %%mm6 \n\t"\
397 "pand g16Mask, %%mm4 \n\t"\
398 "pand r16Mask, %%mm6 \n\t"\
400 "por %%mm4, %%mm1 \n\t"\
401 "por %%mm6, %%mm1 \n\t"\
403 MOVNTQ(%%mm2, (%4, %%eax, 2))\
404 MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
406 "addl $8, %%eax \n\t"\
407 "cmpl %5, %%eax \n\t"\
411 "movq %%mm2, %%mm1 \n\t" /* B */\
412 "movq %%mm4, %%mm3 \n\t" /* G */\
413 "movq %%mm5, %%mm6 \n\t" /* R */\
415 "punpcklbw %%mm7, %%mm3 \n\t" /* 0G0G0G0G */\
416 "punpcklbw %%mm7, %%mm2 \n\t" /* 0B0B0B0B */\
417 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R */\
419 "psrlw $3, %%mm2 \n\t"\
420 "psllw $2, %%mm3 \n\t"\
421 "psllw $7, %%mm5 \n\t"\
423 "pand g15Mask, %%mm3 \n\t"\
424 "pand r15Mask, %%mm5 \n\t"\
426 "por %%mm3, %%mm2 \n\t"\
427 "por %%mm5, %%mm2 \n\t"\
429 "punpckhbw %%mm7, %%mm4 \n\t" /* 0G0G0G0G */\
430 "punpckhbw %%mm7, %%mm1 \n\t" /* 0B0B0B0B */\
431 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R */\
433 "psrlw $3, %%mm1 \n\t"\
434 "psllw $2, %%mm4 \n\t"\
435 "psllw $7, %%mm6 \n\t"\
437 "pand g15Mask, %%mm4 \n\t"\
438 "pand r15Mask, %%mm6 \n\t"\
440 "por %%mm4, %%mm1 \n\t"\
441 "por %%mm6, %%mm1 \n\t"\
443 MOVNTQ(%%mm2, (%4, %%eax, 2))\
444 MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
446 "addl $8, %%eax \n\t"\
447 "cmpl %5, %%eax \n\t"\
449 // FIXME find a faster way to shuffle it to BGR24
451 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
452 "movq %%mm2, %%mm1 \n\t" /* B */\
453 "movq %%mm5, %%mm6 \n\t" /* R */\
454 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
455 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
456 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
457 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
458 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
459 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
460 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
461 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
462 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
463 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
465 "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
466 "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
467 "pand bm00000111, %%mm4 \n\t" /* 00000RGB 0 */\
468 "pand bm11111000, %%mm0 \n\t" /* 00RGB000 0.5 */\
469 "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
470 "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
471 "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
472 "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
474 "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
475 "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
476 "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
477 "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
478 "pand bm00001111, %%mm2 \n\t" /* 0000RGBR 1 */\
479 "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
480 "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
481 "pand bm00000111, %%mm4 \n\t" /* 00000RGB 2 */\
482 "pand bm11111000, %%mm1 \n\t" /* 00RGB000 2.5 */\
483 "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
484 "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
485 "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
486 "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
488 "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
489 "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
490 "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
491 "pand bm00000111, %%mm5 \n\t" /* 00000RGB 3 */\
492 "pand bm11111000, %%mm3 \n\t" /* 00RGB000 3.5 */\
493 "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
494 "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
495 "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
497 "leal (%%eax, %%eax, 2), %%ebx \n\t"\
498 MOVNTQ(%%mm0, (%4, %%ebx))\
499 MOVNTQ(%%mm2, 8(%4, %%ebx))\
500 MOVNTQ(%%mm3, 16(%4, %%ebx))\
502 "addl $8, %%eax \n\t"\
503 "cmpl %5, %%eax \n\t"\
507 static inline void yuv2yuv(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
508 uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstw, int yalpha, int uvalpha)
510 int yalpha1=yalpha^4095;
511 int uvalpha1=uvalpha^4095;
514 asm volatile ("\n\t"::: "memory");
518 ((uint8_t*)dest)[i] = (buf0[i]*yalpha1+buf1[i]*yalpha)>>19;
523 for(i=0; i<(dstw>>1); i++)
525 ((uint8_t*)uDest)[i] = (uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19;
526 ((uint8_t*)vDest)[i] = (uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19;
532 * vertical scale YV12 to RGB
534 static inline void yuv2rgbX(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
535 uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
537 int yalpha1=yalpha^4095;
538 int uvalpha1=uvalpha^4095;
551 "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
552 "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
554 "movq %%mm3, %%mm1 \n\t"
555 "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
556 "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
558 MOVNTQ(%%mm3, (%4, %%eax, 4))
559 MOVNTQ(%%mm1, 8(%4, %%eax, 4))
561 "addl $4, %%eax \n\t"
562 "cmpl %5, %%eax \n\t"
566 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
567 "m" (yalpha1), "m" (uvalpha1)
578 "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
579 "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
581 "movq %%mm3, %%mm1 \n\t"
582 "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
583 "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
585 "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
586 "psrlq $8, %%mm3 \n\t" // GR0BGR00
587 "pand bm00000111, %%mm2 \n\t" // BGR00000
588 "pand bm11111000, %%mm3 \n\t" // 000BGR00
589 "por %%mm2, %%mm3 \n\t" // BGRBGR00
590 "movq %%mm1, %%mm2 \n\t"
591 "psllq $48, %%mm1 \n\t" // 000000BG
592 "por %%mm1, %%mm3 \n\t" // BGRBGRBG
594 "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
595 "psrld $16, %%mm2 \n\t" // R000R000
596 "psrlq $24, %%mm1 \n\t" // 0BGR0000
597 "por %%mm2, %%mm1 \n\t" // RBGRR000
599 "movl %4, %%ebx \n\t"
600 "addl %%eax, %%ebx \n\t"
604 "movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
605 "movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
607 "movd %%mm3, (%%ebx, %%eax, 2) \n\t"
608 "psrlq $32, %%mm3 \n\t"
609 "movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
610 "movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
612 "addl $4, %%eax \n\t"
613 "cmpl %5, %%eax \n\t"
616 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstw),
617 "m" (yalpha1), "m" (uvalpha1)
627 "paddusb b16Dither, %%mm1 \n\t"
628 "paddusb b16Dither, %%mm0 \n\t"
629 "paddusb b16Dither, %%mm3 \n\t"
631 "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
632 "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
633 "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
635 "psrlw $3, %%mm3 \n\t"
636 "psllw $2, %%mm1 \n\t"
637 "psllw $7, %%mm0 \n\t"
638 "pand g15Mask, %%mm1 \n\t"
639 "pand r15Mask, %%mm0 \n\t"
641 "por %%mm3, %%mm1 \n\t"
642 "por %%mm1, %%mm0 \n\t"
644 MOVNTQ(%%mm0, (%4, %%eax, 2))
646 "addl $4, %%eax \n\t"
647 "cmpl %5, %%eax \n\t"
650 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
651 "m" (yalpha1), "m" (uvalpha1)
661 "paddusb g16Dither, %%mm1 \n\t"
662 "paddusb b16Dither, %%mm0 \n\t"
663 "paddusb b16Dither, %%mm3 \n\t"
665 "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
666 "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
667 "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
669 "psrlw $3, %%mm3 \n\t"
670 "psllw $3, %%mm1 \n\t"
671 "psllw $8, %%mm0 \n\t"
672 "pand g16Mask, %%mm1 \n\t"
673 "pand r16Mask, %%mm0 \n\t"
675 "por %%mm3, %%mm1 \n\t"
676 "por %%mm1, %%mm0 \n\t"
678 MOVNTQ(%%mm0, (%4, %%eax, 2))
680 "addl $4, %%eax \n\t"
681 "cmpl %5, %%eax \n\t"
684 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
685 "m" (yalpha1), "m" (uvalpha1)
690 asm volatile ("\n\t"::: "memory");
692 if(dstbpp==32 || dstbpp==24)
695 // vertical linear interpolation && yuv2rgb in a single step:
696 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
697 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
698 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
699 dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
700 dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
701 dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
708 // vertical linear interpolation && yuv2rgb in a single step:
709 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
710 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
711 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
713 ((uint16_t*)dest)[i] =
714 clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
715 clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
716 clip_table16r[(Y + yuvtab_3343[V]) >>13];
722 // vertical linear interpolation && yuv2rgb in a single step:
723 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
724 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
725 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
727 ((uint16_t*)dest)[i] =
728 clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
729 clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
730 clip_table15r[(Y + yuvtab_3343[V]) >>13];
744 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
745 "m" (yalpha1), "m" (uvalpha1)
755 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
756 "m" (yalpha1), "m" (uvalpha1)
764 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
766 "paddusb b16Dither, %%mm2 \n\t"
767 "paddusb b16Dither, %%mm4 \n\t"
768 "paddusb b16Dither, %%mm5 \n\t"
773 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
774 "m" (yalpha1), "m" (uvalpha1)
782 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
784 "paddusb g16Dither, %%mm2 \n\t"
785 "paddusb b16Dither, %%mm4 \n\t"
786 "paddusb b16Dither, %%mm5 \n\t"
791 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
792 "m" (yalpha1), "m" (uvalpha1)
797 asm volatile ("\n\t"::: "memory");
801 for(i=0; i<dstw-1; i+=2){
802 // vertical linear interpolation && yuv2rgb in a single step:
803 int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
804 int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
805 int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
806 int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
808 int Cb= yuvtab_40cf[U];
809 int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
810 int Cr= yuvtab_3343[V];
812 dest[4*i+0]=clip_table[((Y1 + Cb) >>13)];
813 dest[4*i+1]=clip_table[((Y1 + Cg) >>13)];
814 dest[4*i+2]=clip_table[((Y1 + Cr) >>13)];
816 dest[4*i+4]=clip_table[((Y2 + Cb) >>13)];
817 dest[4*i+5]=clip_table[((Y2 + Cg) >>13)];
818 dest[4*i+6]=clip_table[((Y2 + Cr) >>13)];
823 for(i=0; i<dstw-1; i+=2){
824 // vertical linear interpolation && yuv2rgb in a single step:
825 int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
826 int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
827 int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
828 int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
830 int Cb= yuvtab_40cf[U];
831 int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
832 int Cr= yuvtab_3343[V];
834 dest[0]=clip_table[((Y1 + Cb) >>13)];
835 dest[1]=clip_table[((Y1 + Cg) >>13)];
836 dest[2]=clip_table[((Y1 + Cr) >>13)];
838 dest[3]=clip_table[((Y2 + Cb) >>13)];
839 dest[4]=clip_table[((Y2 + Cg) >>13)];
840 dest[5]=clip_table[((Y2 + Cr) >>13)];
846 for(i=0; i<dstw-1; i+=2){
847 // vertical linear interpolation && yuv2rgb in a single step:
848 int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
849 int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
850 int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
851 int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
853 int Cb= yuvtab_40cf[U];
854 int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
855 int Cr= yuvtab_3343[V];
857 ((uint16_t*)dest)[i] =
858 clip_table16b[(Y1 + Cb) >>13] |
859 clip_table16g[(Y1 + Cg) >>13] |
860 clip_table16r[(Y1 + Cr) >>13];
862 ((uint16_t*)dest)[i+1] =
863 clip_table16b[(Y2 + Cb) >>13] |
864 clip_table16g[(Y2 + Cg) >>13] |
865 clip_table16r[(Y2 + Cr) >>13];
870 for(i=0; i<dstw-1; i+=2){
871 // vertical linear interpolation && yuv2rgb in a single step:
872 int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
873 int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
874 int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
875 int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
877 int Cb= yuvtab_40cf[U];
878 int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
879 int Cr= yuvtab_3343[V];
881 ((uint16_t*)dest)[i] =
882 clip_table15b[(Y1 + Cb) >>13] |
883 clip_table15g[(Y1 + Cg) >>13] |
884 clip_table15r[(Y1 + Cr) >>13];
886 ((uint16_t*)dest)[i+1] =
887 clip_table15b[(Y2 + Cb) >>13] |
888 clip_table15g[(Y2 + Cg) >>13] |
889 clip_table15r[(Y2 + Cr) >>13];
897 * YV12 to RGB without scaling or interpolating
899 static inline void yuv2rgb1(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
900 uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
902 int yalpha1=yalpha^4095;
903 int uvalpha1=uvalpha^4095;
905 if(fullUVIpol || allwaysIpol)
907 yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
910 if( yalpha > 2048 ) buf0 = buf1;
if( uvalpha < 2048 ) // note this is not correct (shifts chrominance by 0.5 pixels) but it's a bit faster
920 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
921 "m" (yalpha1), "m" (uvalpha1)
930 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
931 "m" (yalpha1), "m" (uvalpha1)
939 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
941 "paddusb b16Dither, %%mm2 \n\t"
942 "paddusb b16Dither, %%mm4 \n\t"
943 "paddusb b16Dither, %%mm5 \n\t"
946 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
947 "m" (yalpha1), "m" (uvalpha1)
955 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
957 "paddusb g16Dither, %%mm2 \n\t"
958 "paddusb b16Dither, %%mm4 \n\t"
959 "paddusb b16Dither, %%mm5 \n\t"
963 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
964 "m" (yalpha1), "m" (uvalpha1)
976 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
977 "m" (yalpha1), "m" (uvalpha1)
986 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
987 "m" (yalpha1), "m" (uvalpha1)
995 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
997 "paddusb b16Dither, %%mm2 \n\t"
998 "paddusb b16Dither, %%mm4 \n\t"
999 "paddusb b16Dither, %%mm5 \n\t"
1002 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
1003 "m" (yalpha1), "m" (uvalpha1)
1011 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1013 "paddusb g16Dither, %%mm2 \n\t"
1014 "paddusb b16Dither, %%mm4 \n\t"
1015 "paddusb b16Dither, %%mm5 \n\t"
1019 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
1020 "m" (yalpha1), "m" (uvalpha1)
1026 //FIXME write 2 versions (for even & odd lines)
1027 asm volatile ("\n\t"::: "memory");
1031 for(i=0; i<dstw-1; i+=2){
1032 // vertical linear interpolation && yuv2rgb in a single step:
1033 int Y1=yuvtab_2568[buf0[i]>>7];
1034 int Y2=yuvtab_2568[buf0[i+1]>>7];
1035 int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
1036 int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
1038 int Cb= yuvtab_40cf[U];
1039 int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
1040 int Cr= yuvtab_3343[V];
1042 dest[4*i+0]=clip_table[((Y1 + Cb) >>13)];
1043 dest[4*i+1]=clip_table[((Y1 + Cg) >>13)];
1044 dest[4*i+2]=clip_table[((Y1 + Cr) >>13)];
1046 dest[4*i+4]=clip_table[((Y2 + Cb) >>13)];
1047 dest[4*i+5]=clip_table[((Y2 + Cg) >>13)];
1048 dest[4*i+6]=clip_table[((Y2 + Cr) >>13)];
1053 for(i=0; i<dstw-1; i+=2){
1054 // vertical linear interpolation && yuv2rgb in a single step:
1055 int Y1=yuvtab_2568[buf0[i]>>7];
1056 int Y2=yuvtab_2568[buf0[i+1]>>7];
1057 int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
1058 int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
1060 int Cb= yuvtab_40cf[U];
1061 int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
1062 int Cr= yuvtab_3343[V];
1064 dest[0]=clip_table[((Y1 + Cb) >>13)];
1065 dest[1]=clip_table[((Y1 + Cg) >>13)];
1066 dest[2]=clip_table[((Y1 + Cr) >>13)];
1068 dest[3]=clip_table[((Y2 + Cb) >>13)];
1069 dest[4]=clip_table[((Y2 + Cg) >>13)];
1070 dest[5]=clip_table[((Y2 + Cr) >>13)];
1076 for(i=0; i<dstw-1; i+=2){
1077 // vertical linear interpolation && yuv2rgb in a single step:
1078 int Y1=yuvtab_2568[buf0[i]>>7];
1079 int Y2=yuvtab_2568[buf0[i+1]>>7];
1080 int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
1081 int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
1083 int Cb= yuvtab_40cf[U];
1084 int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
1085 int Cr= yuvtab_3343[V];
1087 ((uint16_t*)dest)[i] =
1088 clip_table16b[(Y1 + Cb) >>13] |
1089 clip_table16g[(Y1 + Cg) >>13] |
1090 clip_table16r[(Y1 + Cr) >>13];
1092 ((uint16_t*)dest)[i+1] =
1093 clip_table16b[(Y2 + Cb) >>13] |
1094 clip_table16g[(Y2 + Cg) >>13] |
1095 clip_table16r[(Y2 + Cr) >>13];
1100 for(i=0; i<dstw-1; i+=2){
1101 // vertical linear interpolation && yuv2rgb in a single step:
1102 int Y1=yuvtab_2568[buf0[i]>>7];
1103 int Y2=yuvtab_2568[buf0[i+1]>>7];
1104 int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
1105 int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
1107 int Cb= yuvtab_40cf[U];
1108 int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
1109 int Cr= yuvtab_3343[V];
1111 ((uint16_t*)dest)[i] =
1112 clip_table15b[(Y1 + Cb) >>13] |
1113 clip_table15g[(Y1 + Cg) >>13] |
1114 clip_table15r[(Y1 + Cr) >>13];
1116 ((uint16_t*)dest)[i+1] =
1117 clip_table15b[(Y2 + Cb) >>13] |
1118 clip_table15g[(Y2 + Cg) >>13] |
1119 clip_table15r[(Y2 + Cr) >>13];
1126 static inline void hyscale(uint16_t *dst, int dstWidth, uint8_t *src, int srcWidth, int xInc)
1129 unsigned int xpos=0;
1130 // *** horizontal scale Y line to temp buffer
1136 "pxor %%mm7, %%mm7 \n\t"
1137 "pxor %%mm2, %%mm2 \n\t" // 2*xalpha
1138 "movd %5, %%mm6 \n\t" // xInc&0xFFFF
1139 "punpcklwd %%mm6, %%mm6 \n\t"
1140 "punpcklwd %%mm6, %%mm6 \n\t"
1141 "movq %%mm6, %%mm2 \n\t"
1142 "psllq $16, %%mm2 \n\t"
1143 "paddw %%mm6, %%mm2 \n\t"
1144 "psllq $16, %%mm2 \n\t"
1145 "paddw %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t" //0,t,2t,3t t=xInc&0xFFFF
1147 "movq %%mm2, temp0 \n\t"
1148 "movd %4, %%mm6 \n\t" //(xInc*4)&0xFFFF
1149 "punpcklwd %%mm6, %%mm6 \n\t"
1150 "punpcklwd %%mm6, %%mm6 \n\t"
1151 "xorl %%eax, %%eax \n\t" // i
1152 "movl %0, %%esi \n\t" // src
1153 "movl %1, %%edi \n\t" // buf1
1154 "movl %3, %%edx \n\t" // (xInc*4)>>16
1155 "xorl %%ecx, %%ecx \n\t"
1156 "xorl %%ebx, %%ebx \n\t"
1157 "movw %4, %%bx \n\t" // (xInc*4)&0xFFFF
1159 #define FUNNY_Y_CODE \
1160 PREFETCH" 1024(%%esi) \n\t"\
1161 PREFETCH" 1056(%%esi) \n\t"\
1162 PREFETCH" 1088(%%esi) \n\t"\
1163 "call funnyYCode \n\t"\
1164 "movq temp0, %%mm2 \n\t"\
1165 "xorl %%ecx, %%ecx \n\t"
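// (funnyYCode is generated at run time in SwScale_YV12slice below: a chain of
//  pshufw-based fragments whose shuffle immediates are patched per output
//  position, terminated by a RET; descriptive note, see the generator code)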
1176 :: "m" (src), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
1177 "m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF)
1178 : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
1180 for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth-1; i--) dst[i] = src[srcWidth-1]*128;
1185 //NO MMX just normal asm ...
1187 "xorl %%eax, %%eax \n\t" // i
1188 "xorl %%ebx, %%ebx \n\t" // xx
1189 "xorl %%ecx, %%ecx \n\t" // 2*xalpha
1191 "movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
1192 "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
1193 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
1194 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
1195 "shll $16, %%edi \n\t"
1196 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1197 "movl %1, %%edi \n\t"
1198 "shrl $9, %%esi \n\t"
1199 "movw %%si, (%%edi, %%eax, 2) \n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFFFF
"adcl %3, %%ebx \n\t" //xx+= xInc>>16 + carry
1203 "movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
1204 "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
1205 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
1206 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
1207 "shll $16, %%edi \n\t"
1208 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1209 "movl %1, %%edi \n\t"
1210 "shrl $9, %%esi \n\t"
1211 "movw %%si, 2(%%edi, %%eax, 2) \n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFFFF
"adcl %3, %%ebx \n\t" //xx+= xInc>>16 + carry
1216 "addl $2, %%eax \n\t"
1217 "cmpl %2, %%eax \n\t"
1221 :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
1222 : "%eax", "%ebx", "%ecx", "%edi", "%esi"
} //if MMX2 can't be used
1228 for(i=0;i<dstWidth;i++){
1229 register unsigned int xx=xpos>>16;
1230 register unsigned int xalpha=(xpos&0xFFFF)>>9;
1231 dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
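		// fixed-point bookkeeping (illustrative note): xalpha is the top 7 bits of the
		// fractional position (0..127), so dst[i] holds the sample in 7.7 fixed point
		// (0..255*128); the vertical blend later multiplies by a 12-bit alpha and
		// shifts right by 19 (= 7+12) to return to 8 bits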
1237 inline static void hcscale(uint16_t *dst, int dstWidth,
1238 uint8_t *src1, uint8_t *src2, int srcWidth, int xInc)
1247 "pxor %%mm7, %%mm7 \n\t"
1248 "pxor %%mm2, %%mm2 \n\t" // 2*xalpha
1249 "movd %5, %%mm6 \n\t" // xInc&0xFFFF
1250 "punpcklwd %%mm6, %%mm6 \n\t"
1251 "punpcklwd %%mm6, %%mm6 \n\t"
1252 "movq %%mm6, %%mm2 \n\t"
1253 "psllq $16, %%mm2 \n\t"
1254 "paddw %%mm6, %%mm2 \n\t"
1255 "psllq $16, %%mm2 \n\t"
1256 "paddw %%mm6, %%mm2 \n\t"
1257 "psllq $16, %%mm2 \n\t" //0,t,2t,3t t=xInc&0xFFFF
1258 "movq %%mm2, temp0 \n\t"
1259 "movd %4, %%mm6 \n\t" //(xInc*4)&0xFFFF
1260 "punpcklwd %%mm6, %%mm6 \n\t"
1261 "punpcklwd %%mm6, %%mm6 \n\t"
1262 "xorl %%eax, %%eax \n\t" // i
1263 "movl %0, %%esi \n\t" // src
1264 "movl %1, %%edi \n\t" // buf1
1265 "movl %3, %%edx \n\t" // (xInc*4)>>16
1266 "xorl %%ecx, %%ecx \n\t"
1267 "xorl %%ebx, %%ebx \n\t"
1268 "movw %4, %%bx \n\t" // (xInc*4)&0xFFFF
1270 #define FUNNYUVCODE \
1271 PREFETCH" 1024(%%esi) \n\t"\
1272 PREFETCH" 1056(%%esi) \n\t"\
1273 PREFETCH" 1088(%%esi) \n\t"\
1274 "call funnyUVCode \n\t"\
1275 "movq temp0, %%mm2 \n\t"\
1276 "xorl %%ecx, %%ecx \n\t"
1287 "xorl %%eax, %%eax \n\t" // i
1288 "movl %6, %%esi \n\t" // src
1289 "movl %1, %%edi \n\t" // buf1
1290 "addl $4096, %%edi \n\t"
1302 :: "m" (src1), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
1303 "m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF), "m" (src2)
1304 : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
1306 for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth/2-1; i--)
1308 dst[i] = src1[srcWidth/2-1]*128;
1309 dst[i+2048] = src2[srcWidth/2-1]*128;
1316 "xorl %%eax, %%eax \n\t" // i
1317 "xorl %%ebx, %%ebx \n\t" // xx
1318 "xorl %%ecx, %%ecx \n\t" // 2*xalpha
1320 "movl %0, %%esi \n\t"
1321 "movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]
1322 "movzbl 1(%%esi, %%ebx), %%esi \n\t" //src[xx+1]
1323 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
1324 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
1325 "shll $16, %%edi \n\t"
1326 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1327 "movl %1, %%edi \n\t"
1328 "shrl $9, %%esi \n\t"
1329 "movw %%si, (%%edi, %%eax, 2) \n\t"
1331 "movzbl (%5, %%ebx), %%edi \n\t" //src[xx]
1332 "movzbl 1(%5, %%ebx), %%esi \n\t" //src[xx+1]
1333 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
1334 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
1335 "shll $16, %%edi \n\t"
1336 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1337 "movl %1, %%edi \n\t"
1338 "shrl $9, %%esi \n\t"
1339 "movw %%si, 4096(%%edi, %%eax, 2)\n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFFFF
"adcl %3, %%ebx \n\t" //xx+= xInc>>16 + carry
1343 "addl $1, %%eax \n\t"
1344 "cmpl %2, %%eax \n\t"
1347 :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF),
1349 : "%eax", "%ebx", "%ecx", "%edi", "%esi"
} //if MMX2 can't be used
1355 for(i=0;i<dstWidth;i++){
1356 register unsigned int xx=xpos>>16;
1357 register unsigned int xalpha=(xpos&0xFFFF)>>9;
1358 dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
1359 dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
1361 dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
1362 dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
1370 // *** bilinear scaling and yuv->rgb or yuv->yuv conversion of yv12 slices:
1371 // *** Note: it's called multiple times while decoding a frame, first time y==0
1372 // *** Designed to upscale, but may work for downscale too.
1373 // s_xinc = (src_width << 16) / dst_width
1374 // s_yinc = (src_height << 16) / dst_height
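#if 0	// illustrative caller sketch (not from this file; all names below are made up
	// for the example): a video output driver computes the 16.16 increments exactly
	// as described above and feeds the decoder output one slice at a time
	unsigned int s_xinc= (src_width  << 16) / dst_width;
	unsigned int s_yinc= (src_height << 16) / dst_height;
	SwScale_YV12slice(src, stride, slice_y, slice_h,
			  dst, dststride, dstw, dstbpp, s_xinc, s_yinc);
#endif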
1375 void SwScale_YV12slice(unsigned char* srcptr[],int stride[], int y, int h,
1376 uint8_t* dstptr[], int dststride, int dstw, int dstbpp,
1377 unsigned int s_xinc,unsigned int s_yinc){
1380 //static int s_yinc=(vo_dga_src_height<<16)/vo_dga_vp_height;
1381 //static int s_xinc=(vo_dga_src_width<<8)/vo_dga_vp_width;
1383 unsigned int s_xinc2;
static int s_srcypos; // points to the dst pixel's center in the source (0 is the center of pixel 0,0 in src)
// last horizontally interpolated lines, used to avoid unnecessary calculations
1389 static int s_last_ypos;
1390 static int s_last_y1pos;
1392 static int static_dstw;
1395 // used to detect a horizontal size change
1396 static int old_dstw= -1;
1397 static int old_s_xinc= -1;
1400 int srcWidth= (dstw*s_xinc + 0x8000)>>16;
1401 int dstUVw= fullUVIpol ? dstw : dstw/2;
1405 canMMX2BeUsed= (s_xinc <= 0x10000 && (dstw&31)==0 && (srcWidth&15)==0) ? 1 : 0;
1408 // match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
1409 // n-2 is the last chrominance sample available
// FIXME this is not perfect, but no one should notice the difference; the more correct variant
1411 // would be like the vertical one, but that would require some special code for the
1412 // first and last pixel
1413 if(canMMX2BeUsed) s_xinc+= 20;
1414 else s_xinc = ((srcWidth-2)<<16)/(dstw-2) - 20;
1416 if(fullUVIpol && !(dstbpp==12)) s_xinc2= s_xinc>>1;
1417 else s_xinc2= s_xinc;
1418 // force calculation of the horizontal interpolation of the first line
1423 s_srcypos= s_yinc/2 - 0x8000;
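	// presumably: in 16.16 fixed point this is half a destination step forward minus half a
	// source pixel, so that source and destination pixel centers line up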
// clean the buffers so that no green stuff is drawn if the width is not sane (%8=0)
1427 for(i=dstw-2; i<dstw+20; i++)
1429 pix_buf_uv[0][i] = pix_buf_uv[1][i]
1430 = pix_buf_uv[0][2048+i] = pix_buf_uv[1][2048+i] = 128;
1431 pix_buf_uv[0][i/2] = pix_buf_uv[1][i/2]
1432 = pix_buf_uv[0][2048+i/2] = pix_buf_uv[1][2048+i/2] = 128;
1433 pix_buf_y[0][i]= pix_buf_y[1][i]= 0;
// can't downscale !!!
1438 if((old_s_xinc != s_xinc || old_dstw!=dstw) && canMMX2BeUsed)
1445 int xpos, xx, xalpha, i;
1452 // create an optimized horizontal scaling routine
1460 "movq (%%esi), %%mm0 \n\t" //FIXME Alignment
1461 "movq %%mm0, %%mm1 \n\t"
1462 "psrlq $8, %%mm0 \n\t"
1463 "punpcklbw %%mm7, %%mm1 \n\t"
1464 "movq %%mm2, %%mm3 \n\t"
1465 "punpcklbw %%mm7, %%mm0 \n\t"
1466 "addw %%bx, %%cx \n\t" //2*xalpha += (4*s_xinc)&0xFFFF
1467 "pshufw $0xFF, %%mm1, %%mm1 \n\t"
1469 "adcl %%edx, %%esi \n\t" //xx+= (4*s_xinc)>>16 + carry
1470 "pshufw $0xFF, %%mm0, %%mm0 \n\t"
1472 "psrlw $9, %%mm3 \n\t"
1473 "psubw %%mm1, %%mm0 \n\t"
1474 "pmullw %%mm3, %%mm0 \n\t"
1475 "paddw %%mm6, %%mm2 \n\t" // 2*alpha += xpos&0xFFFF
1476 "psllw $7, %%mm1 \n\t"
1477 "paddw %%mm1, %%mm0 \n\t"
1479 "movq %%mm0, (%%edi, %%eax) \n\t"
1481 "addl $8, %%eax \n\t"
1494 :"=r" (fragment), "=r" (imm8OfPShufW1), "=r" (imm8OfPShufW2),
1495 "=r" (fragmentLength)
1498 xpos= 0; //s_xinc/2 - 0x8000; // difference between pixel centers
1500 /* choose xinc so that all 8 parts fit exactly
1501 Note: we cannot use just 1 part because it would not fit in the code cache */
1502 // s_xinc2_diff= -((((s_xinc2*(dstw/8))&0xFFFF))/(dstw/8))-10;
1503 // s_xinc_diff= -((((s_xinc*(dstw/8))&0xFFFF))/(dstw/8));
1505 // s_xinc2_diff+= ((0x10000/(dstw/8)));
1507 // s_xinc_diff= s_xinc2_diff*2;
1509 // s_xinc2+= s_xinc2_diff;
1510 // s_xinc+= s_xinc_diff;
1512 // old_s_xinc= s_xinc;
1514 for(i=0; i<dstw/8; i++)
1521 int b=((xpos+s_xinc)>>16) - xx;
1522 int c=((xpos+s_xinc*2)>>16) - xx;
1523 int d=((xpos+s_xinc*3)>>16) - xx;
1525 memcpy(funnyYCode + fragmentLength*i/4, fragment, fragmentLength);
1527 funnyYCode[fragmentLength*i/4 + imm8OfPShufW1]=
1528 funnyYCode[fragmentLength*i/4 + imm8OfPShufW2]=
1529 a | (b<<2) | (c<<4) | (d<<6);
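		// (the byte stored here is the pshufw immediate: four 2-bit fields, one per output
		//  word, each selecting which of the four loaded source pixels to take)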
// if we don't need to read 8 bytes then don't :), reduces the chance of
1532 // crossing a cache line
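		// (assuming the fragment begins with the movq load shown above, patching offset 1
		//  turns its opcode 0F 6F into 0F 6E, i.e. movq becomes a 4-byte movd load)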
1533 if(d<3) funnyYCode[fragmentLength*i/4 + 1]= 0x6E;
1535 funnyYCode[fragmentLength*(i+4)/4]= RET;
xpos= 0; //s_xinc2/2 - 0x10000; // difference between centers of chroma samples
1541 for(i=0; i<dstUVw/8; i++)
1548 int b=((xpos+s_xinc2)>>16) - xx;
1549 int c=((xpos+s_xinc2*2)>>16) - xx;
1550 int d=((xpos+s_xinc2*3)>>16) - xx;
1552 memcpy(funnyUVCode + fragmentLength*i/4, fragment, fragmentLength);
1554 funnyUVCode[fragmentLength*i/4 + imm8OfPShufW1]=
1555 funnyUVCode[fragmentLength*i/4 + imm8OfPShufW2]=
1556 a | (b<<2) | (c<<4) | (d<<6);
// if we don't need to read 8 bytes then don't :), reduces the chance of
1559 // crossing a cache line
1560 if(d<3) funnyUVCode[fragmentLength*i/4 + 1]= 0x6E;
1562 funnyUVCode[fragmentLength*(i+4)/4]= RET;
1566 // funnyCode[0]= RET;
1573 unsigned char *dest =dstptr[0]+dststride*s_ypos;
1574 unsigned char *uDest=dstptr[1]+(dststride>>1)*(s_ypos>>1);
1575 unsigned char *vDest=dstptr[2]+(dststride>>1)*(s_ypos>>1);
1577 int y0=(s_srcypos + 0xFFFF)>>16; // first luminance source line number below the dst line
// points to the dst pixel's center in the source (0 is the center of pixel 0,0 in src)
1579 int srcuvpos= dstbpp==12 ? s_srcypos + s_yinc/2 - 0x8000 :
1581 int y1=(srcuvpos + 0x1FFFF)>>17; // first chrominance source line number below the dst line
1582 int yalpha=((s_srcypos-1)&0xFFFF)>>4;
1583 int uvalpha=((srcuvpos-1)&0x1FFFF)>>5;
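	// (both alphas are 12-bit blend weights in 0..4095: the fractional part of the source
	//  position, taken from 16 fraction bits for luma and 17 for chroma)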
1584 uint16_t *buf0=pix_buf_y[y0&1]; // top line of the interpolated slice
1585 uint16_t *buf1=pix_buf_y[((y0+1)&1)]; // bottom line of the interpolated slice
1586 uint16_t *uvbuf0=pix_buf_uv[y1&1]; // top line of the interpolated slice
1587 uint16_t *uvbuf1=pix_buf_uv[(y1+1)&1]; // bottom line of the interpolated slice
if(y0>=y+h) break; // FIXME wrong, skips last lines, but they are duplicates anyway
1592 if((y0&1) && dstbpp==12) uvalpha=-1; // there is no alpha if there is no line
1594 s_ypos++; s_srcypos+=s_yinc;
//only interpolate the src line horizontally if we didn't do it already
// skip if first line has been horiz scaled already
1601 if(s_last_ypos != y0-1)
1603 // check if first line is before any available src lines
1604 if(y0-1 < y) src=srcptr[0]+(0 )*stride[0];
1605 else src=srcptr[0]+(y0-y-1)*stride[0];
1607 hyscale(buf0, dstw, src, srcWidth, s_xinc);
1609 // check if second line is after any available src lines
1610 if(y0-y >= h) src=srcptr[0]+(h-1)*stride[0];
1611 else src=srcptr[0]+(y0-y)*stride[0];
// the min() is required to avoid reusing lines which were not available
1614 s_last_ypos= MIN(y0, y+h-1);
1615 hyscale(buf1, dstw, src, srcWidth, s_xinc);
1617 // printf("%d %d %d %d\n", y, y1, s_last_y1pos, h);
1618 // *** horizontal scale U and V lines to temp buffer
1619 if(s_last_y1pos!=y1)
1621 uint8_t *src1, *src2;
// skip if first line has been horiz scaled already
1623 if(s_last_y1pos != y1-1)
1625 // check if first line is before any available src lines
1628 src1= srcptr[1]+(0)*stride[1];
1629 src2= srcptr[2]+(0)*stride[2];
1631 src1= srcptr[1]+(y1-y/2-1)*stride[1];
1632 src2= srcptr[2]+(y1-y/2-1)*stride[2];
1634 hcscale(uvbuf0, dstUVw, src1, src2, srcWidth, s_xinc2);
1637 // check if second line is after any available src lines
1640 src1= srcptr[1]+(h/2-1)*stride[1];
1641 src2= srcptr[2]+(h/2-1)*stride[2];
1643 src1= srcptr[1]+(y1-y/2)*stride[1];
1644 src2= srcptr[2]+(y1-y/2)*stride[2];
1646 hcscale(uvbuf1, dstUVw, src1, src2, srcWidth, s_xinc2);
// the min() is required to avoid reusing lines which were not available
1649 s_last_y1pos= MIN(y1, y/2+h/2-1);
1652 if(dstbpp==12) //YV12
1653 yuv2yuv(buf0, buf1, uvbuf0, uvbuf1, dest, uDest, vDest, dstw, yalpha, uvalpha);
1654 else if(ABS(s_yinc - 0x10000) < 10)
1655 yuv2rgb1(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
1657 yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
1660 b16Dither= b16Dither1;
1661 b16Dither1= b16Dither2;
1662 b16Dither2= b16Dither;
1664 g16Dither= g16Dither1;
1665 g16Dither1= g16Dither2;
1666 g16Dither2= g16Dither;
1671 __asm __volatile(SFENCE:::"memory");
1672 __asm __volatile(EMMS:::"memory");
1677 void SwScale_Init(){
1678 // generating tables:
1682 clip_table[i+256]=i;
1683 clip_table[i+512]=255;
1684 yuvtab_2568[i]=(0x2568*(i-16))+(256<<13);
1685 yuvtab_3343[i]=0x3343*(i-128);
1686 yuvtab_0c92[i]=-0x0c92*(i-128);
1687 yuvtab_1a1e[i]=-0x1a1e*(i-128);
1688 yuvtab_40cf[i]=0x40cf*(i-128);
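	// (for orientation: these constants are roughly the usual ITU-R BT.601 coefficients
	//  scaled by 2^13, i.e. about 1.17*(Y-16), 1.60*(V-128), -0.39*(U-128), -0.82*(V-128)
	//  and 2.03*(U-128) respectively; approximate values)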
1691 for(i=0; i<768; i++)
1693 int v= clip_table[i];
1694 clip_table16b[i]= v>>3;
1695 clip_table16g[i]= (v<<3)&0x07E0;
1696 clip_table16r[i]= (v<<8)&0xF800;
1697 clip_table15b[i]= v>>3;
1698 clip_table15g[i]= (v<<2)&0x03E0;
1699 clip_table15r[i]= (v<<7)&0x7C00;