// Software scaling and colorspace conversion routines for MPlayer

// Original C implementation by A'rpi/ESP-team <arpi@thot.banki.hu>
// current version mostly by Michael Niedermayer (michaelni@gmx.at)
// the parts written by michael are under GNU GPL

#include "../config.h"
#include "../mmx_defs.h"

//disables the unscaled height version

#define RET 0xC3 //near return opcode
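// The horizontal MMX2 scaler below is generated at runtime: small code
// fragments are memcpy'd into funnyYCode[]/funnyUVCode[], and the buffer is
// terminated with this opcode so that it can simply be call'ed.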
/*
known BUGS with known cause (no bug reports please, but patches are welcome :) )
horizontal MMX2 scaler reads 1-7 samples too many (might cause a sig11)

Supported output formats: BGR15 BGR16 BGR24 BGR32
BGR15 & BGR16 MMX versions support dithering
Special versions: fast Y 1:1 scaling (no interpolation in y direction)

TODO
more intelligent misalignment avoidance for the horizontal scaler
change the distance of the u & v buffer
*/

#define ABS(a) ((a) > 0 ? (a) : (-(a)))
#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))
#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif

#ifdef HAVE_MMX2
#define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif
static uint64_t __attribute__((aligned(8))) yCoeff= 0x2568256825682568LL;
static uint64_t __attribute__((aligned(8))) vrCoeff= 0x3343334333433343LL;
static uint64_t __attribute__((aligned(8))) ubCoeff= 0x40cf40cf40cf40cfLL;
static uint64_t __attribute__((aligned(8))) vgCoeff= 0xE5E2E5E2E5E2E5E2LL;
static uint64_t __attribute__((aligned(8))) ugCoeff= 0xF36EF36EF36EF36ELL;
static uint64_t __attribute__((aligned(8))) bF8= 0xF8F8F8F8F8F8F8F8LL;
static uint64_t __attribute__((aligned(8))) bFC= 0xFCFCFCFCFCFCFCFCLL;
static uint64_t __attribute__((aligned(8))) w400= 0x0400040004000400LL;
static uint64_t __attribute__((aligned(8))) w80= 0x0080008000800080LL;
static uint64_t __attribute__((aligned(8))) w10= 0x0010001000100010LL;
static uint64_t __attribute__((aligned(8))) bm00001111=0x00000000FFFFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm00000111=0x0000000000FFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm11111000=0xFFFFFFFFFF000000LL;

static uint64_t __attribute__((aligned(8))) b16Dither= 0x0004000400040004LL;
static uint64_t __attribute__((aligned(8))) b16Dither1=0x0004000400040004LL;
static uint64_t __attribute__((aligned(8))) b16Dither2=0x0602060206020602LL;
static uint64_t __attribute__((aligned(8))) g16Dither= 0x0002000200020002LL;
static uint64_t __attribute__((aligned(8))) g16Dither1=0x0002000200020002LL;
static uint64_t __attribute__((aligned(8))) g16Dither2=0x0301030103010301LL;

static uint64_t __attribute__((aligned(8))) b16Mask= 0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g16Mask= 0x07E007E007E007E0LL;
static uint64_t __attribute__((aligned(8))) r16Mask= 0xF800F800F800F800LL;
static uint64_t __attribute__((aligned(8))) b15Mask= 0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g15Mask= 0x03E003E003E003E0LL;
static uint64_t __attribute__((aligned(8))) r15Mask= 0x7C007C007C007C00LL;
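// Bit layout of the packed-pixel masks above (one pixel per 16-bit word):
//   BGR16: rrrrrggg gggbbbbb  (5/6/5 -> r16Mask/g16Mask/b16Mask)
//   BGR15: 0rrrrrgg gggbbbbb  (5/5/5 -> r15Mask/g15Mask/b15Mask)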
static uint64_t __attribute__((aligned(8))) temp0;
static uint64_t __attribute__((aligned(8))) asm_yalpha1;
static uint64_t __attribute__((aligned(8))) asm_uvalpha1;

// temporary storage for 4 yuv lines:
// 16bit for now (mmx likes it more compact)
#ifdef HAVE_MMX
static uint16_t __attribute__((aligned(8))) pix_buf_y[4][2048];
static uint16_t __attribute__((aligned(8))) pix_buf_uv[2][2048*2];
#else
static uint16_t pix_buf_y[4][2048];
static uint16_t pix_buf_uv[2][2048*2];
#endif
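// Layout of one pix_buf_uv line: U lives in words [0..2047], V in words
// [2048..4095]; this is why the vertical scalers read V at a byte offset of
// 4096 in the asm and at uvbuf[i+2048] in the C paths.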
// clipping helper table for C implementations:
static unsigned char clip_table[768];

static unsigned short clip_table16b[768];
static unsigned short clip_table16g[768];
static unsigned short clip_table16r[768];
static unsigned short clip_table15b[768];
static unsigned short clip_table15g[768];
static unsigned short clip_table15r[768];

// yuv->rgb conversion tables:
static int yuvtab_2568[256];
static int yuvtab_3343[256];
static int yuvtab_0c92[256];
static int yuvtab_1a1e[256];
static int yuvtab_40cf[256];
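// For reference, a minimal sketch of the per-pixel math the tables above
// encode (illustrative only; it simply mirrors the lookups done in the C
// paths below). SwScale_Init() fills them with BT.601-style coefficients in
// 3.13 fixed point; yuvtab_2568[] additionally carries a +(256<<13) bias so
// that the >>13 result indexes the 768-entry clip tables.
static inline void yuv2rgb_pixel_sketch(int y8, int u8, int v8, uint8_t *bgr)
{
    int Y= yuvtab_2568[y8]; // 0x2568*(y8-16) + (256<<13)
    bgr[0]= clip_table[(Y + yuvtab_40cf[u8]) >>13];                   // B
    bgr[1]= clip_table[(Y + yuvtab_1a1e[v8] + yuvtab_0c92[u8]) >>13]; // G
    bgr[2]= clip_table[(Y + yuvtab_3343[v8]) >>13];                   // R
}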
static uint8_t funnyYCode[10000];
static uint8_t funnyUVCode[10000];

static int canMMX2BeUsed=0;
#define FULL_YSCALEYUV2RGB \
"pxor %%mm7, %%mm7 \n\t"\
"movd %6, %%mm6 \n\t" /*yalpha1*/\
"punpcklwd %%mm6, %%mm6 \n\t"\
"punpcklwd %%mm6, %%mm6 \n\t"\
"movd %7, %%mm5 \n\t" /*uvalpha1*/\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"xorl %%eax, %%eax \n\t"\
"movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
"movq (%2, %%eax,2), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax,2), %%mm3 \n\t" /* uvbuf1[eax]*/\
"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
"pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
"psraw $4, %%mm1 \n\t" /* buf1[eax] >>4*/\
"movq 4096(%2, %%eax,2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
"psraw $4, %%mm3 \n\t" /* uvbuf1[eax] >>4*/\
"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"movq 4096(%3, %%eax,2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
"psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
"psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
"psubw w400, %%mm3 \n\t" /* 8(U-128)*/\
"pmulhw yCoeff, %%mm1 \n\t"\
"pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"pmulhw ubCoeff, %%mm3 \n\t"\
"psraw $4, %%mm0 \n\t" /* uvbuf1[eax+2048] >>4*/\
"pmulhw ugCoeff, %%mm2 \n\t"\
"paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
"psubw w400, %%mm0 \n\t" /* (V-128)8*/\
"movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
"pmulhw vrCoeff, %%mm0 \n\t"\
"pmulhw vgCoeff, %%mm4 \n\t"\
"paddw %%mm1, %%mm3 \n\t" /* B*/\
"paddw %%mm1, %%mm0 \n\t" /* R*/\
"packuswb %%mm3, %%mm3 \n\t"\
"packuswb %%mm0, %%mm0 \n\t"\
"paddw %%mm4, %%mm2 \n\t"\
"paddw %%mm2, %%mm1 \n\t" /* G*/\
"packuswb %%mm1, %%mm1 \n\t"
#define YSCALEYUV2RGB \
"movd %6, %%mm6 \n\t" /*yalpha1*/\
"punpcklwd %%mm6, %%mm6 \n\t"\
"punpcklwd %%mm6, %%mm6 \n\t"\
"movq %%mm6, asm_yalpha1 \n\t"\
"movd %7, %%mm5 \n\t" /*uvalpha1*/\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"movq %%mm5, asm_uvalpha1 \n\t"\
"xorl %%eax, %%eax \n\t"\
"movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
"movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
"movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
"psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
"movq asm_uvalpha1, %%mm0 \n\t"\
"pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
"pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
"psraw $4, %%mm3 \n\t" /* uvbuf1[eax] >>4*/\
"psraw $4, %%mm4 \n\t" /* uvbuf1[eax+2048] >>4*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
"psubw w400, %%mm3 \n\t" /* (U-128)8*/\
"psubw w400, %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw ugCoeff, %%mm3 \n\t"\
"pmulhw vgCoeff, %%mm4 \n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
"movq 8(%0, %%eax, 2), %%mm6 \n\t" /*buf0[eax+4]*/\
"movq 8(%1, %%eax, 2), %%mm7 \n\t" /*buf1[eax+4]*/\
"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
"psubw %%mm7, %%mm6 \n\t" /* buf0[eax+4] - buf1[eax+4]*/\
"pmulhw asm_yalpha1, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"pmulhw asm_yalpha1, %%mm6 \n\t" /* (buf0[eax+4] - buf1[eax+4])yalpha1>>16*/\
"psraw $4, %%mm1 \n\t" /* buf1[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf1[eax+4] >>4*/\
"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"paddw %%mm6, %%mm7 \n\t" /* buf0[eax+4]yalpha1 + buf1[eax+4](1-yalpha1) >>16*/\
"pmulhw ubCoeff, %%mm2 \n\t"\
"pmulhw vrCoeff, %%mm5 \n\t"\
"psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
"psubw w80, %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw yCoeff, %%mm1 \n\t"\
"pmulhw yCoeff, %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
#define YSCALEYUV2RGB1 \
"xorl %%eax, %%eax \n\t"\
"movq (%2, %%eax), %%mm3 \n\t" /* uvbuf0[eax]*/\
"movq 4096(%2, %%eax), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
"psraw $4, %%mm3 \n\t" /* uvbuf0[eax] >>4*/\
"psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] >>4*/\
"psubw w400, %%mm3 \n\t" /* (U-128)8*/\
"psubw w400, %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw ugCoeff, %%mm3 \n\t"\
"pmulhw vgCoeff, %%mm4 \n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
"movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax+4]*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax+4] >>4*/\
"pmulhw ubCoeff, %%mm2 \n\t"\
"pmulhw vrCoeff, %%mm5 \n\t"\
"psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
"psubw w80, %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw yCoeff, %%mm1 \n\t"\
"pmulhw yCoeff, %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
// do vertical chrominance interpolation
#define YSCALEYUV2RGB1b \
"xorl %%eax, %%eax \n\t"\
"movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
"movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
"movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
"psrlw $5, %%mm3 \n\t"\
"psrlw $5, %%mm4 \n\t"\
"psubw w400, %%mm3 \n\t" /* (U-128)8*/\
"psubw w400, %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw ugCoeff, %%mm3 \n\t"\
"pmulhw vgCoeff, %%mm4 \n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
"movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax+4]*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax+4] >>4*/\
"pmulhw ubCoeff, %%mm2 \n\t"\
"pmulhw vrCoeff, %%mm5 \n\t"\
"psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
"psubw w80, %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw yCoeff, %%mm1 \n\t"\
"pmulhw yCoeff, %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm5, %%mm6 \n\t" /* R */\
"punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
"punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
"punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
"punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
"movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
"movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
"punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
"punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
"punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
MOVNTQ(%%mm0, (%4, %%eax, 4))\
MOVNTQ(%%mm2, 8(%4, %%eax, 4))\
MOVNTQ(%%mm1, 16(%4, %%eax, 4))\
MOVNTQ(%%mm3, 24(%4, %%eax, 4))\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"\
374 "pand bF8, %%mm2 \n\t" /* B */\
375 "pand bFC, %%mm4 \n\t" /* G */\
376 "pand bF8, %%mm5 \n\t" /* R */\
377 "psrlq $3, %%mm2 \n\t"\
379 "movq %%mm2, %%mm1 \n\t"\
380 "movq %%mm4, %%mm3 \n\t"\
382 "punpcklbw %%mm7, %%mm3 \n\t"\
383 "punpcklbw %%mm5, %%mm2 \n\t"\
384 "punpckhbw %%mm7, %%mm4 \n\t"\
385 "punpckhbw %%mm5, %%mm1 \n\t"\
387 "psllq $3, %%mm3 \n\t"\
388 "psllq $3, %%mm4 \n\t"\
390 "por %%mm3, %%mm2 \n\t"\
391 "por %%mm4, %%mm1 \n\t"\
393 MOVNTQ(%%mm2, (%4, %%eax, 2))\
394 MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
396 "addl $8, %%eax \n\t"\
397 "cmpl %5, %%eax \n\t"\
401 "pand bF8, %%mm2 \n\t" /* B */\
402 "pand bF8, %%mm4 \n\t" /* G */\
403 "pand bF8, %%mm5 \n\t" /* R */\
404 "psrlq $3, %%mm2 \n\t"\
405 "psrlq $1, %%mm5 \n\t"\
407 "movq %%mm2, %%mm1 \n\t"\
408 "movq %%mm4, %%mm3 \n\t"\
410 "punpcklbw %%mm7, %%mm3 \n\t"\
411 "punpcklbw %%mm5, %%mm2 \n\t"\
412 "punpckhbw %%mm7, %%mm4 \n\t"\
413 "punpckhbw %%mm5, %%mm1 \n\t"\
415 "psllq $2, %%mm3 \n\t"\
416 "psllq $2, %%mm4 \n\t"\
418 "por %%mm3, %%mm2 \n\t"\
419 "por %%mm4, %%mm1 \n\t"\
421 MOVNTQ(%%mm2, (%4, %%eax, 2))\
422 MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
424 "addl $8, %%eax \n\t"\
425 "cmpl %5, %%eax \n\t"\
// FIXME find a faster way to shuffle it to BGR24

/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm5, %%mm6 \n\t" /* R */\
"punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
"punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
"punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
"punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
"movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
"movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
"punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
"punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
"punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
"movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
"psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
"pand bm00000111, %%mm4 \n\t" /* 00000RGB 0 */\
"pand bm11111000, %%mm0 \n\t" /* 00RGB000 0.5 */\
"por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
"movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
"psllq $48, %%mm2 \n\t" /* GB000000 1 */\
"por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
"movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
"psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
"por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
"pand bm00001111, %%mm2 \n\t" /* 0000RGBR 1 */\
"movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
"psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
"pand bm00000111, %%mm4 \n\t" /* 00000RGB 2 */\
"pand bm11111000, %%mm1 \n\t" /* 00RGB000 2.5 */\
"por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
"movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
"psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
"por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
"psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
"movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
"psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
"pand bm00000111, %%mm5 \n\t" /* 00000RGB 3 */\
"pand bm11111000, %%mm3 \n\t" /* 00RGB000 3.5 */\
"por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
"psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
"por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
"leal (%%eax, %%eax, 2), %%ebx \n\t"\
MOVNTQ(%%mm0, (%4, %%ebx))\
MOVNTQ(%%mm2, 8(%4, %%ebx))\
MOVNTQ(%%mm3, 16(%4, %%ebx))\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"\
void in_asm_used_var_warning_killer()
{
    int i= yCoeff+vrCoeff+ubCoeff+vgCoeff+ugCoeff+bF8+bFC+w400+w80+w10+
    bm00001111+bm00000111+bm11111000+b16Dither+b16Dither1+b16Dither2+g16Dither+g16Dither1+
    g16Dither2+b16Mask+g16Mask+r16Mask+b15Mask+g15Mask+r15Mask+temp0+asm_yalpha1+ asm_uvalpha1;
}
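// Referencing every file-scope constant from C keeps the compiler from
// discarding them as unused; the inline asm names them only inside strings,
// which gcc cannot see.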
static inline void yuv2yuv(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                           uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstw, int yalpha, int uvalpha)
{
    int yalpha1=yalpha^4095;
    int uvalpha1=uvalpha^4095;
    int i;

    asm volatile ("\n\t"::: "memory");

    for(i=0; i<dstw; i++)
        ((uint8_t*)dest)[i] = (buf0[i]*yalpha1+buf1[i]*yalpha)>>19;

    for(i=0; i<(dstw>>1); i++)
    {
        ((uint8_t*)uDest)[i] = (uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19;
        ((uint8_t*)vDest)[i] = (uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19;
    }
}
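// Why the >>19 in yuv2yuv() recovers an 8-bit value: the horizontal scalers
// store pixels scaled by <<7 and yalpha+yalpha1 == 4095 (~4096 == 1.0 in
// Q12), so a constant line v yields about (v<<7)*4096 >> 19 == v.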
/**
 * vertical scale YV12 to RGB
 */
static inline void yuv2rgbX(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                            uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
{
    int yalpha1=yalpha^4095;
    int uvalpha1=uvalpha^4095;

"punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
"punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0

"movq %%mm3, %%mm1 \n\t"
"punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
"punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0

MOVNTQ(%%mm3, (%4, %%eax, 4))
MOVNTQ(%%mm1, 8(%4, %%eax, 4))

"addl $4, %%eax \n\t"
"cmpl %5, %%eax \n\t"

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)
565 "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
566 "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
568 "movq %%mm3, %%mm1 \n\t"
569 "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
570 "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
572 "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
573 "psrlq $8, %%mm3 \n\t" // GR0BGR00
574 "pand bm00000111, %%mm2 \n\t" // BGR00000
575 "pand bm11111000, %%mm3 \n\t" // 000BGR00
576 "por %%mm2, %%mm3 \n\t" // BGRBGR00
577 "movq %%mm1, %%mm2 \n\t"
578 "psllq $48, %%mm1 \n\t" // 000000BG
579 "por %%mm1, %%mm3 \n\t" // BGRBGRBG
581 "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
582 "psrld $16, %%mm2 \n\t" // R000R000
583 "psrlq $24, %%mm1 \n\t" // 0BGR0000
584 "por %%mm2, %%mm1 \n\t" // RBGRR000
586 "movl %4, %%ebx \n\t"
587 "addl %%eax, %%ebx \n\t"
591 "movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
592 "movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
594 "movd %%mm3, (%%ebx, %%eax, 2) \n\t"
595 "psrlq $32, %%mm3 \n\t"
596 "movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
597 "movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
599 "addl $4, %%eax \n\t"
600 "cmpl %5, %%eax \n\t"
603 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstw),
604 "m" (yalpha1), "m" (uvalpha1)
614 "paddusb b16Dither, %%mm1 \n\t"
615 "paddusb b16Dither, %%mm0 \n\t"
616 "paddusb b16Dither, %%mm3 \n\t"
618 "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
619 "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
620 "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
622 "psrlw $3, %%mm3 \n\t"
623 "psllw $2, %%mm1 \n\t"
624 "psllw $7, %%mm0 \n\t"
625 "pand g15Mask, %%mm1 \n\t"
626 "pand r15Mask, %%mm0 \n\t"
628 "por %%mm3, %%mm1 \n\t"
629 "por %%mm1, %%mm0 \n\t"
631 MOVNTQ(%%mm0, (%4, %%eax, 2))
633 "addl $4, %%eax \n\t"
634 "cmpl %5, %%eax \n\t"
637 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
638 "m" (yalpha1), "m" (uvalpha1)
648 "paddusb g16Dither, %%mm1 \n\t"
649 "paddusb b16Dither, %%mm0 \n\t"
650 "paddusb b16Dither, %%mm3 \n\t"
652 "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
653 "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
654 "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
656 "psrlw $3, %%mm3 \n\t"
657 "psllw $3, %%mm1 \n\t"
658 "psllw $8, %%mm0 \n\t"
659 "pand g16Mask, %%mm1 \n\t"
660 "pand r16Mask, %%mm0 \n\t"
662 "por %%mm3, %%mm1 \n\t"
663 "por %%mm1, %%mm0 \n\t"
665 MOVNTQ(%%mm0, (%4, %%eax, 2))
667 "addl $4, %%eax \n\t"
668 "cmpl %5, %%eax \n\t"
671 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
672 "m" (yalpha1), "m" (uvalpha1)
asm volatile ("\n\t"::: "memory");

if(dstbpp==32 || dstbpp==24)

    // vertical linear interpolation && yuv2rgb in a single step:
    int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
    int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
    int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
    dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
    dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
    dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];

    // vertical linear interpolation && yuv2rgb in a single step:
    int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
    int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
    int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);

    ((uint16_t*)dest)[i] =
        clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
        clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
        clip_table16r[(Y + yuvtab_3343[V]) >>13];

    // vertical linear interpolation && yuv2rgb in a single step:
    int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
    int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
    int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);

    ((uint16_t*)dest)[i] =
        clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
        clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
        clip_table15r[(Y + yuvtab_3343[V]) >>13];
734 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
735 "m" (yalpha1), "m" (uvalpha1)
745 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
746 "m" (yalpha1), "m" (uvalpha1)
754 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
756 "paddusb b16Dither, %%mm2 \n\t"
757 "paddusb b16Dither, %%mm4 \n\t"
758 "paddusb b16Dither, %%mm5 \n\t"
763 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
764 "m" (yalpha1), "m" (uvalpha1)
772 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
774 "paddusb g16Dither, %%mm2 \n\t"
775 "paddusb b16Dither, %%mm4 \n\t"
776 "paddusb b16Dither, %%mm5 \n\t"
781 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
782 "m" (yalpha1), "m" (uvalpha1)
asm volatile ("\n\t"::: "memory");

for(i=0; i<dstw-1; i+=2){
    // vertical linear interpolation && yuv2rgb in a single step:
    int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
    int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
    int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
    int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);

    int Cb= yuvtab_40cf[U];
    int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
    int Cr= yuvtab_3343[V];

    dest[4*i+0]=clip_table[((Y1 + Cb) >>13)];
    dest[4*i+1]=clip_table[((Y1 + Cg) >>13)];
    dest[4*i+2]=clip_table[((Y1 + Cr) >>13)];

    dest[4*i+4]=clip_table[((Y2 + Cb) >>13)];
    dest[4*i+5]=clip_table[((Y2 + Cg) >>13)];
    dest[4*i+6]=clip_table[((Y2 + Cr) >>13)];

for(i=0; i<dstw-1; i+=2){
    // vertical linear interpolation && yuv2rgb in a single step:
    int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
    int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
    int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
    int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);

    int Cb= yuvtab_40cf[U];
    int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
    int Cr= yuvtab_3343[V];

    dest[0]=clip_table[((Y1 + Cb) >>13)];
    dest[1]=clip_table[((Y1 + Cg) >>13)];
    dest[2]=clip_table[((Y1 + Cr) >>13)];

    dest[3]=clip_table[((Y2 + Cb) >>13)];
    dest[4]=clip_table[((Y2 + Cg) >>13)];
    dest[5]=clip_table[((Y2 + Cr) >>13)];
for(i=0; i<dstw-1; i+=2){
    // vertical linear interpolation && yuv2rgb in a single step:
    int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
    int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
    int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
    int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);

    int Cb= yuvtab_40cf[U];
    int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
    int Cr= yuvtab_3343[V];

    ((uint16_t*)dest)[i] =
        clip_table16b[(Y1 + Cb) >>13] |
        clip_table16g[(Y1 + Cg) >>13] |
        clip_table16r[(Y1 + Cr) >>13];

    ((uint16_t*)dest)[i+1] =
        clip_table16b[(Y2 + Cb) >>13] |
        clip_table16g[(Y2 + Cg) >>13] |
        clip_table16r[(Y2 + Cr) >>13];

for(i=0; i<dstw-1; i+=2){
    // vertical linear interpolation && yuv2rgb in a single step:
    int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
    int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
    int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
    int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);

    int Cb= yuvtab_40cf[U];
    int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
    int Cr= yuvtab_3343[V];

    ((uint16_t*)dest)[i] =
        clip_table15b[(Y1 + Cb) >>13] |
        clip_table15g[(Y1 + Cg) >>13] |
        clip_table15r[(Y1 + Cr) >>13];

    ((uint16_t*)dest)[i+1] =
        clip_table15b[(Y2 + Cb) >>13] |
        clip_table15g[(Y2 + Cg) >>13] |
        clip_table15r[(Y2 + Cr) >>13];
/**
 * YV12 to RGB without scaling or interpolating
 */
static inline void yuv2rgb1(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                            uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
{
    int uvalpha1=uvalpha^4095;
    int yalpha1=yalpha^4095;

    if(fullUVIpol || allwaysIpol)

        yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);

    if( yalpha > 2048 ) buf0 = buf1;

    if( uvalpha < 2048 ) // note: this is not correct (it shifts chrominance by 0.5 pixels), but it's a bit faster

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */

"paddusb b16Dither, %%mm2 \n\t"
"paddusb b16Dither, %%mm4 \n\t"
"paddusb b16Dither, %%mm5 \n\t"

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */

"paddusb g16Dither, %%mm2 \n\t"
"paddusb b16Dither, %%mm4 \n\t"
"paddusb b16Dither, %%mm5 \n\t"

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */

"paddusb b16Dither, %%mm2 \n\t"
"paddusb b16Dither, %%mm4 \n\t"
"paddusb b16Dither, %%mm5 \n\t"

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */

"paddusb g16Dither, %%mm2 \n\t"
"paddusb b16Dither, %%mm4 \n\t"
"paddusb b16Dither, %%mm5 \n\t"

:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)

//FIXME write 2 versions (for even & odd lines)
asm volatile ("\n\t"::: "memory");
for(i=0; i<dstw-1; i+=2){
    // vertical linear interpolation && yuv2rgb in a single step:
    int Y1=yuvtab_2568[buf0[i]>>7];
    int Y2=yuvtab_2568[buf0[i+1]>>7];
    int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
    int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);

    int Cb= yuvtab_40cf[U];
    int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
    int Cr= yuvtab_3343[V];

    dest[4*i+0]=clip_table[((Y1 + Cb) >>13)];
    dest[4*i+1]=clip_table[((Y1 + Cg) >>13)];
    dest[4*i+2]=clip_table[((Y1 + Cr) >>13)];

    dest[4*i+4]=clip_table[((Y2 + Cb) >>13)];
    dest[4*i+5]=clip_table[((Y2 + Cg) >>13)];
    dest[4*i+6]=clip_table[((Y2 + Cr) >>13)];

for(i=0; i<dstw-1; i+=2){
    // vertical linear interpolation && yuv2rgb in a single step:
    int Y1=yuvtab_2568[buf0[i]>>7];
    int Y2=yuvtab_2568[buf0[i+1]>>7];
    int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
    int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);

    int Cb= yuvtab_40cf[U];
    int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
    int Cr= yuvtab_3343[V];

    dest[0]=clip_table[((Y1 + Cb) >>13)];
    dest[1]=clip_table[((Y1 + Cg) >>13)];
    dest[2]=clip_table[((Y1 + Cr) >>13)];

    dest[3]=clip_table[((Y2 + Cb) >>13)];
    dest[4]=clip_table[((Y2 + Cg) >>13)];
    dest[5]=clip_table[((Y2 + Cr) >>13)];

for(i=0; i<dstw-1; i+=2){
    // vertical linear interpolation && yuv2rgb in a single step:
    int Y1=yuvtab_2568[buf0[i]>>7];
    int Y2=yuvtab_2568[buf0[i+1]>>7];
    int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
    int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);

    int Cb= yuvtab_40cf[U];
    int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
    int Cr= yuvtab_3343[V];

    ((uint16_t*)dest)[i] =
        clip_table16b[(Y1 + Cb) >>13] |
        clip_table16g[(Y1 + Cg) >>13] |
        clip_table16r[(Y1 + Cr) >>13];

    ((uint16_t*)dest)[i+1] =
        clip_table16b[(Y2 + Cb) >>13] |
        clip_table16g[(Y2 + Cg) >>13] |
        clip_table16r[(Y2 + Cr) >>13];

for(i=0; i<dstw-1; i+=2){
    // vertical linear interpolation && yuv2rgb in a single step:
    int Y1=yuvtab_2568[buf0[i]>>7];
    int Y2=yuvtab_2568[buf0[i+1]>>7];
    int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
    int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);

    int Cb= yuvtab_40cf[U];
    int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
    int Cr= yuvtab_3343[V];

    ((uint16_t*)dest)[i] =
        clip_table15b[(Y1 + Cb) >>13] |
        clip_table15g[(Y1 + Cg) >>13] |
        clip_table15r[(Y1 + Cr) >>13];

    ((uint16_t*)dest)[i+1] =
        clip_table15b[(Y2 + Cb) >>13] |
        clip_table15g[(Y2 + Cg) >>13] |
        clip_table15r[(Y2 + Cr) >>13];
static inline void hyscale(uint16_t *dst, int dstWidth, uint8_t *src, int srcWidth, int xInc)
{
    // *** horizontal scale Y line to temp buffer

"pxor %%mm7, %%mm7 \n\t"
"pxor %%mm2, %%mm2 \n\t" // 2*xalpha
"movd %5, %%mm6 \n\t" // xInc&0xFFFF
"punpcklwd %%mm6, %%mm6 \n\t"
"punpcklwd %%mm6, %%mm6 \n\t"
"movq %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t"
"paddw %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t"
"paddw %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t" //0,t,2t,3t t=xInc&0xFFFF
"movq %%mm2, temp0 \n\t"
"movd %4, %%mm6 \n\t" //(xInc*4)&0xFFFF
"punpcklwd %%mm6, %%mm6 \n\t"
"punpcklwd %%mm6, %%mm6 \n\t"
"xorl %%eax, %%eax \n\t" // i
"movl %0, %%esi \n\t" // src
"movl %1, %%edi \n\t" // buf1
"movl %3, %%edx \n\t" // (xInc*4)>>16
"xorl %%ecx, %%ecx \n\t"
"xorl %%ebx, %%ebx \n\t"
"movw %4, %%bx \n\t" // (xInc*4)&0xFFFF

#define FUNNY_Y_CODE \
PREFETCH" 1024(%%esi) \n\t"\
PREFETCH" 1056(%%esi) \n\t"\
PREFETCH" 1088(%%esi) \n\t"\
"call funnyYCode \n\t"\
"movq temp0, %%mm2 \n\t"\
"xorl %%ecx, %%ecx \n\t"

:: "m" (src), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
"m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF)
: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"

    for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth-1; i--) dst[i] = src[srcWidth-1]*128;
//NO MMX just normal asm ...

"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha

"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, (%%edi, %%eax, 2) \n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFFFF
"adcl %3, %%ebx \n\t" //xx+= xInc>>16 + carry

"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, 2(%%edi, %%eax, 2) \n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFFFF
"adcl %3, %%ebx \n\t" //xx+= xInc>>16 + carry

"addl $2, %%eax \n\t"
"cmpl %2, %%eax \n\t"

:: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
: "%eax", "%ebx", "%ecx", "%edi", "%esi"

} //if MMX2 can't be used
unsigned int xpos=0;
for(i=0;i<dstWidth;i++)
{
    register unsigned int xx=xpos>>16;
    register unsigned int xalpha=(xpos&0xFFFF)>>9;
    dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
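    // The scalar fallback walks the source in 16.16 fixed point: xx is the
    // integer part of xpos, xalpha the top 7 fraction bits, and each output
    // is a linear blend kept at <<7 precision for the vertical pass.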
inline static void hcscale(uint16_t *dst, int dstWidth,
                           uint8_t *src1, uint8_t *src2, int srcWidth, int xInc)
{

"pxor %%mm7, %%mm7 \n\t"
"pxor %%mm2, %%mm2 \n\t" // 2*xalpha
"movd %5, %%mm6 \n\t" // xInc&0xFFFF
"punpcklwd %%mm6, %%mm6 \n\t"
"punpcklwd %%mm6, %%mm6 \n\t"
"movq %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t"
"paddw %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t"
"paddw %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t" //0,t,2t,3t t=xInc&0xFFFF
"movq %%mm2, temp0 \n\t"
"movd %4, %%mm6 \n\t" //(xInc*4)&0xFFFF
"punpcklwd %%mm6, %%mm6 \n\t"
"punpcklwd %%mm6, %%mm6 \n\t"
"xorl %%eax, %%eax \n\t" // i
"movl %0, %%esi \n\t" // src
"movl %1, %%edi \n\t" // buf1
"movl %3, %%edx \n\t" // (xInc*4)>>16
"xorl %%ecx, %%ecx \n\t"
"xorl %%ebx, %%ebx \n\t"
"movw %4, %%bx \n\t" // (xInc*4)&0xFFFF

#define FUNNYUVCODE \
PREFETCH" 1024(%%esi) \n\t"\
PREFETCH" 1056(%%esi) \n\t"\
PREFETCH" 1088(%%esi) \n\t"\
"call funnyUVCode \n\t"\
"movq temp0, %%mm2 \n\t"\
"xorl %%ecx, %%ecx \n\t"

"xorl %%eax, %%eax \n\t" // i
"movl %6, %%esi \n\t" // src
"movl %1, %%edi \n\t" // buf1
"addl $4096, %%edi \n\t"

:: "m" (src1), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
"m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF), "m" (src2)
: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"

    for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth/2-1; i--)
    {
        dst[i] = src1[srcWidth/2-1]*128;
        dst[i+2048] = src2[srcWidth/2-1]*128;
1317 "xorl %%eax, %%eax \n\t" // i
1318 "xorl %%ebx, %%ebx \n\t" // xx
1319 "xorl %%ecx, %%ecx \n\t" // 2*xalpha
1321 "movl %0, %%esi \n\t"
1322 "movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]
1323 "movzbl 1(%%esi, %%ebx), %%esi \n\t" //src[xx+1]
1324 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
1325 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
1326 "shll $16, %%edi \n\t"
1327 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1328 "movl %1, %%edi \n\t"
1329 "shrl $9, %%esi \n\t"
1330 "movw %%si, (%%edi, %%eax, 2) \n\t"
1332 "movzbl (%5, %%ebx), %%edi \n\t" //src[xx]
1333 "movzbl 1(%5, %%ebx), %%esi \n\t" //src[xx+1]
1334 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
1335 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
1336 "shll $16, %%edi \n\t"
1337 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1338 "movl %1, %%edi \n\t"
1339 "shrl $9, %%esi \n\t"
1340 "movw %%si, 4096(%%edi, %%eax, 2)\n\t"
1342 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
1343 "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
1344 "addl $1, %%eax \n\t"
1345 "cmpl %2, %%eax \n\t"
1348 :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF),
1350 : "%eax", "%ebx", "%ecx", "%edi", "%esi"
1353 } //if MMX2 cant be used
unsigned int xpos=0;
for(i=0;i<dstWidth;i++)
{
    register unsigned int xx=xpos>>16;
    register unsigned int xalpha=(xpos&0xFFFF)>>9;
    dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
    dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);

    dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
    dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
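    // Note (xalpha^127)==127-xalpha since xalpha<128, so the two variants
    // above are the same linear blend up to rounding:
    // src1[xx]*(127-t)+src1[xx+1]*t versus (src1[xx]<<7)+(src1[xx+1]-src1[xx])*t.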
// *** bilinear scaling and yuv->rgb or yuv->yuv conversion of yv12 slices:
// *** Note: it's called multiple times while decoding a frame, first time y==0
// *** Designed to upscale, but may work for downscale too.
// s_xinc = (src_width << 16) / dst_width
// s_yinc = (src_height << 16) / dst_height
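// Example: upscaling 320 -> 640 gives s_xinc = (320<<16)/640 = 0x8000, i.e.
// the source position advances by half a source pixel per destination pixel.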
void SwScale_YV12slice(unsigned char* srcptr[],int stride[], int y, int h,
                       uint8_t* dstptr[], int dststride, int dstw, int dstbpp,
                       unsigned int s_xinc,unsigned int s_yinc){

//static int s_yinc=(vo_dga_src_height<<16)/vo_dga_vp_height;
//static int s_xinc=(vo_dga_src_width<<8)/vo_dga_vp_width;

unsigned int s_xinc2;

static int s_srcypos; // points to the dst pixel's center in the source (0 is the center of pixel 0,0 in src)

// last horizontally interpolated lines, used to avoid unnecessary calculations
static int s_last_ypos;
static int s_last_y1pos;

// used to detect a horizontal size change
static int old_dstw= -1;
static int old_s_xinc= -1;

int srcWidth= (dstw*s_xinc + 0x8000)>>16;
int dstUVw= fullUVIpol ? dstw : dstw/2;

canMMX2BeUsed= (s_xinc <= 0x10000 && (dstw&31)==0 && (srcWidth&15)==0) ? 1 : 0;
// match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
// n-2 is the last chrominance sample available
// FIXME this is not perfect, but no one should notice the difference; the more correct variant
// would be like the vertical one, but that would require some special code for the
// first and last pixel
if(canMMX2BeUsed) s_xinc+= 20;
else s_xinc = ((srcWidth-2)<<16)/(dstw-2) - 20;

if(fullUVIpol && !(dstbpp==12)) s_xinc2= s_xinc>>1;
else s_xinc2= s_xinc;
// force calculation of the horizontal interpolation of the first line

s_srcypos= s_yinc/2 - 0x8000;

// clean the buffers so that no green stuff is drawn if the width is not sane (%8=0)
for(i=dstw-2; i<dstw+20; i++)
{
    pix_buf_uv[0][i] = pix_buf_uv[1][i]
    = pix_buf_uv[0][2048+i] = pix_buf_uv[1][2048+i] = 128;
    pix_buf_uv[0][i/2] = pix_buf_uv[1][i/2]
    = pix_buf_uv[0][2048+i/2] = pix_buf_uv[1][2048+i/2] = 128;
    pix_buf_y[0][i]= pix_buf_y[1][i]= 0;

// can't downscale !!!
if((old_s_xinc != s_xinc || old_dstw!=dstw) && canMMX2BeUsed)

    // create an optimized horizontal scaling routine

"movq (%%esi), %%mm0 \n\t" //FIXME Alignment
"movq %%mm0, %%mm1 \n\t"
"psrlq $8, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"addw %%bx, %%cx \n\t" //2*xalpha += (4*s_xinc)&0xFFFF
"pshufw $0xFF, %%mm1, %%mm1 \n\t"
"adcl %%edx, %%esi \n\t" //xx+= (4*s_xinc)>>16 + carry
"pshufw $0xFF, %%mm0, %%mm0 \n\t"
"psrlw $9, %%mm3 \n\t"
"psubw %%mm1, %%mm0 \n\t"
"pmullw %%mm3, %%mm0 \n\t"
"paddw %%mm6, %%mm2 \n\t" // 2*alpha += xpos&0xFFFF
"psllw $7, %%mm1 \n\t"
"paddw %%mm1, %%mm0 \n\t"

"movq %%mm0, (%%edi, %%eax) \n\t"

"addl $8, %%eax \n\t"

:"=r" (fragment), "=r" (imm8OfPShufW1), "=r" (imm8OfPShufW2),
"=r" (fragmentLength)

    xpos= 0; //s_xinc/2 - 0x8000; // difference between pixel centers

    /* choose xinc so that all 8 parts fit exactly
    Note: we cannot use just 1 part because it would not fit in the code cache */
//  s_xinc2_diff= -((((s_xinc2*(dstw/8))&0xFFFF))/(dstw/8))-10;
//  s_xinc_diff= -((((s_xinc*(dstw/8))&0xFFFF))/(dstw/8));
//  s_xinc2_diff+= ((0x10000/(dstw/8)));
//  s_xinc_diff= s_xinc2_diff*2;
//  s_xinc2+= s_xinc2_diff;
//  s_xinc+= s_xinc_diff;
//  old_s_xinc= s_xinc;

    for(i=0; i<dstw/8; i++)

        int b=((xpos+s_xinc)>>16) - xx;
        int c=((xpos+s_xinc*2)>>16) - xx;
        int d=((xpos+s_xinc*3)>>16) - xx;

        memcpy(funnyYCode + fragmentLength*i/4, fragment, fragmentLength);

        funnyYCode[fragmentLength*i/4 + imm8OfPShufW1]=
        funnyYCode[fragmentLength*i/4 + imm8OfPShufW2]=
            a | (b<<2) | (c<<4) | (d<<6);

        // if we don't need to read 8 bytes then don't :), reduces the chance of
        // crossing a cache line
        if(d<3) funnyYCode[fragmentLength*i/4 + 1]= 0x6E;

        funnyYCode[fragmentLength*(i+4)/4]= RET;
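        // How the patching works: the copied fragment contains two pshufw
        // instructions whose immediate is rewritten to a|(b<<2)|(c<<4)|(d<<6);
        // each 2-bit field selects which of the 4 loaded source words feeds
        // that output pixel. The 0x6E patch rewrites the fragment's leading
        // movq (0x0F 0x6F) into a movd (0x0F 0x6E) so only 4 bytes are loaded.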
    xpos= 0; //s_xinc2/2 - 0x10000; // difference between centers of chroma samples
    for(i=0; i<dstUVw/8; i++)

        int b=((xpos+s_xinc2)>>16) - xx;
        int c=((xpos+s_xinc2*2)>>16) - xx;
        int d=((xpos+s_xinc2*3)>>16) - xx;

        memcpy(funnyUVCode + fragmentLength*i/4, fragment, fragmentLength);

        funnyUVCode[fragmentLength*i/4 + imm8OfPShufW1]=
        funnyUVCode[fragmentLength*i/4 + imm8OfPShufW2]=
            a | (b<<2) | (c<<4) | (d<<6);

        // if we don't need to read 8 bytes then don't :), reduces the chance of
        // crossing a cache line
        if(d<3) funnyUVCode[fragmentLength*i/4 + 1]= 0x6E;

        funnyUVCode[fragmentLength*(i+4)/4]= RET;
//  funnyCode[0]= RET;

unsigned char *dest =dstptr[0]+dststride*s_ypos;
unsigned char *uDest=dstptr[1]+(dststride>>1)*(s_ypos>>1);
unsigned char *vDest=dstptr[2]+(dststride>>1)*(s_ypos>>1);

int y0=(s_srcypos + 0xFFFF)>>16; // first luminance source line number below the dst line
// points to the dst pixel's center in the source (0 is the center of pixel 0,0 in src)
int srcuvpos= dstbpp==12 ? s_srcypos + s_yinc/2 - 0x8000 :
int y1=(srcuvpos + 0x1FFFF)>>17; // first chrominance source line number below the dst line
int yalpha=((s_srcypos-1)&0xFFFF)>>4;
int uvalpha=((srcuvpos-1)&0x1FFFF)>>5;
uint16_t *buf0=pix_buf_y[y0&1]; // top line of the interpolated slice
uint16_t *buf1=pix_buf_y[((y0+1)&1)]; // bottom line of the interpolated slice
uint16_t *uvbuf0=pix_buf_uv[y1&1]; // top line of the interpolated slice
uint16_t *uvbuf1=pix_buf_uv[(y1+1)&1]; // bottom line of the interpolated slice

if(y0>=y+h) break; // FIXME wrong, skips last lines, but they are duplicates anyway
if((y0&1) && dstbpp==12) uvalpha=-1; // there is no alpha if there is no line

s_ypos++; s_srcypos+=s_yinc;
1593 s_ypos++; s_srcypos+=s_yinc;
1595 //only interpolate the src line horizontally if we didnt do it allready
1599 // skip if first line has been horiz scaled alleady
1600 if(s_last_ypos != y0-1)
1602 // check if first line is before any available src lines
1603 if(y0-1 < y) src=srcptr[0]+(0 )*stride[0];
1604 else src=srcptr[0]+(y0-y-1)*stride[0];
1606 hyscale(buf0, dstw, src, srcWidth, s_xinc);
1608 // check if second line is after any available src lines
1609 if(y0-y >= h) src=srcptr[0]+(h-1)*stride[0];
1610 else src=srcptr[0]+(y0-y)*stride[0];
1612 // the min() is required to avoid reuseing lines which where not available
1613 s_last_ypos= MIN(y0, y+h-1);
1614 hyscale(buf1, dstw, src, srcWidth, s_xinc);
1616 // printf("%d %d %d %d\n", y, y1, s_last_y1pos, h);
1617 // *** horizontal scale U and V lines to temp buffer
1618 if(s_last_y1pos!=y1)
1620 uint8_t *src1, *src2;
1621 // skip if first line has been horiz scaled alleady
1622 if(s_last_y1pos != y1-1)
1624 // check if first line is before any available src lines
1627 src1= srcptr[1]+(0)*stride[1];
1628 src2= srcptr[2]+(0)*stride[2];
1630 src1= srcptr[1]+(y1-y/2-1)*stride[1];
1631 src2= srcptr[2]+(y1-y/2-1)*stride[2];
1633 hcscale(uvbuf0, dstUVw, src1, src2, srcWidth, s_xinc2);
1636 // check if second line is after any available src lines
1639 src1= srcptr[1]+(h/2-1)*stride[1];
1640 src2= srcptr[2]+(h/2-1)*stride[2];
1642 src1= srcptr[1]+(y1-y/2)*stride[1];
1643 src2= srcptr[2]+(y1-y/2)*stride[2];
1645 hcscale(uvbuf1, dstUVw, src1, src2, srcWidth, s_xinc2);
1647 // the min() is required to avoid reuseing lines which where not available
1648 s_last_y1pos= MIN(y1, y/2+h/2-1);
1651 if(dstbpp==12) //YV12
1652 yuv2yuv(buf0, buf1, uvbuf0, uvbuf1, dest, uDest, vDest, dstw, yalpha, uvalpha);
1653 else if(ABS(s_yinc - 0x10000) < 10)
1654 yuv2rgb1(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
1656 yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
b16Dither= b16Dither1;
b16Dither1= b16Dither2;
b16Dither2= b16Dither;

g16Dither= g16Dither1;
g16Dither1= g16Dither2;
g16Dither2= g16Dither;
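// Rotating the dither constants each output line turns the bytewise paddusb
// in the 15/16-bit paths into a small ordered dither: the 5-bit channels see
// the byte pattern {0,4} on one line and {6,2} on the next, and the 6-bit
// green channel sees {0,2} and {3,1}.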
__asm __volatile(SFENCE:::"memory");
__asm __volatile(EMMS:::"memory");
void SwScale_Init(){
    // generating tables:
    int i;
    for(i=0; i<256; i++)
    {
        clip_table[i]=0;
        clip_table[i+256]=i;
        clip_table[i+512]=255;
        yuvtab_2568[i]=(0x2568*(i-16))+(256<<13);
        yuvtab_3343[i]=0x3343*(i-128);
        yuvtab_0c92[i]=-0x0c92*(i-128);
        yuvtab_1a1e[i]=-0x1a1e*(i-128);
        yuvtab_40cf[i]=0x40cf*(i-128);
    }
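    // clip_table maps a biased index to a saturated 8-bit value: entries
    // [0,255] clamp to 0, [256,511] are the identity, [512,767] clamp to
    // 255, matching the +(256<<13) bias built into yuvtab_2568[] above.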
    for(i=0; i<768; i++)
    {
        int v= clip_table[i];
        clip_table16b[i]= v>>3;
        clip_table16g[i]= (v<<3)&0x07E0;
        clip_table16r[i]= (v<<8)&0xF800;
        clip_table15b[i]= v>>3;
        clip_table15g[i]= (v<<2)&0x03E0;
        clip_table15r[i]= (v<<7)&0x7C00;