2 // Software scaling and colorspace conversion routines for MPlayer
// Original C implementation by A'rpi/ESP-team <arpi@thot.banki.hu>
5 // current version mostly by Michael Niedermayer (michaelni@gmx.at)
6 // the parts written by michael are under GNU GPL
10 #include "../config.h"
18 //disables the unscaled height version
21 #define RET 0xC3 //near return opcode
25 known BUGS with known cause (no bugreports please!, but patches are welcome :) )
26 horizontal MMX2 scaler reads 1-7 samples too much (might cause a sig11)
28 Supported output formats BGR15 BGR16 BGR24 BGR32
BGR15 & BGR16 MMX versions support dithering
30 Special versions: fast Y 1:1 scaling (no interpolation in y direction)
more intelligent misalignment avoidance for the horizontal scaler
/* NOTE: classic function-like macros — each argument may be evaluated twice,
   so do not pass expressions with side effects. */
#define ABS(a) ((a) > 0 ? (a) : (-(a)))
#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t" /* packed byte average (MMX2) */
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t" /* packed byte average (3DNow!) */
#define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t" /* non-temporal (cache-bypassing) store */
#define MOVNTQ(a,b) "movq " #a ", " #b " \n\t" /* fallback: plain store */
static uint64_t __attribute__((aligned(8))) yCoeff= 0x2568256825682568LL; // Y scale, 4x 16-bit for pmulhw
static uint64_t __attribute__((aligned(8))) vrCoeff= 0x3343334333433343LL; // V -> R
static uint64_t __attribute__((aligned(8))) ubCoeff= 0x40cf40cf40cf40cfLL; // U -> B
static uint64_t __attribute__((aligned(8))) vgCoeff= 0xE5E2E5E2E5E2E5E2LL; // V -> G (negative)
static uint64_t __attribute__((aligned(8))) ugCoeff= 0xF36EF36EF36EF36ELL; // U -> G (negative)
static uint64_t __attribute__((aligned(8))) w400= 0x0400040004000400LL; // 4x 1024, chroma bias ("(U-128)8")
static uint64_t __attribute__((aligned(8))) w80= 0x0080008000800080LL; // 4x 128, luma bias ("8(Y-16)")
static uint64_t __attribute__((aligned(8))) w10= 0x0010001000100010LL; // 4x 16
static uint64_t __attribute__((aligned(8))) bm00001111=0x00000000FFFFFFFFLL; // byte masks used by the BGR24
static uint64_t __attribute__((aligned(8))) bm00000111=0x0000000000FFFFFFLL; // shuffling code
static uint64_t __attribute__((aligned(8))) bm11111000=0xFFFFFFFFFF000000LL;
static uint64_t __attribute__((aligned(8))) b16Dither= 0x0004000400040004LL; // dither offsets for 15/16 bit
static uint64_t __attribute__((aligned(8))) b16Dither1=0x0004000400040004LL; // output; the 1/2 variants are
static uint64_t __attribute__((aligned(8))) b16Dither2=0x0602060206020602LL; // presumably alternated per line — TODO confirm
static uint64_t __attribute__((aligned(8))) g16Dither= 0x0002000200020002LL;
static uint64_t __attribute__((aligned(8))) g16Dither1=0x0002000200020002LL;
static uint64_t __attribute__((aligned(8))) g16Dither2=0x0301030103010301LL;
static uint64_t __attribute__((aligned(8))) b16Mask= 0x001F001F001F001FLL; // RGB565 field masks
static uint64_t __attribute__((aligned(8))) g16Mask= 0x07E007E007E007E0LL;
static uint64_t __attribute__((aligned(8))) r16Mask= 0xF800F800F800F800LL;
static uint64_t __attribute__((aligned(8))) b15Mask= 0x001F001F001F001FLL; // RGB555 field masks
static uint64_t __attribute__((aligned(8))) g15Mask= 0x03E003E003E003E0LL;
static uint64_t __attribute__((aligned(8))) r15Mask= 0x7C007C007C007C00LL;
static uint64_t __attribute__((aligned(8))) temp0; // scratch qword for the MMX2 horizontal scalers
static uint64_t __attribute__((aligned(8))) asm_yalpha1; // replicated blend weights, written by
static uint64_t __attribute__((aligned(8))) asm_uvalpha1; // YSCALEYUV2RGB
// temporary storage for 4 yuv lines:
// 16bit for now (mmx likes it more compact)
static uint16_t __attribute__((aligned(8))) pix_buf_y[4][2048];
static uint16_t __attribute__((aligned(8))) pix_buf_uv[2][2048*2]; // U in words 0..2047, V at offset +2048
static uint16_t pix_buf_y[4][2048];
static uint16_t pix_buf_uv[2][2048*2];
// clipping helper table for C implementations:
static unsigned char clip_table[768];
// yuv->rgb conversion tables:
static int yuvtab_2568[256]; // names encode the fixed-point coefficient (cf. yCoeff etc. above)
static int yuvtab_3343[256];
static int yuvtab_0c92[256];
static int yuvtab_1a1e[256];
static int yuvtab_40cf[256];
static uint8_t funnyYCode[10000]; // runtime-generated MMX2 horizontal scaler, entered via "call funnyYCode"
static uint8_t funnyUVCode[10000]; // same, for chroma ("call funnyUVCode")
static int canMMX2BeUsed=0; // set per frame from xInc/width alignment constraints
/* Vertically interpolate one luma line pair (buf0/buf1, 16.16 weight in %6)
 * and one chroma line pair (uvbuf0/uvbuf1, weight %7; V stored 2048 words
 * after U) and convert to RGB.  Leaves packed bytes in mm3=B, mm0=R, mm1=G
 * (NOTE(review): register roles taken from the inline comments below —
 * confirm against the stores in the users of this macro).  %%eax is the
 * pixel counter, zeroed here. */
#define FULL_YSCALEYUV2RGB \
"pxor %%mm7, %%mm7 \n\t"\
"movd %6, %%mm6 \n\t" /*yalpha1*/\
"punpcklwd %%mm6, %%mm6 \n\t"\
"punpcklwd %%mm6, %%mm6 \n\t"\
"movd %7, %%mm5 \n\t" /*uvalpha1*/\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"xorl %%eax, %%eax \n\t"\
"movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
"movq (%2, %%eax,2), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax,2), %%mm3 \n\t" /* uvbuf1[eax]*/\
"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
"pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"movq 4096(%2, %%eax,2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
"psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"movq 4096(%3, %%eax,2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
"psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
"psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
"psubw w400, %%mm3 \n\t" /* 8(U-128)*/\
"pmulhw yCoeff, %%mm1 \n\t"\
"pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"pmulhw ubCoeff, %%mm3 \n\t"\
"psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
"pmulhw ugCoeff, %%mm2 \n\t"\
"paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
"psubw w400, %%mm0 \n\t" /* (V-128)8*/\
"movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
"pmulhw vrCoeff, %%mm0 \n\t"\
"pmulhw vgCoeff, %%mm4 \n\t"\
"paddw %%mm1, %%mm3 \n\t" /* B*/\
"paddw %%mm1, %%mm0 \n\t" /* R*/\
"packuswb %%mm3, %%mm3 \n\t"\
"packuswb %%mm0, %%mm0 \n\t"\
"paddw %%mm4, %%mm2 \n\t"\
"paddw %%mm2, %%mm1 \n\t" /* G*/\
"packuswb %%mm1, %%mm1 \n\t"
/* Like FULL_YSCALEYUV2RGB but processes 8 luma pixels per iteration with
 * 2x-horizontally-subsampled chroma; spills the replicated weights to
 * asm_yalpha1/asm_uvalpha1 because all eight mm registers are needed.
 * Ends with packed bytes mm2=B, mm4=G, mm5=R and mm7 cleared, the layout
 * expected by the WRITEBGR* store code. */
#define YSCALEYUV2RGB \
"movd %6, %%mm6 \n\t" /*yalpha1*/\
"punpcklwd %%mm6, %%mm6 \n\t"\
"punpcklwd %%mm6, %%mm6 \n\t"\
"movq %%mm6, asm_yalpha1 \n\t"\
"movd %7, %%mm5 \n\t" /*uvalpha1*/\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"movq %%mm5, asm_uvalpha1 \n\t"\
"xorl %%eax, %%eax \n\t"\
"movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
"movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
"movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
"psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
"movq asm_uvalpha1, %%mm0 \n\t"\
"pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
"pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
"psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
"psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
"psubw w400, %%mm3 \n\t" /* (U-128)8*/\
"psubw w400, %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw ugCoeff, %%mm3 \n\t"\
"pmulhw vgCoeff, %%mm4 \n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
"movq 8(%0, %%eax, 2), %%mm6 \n\t" /*buf0[eax]*/\
"movq 8(%1, %%eax, 2), %%mm7 \n\t" /*buf1[eax]*/\
"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
"psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
"pmulhw asm_yalpha1, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"pmulhw asm_yalpha1, %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"pmulhw ubCoeff, %%mm2 \n\t"\
"pmulhw vrCoeff, %%mm5 \n\t"\
"psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
"psubw w80, %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw yCoeff, %%mm1 \n\t"\
"pmulhw yCoeff, %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
/* Variant of YSCALEYUV2RGB that reads only one line per plane (no vertical
 * interpolation) — the fast path when yalpha/uvalpha blending is not needed.
 * Same output layout: packed bytes mm2=B, mm4=G, mm5=R, mm7=0. */
#define YSCALEYUV2RGB1 \
"xorl %%eax, %%eax \n\t"\
"movq (%2, %%eax), %%mm3 \n\t" /* uvbuf0[eax]*/\
"movq 4096(%2, %%eax), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
"psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
"psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
"psubw w400, %%mm3 \n\t" /* (U-128)8*/\
"psubw w400, %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw ugCoeff, %%mm3 \n\t"\
"pmulhw vgCoeff, %%mm4 \n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
"movq 8(%1, %%eax, 2), %%mm7 \n\t" /*buf0[eax]*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"pmulhw ubCoeff, %%mm2 \n\t"\
"pmulhw vrCoeff, %%mm5 \n\t"\
"psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
"psubw w80, %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw yCoeff, %%mm1 \n\t"\
"pmulhw yCoeff, %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
283 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
284 "movq %%mm2, %%mm1 \n\t" /* B */\
285 "movq %%mm5, %%mm6 \n\t" /* R */\
286 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
287 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
288 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
289 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
290 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
291 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
292 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
293 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
294 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
295 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
297 MOVNTQ(%%mm0, (%4, %%eax, 4))\
298 MOVNTQ(%%mm2, 8(%4, %%eax, 4))\
299 MOVNTQ(%%mm1, 16(%4, %%eax, 4))\
300 MOVNTQ(%%mm3, 24(%4, %%eax, 4))\
302 "addl $8, %%eax \n\t"\
303 "cmpl %5, %%eax \n\t"\
307 "movq %%mm2, %%mm1 \n\t" /* B */\
308 "movq %%mm4, %%mm3 \n\t" /* G */\
309 "movq %%mm5, %%mm6 \n\t" /* R */\
311 "punpcklbw %%mm7, %%mm3 \n\t" /* 0G0G0G0G */\
312 "punpcklbw %%mm7, %%mm2 \n\t" /* 0B0B0B0B */\
313 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R */\
315 "psrlw $3, %%mm2 \n\t"\
316 "psllw $3, %%mm3 \n\t"\
317 "psllw $8, %%mm5 \n\t"\
319 "pand g16Mask, %%mm3 \n\t"\
320 "pand r16Mask, %%mm5 \n\t"\
322 "por %%mm3, %%mm2 \n\t"\
323 "por %%mm5, %%mm2 \n\t"\
325 "punpckhbw %%mm7, %%mm4 \n\t" /* 0G0G0G0G */\
326 "punpckhbw %%mm7, %%mm1 \n\t" /* 0B0B0B0B */\
327 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R */\
329 "psrlw $3, %%mm1 \n\t"\
330 "psllw $3, %%mm4 \n\t"\
331 "psllw $8, %%mm6 \n\t"\
333 "pand g16Mask, %%mm4 \n\t"\
334 "pand r16Mask, %%mm6 \n\t"\
336 "por %%mm4, %%mm1 \n\t"\
337 "por %%mm6, %%mm1 \n\t"\
339 MOVNTQ(%%mm2, (%4, %%eax, 2))\
340 MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
342 "addl $8, %%eax \n\t"\
343 "cmpl %5, %%eax \n\t"\
347 "movq %%mm2, %%mm1 \n\t" /* B */\
348 "movq %%mm4, %%mm3 \n\t" /* G */\
349 "movq %%mm5, %%mm6 \n\t" /* R */\
351 "punpcklbw %%mm7, %%mm3 \n\t" /* 0G0G0G0G */\
352 "punpcklbw %%mm7, %%mm2 \n\t" /* 0B0B0B0B */\
353 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R */\
355 "psrlw $3, %%mm2 \n\t"\
356 "psllw $2, %%mm3 \n\t"\
357 "psllw $7, %%mm5 \n\t"\
359 "pand g15Mask, %%mm3 \n\t"\
360 "pand r15Mask, %%mm5 \n\t"\
362 "por %%mm3, %%mm2 \n\t"\
363 "por %%mm5, %%mm2 \n\t"\
365 "punpckhbw %%mm7, %%mm4 \n\t" /* 0G0G0G0G */\
366 "punpckhbw %%mm7, %%mm1 \n\t" /* 0B0B0B0B */\
367 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R */\
369 "psrlw $3, %%mm1 \n\t"\
370 "psllw $2, %%mm4 \n\t"\
371 "psllw $7, %%mm6 \n\t"\
373 "pand g15Mask, %%mm4 \n\t"\
374 "pand r15Mask, %%mm6 \n\t"\
376 "por %%mm4, %%mm1 \n\t"\
377 "por %%mm6, %%mm1 \n\t"\
379 MOVNTQ(%%mm2, (%4, %%eax, 2))\
380 MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
382 "addl $8, %%eax \n\t"\
383 "cmpl %5, %%eax \n\t"\
385 // FIXME find a faster way to shuffle it to BGR24
387 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
388 "movq %%mm2, %%mm1 \n\t" /* B */\
389 "movq %%mm5, %%mm6 \n\t" /* R */\
390 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
391 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
392 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
393 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
394 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
395 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
396 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
397 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
398 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
399 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
401 "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
402 "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
403 "pand bm00000111, %%mm4 \n\t" /* 00000RGB 0 */\
404 "pand bm11111000, %%mm0 \n\t" /* 00RGB000 0.5 */\
405 "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
406 "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
407 "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
408 "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
410 "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
411 "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
412 "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
413 "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
414 "pand bm00001111, %%mm2 \n\t" /* 0000RGBR 1 */\
415 "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
416 "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
417 "pand bm00000111, %%mm4 \n\t" /* 00000RGB 2 */\
418 "pand bm11111000, %%mm1 \n\t" /* 00RGB000 2.5 */\
419 "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
420 "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
421 "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
422 "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
424 "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
425 "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
426 "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
427 "pand bm00000111, %%mm5 \n\t" /* 00000RGB 3 */\
428 "pand bm11111000, %%mm3 \n\t" /* 00RGB000 3.5 */\
429 "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
430 "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
431 "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
433 "leal (%%eax, %%eax, 2), %%ebx \n\t"\
434 MOVNTQ(%%mm0, (%4, %%ebx))\
435 MOVNTQ(%%mm2, 8(%4, %%ebx))\
436 MOVNTQ(%%mm3, 16(%4, %%ebx))\
438 "addl $8, %%eax \n\t"\
439 "cmpl %5, %%eax \n\t"\
444 * vertical scale YV12 to RGB
/* Vertical linear interpolation between two horizontally-scaled line pairs
 * (luma buf0/buf1, chroma uvbuf0/uvbuf1; V plane stored at word offset +2048)
 * combined with YUV->RGB conversion, writing dstw pixels to dest at dstbpp
 * (32/24/16/15) bits per pixel.  yalpha/uvalpha are 12-bit blend weights
 * toward buf1/uvbuf1.
 * NOTE(review): this excerpt omits the #ifdef HAVE_MMX plumbing around the
 * asm fragments below; the C fallback paths appear at the end. */
static inline void yuv2rgbX(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
int yalpha1=yalpha^4095; /* == 4095-yalpha for 0<=yalpha<=4095: inverse blend weight */
int uvalpha1=uvalpha^4095; /* inverse chroma blend weight */
"punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
"punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
"movq %%mm3, %%mm1 \n\t"
"punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
"punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
MOVNTQ(%%mm3, (%4, %%eax, 4))
MOVNTQ(%%mm1, 8(%4, %%eax, 4))
"addl $4, %%eax \n\t"
"cmpl %5, %%eax \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)
"punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
"punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
"movq %%mm3, %%mm1 \n\t"
"punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
"punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
"movq %%mm3, %%mm2 \n\t" // BGR0BGR0
"psrlq $8, %%mm3 \n\t" // GR0BGR00
"pand bm00000111, %%mm2 \n\t" // BGR00000
"pand bm11111000, %%mm3 \n\t" // 000BGR00
"por %%mm2, %%mm3 \n\t" // BGRBGR00
"movq %%mm1, %%mm2 \n\t"
"psllq $48, %%mm1 \n\t" // 000000BG
"por %%mm1, %%mm3 \n\t" // BGRBGRBG
"movq %%mm2, %%mm1 \n\t" // BGR0BGR0
"psrld $16, %%mm2 \n\t" // R000R000
"psrlq $24, %%mm1 \n\t" // 0BGR0000
"por %%mm2, %%mm1 \n\t" // RBGRR000
"movl %4, %%ebx \n\t"
"addl %%eax, %%ebx \n\t"
"movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
"movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
"movd %%mm3, (%%ebx, %%eax, 2) \n\t"
"psrlq $32, %%mm3 \n\t"
"movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
"movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
"addl $4, %%eax \n\t"
"cmpl %5, %%eax \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)
"paddusb b16Dither, %%mm1 \n\t"
"paddusb b16Dither, %%mm0 \n\t"
"paddusb b16Dither, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
"punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
"punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
"psrlw $3, %%mm3 \n\t"
"psllw $2, %%mm1 \n\t"
"psllw $7, %%mm0 \n\t"
"pand g15Mask, %%mm1 \n\t"
"pand r15Mask, %%mm0 \n\t"
"por %%mm3, %%mm1 \n\t"
"por %%mm1, %%mm0 \n\t"
MOVNTQ(%%mm0, (%4, %%eax, 2))
"addl $4, %%eax \n\t"
"cmpl %5, %%eax \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)
"paddusb g16Dither, %%mm1 \n\t"
"paddusb b16Dither, %%mm0 \n\t"
"paddusb b16Dither, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
"punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
"punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
"psrlw $3, %%mm3 \n\t"
"psllw $3, %%mm1 \n\t"
"psllw $8, %%mm0 \n\t"
"pand g16Mask, %%mm1 \n\t"
"pand r16Mask, %%mm0 \n\t"
"por %%mm3, %%mm1 \n\t"
"por %%mm1, %%mm0 \n\t"
MOVNTQ(%%mm0, (%4, %%eax, 2))
"addl $4, %%eax \n\t"
"cmpl %5, %%eax \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)
if(dstbpp==32 || dstbpp==24) // C fallback, RGB32/RGB24
// vertical linear interpolation && yuv2rgb in a single step:
int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)]; // B
dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)]; // G
dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)]; // R
// vertical linear interpolation && yuv2rgb in a single step:
int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
((uint16_t*)dest)[0] =
(clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<3)&0x07E0) |
((clip_table[(Y + yuvtab_3343[V]) >>13]<<8)&0xF800);
// vertical linear interpolation && yuv2rgb in a single step:
int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
((uint16_t*)dest)[0] =
(clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<2)&0x03E0) |
((clip_table[(Y + yuvtab_3343[V]) >>13]<<7)&0x7C00);
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
"paddusb b16Dither, %%mm2 \n\t"
"paddusb b16Dither, %%mm4 \n\t"
"paddusb b16Dither, %%mm5 \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
"paddusb g16Dither, %%mm2 \n\t"
"paddusb b16Dither, %%mm4 \n\t"
"paddusb b16Dither, %%mm5 \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)
//FIXME unroll C loop and don't recalculate UV
if(dstbpp==32 || dstbpp==24)
// vertical linear interpolation && yuv2rgb in a single step:
int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);
dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)]; // B
dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)]; // G
dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)]; // R
// vertical linear interpolation && yuv2rgb in a single step:
int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);
((uint16_t*)dest)[0] =
(clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<3)&0x07E0) |
((clip_table[(Y + yuvtab_3343[V]) >>13]<<8)&0xF800);
// vertical linear interpolation && yuv2rgb in a single step:
int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);
((uint16_t*)dest)[0] =
(clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<2)&0x03E0) |
((clip_table[(Y + yuvtab_3343[V]) >>13]<<7)&0x7C00);
758 * YV12 to RGB without scaling or interpolating
/* YV12 to RGB with luma taken directly from buf0 (no vertical interpolation);
 * chroma is still blended with uvalpha.  Delegates to the fully interpolating
 * yuv2rgbX() when full UV interpolation is requested.
 * NOTE(review): fullUVIpol/allwaysIpol are defined outside this excerpt. */
static inline void yuv2rgb1(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
int yalpha1=yalpha^4095; /* == 4095-yalpha: inverse blend weight */
int uvalpha1=uvalpha^4095;
if(fullUVIpol || allwaysIpol)
yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
"paddusb b16Dither, %%mm2 \n\t"
"paddusb b16Dither, %%mm4 \n\t"
"paddusb b16Dither, %%mm5 \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
"paddusb g16Dither, %%mm2 \n\t"
"paddusb b16Dither, %%mm4 \n\t"
"paddusb b16Dither, %%mm5 \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
"m" (yalpha1), "m" (uvalpha1)
//FIXME unroll C loop and don't recalculate UV
if(dstbpp==32 || dstbpp==24)
// yuv2rgb, Y taken straight from buf0 (no vertical luma interpolation):
int Y=yuvtab_2568[buf0[i]>>7];
int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);
dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)]; // B
dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)]; // G
dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)]; // R
// yuv2rgb, Y taken straight from buf0 (no vertical luma interpolation):
int Y=yuvtab_2568[buf0[i]>>7];
int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);
((uint16_t*)dest)[0] =
(clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<3)&0x07E0) |
((clip_table[(Y + yuvtab_3343[V]) >>13]<<8)&0xF800);
// yuv2rgb, Y taken straight from buf0 (no vertical luma interpolation):
int Y=yuvtab_2568[buf0[i]>>7];
int U=((uvbuf0[i/2]*uvalpha1+uvbuf1[i/2]*uvalpha)>>19);
int V=((uvbuf0[i/2+2048]*uvalpha1+uvbuf1[i/2+2048]*uvalpha)>>19);
((uint16_t*)dest)[0] =
(clip_table[(Y + yuvtab_40cf[U]) >>13]>>3) |
((clip_table[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13]<<2)&0x03E0) |
((clip_table[(Y + yuvtab_3343[V]) >>13]<<7)&0x7C00);
/* Horizontally scale one luma line: src (srcWidth bytes) -> dst (dstWidth
 * 16-bit samples, values scaled up by ~128) using the 16.16 fixed-point
 * source step xInc.  Three paths, selected by #ifdefs elided from this
 * excerpt: MMX2 via the runtime-generated funnyYCode, plain x86 asm, and a
 * C fallback. */
static inline void hyscale(uint16_t *dst, int dstWidth, uint8_t *src, int srcWidth, int xInc)
// *** horizontal scale Y line to temp buffer
"pxor %%mm7, %%mm7 \n\t"
"pxor %%mm2, %%mm2 \n\t" // 2*xalpha
"movd %5, %%mm6 \n\t" // xInc&0xFFFF
"punpcklwd %%mm6, %%mm6 \n\t"
"punpcklwd %%mm6, %%mm6 \n\t"
"movq %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t"
"paddw %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t"
"paddw %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t" //0,t,2t,3t t=xInc&0xFF
"movq %%mm2, temp0 \n\t"
"movd %4, %%mm6 \n\t" //(xInc*4)&0xFFFF
"punpcklwd %%mm6, %%mm6 \n\t"
"punpcklwd %%mm6, %%mm6 \n\t"
"xorl %%eax, %%eax \n\t" // i
"movl %0, %%esi \n\t" // src
"movl %1, %%edi \n\t" // buf1
"movl %3, %%edx \n\t" // (xInc*4)>>16
"xorl %%ecx, %%ecx \n\t"
"xorl %%ebx, %%ebx \n\t"
"movw %4, %%bx \n\t" // (xInc*4)&0xFFFF
#define FUNNY_Y_CODE \
"prefetchnta 1024(%%esi) \n\t"\
"prefetchnta 1056(%%esi) \n\t"\
"prefetchnta 1088(%%esi) \n\t"\
"call funnyYCode \n\t"\
"movq temp0, %%mm2 \n\t"\
"xorl %%ecx, %%ecx \n\t"
#define FUNNY_Y_CODE \
"call funnyYCode \n\t"\
"movq temp0, %%mm2 \n\t"\
"xorl %%ecx, %%ecx \n\t"
:: "m" (src), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
"m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF)
: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth-1; i--) dst[i] = src[srcWidth-1]*128; // pad tail by replicating last sample
//NO MMX just normal asm ... (two pixels per iteration, unrolled)
"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, (%%edi, %%eax, 2) \n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
"adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, 2(%%edi, %%eax, 2) \n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
"adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
"addl $2, %%eax \n\t"
"cmpl %2, %%eax \n\t"
:: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
: "%eax", "%ebx", "%ecx", "%edi", "%esi"
} // if MMX2 can't be used
for(i=0;i<dstWidth;i++){ // plain C fallback
register unsigned int xx=xpos>>16;
register unsigned int xalpha=(xpos&0xFFFF)>>9; // 7-bit fraction, so xalpha^127 == 127-xalpha
dst[i]=(src[xx]*(xalpha^127)+src[xx+1]*xalpha); // note: weights sum to 127, not 128
/* Horizontally scale one chroma line pair: src1 (U) and src2 (V), each
 * srcWidth/2 bytes, into dst[0..dstWidth-1] and dst[2048..], 16-bit samples
 * scaled up by ~128; xInc is the 16.16 fixed-point step.  Same three paths
 * as hyscale() (MMX2 funnyUVCode / plain asm / C), with the #ifdefs elided
 * from this excerpt. */
inline static void hcscale(uint16_t *dst, int dstWidth,
uint8_t *src1, uint8_t *src2, int srcWidth, int xInc)
"pxor %%mm7, %%mm7 \n\t"
"pxor %%mm2, %%mm2 \n\t" // 2*xalpha
"movd %5, %%mm6 \n\t" // xInc&0xFFFF
"punpcklwd %%mm6, %%mm6 \n\t"
"punpcklwd %%mm6, %%mm6 \n\t"
"movq %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t"
"paddw %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t"
"paddw %%mm6, %%mm2 \n\t"
"psllq $16, %%mm2 \n\t" //0,t,2t,3t t=xInc&0xFFFF
"movq %%mm2, temp0 \n\t"
"movd %4, %%mm6 \n\t" //(xInc*4)&0xFFFF
"punpcklwd %%mm6, %%mm6 \n\t"
"punpcklwd %%mm6, %%mm6 \n\t"
"xorl %%eax, %%eax \n\t" // i
"movl %0, %%esi \n\t" // src
"movl %1, %%edi \n\t" // buf1
"movl %3, %%edx \n\t" // (xInc*4)>>16
"xorl %%ecx, %%ecx \n\t"
"xorl %%ebx, %%ebx \n\t"
"movw %4, %%bx \n\t" // (xInc*4)&0xFFFF
#define FUNNYUVCODE \
"prefetchnta 1024(%%esi) \n\t"\
"prefetchnta 1056(%%esi) \n\t"\
"prefetchnta 1088(%%esi) \n\t"\
"call funnyUVCode \n\t"\
"movq temp0, %%mm2 \n\t"\
"xorl %%ecx, %%ecx \n\t"
#define FUNNYUVCODE \
"call funnyUVCode \n\t"\
"movq temp0, %%mm2 \n\t"\
"xorl %%ecx, %%ecx \n\t"
"xorl %%eax, %%eax \n\t" // i
"movl %6, %%esi \n\t" // src
"movl %1, %%edi \n\t" // buf1
"addl $4096, %%edi \n\t" // V output goes to dst+2048 words
:: "m" (src1), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
"m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF), "m" (src2)
: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth/2-1; i--) // pad tails by replicating last samples
dst[i] = src1[srcWidth/2-1]*128;
dst[i+2048] = src2[srcWidth/2-1]*128;
"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
"movl %0, %%esi \n\t"
"movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%%esi, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, (%%edi, %%eax, 2) \n\t"
"movzbl (%5, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%5, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, 4096(%%edi, %%eax, 2)\n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
"adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
"addl $1, %%eax \n\t"
"cmpl %2, %%eax \n\t"
:: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF),
: "%eax", "%ebx", "%ecx", "%edi", "%esi"
} // if MMX2 can't be used
for(i=0;i<dstWidth;i++){ // plain C fallback
register unsigned int xx=xpos>>16;
register unsigned int xalpha=(xpos&0xFFFF)>>9; // 7-bit fraction, xalpha^127 == 127-xalpha
dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha); // U
dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha); // V
1129 // *** bilinear scaling and yuv->rgb conversion of yv12 slices:
1130 // *** Note: it's called multiple times while decoding a frame, first time y==0
1131 // *** Designed to upscale, but may work for downscale too.
1132 // s_xinc = (src_width << 16) / dst_width
1133 // s_yinc = (src_height << 16) / dst_height
1134 void SwScale_YV12slice_brg24(unsigned char* srcptr[],int stride[], int y, int h,
1135 unsigned char* dstptr, int dststride, int dstw, int dstbpp,
1136 unsigned int s_xinc,unsigned int s_yinc){
1139 //static int s_yinc=(vo_dga_src_height<<16)/vo_dga_vp_height;
1140 //static int s_xinc=(vo_dga_src_width<<8)/vo_dga_vp_width;
1142 unsigned int s_xinc2;
1144 static int s_srcypos; // points to the dst Pixels center in the source (0 is the center of pixel 0,0 in src)
1147 // last horzontally interpolated lines, used to avoid unnecessary calculations
1148 static int s_last_ypos;
1149 static int s_last_y1pos;
1151 static int static_dstw;
1154 // used to detect a horizontal size change
1155 static int old_dstw= -1;
1156 static int old_s_xinc= -1;
1159 int srcWidth= (dstw*s_xinc + 0x8000)>>16;
1160 int dstUVw= fullUVIpol ? dstw : dstw/2;
1164 canMMX2BeUsed= (s_xinc <= 0x10000 && (dstw&31)==0 && (srcWidth&15)==0) ? 1 : 0;
1167 // match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
1168 // n-2 is the last chrominance sample available
1169 // FIXME this is not perfect, but noone shuld notice the difference, the more correct variant
1170 // would be like the vertical one, but that would require some special code for the
1171 // first and last pixel
1172 if(canMMX2BeUsed) s_xinc+= 20;
1173 else s_xinc = ((srcWidth-2)<<16)/(dstw-2) - 20;
1175 if(fullUVIpol) s_xinc2= s_xinc>>1;
1176 else s_xinc2= s_xinc;
1177 // force calculation of the horizontal interpolation of the first line
1182 s_srcypos= s_yinc/2 - 0x8000;
1185 // cant downscale !!!
1186 if((old_s_xinc != s_xinc || old_dstw!=dstw) && canMMX2BeUsed)
1193 int xpos, xx, xalpha, i;
1200 // create an optimized horizontal scaling routine
1208 "movq (%%esi), %%mm0 \n\t" //FIXME Alignment
1209 "movq %%mm0, %%mm1 \n\t"
1210 "psrlq $8, %%mm0 \n\t"
1211 "punpcklbw %%mm7, %%mm1 \n\t"
1212 "movq %%mm2, %%mm3 \n\t"
1213 "punpcklbw %%mm7, %%mm0 \n\t"
1214 "addw %%bx, %%cx \n\t" //2*xalpha += (4*s_xinc)&0xFFFF
1215 "pshufw $0xFF, %%mm1, %%mm1 \n\t"
1217 "adcl %%edx, %%esi \n\t" //xx+= (4*s_xinc)>>16 + carry
1218 "pshufw $0xFF, %%mm0, %%mm0 \n\t"
1220 "psrlw $9, %%mm3 \n\t"
1221 "psubw %%mm1, %%mm0 \n\t"
1222 "pmullw %%mm3, %%mm0 \n\t"
1223 "paddw %%mm6, %%mm2 \n\t" // 2*alpha += xpos&0xFFFF
1224 "psllw $7, %%mm1 \n\t"
1225 "paddw %%mm1, %%mm0 \n\t"
1227 "movq %%mm0, (%%edi, %%eax) \n\t"
1229 "addl $8, %%eax \n\t"
1242 :"=r" (fragment), "=r" (imm8OfPShufW1), "=r" (imm8OfPShufW2),
1243 "=r" (fragmentLength)
1246 xpos= 0; //s_xinc/2 - 0x8000; // difference between pixel centers
1248 /* choose xinc so that all 8 parts fit exactly
1249 Note: we cannot use just 1 part because it would not fit in the code cache */
1250 // s_xinc2_diff= -((((s_xinc2*(dstw/8))&0xFFFF))/(dstw/8))-10;
1251 // s_xinc_diff= -((((s_xinc*(dstw/8))&0xFFFF))/(dstw/8));
1253 // s_xinc2_diff+= ((0x10000/(dstw/8)));
1255 // s_xinc_diff= s_xinc2_diff*2;
1257 // s_xinc2+= s_xinc2_diff;
1258 // s_xinc+= s_xinc_diff;
1260 // old_s_xinc= s_xinc;
1262 for(i=0; i<dstw/8; i++)
1269 int b=((xpos+s_xinc)>>16) - xx;
1270 int c=((xpos+s_xinc*2)>>16) - xx;
1271 int d=((xpos+s_xinc*3)>>16) - xx;
1273 memcpy(funnyYCode + fragmentLength*i/4, fragment, fragmentLength);
1275 funnyYCode[fragmentLength*i/4 + imm8OfPShufW1]=
1276 funnyYCode[fragmentLength*i/4 + imm8OfPShufW2]=
1277 a | (b<<2) | (c<<4) | (d<<6);
1279 // if we dont need to read 8 bytes than dont :), reduces the chance of
1280 // crossing a cache line
1281 if(d<3) funnyYCode[fragmentLength*i/4 + 1]= 0x6E;
1283 funnyYCode[fragmentLength*(i+4)/4]= RET;
1288 xpos= 0; //s_xinc2/2 - 0x10000; // difference between centers of chrom samples
1289 for(i=0; i<dstUVw/8; i++)
1296 int b=((xpos+s_xinc2)>>16) - xx;
1297 int c=((xpos+s_xinc2*2)>>16) - xx;
1298 int d=((xpos+s_xinc2*3)>>16) - xx;
1300 memcpy(funnyUVCode + fragmentLength*i/4, fragment, fragmentLength);
1302 funnyUVCode[fragmentLength*i/4 + imm8OfPShufW1]=
1303 funnyUVCode[fragmentLength*i/4 + imm8OfPShufW2]=
1304 a | (b<<2) | (c<<4) | (d<<6);
1306 // if we dont need to read 8 bytes than dont :), reduces the chance of
1307 // crossing a cache line
1308 if(d<3) funnyUVCode[fragmentLength*i/4 + 1]= 0x6E;
1310 funnyUVCode[fragmentLength*(i+4)/4]= RET;
1314 // funnyCode[0]= RET;
1321 unsigned char *dest=dstptr+dststride*s_ypos;
1322 int y0=(s_srcypos + 0xFFFF)>>16; // first luminance source line number below the dst line
1323 // points to the dst Pixels center in the source (0 is the center of pixel 0,0 in src)
1324 int srcuvpos= s_srcypos + s_yinc/2 - 0x8000;
1325 int y1=(srcuvpos + 0x1FFFF)>>17; // first chrominance source line number below the dst line
1326 int yalpha=((s_srcypos-1)&0xFFFF)>>4;
1327 int uvalpha=((srcuvpos-1)&0x1FFFF)>>5;
1328 uint16_t *buf0=pix_buf_y[y0&1]; // top line of the interpolated slice
1329 uint16_t *buf1=pix_buf_y[((y0+1)&1)]; // bottom line of the interpolated slice
1330 uint16_t *uvbuf0=pix_buf_uv[y1&1]; // top line of the interpolated slice
1331 uint16_t *uvbuf1=pix_buf_uv[(y1+1)&1]; // bottom line of the interpolated slice
1334 if(y0>=y+h) break; // FIXME wrong, skips last lines, but they are dupliactes anyway
1336 // if this is after the last line than use only the last src line
1349 s_ypos++; s_srcypos+=s_yinc;
1351 //only interpolate the src line horizontally if we didnt do it allready
1355 // skip if first line has been horiz scaled alleady
1356 if(s_last_ypos != y0-1)
1358 // check if first line is before any available src lines
1359 if(y0-1 < y) src=srcptr[0]+(0 )*stride[0];
1360 else src=srcptr[0]+(y0-y-1)*stride[0];
1362 hyscale(buf0, dstw, src, srcWidth, s_xinc);
1364 // check if second line is after any available src lines
1365 if(y0-y >= h) src=srcptr[0]+(h-1)*stride[0];
1366 else src=srcptr[0]+(y0-y)*stride[0];
1368 // the min() is required to avoid reuseing lines which where not available
1369 s_last_ypos= MIN(y0, y+h-1);
1370 hyscale(buf1, dstw, src, srcWidth, s_xinc);
1372 // printf("%d %d %d %d\n", y, y1, s_last_y1pos, h);
1373 // *** horizontal scale U and V lines to temp buffer
1374 if(s_last_y1pos!=y1)
1376 uint8_t *src1, *src2;
1377 // skip if first line has been horiz scaled alleady
1378 if(s_last_y1pos != y1-1)
1380 // check if first line is before any available src lines
1383 src1= srcptr[1]+(0)*stride[1];
1384 src2= srcptr[2]+(0)*stride[2];
1386 src1= srcptr[1]+(y1-y/2-1)*stride[1];
1387 src2= srcptr[2]+(y1-y/2-1)*stride[2];
1389 hcscale(uvbuf0, dstUVw, src1, src2, srcWidth, s_xinc2);
1392 // check if second line is after any available src lines
1395 src1= srcptr[1]+(h/2-1)*stride[1];
1396 src2= srcptr[2]+(h/2-1)*stride[2];
1398 src1= srcptr[1]+(y1-y/2)*stride[1];
1399 src2= srcptr[2]+(y1-y/2)*stride[2];
1401 hcscale(uvbuf1, dstUVw, src1, src2, srcWidth, s_xinc2);
1403 // the min() is required to avoid reuseing lines which where not available
1404 s_last_y1pos= MIN(y1, y/2+h/2-1);
1408 if(ABS(s_yinc - 0x10000) < 10)
1409 yuv2rgb1(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
1411 yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
1414 b16Dither= b16Dither1;
1415 b16Dither1= b16Dither2;
1416 b16Dither2= b16Dither;
1418 g16Dither= g16Dither1;
1419 g16Dither1= g16Dither2;
1420 g16Dither2= g16Dither;
1425 asm volatile("femms");
1426 #elif defined (HAVE_MMX)
1427 asm volatile("emms");
1432 void SwScale_Init(){
1433 // generating tables:
1437 clip_table[i+256]=i;
1438 clip_table[i+512]=255;
1439 yuvtab_2568[i]=(0x2568*(i-16))+(256<<13);
1440 yuvtab_3343[i]=0x3343*(i-128);
1441 yuvtab_0c92[i]=-0x0c92*(i-128);
1442 yuvtab_1a1e[i]=-0x1a1e*(i-128);
1443 yuvtab_40cf[i]=0x40cf*(i-128);