2 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 * The C code (not assembly, MMX, ...) of this file can be used
21 * under the LGPL license.
/* CPU-dispatch helper macros for the templated scaler code.
 *
 * NOTE(review): the #if/#elif/#else/#endif guard lines that select exactly
 * one variant per build (3DNow vs. SSE/MMX2 vs. plain MMX) are missing from
 * this excerpt, which is why each macro appears to be defined several times
 * in a row — confirm against the full file before editing.
 *
 * PREFETCH/PREFETCHW: cache-prefetch mnemonics (3DNow "prefetch/prefetchw",
 *   SSE "prefetchnta/prefetcht0", or a comment-nop fallback).
 * PAVGB: byte average ("pavgb" MMX2 vs. "pavgusb" 3DNow spelling).
 * MOVNTQ: non-temporal 64-bit store when available, plain "movq" otherwise;
 *   the REAL_/wrapper pair forces macro-argument expansion before
 *   stringification. */
31 #define PREFETCH "prefetch"
32 #define PREFETCHW "prefetchw"
34 #define PREFETCH "prefetchnta"
35 #define PREFETCHW "prefetcht0"
37 #define PREFETCH " # nop"
38 #define PREFETCHW " # nop"
42 #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
44 #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
48 #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
50 #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
52 #define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
55 #include "ppc/swscale_altivec_template.c"
/* Vertical scaling of one planar (YV12) plane, fast/low-precision path.
 *
 * Walks a NULL-terminated filter-entry list at offset "offset" from the
 * context (%0): each 16-byte entry holds a source-line pointer followed by
 * a coefficient at +8.  For each 8-pixel group: multiply 16-bit source
 * samples by the coefficient (pmulhw), accumulate into mm3/mm4 which were
 * seeded with the rounder constant, then >>3, pack to unsigned bytes and
 * store 8 output pixels via MOVNTQ.  "test REG_S,REG_S" detects the
 * NULL terminator of the entry list.
 *
 * NOTE(review): the __asm__ volatile( opener, the "1:"/"2:" loop labels and
 * the conditional back-branches ("jnz"/"jb 1b") are missing from this
 * excerpt — the cmp at the bottom has no visible consumer here. */
58 #define YSCALEYUV2YV12X(x, offset, dest, width) \
60     "xor                          %%"REG_a", %%"REG_a"  \n\t"\
61     "movq             "VROUNDER_OFFSET"(%0), %%mm3      \n\t"\
62     "movq                             %%mm3, %%mm4      \n\t"\
63     "lea                     " offset "(%0), %%"REG_d"  \n\t"\
64     "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
65     ASMALIGN(4) /* FIXME Unroll? */\
67     "movq                      8(%%"REG_d"), %%mm0      \n\t" /* filterCoeff */\
68     "movq   " x "(%%"REG_S", %%"REG_a", 2), %%mm2      \n\t" /* srcData */\
69     "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm5      \n\t" /* srcData */\
70     "add                                $16, %%"REG_d"  \n\t"\
71     "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
72     "test                         %%"REG_S", %%"REG_S"  \n\t"\
73     "pmulhw                           %%mm0, %%mm2      \n\t"\
74     "pmulhw                           %%mm0, %%mm5      \n\t"\
75     "paddw                            %%mm2, %%mm3      \n\t"\
76     "paddw                            %%mm5, %%mm4      \n\t"\
78     "psraw                               $3, %%mm3      \n\t"\
79     "psraw                               $3, %%mm4      \n\t"\
80     "packuswb                         %%mm4, %%mm3      \n\t"\
81     MOVNTQ(%%mm3, (%1, %%REGa))\
82     "add                                 $8, %%"REG_a"  \n\t"\
83     "cmp                                 %2, %%"REG_a"  \n\t"\
84     "movq             "VROUNDER_OFFSET"(%0), %%mm3      \n\t"\
85     "movq                             %%mm3, %%mm4      \n\t"\
86     "lea                     " offset "(%0), %%"REG_d"  \n\t"\
87     "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
89     :: "r" (&c->redDither),\
90        "r" (dest), "g" (width)\
91     : "%"REG_a, "%"REG_d, "%"REG_S\
/* Vertical scaling of one planar plane, accurate-rounding path.
 *
 * Processes filter entries two taps at a time using APCK_* packed entries
 * (pointer, second pointer at APCK_PTR2, shared coefficient pair at
 * APCK_COEF, stride APCK_SIZE).  Source words from the two lines are
 * interleaved (punpcklwd/punpckhwd) so pmaddwd computes
 * coeff0*src0 + coeff1*src1 per lane with full 32-bit intermediates in
 * mm4..mm7; only at the end are the sums >>16, packed back to 16 bit,
 * biased by the rounder constant, >>3 and packed to bytes for the MOVNTQ
 * store.  The "test" detects the NULL list terminator.
 *
 * NOTE(review): the asm opener and the "1:"/"2:" loop labels with their
 * back-branches are missing from this excerpt, as above. */
94 #define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
96     "lea                     " offset "(%0), %%"REG_d"  \n\t"\
97     "xor                          %%"REG_a", %%"REG_a"  \n\t"\
98     "pxor                             %%mm4, %%mm4      \n\t"\
99     "pxor                             %%mm5, %%mm5      \n\t"\
100     "pxor                             %%mm6, %%mm6      \n\t"\
101     "pxor                             %%mm7, %%mm7      \n\t"\
102     "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
105     "movq   " x "(%%"REG_S", %%"REG_a", 2), %%mm0      \n\t" /* srcData */\
106     "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm2      \n\t" /* srcData */\
107     "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
108     "movq   " x "(%%"REG_S", %%"REG_a", 2), %%mm1      \n\t" /* srcData */\
109     "movq                             %%mm0, %%mm3      \n\t"\
110     "punpcklwd                        %%mm1, %%mm0      \n\t"\
111     "punpckhwd                        %%mm1, %%mm3      \n\t"\
112     "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
113     "pmaddwd                          %%mm1, %%mm0      \n\t"\
114     "pmaddwd                          %%mm1, %%mm3      \n\t"\
115     "paddd                            %%mm0, %%mm4      \n\t"\
116     "paddd                            %%mm3, %%mm5      \n\t"\
117     "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm3      \n\t" /* srcData */\
118     "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
119     "add           $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
120     "test                         %%"REG_S", %%"REG_S"  \n\t"\
121     "movq                             %%mm2, %%mm0      \n\t"\
122     "punpcklwd                        %%mm3, %%mm2      \n\t"\
123     "punpckhwd                        %%mm3, %%mm0      \n\t"\
124     "pmaddwd                          %%mm1, %%mm2      \n\t"\
125     "pmaddwd                          %%mm1, %%mm0      \n\t"\
126     "paddd                            %%mm2, %%mm6      \n\t"\
127     "paddd                            %%mm0, %%mm7      \n\t"\
129     "psrad                              $16, %%mm4      \n\t"\
130     "psrad                              $16, %%mm5      \n\t"\
131     "psrad                              $16, %%mm6      \n\t"\
132     "psrad                              $16, %%mm7      \n\t"\
133     "movq             "VROUNDER_OFFSET"(%0), %%mm0      \n\t"\
134     "packssdw                         %%mm5, %%mm4      \n\t"\
135     "packssdw                         %%mm7, %%mm6      \n\t"\
136     "paddw                            %%mm0, %%mm4      \n\t"\
137     "paddw                            %%mm0, %%mm6      \n\t"\
138     "psraw                               $3, %%mm4      \n\t"\
139     "psraw                               $3, %%mm6      \n\t"\
140     "packuswb                         %%mm6, %%mm4      \n\t"\
141     MOVNTQ(%%mm4, (%1, %%REGa))\
142     "add                                 $8, %%"REG_a"  \n\t"\
143     "cmp                                 %2, %%"REG_a"  \n\t"\
144     "lea                     " offset "(%0), %%"REG_d"  \n\t"\
145     "pxor                             %%mm4, %%mm4      \n\t"\
146     "pxor                             %%mm5, %%mm5      \n\t"\
147     "pxor                             %%mm6, %%mm6      \n\t"\
148     "pxor                             %%mm7, %%mm7      \n\t"\
149     "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
151     :: "r" (&c->redDither),\
152        "r" (dest), "g" (width)\
153     : "%"REG_a, "%"REG_d, "%"REG_S\
/* 1:1 vertical "scale" (single source line, unity filter).
 *
 * YSCALEYUV2YV121: truncating variant — load 8 words, arithmetic shift
 * right by 7 (drop the fixed-point fraction), pack to bytes, store.
 *
 * YSCALEYUV2YV121_ACCURATE: rounding variant — builds the constant 0x0040
 * in every word of mm7 (pcmpeqw/psrlw $15 gives 0x0001, psllw $6 -> 64,
 * i.e. half of 1<<7) and adds it with signed saturation before the >>7,
 * so the conversion rounds to nearest instead of truncating.
 *
 * NOTE(review): loop labels and the terminating branch after the final
 * "add" are not visible in this excerpt. */
156 #define YSCALEYUV2YV121 \
157     "mov %2, %%"REG_a"                    \n\t"\
158     ASMALIGN(4) /* FIXME Unroll? */\
160     "movq  (%0, %%"REG_a", 2), %%mm0      \n\t"\
161     "movq 8(%0, %%"REG_a", 2), %%mm1      \n\t"\
162     "psraw                 $7, %%mm0      \n\t"\
163     "psraw                 $7, %%mm1      \n\t"\
164     "packuswb           %%mm1, %%mm0      \n\t"\
165     MOVNTQ(%%mm0, (%1, %%REGa))\
166     "add                   $8, %%"REG_a"  \n\t"\
169 #define YSCALEYUV2YV121_ACCURATE \
170     "mov %2, %%"REG_a"                    \n\t"\
171     "pcmpeqw %%mm7, %%mm7                 \n\t"\
172     "psrlw                 $15, %%mm7     \n\t"\
173     "psllw                  $6, %%mm7     \n\t"\
174     ASMALIGN(4) /* FIXME Unroll? */\
176     "movq  (%0, %%"REG_a", 2), %%mm0      \n\t"\
177     "movq 8(%0, %%"REG_a", 2), %%mm1      \n\t"\
178     "paddsw             %%mm7, %%mm0      \n\t"\
179     "paddsw             %%mm7, %%mm1      \n\t"\
180     "psraw                 $7, %%mm0      \n\t"\
181     "psraw                 $7, %%mm1      \n\t"\
182     "packuswb           %%mm1, %%mm0      \n\t"\
183     MOVNTQ(%%mm0, (%1, %%REGa))\
184     "add                   $8, %%"REG_a"  \n\t"\
/* NOTE(review): orphaned inline-asm operand/clobber list — the asm
 * statement (or macro) it terminates begins before the lines visible in
 * this excerpt.  It passes the negated filter sizes and end-pointers of
 * the luma/chroma filter and source arrays plus dest/dstW, and declares
 * eax..esi clobbered.  Confirm against the full file before touching. */
188     :: "m" (-lumFilterSize), "m" (-chrFilterSize),
189        "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
190        "r" (dest), "m" (dstW),
191        "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
192     : "%eax", "%ebx", "%ecx", "%edx", "%esi"
/* Building blocks for the packed-output (RGB/YUY2/...) multi-tap vertical
 * scaler, fast path.
 *
 * _UV: accumulates one chroma filter pass — U samples at (src,idx) and V
 *   samples VOF bytes further on — into mm3/mm4, seeded with the rounder;
 *   walks the NULL-terminated 16-byte {ptr,coeff} entry list exactly like
 *   YSCALEYUV2YV12X.
 * _YA: same accumulation pattern for two luma (or alpha) quads, fully
 *   parameterized over the filter-list offset, coefficient register,
 *   scratch and destination registers so it can be reused for alpha.
 * YSCALEYUV2PACKEDX: chroma pass followed by the luma pass into mm1/mm7.
 * _END: shared tail — input/output operand list and clobbers for the
 *   enclosing asm statement ("dummy" operands keep operand numbering).
 *
 * NOTE(review): the __asm__ volatile( opener and the numeric loop labels /
 * back-branches are missing from this excerpt. */
194 #define YSCALEYUV2PACKEDX_UV \
196     "xor                   %%"REG_a", %%"REG_a"     \n\t"\
200     "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"     \n\t"\
201     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
202     "movq      "VROUNDER_OFFSET"(%0), %%mm3         \n\t"\
203     "movq                      %%mm3, %%mm4         \n\t"\
206     "movq               8(%%"REG_d"), %%mm0         \n\t" /* filterCoeff */\
207     "movq     (%%"REG_S", %%"REG_a"), %%mm2         \n\t" /* UsrcData */\
208     "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm5         \n\t" /* VsrcData */\
209     "add                         $16, %%"REG_d"     \n\t"\
210     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
211     "pmulhw                    %%mm0, %%mm2         \n\t"\
212     "pmulhw                    %%mm0, %%mm5         \n\t"\
213     "paddw                     %%mm2, %%mm3         \n\t"\
214     "paddw                     %%mm5, %%mm4         \n\t"\
215     "test                  %%"REG_S", %%"REG_S"     \n\t"\
218 #define YSCALEYUV2PACKEDX_YA(offset,coeff,src1,src2,dst1,dst2) \
219     "lea                "offset"(%0), %%"REG_d"     \n\t"\
220     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
221     "movq      "VROUNDER_OFFSET"(%0), "#dst1"       \n\t"\
222     "movq                    "#dst1", "#dst2"       \n\t"\
225     "movq               8(%%"REG_d"), "#coeff"      \n\t" /* filterCoeff */\
226     "movq  (%%"REG_S", %%"REG_a", 2), "#src1"       \n\t" /* Y1srcData */\
227     "movq 8(%%"REG_S", %%"REG_a", 2), "#src2"       \n\t" /* Y2srcData */\
228     "add                         $16, %%"REG_d"            \n\t"\
229     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
230     "pmulhw                 "#coeff", "#src1"       \n\t"\
231     "pmulhw                 "#coeff", "#src2"       \n\t"\
232     "paddw                   "#src1", "#dst1"       \n\t"\
233     "paddw                   "#src2", "#dst2"       \n\t"\
234     "test                  %%"REG_S", %%"REG_S"     \n\t"\
237 #define YSCALEYUV2PACKEDX \
238     YSCALEYUV2PACKEDX_UV \
239     YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET,%%mm0,%%mm2,%%mm5,%%mm1,%%mm7) \
241 #define YSCALEYUV2PACKEDX_END                     \
242         :: "r" (&c->redDither),                   \
243             "m" (dummy), "m" (dummy), "m" (dummy),\
244             "r" (dest), "m" (dstW)                \
245         : "%"REG_a, "%"REG_d, "%"REG_S            \
/* Accurate-rounding variants of the packed-output vertical scaler.
 *
 * _ACCURATE_UV: two-tap-at-a-time chroma accumulation using the APCK_*
 *   packed filter entries and pmaddwd for 32-bit intermediates (same
 *   scheme as YSCALEYUV2YV12X_ACCURATE); the biased 16-bit U/V results
 *   are parked in the context's U_TEMP/V_TEMP scratch because all eight
 *   MMX registers are needed for the following luma pass.
 * _ACCURATE_YA(offset): identical accumulation for the luma (or alpha)
 *   samples into mm1/mm7, then reloads U/V from U_TEMP/V_TEMP into
 *   mm3/mm4 so the register layout matches what YSCALEYUV2RGBX expects.
 * YSCALEYUV2PACKEDX_ACCURATE: chroma pass then luma pass.
 *
 * NOTE(review): asm openers, loop labels and back-branches are missing
 * from this excerpt. */
248 #define YSCALEYUV2PACKEDX_ACCURATE_UV \
250     "xor %%"REG_a", %%"REG_a"                       \n\t"\
254     "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"     \n\t"\
255     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
256     "pxor                      %%mm4, %%mm4         \n\t"\
257     "pxor                      %%mm5, %%mm5         \n\t"\
258     "pxor                      %%mm6, %%mm6         \n\t"\
259     "pxor                      %%mm7, %%mm7         \n\t"\
262     "movq     (%%"REG_S", %%"REG_a"), %%mm0         \n\t" /* UsrcData */\
263     "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm2         \n\t" /* VsrcData */\
264     "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S"     \n\t"\
265     "movq     (%%"REG_S", %%"REG_a"), %%mm1         \n\t" /* UsrcData */\
266     "movq                      %%mm0, %%mm3         \n\t"\
267     "punpcklwd                 %%mm1, %%mm0         \n\t"\
268     "punpckhwd                 %%mm1, %%mm3         \n\t"\
269     "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1         \n\t" /* filterCoeff */\
270     "pmaddwd                   %%mm1, %%mm0         \n\t"\
271     "pmaddwd                   %%mm1, %%mm3         \n\t"\
272     "paddd                     %%mm0, %%mm4         \n\t"\
273     "paddd                     %%mm3, %%mm5         \n\t"\
274     "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm3         \n\t" /* VsrcData */\
275     "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S"     \n\t"\
276     "add           $"STR(APCK_SIZE)", %%"REG_d"     \n\t"\
277     "test                  %%"REG_S", %%"REG_S"     \n\t"\
278     "movq                      %%mm2, %%mm0         \n\t"\
279     "punpcklwd                 %%mm3, %%mm2         \n\t"\
280     "punpckhwd                 %%mm3, %%mm0         \n\t"\
281     "pmaddwd                   %%mm1, %%mm2         \n\t"\
282     "pmaddwd                   %%mm1, %%mm0         \n\t"\
283     "paddd                     %%mm2, %%mm6         \n\t"\
284     "paddd                     %%mm0, %%mm7         \n\t"\
286     "psrad                       $16, %%mm4         \n\t"\
287     "psrad                       $16, %%mm5         \n\t"\
288     "psrad                       $16, %%mm6         \n\t"\
289     "psrad                       $16, %%mm7         \n\t"\
290     "movq      "VROUNDER_OFFSET"(%0), %%mm0         \n\t"\
291     "packssdw                  %%mm5, %%mm4         \n\t"\
292     "packssdw                  %%mm7, %%mm6         \n\t"\
293     "paddw                     %%mm0, %%mm4         \n\t"\
294     "paddw                     %%mm0, %%mm6         \n\t"\
295     "movq                      %%mm4, "U_TEMP"(%0)  \n\t"\
296     "movq                      %%mm6, "V_TEMP"(%0)  \n\t"\
298 #define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \
299     "lea                "offset"(%0), %%"REG_d"     \n\t"\
300     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
301     "pxor                      %%mm1, %%mm1         \n\t"\
302     "pxor                      %%mm5, %%mm5         \n\t"\
303     "pxor                      %%mm7, %%mm7         \n\t"\
304     "pxor                      %%mm6, %%mm6         \n\t"\
307     "movq  (%%"REG_S", %%"REG_a", 2), %%mm0         \n\t" /* Y1srcData */\
308     "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2         \n\t" /* Y2srcData */\
309     "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S"     \n\t"\
310     "movq  (%%"REG_S", %%"REG_a", 2), %%mm4         \n\t" /* Y1srcData */\
311     "movq                      %%mm0, %%mm3         \n\t"\
312     "punpcklwd                 %%mm4, %%mm0         \n\t"\
313     "punpckhwd                 %%mm4, %%mm3         \n\t"\
314     "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4        \n\t" /* filterCoeff */\
315     "pmaddwd                   %%mm4, %%mm0         \n\t"\
316     "pmaddwd                   %%mm4, %%mm3         \n\t"\
317     "paddd                     %%mm0, %%mm1         \n\t"\
318     "paddd                     %%mm3, %%mm5         \n\t"\
319     "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3         \n\t" /* Y2srcData */\
320     "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S"     \n\t"\
321     "add           $"STR(APCK_SIZE)", %%"REG_d"     \n\t"\
322     "test                  %%"REG_S", %%"REG_S"     \n\t"\
323     "movq                      %%mm2, %%mm0         \n\t"\
324     "punpcklwd                 %%mm3, %%mm2         \n\t"\
325     "punpckhwd                 %%mm3, %%mm0         \n\t"\
326     "pmaddwd                   %%mm4, %%mm2         \n\t"\
327     "pmaddwd                   %%mm4, %%mm0         \n\t"\
328     "paddd                     %%mm2, %%mm7         \n\t"\
329     "paddd                     %%mm0, %%mm6         \n\t"\
331     "psrad                       $16, %%mm1         \n\t"\
332     "psrad                       $16, %%mm5         \n\t"\
333     "psrad                       $16, %%mm7         \n\t"\
334     "psrad                       $16, %%mm6         \n\t"\
335     "movq      "VROUNDER_OFFSET"(%0), %%mm0         \n\t"\
336     "packssdw                  %%mm5, %%mm1         \n\t"\
337     "packssdw                  %%mm6, %%mm7         \n\t"\
338     "paddw                     %%mm0, %%mm1         \n\t"\
339     "paddw                     %%mm0, %%mm7         \n\t"\
340     "movq               "U_TEMP"(%0), %%mm3         \n\t"\
341     "movq               "V_TEMP"(%0), %%mm4         \n\t"\
343 #define YSCALEYUV2PACKEDX_ACCURATE \
344     YSCALEYUV2PACKEDX_ACCURATE_UV \
345     YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)
/* YUV -> RGB colorspace conversion stage for the X (multi-tap) scalers.
 *
 * Input register contract (established by the YSCALEYUV2PACKEDX* macros):
 * mm3 = 4 U words, mm4 = 4 V words, mm1 = first 4 Y words, mm7 = next 4.
 * Subtracts the U/V/Y offsets, scales by the per-context coefficients
 * (pmulhw against UB/UG/VG/VR/Y tables at fixed offsets from %0), merges
 * the two green contributions (mm4 += mm3), duplicates each chroma word
 * across the pixel pair it covers (punpck[lh]wd reg,reg), adds luma, and
 * finally packs with unsigned saturation so that on exit:
 *   mm2 = 8 B bytes, mm4 = 8 G bytes, mm5 = 8 R bytes
 * ready for the WRITE* output macros. */
347 #define YSCALEYUV2RGBX \
348     "psubw  "U_OFFSET"(%0), %%mm3       \n\t" /* (U-128)8*/\
349     "psubw  "V_OFFSET"(%0), %%mm4       \n\t" /* (V-128)8*/\
350     "movq            %%mm3, %%mm2       \n\t" /* (U-128)8*/\
351     "movq            %%mm4, %%mm5       \n\t" /* (V-128)8*/\
352     "pmulhw "UG_COEFF"(%0), %%mm3       \n\t"\
353     "pmulhw "VG_COEFF"(%0), %%mm4       \n\t"\
354     /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
355     "pmulhw "UB_COEFF"(%0), %%mm2       \n\t"\
356     "pmulhw "VR_COEFF"(%0), %%mm5       \n\t"\
357     "psubw  "Y_OFFSET"(%0), %%mm1       \n\t" /* 8(Y-16)*/\
358     "psubw  "Y_OFFSET"(%0), %%mm7       \n\t" /* 8(Y-16)*/\
359     "pmulhw  "Y_COEFF"(%0), %%mm1       \n\t"\
360     "pmulhw  "Y_COEFF"(%0), %%mm7       \n\t"\
361     /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
362     "paddw           %%mm3, %%mm4       \n\t"\
363     "movq            %%mm2, %%mm0       \n\t"\
364     "movq            %%mm5, %%mm6       \n\t"\
365     "movq            %%mm4, %%mm3       \n\t"\
366     "punpcklwd       %%mm2, %%mm2       \n\t"\
367     "punpcklwd       %%mm5, %%mm5       \n\t"\
368     "punpcklwd       %%mm4, %%mm4       \n\t"\
369     "paddw           %%mm1, %%mm2       \n\t"\
370     "paddw           %%mm1, %%mm5       \n\t"\
371     "paddw           %%mm1, %%mm4       \n\t"\
372     "punpckhwd       %%mm0, %%mm0       \n\t"\
373     "punpckhwd       %%mm6, %%mm6       \n\t"\
374     "punpckhwd       %%mm3, %%mm3       \n\t"\
375     "paddw           %%mm7, %%mm0       \n\t"\
376     "paddw           %%mm7, %%mm6       \n\t"\
377     "paddw           %%mm7, %%mm3       \n\t"\
378     /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
379     "packuswb        %%mm0, %%mm2       \n\t"\
380     "packuswb        %%mm6, %%mm5       \n\t"\
381     "packuswb        %%mm3, %%mm4       \n\t"\
/* Two-line vertical interpolation producing packed YUV output (no RGB
 * conversion): blends buf0/buf1 (luma via %0/%1) and uvbuf0/uvbuf1
 * (chroma via %2/%3, V plane VOF bytes in) with the alpha weights stored
 * in the context's filter slots.  The weights are pre-shifted right by 3
 * once up front (and written back) so the pmulhw-based lerp
 *   dst = b1>>7 + ((b0-b1)*alpha >> 16)
 * lands in the 8-bit range expected by WRITEYUY2.
 * Results: mm3/mm4 = interpolated U/V, mm1/mm7 = interpolated Y pairs.
 *
 * NOTE(review): the loop label between the xor and the first movq is
 * missing from this excerpt. */
383 #define REAL_YSCALEYUV2PACKED(index, c) \
384     "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0              \n\t"\
385     "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1              \n\t"\
386     "psraw                $3, %%mm0                           \n\t"\
387     "psraw                $3, %%mm1                           \n\t"\
388     "movq              %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
389     "movq              %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
390     "xor            "#index", "#index"                        \n\t"\
393     "movq     (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
394     "movq     (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
395     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
396     "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
397     "psubw             %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
398     "psubw             %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
399     "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0    \n\t"\
400     "pmulhw            %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
401     "pmulhw            %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
402     "psraw                $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
403     "psraw                $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
404     "paddw             %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
405     "paddw             %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
406     "movq  (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
407     "movq  (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
408     "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
409     "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
410     "psubw             %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
411     "psubw             %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
412     "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
413     "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
414     "psraw                $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
415     "psraw                $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
416     "paddw             %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
417     "paddw             %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
419 #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
/* Two-line interpolation plus YUV->RGB conversion, split into stages:
 *
 * _UV: lerp chroma between uvbuf0 (%2) and uvbuf1 (%3) with the chroma
 *   filter weight, keep 4 extra fraction bits (>>4 instead of >>7), then
 *   apply the U/V offset and the green coefficients -> mm2/mm3/mm4/mm5.
 * _YA: same lerp for two quads of luma (parameterized buffers b1/b2,
 *   normally %0/%1) -> mm1/mm7.
 * _COEFF: the remaining coefficient multiplies and the identical
 *   interleave/add/pack sequence as YSCALEYUV2RGBX, leaving
 *   mm2=B, mm4=G, mm5=R as 8 saturated bytes each.
 * YSCALEYUV2RGB(index,c): glue of the three stages.
 * The REAL_/wrapper pairs force argument expansion before "#" stringify.
 *
 * NOTE(review): the loop label after the "xor" in _UV is missing from
 * this excerpt. */
421 #define REAL_YSCALEYUV2RGB_UV(index, c) \
422     "xor            "#index", "#index"  \n\t"\
425     "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
426     "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
427     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
428     "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
429     "psubw             %%mm3, %%mm2     \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
430     "psubw             %%mm4, %%mm5     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
431     "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0    \n\t"\
432     "pmulhw            %%mm0, %%mm2     \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
433     "pmulhw            %%mm0, %%mm5     \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
434     "psraw                $4, %%mm3     \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
435     "psraw                $4, %%mm4     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
436     "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
437     "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
438     "psubw  "U_OFFSET"("#c"), %%mm3     \n\t" /* (U-128)8*/\
439     "psubw  "V_OFFSET"("#c"), %%mm4     \n\t" /* (V-128)8*/\
440     "movq              %%mm3, %%mm2     \n\t" /* (U-128)8*/\
441     "movq              %%mm4, %%mm5     \n\t" /* (V-128)8*/\
442     "pmulhw "UG_COEFF"("#c"), %%mm3     \n\t"\
443     "pmulhw "VG_COEFF"("#c"), %%mm4     \n\t"\
444     /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
446 #define REAL_YSCALEYUV2RGB_YA(index, c, b1, b2) \
447     "movq  ("#b1", "#index", 2), %%mm0     \n\t" /*buf0[eax]*/\
448     "movq  ("#b2", "#index", 2), %%mm1     \n\t" /*buf1[eax]*/\
449     "movq 8("#b1", "#index", 2), %%mm6     \n\t" /*buf0[eax]*/\
450     "movq 8("#b2", "#index", 2), %%mm7     \n\t" /*buf1[eax]*/\
451     "psubw             %%mm1, %%mm0     \n\t" /* buf0[eax] - buf1[eax]*/\
452     "psubw             %%mm7, %%mm6     \n\t" /* buf0[eax] - buf1[eax]*/\
453     "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
454     "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
455     "psraw                $4, %%mm1     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
456     "psraw                $4, %%mm7     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
457     "paddw             %%mm0, %%mm1     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
458     "paddw             %%mm6, %%mm7     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
460 #define REAL_YSCALEYUV2RGB_COEFF(c) \
461     "pmulhw "UB_COEFF"("#c"), %%mm2     \n\t"\
462     "pmulhw "VR_COEFF"("#c"), %%mm5     \n\t"\
463     "psubw  "Y_OFFSET"("#c"), %%mm1     \n\t" /* 8(Y-16)*/\
464     "psubw  "Y_OFFSET"("#c"), %%mm7     \n\t" /* 8(Y-16)*/\
465     "pmulhw  "Y_COEFF"("#c"), %%mm1     \n\t"\
466     "pmulhw  "Y_COEFF"("#c"), %%mm7     \n\t"\
467     /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
468     "paddw             %%mm3, %%mm4     \n\t"\
469     "movq              %%mm2, %%mm0     \n\t"\
470     "movq              %%mm5, %%mm6     \n\t"\
471     "movq              %%mm4, %%mm3     \n\t"\
472     "punpcklwd         %%mm2, %%mm2     \n\t"\
473     "punpcklwd         %%mm5, %%mm5     \n\t"\
474     "punpcklwd         %%mm4, %%mm4     \n\t"\
475     "paddw             %%mm1, %%mm2     \n\t"\
476     "paddw             %%mm1, %%mm5     \n\t"\
477     "paddw             %%mm1, %%mm4     \n\t"\
478     "punpckhwd         %%mm0, %%mm0     \n\t"\
479     "punpckhwd         %%mm6, %%mm6     \n\t"\
480     "punpckhwd         %%mm3, %%mm3     \n\t"\
481     "paddw             %%mm7, %%mm0     \n\t"\
482     "paddw             %%mm7, %%mm6     \n\t"\
483     "paddw             %%mm7, %%mm3     \n\t"\
484     /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
485     "packuswb          %%mm0, %%mm2     \n\t"\
486     "packuswb          %%mm6, %%mm5     \n\t"\
487     "packuswb          %%mm3, %%mm4     \n\t"\
489 #define YSCALEYUV2RGB_YA(index, c, b1, b2) REAL_YSCALEYUV2RGB_YA(index, c, b1, b2)
491 #define YSCALEYUV2RGB(index, c) \
492     REAL_YSCALEYUV2RGB_UV(index, c) \
493     REAL_YSCALEYUV2RGB_YA(index, c, %0, %1) \
494     REAL_YSCALEYUV2RGB_COEFF(c)
/* Single-source-line ("1") variants: no vertical blend, uvbuf0/buf0 are
 * used directly.
 *
 * REAL_YSCALEYUV2PACKED1: just shifts Y/U/V down to 8-bit range (>>7)
 *   for packed-YUV output; results in mm3/mm4 (U/V) and mm1/mm7 (Y).
 * REAL_YSCALEYUV2RGB1: keeps 4 fraction bits (>>4) and performs the full
 *   offset/coefficient/interleave/pack pipeline, ending with
 *   mm2=B, mm4=G, mm5=R saturated bytes, same register contract as the
 *   other RGB front-ends so the WRITE* macros can follow.
 *
 * NOTE(review): loop labels after the "xor" lines are missing from this
 * excerpt. */
496 #define REAL_YSCALEYUV2PACKED1(index, c) \
497     "xor            "#index", "#index"  \n\t"\
500     "movq     (%2, "#index"), %%mm3     \n\t" /* uvbuf0[eax]*/\
501     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
502     "psraw                $7, %%mm3     \n\t" \
503     "psraw                $7, %%mm4     \n\t" \
504     "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
505     "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
506     "psraw                $7, %%mm1     \n\t" \
507     "psraw                $7, %%mm7     \n\t" \
509 #define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
511 #define REAL_YSCALEYUV2RGB1(index, c) \
512     "xor            "#index", "#index"  \n\t"\
515     "movq     (%2, "#index"), %%mm3     \n\t" /* uvbuf0[eax]*/\
516     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
517     "psraw                $4, %%mm3     \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
518     "psraw                $4, %%mm4     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
519     "psubw  "U_OFFSET"("#c"), %%mm3     \n\t" /* (U-128)8*/\
520     "psubw  "V_OFFSET"("#c"), %%mm4     \n\t" /* (V-128)8*/\
521     "movq              %%mm3, %%mm2     \n\t" /* (U-128)8*/\
522     "movq              %%mm4, %%mm5     \n\t" /* (V-128)8*/\
523     "pmulhw "UG_COEFF"("#c"), %%mm3     \n\t"\
524     "pmulhw "VG_COEFF"("#c"), %%mm4     \n\t"\
525     /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
526     "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
527     "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
528     "psraw                $4, %%mm1     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
529     "psraw                $4, %%mm7     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
530     "pmulhw "UB_COEFF"("#c"), %%mm2     \n\t"\
531     "pmulhw "VR_COEFF"("#c"), %%mm5     \n\t"\
532     "psubw  "Y_OFFSET"("#c"), %%mm1     \n\t" /* 8(Y-16)*/\
533     "psubw  "Y_OFFSET"("#c"), %%mm7     \n\t" /* 8(Y-16)*/\
534     "pmulhw  "Y_COEFF"("#c"), %%mm1     \n\t"\
535     "pmulhw  "Y_COEFF"("#c"), %%mm7     \n\t"\
536     /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
537     "paddw             %%mm3, %%mm4     \n\t"\
538     "movq              %%mm2, %%mm0     \n\t"\
539     "movq              %%mm5, %%mm6     \n\t"\
540     "movq              %%mm4, %%mm3     \n\t"\
541     "punpcklwd         %%mm2, %%mm2     \n\t"\
542     "punpcklwd         %%mm5, %%mm5     \n\t"\
543     "punpcklwd         %%mm4, %%mm4     \n\t"\
544     "paddw             %%mm1, %%mm2     \n\t"\
545     "paddw             %%mm1, %%mm5     \n\t"\
546     "paddw             %%mm1, %%mm4     \n\t"\
547     "punpckhwd         %%mm0, %%mm0     \n\t"\
548     "punpckhwd         %%mm6, %%mm6     \n\t"\
549     "punpckhwd         %%mm3, %%mm3     \n\t"\
550     "paddw             %%mm7, %%mm0     \n\t"\
551     "paddw             %%mm7, %%mm6     \n\t"\
552     "paddw             %%mm7, %%mm3     \n\t"\
553     /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
554     "packuswb          %%mm0, %%mm2     \n\t"\
555     "packuswb          %%mm6, %%mm5     \n\t"\
556     "packuswb          %%mm3, %%mm4     \n\t"\
558 #define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
/* "1b" variants: single luma line but chroma averaged from two lines
 * (uvbuf0+uvbuf1)/2 — used when chroma is vertically halved.
 *
 * REAL_YSCALEYUV2PACKED1b: add the two chroma lines, >>8 (i.e. average
 *   then drop the 7 fraction bits) for packed-YUV output; luma just >>7.
 * REAL_YSCALEYUV2RGB1b: same chroma average but >>5 to keep 4 fraction
 *   bits (the FIXME notes the paddw may overflow for extreme inputs),
 *   then the standard offset/coefficient/interleave/pack pipeline ending
 *   with mm2=B, mm4=G, mm5=R bytes.
 * REAL_YSCALEYUV2RGB1_ALPHA: loads 8 alpha samples from abuf0 (%1),
 *   >>7 and packs them into mm7 for the WRITEBGR32 "a" operand.
 *
 * NOTE(review): loop labels after the "xor" lines are missing from this
 * excerpt. */
560 #define REAL_YSCALEYUV2PACKED1b(index, c) \
561     "xor "#index", "#index"             \n\t"\
564     "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
565     "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
566     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
567     "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
568     "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
569     "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
570     "psrlw                $8, %%mm3     \n\t" \
571     "psrlw                $8, %%mm4     \n\t" \
572     "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
573     "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
574     "psraw                $7, %%mm1     \n\t" \
575     "psraw                $7, %%mm7     \n\t"
576 #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
578 // do vertical chrominance interpolation
579 #define REAL_YSCALEYUV2RGB1b(index, c) \
580     "xor            "#index", "#index"  \n\t"\
583     "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
584     "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
585     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
586     "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
587     "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
588     "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
589     "psrlw                $5, %%mm3     \n\t" /*FIXME might overflow*/\
590     "psrlw                $5, %%mm4     \n\t" /*FIXME might overflow*/\
591     "psubw  "U_OFFSET"("#c"), %%mm3     \n\t" /* (U-128)8*/\
592     "psubw  "V_OFFSET"("#c"), %%mm4     \n\t" /* (V-128)8*/\
593     "movq              %%mm3, %%mm2     \n\t" /* (U-128)8*/\
594     "movq              %%mm4, %%mm5     \n\t" /* (V-128)8*/\
595     "pmulhw "UG_COEFF"("#c"), %%mm3     \n\t"\
596     "pmulhw "VG_COEFF"("#c"), %%mm4     \n\t"\
597     /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
598     "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
599     "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
600     "psraw                $4, %%mm1     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
601     "psraw                $4, %%mm7     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
602     "pmulhw "UB_COEFF"("#c"), %%mm2     \n\t"\
603     "pmulhw "VR_COEFF"("#c"), %%mm5     \n\t"\
604     "psubw  "Y_OFFSET"("#c"), %%mm1     \n\t" /* 8(Y-16)*/\
605     "psubw  "Y_OFFSET"("#c"), %%mm7     \n\t" /* 8(Y-16)*/\
606     "pmulhw  "Y_COEFF"("#c"), %%mm1     \n\t"\
607     "pmulhw  "Y_COEFF"("#c"), %%mm7     \n\t"\
608     /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
609     "paddw             %%mm3, %%mm4     \n\t"\
610     "movq              %%mm2, %%mm0     \n\t"\
611     "movq              %%mm5, %%mm6     \n\t"\
612     "movq              %%mm4, %%mm3     \n\t"\
613     "punpcklwd         %%mm2, %%mm2     \n\t"\
614     "punpcklwd         %%mm5, %%mm5     \n\t"\
615     "punpcklwd         %%mm4, %%mm4     \n\t"\
616     "paddw             %%mm1, %%mm2     \n\t"\
617     "paddw             %%mm1, %%mm5     \n\t"\
618     "paddw             %%mm1, %%mm4     \n\t"\
619     "punpckhwd         %%mm0, %%mm0     \n\t"\
620     "punpckhwd         %%mm6, %%mm6     \n\t"\
621     "punpckhwd         %%mm3, %%mm3     \n\t"\
622     "paddw             %%mm7, %%mm0     \n\t"\
623     "paddw             %%mm7, %%mm6     \n\t"\
624     "paddw             %%mm7, %%mm3     \n\t"\
625     /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
626     "packuswb          %%mm0, %%mm2     \n\t"\
627     "packuswb          %%mm6, %%mm5     \n\t"\
628     "packuswb          %%mm3, %%mm4     \n\t"\
630 #define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
632 #define REAL_YSCALEYUV2RGB1_ALPHA(index) \
633     "movq  (%1, "#index", 2), %%mm7 \n\t" /* abuf0[index  ] */\
634     "movq 8(%1, "#index", 2), %%mm1 \n\t" /* abuf0[index+4] */\
635     "psraw                $7, %%mm7 \n\t" /* abuf0[index  ] >>7 */\
636     "psraw                $7, %%mm1 \n\t" /* abuf0[index+4] >>7 */\
637     "packuswb          %%mm1, %%mm7 \n\t"
638 #define YSCALEYUV2RGB1_ALPHA(index) REAL_YSCALEYUV2RGB1_ALPHA(index)
/* Output/store macros.  All take B/G/R (and for BGR32, alpha) as 8-byte
 * packed MMX registers and write one group of 8 pixels, then advance the
 * pixel index by 8 and compare against dstw.
 *
 * REAL_WRITEBGR32: interleaves b/g/r/a bytes into four ARGB quadwords
 *   (two levels of punpck) and stores 32 bytes at dst+index*4.
 * REAL_WRITERGB16: masks channels to 5/6/5 bits (bF8/bFC constants),
 *   shifts B down by 3, recombines via byte-unpacks against R and the
 *   zero register mm7, shifts G into place and ORs -> 16 bytes of RGB565
 *   at dst+index*2.  Expects mm2=B, mm4=G, mm5=R, mm7=0.
 * REAL_WRITERGB15: same scheme with 5/5/5 masks (R additionally >>1,
 *   G shifted by 2) producing RGB555.
 *
 * NOTE(review): the conditional branch consuming the trailing "cmp" is
 * missing from this excerpt in each macro. */
640 #define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
641     "movq       "#b", "#q2"     \n\t" /* B */\
642     "movq       "#r", "#t"      \n\t" /* R */\
643     "punpcklbw  "#g", "#b"      \n\t" /* GBGBGBGB 0 */\
644     "punpcklbw  "#a", "#r"      \n\t" /* ARARARAR 0 */\
645     "punpckhbw  "#g", "#q2"     \n\t" /* GBGBGBGB 2 */\
646     "punpckhbw  "#a", "#t"      \n\t" /* ARARARAR 2 */\
647     "movq       "#b", "#q0"     \n\t" /* GBGBGBGB 0 */\
648     "movq      "#q2", "#q3"     \n\t" /* GBGBGBGB 2 */\
649     "punpcklwd  "#r", "#q0"     \n\t" /* ARGBARGB 0 */\
650     "punpckhwd  "#r", "#b"      \n\t" /* ARGBARGB 1 */\
651     "punpcklwd  "#t", "#q2"     \n\t" /* ARGBARGB 2 */\
652     "punpckhwd  "#t", "#q3"     \n\t" /* ARGBARGB 3 */\
654     MOVNTQ(   q0,   (dst, index, 4))\
655     MOVNTQ(    b,  8(dst, index, 4))\
656     MOVNTQ(   q2, 16(dst, index, 4))\
657     MOVNTQ(   q3, 24(dst, index, 4))\
659     "add      $8, "#index"      \n\t"\
660     "cmp "#dstw", "#index"      \n\t"\
662 #define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)  REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)
664 #define REAL_WRITERGB16(dst, dstw, index) \
665     "pand "MANGLE(bF8)", %%mm2  \n\t" /* B */\
666     "pand "MANGLE(bFC)", %%mm4  \n\t" /* G */\
667     "pand "MANGLE(bF8)", %%mm5  \n\t" /* R */\
668     "psrlq           $3, %%mm2  \n\t"\
670     "movq         %%mm2, %%mm1  \n\t"\
671     "movq         %%mm4, %%mm3  \n\t"\
673     "punpcklbw    %%mm7, %%mm3  \n\t"\
674     "punpcklbw    %%mm5, %%mm2  \n\t"\
675     "punpckhbw    %%mm7, %%mm4  \n\t"\
676     "punpckhbw    %%mm5, %%mm1  \n\t"\
678     "psllq           $3, %%mm3  \n\t"\
679     "psllq           $3, %%mm4  \n\t"\
681     "por          %%mm3, %%mm2  \n\t"\
682     "por          %%mm4, %%mm1  \n\t"\
684     MOVNTQ(%%mm2,  (dst, index, 2))\
685     MOVNTQ(%%mm1, 8(dst, index, 2))\
687     "add             $8, "#index"   \n\t"\
688     "cmp        "#dstw", "#index"   \n\t"\
690 #define WRITERGB16(dst, dstw, index)  REAL_WRITERGB16(dst, dstw, index)
692 #define REAL_WRITERGB15(dst, dstw, index) \
693     "pand "MANGLE(bF8)", %%mm2  \n\t" /* B */\
694     "pand "MANGLE(bF8)", %%mm4  \n\t" /* G */\
695     "pand "MANGLE(bF8)", %%mm5  \n\t" /* R */\
696     "psrlq           $3, %%mm2  \n\t"\
697     "psrlq           $1, %%mm5  \n\t"\
699     "movq         %%mm2, %%mm1  \n\t"\
700     "movq         %%mm4, %%mm3  \n\t"\
702     "punpcklbw    %%mm7, %%mm3  \n\t"\
703     "punpcklbw    %%mm5, %%mm2  \n\t"\
704     "punpckhbw    %%mm7, %%mm4  \n\t"\
705     "punpckhbw    %%mm5, %%mm1  \n\t"\
707     "psllq           $2, %%mm3  \n\t"\
708     "psllq           $2, %%mm4  \n\t"\
710     "por          %%mm3, %%mm2  \n\t"\
711     "por          %%mm4, %%mm1  \n\t"\
713     MOVNTQ(%%mm2,  (dst, index, 2))\
714     MOVNTQ(%%mm1, 8(dst, index, 2))\
716     "add             $8, "#index"   \n\t"\
717     "cmp        "#dstw", "#index"   \n\t"\
719 #define WRITERGB15(dst, dstw, index)  REAL_WRITERGB15(dst, dstw, index)
/* Legacy 24-bit packed-RGB writer (kept for reference; superseded by
 * WRITEBGR24MMX/MMX2 below).  Starting from mm2=B, mm4=G, mm5=R, mm7=0,
 * it first builds four 0RGB quadwords, then shuffles/masks them with the
 * bm000* bit-mask constants into three contiguous quadwords of tightly
 * packed 24-bit pixels and stores 24 bytes, advancing dst by 24 and the
 * pixel index by 8.
 *
 * NOTE(review): the branch consuming the final "cmp" is not visible in
 * this excerpt. */
721 #define WRITEBGR24OLD(dst, dstw, index) \
722     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
723     "movq      %%mm2, %%mm1             \n\t" /* B */\
724     "movq      %%mm5, %%mm6             \n\t" /* R */\
725     "punpcklbw %%mm4, %%mm2             \n\t" /* GBGBGBGB 0 */\
726     "punpcklbw %%mm7, %%mm5             \n\t" /* 0R0R0R0R 0 */\
727     "punpckhbw %%mm4, %%mm1             \n\t" /* GBGBGBGB 2 */\
728     "punpckhbw %%mm7, %%mm6             \n\t" /* 0R0R0R0R 2 */\
729     "movq      %%mm2, %%mm0             \n\t" /* GBGBGBGB 0 */\
730     "movq      %%mm1, %%mm3             \n\t" /* GBGBGBGB 2 */\
731     "punpcklwd %%mm5, %%mm0             \n\t" /* 0RGB0RGB 0 */\
732     "punpckhwd %%mm5, %%mm2             \n\t" /* 0RGB0RGB 1 */\
733     "punpcklwd %%mm6, %%mm1             \n\t" /* 0RGB0RGB 2 */\
734     "punpckhwd %%mm6, %%mm3             \n\t" /* 0RGB0RGB 3 */\
736     "movq      %%mm0, %%mm4             \n\t" /* 0RGB0RGB 0 */\
737     "psrlq        $8, %%mm0             \n\t" /* 00RGB0RG 0 */\
738     "pand "MANGLE(bm00000111)", %%mm4   \n\t" /* 00000RGB 0 */\
739     "pand "MANGLE(bm11111000)", %%mm0   \n\t" /* 00RGB000 0.5 */\
740     "por       %%mm4, %%mm0             \n\t" /* 00RGBRGB 0 */\
741     "movq      %%mm2, %%mm4             \n\t" /* 0RGB0RGB 1 */\
742     "psllq       $48, %%mm2             \n\t" /* GB000000 1 */\
743     "por       %%mm2, %%mm0             \n\t" /* GBRGBRGB 0 */\
745     "movq      %%mm4, %%mm2             \n\t" /* 0RGB0RGB 1 */\
746     "psrld       $16, %%mm4             \n\t" /* 000R000R 1 */\
747     "psrlq       $24, %%mm2             \n\t" /* 0000RGB0 1.5 */\
748     "por       %%mm4, %%mm2             \n\t" /* 000RRGBR 1 */\
749     "pand "MANGLE(bm00001111)", %%mm2   \n\t" /* 0000RGBR 1 */\
750     "movq      %%mm1, %%mm4             \n\t" /* 0RGB0RGB 2 */\
751     "psrlq        $8, %%mm1             \n\t" /* 00RGB0RG 2 */\
752     "pand "MANGLE(bm00000111)", %%mm4   \n\t" /* 00000RGB 2 */\
753     "pand "MANGLE(bm11111000)", %%mm1   \n\t" /* 00RGB000 2.5 */\
754     "por       %%mm4, %%mm1             \n\t" /* 00RGBRGB 2 */\
755     "movq      %%mm1, %%mm4             \n\t" /* 00RGBRGB 2 */\
756     "psllq       $32, %%mm1             \n\t" /* BRGB0000 2 */\
757     "por       %%mm1, %%mm2             \n\t" /* BRGBRGBR 1 */\
759     "psrlq       $32, %%mm4             \n\t" /* 000000RG 2.5 */\
760     "movq      %%mm3, %%mm5             \n\t" /* 0RGB0RGB 3 */\
761     "psrlq        $8, %%mm3             \n\t" /* 00RGB0RG 3 */\
762     "pand "MANGLE(bm00000111)", %%mm5   \n\t" /* 00000RGB 3 */\
763     "pand "MANGLE(bm11111000)", %%mm3   \n\t" /* 00RGB000 3.5 */\
764     "por       %%mm5, %%mm3             \n\t" /* 00RGBRGB 3 */\
765     "psllq       $16, %%mm3             \n\t" /* RGBRGB00 3 */\
766     "por       %%mm4, %%mm3             \n\t" /* RGBRGBRG 2.5 */\
768     MOVNTQ(%%mm0,   (dst))\
769     MOVNTQ(%%mm2,  8(dst))\
770     MOVNTQ(%%mm3, 16(dst))\
771     "add         $24, "#dst"            \n\t"\
773     "add          $8, "#index"          \n\t"\
774     "cmp     "#dstw", "#index"          \n\t"\
/* Current 24-bit packed-RGB writers.
 *
 * WRITEBGR24MMX: plain-MMX path — builds four 0RGB quadwords, shifts each
 *   left by 40 and punpckhdq-merges with itself to form 0RGBRGB0, then
 *   shifts/ORs neighbors so the three stores carry 24 contiguous bytes.
 * WRITEBGR24MMX2: MMX2 path using pshufw and the ff_M24A/B/C byte-select
 *   masks to gather the right B/G/R bytes for each output quadword
 *   directly; fewer shifts, same 24-byte result.
 * WRITEBGR24: selects one of the two.  NOTE(review): the #if HAVE_MMX2 /
 *   #else / #endif guard lines around the two selector defines are
 *   missing from this excerpt, so both appear unconditional here.
 *
 * Both paths expect mm2=B, mm4=G, mm5=R (mm7=0 for the MMX path), write
 * 24 bytes at dst, advance dst by 24 and index by 8; the branch after
 * the final "cmp" is not visible in this excerpt. */
777 #define WRITEBGR24MMX(dst, dstw, index) \
778     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
779     "movq      %%mm2, %%mm1     \n\t" /* B */\
780     "movq      %%mm5, %%mm6     \n\t" /* R */\
781     "punpcklbw %%mm4, %%mm2     \n\t" /* GBGBGBGB 0 */\
782     "punpcklbw %%mm7, %%mm5     \n\t" /* 0R0R0R0R 0 */\
783     "punpckhbw %%mm4, %%mm1     \n\t" /* GBGBGBGB 2 */\
784     "punpckhbw %%mm7, %%mm6     \n\t" /* 0R0R0R0R 2 */\
785     "movq      %%mm2, %%mm0     \n\t" /* GBGBGBGB 0 */\
786     "movq      %%mm1, %%mm3     \n\t" /* GBGBGBGB 2 */\
787     "punpcklwd %%mm5, %%mm0     \n\t" /* 0RGB0RGB 0 */\
788     "punpckhwd %%mm5, %%mm2     \n\t" /* 0RGB0RGB 1 */\
789     "punpcklwd %%mm6, %%mm1     \n\t" /* 0RGB0RGB 2 */\
790     "punpckhwd %%mm6, %%mm3     \n\t" /* 0RGB0RGB 3 */\
792     "movq      %%mm0, %%mm4     \n\t" /* 0RGB0RGB 0 */\
793     "movq      %%mm2, %%mm6     \n\t" /* 0RGB0RGB 1 */\
794     "movq      %%mm1, %%mm5     \n\t" /* 0RGB0RGB 2 */\
795     "movq      %%mm3, %%mm7     \n\t" /* 0RGB0RGB 3 */\
797     "psllq       $40, %%mm0     \n\t" /* RGB00000 0 */\
798     "psllq       $40, %%mm2     \n\t" /* RGB00000 1 */\
799     "psllq       $40, %%mm1     \n\t" /* RGB00000 2 */\
800     "psllq       $40, %%mm3     \n\t" /* RGB00000 3 */\
802     "punpckhdq %%mm4, %%mm0     \n\t" /* 0RGBRGB0 0 */\
803     "punpckhdq %%mm6, %%mm2     \n\t" /* 0RGBRGB0 1 */\
804     "punpckhdq %%mm5, %%mm1     \n\t" /* 0RGBRGB0 2 */\
805     "punpckhdq %%mm7, %%mm3     \n\t" /* 0RGBRGB0 3 */\
807     "psrlq        $8, %%mm0     \n\t" /* 00RGBRGB 0 */\
808     "movq      %%mm2, %%mm6     \n\t" /* 0RGBRGB0 1 */\
809     "psllq       $40, %%mm2     \n\t" /* GB000000 1 */\
810     "por       %%mm2, %%mm0     \n\t" /* GBRGBRGB 0 */\
811     MOVNTQ(%%mm0, (dst))\
813     "psrlq       $24, %%mm6     \n\t" /* 0000RGBR 1 */\
814     "movq      %%mm1, %%mm5     \n\t" /* 0RGBRGB0 2 */\
815     "psllq       $24, %%mm1     \n\t" /* BRGB0000 2 */\
816     "por       %%mm1, %%mm6     \n\t" /* BRGBRGBR 1 */\
817     MOVNTQ(%%mm6, 8(dst))\
819     "psrlq       $40, %%mm5     \n\t" /* 000000RG 2 */\
820     "psllq        $8, %%mm3     \n\t" /* RGBRGB00 3 */\
821     "por       %%mm3, %%mm5     \n\t" /* RGBRGBRG 2 */\
822     MOVNTQ(%%mm5, 16(dst))\
824     "add         $24, "#dst"    \n\t"\
826     "add          $8, "#index"  \n\t"\
827     "cmp     "#dstw", "#index"  \n\t"\
830 #define WRITEBGR24MMX2(dst, dstw, index) \
831     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
832     "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
833     "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
834     "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2  B1 B0 B1 B0 */\
835     "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2  G1 G0 G1 G0 */\
836     "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0  R1 R0 R1 R0 */\
838     "pand   %%mm0, %%mm1        \n\t" /*    B2        B1       B0 */\
839     "pand   %%mm0, %%mm3        \n\t" /*    G2        G1       G0 */\
840     "pand   %%mm7, %%mm6        \n\t" /*       R1        R0       */\
842     "psllq     $8, %%mm3        \n\t" /* G2        G1       G0    */\
843     "por    %%mm1, %%mm6        \n\t"\
844     "por    %%mm3, %%mm6        \n\t"\
845     MOVNTQ(%%mm6, (dst))\
847     "psrlq     $8, %%mm4        \n\t" /* 00 G7 G6 G5  G4 G3 G2 G1 */\
848     "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4  B3 B2 B3 B2 */\
849     "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3  G4 G3 G4 G3 */\
850     "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4  R3 R2 R3 R2 */\
852     "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5       B4        B3    */\
853     "pand   %%mm7, %%mm3        \n\t" /*       G4        G3       */\
854     "pand   %%mm0, %%mm6        \n\t" /*    R4        R3       R2 */\
856     "por    %%mm1, %%mm3        \n\t" /* B5 G4 B4     G3 B3    */\
857     "por    %%mm3, %%mm6        \n\t"\
858     MOVNTQ(%%mm6, 8(dst))\
860     "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6  B7 B6 B6 B7 */\
861     "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7  G6 G5 G6 G5 */\
862     "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6  R5 R4 R5 R4 */\
864     "pand   %%mm7, %%mm1        \n\t" /*       B7        B6       */\
865     "pand   %%mm0, %%mm3        \n\t" /*    G7        G6       G5 */\
866     "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7       R6        R5    */\
868     "por    %%mm1, %%mm3        \n\t"\
869     "por    %%mm3, %%mm6        \n\t"\
870     MOVNTQ(%%mm6, 16(dst))\
872     "add      $24, "#dst"       \n\t"\
874     "add       $8, "#index"     \n\t"\
875     "cmp  "#dstw", "#index"     \n\t"\
880 #define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX2(dst, dstw, index)
883 #define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX(dst, dstw, index)
/* Pack luma/chroma words into interleaved YUYV (YUY2) order and store
 * 16 bytes per iteration with MOVNTQ, then advance index by 8 pixels.
 * NOTE(review): exact register roles (which of mm1/mm3/mm4/mm7 carry
 * Y/U/V) depend on the caller's preceding kernel — confirm at call sites. */
886 #define REAL_WRITEYUY2(dst, dstw, index) \
887 "packuswb %%mm3, %%mm3 \n\t"\
888 "packuswb %%mm4, %%mm4 \n\t"\
889 "packuswb %%mm7, %%mm1 \n\t"\
890 "punpcklbw %%mm4, %%mm3 \n\t"\
891 "movq %%mm1, %%mm7 \n\t"\
892 "punpcklbw %%mm3, %%mm1 \n\t"\
893 "punpckhbw %%mm3, %%mm7 \n\t"\
895 MOVNTQ(%%mm1, (dst, index, 2))\
896 MOVNTQ(%%mm7, 8(dst, index, 2))\
898 "add $8, "#index" \n\t"\
899 "cmp "#dstw", "#index" \n\t"\
901 #define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
/* Vertical multi-tap filtering to planar YUV (YV12-style) output, with an
 * optional alpha plane.  Uses the MMX YSCALEYUV2YV12X kernels (accurate-
 * rounding variant when SWS_ACCURATE_RND is set); otherwise falls through
 * to the AltiVec or plain-C implementations.  The #if guards that pick a
 * path are not visible in this excerpt. */
904 static inline void RENAME(yuv2yuvX)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
905 const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize, const int16_t **alpSrc,
906 uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
909 if(!(c->flags & SWS_BITEXACT)){
910 if (c->flags & SWS_ACCURATE_RND){
912 YSCALEYUV2YV12X_ACCURATE( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
913 YSCALEYUV2YV12X_ACCURATE(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
915 if (CONFIG_SWSCALE_ALPHA && aDest){
916 YSCALEYUV2YV12X_ACCURATE( "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
919 YSCALEYUV2YV12X_ACCURATE("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
922 YSCALEYUV2YV12X( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
923 YSCALEYUV2YV12X(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
925 if (CONFIG_SWSCALE_ALPHA && aDest){
926 YSCALEYUV2YV12X( "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
929 YSCALEYUV2YV12X("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
935 yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
936 chrFilter, chrSrc, chrFilterSize,
937 dest, uDest, vDest, dstW, chrDstW);
939 yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
940 chrFilter, chrSrc, chrFilterSize,
941 alpSrc, dest, uDest, vDest, aDest, dstW, chrDstW);
942 #endif //!HAVE_ALTIVEC
/* Vertical multi-tap filtering to semi-planar NV12/NV21 output: no SIMD
 * path here, simply delegates to the C implementation. */
945 static inline void RENAME(yuv2nv12X)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
946 const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
947 uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
949 yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
950 chrFilter, chrSrc, chrFilterSize,
951 dest, uDest, dstW, chrDstW, dstFormat);
/* 1:1 vertical "scale" (plain copy with rounding): each 16-bit source
 * sample is rounded ((x+64)>>7) and clipped to 0..255.  The MMX path runs
 * the YSCALEYUV2YV121 kernel per plane (A/Y/U/V); the C fallback below
 * does the same arithmetic explicitly. */
954 static inline void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc, const int16_t *chrSrc, const int16_t *alpSrc,
955 uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
959 if(!(c->flags & SWS_BITEXACT)){
961 uint8_t *src[4]= {alpSrc + dstW, lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
962 uint8_t *dst[4]= {aDest, dest, uDest, vDest};
963 x86_reg counter[4]= {dstW, dstW, chrDstW, chrDstW};
965 if (c->flags & SWS_ACCURATE_RND){
969 YSCALEYUV2YV121_ACCURATE
970 :: "r" (src[p]), "r" (dst[p] + counter[p]),
981 :: "r" (src[p]), "r" (dst[p] + counter[p]),
/* --- C fallback: round then clip each plane --- */
991 for (i=0; i<dstW; i++)
993 int val= (lumSrc[i]+64)>>7;
1004 for (i=0; i<chrDstW; i++)
1006 int u=(chrSrc[i ]+64)>>7;
1007 int v=(chrSrc[i + VOFW]+64)>>7;
1011 else if (u>255) u=255;
1013 else if (v>255) v=255;
1020 if (CONFIG_SWSCALE_ALPHA && aDest)
1021 for (i=0; i<dstW; i++){
1022 int val= (alpSrc[i]+64)>>7;
1023 aDest[i]= av_clip_uint8(val);
1029 * vertical scale YV12 to RGB
/* Vertical multi-tap filtering directly to a packed pixel format.
 * Dispatches on c->dstFormat: BGR32 (with optional alpha plane), BGR24,
 * RGB555, RGB565 (both with ordered dither from the context's dither
 * tables), and YUYV422.  Each case runs a YSCALEYUV2PACKEDX kernel
 * (accurate-rounding variant under SWS_ACCURATE_RND) followed by the
 * matching WRITE* store macro.  Falls back to the AltiVec packed writer
 * for the listed RGB formats, else to the C implementation. */
1031 static inline void RENAME(yuv2packedX)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
1032 const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
1033 const int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
1037 if(!(c->flags & SWS_BITEXACT)){
1038 if (c->flags & SWS_ACCURATE_RND){
1039 switch(c->dstFormat){
1041 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
1042 YSCALEYUV2PACKEDX_ACCURATE
1044 "movq %%mm2, "U_TEMP"(%0) \n\t"
1045 "movq %%mm4, "V_TEMP"(%0) \n\t"
1046 "movq %%mm5, "Y_TEMP"(%0) \n\t"
1047 YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET)
1048 "movq "Y_TEMP"(%0), %%mm5 \n\t"
1049 "psraw $3, %%mm1 \n\t"
1050 "psraw $3, %%mm7 \n\t"
1051 "packuswb %%mm7, %%mm1 \n\t"
1052 WRITEBGR32(%4, %5, %%REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6)
1054 YSCALEYUV2PACKEDX_END
1056 YSCALEYUV2PACKEDX_ACCURATE
1058 "pcmpeqd %%mm7, %%mm7 \n\t"
1059 WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1061 YSCALEYUV2PACKEDX_END
1065 YSCALEYUV2PACKEDX_ACCURATE
1067 "pxor %%mm7, %%mm7 \n\t"
1068 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
1069 "add %4, %%"REG_c" \n\t"
1070 WRITEBGR24(%%REGc, %5, %%REGa)
1073 :: "r" (&c->redDither),
1074 "m" (dummy), "m" (dummy), "m" (dummy),
1075 "r" (dest), "m" (dstW)
1076 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
1079 case PIX_FMT_RGB555:
1080 YSCALEYUV2PACKEDX_ACCURATE
1082 "pxor %%mm7, %%mm7 \n\t"
1083 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1085 "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
1086 "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
1087 "paddusb "RED_DITHER"(%0), %%mm5\n\t"
1090 WRITERGB15(%4, %5, %%REGa)
1091 YSCALEYUV2PACKEDX_END
1093 case PIX_FMT_RGB565:
1094 YSCALEYUV2PACKEDX_ACCURATE
1096 "pxor %%mm7, %%mm7 \n\t"
1097 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1099 "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
1100 "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
1101 "paddusb "RED_DITHER"(%0), %%mm5\n\t"
1104 WRITERGB16(%4, %5, %%REGa)
1105 YSCALEYUV2PACKEDX_END
1107 case PIX_FMT_YUYV422:
1108 YSCALEYUV2PACKEDX_ACCURATE
1109 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1111 "psraw $3, %%mm3 \n\t"
1112 "psraw $3, %%mm4 \n\t"
1113 "psraw $3, %%mm1 \n\t"
1114 "psraw $3, %%mm7 \n\t"
1115 WRITEYUY2(%4, %5, %%REGa)
1116 YSCALEYUV2PACKEDX_END
/* --- same dispatch without accurate rounding --- */
1120 switch(c->dstFormat)
1123 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
1126 YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
1127 "psraw $3, %%mm1 \n\t"
1128 "psraw $3, %%mm7 \n\t"
1129 "packuswb %%mm7, %%mm1 \n\t"
1130 WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
1131 YSCALEYUV2PACKEDX_END
1135 "pcmpeqd %%mm7, %%mm7 \n\t"
1136 WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1137 YSCALEYUV2PACKEDX_END
1143 "pxor %%mm7, %%mm7 \n\t"
1144 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c" \n\t" //FIXME optimize
1145 "add %4, %%"REG_c" \n\t"
1146 WRITEBGR24(%%REGc, %5, %%REGa)
1148 :: "r" (&c->redDither),
1149 "m" (dummy), "m" (dummy), "m" (dummy),
1150 "r" (dest), "m" (dstW)
1151 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
1154 case PIX_FMT_RGB555:
1157 "pxor %%mm7, %%mm7 \n\t"
1158 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1160 "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
1161 "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
1162 "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
1165 WRITERGB15(%4, %5, %%REGa)
1166 YSCALEYUV2PACKEDX_END
1168 case PIX_FMT_RGB565:
1171 "pxor %%mm7, %%mm7 \n\t"
1172 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1174 "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
1175 "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
1176 "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
1179 WRITERGB16(%4, %5, %%REGa)
1180 YSCALEYUV2PACKEDX_END
1182 case PIX_FMT_YUYV422:
1184 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1186 "psraw $3, %%mm3 \n\t"
1187 "psraw $3, %%mm4 \n\t"
1188 "psraw $3, %%mm1 \n\t"
1189 "psraw $3, %%mm7 \n\t"
1190 WRITEYUY2(%4, %5, %%REGa)
1191 YSCALEYUV2PACKEDX_END
1196 #endif /* HAVE_MMX */
1198 /* The following list of supported dstFormat values should
1199 match what's found in the body of ff_yuv2packedX_altivec() */
1200 if (!(c->flags & SWS_BITEXACT) && !c->alpPixBuf &&
1201 (c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA ||
1202 c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
1203 c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB))
1204 ff_yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize,
1205 chrFilter, chrSrc, chrFilterSize,
1209 yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
1210 chrFilter, chrSrc, chrFilterSize,
1211 alpSrc, dest, dstW, dstY);
1215 * vertical bilinear scale YV12 to RGB
/* Vertical *bilinear* interpolation between two source lines (buf0/buf1,
 * weighted by yalpha; uvbuf0/uvbuf1 weighted by uvalpha) directly to a
 * packed pixel format.  Same per-format dispatch as yuv2packedX: BGR32
 * (optionally with alpha from abuf0/abuf1), BGR24, RGB555/565 with
 * dithering, YUYV422.  Each MMX case saves/restores REG_b and REG_BP
 * around the kernel because both are needed as scratch.  Falls back to
 * the generic YSCALE_YUV_2_ANYRGB_C expansion. */
1217 static inline void RENAME(yuv2packed2)(SwsContext *c, const uint16_t *buf0, const uint16_t *buf1, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1218 const uint16_t *abuf0, const uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
1220 int yalpha1=4095- yalpha;
1221 int uvalpha1=4095-uvalpha;
1225 if(!(c->flags & SWS_BITEXACT)){
1226 switch(c->dstFormat)
1228 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1230 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
1233 YSCALEYUV2RGB(%%REGBP, %5)
1234 YSCALEYUV2RGB_YA(%%REGBP, %5, %6, %7)
1235 "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1236 "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1237 "packuswb %%mm7, %%mm1 \n\t"
1238 WRITEBGR32(%4, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
1240 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "r" (dest),
1242 ,"r" (abuf0), "r" (abuf1)
/* x86-32 path: not enough registers, so the alpha buffers are passed
 * through the context's u_temp/v_temp slots instead of asm operands. */
1246 *(uint16_t **)(&c->u_temp)=abuf0;
1247 *(uint16_t **)(&c->v_temp)=abuf1;
1249 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1250 "mov %4, %%"REG_b" \n\t"
1251 "push %%"REG_BP" \n\t"
1252 YSCALEYUV2RGB(%%REGBP, %5)
1255 "mov "U_TEMP"(%5), %0 \n\t"
1256 "mov "V_TEMP"(%5), %1 \n\t"
1257 YSCALEYUV2RGB_YA(%%REGBP, %5, %0, %1)
1258 "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1259 "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1260 "packuswb %%mm7, %%mm1 \n\t"
1263 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
1264 "pop %%"REG_BP" \n\t"
1265 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1267 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1273 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1274 "mov %4, %%"REG_b" \n\t"
1275 "push %%"REG_BP" \n\t"
1276 YSCALEYUV2RGB(%%REGBP, %5)
1277 "pcmpeqd %%mm7, %%mm7 \n\t"
1278 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1279 "pop %%"REG_BP" \n\t"
1280 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1282 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1289 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1290 "mov %4, %%"REG_b" \n\t"
1291 "push %%"REG_BP" \n\t"
1292 YSCALEYUV2RGB(%%REGBP, %5)
1293 "pxor %%mm7, %%mm7 \n\t"
1294 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1295 "pop %%"REG_BP" \n\t"
1296 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1297 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1301 case PIX_FMT_RGB555:
1303 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1304 "mov %4, %%"REG_b" \n\t"
1305 "push %%"REG_BP" \n\t"
1306 YSCALEYUV2RGB(%%REGBP, %5)
1307 "pxor %%mm7, %%mm7 \n\t"
1308 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1310 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1311 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1312 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1315 WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1316 "pop %%"REG_BP" \n\t"
1317 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1319 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1323 case PIX_FMT_RGB565:
1325 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1326 "mov %4, %%"REG_b" \n\t"
1327 "push %%"REG_BP" \n\t"
1328 YSCALEYUV2RGB(%%REGBP, %5)
1329 "pxor %%mm7, %%mm7 \n\t"
1330 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1332 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1333 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1334 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1337 WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1338 "pop %%"REG_BP" \n\t"
1339 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1340 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1344 case PIX_FMT_YUYV422:
1346 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1347 "mov %4, %%"REG_b" \n\t"
1348 "push %%"REG_BP" \n\t"
1349 YSCALEYUV2PACKED(%%REGBP, %5)
1350 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1351 "pop %%"REG_BP" \n\t"
1352 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1353 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1361 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C(void,0), YSCALE_YUV_2_GRAY16_2_C, YSCALE_YUV_2_MONO2_C)
1365 * YV12 to RGB without scaling or interpolating
/* Single-line packed output: no vertical scaling or interpolation of luma.
 * If SWS_FULL_CHR_H_INT is set, delegates to the generic yuv2packed2 path.
 * Otherwise two chroma strategies:
 *   - uvalpha < 2048: use uvbuf0 only (YSCALEYUV2RGB1 / *PACKED1 kernels) —
 *     faster but shifts chrominance by half a pixel, as noted below;
 *   - else: average uvbuf0 and uvbuf1 (the *1b kernel variants).
 * Per-format dispatch mirrors yuv2packed2 (BGR32 ± alpha, BGR24,
 * RGB555/565 with dithering, YUYV422), with the same REG_b/REG_BP
 * save/restore dance around each asm statement. */
1367 static inline void RENAME(yuv2packed1)(SwsContext *c, const uint16_t *buf0, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1368 const uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
1370 const int yalpha1=0;
1373 const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1374 const int yalpha= 4096; //FIXME ...
1376 if (flags&SWS_FULL_CHR_H_INT)
1378 c->yuv2packed2(c, buf0, buf0, uvbuf0, uvbuf1, abuf0, abuf0, dest, dstW, 0, uvalpha, y);
1383 if(!(flags & SWS_BITEXACT)){
1384 if (uvalpha < 2048) // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1389 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
1391 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1392 "mov %4, %%"REG_b" \n\t"
1393 "push %%"REG_BP" \n\t"
1394 YSCALEYUV2RGB1(%%REGBP, %5)
1395 YSCALEYUV2RGB1_ALPHA(%%REGBP)
1396 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1397 "pop %%"REG_BP" \n\t"
1398 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1400 :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1405 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1406 "mov %4, %%"REG_b" \n\t"
1407 "push %%"REG_BP" \n\t"
1408 YSCALEYUV2RGB1(%%REGBP, %5)
1409 "pcmpeqd %%mm7, %%mm7 \n\t"
1410 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1411 "pop %%"REG_BP" \n\t"
1412 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1414 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1421 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1422 "mov %4, %%"REG_b" \n\t"
1423 "push %%"REG_BP" \n\t"
1424 YSCALEYUV2RGB1(%%REGBP, %5)
1425 "pxor %%mm7, %%mm7 \n\t"
1426 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1427 "pop %%"REG_BP" \n\t"
1428 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1430 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1434 case PIX_FMT_RGB555:
1436 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1437 "mov %4, %%"REG_b" \n\t"
1438 "push %%"REG_BP" \n\t"
1439 YSCALEYUV2RGB1(%%REGBP, %5)
1440 "pxor %%mm7, %%mm7 \n\t"
1441 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1443 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1444 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1445 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1447 WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1448 "pop %%"REG_BP" \n\t"
1449 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1451 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1455 case PIX_FMT_RGB565:
1457 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1458 "mov %4, %%"REG_b" \n\t"
1459 "push %%"REG_BP" \n\t"
1460 YSCALEYUV2RGB1(%%REGBP, %5)
1461 "pxor %%mm7, %%mm7 \n\t"
1462 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1464 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1465 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1466 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1469 WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1470 "pop %%"REG_BP" \n\t"
1471 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1473 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1477 case PIX_FMT_YUYV422:
1479 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1480 "mov %4, %%"REG_b" \n\t"
1481 "push %%"REG_BP" \n\t"
1482 YSCALEYUV2PACKED1(%%REGBP, %5)
1483 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1484 "pop %%"REG_BP" \n\t"
1485 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1487 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
/* --- uvalpha >= 2048: average the two chroma lines (*1b kernels) --- */
1498 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
1500 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1501 "mov %4, %%"REG_b" \n\t"
1502 "push %%"REG_BP" \n\t"
1503 YSCALEYUV2RGB1b(%%REGBP, %5)
1504 YSCALEYUV2RGB1_ALPHA(%%REGBP)
1505 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1506 "pop %%"REG_BP" \n\t"
1507 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1509 :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1514 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1515 "mov %4, %%"REG_b" \n\t"
1516 "push %%"REG_BP" \n\t"
1517 YSCALEYUV2RGB1b(%%REGBP, %5)
1518 "pcmpeqd %%mm7, %%mm7 \n\t"
1519 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1520 "pop %%"REG_BP" \n\t"
1521 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1523 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1530 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1531 "mov %4, %%"REG_b" \n\t"
1532 "push %%"REG_BP" \n\t"
1533 YSCALEYUV2RGB1b(%%REGBP, %5)
1534 "pxor %%mm7, %%mm7 \n\t"
1535 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1536 "pop %%"REG_BP" \n\t"
1537 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1539 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1543 case PIX_FMT_RGB555:
1545 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1546 "mov %4, %%"REG_b" \n\t"
1547 "push %%"REG_BP" \n\t"
1548 YSCALEYUV2RGB1b(%%REGBP, %5)
1549 "pxor %%mm7, %%mm7 \n\t"
1550 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1552 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1553 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1554 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1556 WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1557 "pop %%"REG_BP" \n\t"
1558 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1560 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1564 case PIX_FMT_RGB565:
1566 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1567 "mov %4, %%"REG_b" \n\t"
1568 "push %%"REG_BP" \n\t"
1569 YSCALEYUV2RGB1b(%%REGBP, %5)
1570 "pxor %%mm7, %%mm7 \n\t"
1571 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1573 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1574 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1575 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1578 WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1579 "pop %%"REG_BP" \n\t"
1580 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1582 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1586 case PIX_FMT_YUYV422:
1588 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1589 "mov %4, %%"REG_b" \n\t"
1590 "push %%"REG_BP" \n\t"
1591 YSCALEYUV2PACKED1b(%%REGBP, %5)
1592 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1593 "pop %%"REG_BP" \n\t"
1594 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1596 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1603 #endif /* HAVE_MMX */
1606 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C(void,0), YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
1608 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C(void,0), YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
1612 //FIXME yuy2* can read up to 7 samples too much
/* Extract the luma plane from packed YUYV input: mask the even bytes
 * (bm01010101) out of each 16-byte group and pack them down, 8 pixels per
 * loop.  The negative-index trick (start at -width, count up to 0) lets
 * one register serve as both loop counter and offset.  C fallback below. */
1614 static inline void RENAME(yuy2ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1618 "movq "MANGLE(bm01010101)", %%mm2 \n\t"
1619 "mov %0, %%"REG_a" \n\t"
1621 "movq (%1, %%"REG_a",2), %%mm0 \n\t"
1622 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
1623 "pand %%mm2, %%mm0 \n\t"
1624 "pand %%mm2, %%mm1 \n\t"
1625 "packuswb %%mm1, %%mm0 \n\t"
1626 "movq %%mm0, (%2, %%"REG_a") \n\t"
1627 "add $8, %%"REG_a" \n\t"
1629 : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
1634 for (i=0; i<width; i++)
/* Deinterleave U and V from packed YUYV input: shift out the luma bytes,
 * then split the remaining UVUV stream into separate U (even) and V (odd)
 * planes, 4 chroma samples per loop.  src2 must alias src1 (asserted). */
1639 static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1643 "movq "MANGLE(bm01010101)", %%mm4 \n\t"
1644 "mov %0, %%"REG_a" \n\t"
1646 "movq (%1, %%"REG_a",4), %%mm0 \n\t"
1647 "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
1648 "psrlw $8, %%mm0 \n\t"
1649 "psrlw $8, %%mm1 \n\t"
1650 "packuswb %%mm1, %%mm0 \n\t"
1651 "movq %%mm0, %%mm1 \n\t"
1652 "psrlw $8, %%mm0 \n\t"
1653 "pand %%mm4, %%mm1 \n\t"
1654 "packuswb %%mm0, %%mm0 \n\t"
1655 "packuswb %%mm1, %%mm1 \n\t"
1656 "movd %%mm0, (%3, %%"REG_a") \n\t"
1657 "movd %%mm1, (%2, %%"REG_a") \n\t"
1658 "add $4, %%"REG_a" \n\t"
1660 : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
1665 for (i=0; i<width; i++)
1667 dstU[i]= src1[4*i + 1];
1668 dstV[i]= src1[4*i + 3];
1671 assert(src1 == src2);
1674 /* This is almost identical to the previous, and exists only because
1675 * yuy2ToY/UV)(dst, src+1, ...) would have 100% unaligned accesses. */
/* Extract luma from packed UYVY input (luma sits in the odd bytes, so a
 * right shift by 8 replaces the mask used in the YUYV variant).  8 pixels
 * per loop; C fallback below. */
1676 static inline void RENAME(uyvyToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1680 "mov %0, %%"REG_a" \n\t"
1682 "movq (%1, %%"REG_a",2), %%mm0 \n\t"
1683 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
1684 "psrlw $8, %%mm0 \n\t"
1685 "psrlw $8, %%mm1 \n\t"
1686 "packuswb %%mm1, %%mm0 \n\t"
1687 "movq %%mm0, (%2, %%"REG_a") \n\t"
1688 "add $8, %%"REG_a" \n\t"
1690 : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
1695 for (i=0; i<width; i++)
/* Deinterleave U and V from packed UYVY input (chroma in the even bytes,
 * hence pand instead of the shift used for YUYV).  4 chroma samples per
 * loop; src2 must alias src1 (asserted). */
1700 static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1704 "movq "MANGLE(bm01010101)", %%mm4 \n\t"
1705 "mov %0, %%"REG_a" \n\t"
1707 "movq (%1, %%"REG_a",4), %%mm0 \n\t"
1708 "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
1709 "pand %%mm4, %%mm0 \n\t"
1710 "pand %%mm4, %%mm1 \n\t"
1711 "packuswb %%mm1, %%mm0 \n\t"
1712 "movq %%mm0, %%mm1 \n\t"
1713 "psrlw $8, %%mm0 \n\t"
1714 "pand %%mm4, %%mm1 \n\t"
1715 "packuswb %%mm0, %%mm0 \n\t"
1716 "packuswb %%mm1, %%mm1 \n\t"
1717 "movd %%mm0, (%3, %%"REG_a") \n\t"
1718 "movd %%mm1, (%2, %%"REG_a") \n\t"
1719 "add $4, %%"REG_a" \n\t"
1721 : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
1726 for (i=0; i<width; i++)
1728 dstU[i]= src1[4*i + 0];
1729 dstV[i]= src1[4*i + 2];
1732 assert(src1 == src2);
/* Generic packed-RGB -> luma converter generator.  Each instantiation
 * extracts b/g/r with the given shifts/masks and computes
 * (RY*r + GY*g + BY*b + rounding) >> S.  The per-format shift/mask and
 * coefficient-scaling arguments below encode 32/16/15-bit RGB and BGR
 * layouts. */
1735 #define BGR2Y(type, name, shr, shg, shb, maskr, maskg, maskb, RY, GY, BY, S)\
1736 static inline void RENAME(name)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)\
1739 for (i=0; i<width; i++)\
1741 int b= (((const type*)src)[i]>>shb)&maskb;\
1742 int g= (((const type*)src)[i]>>shg)&maskg;\
1743 int r= (((const type*)src)[i]>>shr)&maskr;\
1745 dst[i]= (((RY)*r + (GY)*g + (BY)*b + (33<<((S)-1)))>>(S));\
1749 BGR2Y(uint32_t, bgr32ToY,16, 0, 0, 0x00FF, 0xFF00, 0x00FF, RY<< 8, GY , BY<< 8, RGB2YUV_SHIFT+8)
1750 BGR2Y(uint32_t, rgb32ToY, 0, 0,16, 0x00FF, 0xFF00, 0x00FF, RY<< 8, GY , BY<< 8, RGB2YUV_SHIFT+8)
1751 BGR2Y(uint16_t, bgr16ToY, 0, 0, 0, 0x001F, 0x07E0, 0xF800, RY<<11, GY<<5, BY , RGB2YUV_SHIFT+8)
1752 BGR2Y(uint16_t, bgr15ToY, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, RY<<10, GY<<5, BY , RGB2YUV_SHIFT+8)
1753 BGR2Y(uint16_t, rgb16ToY, 0, 0, 0, 0xF800, 0x07E0, 0x001F, RY , GY<<5, BY<<11, RGB2YUV_SHIFT+8)
1754 BGR2Y(uint16_t, rgb15ToY, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RY , GY<<5, BY<<10, RGB2YUV_SHIFT+7)
/* Extract the alpha channel from packed ABGR input.
 * NOTE(review): the loop body is not visible in this excerpt — presumably
 * dst[i] is taken from the alpha byte of pixel i; confirm against the
 * full source. */
1756 static inline void RENAME(abgrToA)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused){
1758 for (i=0; i<width; i++){
/* Generic packed-RGB -> chroma (U,V) converter generator.  Produces two
 * functions per instantiation: `name` (one chroma sample per pixel) and
 * `name_half` (horizontally 2:1 subsampled: sums each pair of pixels,
 * using the mask|2*mask trick to separate the carry-extended channels,
 * then uses one extra bit of shift for the average). */
1763 #define BGR2UV(type, name, shr, shg, shb, maska, maskr, maskg, maskb, RU, GU, BU, RV, GV, BV, S)\
1764 static inline void RENAME(name)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, const uint8_t *dummy, long width, uint32_t *unused)\
1767 for (i=0; i<width; i++)\
1769 int b= (((const type*)src)[i]&maskb)>>shb;\
1770 int g= (((const type*)src)[i]&maskg)>>shg;\
1771 int r= (((const type*)src)[i]&maskr)>>shr;\
1773 dstU[i]= ((RU)*r + (GU)*g + (BU)*b + (257<<((S)-1)))>>(S);\
1774 dstV[i]= ((RV)*r + (GV)*g + (BV)*b + (257<<((S)-1)))>>(S);\
1777 static inline void RENAME(name ## _half)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, const uint8_t *dummy, long width, uint32_t *unused)\
1780 for (i=0; i<width; i++)\
1782 int pix0= ((const type*)src)[2*i+0];\
1783 int pix1= ((const type*)src)[2*i+1];\
1784 int g= (pix0&~(maskr|maskb))+(pix1&~(maskr|maskb));\
1785 int b= ((pix0+pix1-g)&(maskb|(2*maskb)))>>shb;\
1786 int r= ((pix0+pix1-g)&(maskr|(2*maskr)))>>shr;\
1787 g&= maskg|(2*maskg);\
1791 dstU[i]= ((RU)*r + (GU)*g + (BU)*b + (257<<(S)))>>((S)+1);\
1792 dstV[i]= ((RV)*r + (GV)*g + (BV)*b + (257<<(S)))>>((S)+1);\
1796 BGR2UV(uint32_t, bgr32ToUV,16, 0, 0, 0xFF000000, 0xFF0000, 0xFF00, 0x00FF, RU<< 8, GU , BU<< 8, RV<< 8, GV , BV<< 8, RGB2YUV_SHIFT+8)
1797 BGR2UV(uint32_t, rgb32ToUV, 0, 0,16, 0xFF000000, 0x00FF, 0xFF00, 0xFF0000, RU<< 8, GU , BU<< 8, RV<< 8, GV , BV<< 8, RGB2YUV_SHIFT+8)
1798 BGR2UV(uint16_t, bgr16ToUV, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, RU<<11, GU<<5, BU , RV<<11, GV<<5, BV , RGB2YUV_SHIFT+8)
1799 BGR2UV(uint16_t, bgr15ToUV, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, RU<<10, GU<<5, BU , RV<<10, GV<<5, BV , RGB2YUV_SHIFT+7)
1800 BGR2UV(uint16_t, rgb16ToUV, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, RU , GU<<5, BU<<11, RV , GV<<5, BV<<11, RGB2YUV_SHIFT+8)
1801 BGR2UV(uint16_t, rgb15ToUV, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RU , GU<<5, BU<<10, RV , GV<<5, BV<<10, RGB2YUV_SHIFT+7)
/* MMX 24bpp RGB/BGR -> luma.  Loads overlapping dwords at offsets
 * 0/2/6/8 so each pair of registers covers two 3-byte pixels, widens to
 * 16 bit, and applies the pmaddwd coefficient pairs (BGR or RGB tables
 * chosen by srcFormat), adds the rounding offset, then >>15 and packs
 * 4 output bytes per iteration. */
1804 static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, const uint8_t *src, long width, int srcFormat)
1807 if(srcFormat == PIX_FMT_BGR24){
1809 "movq "MANGLE(ff_bgr24toY1Coeff)", %%mm5 \n\t"
1810 "movq "MANGLE(ff_bgr24toY2Coeff)", %%mm6 \n\t"
1815 "movq "MANGLE(ff_rgb24toY1Coeff)", %%mm5 \n\t"
1816 "movq "MANGLE(ff_rgb24toY2Coeff)", %%mm6 \n\t"
1822 "movq "MANGLE(ff_bgr24toYOffset)", %%mm4 \n\t"
1823 "mov %2, %%"REG_a" \n\t"
1824 "pxor %%mm7, %%mm7 \n\t"
1826 PREFETCH" 64(%0) \n\t"
1827 "movd (%0), %%mm0 \n\t"
1828 "movd 2(%0), %%mm1 \n\t"
1829 "movd 6(%0), %%mm2 \n\t"
1830 "movd 8(%0), %%mm3 \n\t"
1832 "punpcklbw %%mm7, %%mm0 \n\t"
1833 "punpcklbw %%mm7, %%mm1 \n\t"
1834 "punpcklbw %%mm7, %%mm2 \n\t"
1835 "punpcklbw %%mm7, %%mm3 \n\t"
1836 "pmaddwd %%mm5, %%mm0 \n\t"
1837 "pmaddwd %%mm6, %%mm1 \n\t"
1838 "pmaddwd %%mm5, %%mm2 \n\t"
1839 "pmaddwd %%mm6, %%mm3 \n\t"
1840 "paddd %%mm1, %%mm0 \n\t"
1841 "paddd %%mm3, %%mm2 \n\t"
1842 "paddd %%mm4, %%mm0 \n\t"
1843 "paddd %%mm4, %%mm2 \n\t"
1844 "psrad $15, %%mm0 \n\t"
1845 "psrad $15, %%mm2 \n\t"
1846 "packssdw %%mm2, %%mm0 \n\t"
1847 "packuswb %%mm0, %%mm0 \n\t"
1848 "movd %%mm0, (%1, %%"REG_a") \n\t"
1849 "add $4, %%"REG_a" \n\t"
1852 : "r" (dst+width), "g" ((x86_reg)-width)
/* MMX 24bpp RGB/BGR -> chroma (U and V simultaneously).  Same overlapping
 * dword load pattern as bgr24ToY_mmx; the four pmaddwd coefficient rows
 * come from the ff_bgr24toUV table passed as memory operand %4 (row
 * selected by srcFormat at the call), with 24+%4 preloaded into mm6.
 * Adds ff_bgr24toUVOffset, >>15, and stores 4 U and 4 V bytes per
 * iteration. */
1857 static inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, long width, int srcFormat)
1860 "movq 24+%4, %%mm6 \n\t"
1861 "mov %3, %%"REG_a" \n\t"
1862 "pxor %%mm7, %%mm7 \n\t"
1864 PREFETCH" 64(%0) \n\t"
1865 "movd (%0), %%mm0 \n\t"
1866 "movd 2(%0), %%mm1 \n\t"
1867 "punpcklbw %%mm7, %%mm0 \n\t"
1868 "punpcklbw %%mm7, %%mm1 \n\t"
1869 "movq %%mm0, %%mm2 \n\t"
1870 "movq %%mm1, %%mm3 \n\t"
1871 "pmaddwd %4, %%mm0 \n\t"
1872 "pmaddwd 8+%4, %%mm1 \n\t"
1873 "pmaddwd 16+%4, %%mm2 \n\t"
1874 "pmaddwd %%mm6, %%mm3 \n\t"
1875 "paddd %%mm1, %%mm0 \n\t"
1876 "paddd %%mm3, %%mm2 \n\t"
1878 "movd 6(%0), %%mm1 \n\t"
1879 "movd 8(%0), %%mm3 \n\t"
1881 "punpcklbw %%mm7, %%mm1 \n\t"
1882 "punpcklbw %%mm7, %%mm3 \n\t"
1883 "movq %%mm1, %%mm4 \n\t"
1884 "movq %%mm3, %%mm5 \n\t"
1885 "pmaddwd %4, %%mm1 \n\t"
1886 "pmaddwd 8+%4, %%mm3 \n\t"
1887 "pmaddwd 16+%4, %%mm4 \n\t"
1888 "pmaddwd %%mm6, %%mm5 \n\t"
1889 "paddd %%mm3, %%mm1 \n\t"
1890 "paddd %%mm5, %%mm4 \n\t"
1892 "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3 \n\t"
1893 "paddd %%mm3, %%mm0 \n\t"
1894 "paddd %%mm3, %%mm2 \n\t"
1895 "paddd %%mm3, %%mm1 \n\t"
1896 "paddd %%mm3, %%mm4 \n\t"
1897 "psrad $15, %%mm0 \n\t"
1898 "psrad $15, %%mm2 \n\t"
1899 "psrad $15, %%mm1 \n\t"
1900 "psrad $15, %%mm4 \n\t"
1901 "packssdw %%mm1, %%mm0 \n\t"
1902 "packssdw %%mm4, %%mm2 \n\t"
1903 "packuswb %%mm0, %%mm0 \n\t"
1904 "packuswb %%mm2, %%mm2 \n\t"
1905 "movd %%mm0, (%1, %%"REG_a") \n\t"
1906 "movd %%mm2, (%2, %%"REG_a") \n\t"
1907 "add $4, %%"REG_a" \n\t"
1910 : "r" (dstU+width), "r" (dstV+width), "g" ((x86_reg)-width), "m"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24][0])
/* BGR24 -> luma wrapper: MMX fast path, else scalar RY/GY/BY dot product
 * with rounding (the b/g/r extraction lines are not visible in this
 * excerpt; byte order is B,G,R per the sibling bgr24ToUV below). */
1916 static inline void RENAME(bgr24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1919 RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
1922 for (i=0; i<width; i++)
1928 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
1930 #endif /* HAVE_MMX */
/* BGR24 -> chroma wrapper: MMX fast path, else scalar RU/GU/BU and
 * RV/GV/BV dot products per pixel.  src2 must alias src1 (asserted). */
1933 static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1936 RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_BGR24);
1939 for (i=0; i<width; i++)
1941 int b= src1[3*i + 0];
1942 int g= src1[3*i + 1];
1943 int r= src1[3*i + 2];
1945 dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
1946 dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
1948 #endif /* HAVE_MMX */
1949 assert(src1 == src2);
/* BGR24 -> chroma, horizontally 2:1 subsampled: sums each adjacent pixel
 * pair per channel, then uses one extra shift bit for the average.
 * C only — no MMX path.  src2 must alias src1 (asserted). */
1952 static inline void RENAME(bgr24ToUV_half)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1955 for (i=0; i<width; i++)
1957 int b= src1[6*i + 0] + src1[6*i + 3];
1958 int g= src1[6*i + 1] + src1[6*i + 4];
1959 int r= src1[6*i + 2] + src1[6*i + 5];
1961 dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
1962 dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
1964 assert(src1 == src2);
/* RGB24 -> luma wrapper: reuses the MMX kernel with the RGB coefficient
 * tables selected via PIX_FMT_RGB24; scalar fallback (channel extraction
 * lines not visible in this excerpt; byte order is R,G,B per rgb24ToUV). */
1967 static inline void RENAME(rgb24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1970 RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24);
1973 for (i=0; i<width; i++)
1979 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
/* RGB24 -> chroma wrapper: MMX kernel with RGB tables, else scalar
 * per-pixel dot products (note R,G,B byte order vs. bgr24ToUV). */
1984 static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1988 RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24);
1992 for (i=0; i<width; i++)
1994 int r= src1[3*i + 0];
1995 int g= src1[3*i + 1];
1996 int b= src1[3*i + 2];
1998 dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
1999 dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
/* RGB24 -> chroma, horizontally 2:1 subsampled: sums adjacent pixel
 * pairs per channel with one extra shift bit for the average.  C only. */
2004 static inline void RENAME(rgb24ToUV_half)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
2008 for (i=0; i<width; i++)
2010 int r= src1[6*i + 0] + src1[6*i + 3];
2011 int g= src1[6*i + 1] + src1[6*i + 4];
2012 int b= src1[6*i + 2] + src1[6*i + 5];
2014 dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
2015 dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
/* Paletted input -> luma: look each source index up in pal[] and take the
 * low byte (the index-load line for `d` is not visible in this excerpt). */
2020 static inline void RENAME(palToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *pal)
2023 for (i=0; i<width; i++)
2027 dst[i]= pal[d] & 0xFF;
/* Paletted input -> chroma: look up pal[src1[i]] and split out the U/V
 * bytes (the extraction lines are not visible in this excerpt).
 * src2 must alias src1 (asserted). */
2031 static inline void RENAME(palToUV)(uint8_t *dstU, uint8_t *dstV,
2032 const uint8_t *src1, const uint8_t *src2,
2033 long width, uint32_t *pal)
2036 assert(src1 == src2);
2037 for (i=0; i<width; i++)
2039 int p= pal[src1[i]];
/* 1bpp monochrome -> 8-bit luma: expand each input byte `d` into 8 output
 * bytes, MSB first, mapping each bit to 0 or 255.  The byte load and the
 * bit-inversion that distinguishes the white-is-0 variant from the
 * black-is-0 variant are on lines not visible in this excerpt. */
2046 static inline void RENAME(monowhite2Y)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
2049 for (i=0; i<width/8; i++){
2052 dst[8*i+j]= ((d>>(7-j))&1)*255;
/* Same expansion for the opposite bit polarity. */
2056 static inline void RENAME(monoblack2Y)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
2059 for (i=0; i<width/8; i++){
2062 dst[8*i+j]= ((d>>(7-j))&1)*255;
2066 // bilinear / bicubic scaling
/* Horizontal FIR scaler: per the plain-C fallback at the end,
 *   dst[i] = min( (sum_j src[filterPos[i]+j] * filter[filterSize*i+j]) >> 7,
 *                 32767 )
 * Specialized MMX inline-asm paths handle filterSize 4 and 8 (two output
 * pixels per loop iteration); a generic MMX loop handles other multiples
 * of 4; AltiVec and plain C are the portable fallbacks.
 * NOTE(review): declarations, asm() statement boundaries, labels/jumps and
 * the #if/#elif preprocessor structure are elided in this view -- comments
 * below describe only the visible lines. */
2067 static inline void RENAME(hScale)(int16_t *dst, int dstW, const uint8_t *src, int srcW, int xInc,
2068 const int16_t *filter, const int16_t *filterPos, long filterSize)
2071 assert(filterSize % 4 == 0 && filterSize>0);
/* ---- filterSize==4: MMX path, two output pixels per iteration ---------- */
2072 if (filterSize==4) // Always true for upscaling, sometimes for down, too.
2074 x86_reg counter= -2*dstW;
/* counter counts up from -2*dstW toward 0; filterPos is biased so that
 * (%2, counter) addresses filterPos[i] inside the loop. */
2076 filterPos-= counter/2;
/* ebx is callee-saved (and may be the PIC register on x86-32): preserve it. */
2080 "push %%"REG_b" \n\t"
2082 "pxor %%mm7, %%mm7 \n\t"
2083 "push %%"REG_BP" \n\t" // we use 7 regs here ...
2084 "mov %%"REG_a", %%"REG_BP" \n\t"
/* Load two consecutive filterPos entries (eax/ebx = src offsets). */
2087 "movzwl (%2, %%"REG_BP"), %%eax \n\t"
2088 "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
/* mm1/mm3 = 4 int16 coefficients for each of the two output pixels. */
2089 "movq (%1, %%"REG_BP", 4), %%mm1 \n\t"
2090 "movq 8(%1, %%"REG_BP", 4), %%mm3 \n\t"
2091 "movd (%3, %%"REG_a"), %%mm0 \n\t"
2092 "movd (%3, %%"REG_b"), %%mm2 \n\t"
/* Zero-extend the 4 source bytes to 16 bits (mm7 == 0). */
2093 "punpcklbw %%mm7, %%mm0 \n\t"
2094 "punpcklbw %%mm7, %%mm2 \n\t"
/* Multiply-accumulate: pairs of src*coeff summed into 32-bit lanes. */
2095 "pmaddwd %%mm1, %%mm0 \n\t"
2096 "pmaddwd %%mm2, %%mm3 \n\t"
/* Horizontal add of the two 32-bit partial sums per pixel. */
2097 "movq %%mm0, %%mm4 \n\t"
2098 "punpckldq %%mm3, %%mm0 \n\t"
2099 "punpckhdq %%mm3, %%mm4 \n\t"
2100 "paddd %%mm4, %%mm0 \n\t"
/* >>7 then saturate to int16, matching the C fallback's FFMIN clip. */
2101 "psrad $7, %%mm0 \n\t"
2102 "packssdw %%mm0, %%mm0 \n\t"
2103 "movd %%mm0, (%4, %%"REG_BP") \n\t"
2104 "add $4, %%"REG_BP" \n\t"
2107 "pop %%"REG_BP" \n\t"
2109 "pop %%"REG_b" \n\t"
2112 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
/* ---- filterSize==8: same structure, two pmaddwd pairs per pixel -------- */
2118 else if (filterSize==8)
2120 x86_reg counter= -2*dstW;
2122 filterPos-= counter/2;
2126 "push %%"REG_b" \n\t"
2128 "pxor %%mm7, %%mm7 \n\t"
2129 "push %%"REG_BP" \n\t" // we use 7 regs here ...
2130 "mov %%"REG_a", %%"REG_BP" \n\t"
2133 "movzwl (%2, %%"REG_BP"), %%eax \n\t"
2134 "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
/* First 4 taps of each pixel's 8-tap filter (stride 8*int16 per pixel). */
2135 "movq (%1, %%"REG_BP", 8), %%mm1 \n\t"
2136 "movq 16(%1, %%"REG_BP", 8), %%mm3 \n\t"
2137 "movd (%3, %%"REG_a"), %%mm0 \n\t"
2138 "movd (%3, %%"REG_b"), %%mm2 \n\t"
2139 "punpcklbw %%mm7, %%mm0 \n\t"
2140 "punpcklbw %%mm7, %%mm2 \n\t"
2141 "pmaddwd %%mm1, %%mm0 \n\t"
2142 "pmaddwd %%mm2, %%mm3 \n\t"
/* Second 4 taps (bytes src[pos+4..7]) accumulated into mm0/mm3. */
2144 "movq 8(%1, %%"REG_BP", 8), %%mm1 \n\t"
2145 "movq 24(%1, %%"REG_BP", 8), %%mm5 \n\t"
2146 "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
2147 "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
2148 "punpcklbw %%mm7, %%mm4 \n\t"
2149 "punpcklbw %%mm7, %%mm2 \n\t"
2150 "pmaddwd %%mm1, %%mm4 \n\t"
2151 "pmaddwd %%mm2, %%mm5 \n\t"
2152 "paddd %%mm4, %%mm0 \n\t"
2153 "paddd %%mm5, %%mm3 \n\t"
2154 "movq %%mm0, %%mm4 \n\t"
2155 "punpckldq %%mm3, %%mm0 \n\t"
2156 "punpckhdq %%mm3, %%mm4 \n\t"
2157 "paddd %%mm4, %%mm0 \n\t"
2158 "psrad $7, %%mm0 \n\t"
2159 "packssdw %%mm0, %%mm0 \n\t"
2160 "movd %%mm0, (%4, %%"REG_BP") \n\t"
2161 "add $4, %%"REG_BP" \n\t"
2164 "pop %%"REG_BP" \n\t"
2166 "pop %%"REG_b" \n\t"
2169 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
/* ---- generic MMX path: inner loop over the filter taps, 'offset' marks
 * the end of the first pixel's tap row (used as the inner-loop bound). --- */
2177 uint8_t *offset = src+filterSize;
2178 x86_reg counter= -2*dstW;
2179 //filter-= counter*filterSize/2;
2180 filterPos-= counter/2;
2183 "pxor %%mm7, %%mm7 \n\t"
2186 "mov %2, %%"REG_c" \n\t"
2187 "movzwl (%%"REG_c", %0), %%eax \n\t"
2188 "movzwl 2(%%"REG_c", %0), %%edx \n\t"
2189 "mov %5, %%"REG_c" \n\t"
/* mm4/mm5 accumulate the two pixels' sums across the tap loop. */
2190 "pxor %%mm4, %%mm4 \n\t"
2191 "pxor %%mm5, %%mm5 \n\t"
2193 "movq (%1), %%mm1 \n\t"
2194 "movq (%1, %6), %%mm3 \n\t"
2195 "movd (%%"REG_c", %%"REG_a"), %%mm0 \n\t"
2196 "movd (%%"REG_c", %%"REG_d"), %%mm2 \n\t"
2197 "punpcklbw %%mm7, %%mm0 \n\t"
2198 "punpcklbw %%mm7, %%mm2 \n\t"
2199 "pmaddwd %%mm1, %%mm0 \n\t"
2200 "pmaddwd %%mm2, %%mm3 \n\t"
2201 "paddd %%mm3, %%mm5 \n\t"
2202 "paddd %%mm0, %%mm4 \n\t"
2204 "add $4, %%"REG_c" \n\t"
2205 "cmp %4, %%"REG_c" \n\t"
2208 "movq %%mm4, %%mm0 \n\t"
2209 "punpckldq %%mm5, %%mm4 \n\t"
2210 "punpckhdq %%mm5, %%mm0 \n\t"
2211 "paddd %%mm0, %%mm4 \n\t"
2212 "psrad $7, %%mm4 \n\t"
2213 "packssdw %%mm4, %%mm4 \n\t"
2214 "mov %3, %%"REG_a" \n\t"
2215 "movd %%mm4, (%%"REG_a", %0) \n\t"
2219 : "+r" (counter), "+r" (filter)
2220 : "m" (filterPos), "m" (dst), "m"(offset),
2221 "m" (src), "r" ((x86_reg)filterSize*2)
2222 : "%"REG_a, "%"REG_c, "%"REG_d
/* ---- AltiVec fallback (guarded by elided #elif HAVE_ALTIVEC) ----------- */
2227 hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
/* ---- Plain-C reference implementation ---------------------------------- */
2230 for (i=0; i<dstW; i++)
2233 int srcPos= filterPos[i];
2235 //printf("filterPos: %d\n", filterPos[i]);
2236 for (j=0; j<filterSize; j++)
2238 //printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
2239 val += ((int)src[srcPos + j])*filter[filterSize*i + j];
2241 //filter += hFilterSize;
2242 dst[i] = FFMIN(val>>7, (1<<15)-1); // the cubic equation does overflow ...
2245 #endif /* HAVE_ALTIVEC */
2246 #endif /* HAVE_MMX */
/* Fast C bilinear horizontal luma scale using 16.16 fixed-point stepping:
 * xx = integer source position, xalpha = top 7 bits of the fraction, and
 * dst[i] is a 15-bit-ish interpolated value (src<<7 scale).
 * NOTE(review): the xInc parameter line, the 'xpos += xInc' step and the
 * closing braces are elided in this view -- TODO confirm the step. */
2249 static inline void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst,
2250 int dstWidth, const uint8_t *src, int srcW,
2254 unsigned int xpos=0;
2255 for (i=0;i<dstWidth;i++)
2257 register unsigned int xx=xpos>>16;
2258 register unsigned int xalpha=(xpos&0xFFFF)>>9;
/* Linear interpolation between src[xx] and src[xx+1], scaled by 128. */
2259 dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
2264 // *** horizontal scale Y line to temp buffer
/* Horizontal scaling of one luma (or alpha, when isAlpha) line into dst:
 * 1) optional input format conversion into formatConvBuffer via the
 *    per-format internal_func, 2) either the generic c->hScale FIR path or
 *    the fast-bilinear MMX2 "funny code" / plain x86 asm path, 3) optional
 *    luma range conversion (JPEG <-> MPEG levels) at the end.
 * NOTE(review): braces, several #if/#else preprocessor lines, asm labels
 * and asm statement terminators are elided in this view. */
2265 static inline void RENAME(hyscale)(SwsContext *c, uint16_t *dst, long dstWidth, const uint8_t *src, int srcW, int xInc,
2266 int flags, const int16_t *hLumFilter,
2267 const int16_t *hLumFilterPos, int hLumFilterSize,
2268 int srcFormat, uint8_t *formatConvBuffer,
2269 uint32_t *pal, int isAlpha)
2271 int32_t *mmx2FilterPos = c->lumMmx2FilterPos;
2272 int16_t *mmx2Filter = c->lumMmx2Filter;
2273 int canMMX2BeUsed = c->canMMX2BeUsed;
2274 void *funnyYCode = c->funnyYCode;
/* Alpha lines use the alpha converter, luma lines the luma converter. */
2275 void (*internal_func)(uint8_t *, const uint8_t *, long, uint32_t *) = isAlpha ? c->hascale_internal : c->hyscale_internal;
2278 if (srcFormat == PIX_FMT_RGB32 || srcFormat == PIX_FMT_BGR32 )
2281 if (srcFormat == PIX_FMT_RGB32_1 || srcFormat == PIX_FMT_BGR32_1)
/* Convert the input line to 8-bit grey/alpha first, then scale that. */
2285 if (internal_func) {
2286 internal_func(formatConvBuffer, src, srcW, pal);
2287 src= formatConvBuffer;
2291 // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
2292 if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
2294 if (!(flags&SWS_FAST_BILINEAR))
2297 c->hScale(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
2299 else // fast bilinear upscale / crap downscale
2301 #if ARCH_X86 && CONFIG_GPL
/* MMX2 path runs generated code (funnyYCode); ebx must be saved manually
 * because x86-32 PIC builds reserve it. */
2305 uint64_t ebxsave __attribute__((aligned(8)));
2311 "mov %%"REG_b", %5 \n\t"
2313 "pxor %%mm7, %%mm7 \n\t"
2314 "mov %0, %%"REG_c" \n\t"
2315 "mov %1, %%"REG_D" \n\t"
2316 "mov %2, %%"REG_d" \n\t"
2317 "mov %3, %%"REG_b" \n\t"
2318 "xor %%"REG_a", %%"REG_a" \n\t" // i
2319 PREFETCH" (%%"REG_c") \n\t"
2320 PREFETCH" 32(%%"REG_c") \n\t"
2321 PREFETCH" 64(%%"REG_c") \n\t"
/* 64-bit variant: load chunk offset, advance src (REG_c) and dst (REG_D),
 * then fall through into the generated scaler code. */
2325 #define FUNNY_Y_CODE \
2326 "movl (%%"REG_b"), %%esi \n\t"\
2328 "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
2329 "add %%"REG_S", %%"REG_c" \n\t"\
2330 "add %%"REG_a", %%"REG_D" \n\t"\
2331 "xor %%"REG_a", %%"REG_a" \n\t"\
/* 32-bit variant of the same glue. */
2335 #define FUNNY_Y_CODE \
2336 "movl (%%"REG_b"), %%esi \n\t"\
2338 "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
2339 "add %%"REG_a", %%"REG_D" \n\t"\
2340 "xor %%"REG_a", %%"REG_a" \n\t"\
2342 #endif /* ARCH_X86_64 */
2354 "mov %5, %%"REG_b" \n\t"
2356 :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
2361 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
/* Pad the tail: pixels whose source position would read past srcW-1. */
2366 for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
2370 #endif /* HAVE_MMX2 */
2371 x86_reg xInc_shr16 = xInc >> 16;
2372 uint16_t xInc_mask = xInc & 0xffff;
2373 //NO MMX just normal asm ...
/* Plain x86 asm bilinear loop, unrolled x2; ecx carries the 16-bit
 * fractional position, REG_d the integer source index. */
2375 "xor %%"REG_a", %%"REG_a" \n\t" // i
2376 "xor %%"REG_d", %%"REG_d" \n\t" // xx
2377 "xorl %%ecx, %%ecx \n\t" // 2*xalpha
2380 "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
2381 "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
2382 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
2383 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
2384 "shll $16, %%edi \n\t"
2385 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
2386 "mov %1, %%"REG_D" \n\t"
2387 "shrl $9, %%esi \n\t"
2388 "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
2389 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
2390 "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
/* Second unrolled iteration (stores to the odd output slot). */
2392 "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
2393 "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
2394 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
2395 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
2396 "shll $16, %%edi \n\t"
2397 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
2398 "mov %1, %%"REG_D" \n\t"
2399 "shrl $9, %%esi \n\t"
2400 "movw %%si, 2(%%"REG_D", %%"REG_a", 2) \n\t"
2401 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
2402 "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
2405 "add $2, %%"REG_a" \n\t"
2406 "cmp %2, %%"REG_a" \n\t"
2410 :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
2411 : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
2414 } //if MMX2 can't be used
/* Non-x86 builds use the C fast-bilinear implementation instead. */
2417 c->hyscale_fast(c, dst, dstWidth, src, srcW, xInc);
2418 #endif /* ARCH_X86 */
/* Luma range conversion (skipped for alpha and RGB/BGR destinations). */
2421 if(!isAlpha && c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))){
2423 //FIXME all pal and rgb srcFormats could do this conversion as well
2424 //FIXME all scalers more complex than bilinear could do half of this transform
2426 for (i=0; i<dstWidth; i++)
2427 dst[i]= (dst[i]*14071 + 33561947)>>14;
2429 for (i=0; i<dstWidth; i++)
2430 dst[i]= (FFMIN(dst[i],30189)*19077 - 39057361)>>14;
/* Fast C bilinear horizontal chroma scale: U goes to dst[0..], V to
 * dst[VOFW..], stepping a 16.16 fixed-point position.  Two interpolation
 * variants are visible (weights via xalpha^127 vs. the <<7 + delta form);
 * presumably an elided #if selects between them -- TODO confirm.
 * NOTE(review): the 'xpos += xInc' step and closing braces are elided. */
2435 static inline void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst,
2436 int dstWidth, const uint8_t *src1,
2437 const uint8_t *src2, int srcW, int xInc)
2440 unsigned int xpos=0;
2441 for (i=0;i<dstWidth;i++)
2443 register unsigned int xx=xpos>>16;
2444 register unsigned int xalpha=(xpos&0xFFFF)>>9;
2445 dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
2446 dst[i+VOFW]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
2448 dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
2449 dst[i+VOFW]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
/* Horizontal scaling of one chroma line pair (U from src1, V from src2)
 * into dst / dst+VOFW.  Mirrors hyscale: optional format conversion via
 * c->hcscale_internal, then either the generic c->hScale FIR path or the
 * fast-bilinear MMX2 / plain-asm path, then optional chroma range
 * conversion.  NOTE(review): braces, #if/#else lines, asm labels and asm
 * statement terminators are elided in this view. */
2455 inline static void RENAME(hcscale)(SwsContext *c, uint16_t *dst, long dstWidth, const uint8_t *src1, const uint8_t *src2,
2456 int srcW, int xInc, int flags, const int16_t *hChrFilter,
2457 const int16_t *hChrFilterPos, int hChrFilterSize,
2458 int srcFormat, uint8_t *formatConvBuffer,
2461 int32_t *mmx2FilterPos = c->chrMmx2FilterPos;
2462 int16_t *mmx2Filter = c->chrMmx2Filter;
2463 int canMMX2BeUsed = c->canMMX2BeUsed;
2464 void *funnyUVCode = c->funnyUVCode;
/* Grey/mono sources carry no chroma: nothing to do (early-out presumed on
 * the elided line -- TODO confirm). */
2466 if (isGray(srcFormat) || srcFormat==PIX_FMT_MONOBLACK || srcFormat==PIX_FMT_MONOWHITE)
2469 if (srcFormat==PIX_FMT_RGB32_1) {
/* Convert input to planar U/V halves of formatConvBuffer, then scale those. */
2474 if (c->hcscale_internal) {
2475 c->hcscale_internal(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2476 src1= formatConvBuffer;
2477 src2= formatConvBuffer+VOFW;
2481 // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
2482 if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
2484 if (!(flags&SWS_FAST_BILINEAR))
2487 c->hScale(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
2488 c->hScale(dst+VOFW, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
2490 else // fast bilinear upscale / crap downscale
2492 #if ARCH_X86 && CONFIG_GPL
/* ebx must be preserved manually (PIC register on x86-32 builds). */
2496 uint64_t ebxsave __attribute__((aligned(8)));
2502 "mov %%"REG_b", %6 \n\t"
2504 "pxor %%mm7, %%mm7 \n\t"
2505 "mov %0, %%"REG_c" \n\t"
2506 "mov %1, %%"REG_D" \n\t"
2507 "mov %2, %%"REG_d" \n\t"
2508 "mov %3, %%"REG_b" \n\t"
2509 "xor %%"REG_a", %%"REG_a" \n\t" // i
2510 PREFETCH" (%%"REG_c") \n\t"
2511 PREFETCH" 32(%%"REG_c") \n\t"
2512 PREFETCH" 64(%%"REG_c") \n\t"
/* 64-bit glue around the generated scaler code (funnyUVCode). */
2516 #define FUNNY_UV_CODE \
2517 "movl (%%"REG_b"), %%esi \n\t"\
2519 "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
2520 "add %%"REG_S", %%"REG_c" \n\t"\
2521 "add %%"REG_a", %%"REG_D" \n\t"\
2522 "xor %%"REG_a", %%"REG_a" \n\t"\
/* 32-bit variant of the same glue. */
2526 #define FUNNY_UV_CODE \
2527 "movl (%%"REG_b"), %%esi \n\t"\
2529 "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
2530 "add %%"REG_a", %%"REG_D" \n\t"\
2531 "xor %%"REG_a", %%"REG_a" \n\t"\
2533 #endif /* ARCH_X86_64 */
/* Second pass: same generated code over src2 (V), writing at dst+VOF. */
2539 "xor %%"REG_a", %%"REG_a" \n\t" // i
2540 "mov %5, %%"REG_c" \n\t" // src
2541 "mov %1, %%"REG_D" \n\t" // buf1
2542 "add $"AV_STRINGIFY(VOF)", %%"REG_D" \n\t"
2543 PREFETCH" (%%"REG_c") \n\t"
2544 PREFETCH" 32(%%"REG_c") \n\t"
2545 PREFETCH" 64(%%"REG_c") \n\t"
2553 "mov %6, %%"REG_b" \n\t"
2555 :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
2556 "m" (funnyUVCode), "m" (src2)
2560 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
/* Pad tail pixels that would read past srcW-1 in either plane. */
2565 for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
2567 //printf("%d %d %d\n", dstWidth, i, srcW);
2568 dst[i] = src1[srcW-1]*128;
2569 dst[i+VOFW] = src2[srcW-1]*128;
2574 #endif /* HAVE_MMX2 */
2575 x86_reg xInc_shr16 = (x86_reg) (xInc >> 16);
2576 uint16_t xInc_mask = xInc & 0xffff;
/* Plain x86 asm bilinear loop: one U and one V pixel per iteration. */
2578 "xor %%"REG_a", %%"REG_a" \n\t" // i
2579 "xor %%"REG_d", %%"REG_d" \n\t" // xx
2580 "xorl %%ecx, %%ecx \n\t" // 2*xalpha
2583 "mov %0, %%"REG_S" \n\t"
2584 "movzbl (%%"REG_S", %%"REG_d"), %%edi \n\t" //src[xx]
2585 "movzbl 1(%%"REG_S", %%"REG_d"), %%esi \n\t" //src[xx+1]
2586 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
2587 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
2588 "shll $16, %%edi \n\t"
2589 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
2590 "mov %1, %%"REG_D" \n\t"
2591 "shrl $9, %%esi \n\t"
2592 "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
/* Same interpolation for the V plane (src2, offset VOF in the output). */
2594 "movzbl (%5, %%"REG_d"), %%edi \n\t" //src[xx]
2595 "movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1]
2596 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
2597 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
2598 "shll $16, %%edi \n\t"
2599 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
2600 "mov %1, %%"REG_D" \n\t"
2601 "shrl $9, %%esi \n\t"
2602 "movw %%si, "AV_STRINGIFY(VOF)"(%%"REG_D", %%"REG_a", 2) \n\t"
2604 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
2605 "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
2606 "add $1, %%"REG_a" \n\t"
2607 "cmp %2, %%"REG_a" \n\t"
2610 /* GCC 3.3 makes MPlayer crash on IA-32 machines when using "g" operand here,
2611 which is needed to support GCC 4.0. */
2612 #if ARCH_X86_64 && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
2613 :: "m" (src1), "m" (dst), "g" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
2615 :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
2618 : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
2621 } //if MMX2 can't be used
/* Non-x86 builds use the C fast-bilinear implementation instead. */
2624 c->hcscale_fast(c, dst, dstWidth, src1, src2, srcW, xInc);
2625 #endif /* ARCH_X86 */
/* Chroma range conversion (skipped for RGB/BGR destinations). */
2627 if(c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))){
2629 //FIXME all pal and rgb srcFormats could do this conversion as well
2630 //FIXME all scalers more complex than bilinear could do half of this transform
2632 for (i=0; i<dstWidth; i++){
2633 dst[i ]= (dst[i ]*1799 + 4081085)>>11; //1469
2634 dst[i+VOFW]= (dst[i+VOFW]*1799 + 4081085)>>11; //1469
2637 for (i=0; i<dstWidth; i++){
2638 dst[i ]= (FFMIN(dst[i ],30775)*4663 - 9289992)>>12; //-264
2639 dst[i+VOFW]= (FFMIN(dst[i+VOFW],30775)*4663 - 9289992)>>12; //-264
/* Main per-slice scaling driver.  For each output line: pull the needed
 * source lines through the horizontal scalers into the lum/chr/alp ring
 * buffers, then vertically filter + pack them into the destination via the
 * yuv2* output functions.  Returns the number of output lines produced
 * (dstY - lastDstY).
 * NOTE(review): braces, several declarations (e.g. dstY, lastDstY) and
 * #if/#else lines are elided in this view; comments cover only what is
 * visible. */
2645 static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
2646 int srcSliceH, uint8_t* dst[], int dstStride[]){
2648 /* load a few things into local vars to make the code more readable? and faster */
2649 const int srcW= c->srcW;
2650 const int dstW= c->dstW;
2651 const int dstH= c->dstH;
2652 const int chrDstW= c->chrDstW;
2653 const int chrSrcW= c->chrSrcW;
2654 const int lumXInc= c->lumXInc;
2655 const int chrXInc= c->chrXInc;
2656 const int dstFormat= c->dstFormat;
2657 const int srcFormat= c->srcFormat;
2658 const int flags= c->flags;
2659 int16_t *vLumFilterPos= c->vLumFilterPos;
2660 int16_t *vChrFilterPos= c->vChrFilterPos;
2661 int16_t *hLumFilterPos= c->hLumFilterPos;
2662 int16_t *hChrFilterPos= c->hChrFilterPos;
2663 int16_t *vLumFilter= c->vLumFilter;
2664 int16_t *vChrFilter= c->vChrFilter;
2665 int16_t *hLumFilter= c->hLumFilter;
2666 int16_t *hChrFilter= c->hChrFilter;
2667 int32_t *lumMmxFilter= c->lumMmxFilter;
2668 int32_t *chrMmxFilter= c->chrMmxFilter;
2669 int32_t *alpMmxFilter= c->alpMmxFilter;
2670 const int vLumFilterSize= c->vLumFilterSize;
2671 const int vChrFilterSize= c->vChrFilterSize;
2672 const int hLumFilterSize= c->hLumFilterSize;
2673 const int hChrFilterSize= c->hChrFilterSize;
2674 int16_t **lumPixBuf= c->lumPixBuf;
2675 int16_t **chrPixBuf= c->chrPixBuf;
2676 int16_t **alpPixBuf= c->alpPixBuf;
2677 const int vLumBufSize= c->vLumBufSize;
2678 const int vChrBufSize= c->vChrBufSize;
2679 uint8_t *formatConvBuffer= c->formatConvBuffer;
/* Chroma slice geometry in chroma-line units (rounding the height up). */
2680 const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
2681 const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
2683 uint32_t *pal=c->pal_yuv;
2685 /* vars which will change and which we need to store back in the context */
2687 int lumBufIndex= c->lumBufIndex;
2688 int chrBufIndex= c->chrBufIndex;
2689 int lastInLumBuf= c->lastInLumBuf;
2690 int lastInChrBuf= c->lastInChrBuf;
/* Packed input: the elided branch presumably aliases all planes/strides to
 * plane 0 -- TODO confirm; visible line mirrors stride 0 into slot 3. */
2692 if (isPacked(c->srcFormat)){
2700 srcStride[3]= srcStride[0];
2702 srcStride[1]<<= c->vChrDrop;
2703 srcStride[2]<<= c->vChrDrop;
2705 //printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
2706 // (int)dst[0], (int)dst[1], (int)dst[2]);
2708 #if 0 //self test FIXME move to a vfilter or something
2710 static volatile int i=0;
2712 if (srcFormat==PIX_FMT_YUV420P && i==1 && srcSliceH>= c->srcH)
2713 selfTest(src, srcStride, c->srcW, c->srcH);
2718 //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
2719 //dstStride[0],dstStride[1],dstStride[2]);
/* Warn once if the destination strides defeat aligned stores. */
2721 if (dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0 || dstStride[3]%8 != 0)
2723 static int warnedAlready=0; //FIXME move this into the context perhaps
2724 if (flags & SWS_PRINT_INFO && !warnedAlready)
2726 av_log(c, AV_LOG_WARNING, "Warning: dstStride is not aligned!\n"
2727 " ->cannot do aligned memory accesses anymore\n");
2732 /* Note the user might start scaling the picture in the middle so this
2733 will not get executed. This is not really intended but works
2734 currently, so people might do it. */
/* ---- Main loop over output lines --------------------------------------- */
2745 for (;dstY < dstH; dstY++){
2746 unsigned char *dest =dst[0]+dstStride[0]*dstY;
2747 const int chrDstY= dstY>>c->chrDstVSubSample;
2748 unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
2749 unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
2750 unsigned char *aDest=(CONFIG_SWSCALE_ALPHA && alpPixBuf) ? dst[3]+dstStride[3]*dstY : NULL;
2752 const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
2753 const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
2754 const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
2755 const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
2757 //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
2758 // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
2759 //handle holes (FAST_BILINEAR & weird filters)
2760 if (firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
2761 if (firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
2762 //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
2763 assert(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1);
2764 assert(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1);
2766 // Do we have enough lines in this slice to output the dstY line
2767 if (lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
2769 //Do horizontal scaling
/* Fill the luma (and alpha) ring buffer up to the last line this output
 * row needs. */
2770 while(lastInLumBuf < lastLumSrcY)
2772 uint8_t *src1= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
2773 uint8_t *src2= src[3]+(lastInLumBuf + 1 - srcSliceY)*srcStride[3];
2775 //printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
2776 assert(lumBufIndex < 2*vLumBufSize);
2777 assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
2778 assert(lastInLumBuf + 1 - srcSliceY >= 0);
2779 //printf("%d %d\n", lumBufIndex, vLumBufSize);
2780 RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, src1, srcW, lumXInc,
2781 flags, hLumFilter, hLumFilterPos, hLumFilterSize,
2782 c->srcFormat, formatConvBuffer,
2784 if (CONFIG_SWSCALE_ALPHA && alpPixBuf)
2785 RENAME(hyscale)(c, alpPixBuf[ lumBufIndex ], dstW, src2, srcW, lumXInc,
2786 flags, hLumFilter, hLumFilterPos, hLumFilterSize,
2787 c->srcFormat, formatConvBuffer,
/* Fill the chroma ring buffer likewise. */
2791 while(lastInChrBuf < lastChrSrcY)
2793 uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
2794 uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
2796 assert(chrBufIndex < 2*vChrBufSize);
2797 assert(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH));
2798 assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
2799 //FIXME replace parameters through context struct (some at least)
2801 if (!(isGray(srcFormat) || isGray(dstFormat)))
2802 RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
2803 flags, hChrFilter, hChrFilterPos, hChrFilterSize,
2804 c->srcFormat, formatConvBuffer,
2808 //wrap buf index around to stay inside the ring buffer
2809 if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
2810 if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
2812 else // not enough lines left in this slice -> load the rest in the buffer
2814 /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
2815 firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
2816 lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
2817 vChrBufSize, vLumBufSize);*/
2819 //Do horizontal scaling
2820 while(lastInLumBuf+1 < srcSliceY + srcSliceH)
2822 uint8_t *src1= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
2823 uint8_t *src2= src[3]+(lastInLumBuf + 1 - srcSliceY)*srcStride[3];
2825 assert(lumBufIndex < 2*vLumBufSize);
2826 assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
2827 assert(lastInLumBuf + 1 - srcSliceY >= 0);
2828 RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, src1, srcW, lumXInc,
2829 flags, hLumFilter, hLumFilterPos, hLumFilterSize,
2830 c->srcFormat, formatConvBuffer,
2832 if (CONFIG_SWSCALE_ALPHA && alpPixBuf)
2833 RENAME(hyscale)(c, alpPixBuf[ lumBufIndex ], dstW, src2, srcW, lumXInc,
2834 flags, hLumFilter, hLumFilterPos, hLumFilterSize,
2835 c->srcFormat, formatConvBuffer,
2839 while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
2841 uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
2842 uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
2844 assert(chrBufIndex < 2*vChrBufSize);
2845 assert(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH);
2846 assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
2848 if (!(isGray(srcFormat) || isGray(dstFormat)))
2849 RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
2850 flags, hChrFilter, hChrFilterPos, hChrFilterSize,
2851 c->srcFormat, formatConvBuffer,
2855 //wrap buf index around to stay inside the ring buffer
2856 if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
2857 if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
2858 break; //we can't output a dstY line so let's try with the next slice
/* Per-line dither state for 15/16-bit RGB output. */
2862 c->blueDither= ff_dither8[dstY&1];
2863 if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555)
2864 c->greenDither= ff_dither8[dstY&1];
2866 c->greenDither= ff_dither4[dstY&1];
2867 c->redDither= ff_dither8[(dstY+1)&1];
/* ---- Vertical filter + output (MMX branch; the non-MMX twin follows) --- */
2871 const int16_t **lumSrcPtr= (const int16_t **) lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
2872 const int16_t **chrSrcPtr= (const int16_t **) chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
2873 const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
/* Pack line pointers + coefficients into the layout the MMX vertical
 * filters read; APCK_* layout when SWS_ACCURATE_RND, 4-int32 slots else. */
2876 if (flags & SWS_ACCURATE_RND){
2877 int s= APCK_SIZE / 8;
2878 for (i=0; i<vLumFilterSize; i+=2){
2879 *(void**)&lumMmxFilter[s*i ]= lumSrcPtr[i ];
2880 *(void**)&lumMmxFilter[s*i+APCK_PTR2/4 ]= lumSrcPtr[i+(vLumFilterSize>1)];
2881 lumMmxFilter[s*i+APCK_COEF/4 ]=
2882 lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i ]
2883 + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
2884 if (CONFIG_SWSCALE_ALPHA && alpPixBuf){
2885 *(void**)&alpMmxFilter[s*i ]= alpSrcPtr[i ];
2886 *(void**)&alpMmxFilter[s*i+APCK_PTR2/4 ]= alpSrcPtr[i+(vLumFilterSize>1)];
2887 alpMmxFilter[s*i+APCK_COEF/4 ]=
2888 alpMmxFilter[s*i+APCK_COEF/4+1]= lumMmxFilter[s*i+APCK_COEF/4 ];
2891 for (i=0; i<vChrFilterSize; i+=2){
2892 *(void**)&chrMmxFilter[s*i ]= chrSrcPtr[i ];
2893 *(void**)&chrMmxFilter[s*i+APCK_PTR2/4 ]= chrSrcPtr[i+(vChrFilterSize>1)];
2894 chrMmxFilter[s*i+APCK_COEF/4 ]=
2895 chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i ]
2896 + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
2899 for (i=0; i<vLumFilterSize; i++)
/* Non-ACCURATE_RND layout: pointer split into two 32-bit halves plus the
 * coefficient replicated into two 16-bit lanes (*0x10001). */
2901 lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
2902 lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32;
2903 lumMmxFilter[4*i+2]=
2904 lumMmxFilter[4*i+3]=
2905 ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
2906 if (CONFIG_SWSCALE_ALPHA && alpPixBuf){
2907 alpMmxFilter[4*i+0]= (int32_t)alpSrcPtr[i];
2908 alpMmxFilter[4*i+1]= (uint64_t)alpSrcPtr[i] >> 32;
2909 alpMmxFilter[4*i+2]=
2910 alpMmxFilter[4*i+3]= lumMmxFilter[4*i+2];
2913 for (i=0; i<vChrFilterSize; i++)
2915 chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
2916 chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32;
2917 chrMmxFilter[4*i+2]=
2918 chrMmxFilter[4*i+3]=
2919 ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
/* Dispatch on destination format: NV12/21, planar YUV, or packed. */
2923 if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
2924 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
2925 if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
2927 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
2928 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2929 dest, uDest, dstW, chrDstW, dstFormat);
2931 else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12 like
2933 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
2934 if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
2935 if (vLumFilterSize == 1 && vChrFilterSize == 1) // unscaled YV12
2937 int16_t *lumBuf = lumPixBuf[0];
2938 int16_t *chrBuf= chrPixBuf[0];
2939 int16_t *alpBuf= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? alpPixBuf[0] : NULL;
2940 c->yuv2yuv1(c, lumBuf, chrBuf, alpBuf, dest, uDest, vDest, aDest, dstW, chrDstW);
2945 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
2946 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2947 alpSrcPtr, dest, uDest, vDest, aDest, dstW, chrDstW);
2952 assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
2953 assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
/* Packed RGB output: pick the cheapest vertical path available. */
2954 if (vLumFilterSize == 1 && vChrFilterSize == 2) //unscaled RGB
2956 int chrAlpha= vChrFilter[2*dstY+1];
2957 if(flags & SWS_FULL_CHR_H_INT){
2958 yuv2rgbXinC_full(c, //FIXME write a packed1_full function
2959 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
2960 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2961 alpSrcPtr, dest, dstW, dstY);
2963 c->yuv2packed1(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
2964 alpPixBuf ? *alpSrcPtr : NULL,
2965 dest, dstW, chrAlpha, dstFormat, flags, dstY);
2968 else if (vLumFilterSize == 2 && vChrFilterSize == 2) //bilinear upscale RGB
2970 int lumAlpha= vLumFilter[2*dstY+1];
2971 int chrAlpha= vChrFilter[2*dstY+1];
2973 lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001;
2975 chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
2976 if(flags & SWS_FULL_CHR_H_INT){
2977 yuv2rgbXinC_full(c, //FIXME write a packed2_full function
2978 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
2979 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2980 alpSrcPtr, dest, dstW, dstY);
2982 c->yuv2packed2(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
2983 alpPixBuf ? *alpSrcPtr : NULL, alpPixBuf ? *(alpSrcPtr+1) : NULL,
2984 dest, dstW, lumAlpha, chrAlpha, dstY);
2989 if(flags & SWS_FULL_CHR_H_INT){
2991 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
2992 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2993 alpSrcPtr, dest, dstW, dstY);
2996 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
2997 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2998 alpSrcPtr, dest, dstW, dstY);
/* ---- Non-MMX twin of the output dispatch (C paths only) ---------------- */
3003 else // hmm looks like we can't use MMX here without overwriting this array's tail
3005 const int16_t **lumSrcPtr= (const int16_t **)lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
3006 const int16_t **chrSrcPtr= (const int16_t **)chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
3007 const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **)alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
3008 if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
3009 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
3010 if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
3012 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
3013 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
3014 dest, uDest, dstW, chrDstW, dstFormat);
3016 else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12
3018 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
3019 if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
3021 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
3022 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
3023 alpSrcPtr, dest, uDest, vDest, aDest, dstW, chrDstW);
3027 assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
3028 assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
3029 if(flags & SWS_FULL_CHR_H_INT){
3031 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
3032 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
3033 alpSrcPtr, dest, dstW, dstY);
3036 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
3037 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
3038 alpSrcPtr, dest, dstW, dstY);
/* YUVA output without alpha input: fill the alpha plane with opaque 255. */
3044 if ((dstFormat == PIX_FMT_YUVA420P) && !alpPixBuf)
3045 fillPlane(dst[3], dstStride[3], dstW, dstY-lastDstY, lastDstY, 255);
/* Flush SIMD state before returning to C/float code. */
3048 if (flags & SWS_CPU_CAPS_MMX2 ) __asm__ volatile("sfence":::"memory");
3049 /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
3050 if (flags & SWS_CPU_CAPS_3DNOW) __asm__ volatile("femms" :::"memory");
3051 else __asm__ volatile("emms" :::"memory");
3053 /* store changed local vars back in the context */
3055 c->lumBufIndex= lumBufIndex;
3056 c->chrBufIndex= chrBufIndex;
3057 c->lastInLumBuf= lastInLumBuf;
3058 c->lastInChrBuf= lastInChrBuf;
3060 return dstY - lastDstY;
3063 static void RENAME(sws_init_swScale)(SwsContext *c)
3065 enum PixelFormat srcFormat = c->srcFormat;
3067 c->yuv2nv12X = RENAME(yuv2nv12X );
3068 c->yuv2yuv1 = RENAME(yuv2yuv1 );
3069 c->yuv2yuvX = RENAME(yuv2yuvX );
3070 c->yuv2packed1 = RENAME(yuv2packed1 );
3071 c->yuv2packed2 = RENAME(yuv2packed2 );
3072 c->yuv2packedX = RENAME(yuv2packedX );
3074 c->hScale = RENAME(hScale );
3076 c->hyscale_fast = RENAME(hyscale_fast);
3077 c->hcscale_fast = RENAME(hcscale_fast);
3079 c->hcscale_internal = NULL;
3081 case PIX_FMT_YUYV422 : c->hcscale_internal = RENAME(yuy2ToUV); break;
3082 case PIX_FMT_UYVY422 : c->hcscale_internal = RENAME(uyvyToUV); break;
3086 case PIX_FMT_BGR4_BYTE:
3087 case PIX_FMT_RGB4_BYTE: c->hcscale_internal = RENAME(palToUV); break;
3089 if (c->chrSrcHSubSample) {
3091 case PIX_FMT_RGB32 :
3092 case PIX_FMT_RGB32_1: c->hcscale_internal = RENAME(bgr32ToUV_half); break;
3093 case PIX_FMT_BGR24 : c->hcscale_internal = RENAME(bgr24ToUV_half); break;
3094 case PIX_FMT_BGR565 : c->hcscale_internal = RENAME(bgr16ToUV_half); break;
3095 case PIX_FMT_BGR555 : c->hcscale_internal = RENAME(bgr15ToUV_half); break;
3096 case PIX_FMT_BGR32 :
3097 case PIX_FMT_BGR32_1: c->hcscale_internal = RENAME(rgb32ToUV_half); break;
3098 case PIX_FMT_RGB24 : c->hcscale_internal = RENAME(rgb24ToUV_half); break;
3099 case PIX_FMT_RGB565 : c->hcscale_internal = RENAME(rgb16ToUV_half); break;
3100 case PIX_FMT_RGB555 : c->hcscale_internal = RENAME(rgb15ToUV_half); break;
3104 case PIX_FMT_RGB32 :
3105 case PIX_FMT_RGB32_1: c->hcscale_internal = RENAME(bgr32ToUV); break;
3106 case PIX_FMT_BGR24 : c->hcscale_internal = RENAME(bgr24ToUV); break;
3107 case PIX_FMT_BGR565 : c->hcscale_internal = RENAME(bgr16ToUV); break;
3108 case PIX_FMT_BGR555 : c->hcscale_internal = RENAME(bgr15ToUV); break;
3109 case PIX_FMT_BGR32 :
3110 case PIX_FMT_BGR32_1: c->hcscale_internal = RENAME(rgb32ToUV); break;
3111 case PIX_FMT_RGB24 : c->hcscale_internal = RENAME(rgb24ToUV); break;
3112 case PIX_FMT_RGB565 : c->hcscale_internal = RENAME(rgb16ToUV); break;
3113 case PIX_FMT_RGB555 : c->hcscale_internal = RENAME(rgb15ToUV); break;
3117 c->hyscale_internal = NULL;
3118 c->hascale_internal = NULL;
3119 switch (srcFormat) {
3120 case PIX_FMT_YUYV422 :
3121 case PIX_FMT_GRAY16BE : c->hyscale_internal = RENAME(yuy2ToY); break;
3122 case PIX_FMT_UYVY422 :
3123 case PIX_FMT_GRAY16LE : c->hyscale_internal = RENAME(uyvyToY); break;
3124 case PIX_FMT_BGR24 : c->hyscale_internal = RENAME(bgr24ToY); break;
3125 case PIX_FMT_BGR565 : c->hyscale_internal = RENAME(bgr16ToY); break;
3126 case PIX_FMT_BGR555 : c->hyscale_internal = RENAME(bgr15ToY); break;
3127 case PIX_FMT_RGB24 : c->hyscale_internal = RENAME(rgb24ToY); break;
3128 case PIX_FMT_RGB565 : c->hyscale_internal = RENAME(rgb16ToY); break;
3129 case PIX_FMT_RGB555 : c->hyscale_internal = RENAME(rgb15ToY); break;
3133 case PIX_FMT_BGR4_BYTE:
3134 case PIX_FMT_RGB4_BYTE: c->hyscale_internal = RENAME(palToY); break;
3135 case PIX_FMT_MONOBLACK: c->hyscale_internal = RENAME(monoblack2Y); break;
3136 case PIX_FMT_MONOWHITE: c->hyscale_internal = RENAME(monowhite2Y); break;
3137 case PIX_FMT_RGB32 :
3138 case PIX_FMT_RGB32_1: c->hyscale_internal = RENAME(bgr32ToY); break;
3139 case PIX_FMT_BGR32 :
3140 case PIX_FMT_BGR32_1: c->hyscale_internal = RENAME(rgb32ToY); break;
3143 switch (srcFormat) {
3144 case PIX_FMT_RGB32 :
3145 case PIX_FMT_RGB32_1:
3146 case PIX_FMT_BGR32 :
3147 case PIX_FMT_BGR32_1: c->hascale_internal = RENAME(abgrToA); break;