/*****************************************************************************
 * transforms_yuvmmx.h: MMX YUV transformation assembly
 *****************************************************************************
 * Copyright (C) 1999-2007 the VideoLAN team
 *
 * Authors: Olie Lho <ollie@sis.com.tw>
 *          Gaël Hendryckx <jimmy@via.ecp.fr>
 *          Samuel Hocevar <sam@zoy.org>
 *          Damien Fouilleul <damienf@videolan.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/
#ifdef MODULE_NAME_IS_i420_rgb_mmx

/* hope these constant values are cache line aligned */
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)
#define USED_U64(foo) \
    static const uint64_t foo __asm__ (#foo) __attribute__((used))
#else
#define USED_U64(foo) \
    static const uint64_t foo __asm__ (#foo) __attribute__((unused))
#endif
USED_U64(mmx_80w)     = 0x0080008000800080ULL; /* Will be referenced as %4 in inline asm */
USED_U64(mmx_10w)     = 0x1010101010101010ULL; /* -- as %5 */
USED_U64(mmx_00ffw)   = 0x00ff00ff00ff00ffULL; /* -- as %6 */
USED_U64(mmx_Y_coeff) = 0x253f253f253f253fULL; /* -- as %7 */

USED_U64(mmx_U_green) = 0xf37df37df37df37dULL; /* -- as %8 */
USED_U64(mmx_U_blue)  = 0x4093409340934093ULL; /* -- as %9 */
USED_U64(mmx_V_red)   = 0x3312331233123312ULL; /* -- as %10 */
USED_U64(mmx_V_green) = 0xe5fce5fce5fce5fcULL; /* -- as %11 */

USED_U64(mmx_mask_f8) = 0xf8f8f8f8f8f8f8f8ULL; /* -- as %12 */
USED_U64(mmx_mask_fc) = 0xfcfcfcfcfcfcfcfcULL; /* -- as %13 */
#if defined(CAN_COMPILE_MMX)

/* MMX assembly */

#define MMX_CALL(MMX_INSTRUCTIONS)          \
    do {                                    \
    __asm__ __volatile__(                   \
        ".p2align 3 \n\t"                   \
        MMX_INSTRUCTIONS                    \
        :                                   \
        : "r" (p_y), "r" (p_u),             \
          "r" (p_v), "r" (p_buffer),        \
          "m" (mmx_80w), "m" (mmx_10w),     \
          "m" (mmx_00ffw), "m" (mmx_Y_coeff),   \
          "m" (mmx_U_green), "m" (mmx_U_blue),  \
          "m" (mmx_V_red), "m" (mmx_V_green),   \
          "m" (mmx_mask_f8), "m" (mmx_mask_fc) ); \
    } while(0)

#define MMX_END __asm__ __volatile__ ( "emms" )
#define MMX_INIT_16 " \n\
movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"

#define MMX_INIT_16_GRAY " \n\
movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
#movl $0, (%3) # cache preload for image \n\
"
#define MMX_INIT_32 " \n\
movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movl $0, (%3) # cache preload for image \n\
movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"
/*
 * Do the multiply part of the conversion for even and odd pixels,
 *
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
 * mm6 -> Y even, mm7 -> Y odd
 */

#define MMX_YUV_MUL " \n\
# convert the chroma part \n\
punpcklbw %%mm4, %%mm0 # scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 \n\
punpcklbw %%mm4, %%mm1 # scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 \n\
psubsw %4, %%mm0 # Cb -= 128 \n\
psubsw %4, %%mm1 # Cr -= 128 \n\
psllw $3, %%mm0 # Promote precision \n\
psllw $3, %%mm1 # Promote precision \n\
movq %%mm0, %%mm2 # Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 \n\
movq %%mm1, %%mm3 # Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 \n\
pmulhw %8, %%mm2 # Mul Cb with green coeff -> Cb green \n\
pmulhw %11, %%mm3 # Mul Cr with green coeff -> Cr green \n\
pmulhw %9, %%mm0 # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 \n\
pmulhw %10, %%mm1 # Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 \n\
paddsw %%mm3, %%mm2 # Cb green + Cr green -> Cgreen \n\
\n\
# convert the luma part \n\
psubusb %5, %%mm6 # Y -= 16 \n\
movq %%mm6, %%mm7 # Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
pand %6, %%mm6 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0 \n\
psrlw $8, %%mm7 # get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 \n\
psllw $3, %%mm6 # Promote precision \n\
psllw $3, %%mm7 # Promote precision \n\
pmulhw %7, %%mm6 # Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 \n\
pmulhw %7, %%mm7 # Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 \n\
"
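
/*
 * Illustrative scalar equivalent of MMX_YUV_MUL (not part of the original
 * header; helper name and prototype are ours). It spells out the fixed-point
 * scheme behind the constants above: bias, promote precision with << 3, then
 * keep the high 16 bits of a signed multiply, which is what pmulhw does.
 */
static inline void yuv_mul_ref( uint8_t y, uint8_t u, uint8_t v,
                                int *p_luma, int *p_blue,
                                int *p_red, int *p_green )
{
    int i_cb = ((int)u - 128) << 3;            /* psubsw %4 ; psllw $3 */
    int i_cr = ((int)v - 128) << 3;
    int i_y  = (y < 16 ? 0 : y - 16) << 3;     /* psubusb %5 ; psllw $3 */

    *p_luma  = (i_y  * 0x253f) >> 16;                /* ~1.164*(Y-16)   */
    *p_blue  = (i_cb * 0x4093) >> 16;                /* ~2.018*(U-128)  */
    *p_red   = (i_cr * 0x3312) >> 16;                /* ~1.596*(V-128)  */
    *p_green = ((i_cb * (int16_t)0xf37d) >> 16)      /* ~-0.391*(U-128) */
             + ((i_cr * (int16_t)0xe5fc) >> 16);     /* ~-0.813*(V-128) */
}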
/*
 * Do the addition part of the conversion for even and odd pixels,
 *
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
 * mm6 -> Y even, mm7 -> Y odd
 */

#define MMX_YUV_ADD " \n\
# Do horizontal and vertical scaling \n\
movq %%mm0, %%mm3 # Copy Cblue \n\
movq %%mm1, %%mm4 # Copy Cred \n\
movq %%mm2, %%mm5 # Copy Cgreen \n\
paddsw %%mm6, %%mm0 # Y even + Cblue 00 B6 00 B4 00 B2 00 B0 \n\
paddsw %%mm7, %%mm3 # Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 \n\
paddsw %%mm6, %%mm1 # Y even + Cred 00 R6 00 R4 00 R2 00 R0 \n\
paddsw %%mm7, %%mm4 # Y odd + Cred 00 R7 00 R5 00 R3 00 R1 \n\
paddsw %%mm6, %%mm2 # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 \n\
paddsw %%mm7, %%mm5 # Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 \n\
\n\
# Limit RGB even to 0..255 \n\
packuswb %%mm0, %%mm0 # B6 B4 B2 B0 / B6 B4 B2 B0 \n\
packuswb %%mm1, %%mm1 # R6 R4 R2 R0 / R6 R4 R2 R0 \n\
packuswb %%mm2, %%mm2 # G6 G4 G2 G0 / G6 G4 G2 G0 \n\
\n\
# Limit RGB odd to 0..255 \n\
packuswb %%mm3, %%mm3 # B7 B5 B3 B1 / B7 B5 B3 B1 \n\
packuswb %%mm4, %%mm4 # R7 R5 R3 R1 / R7 R5 R3 R1 \n\
packuswb %%mm5, %%mm5 # G7 G5 G3 G1 / G7 G5 G3 G1 \n\
\n\
# Interleave RGB even and odd \n\
punpcklbw %%mm3, %%mm0 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%mm4, %%mm1 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%mm5, %%mm2 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
"
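
/*
 * Scalar sketch of MMX_YUV_ADD (illustrative, ours): each output channel is
 * luma plus one chroma term, saturated to 0..255; the paddsw/packuswb pair
 * does this for four even and four odd pixels in parallel.
 */
static inline uint8_t yuv_add_ref( int i_luma, int i_chroma )
{
    int i_sum = i_luma + i_chroma;
    return i_sum < 0 ? 0 : i_sum > 255 ? 255 : (uint8_t)i_sum;
}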
/*
 * Grayscale case, only use Y
 */

#define MMX_YUV_GRAY " \n\
# convert the luma part \n\
psubusb %5, %%mm6 \n\
movq %%mm6, %%mm7 \n\
pand %6, %%mm6 \n\
psrlw $8, %%mm7 \n\
psllw $3, %%mm6 \n\
psllw $3, %%mm7 \n\
pmulhw %7, %%mm6 \n\
pmulhw %7, %%mm7 \n\
packuswb %%mm6, %%mm6 \n\
packuswb %%mm7, %%mm7 \n\
punpcklbw %%mm7, %%mm6 \n\
"

#define MMX_UNPACK_16_GRAY " \n\
movq %%mm6, %%mm5 \n\
movq %%mm6, %%mm7 \n\
pxor %%mm3, %%mm3 \n\
movq %%mm7, %%mm2 \n\
movq %%mm5, %%mm0 \n\
punpcklbw %%mm3, %%mm5 \n\
punpcklbw %%mm6, %%mm7 \n\
punpckhbw %%mm3, %%mm0 \n\
punpckhbw %%mm6, %%mm2 \n\
movq 8(%0), %%mm6 \n\
movq %%mm2, 8(%3) \n\
"
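
/*
 * Scalar sketch of the grayscale 16 bpp path (illustrative, ours): the scaled
 * luma value is replicated into all three fields of an RGB565 pixel.
 */
static inline uint16_t gray_to_rgb16_ref( uint8_t i_gray )
{
    return (uint16_t)( ((i_gray & 0xf8) << 8)    /* red:   top 5 bits */
                     | ((i_gray & 0xfc) << 3)    /* green: top 6 bits */
                     |  (i_gray >> 3) );         /* blue:  top 5 bits */
}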
/*
 * convert RGB plane to RGB 15 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */

#define MMX_UNPACK_15 " \n\
# mask unneeded bits off \n\
pand %12, %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
psrlw $3,%%mm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pand %12, %%mm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
pand %12, %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $1,%%mm1 # __r7r6r5 r4r3____ __r7r6r5 r4r3____ \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq %%mm0, %%mm5 # Copy B7-B0 \n\
movq %%mm2, %%mm7 # Copy G7-G0 \n\
\n\
# convert rgb24 plane to rgb15 pack for pixel 0-3 \n\
punpcklbw %%mm4, %%mm2 # ________ ________ g7g6g5g4 g3______ \n\
punpcklbw %%mm1, %%mm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%mm2 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%mm2, %%mm0 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movq 8(%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
movq %%mm0, (%3) # store pixel 0-3 \n\
\n\
# convert rgb24 plane to rgb15 pack for pixel 4-7 \n\
punpckhbw %%mm4, %%mm7 # ________ ________ g7g6g5g4 g3______ \n\
punpckhbw %%mm1, %%mm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%mm7 # ________ ____g7g6 g5g4g3__ ________ \n\
movd 4(%1), %%mm0 # Load 4 Cb __ __ __ __ u3 u2 u1 u0 \n\
por %%mm7, %%mm5 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movd 4(%2), %%mm1 # Load 4 Cr __ __ __ __ v3 v2 v1 v0 \n\
movq %%mm5, 8(%3) # store pixel 4-7 \n\
"
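
/*
 * Scalar equivalent of the RGB15 pack above (illustrative helper, ours):
 * keep the top five bits of each channel, laid out as 0RRRRRGGGGGBBBBB.
 */
static inline uint16_t pack_rgb15_ref( uint8_t r, uint8_t g, uint8_t b )
{
    return (uint16_t)( ((r & 0xf8) << 7)    /* pand %12 ; psrlw $1 */
                     | ((g & 0xf8) << 2)    /* pand %12 ; psllw $2 */
                     |  (b >> 3) );         /* pand %12 ; psrlw $3 */
}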
/*
 * convert RGB plane to RGB 16 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */

#define MMX_UNPACK_16 " \n\
# mask unneeded bits off \n\
pand %12, %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
pand %13, %%mm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
pand %12, %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $3,%%mm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq %%mm0, %%mm5 # Copy B7-B0 \n\
movq %%mm2, %%mm7 # Copy G7-G0 \n\
\n\
# convert rgb24 plane to rgb16 pack for pixel 0-3 \n\
punpcklbw %%mm4, %%mm2 # ________ ________ g7g6g5g4 g3g2____ \n\
punpcklbw %%mm1, %%mm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%mm2 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%mm2, %%mm0 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movq 8(%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
movq %%mm0, (%3) # store pixel 0-3 \n\
\n\
# convert rgb24 plane to rgb16 pack for pixel 4-7 \n\
punpckhbw %%mm4, %%mm7 # ________ ________ g7g6g5g4 g3g2____ \n\
punpckhbw %%mm1, %%mm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%mm7 # ________ __g7g6g5 g4g3g2__ ________ \n\
movd 4(%1), %%mm0 # Load 4 Cb __ __ __ __ u3 u2 u1 u0 \n\
por %%mm7, %%mm5 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movd 4(%2), %%mm1 # Load 4 Cr __ __ __ __ v3 v2 v1 v0 \n\
movq %%mm5, 8(%3) # store pixel 4-7 \n\
"
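
/*
 * Scalar equivalent of the RGB16 pack (illustrative, ours): RGB565 keeps six
 * green bits, hence the 0xfc mask (%13) where RGB15 used 0xf8 (%12).
 */
static inline uint16_t pack_rgb16_ref( uint8_t r, uint8_t g, uint8_t b )
{
    return (uint16_t)( ((r & 0xf8) << 8)
                     | ((g & 0xfc) << 3)
                     |  (b >> 3) );
}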
/*
 * convert RGB plane to RGB packed format,
 * mm0 -> B, mm1 -> R, mm2 -> G
 */

#define MMX_UNPACK_32_ARGB " \n\
pxor %%mm3, %%mm3 # zero mm3 \n\
movq %%mm0, %%mm4 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%mm2, %%mm4 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
movq %%mm1, %%mm5 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%mm3, %%mm5 # 00 R3 00 R2 00 R1 00 R0 \n\
movq %%mm4, %%mm6 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
punpcklwd %%mm5, %%mm4 # 00 R1 B1 G1 00 R0 B0 G0 \n\
movq %%mm4, (%3) # Store ARGB1 ARGB0 \n\
punpckhwd %%mm5, %%mm6 # 00 R3 B3 G3 00 R2 B2 G2 \n\
movq %%mm6, 8(%3) # Store ARGB3 ARGB2 \n\
punpckhbw %%mm2, %%mm0 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpckhbw %%mm3, %%mm1 # 00 R7 00 R6 00 R5 00 R4 \n\
movq %%mm0, %%mm5 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpcklwd %%mm1, %%mm5 # 00 R5 B5 G5 00 R4 B4 G4 \n\
movq %%mm5, 16(%3) # Store ARGB5 ARGB4 \n\
punpckhwd %%mm1, %%mm0 # 00 R7 B7 G7 00 R6 B6 G6 \n\
movq %%mm0, 24(%3) # Store ARGB7 ARGB6 \n\
"
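
/*
 * Byte-level view of the ARGB unpack (illustrative, ours): each pixel lands
 * in memory as B, G, R, 0, i.e. 0x00RRGGBB when read as a little-endian
 * word. The RGBA/BGRA/ABGR variants below only swap which register feeds
 * which unpack slot.
 */
static inline uint32_t pack_argb_ref( uint8_t r, uint8_t g, uint8_t b )
{
    return ((uint32_t)r << 16) | ((uint32_t)g << 8) | (uint32_t)b;
}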
#define MMX_UNPACK_32_RGBA " \n\
pxor %%mm3, %%mm3 # zero mm3 \n\
movq %%mm2, %%mm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
punpcklbw %%mm1, %%mm4 # R3 G3 R2 G2 R1 G1 R0 G0 \n\
punpcklbw %%mm0, %%mm3 # B3 00 B2 00 B1 00 B0 00 \n\
movq %%mm3, %%mm5 # B3 00 B2 00 B1 00 B0 00 \n\
punpcklwd %%mm4, %%mm3 # R1 G1 B1 00 R0 G0 B0 00 \n\
movq %%mm3, (%3) # Store RGBA1 RGBA0 \n\
punpckhwd %%mm4, %%mm5 # R3 G3 B3 00 R2 G2 B2 00 \n\
movq %%mm5, 8(%3) # Store RGBA3 RGBA2 \n\
pxor %%mm6, %%mm6 # zero mm6 \n\
punpckhbw %%mm1, %%mm2 # R7 G7 R6 G6 R5 G5 R4 G4 \n\
punpckhbw %%mm0, %%mm6 # B7 00 B6 00 B5 00 B4 00 \n\
movq %%mm6, %%mm0 # B7 00 B6 00 B5 00 B4 00 \n\
punpcklwd %%mm2, %%mm6 # R5 G5 B5 00 R4 G4 B4 00 \n\
movq %%mm6, 16(%3) # Store RGBA5 RGBA4 \n\
punpckhwd %%mm2, %%mm0 # R7 G7 B7 00 R6 G6 B6 00 \n\
movq %%mm0, 24(%3) # Store RGBA7 RGBA6 \n\
"

#define MMX_UNPACK_32_BGRA " \n\
pxor %%mm3, %%mm3 # zero mm3 \n\
movq %%mm2, %%mm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
punpcklbw %%mm0, %%mm4 # B3 G3 B2 G2 B1 G1 B0 G0 \n\
punpcklbw %%mm1, %%mm3 # R3 00 R2 00 R1 00 R0 00 \n\
movq %%mm3, %%mm5 # R3 00 R2 00 R1 00 R0 00 \n\
punpcklwd %%mm4, %%mm3 # B1 G1 R1 00 B0 G0 R0 00 \n\
movq %%mm3, (%3) # Store BGRA1 BGRA0 \n\
punpckhwd %%mm4, %%mm5 # B3 G3 R3 00 B2 G2 R2 00 \n\
movq %%mm5, 8(%3) # Store BGRA3 BGRA2 \n\
pxor %%mm6, %%mm6 # zero mm6 \n\
punpckhbw %%mm0, %%mm2 # B7 G7 B6 G6 B5 G5 B4 G4 \n\
punpckhbw %%mm1, %%mm6 # R7 00 R6 00 R5 00 R4 00 \n\
movq %%mm6, %%mm0 # R7 00 R6 00 R5 00 R4 00 \n\
punpcklwd %%mm2, %%mm6 # B5 G5 R5 00 B4 G4 R4 00 \n\
movq %%mm6, 16(%3) # Store BGRA5 BGRA4 \n\
punpckhwd %%mm2, %%mm0 # B7 G7 R7 00 B6 G6 R6 00 \n\
movq %%mm0, 24(%3) # Store BGRA7 BGRA6 \n\
"

#define MMX_UNPACK_32_ABGR " \n\
pxor %%mm3, %%mm3 # zero mm3 \n\
movq %%mm1, %%mm4 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%mm2, %%mm4 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
movq %%mm0, %%mm5 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%mm3, %%mm5 # 00 B3 00 B2 00 B1 00 B0 \n\
movq %%mm4, %%mm6 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
punpcklwd %%mm5, %%mm4 # 00 B1 G1 R1 00 B0 G0 R0 \n\
movq %%mm4, (%3) # Store ABGR1 ABGR0 \n\
punpckhwd %%mm5, %%mm6 # 00 B3 G3 R3 00 B2 G2 R2 \n\
movq %%mm6, 8(%3) # Store ABGR3 ABGR2 \n\
punpckhbw %%mm2, %%mm1 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
punpckhbw %%mm3, %%mm0 # 00 B7 00 B6 00 B5 00 B4 \n\
movq %%mm1, %%mm2 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
punpcklwd %%mm0, %%mm1 # 00 B5 G5 R5 00 B4 G4 R4 \n\
movq %%mm1, 16(%3) # Store ABGR5 ABGR4 \n\
punpckhwd %%mm0, %%mm2 # 00 B7 G7 R7 00 B6 G6 R6 \n\
movq %%mm2, 24(%3) # Store ABGR7 ABGR6 \n\
"
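
/*
 * Sketch of how callers are expected to chain these fragments (modeled on
 * the converter loops such as i420_rgb16.c; the wrapper function itself is
 * ours, for illustration). Each call converts 8 pixels.
 */
static inline void i420_rgb16_8px_sketch( uint8_t *p_y, uint8_t *p_u,
                                          uint8_t *p_v, uint16_t *p_buffer )
{
    MMX_CALL( MMX_INIT_16       /* load 8 Y, 4 Cb, 4 Cr             */
              MMX_YUV_MUL       /* coefficient multiplies           */
              MMX_YUV_ADD       /* add, saturate, interleave        */
              MMX_UNPACK_16 );  /* pack and store 8 RGB565 pixels   */
    MMX_END; /* real callers issue this once per picture, not per 8 pixels */
}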
#elif defined(HAVE_MMX_INTRINSICS)

/* MMX intrinsics */

#include <mmintrin.h>

#define MMX_CALL(MMX_INSTRUCTIONS)  \
    do {                            \
        __m64 mm0, mm1, mm2, mm3,   \
              mm4, mm5, mm6, mm7;   \
        MMX_INSTRUCTIONS            \
    } while(0)

#define MMX_END _mm_empty()
#define MMX_INIT_16 \
    mm0 = _mm_cvtsi32_si64(*(int*)p_u); \
    mm1 = _mm_cvtsi32_si64(*(int*)p_v); \
    mm4 = _mm_setzero_si64(); \
    mm6 = (__m64)*(uint64_t *)p_y;

#define MMX_INIT_32 \
    mm0 = _mm_cvtsi32_si64(*(int*)p_u); \
    *(uint16_t *)p_buffer = 0; \
    mm1 = _mm_cvtsi32_si64(*(int*)p_v); \
    mm4 = _mm_setzero_si64(); \
    mm6 = (__m64)*(uint64_t *)p_y;
#define MMX_YUV_MUL \
    mm0 = _mm_unpacklo_pi8(mm0, mm4); \
    mm1 = _mm_unpacklo_pi8(mm1, mm4); \
    mm0 = _mm_subs_pi16(mm0, (__m64)mmx_80w); \
    mm1 = _mm_subs_pi16(mm1, (__m64)mmx_80w); \
    mm0 = _mm_slli_pi16(mm0, 3); \
    mm1 = _mm_slli_pi16(mm1, 3); \
    mm2 = mm0; \
    mm3 = mm1; \
    mm2 = _mm_mulhi_pi16(mm2, (__m64)mmx_U_green); \
    mm3 = _mm_mulhi_pi16(mm3, (__m64)mmx_V_green); \
    mm0 = _mm_mulhi_pi16(mm0, (__m64)mmx_U_blue); \
    mm1 = _mm_mulhi_pi16(mm1, (__m64)mmx_V_red); \
    mm2 = _mm_adds_pi16(mm2, mm3); \
    \
    mm6 = _mm_subs_pu8(mm6, (__m64)mmx_10w); \
    mm7 = mm6; \
    mm6 = _mm_and_si64(mm6, (__m64)mmx_00ffw); \
    mm7 = _mm_srli_pi16(mm7, 8); \
    mm6 = _mm_slli_pi16(mm6, 3); \
    mm7 = _mm_slli_pi16(mm7, 3); \
    mm6 = _mm_mulhi_pi16(mm6, (__m64)mmx_Y_coeff); \
    mm7 = _mm_mulhi_pi16(mm7, (__m64)mmx_Y_coeff);
#define MMX_YUV_ADD \
    mm3 = mm0; \
    mm4 = mm1; \
    mm5 = mm2; \
    mm0 = _mm_adds_pi16(mm0, mm6); \
    mm3 = _mm_adds_pi16(mm3, mm7); \
    mm1 = _mm_adds_pi16(mm1, mm6); \
    mm4 = _mm_adds_pi16(mm4, mm7); \
    mm2 = _mm_adds_pi16(mm2, mm6); \
    mm5 = _mm_adds_pi16(mm5, mm7); \
    \
    mm0 = _mm_packs_pu16(mm0, mm0); \
    mm1 = _mm_packs_pu16(mm1, mm1); \
    mm2 = _mm_packs_pu16(mm2, mm2); \
    \
    mm3 = _mm_packs_pu16(mm3, mm3); \
    mm4 = _mm_packs_pu16(mm4, mm4); \
    mm5 = _mm_packs_pu16(mm5, mm5); \
    \
    mm0 = _mm_unpacklo_pi8(mm0, mm3); \
    mm1 = _mm_unpacklo_pi8(mm1, mm4); \
    mm2 = _mm_unpacklo_pi8(mm2, mm5);
#define MMX_UNPACK_15 \
    mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
    mm0 = _mm_srli_pi16(mm0, 3); \
    mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_f8); \
    mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
    mm1 = _mm_srli_pi16(mm1, 1); \
    mm4 = _mm_setzero_si64(); \
    mm5 = mm0; \
    mm7 = mm2; \
    \
    mm2 = _mm_unpacklo_pi8(mm2, mm4); \
    mm0 = _mm_unpacklo_pi8(mm0, mm1); \
    mm2 = _mm_slli_pi16(mm2, 2); \
    mm0 = _mm_or_si64(mm0, mm2); \
    mm6 = (__m64)*(uint64_t *)(p_y + 8); \
    *(uint64_t *)p_buffer = (uint64_t)mm0; \
    \
    mm7 = _mm_unpackhi_pi8(mm7, mm4); \
    mm5 = _mm_unpackhi_pi8(mm5, mm1); \
    mm7 = _mm_slli_pi16(mm7, 2); \
    mm0 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_u + 4)); \
    mm5 = _mm_or_si64(mm5, mm7); \
    mm1 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_v + 4)); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;
#define MMX_UNPACK_16 \
    mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
    mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_fc); \
    mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
    mm0 = _mm_srli_pi16(mm0, 3); \
    mm4 = _mm_setzero_si64(); \
    mm5 = mm0; \
    mm7 = mm2; \
    \
    mm2 = _mm_unpacklo_pi8(mm2, mm4); \
    mm0 = _mm_unpacklo_pi8(mm0, mm1); \
    mm2 = _mm_slli_pi16(mm2, 3); \
    mm0 = _mm_or_si64(mm0, mm2); \
    mm6 = (__m64)*(uint64_t *)(p_y + 8); \
    *(uint64_t *)p_buffer = (uint64_t)mm0; \
    \
    mm7 = _mm_unpackhi_pi8(mm7, mm4); \
    mm5 = _mm_unpackhi_pi8(mm5, mm1); \
    mm7 = _mm_slli_pi16(mm7, 3); \
    mm0 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_u + 4)); \
    mm5 = _mm_or_si64(mm5, mm7); \
    mm1 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_v + 4)); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;
#define MMX_UNPACK_32_ARGB \
    mm3 = _mm_setzero_si64(); \
    mm4 = mm0; \
    mm4 = _mm_unpacklo_pi8(mm4, mm2); \
    mm5 = mm1; \
    mm5 = _mm_unpacklo_pi8(mm5, mm3); \
    mm6 = mm4; \
    mm4 = _mm_unpacklo_pi16(mm4, mm5); \
    *(uint64_t *)p_buffer = (uint64_t)mm4; \
    mm6 = _mm_unpackhi_pi16(mm6, mm5); \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6;\
    mm0 = _mm_unpackhi_pi8(mm0, mm2); \
    mm1 = _mm_unpackhi_pi8(mm1, mm3); \
    mm5 = mm0; \
    mm5 = _mm_unpacklo_pi16(mm5, mm1); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;\
    mm0 = _mm_unpackhi_pi16(mm0, mm1); \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;

#define MMX_UNPACK_32_RGBA \
    mm3 = _mm_setzero_si64(); \
    mm4 = mm2; \
    mm4 = _mm_unpacklo_pi8(mm4, mm1); \
    mm3 = _mm_unpacklo_pi8(mm3, mm0); \
    mm5 = mm3; \
    mm3 = _mm_unpacklo_pi16(mm3, mm4); \
    *(uint64_t *)p_buffer = (uint64_t)mm3; \
    mm5 = _mm_unpackhi_pi16(mm5, mm4); \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm5;\
    mm6 = _mm_setzero_si64(); \
    mm2 = _mm_unpackhi_pi8(mm2, mm1); \
    mm6 = _mm_unpackhi_pi8(mm6, mm0); \
    mm0 = mm6; \
    mm6 = _mm_unpacklo_pi16(mm6, mm2); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm6;\
    mm0 = _mm_unpackhi_pi16(mm0, mm2); \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;

#define MMX_UNPACK_32_BGRA \
    mm3 = _mm_setzero_si64(); \
    mm4 = mm2; \
    mm4 = _mm_unpacklo_pi8(mm4, mm0); \
    mm3 = _mm_unpacklo_pi8(mm3, mm1); \
    mm5 = mm3; \
    mm3 = _mm_unpacklo_pi16(mm3, mm4); \
    *(uint64_t *)p_buffer = (uint64_t)mm3; \
    mm5 = _mm_unpackhi_pi16(mm5, mm4); \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm5;\
    mm6 = _mm_setzero_si64(); \
    mm2 = _mm_unpackhi_pi8(mm2, mm0); \
    mm6 = _mm_unpackhi_pi8(mm6, mm1); \
    mm0 = mm6; \
    mm6 = _mm_unpacklo_pi16(mm6, mm2); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm6;\
    mm0 = _mm_unpackhi_pi16(mm0, mm2); \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;

#define MMX_UNPACK_32_ABGR \
    mm3 = _mm_setzero_si64(); \
    mm4 = mm1; \
    mm4 = _mm_unpacklo_pi8(mm4, mm2); \
    mm5 = mm0; \
    mm5 = _mm_unpacklo_pi8(mm5, mm3); \
    mm6 = mm4; \
    mm4 = _mm_unpacklo_pi16(mm4, mm5); \
    *(uint64_t *)p_buffer = (uint64_t)mm4; \
    mm6 = _mm_unpackhi_pi16(mm6, mm5); \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6;\
    mm1 = _mm_unpackhi_pi8(mm1, mm2); \
    mm0 = _mm_unpackhi_pi8(mm0, mm3); \
    mm2 = mm1; \
    mm1 = _mm_unpacklo_pi16(mm1, mm0); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm1;\
    mm2 = _mm_unpackhi_pi16(mm2, mm0); \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm2;

#endif
#elif defined( MODULE_NAME_IS_i420_rgb_sse2 )

#if defined(CAN_COMPILE_SSE2)

/* SSE2 assembly */

#define SSE2_CALL(SSE2_INSTRUCTIONS)    \
    do {                                \
    __asm__ __volatile__(               \
        ".p2align 3 \n\t"               \
        SSE2_INSTRUCTIONS               \
        :                               \
        : "r" (p_y), "r" (p_u),         \
          "r" (p_v), "r" (p_buffer)     \
        : "eax" );                      \
    } while(0)

#define SSE2_END __asm__ __volatile__ ( "sfence" ::: "memory" )
#define SSE2_INIT_16_ALIGNED " \n\
movq (%1), %%xmm0 # Load 8 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movq (%2), %%xmm1 # Load 8 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%xmm4, %%xmm4 # zero xmm4 \n\
movdqa (%0), %%xmm6 # Load 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"

#define SSE2_INIT_16_UNALIGNED " \n\
movq (%1), %%xmm0 # Load 8 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movq (%2), %%xmm1 # Load 8 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%xmm4, %%xmm4 # zero xmm4 \n\
movdqu (%0), %%xmm6 # Load 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
prefetchnta (%3) # Tell CPU not to cache output RGB data \n\
"

#define SSE2_INIT_32_ALIGNED " \n\
movq (%1), %%xmm0 # Load 8 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movq (%2), %%xmm1 # Load 8 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%xmm4, %%xmm4 # zero xmm4 \n\
movdqa (%0), %%xmm6 # Load 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"

#define SSE2_INIT_32_UNALIGNED " \n\
movq (%1), %%xmm0 # Load 8 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movq (%2), %%xmm1 # Load 8 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%xmm4, %%xmm4 # zero xmm4 \n\
movdqu (%0), %%xmm6 # Load 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
prefetchnta (%3) # Tell CPU not to cache output RGB data \n\
"
#define SSE2_YUV_MUL " \n\
# convert the chroma part \n\
punpcklbw %%xmm4, %%xmm0 # scatter 8 Cb 00 u3 00 u2 00 u1 00 u0 \n\
punpcklbw %%xmm4, %%xmm1 # scatter 8 Cr 00 v3 00 v2 00 v1 00 v0 \n\
movl $0x00800080, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to 0080 0080 ... 0080 0080 \n\
psubsw %%xmm5, %%xmm0 # Cb -= 128 \n\
psubsw %%xmm5, %%xmm1 # Cr -= 128 \n\
psllw $3, %%xmm0 # Promote precision \n\
psllw $3, %%xmm1 # Promote precision \n\
movdqa %%xmm0, %%xmm2 # Copy 8 Cb 00 u3 00 u2 00 u1 00 u0 \n\
movdqa %%xmm1, %%xmm3 # Copy 8 Cr 00 v3 00 v2 00 v1 00 v0 \n\
movl $0xf37df37d, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to f37d f37d ... f37d f37d \n\
pmulhw %%xmm5, %%xmm2 # Mul Cb with green coeff -> Cb green \n\
movl $0xe5fce5fc, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to e5fc e5fc ... e5fc e5fc \n\
pmulhw %%xmm5, %%xmm3 # Mul Cr with green coeff -> Cr green \n\
movl $0x40934093, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to 4093 4093 ... 4093 4093 \n\
pmulhw %%xmm5, %%xmm0 # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 \n\
movl $0x33123312, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to 3312 3312 ... 3312 3312 \n\
pmulhw %%xmm5, %%xmm1 # Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 \n\
paddsw %%xmm3, %%xmm2 # Cb green + Cr green -> Cgreen \n\
\n\
# convert the luma part \n\
movl $0x10101010, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to 1010 1010 ... 1010 1010 \n\
psubusb %%xmm5, %%xmm6 # Y -= 16 \n\
movdqa %%xmm6, %%xmm7 # Copy 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
movl $0x00ff00ff, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to 00ff 00ff ... 00ff 00ff \n\
pand %%xmm5, %%xmm6 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0 \n\
psrlw $8, %%xmm7 # get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 \n\
psllw $3, %%xmm6 # Promote precision \n\
psllw $3, %%xmm7 # Promote precision \n\
movl $0x253f253f, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to 253f 253f ... 253f 253f \n\
pmulhw %%xmm5, %%xmm6 # Mul 8 Y even 00 y6 00 y4 00 y2 00 y0 \n\
pmulhw %%xmm5, %%xmm7 # Mul 8 Y odd 00 y7 00 y5 00 y3 00 y1 \n\
"
#define SSE2_YUV_ADD " \n\
# Do horizontal and vertical scaling \n\
movdqa %%xmm0, %%xmm3 # Copy Cblue \n\
movdqa %%xmm1, %%xmm4 # Copy Cred \n\
movdqa %%xmm2, %%xmm5 # Copy Cgreen \n\
paddsw %%xmm6, %%xmm0 # Y even + Cblue 00 B6 00 B4 00 B2 00 B0 \n\
paddsw %%xmm7, %%xmm3 # Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 \n\
paddsw %%xmm6, %%xmm1 # Y even + Cred 00 R6 00 R4 00 R2 00 R0 \n\
paddsw %%xmm7, %%xmm4 # Y odd + Cred 00 R7 00 R5 00 R3 00 R1 \n\
paddsw %%xmm6, %%xmm2 # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 \n\
paddsw %%xmm7, %%xmm5 # Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 \n\
\n\
# Limit RGB even to 0..255 \n\
packuswb %%xmm0, %%xmm0 # B6 B4 B2 B0 / B6 B4 B2 B0 \n\
packuswb %%xmm1, %%xmm1 # R6 R4 R2 R0 / R6 R4 R2 R0 \n\
packuswb %%xmm2, %%xmm2 # G6 G4 G2 G0 / G6 G4 G2 G0 \n\
\n\
# Limit RGB odd to 0..255 \n\
packuswb %%xmm3, %%xmm3 # B7 B5 B3 B1 / B7 B5 B3 B1 \n\
packuswb %%xmm4, %%xmm4 # R7 R5 R3 R1 / R7 R5 R3 R1 \n\
packuswb %%xmm5, %%xmm5 # G7 G5 G3 G1 / G7 G5 G3 G1 \n\
\n\
# Interleave RGB even and odd \n\
punpcklbw %%xmm3, %%xmm0 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%xmm4, %%xmm1 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%xmm5, %%xmm2 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
"
#define SSE2_UNPACK_15_ALIGNED " \n\
# mask unneeded bits off \n\
movl $0xf8f8f8f8, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to f8f8 f8f8 ... f8f8 f8f8 \n\
pand %%xmm5, %%xmm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
psrlw $3,%%xmm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pand %%xmm5, %%xmm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
pand %%xmm5, %%xmm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $1,%%xmm1 # __r7r6r5 r4r3____ __r7r6r5 r4r3____ \n\
pxor %%xmm4, %%xmm4 # zero xmm4 \n\
movdqa %%xmm0, %%xmm5 # Copy B15-B0 \n\
movdqa %%xmm2, %%xmm7 # Copy G15-G0 \n\
\n\
# convert rgb24 plane to rgb15 pack for pixel 0-7 \n\
punpcklbw %%xmm4, %%xmm2 # ________ ________ g7g6g5g4 g3______ \n\
punpcklbw %%xmm1, %%xmm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%xmm2 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%xmm2, %%xmm0 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movntdq %%xmm0, (%3) # store pixel 0-7 \n\
\n\
# convert rgb24 plane to rgb15 pack for pixel 8-15 \n\
punpckhbw %%xmm4, %%xmm7 # ________ ________ g7g6g5g4 g3______ \n\
punpckhbw %%xmm1, %%xmm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%xmm7 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%xmm7, %%xmm5 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movntdq %%xmm5, 16(%3) # store pixel 8-15 \n\
"

#define SSE2_UNPACK_15_UNALIGNED " \n\
# mask unneeded bits off \n\
movl $0xf8f8f8f8, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to f8f8 f8f8 ... f8f8 f8f8 \n\
pand %%xmm5, %%xmm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
psrlw $3,%%xmm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pand %%xmm5, %%xmm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
pand %%xmm5, %%xmm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $1,%%xmm1 # __r7r6r5 r4r3____ __r7r6r5 r4r3____ \n\
pxor %%xmm4, %%xmm4 # zero xmm4 \n\
movdqa %%xmm0, %%xmm5 # Copy B15-B0 \n\
movdqa %%xmm2, %%xmm7 # Copy G15-G0 \n\
\n\
# convert rgb24 plane to rgb15 pack for pixel 0-7 \n\
punpcklbw %%xmm4, %%xmm2 # ________ ________ g7g6g5g4 g3______ \n\
punpcklbw %%xmm1, %%xmm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%xmm2 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%xmm2, %%xmm0 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movdqu %%xmm0, (%3) # store pixel 0-7 \n\
\n\
# convert rgb24 plane to rgb15 pack for pixel 8-15 \n\
punpckhbw %%xmm4, %%xmm7 # ________ ________ g7g6g5g4 g3______ \n\
punpckhbw %%xmm1, %%xmm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%xmm7 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%xmm7, %%xmm5 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movdqu %%xmm5, 16(%3) # store pixel 8-15 \n\
"
#define SSE2_UNPACK_16_ALIGNED " \n\
# mask unneeded bits off \n\
movl $0xf8f8f8f8, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to f8f8 f8f8 ... f8f8 f8f8 \n\
pand %%xmm5, %%xmm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
pand %%xmm5, %%xmm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
movl $0xfcfcfcfc, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to fcfc fcfc ... fcfc fcfc \n\
pand %%xmm5, %%xmm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
psrlw $3,%%xmm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pxor %%xmm4, %%xmm4 # zero xmm4 \n\
movdqa %%xmm0, %%xmm5 # Copy B15-B0 \n\
movdqa %%xmm2, %%xmm7 # Copy G15-G0 \n\
\n\
# convert rgb24 plane to rgb16 pack for pixel 0-7 \n\
punpcklbw %%xmm4, %%xmm2 # ________ ________ g7g6g5g4 g3g2____ \n\
punpcklbw %%xmm1, %%xmm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%xmm2 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%xmm2, %%xmm0 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movntdq %%xmm0, (%3) # store pixel 0-7 \n\
\n\
# convert rgb24 plane to rgb16 pack for pixel 8-15 \n\
punpckhbw %%xmm4, %%xmm7 # ________ ________ g7g6g5g4 g3g2____ \n\
punpckhbw %%xmm1, %%xmm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%xmm7 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%xmm7, %%xmm5 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movntdq %%xmm5, 16(%3) # store pixel 8-15 \n\
"

#define SSE2_UNPACK_16_UNALIGNED " \n\
# mask unneeded bits off \n\
movl $0xf8f8f8f8, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to f8f8 f8f8 ... f8f8 f8f8 \n\
pand %%xmm5, %%xmm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
pand %%xmm5, %%xmm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
movl $0xfcfcfcfc, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to fcfc fcfc ... fcfc fcfc \n\
pand %%xmm5, %%xmm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
psrlw $3,%%xmm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pxor %%xmm4, %%xmm4 # zero xmm4 \n\
movdqa %%xmm0, %%xmm5 # Copy B15-B0 \n\
movdqa %%xmm2, %%xmm7 # Copy G15-G0 \n\
\n\
# convert rgb24 plane to rgb16 pack for pixel 0-7 \n\
punpcklbw %%xmm4, %%xmm2 # ________ ________ g7g6g5g4 g3g2____ \n\
punpcklbw %%xmm1, %%xmm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%xmm2 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%xmm2, %%xmm0 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movdqu %%xmm0, (%3) # store pixel 0-7 \n\
\n\
# convert rgb24 plane to rgb16 pack for pixel 8-15 \n\
punpckhbw %%xmm4, %%xmm7 # ________ ________ g7g6g5g4 g3g2____ \n\
punpckhbw %%xmm1, %%xmm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%xmm7 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%xmm7, %%xmm5 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movdqu %%xmm5, 16(%3) # store pixel 8-15 \n\
"
#define SSE2_UNPACK_32_ARGB_ALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero xmm3 \n\
movdqa %%xmm0, %%xmm4 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%xmm2, %%xmm4 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
movdqa %%xmm1, %%xmm5 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%xmm3, %%xmm5 # 00 R3 00 R2 00 R1 00 R0 \n\
movdqa %%xmm4, %%xmm6 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
punpcklwd %%xmm5, %%xmm4 # 00 R1 B1 G1 00 R0 B0 G0 \n\
movntdq %%xmm4, (%3) # Store ARGB3 ARGB2 ARGB1 ARGB0 \n\
punpckhwd %%xmm5, %%xmm6 # 00 R3 B3 G3 00 R2 B2 G2 \n\
movntdq %%xmm6, 16(%3) # Store ARGB7 ARGB6 ARGB5 ARGB4 \n\
punpckhbw %%xmm2, %%xmm0 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpckhbw %%xmm3, %%xmm1 # 00 R7 00 R6 00 R5 00 R4 \n\
movdqa %%xmm0, %%xmm5 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpcklwd %%xmm1, %%xmm5 # 00 R5 B5 G5 00 R4 B4 G4 \n\
movntdq %%xmm5, 32(%3) # Store ARGB11 ARGB10 ARGB9 ARGB8 \n\
punpckhwd %%xmm1, %%xmm0 # 00 R7 B7 G7 00 R6 B6 G6 \n\
movntdq %%xmm0, 48(%3) # Store ARGB15 ARGB14 ARGB13 ARGB12 \n\
"

#define SSE2_UNPACK_32_ARGB_UNALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero xmm3 \n\
movdqa %%xmm0, %%xmm4 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%xmm2, %%xmm4 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
movdqa %%xmm1, %%xmm5 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%xmm3, %%xmm5 # 00 R3 00 R2 00 R1 00 R0 \n\
movdqa %%xmm4, %%xmm6 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
punpcklwd %%xmm5, %%xmm4 # 00 R1 B1 G1 00 R0 B0 G0 \n\
movdqu %%xmm4, (%3) # Store ARGB3 ARGB2 ARGB1 ARGB0 \n\
punpckhwd %%xmm5, %%xmm6 # 00 R3 B3 G3 00 R2 B2 G2 \n\
movdqu %%xmm6, 16(%3) # Store ARGB7 ARGB6 ARGB5 ARGB4 \n\
punpckhbw %%xmm2, %%xmm0 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpckhbw %%xmm3, %%xmm1 # 00 R7 00 R6 00 R5 00 R4 \n\
movdqa %%xmm0, %%xmm5 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpcklwd %%xmm1, %%xmm5 # 00 R5 B5 G5 00 R4 B4 G4 \n\
movdqu %%xmm5, 32(%3) # Store ARGB11 ARGB10 ARGB9 ARGB8 \n\
punpckhwd %%xmm1, %%xmm0 # 00 R7 B7 G7 00 R6 B6 G6 \n\
movdqu %%xmm0, 48(%3) # Store ARGB15 ARGB14 ARGB13 ARGB12 \n\
"
#define SSE2_UNPACK_32_RGBA_ALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero xmm3 \n\
movdqa %%xmm2, %%xmm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
punpcklbw %%xmm1, %%xmm4 # R3 G3 R2 G2 R1 G1 R0 G0 \n\
punpcklbw %%xmm0, %%xmm3 # B3 00 B2 00 B1 00 B0 00 \n\
movdqa %%xmm3, %%xmm5 # B3 00 B2 00 B1 00 B0 00 \n\
punpcklwd %%xmm4, %%xmm3 # R1 G1 B1 00 R0 G0 B0 00 \n\
movntdq %%xmm3, (%3) # Store RGBA3 RGBA2 RGBA1 RGBA0 \n\
punpckhwd %%xmm4, %%xmm5 # R3 G3 B3 00 R2 G2 B2 00 \n\
movntdq %%xmm5, 16(%3) # Store RGBA7 RGBA6 RGBA5 RGBA4 \n\
pxor %%xmm6, %%xmm6 # zero xmm6 \n\
punpckhbw %%xmm1, %%xmm2 # R7 G7 R6 G6 R5 G5 R4 G4 \n\
punpckhbw %%xmm0, %%xmm6 # B7 00 B6 00 B5 00 B4 00 \n\
movdqa %%xmm6, %%xmm0 # B7 00 B6 00 B5 00 B4 00 \n\
punpcklwd %%xmm2, %%xmm6 # R5 G5 B5 00 R4 G4 B4 00 \n\
movntdq %%xmm6, 32(%3) # Store RGBA11 RGBA10 RGBA9 RGBA8 \n\
punpckhwd %%xmm2, %%xmm0 # R7 G7 B7 00 R6 G6 B6 00 \n\
movntdq %%xmm0, 48(%3) # Store RGBA15 RGBA14 RGBA13 RGBA12 \n\
"

#define SSE2_UNPACK_32_RGBA_UNALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero xmm3 \n\
movdqa %%xmm2, %%xmm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
punpcklbw %%xmm1, %%xmm4 # R3 G3 R2 G2 R1 G1 R0 G0 \n\
punpcklbw %%xmm0, %%xmm3 # B3 00 B2 00 B1 00 B0 00 \n\
movdqa %%xmm3, %%xmm5 # B3 00 B2 00 B1 00 B0 00 \n\
punpcklwd %%xmm4, %%xmm3 # R1 G1 B1 00 R0 G0 B0 00 \n\
movdqu %%xmm3, (%3) # Store RGBA3 RGBA2 RGBA1 RGBA0 \n\
punpckhwd %%xmm4, %%xmm5 # R3 G3 B3 00 R2 G2 B2 00 \n\
movdqu %%xmm5, 16(%3) # Store RGBA7 RGBA6 RGBA5 RGBA4 \n\
pxor %%xmm6, %%xmm6 # zero xmm6 \n\
punpckhbw %%xmm1, %%xmm2 # R7 G7 R6 G6 R5 G5 R4 G4 \n\
punpckhbw %%xmm0, %%xmm6 # B7 00 B6 00 B5 00 B4 00 \n\
movdqa %%xmm6, %%xmm0 # B7 00 B6 00 B5 00 B4 00 \n\
punpcklwd %%xmm2, %%xmm6 # R5 G5 B5 00 R4 G4 B4 00 \n\
movdqu %%xmm6, 32(%3) # Store RGBA11 RGBA10 RGBA9 RGBA8 \n\
punpckhwd %%xmm2, %%xmm0 # R7 G7 B7 00 R6 G6 B6 00 \n\
movdqu %%xmm0, 48(%3) # Store RGBA15 RGBA14 RGBA13 RGBA12 \n\
"
#define SSE2_UNPACK_32_BGRA_ALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero xmm3 \n\
movdqa %%xmm2, %%xmm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
punpcklbw %%xmm0, %%xmm4 # B3 G3 B2 G2 B1 G1 B0 G0 \n\
punpcklbw %%xmm1, %%xmm3 # R3 00 R2 00 R1 00 R0 00 \n\
movdqa %%xmm3, %%xmm5 # R3 00 R2 00 R1 00 R0 00 \n\
punpcklwd %%xmm4, %%xmm3 # B1 G1 R1 00 B0 G0 R0 00 \n\
movntdq %%xmm3, (%3) # Store BGRA3 BGRA2 BGRA1 BGRA0 \n\
punpckhwd %%xmm4, %%xmm5 # B3 G3 R3 00 B2 G2 R2 00 \n\
movntdq %%xmm5, 16(%3) # Store BGRA7 BGRA6 BGRA5 BGRA4 \n\
pxor %%xmm6, %%xmm6 # zero xmm6 \n\
punpckhbw %%xmm0, %%xmm2 # B7 G7 B6 G6 B5 G5 B4 G4 \n\
punpckhbw %%xmm1, %%xmm6 # R7 00 R6 00 R5 00 R4 00 \n\
movdqa %%xmm6, %%xmm0 # R7 00 R6 00 R5 00 R4 00 \n\
punpcklwd %%xmm2, %%xmm6 # B5 G5 R5 00 B4 G4 R4 00 \n\
movntdq %%xmm6, 32(%3) # Store BGRA11 BGRA10 BGRA9 BGRA8 \n\
punpckhwd %%xmm2, %%xmm0 # B7 G7 R7 00 B6 G6 R6 00 \n\
movntdq %%xmm0, 48(%3) # Store BGRA15 BGRA14 BGRA13 BGRA12 \n\
"

#define SSE2_UNPACK_32_BGRA_UNALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero xmm3 \n\
movdqa %%xmm2, %%xmm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
punpcklbw %%xmm0, %%xmm4 # B3 G3 B2 G2 B1 G1 B0 G0 \n\
punpcklbw %%xmm1, %%xmm3 # R3 00 R2 00 R1 00 R0 00 \n\
movdqa %%xmm3, %%xmm5 # R3 00 R2 00 R1 00 R0 00 \n\
punpcklwd %%xmm4, %%xmm3 # B1 G1 R1 00 B0 G0 R0 00 \n\
movdqu %%xmm3, (%3) # Store BGRA3 BGRA2 BGRA1 BGRA0 \n\
punpckhwd %%xmm4, %%xmm5 # B3 G3 R3 00 B2 G2 R2 00 \n\
movdqu %%xmm5, 16(%3) # Store BGRA7 BGRA6 BGRA5 BGRA4 \n\
pxor %%xmm6, %%xmm6 # zero xmm6 \n\
punpckhbw %%xmm0, %%xmm2 # B7 G7 B6 G6 B5 G5 B4 G4 \n\
punpckhbw %%xmm1, %%xmm6 # R7 00 R6 00 R5 00 R4 00 \n\
movdqa %%xmm6, %%xmm0 # R7 00 R6 00 R5 00 R4 00 \n\
punpcklwd %%xmm2, %%xmm6 # B5 G5 R5 00 B4 G4 R4 00 \n\
movdqu %%xmm6, 32(%3) # Store BGRA11 BGRA10 BGRA9 BGRA8 \n\
punpckhwd %%xmm2, %%xmm0 # B7 G7 R7 00 B6 G6 R6 00 \n\
movdqu %%xmm0, 48(%3) # Store BGRA15 BGRA14 BGRA13 BGRA12 \n\
"
#define SSE2_UNPACK_32_ABGR_ALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero xmm3 \n\
movdqa %%xmm1, %%xmm4 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%xmm2, %%xmm4 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
movdqa %%xmm0, %%xmm5 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%xmm3, %%xmm5 # 00 B3 00 B2 00 B1 00 B0 \n\
movdqa %%xmm4, %%xmm6 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
punpcklwd %%xmm5, %%xmm4 # 00 B1 G1 R1 00 B0 G0 R0 \n\
movntdq %%xmm4, (%3) # Store ABGR3 ABGR2 ABGR1 ABGR0 \n\
punpckhwd %%xmm5, %%xmm6 # 00 B3 G3 R3 00 B2 G2 R2 \n\
movntdq %%xmm6, 16(%3) # Store ABGR7 ABGR6 ABGR5 ABGR4 \n\
punpckhbw %%xmm2, %%xmm1 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
punpckhbw %%xmm3, %%xmm0 # 00 B7 00 B6 00 B5 00 B4 \n\
movdqa %%xmm1, %%xmm2 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
punpcklwd %%xmm0, %%xmm1 # 00 B5 G5 R5 00 B4 G4 R4 \n\
movntdq %%xmm1, 32(%3) # Store ABGR11 ABGR10 ABGR9 ABGR8 \n\
punpckhwd %%xmm0, %%xmm2 # 00 B7 G7 R7 00 B6 G6 R6 \n\
movntdq %%xmm2, 48(%3) # Store ABGR15 ABGR14 ABGR13 ABGR12 \n\
"

#define SSE2_UNPACK_32_ABGR_UNALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero xmm3 \n\
movdqa %%xmm1, %%xmm4 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%xmm2, %%xmm4 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
movdqa %%xmm0, %%xmm5 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%xmm3, %%xmm5 # 00 B3 00 B2 00 B1 00 B0 \n\
movdqa %%xmm4, %%xmm6 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
punpcklwd %%xmm5, %%xmm4 # 00 B1 G1 R1 00 B0 G0 R0 \n\
movdqu %%xmm4, (%3) # Store ABGR3 ABGR2 ABGR1 ABGR0 \n\
punpckhwd %%xmm5, %%xmm6 # 00 B3 G3 R3 00 B2 G2 R2 \n\
movdqu %%xmm6, 16(%3) # Store ABGR7 ABGR6 ABGR5 ABGR4 \n\
punpckhbw %%xmm2, %%xmm1 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
punpckhbw %%xmm3, %%xmm0 # 00 B7 00 B6 00 B5 00 B4 \n\
movdqa %%xmm1, %%xmm2 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
punpcklwd %%xmm0, %%xmm1 # 00 B5 G5 R5 00 B4 G4 R4 \n\
movdqu %%xmm1, 32(%3) # Store ABGR11 ABGR10 ABGR9 ABGR8 \n\
punpckhwd %%xmm0, %%xmm2 # 00 B7 G7 R7 00 B6 G6 R6 \n\
movdqu %%xmm2, 48(%3) # Store ABGR15 ABGR14 ABGR13 ABGR12 \n\
"
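
/*
 * Illustrative caller-side dispatch (ours; modeled on the SSE2 converter
 * loops): the movntdq variants require 16-byte aligned pointers, otherwise
 * the movdqu variants must be used. Each call converts 16 pixels.
 */
static inline void i420_rgb16_16px_sketch( uint8_t *p_y, uint8_t *p_u,
                                           uint8_t *p_v, uint16_t *p_buffer )
{
    if( (((uintptr_t)p_y | (uintptr_t)p_buffer) & 15) == 0 )
        SSE2_CALL( SSE2_INIT_16_ALIGNED
                   SSE2_YUV_MUL
                   SSE2_YUV_ADD
                   SSE2_UNPACK_16_ALIGNED );
    else
        SSE2_CALL( SSE2_INIT_16_UNALIGNED
                   SSE2_YUV_MUL
                   SSE2_YUV_ADD
                   SSE2_UNPACK_16_UNALIGNED );
    /* SSE2_END (sfence) pairs with the non-temporal stores, once per picture */
}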
#elif defined(HAVE_SSE2_INTRINSICS)

/* SSE2 intrinsics */

#include <emmintrin.h>

#define SSE2_CALL(SSE2_INSTRUCTIONS)        \
    do {                                    \
        __m128i xmm0, xmm1, xmm2, xmm3,     \
                xmm4, xmm5, xmm6, xmm7;     \
        SSE2_INSTRUCTIONS                   \
    } while(0)

#define SSE2_END _mm_sfence()
#define SSE2_INIT_16_ALIGNED \
    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
    xmm4 = _mm_setzero_si128(); \
    xmm6 = _mm_load_si128((__m128i *)p_y);

#define SSE2_INIT_16_UNALIGNED \
    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
    xmm4 = _mm_setzero_si128(); \
    xmm6 = _mm_loadu_si128((__m128i *)p_y); \
    _mm_prefetch(p_buffer, _MM_HINT_NTA);

#define SSE2_INIT_32_ALIGNED \
    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
    xmm4 = _mm_setzero_si128(); \
    xmm6 = _mm_load_si128((__m128i *)p_y);

#define SSE2_INIT_32_UNALIGNED \
    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
    xmm4 = _mm_setzero_si128(); \
    xmm6 = _mm_loadu_si128((__m128i *)p_y); \
    _mm_prefetch(p_buffer, _MM_HINT_NTA);
#define SSE2_YUV_MUL \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm4); \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm4); \
    xmm5 = _mm_set1_epi32(0x00800080UL); \
    xmm0 = _mm_subs_epi16(xmm0, xmm5); \
    xmm1 = _mm_subs_epi16(xmm1, xmm5); \
    xmm0 = _mm_slli_epi16(xmm0, 3); \
    xmm1 = _mm_slli_epi16(xmm1, 3); \
    xmm2 = xmm0; \
    xmm3 = xmm1; \
    xmm5 = _mm_set1_epi32(0xf37df37dUL); \
    xmm2 = _mm_mulhi_epi16(xmm2, xmm5); \
    xmm5 = _mm_set1_epi32(0xe5fce5fcUL); \
    xmm3 = _mm_mulhi_epi16(xmm3, xmm5); \
    xmm5 = _mm_set1_epi32(0x40934093UL); \
    xmm0 = _mm_mulhi_epi16(xmm0, xmm5); \
    xmm5 = _mm_set1_epi32(0x33123312UL); \
    xmm1 = _mm_mulhi_epi16(xmm1, xmm5); \
    xmm2 = _mm_adds_epi16(xmm2, xmm3); \
    \
    xmm5 = _mm_set1_epi32(0x10101010UL); \
    xmm6 = _mm_subs_epu8(xmm6, xmm5); \
    xmm7 = xmm6; \
    xmm5 = _mm_set1_epi32(0x00ff00ffUL); \
    xmm6 = _mm_and_si128(xmm6, xmm5); \
    xmm7 = _mm_srli_epi16(xmm7, 8); \
    xmm6 = _mm_slli_epi16(xmm6, 3); \
    xmm7 = _mm_slli_epi16(xmm7, 3); \
    xmm5 = _mm_set1_epi32(0x253f253fUL); \
    xmm6 = _mm_mulhi_epi16(xmm6, xmm5); \
    xmm7 = _mm_mulhi_epi16(xmm7, xmm5);
#define SSE2_YUV_ADD \
    xmm3 = xmm0; \
    xmm4 = xmm1; \
    xmm5 = xmm2; \
    xmm0 = _mm_adds_epi16(xmm0, xmm6); \
    xmm3 = _mm_adds_epi16(xmm3, xmm7); \
    xmm1 = _mm_adds_epi16(xmm1, xmm6); \
    xmm4 = _mm_adds_epi16(xmm4, xmm7); \
    xmm2 = _mm_adds_epi16(xmm2, xmm6); \
    xmm5 = _mm_adds_epi16(xmm5, xmm7); \
    \
    xmm0 = _mm_packus_epi16(xmm0, xmm0); \
    xmm1 = _mm_packus_epi16(xmm1, xmm1); \
    xmm2 = _mm_packus_epi16(xmm2, xmm2); \
    \
    xmm3 = _mm_packus_epi16(xmm3, xmm3); \
    xmm4 = _mm_packus_epi16(xmm4, xmm4); \
    xmm5 = _mm_packus_epi16(xmm5, xmm5); \
    \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm3); \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm4); \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm5);
#define SSE2_UNPACK_15_ALIGNED \
    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL); \
    xmm0 = _mm_and_si128(xmm0, xmm5); \
    xmm0 = _mm_srli_epi16(xmm0, 3); \
    xmm2 = _mm_and_si128(xmm2, xmm5); \
    xmm1 = _mm_and_si128(xmm1, xmm5); \
    xmm1 = _mm_srli_epi16(xmm1, 1); \
    xmm4 = _mm_setzero_si128(); \
    xmm5 = xmm0; \
    xmm7 = xmm2; \
    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4); \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
    xmm2 = _mm_slli_epi16(xmm2, 2); \
    xmm0 = _mm_or_si128(xmm0, xmm2); \
    _mm_stream_si128((__m128i*)p_buffer, xmm0); \
    \
    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4); \
    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1); \
    xmm7 = _mm_slli_epi16(xmm7, 2); \
    xmm5 = _mm_or_si128(xmm5, xmm7); \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm5);

#define SSE2_UNPACK_15_UNALIGNED \
    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL); \
    xmm0 = _mm_and_si128(xmm0, xmm5); \
    xmm0 = _mm_srli_epi16(xmm0, 3); \
    xmm2 = _mm_and_si128(xmm2, xmm5); \
    xmm1 = _mm_and_si128(xmm1, xmm5); \
    xmm1 = _mm_srli_epi16(xmm1, 1); \
    xmm4 = _mm_setzero_si128(); \
    xmm5 = xmm0; \
    xmm7 = xmm2; \
    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4); \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
    xmm2 = _mm_slli_epi16(xmm2, 2); \
    xmm0 = _mm_or_si128(xmm0, xmm2); \
    _mm_storeu_si128((__m128i*)p_buffer, xmm0); \
    \
    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4); \
    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1); \
    xmm7 = _mm_slli_epi16(xmm7, 2); \
    xmm5 = _mm_or_si128(xmm5, xmm7); \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5);
#define SSE2_UNPACK_16_ALIGNED \
    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL); \
    xmm0 = _mm_and_si128(xmm0, xmm5); \
    xmm1 = _mm_and_si128(xmm1, xmm5); \
    xmm5 = _mm_set1_epi32(0xfcfcfcfcUL); \
    xmm2 = _mm_and_si128(xmm2, xmm5); \
    xmm0 = _mm_srli_epi16(xmm0, 3); \
    xmm4 = _mm_setzero_si128(); \
    xmm5 = xmm0; \
    xmm7 = xmm2; \
    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4); \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
    xmm2 = _mm_slli_epi16(xmm2, 3); \
    xmm0 = _mm_or_si128(xmm0, xmm2); \
    _mm_stream_si128((__m128i*)p_buffer, xmm0); \
    \
    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4); \
    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1); \
    xmm7 = _mm_slli_epi16(xmm7, 3); \
    xmm5 = _mm_or_si128(xmm5, xmm7); \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm5);

#define SSE2_UNPACK_16_UNALIGNED \
    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL); \
    xmm0 = _mm_and_si128(xmm0, xmm5); \
    xmm1 = _mm_and_si128(xmm1, xmm5); \
    xmm5 = _mm_set1_epi32(0xfcfcfcfcUL); \
    xmm2 = _mm_and_si128(xmm2, xmm5); \
    xmm0 = _mm_srli_epi16(xmm0, 3); \
    xmm4 = _mm_setzero_si128(); \
    xmm5 = xmm0; \
    xmm7 = xmm2; \
    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4); \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
    xmm2 = _mm_slli_epi16(xmm2, 3); \
    xmm0 = _mm_or_si128(xmm0, xmm2); \
    _mm_storeu_si128((__m128i*)p_buffer, xmm0); \
    \
    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4); \
    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1); \
    xmm7 = _mm_slli_epi16(xmm7, 3); \
    xmm5 = _mm_or_si128(xmm5, xmm7); \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5);
#define SSE2_UNPACK_32_ARGB_ALIGNED \
    xmm3 = _mm_setzero_si128(); \
    xmm4 = xmm0; \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm2); \
    xmm5 = xmm1; \
    xmm5 = _mm_unpacklo_epi8(xmm5, xmm3); \
    xmm6 = xmm4; \
    xmm4 = _mm_unpacklo_epi16(xmm4, xmm5); \
    _mm_stream_si128((__m128i*)(p_buffer), xmm4); \
    xmm6 = _mm_unpackhi_epi16(xmm6, xmm5); \
    _mm_stream_si128((__m128i*)(p_buffer+4), xmm6); \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm2); \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3); \
    xmm5 = xmm0; \
    xmm5 = _mm_unpacklo_epi16(xmm5, xmm1); \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm5); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm1); \
    _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);

#define SSE2_UNPACK_32_ARGB_UNALIGNED \
    xmm3 = _mm_setzero_si128(); \
    xmm4 = xmm0; \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm2); \
    xmm5 = xmm1; \
    xmm5 = _mm_unpacklo_epi8(xmm5, xmm3); \
    xmm6 = xmm4; \
    xmm4 = _mm_unpacklo_epi16(xmm4, xmm5); \
    _mm_storeu_si128((__m128i*)(p_buffer), xmm4); \
    xmm6 = _mm_unpackhi_epi16(xmm6, xmm5); \
    _mm_storeu_si128((__m128i*)(p_buffer+4), xmm6); \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm2); \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3); \
    xmm5 = xmm0; \
    xmm5 = _mm_unpacklo_epi16(xmm5, xmm1); \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm1); \
    _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);
#define SSE2_UNPACK_32_RGBA_ALIGNED \
    xmm3 = _mm_setzero_si128(); \
    xmm4 = xmm2; \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm1); \
    xmm3 = _mm_unpacklo_epi8(xmm3, xmm0); \
    xmm5 = xmm3; \
    xmm3 = _mm_unpacklo_epi16(xmm3, xmm4); \
    _mm_stream_si128((__m128i*)(p_buffer), xmm3); \
    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4); \
    _mm_stream_si128((__m128i*)(p_buffer+4), xmm5); \
    xmm6 = _mm_setzero_si128(); \
    xmm2 = _mm_unpackhi_epi8(xmm2, xmm1); \
    xmm6 = _mm_unpackhi_epi8(xmm6, xmm0); \
    xmm0 = xmm6; \
    xmm6 = _mm_unpacklo_epi16(xmm6, xmm2); \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm6); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm2); \
    _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);

#define SSE2_UNPACK_32_RGBA_UNALIGNED \
    xmm3 = _mm_setzero_si128(); \
    xmm4 = xmm2; \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm1); \
    xmm3 = _mm_unpacklo_epi8(xmm3, xmm0); \
    xmm5 = xmm3; \
    xmm3 = _mm_unpacklo_epi16(xmm3, xmm4); \
    _mm_storeu_si128((__m128i*)(p_buffer), xmm3); \
    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4); \
    _mm_storeu_si128((__m128i*)(p_buffer+4), xmm5); \
    xmm6 = _mm_setzero_si128(); \
    xmm2 = _mm_unpackhi_epi8(xmm2, xmm1); \
    xmm6 = _mm_unpackhi_epi8(xmm6, xmm0); \
    xmm0 = xmm6; \
    xmm6 = _mm_unpacklo_epi16(xmm6, xmm2); \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm6); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm2); \
    _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);
#define SSE2_UNPACK_32_BGRA_ALIGNED \
    xmm3 = _mm_setzero_si128(); \
    xmm4 = xmm2; \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm0); \
    xmm3 = _mm_unpacklo_epi8(xmm3, xmm1); \
    xmm5 = xmm3; \
    xmm3 = _mm_unpacklo_epi16(xmm3, xmm4); \
    _mm_stream_si128((__m128i*)(p_buffer), xmm3); \
    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4); \
    _mm_stream_si128((__m128i*)(p_buffer+4), xmm5); \
    xmm6 = _mm_setzero_si128(); \
    xmm2 = _mm_unpackhi_epi8(xmm2, xmm0); \
    xmm6 = _mm_unpackhi_epi8(xmm6, xmm1); \
    xmm0 = xmm6; \
    xmm6 = _mm_unpacklo_epi16(xmm6, xmm2); \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm6); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm2); \
    _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);

#define SSE2_UNPACK_32_BGRA_UNALIGNED \
    xmm3 = _mm_setzero_si128(); \
    xmm4 = xmm2; \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm0); \
    xmm3 = _mm_unpacklo_epi8(xmm3, xmm1); \
    xmm5 = xmm3; \
    xmm3 = _mm_unpacklo_epi16(xmm3, xmm4); \
    _mm_storeu_si128((__m128i*)(p_buffer), xmm3); \
    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4); \
    _mm_storeu_si128((__m128i*)(p_buffer+4), xmm5); \
    xmm6 = _mm_setzero_si128(); \
    xmm2 = _mm_unpackhi_epi8(xmm2, xmm0); \
    xmm6 = _mm_unpackhi_epi8(xmm6, xmm1); \
    xmm0 = xmm6; \
    xmm6 = _mm_unpacklo_epi16(xmm6, xmm2); \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm6); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm2); \
    _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);
#define SSE2_UNPACK_32_ABGR_ALIGNED \
    xmm3 = _mm_setzero_si128(); \
    xmm4 = xmm1; \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm2); \
    xmm5 = xmm0; \
    xmm5 = _mm_unpacklo_epi8(xmm5, xmm3); \
    xmm6 = xmm4; \
    xmm4 = _mm_unpacklo_epi16(xmm4, xmm5); \
    _mm_stream_si128((__m128i*)(p_buffer), xmm4); \
    xmm6 = _mm_unpackhi_epi16(xmm6, xmm5); \
    _mm_stream_si128((__m128i*)(p_buffer+4), xmm6); \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm2); \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm3); \
    xmm2 = xmm1; \
    xmm1 = _mm_unpacklo_epi16(xmm1, xmm0); \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm1); \
    xmm2 = _mm_unpackhi_epi16(xmm2, xmm0); \
    _mm_stream_si128((__m128i*)(p_buffer+12), xmm2);

#define SSE2_UNPACK_32_ABGR_UNALIGNED \
    xmm3 = _mm_setzero_si128(); \
    xmm4 = xmm1; \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm2); \
    xmm5 = xmm0; \
    xmm5 = _mm_unpacklo_epi8(xmm5, xmm3); \
    xmm6 = xmm4; \
    xmm4 = _mm_unpacklo_epi16(xmm4, xmm5); \
    _mm_storeu_si128((__m128i*)(p_buffer), xmm4); \
    xmm6 = _mm_unpackhi_epi16(xmm6, xmm5); \
    _mm_storeu_si128((__m128i*)(p_buffer+4), xmm6); \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm2); \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm3); \
    xmm2 = xmm1; \
    xmm1 = _mm_unpacklo_epi16(xmm1, xmm0); \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm1); \
    xmm2 = _mm_unpackhi_epi16(xmm2, xmm0); \
    _mm_storeu_si128((__m128i*)(p_buffer+12), xmm2);

#endif

#endif