/*****************************************************************************
 * transforms_yuvmmx.h: MMX YUV transformation assembly
 *****************************************************************************
 * Copyright (C) 1999-2007 the VideoLAN team
 *
 * Authors: Olie Lho <ollie@sis.com.tw>
 *          Gaël Hendryckx <jimmy@via.ecp.fr>
 *          Samuel Hocevar <sam@zoy.org>
 *          Damien Fouilleul <damienf@videolan.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/

#ifdef MODULE_NAME_IS_i420_rgb_mmx

/* hope these constant values are cache line aligned */
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)
#define USED_U64(foo) \
    static const uint64_t foo __asm__ (#foo) __attribute__((used))
#else
#define USED_U64(foo) \
    static const uint64_t foo __asm__ (#foo) __attribute__((unused))
#endif

USED_U64(mmx_80w)     = 0x0080008000800080ULL;
USED_U64(mmx_10w)     = 0x1010101010101010ULL;
USED_U64(mmx_00ffw)   = 0x00ff00ff00ff00ffULL;
USED_U64(mmx_Y_coeff) = 0x253f253f253f253fULL;

USED_U64(mmx_U_green) = 0xf37df37df37df37dULL;
USED_U64(mmx_U_blue)  = 0x4093409340934093ULL;
USED_U64(mmx_V_red)   = 0x3312331233123312ULL;
USED_U64(mmx_V_green) = 0xe5fce5fce5fce5fcULL;

USED_U64(mmx_mask_f8) = 0xf8f8f8f8f8f8f8f8ULL;
USED_U64(mmx_mask_fc) = 0xfcfcfcfcfcfcfcfcULL;

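/* NOTE on the constants above (derived from the code below, not from the
 * original comments): these are the ITU-R BT.601 conversion coefficients
 * in 16-bit fixed point.  pmulhw keeps the high 16 bits of a 16x16-bit
 * product, i.e. it divides by 65536, and the inputs are pre-shifted left
 * by 3 ("Promote precision"), so each constant is the real coefficient
 * times 8192:
 *     0x253f =  9535,  9535 / 8192 ~  1.164  (Y gain, 255/219)
 *     0x4093 = 16531, 16531 / 8192 ~  2.018  (U -> blue)
 *     0x3312 = 13074, 13074 / 8192 ~  1.596  (V -> red)
 *     0xf37d = -3203, -3203 / 8192 ~ -0.391  (U -> green)
 *     0xe5fc = -6660, -6660 / 8192 ~ -0.813  (V -> green)
 * mmx_10w is the luma offset (16 per byte), mmx_80w the chroma bias
 * (128 per word), mmx_00ffw the even-byte mask, and mmx_mask_f8/fc the
 * RGB555/RGB565 component masks. */
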
#undef USED_U64

#if defined(CAN_COMPILE_MMX)

/* MMX assembly */

#define MMX_CALL(MMX_INSTRUCTIONS)      \
    do {                                \
    __asm__ __volatile__(               \
        ".p2align 3 \n\t"               \
        MMX_INSTRUCTIONS                \
        :                               \
        : "r" (p_y), "r" (p_u),         \
          "r" (p_v), "r" (p_buffer) );  \
    } while(0)

#define MMX_END __asm__ __volatile__ ( "emms" )

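/* Sketch of the intended use (a sketch only: the real call sites live in
 * the i420_rgb C sources, and the names p_y, p_u, p_v, p_buffer are
 * assumed from the operand list above to be in scope at expansion time):
 *
 *     MMX_CALL( MMX_INIT_16
 *               MMX_YUV_MUL
 *               MMX_YUV_ADD
 *               MMX_UNPACK_15 );
 *     p_y += 8; p_u += 4; p_v += 4; p_buffer += 8;   advance 8 pixels
 *     ...
 *     MMX_END;   emms must run before any FPU code reuses the registers
 */
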
/* Use RIP-relative code in PIC mode on amd64 */
#if defined(__x86_64__) && defined(__PIC__)
#   define G "(%%rip)"
#else
#   define G
#endif

#define MMX_INIT_16 " \n\
movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"

#define MMX_INIT_16_GRAY " \n\
movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
#movl $0, (%3) # cache preload for image \n\
"

#define MMX_INIT_32 " \n\
movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movl $0, (%3) # cache preload for image \n\
movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"

/*
 * Do the multiply part of the conversion for even and odd pixels.
 * Register usage:
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
 * mm6 -> Y even, mm7 -> Y odd
 */

#define MMX_YUV_MUL " \n\
# convert the chroma part \n\
punpcklbw %%mm4, %%mm0 # scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 \n\
punpcklbw %%mm4, %%mm1 # scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 \n\
psubsw mmx_80w"G", %%mm0 # Cb -= 128 \n\
psubsw mmx_80w"G", %%mm1 # Cr -= 128 \n\
psllw $3, %%mm0 # Promote precision \n\
psllw $3, %%mm1 # Promote precision \n\
movq %%mm0, %%mm2 # Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 \n\
movq %%mm1, %%mm3 # Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 \n\
pmulhw mmx_U_green"G", %%mm2 # Mul Cb with green coeff -> Cb green \n\
pmulhw mmx_V_green"G", %%mm3 # Mul Cr with green coeff -> Cr green \n\
pmulhw mmx_U_blue"G", %%mm0 # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 \n\
pmulhw mmx_V_red"G", %%mm1 # Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 \n\
paddsw %%mm3, %%mm2 # Cb green + Cr green -> Cgreen \n\
\
# convert the luma part \n\
psubusb mmx_10w"G", %%mm6 # Y -= 16 \n\
movq %%mm6, %%mm7 # Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
pand mmx_00ffw"G", %%mm6 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0 \n\
psrlw $8, %%mm7 # get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 \n\
psllw $3, %%mm6 # Promote precision \n\
psllw $3, %%mm7 # Promote precision \n\
pmulhw mmx_Y_coeff"G", %%mm6 # Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 \n\
pmulhw mmx_Y_coeff"G", %%mm7 # Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 \n\
"

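/* NOTE on the even/odd split above: horizontally adjacent pixels share
 * one Cb/Cr pair, so the 8 Y samples are widened into two word vectors
 * -- the 00ff mask keeps Y0,Y2,Y4,Y6 and psrlw $8 keeps Y1,Y3,Y5,Y7 --
 * and the same four chroma terms are later added to both halves. */
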
/*
 * Do the addition part of the conversion for even and odd pixels.
 * Register usage:
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
 * mm6 -> Y even, mm7 -> Y odd
 */

#define MMX_YUV_ADD " \n\
# Do horizontal and vertical scaling \n\
movq %%mm0, %%mm3 # Copy Cblue \n\
movq %%mm1, %%mm4 # Copy Cred \n\
movq %%mm2, %%mm5 # Copy Cgreen \n\
paddsw %%mm6, %%mm0 # Y even + Cblue 00 B6 00 B4 00 B2 00 B0 \n\
paddsw %%mm7, %%mm3 # Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 \n\
paddsw %%mm6, %%mm1 # Y even + Cred 00 R6 00 R4 00 R2 00 R0 \n\
paddsw %%mm7, %%mm4 # Y odd + Cred 00 R7 00 R5 00 R3 00 R1 \n\
paddsw %%mm6, %%mm2 # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 \n\
paddsw %%mm7, %%mm5 # Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 \n\
\
# Limit RGB even to 0..255 \n\
packuswb %%mm0, %%mm0 # B6 B4 B2 B0 / B6 B4 B2 B0 \n\
packuswb %%mm1, %%mm1 # R6 R4 R2 R0 / R6 R4 R2 R0 \n\
packuswb %%mm2, %%mm2 # G6 G4 G2 G0 / G6 G4 G2 G0 \n\
\
# Limit RGB odd to 0..255 \n\
packuswb %%mm3, %%mm3 # B7 B5 B3 B1 / B7 B5 B3 B1 \n\
packuswb %%mm4, %%mm4 # R7 R5 R3 R1 / R7 R5 R3 R1 \n\
packuswb %%mm5, %%mm5 # G7 G5 G3 G1 / G7 G5 G3 G1 \n\
\
# Interleave RGB even and odd \n\
punpcklbw %%mm3, %%mm0 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%mm4, %%mm1 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%mm5, %%mm2 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
"

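/* For reference, a scalar C sketch of what MMX_YUV_MUL + MMX_YUV_ADD
 * compute for one pixel (an illustration, not part of the original
 * code; the vector version handles 4 even and 4 odd pixels at a time,
 * and psubusb additionally clamps y - 16 at zero):
 *
 *     static inline uint8_t clamp255( int a )        like packuswb
 *     {
 *         return a < 0 ? 0 : a > 255 ? 255 : a;
 *     }
 *
 *     static inline void yuv_to_rgb_ref( uint8_t y, uint8_t u, uint8_t v,
 *                                        uint8_t *r, uint8_t *g, uint8_t *b )
 *     {
 *         int y_ = (((y - 16) << 3) * 0x253f) >> 16;  pmulhw keeps the high word
 *         int u_ = (u - 128) << 3;
 *         int v_ = (v - 128) << 3;
 *         *b = clamp255( y_ + ((u_ * 0x4093) >> 16) );
 *         *g = clamp255( y_ + ((u_ * -3203) >> 16) + ((v_ * -6660) >> 16) );
 *         *r = clamp255( y_ + ((v_ * 0x3312) >> 16) );
 *     }
 */
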
/*
 * Grayscale case, only use Y
 */

#define MMX_YUV_GRAY " \n\
# convert the luma part \n\
psubusb mmx_10w"G", %%mm6 # Y -= 16 \n\
movq %%mm6, %%mm7 # Copy 8 Y \n\
pand mmx_00ffw"G", %%mm6 # get Y even \n\
psrlw $8, %%mm7 # get Y odd \n\
psllw $3, %%mm6 # Promote precision \n\
psllw $3, %%mm7 # Promote precision \n\
pmulhw mmx_Y_coeff"G", %%mm6 # Mul 4 Y even \n\
pmulhw mmx_Y_coeff"G", %%mm7 # Mul 4 Y odd \n\
packuswb %%mm6, %%mm6 # Limit Y even to 0..255 \n\
packuswb %%mm7, %%mm7 # Limit Y odd to 0..255 \n\
punpcklbw %%mm7, %%mm6 # Interleave Y even and odd \n\
"

#define MMX_UNPACK_16_GRAY " \n\
movq %%mm6, %%mm5 # Copy Y \n\
pand mmx_mask_f8"G", %%mm6 # keep top 5 bits (red / blue) \n\
pand mmx_mask_fc"G", %%mm5 # keep top 6 bits (green) \n\
movq %%mm6, %%mm7 \n\
psrlw $3, %%mm7 # blue component \n\
pxor %%mm3, %%mm3 # zero mm3 \n\
movq %%mm7, %%mm2 \n\
movq %%mm5, %%mm0 \n\
punpcklbw %%mm3, %%mm5 # zero-extend green, low half \n\
punpcklbw %%mm6, %%mm7 # red in high byte, blue in low byte \n\
psllw $3, %%mm5 # green into bits 10-5 \n\
por %%mm5, %%mm7 \n\
movq %%mm7, (%3) # store pixel 0-3 \n\
punpckhbw %%mm3, %%mm0 # zero-extend green, high half \n\
punpckhbw %%mm6, %%mm2 \n\
psllw $3, %%mm0 \n\
movq 8(%0), %%mm6 # Load 8 next Y \n\
por %%mm0, %%mm2 \n\
movq %%mm2, 8(%3) # store pixel 4-7 \n\
"

/*
 * convert RGB plane to RGB 15 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */

#define MMX_UNPACK_15 " \n\
# mask unneeded bits off \n\
pand mmx_mask_f8"G", %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
psrlw $3,%%mm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pand mmx_mask_f8"G", %%mm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
pand mmx_mask_f8"G", %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $1,%%mm1 # __r7r6r5 r4r3____ __r7r6r5 r4r3____ \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq %%mm0, %%mm5 # Copy B7-B0 \n\
movq %%mm2, %%mm7 # Copy G7-G0 \n\
\
# convert rgb24 plane to rgb15 pack for pixel 0-3 \n\
punpcklbw %%mm4, %%mm2 # ________ ________ g7g6g5g4 g3______ \n\
punpcklbw %%mm1, %%mm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%mm2 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%mm2, %%mm0 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movq 8(%0), %%mm6 # Load 8 next Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
movq %%mm0, (%3) # store pixel 0-3 \n\
\
# convert rgb24 plane to rgb15 pack for pixel 4-7 \n\
punpckhbw %%mm4, %%mm7 # ________ ________ g7g6g5g4 g3______ \n\
punpckhbw %%mm1, %%mm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%mm7 # ________ ____g7g6 g5g4g3__ ________ \n\
movd 4(%1), %%mm0 # Load 4 next Cb __ __ __ __ u3 u2 u1 u0 \n\
por %%mm7, %%mm5 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movd 4(%2), %%mm1 # Load 4 next Cr __ __ __ __ v3 v2 v1 v0 \n\
movq %%mm5, 8(%3) # store pixel 4-7 \n\
"

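/* Bit-layout check for the RGB555 pack above (a note, not in the
 * original): each output word ends up as 0rrrrrgg gggbbbbb:
 *     blue : (B & 0xf8) >> 3        -> bits  4-0
 *     green: (G & 0xf8) << 2        -> bits  9-5  (zero-extended byte)
 *     red  : (R & 0xf8) >> 1, << 8  -> bits 14-10 (punpcklbw puts red in
 *            the high byte of the word, an implicit shift left by 8)
 */
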
/*
 * convert RGB plane to RGB 16 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */

#define MMX_UNPACK_16 " \n\
# mask unneeded bits off \n\
pand mmx_mask_f8"G", %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
pand mmx_mask_fc"G", %%mm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
pand mmx_mask_f8"G", %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $3,%%mm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq %%mm0, %%mm5 # Copy B7-B0 \n\
movq %%mm2, %%mm7 # Copy G7-G0 \n\
\
# convert rgb24 plane to rgb16 pack for pixel 0-3 \n\
punpcklbw %%mm4, %%mm2 # ________ ________ g7g6g5g4 g3g2____ \n\
punpcklbw %%mm1, %%mm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%mm2 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%mm2, %%mm0 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movq 8(%0), %%mm6 # Load 8 next Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
movq %%mm0, (%3) # store pixel 0-3 \n\
\
# convert rgb24 plane to rgb16 pack for pixel 4-7 \n\
punpckhbw %%mm4, %%mm7 # ________ ________ g7g6g5g4 g3g2____ \n\
punpckhbw %%mm1, %%mm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%mm7 # ________ __g7g6g5 g4g3g2__ ________ \n\
movd 4(%1), %%mm0 # Load 4 next Cb __ __ __ __ u3 u2 u1 u0 \n\
por %%mm7, %%mm5 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movd 4(%2), %%mm1 # Load 4 next Cr __ __ __ __ v3 v2 v1 v0 \n\
movq %%mm5, 8(%3) # store pixel 4-7 \n\
"

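/* Same check for RGB565, rrrrrggg gggbbbbb (a note, not in the
 * original): green keeps six bits via the 0xfc mask and lands in bits
 * 10-5 after psllw $3; red needs no pre-shift because (R & 0xf8) in the
 * high byte of the word is exactly bits 15-11. */
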
/*
 * convert RGB plane to RGB packed format,
 * mm0 -> B, mm1 -> R, mm2 -> G
 */

#define MMX_UNPACK_32_ARGB " \n\
pxor %%mm3, %%mm3 # zero mm3 \n\
movq %%mm0, %%mm4 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%mm2, %%mm4 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
movq %%mm1, %%mm5 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%mm3, %%mm5 # 00 R3 00 R2 00 R1 00 R0 \n\
movq %%mm4, %%mm6 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
punpcklwd %%mm5, %%mm4 # 00 R1 G1 B1 00 R0 G0 B0 \n\
movq %%mm4, (%3) # Store ARGB1 ARGB0 \n\
punpckhwd %%mm5, %%mm6 # 00 R3 G3 B3 00 R2 G2 B2 \n\
movq %%mm6, 8(%3) # Store ARGB3 ARGB2 \n\
punpckhbw %%mm2, %%mm0 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpckhbw %%mm3, %%mm1 # 00 R7 00 R6 00 R5 00 R4 \n\
movq %%mm0, %%mm5 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpcklwd %%mm1, %%mm5 # 00 R5 G5 B5 00 R4 G4 B4 \n\
movq %%mm5, 16(%3) # Store ARGB5 ARGB4 \n\
punpckhwd %%mm1, %%mm0 # 00 R7 G7 B7 00 R6 G6 B6 \n\
movq %%mm0, 24(%3) # Store ARGB7 ARGB6 \n\
"

#define MMX_UNPACK_32_BGRA " \n\
pxor %%mm3, %%mm3 # zero mm3 \n\
movq %%mm2, %%mm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
punpcklbw %%mm0, %%mm4 # B3 G3 B2 G2 B1 G1 B0 G0 \n\
punpcklbw %%mm1, %%mm3 # R3 00 R2 00 R1 00 R0 00 \n\
movq %%mm3, %%mm5 # R3 00 R2 00 R1 00 R0 00 \n\
punpcklwd %%mm4, %%mm3 # B1 G1 R1 00 B0 G0 R0 00 \n\
movq %%mm3, (%3) # Store BGRA1 BGRA0 \n\
punpckhwd %%mm4, %%mm5 # B3 G3 R3 00 B2 G2 R2 00 \n\
movq %%mm5, 8(%3) # Store BGRA3 BGRA2 \n\
pxor %%mm6, %%mm6 # zero mm6 \n\
punpckhbw %%mm0, %%mm2 # B7 G7 B6 G6 B5 G5 B4 G4 \n\
punpckhbw %%mm1, %%mm6 # R7 00 R6 00 R5 00 R4 00 \n\
movq %%mm6, %%mm0 # R7 00 R6 00 R5 00 R4 00 \n\
punpcklwd %%mm2, %%mm6 # B5 G5 R5 00 B4 G4 R4 00 \n\
movq %%mm6, 16(%3) # Store BGRA5 BGRA4 \n\
punpckhwd %%mm2, %%mm0 # B7 G7 R7 00 B6 G6 R6 00 \n\
movq %%mm0, 24(%3) # Store BGRA7 BGRA6 \n\
"

#define MMX_UNPACK_32_ABGR " \n\
pxor %%mm3, %%mm3 # zero mm3 \n\
movq %%mm1, %%mm4 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%mm2, %%mm4 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
movq %%mm0, %%mm5 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%mm3, %%mm5 # 00 B3 00 B2 00 B1 00 B0 \n\
movq %%mm4, %%mm6 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
punpcklwd %%mm5, %%mm4 # 00 B1 G1 R1 00 B0 G0 R0 \n\
movq %%mm4, (%3) # Store ABGR1 ABGR0 \n\
punpckhwd %%mm5, %%mm6 # 00 B3 G3 R3 00 B2 G2 R2 \n\
movq %%mm6, 8(%3) # Store ABGR3 ABGR2 \n\
punpckhbw %%mm2, %%mm1 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
punpckhbw %%mm3, %%mm0 # 00 B7 00 B6 00 B5 00 B4 \n\
movq %%mm1, %%mm2 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
punpcklwd %%mm0, %%mm1 # 00 B5 G5 R5 00 B4 G4 R4 \n\
movq %%mm1, 16(%3) # Store ABGR5 ABGR4 \n\
punpckhwd %%mm0, %%mm2 # 00 B7 G7 R7 00 B6 G6 R6 \n\
movq %%mm2, 24(%3) # Store ABGR7 ABGR6 \n\
"

#elif defined(HAVE_MMX_INTRINSICS)

/* MMX intrinsics */

#include <mmintrin.h>

#define MMX_CALL(MMX_INSTRUCTIONS)  \
    do {                            \
        __m64 mm0, mm1, mm2, mm3,   \
              mm4, mm5, mm6, mm7;   \
        MMX_INSTRUCTIONS            \
    } while(0)

#define MMX_END _mm_empty()

#define MMX_INIT_16                         \
    mm0 = _mm_cvtsi32_si64(*(int *)p_u);    \
    mm1 = _mm_cvtsi32_si64(*(int *)p_v);    \
    mm4 = _mm_setzero_si64();               \
    mm6 = (__m64)*(uint64_t *)p_y;

#define MMX_INIT_32                         \
    mm0 = _mm_cvtsi32_si64(*(int *)p_u);    \
    *(uint16_t *)p_buffer = 0;              \
    mm1 = _mm_cvtsi32_si64(*(int *)p_v);    \
    mm4 = _mm_setzero_si64();               \
    mm6 = (__m64)*(uint64_t *)p_y;

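/* The (__m64)*(uint64_t *) and *(int *) casts above follow the header's
 * GCC-oriented style: they type-pun plane bytes straight into MMX
 * registers.  A stricter, aliasing-safe alternative (a sketch, not in
 * the original) would go through memcpy:
 *
 *     uint64_t tmp;
 *     memcpy( &tmp, p_y, sizeof tmp );   needs <string.h>
 *     mm6 = (__m64)tmp;
 */
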
#define MMX_YUV_MUL                                 \
    mm0 = _mm_unpacklo_pi8(mm0, mm4);               \
    mm1 = _mm_unpacklo_pi8(mm1, mm4);               \
    mm0 = _mm_subs_pi16(mm0, (__m64)mmx_80w);       \
    mm1 = _mm_subs_pi16(mm1, (__m64)mmx_80w);       \
    mm0 = _mm_slli_pi16(mm0, 3);                    \
    mm1 = _mm_slli_pi16(mm1, 3);                    \
    mm2 = mm0;                                      \
    mm3 = mm1;                                      \
    mm2 = _mm_mulhi_pi16(mm2, (__m64)mmx_U_green);  \
    mm3 = _mm_mulhi_pi16(mm3, (__m64)mmx_V_green);  \
    mm0 = _mm_mulhi_pi16(mm0, (__m64)mmx_U_blue);   \
    mm1 = _mm_mulhi_pi16(mm1, (__m64)mmx_V_red);    \
    mm2 = _mm_adds_pi16(mm2, mm3);                  \
                                                    \
    mm6 = _mm_subs_pu8(mm6, (__m64)mmx_10w);        \
    mm7 = mm6;                                      \
    mm6 = _mm_and_si64(mm6, (__m64)mmx_00ffw);      \
    mm7 = _mm_srli_pi16(mm7, 8);                    \
    mm6 = _mm_slli_pi16(mm6, 3);                    \
    mm7 = _mm_slli_pi16(mm7, 3);                    \
    mm6 = _mm_mulhi_pi16(mm6, (__m64)mmx_Y_coeff);  \
    mm7 = _mm_mulhi_pi16(mm7, (__m64)mmx_Y_coeff);

#define MMX_YUV_ADD                         \
    mm3 = mm0;                              \
    mm4 = mm1;                              \
    mm5 = mm2;                              \
    mm0 = _mm_adds_pi16(mm0, mm6);          \
    mm3 = _mm_adds_pi16(mm3, mm7);          \
    mm1 = _mm_adds_pi16(mm1, mm6);          \
    mm4 = _mm_adds_pi16(mm4, mm7);          \
    mm2 = _mm_adds_pi16(mm2, mm6);          \
    mm5 = _mm_adds_pi16(mm5, mm7);          \
                                            \
    mm0 = _mm_packs_pu16(mm0, mm0);         \
    mm1 = _mm_packs_pu16(mm1, mm1);         \
    mm2 = _mm_packs_pu16(mm2, mm2);         \
                                            \
    mm3 = _mm_packs_pu16(mm3, mm3);         \
    mm4 = _mm_packs_pu16(mm4, mm4);         \
    mm5 = _mm_packs_pu16(mm5, mm5);         \
                                            \
    mm0 = _mm_unpacklo_pi8(mm0, mm3);       \
    mm1 = _mm_unpacklo_pi8(mm1, mm4);       \
    mm2 = _mm_unpacklo_pi8(mm2, mm5);

#define MMX_UNPACK_15                                   \
    mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8);        \
    mm0 = _mm_srli_pi16(mm0, 3);                        \
    mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_f8);        \
    mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8);        \
    mm1 = _mm_srli_pi16(mm1, 1);                        \
    mm4 = _mm_setzero_si64();                           \
    mm5 = mm0;                                          \
    mm7 = mm2;                                          \
                                                        \
    mm2 = _mm_unpacklo_pi8(mm2, mm4);                   \
    mm0 = _mm_unpacklo_pi8(mm0, mm1);                   \
    mm2 = _mm_slli_pi16(mm2, 2);                        \
    mm0 = _mm_or_si64(mm0, mm2);                        \
    mm6 = (__m64)*(uint64_t *)(p_y + 8);                \
    *(uint64_t *)p_buffer = (uint64_t)mm0;              \
                                                        \
    mm7 = _mm_unpackhi_pi8(mm7, mm4);                   \
    mm5 = _mm_unpackhi_pi8(mm5, mm1);                   \
    mm7 = _mm_slli_pi16(mm7, 2);                        \
    mm0 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_u + 4)); \
    mm5 = _mm_or_si64(mm5, mm7);                        \
    mm1 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_v + 4)); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;

#define MMX_UNPACK_16                                   \
    mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8);        \
    mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_fc);        \
    mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8);        \
    mm0 = _mm_srli_pi16(mm0, 3);                        \
    mm4 = _mm_setzero_si64();                           \
    mm5 = mm0;                                          \
    mm7 = mm2;                                          \
                                                        \
    mm2 = _mm_unpacklo_pi8(mm2, mm4);                   \
    mm0 = _mm_unpacklo_pi8(mm0, mm1);                   \
    mm2 = _mm_slli_pi16(mm2, 3);                        \
    mm0 = _mm_or_si64(mm0, mm2);                        \
    mm6 = (__m64)*(uint64_t *)(p_y + 8);                \
    *(uint64_t *)p_buffer = (uint64_t)mm0;              \
                                                        \
    mm7 = _mm_unpackhi_pi8(mm7, mm4);                   \
    mm5 = _mm_unpackhi_pi8(mm5, mm1);                   \
    mm7 = _mm_slli_pi16(mm7, 3);                        \
    mm0 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_u + 4)); \
    mm5 = _mm_or_si64(mm5, mm7);                        \
    mm1 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_v + 4)); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;

#define MMX_UNPACK_32_ARGB                      \
    mm3 = _mm_setzero_si64();                   \
    mm4 = mm0;                                  \
    mm4 = _mm_unpacklo_pi8(mm4, mm2);           \
    mm5 = mm1;                                  \
    mm5 = _mm_unpacklo_pi8(mm5, mm3);           \
    mm6 = mm4;                                  \
    mm4 = _mm_unpacklo_pi16(mm4, mm5);          \
    *(uint64_t *)p_buffer = (uint64_t)mm4;      \
    mm6 = _mm_unpackhi_pi16(mm6, mm5);          \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6;\
    mm0 = _mm_unpackhi_pi8(mm0, mm2);           \
    mm1 = _mm_unpackhi_pi8(mm1, mm3);           \
    mm5 = mm0;                                  \
    mm5 = _mm_unpacklo_pi16(mm5, mm1);          \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;\
    mm0 = _mm_unpackhi_pi16(mm0, mm1);          \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;

#define MMX_UNPACK_32_BGRA                      \
    mm3 = _mm_setzero_si64();                   \
    mm4 = mm2;                                  \
    mm4 = _mm_unpacklo_pi8(mm4, mm0);           \
    mm3 = _mm_unpacklo_pi8(mm3, mm1);           \
    mm5 = mm3;                                  \
    mm3 = _mm_unpacklo_pi16(mm3, mm4);          \
    *(uint64_t *)p_buffer = (uint64_t)mm3;      \
    mm5 = _mm_unpackhi_pi16(mm5, mm4);          \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm5;\
    mm6 = _mm_setzero_si64();                   \
    mm2 = _mm_unpackhi_pi8(mm2, mm0);           \
    mm6 = _mm_unpackhi_pi8(mm6, mm1);           \
    mm0 = mm6;                                  \
    mm6 = _mm_unpacklo_pi16(mm6, mm2);          \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm6;\
    mm0 = _mm_unpackhi_pi16(mm0, mm2);          \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;

/* The ABGR intrinsic variant is left empty here; only the assembly
 * version above implements ABGR output. */
#define MMX_UNPACK_32_ABGR \
    ;

#endif

#elif defined( MODULE_NAME_IS_i420_rgb_sse2 )

#if defined(CAN_COMPILE_SSE2)

/* SSE2 assembly */

#define SSE2_CALL(SSE2_INSTRUCTIONS)    \
    do {                                \
    __asm__ __volatile__(               \
        ".p2align 3 \n\t"               \
        SSE2_INSTRUCTIONS               \
        :                               \
        : "r" (p_y), "r" (p_u),         \
          "r" (p_v), "r" (p_buffer)     \
        : "eax" );                      \
    } while(0)

#define SSE2_END __asm__ __volatile__ ( "sfence" ::: "memory" )

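/* Sketch of the intended dispatch (assumed; the actual driver is the
 * i420_rgb C code): the *_ALIGNED unpack variants below use movntdq and
 * so require 16-byte aligned output, which a caller would test per
 * scanline, fencing once at the end:
 *
 *     if( 0 == ( 15 & ((intptr_t)p_y | (intptr_t)p_buffer) ) )
 *         SSE2_CALL( SSE2_INIT_16_ALIGNED
 *                    SSE2_YUV_MUL
 *                    SSE2_YUV_ADD
 *                    SSE2_UNPACK_16_ALIGNED );
 *     else
 *         SSE2_CALL( SSE2_INIT_16_UNALIGNED
 *                    SSE2_YUV_MUL
 *                    SSE2_YUV_ADD
 *                    SSE2_UNPACK_16_UNALIGNED );
 *     ...
 *     SSE2_END;   sfence makes the non-temporal stores globally visible
 */
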
#define SSE2_INIT_16_ALIGNED " \n\
movq (%1), %%xmm0 # Load 8 Cb u7 u6 u5 u4 u3 u2 u1 u0 \n\
movq (%2), %%xmm1 # Load 8 Cr v7 v6 v5 v4 v3 v2 v1 v0 \n\
pxor %%xmm4, %%xmm4 # zero xmm4 \n\
movdqa (%0), %%xmm6 # Load 16 Y Y15 Y14 ... Y1 Y0 \n\
"

#define SSE2_INIT_16_UNALIGNED " \n\
movq (%1), %%xmm0 # Load 8 Cb u7 u6 u5 u4 u3 u2 u1 u0 \n\
movq (%2), %%xmm1 # Load 8 Cr v7 v6 v5 v4 v3 v2 v1 v0 \n\
pxor %%xmm4, %%xmm4 # zero xmm4 \n\
movdqu (%0), %%xmm6 # Load 16 Y Y15 Y14 ... Y1 Y0 \n\
prefetchnta (%3) # Tell CPU not to cache output RGB data \n\
"

#define SSE2_INIT_32_ALIGNED " \n\
movq (%1), %%xmm0 # Load 8 Cb u7 u6 u5 u4 u3 u2 u1 u0 \n\
movq (%2), %%xmm1 # Load 8 Cr v7 v6 v5 v4 v3 v2 v1 v0 \n\
pxor %%xmm4, %%xmm4 # zero xmm4 \n\
movdqa (%0), %%xmm6 # Load 16 Y Y15 Y14 ... Y1 Y0 \n\
"

#define SSE2_INIT_32_UNALIGNED " \n\
movq (%1), %%xmm0 # Load 8 Cb u7 u6 u5 u4 u3 u2 u1 u0 \n\
movq (%2), %%xmm1 # Load 8 Cr v7 v6 v5 v4 v3 v2 v1 v0 \n\
pxor %%xmm4, %%xmm4 # zero xmm4 \n\
movdqu (%0), %%xmm6 # Load 16 Y Y15 Y14 ... Y1 Y0 \n\
prefetchnta (%3) # Tell CPU not to cache output RGB data \n\
"

#define SSE2_YUV_MUL " \n\
# convert the chroma part \n\
punpcklbw %%xmm4, %%xmm0 # scatter 8 Cb 00 u3 00 u2 00 u1 00 u0 \n\
punpcklbw %%xmm4, %%xmm1 # scatter 8 Cr 00 v3 00 v2 00 v1 00 v0 \n\
movl $0x00800080, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to 0080 0080 ... 0080 0080 \n\
psubsw %%xmm5, %%xmm0 # Cb -= 128 \n\
psubsw %%xmm5, %%xmm1 # Cr -= 128 \n\
psllw $3, %%xmm0 # Promote precision \n\
psllw $3, %%xmm1 # Promote precision \n\
movdqa %%xmm0, %%xmm2 # Copy 8 Cb 00 u3 00 u2 00 u1 00 u0 \n\
movdqa %%xmm1, %%xmm3 # Copy 8 Cr 00 v3 00 v2 00 v1 00 v0 \n\
movl $0xf37df37d, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to f37d f37d ... f37d f37d \n\
pmulhw %%xmm5, %%xmm2 # Mul Cb with green coeff -> Cb green \n\
movl $0xe5fce5fc, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to e5fc e5fc ... e5fc e5fc \n\
pmulhw %%xmm5, %%xmm3 # Mul Cr with green coeff -> Cr green \n\
movl $0x40934093, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to 4093 4093 ... 4093 4093 \n\
pmulhw %%xmm5, %%xmm0 # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 \n\
movl $0x33123312, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to 3312 3312 ... 3312 3312 \n\
pmulhw %%xmm5, %%xmm1 # Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 \n\
paddsw %%xmm3, %%xmm2 # Cb green + Cr green -> Cgreen \n\
\
# convert the luma part \n\
movl $0x10101010, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to 1010 1010 ... 1010 1010 \n\
psubusb %%xmm5, %%xmm6 # Y -= 16 \n\
movdqa %%xmm6, %%xmm7 # Copy 16 Y Y15 Y14 ... Y1 Y0 \n\
movl $0x00ff00ff, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to 00ff 00ff ... 00ff 00ff \n\
pand %%xmm5, %%xmm6 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0 \n\
psrlw $8, %%xmm7 # get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 \n\
psllw $3, %%xmm6 # Promote precision \n\
psllw $3, %%xmm7 # Promote precision \n\
movl $0x253f253f, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to 253f 253f ... 253f 253f \n\
pmulhw %%xmm5, %%xmm6 # Mul 8 Y even 00 y6 00 y4 00 y2 00 y0 \n\
pmulhw %%xmm5, %%xmm7 # Mul 8 Y odd 00 y7 00 y5 00 y3 00 y1 \n\
"

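/* NOTE: unlike the MMX variant, this SSE2 assembly never loads the
 * mmx_* constants from memory; each one is rebuilt inline with
 * movl/movd/pshufd.  That sidesteps PIC addressing of globals entirely
 * (no "G" RIP-relative suffix needed) and is why "eax" appears in the
 * clobber list of SSE2_CALL. */
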
#define SSE2_YUV_ADD " \n\
# Do horizontal and vertical scaling \n\
movdqa %%xmm0, %%xmm3 # Copy Cblue \n\
movdqa %%xmm1, %%xmm4 # Copy Cred \n\
movdqa %%xmm2, %%xmm5 # Copy Cgreen \n\
paddsw %%xmm6, %%xmm0 # Y even + Cblue 00 B6 00 B4 00 B2 00 B0 \n\
paddsw %%xmm7, %%xmm3 # Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 \n\
paddsw %%xmm6, %%xmm1 # Y even + Cred 00 R6 00 R4 00 R2 00 R0 \n\
paddsw %%xmm7, %%xmm4 # Y odd + Cred 00 R7 00 R5 00 R3 00 R1 \n\
paddsw %%xmm6, %%xmm2 # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 \n\
paddsw %%xmm7, %%xmm5 # Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 \n\
\
# Limit RGB even to 0..255 \n\
packuswb %%xmm0, %%xmm0 # B6 B4 B2 B0 / B6 B4 B2 B0 \n\
packuswb %%xmm1, %%xmm1 # R6 R4 R2 R0 / R6 R4 R2 R0 \n\
packuswb %%xmm2, %%xmm2 # G6 G4 G2 G0 / G6 G4 G2 G0 \n\
\
# Limit RGB odd to 0..255 \n\
packuswb %%xmm3, %%xmm3 # B7 B5 B3 B1 / B7 B5 B3 B1 \n\
packuswb %%xmm4, %%xmm4 # R7 R5 R3 R1 / R7 R5 R3 R1 \n\
packuswb %%xmm5, %%xmm5 # G7 G5 G3 G1 / G7 G5 G3 G1 \n\
\
# Interleave RGB even and odd \n\
punpcklbw %%xmm3, %%xmm0 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%xmm4, %%xmm1 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%xmm5, %%xmm2 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
"

#define SSE2_UNPACK_15_ALIGNED " \n\
# mask unneeded bits off \n\
movl $0xf8f8f8f8, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to f8f8 f8f8 ... f8f8 f8f8 \n\
pand %%xmm5, %%xmm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
psrlw $3,%%xmm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pand %%xmm5, %%xmm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
pand %%xmm5, %%xmm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $1,%%xmm1 # __r7r6r5 r4r3____ __r7r6r5 r4r3____ \n\
pxor %%xmm4, %%xmm4 # zero xmm4 \n\
movdqa %%xmm0, %%xmm5 # Copy B15-B0 \n\
movdqa %%xmm2, %%xmm7 # Copy G15-G0 \n\
\
# convert rgb24 plane to rgb15 pack for pixel 0-7 \n\
punpcklbw %%xmm4, %%xmm2 # ________ ________ g7g6g5g4 g3______ \n\
punpcklbw %%xmm1, %%xmm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%xmm2 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%xmm2, %%xmm0 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movntdq %%xmm0, (%3) # store pixel 0-7 \n\
\
# convert rgb24 plane to rgb15 pack for pixel 8-15 \n\
punpckhbw %%xmm4, %%xmm7 # ________ ________ g7g6g5g4 g3______ \n\
punpckhbw %%xmm1, %%xmm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%xmm7 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%xmm7, %%xmm5 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movntdq %%xmm5, 16(%3) # store pixel 8-15 \n\
"

#define SSE2_UNPACK_15_UNALIGNED " \n\
# mask unneeded bits off \n\
movl $0xf8f8f8f8, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to f8f8 f8f8 ... f8f8 f8f8 \n\
pand %%xmm5, %%xmm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
psrlw $3,%%xmm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pand %%xmm5, %%xmm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
pand %%xmm5, %%xmm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $1,%%xmm1 # __r7r6r5 r4r3____ __r7r6r5 r4r3____ \n\
pxor %%xmm4, %%xmm4 # zero xmm4 \n\
movdqa %%xmm0, %%xmm5 # Copy B15-B0 \n\
movdqa %%xmm2, %%xmm7 # Copy G15-G0 \n\
\
# convert rgb24 plane to rgb15 pack for pixel 0-7 \n\
punpcklbw %%xmm4, %%xmm2 # ________ ________ g7g6g5g4 g3______ \n\
punpcklbw %%xmm1, %%xmm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%xmm2 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%xmm2, %%xmm0 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movdqu %%xmm0, (%3) # store pixel 0-7 \n\
\
# convert rgb24 plane to rgb15 pack for pixel 8-15 \n\
punpckhbw %%xmm4, %%xmm7 # ________ ________ g7g6g5g4 g3______ \n\
punpckhbw %%xmm1, %%xmm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%xmm7 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%xmm7, %%xmm5 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movdqu %%xmm5, 16(%3) # store pixel 8-15 \n\
"

#define SSE2_UNPACK_16_ALIGNED " \n\
# mask unneeded bits off \n\
movl $0xf8f8f8f8, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to f8f8 f8f8 ... f8f8 f8f8 \n\
pand %%xmm5, %%xmm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
pand %%xmm5, %%xmm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
movl $0xfcfcfcfc, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to fcfc fcfc ... fcfc fcfc \n\
pand %%xmm5, %%xmm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
psrlw $3,%%xmm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pxor %%xmm4, %%xmm4 # zero xmm4 \n\
movdqa %%xmm0, %%xmm5 # Copy B15-B0 \n\
movdqa %%xmm2, %%xmm7 # Copy G15-G0 \n\
\
# convert rgb24 plane to rgb16 pack for pixel 0-7 \n\
punpcklbw %%xmm4, %%xmm2 # ________ ________ g7g6g5g4 g3g2____ \n\
punpcklbw %%xmm1, %%xmm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%xmm2 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%xmm2, %%xmm0 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movntdq %%xmm0, (%3) # store pixel 0-7 \n\
\
# convert rgb24 plane to rgb16 pack for pixel 8-15 \n\
punpckhbw %%xmm4, %%xmm7 # ________ ________ g7g6g5g4 g3g2____ \n\
punpckhbw %%xmm1, %%xmm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%xmm7 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%xmm7, %%xmm5 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movntdq %%xmm5, 16(%3) # store pixel 8-15 \n\
"

#define SSE2_UNPACK_16_UNALIGNED " \n\
# mask unneeded bits off \n\
movl $0xf8f8f8f8, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to f8f8 f8f8 ... f8f8 f8f8 \n\
pand %%xmm5, %%xmm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
pand %%xmm5, %%xmm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
movl $0xfcfcfcfc, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to fcfc fcfc ... fcfc fcfc \n\
pand %%xmm5, %%xmm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
psrlw $3,%%xmm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pxor %%xmm4, %%xmm4 # zero xmm4 \n\
movdqa %%xmm0, %%xmm5 # Copy B15-B0 \n\
movdqa %%xmm2, %%xmm7 # Copy G15-G0 \n\
\
# convert rgb24 plane to rgb16 pack for pixel 0-7 \n\
punpcklbw %%xmm4, %%xmm2 # ________ ________ g7g6g5g4 g3g2____ \n\
punpcklbw %%xmm1, %%xmm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%xmm2 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%xmm2, %%xmm0 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movdqu %%xmm0, (%3) # store pixel 0-7 \n\
\
# convert rgb24 plane to rgb16 pack for pixel 8-15 \n\
punpckhbw %%xmm4, %%xmm7 # ________ ________ g7g6g5g4 g3g2____ \n\
punpckhbw %%xmm1, %%xmm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%xmm7 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%xmm7, %%xmm5 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movdqu %%xmm5, 16(%3) # store pixel 8-15 \n\
"

#define SSE2_UNPACK_32_ARGB_ALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero xmm3 \n\
movdqa %%xmm0, %%xmm4 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%xmm2, %%xmm4 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
movdqa %%xmm1, %%xmm5 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%xmm3, %%xmm5 # 00 R3 00 R2 00 R1 00 R0 \n\
movdqa %%xmm4, %%xmm6 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
punpcklwd %%xmm5, %%xmm4 # 00 R1 G1 B1 00 R0 G0 B0 \n\
movntdq %%xmm4, (%3) # Store ARGB3 ARGB2 ARGB1 ARGB0 \n\
punpckhwd %%xmm5, %%xmm6 # 00 R3 G3 B3 00 R2 G2 B2 \n\
movntdq %%xmm6, 16(%3) # Store ARGB7 ARGB6 ARGB5 ARGB4 \n\
punpckhbw %%xmm2, %%xmm0 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpckhbw %%xmm3, %%xmm1 # 00 R7 00 R6 00 R5 00 R4 \n\
movdqa %%xmm0, %%xmm5 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpcklwd %%xmm1, %%xmm5 # 00 R5 G5 B5 00 R4 G4 B4 \n\
movntdq %%xmm5, 32(%3) # Store ARGB11 ARGB10 ARGB9 ARGB8 \n\
punpckhwd %%xmm1, %%xmm0 # 00 R7 G7 B7 00 R6 G6 B6 \n\
movntdq %%xmm0, 48(%3) # Store ARGB15 ARGB14 ARGB13 ARGB12 \n\
"

#define SSE2_UNPACK_32_ARGB_UNALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero xmm3 \n\
movdqa %%xmm0, %%xmm4 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%xmm2, %%xmm4 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
movdqa %%xmm1, %%xmm5 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%xmm3, %%xmm5 # 00 R3 00 R2 00 R1 00 R0 \n\
movdqa %%xmm4, %%xmm6 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
punpcklwd %%xmm5, %%xmm4 # 00 R1 G1 B1 00 R0 G0 B0 \n\
movdqu %%xmm4, (%3) # Store ARGB3 ARGB2 ARGB1 ARGB0 \n\
punpckhwd %%xmm5, %%xmm6 # 00 R3 G3 B3 00 R2 G2 B2 \n\
movdqu %%xmm6, 16(%3) # Store ARGB7 ARGB6 ARGB5 ARGB4 \n\
punpckhbw %%xmm2, %%xmm0 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpckhbw %%xmm3, %%xmm1 # 00 R7 00 R6 00 R5 00 R4 \n\
movdqa %%xmm0, %%xmm5 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpcklwd %%xmm1, %%xmm5 # 00 R5 G5 B5 00 R4 G4 B4 \n\
movdqu %%xmm5, 32(%3) # Store ARGB11 ARGB10 ARGB9 ARGB8 \n\
punpckhwd %%xmm1, %%xmm0 # 00 R7 G7 B7 00 R6 G6 B6 \n\
movdqu %%xmm0, 48(%3) # Store ARGB15 ARGB14 ARGB13 ARGB12 \n\
"

#define SSE2_UNPACK_32_BGRA_ALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero xmm3 \n\
movdqa %%xmm2, %%xmm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
punpcklbw %%xmm0, %%xmm4 # B3 G3 B2 G2 B1 G1 B0 G0 \n\
punpcklbw %%xmm1, %%xmm3 # R3 00 R2 00 R1 00 R0 00 \n\
movdqa %%xmm3, %%xmm5 # R3 00 R2 00 R1 00 R0 00 \n\
punpcklwd %%xmm4, %%xmm3 # B1 G1 R1 00 B0 G0 R0 00 \n\
movntdq %%xmm3, (%3) # Store BGRA3 BGRA2 BGRA1 BGRA0 \n\
punpckhwd %%xmm4, %%xmm5 # B3 G3 R3 00 B2 G2 R2 00 \n\
movntdq %%xmm5, 16(%3) # Store BGRA7 BGRA6 BGRA5 BGRA4 \n\
pxor %%xmm6, %%xmm6 # zero xmm6 \n\
punpckhbw %%xmm0, %%xmm2 # B7 G7 B6 G6 B5 G5 B4 G4 \n\
punpckhbw %%xmm1, %%xmm6 # R7 00 R6 00 R5 00 R4 00 \n\
movdqa %%xmm6, %%xmm0 # R7 00 R6 00 R5 00 R4 00 \n\
punpcklwd %%xmm2, %%xmm6 # B5 G5 R5 00 B4 G4 R4 00 \n\
movntdq %%xmm6, 32(%3) # Store BGRA11 BGRA10 BGRA9 BGRA8 \n\
punpckhwd %%xmm2, %%xmm0 # B7 G7 R7 00 B6 G6 R6 00 \n\
movntdq %%xmm0, 48(%3) # Store BGRA15 BGRA14 BGRA13 BGRA12 \n\
"

#define SSE2_UNPACK_32_BGRA_UNALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero xmm3 \n\
movdqa %%xmm2, %%xmm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
punpcklbw %%xmm0, %%xmm4 # B3 G3 B2 G2 B1 G1 B0 G0 \n\
punpcklbw %%xmm1, %%xmm3 # R3 00 R2 00 R1 00 R0 00 \n\
movdqa %%xmm3, %%xmm5 # R3 00 R2 00 R1 00 R0 00 \n\
punpcklwd %%xmm4, %%xmm3 # B1 G1 R1 00 B0 G0 R0 00 \n\
movdqu %%xmm3, (%3) # Store BGRA3 BGRA2 BGRA1 BGRA0 \n\
punpckhwd %%xmm4, %%xmm5 # B3 G3 R3 00 B2 G2 R2 00 \n\
movdqu %%xmm5, 16(%3) # Store BGRA7 BGRA6 BGRA5 BGRA4 \n\
pxor %%xmm6, %%xmm6 # zero xmm6 \n\
punpckhbw %%xmm0, %%xmm2 # B7 G7 B6 G6 B5 G5 B4 G4 \n\
punpckhbw %%xmm1, %%xmm6 # R7 00 R6 00 R5 00 R4 00 \n\
movdqa %%xmm6, %%xmm0 # R7 00 R6 00 R5 00 R4 00 \n\
punpcklwd %%xmm2, %%xmm6 # B5 G5 R5 00 B4 G4 R4 00 \n\
movdqu %%xmm6, 32(%3) # Store BGRA11 BGRA10 BGRA9 BGRA8 \n\
punpckhwd %%xmm2, %%xmm0 # B7 G7 R7 00 B6 G6 R6 00 \n\
movdqu %%xmm0, 48(%3) # Store BGRA15 BGRA14 BGRA13 BGRA12 \n\
"

#define SSE2_UNPACK_32_ABGR_ALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero xmm3 \n\
movdqa %%xmm1, %%xmm4 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%xmm2, %%xmm4 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
movdqa %%xmm0, %%xmm5 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%xmm3, %%xmm5 # 00 B3 00 B2 00 B1 00 B0 \n\
movdqa %%xmm4, %%xmm6 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
punpcklwd %%xmm5, %%xmm4 # 00 B1 G1 R1 00 B0 G0 R0 \n\
movntdq %%xmm4, (%3) # Store ABGR3 ABGR2 ABGR1 ABGR0 \n\
punpckhwd %%xmm5, %%xmm6 # 00 B3 G3 R3 00 B2 G2 R2 \n\
movntdq %%xmm6, 16(%3) # Store ABGR7 ABGR6 ABGR5 ABGR4 \n\
punpckhbw %%xmm2, %%xmm1 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
punpckhbw %%xmm3, %%xmm0 # 00 B7 00 B6 00 B5 00 B4 \n\
movdqa %%xmm1, %%xmm2 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
punpcklwd %%xmm0, %%xmm1 # 00 B5 G5 R5 00 B4 G4 R4 \n\
movntdq %%xmm1, 32(%3) # Store ABGR11 ABGR10 ABGR9 ABGR8 \n\
punpckhwd %%xmm0, %%xmm2 # 00 B7 G7 R7 00 B6 G6 R6 \n\
movntdq %%xmm2, 48(%3) # Store ABGR15 ABGR14 ABGR13 ABGR12 \n\
"

#define SSE2_UNPACK_32_ABGR_UNALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero xmm3 \n\
movdqa %%xmm1, %%xmm4 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%xmm2, %%xmm4 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
movdqa %%xmm0, %%xmm5 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%xmm3, %%xmm5 # 00 B3 00 B2 00 B1 00 B0 \n\
movdqa %%xmm4, %%xmm6 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
punpcklwd %%xmm5, %%xmm4 # 00 B1 G1 R1 00 B0 G0 R0 \n\
movdqu %%xmm4, (%3) # Store ABGR3 ABGR2 ABGR1 ABGR0 \n\
punpckhwd %%xmm5, %%xmm6 # 00 B3 G3 R3 00 B2 G2 R2 \n\
movdqu %%xmm6, 16(%3) # Store ABGR7 ABGR6 ABGR5 ABGR4 \n\
punpckhbw %%xmm2, %%xmm1 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
punpckhbw %%xmm3, %%xmm0 # 00 B7 00 B6 00 B5 00 B4 \n\
movdqa %%xmm1, %%xmm2 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
punpcklwd %%xmm0, %%xmm1 # 00 B5 G5 R5 00 B4 G4 R4 \n\
movdqu %%xmm1, 32(%3) # Store ABGR11 ABGR10 ABGR9 ABGR8 \n\
punpckhwd %%xmm0, %%xmm2 # 00 B7 G7 R7 00 B6 G6 R6 \n\
movdqu %%xmm2, 48(%3) # Store ABGR15 ABGR14 ABGR13 ABGR12 \n\
"

#elif defined(HAVE_SSE2_INTRINSICS)

/* SSE2 intrinsics */

#include <emmintrin.h>

#define SSE2_CALL(SSE2_INSTRUCTIONS)    \
    do {                                \
        __m128i xmm0, xmm1, xmm2, xmm3, \
                xmm4, xmm5, xmm6, xmm7; \
        SSE2_INSTRUCTIONS               \
    } while(0)

#define SSE2_END _mm_sfence()

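/* The intrinsic forms below mirror the assembly one for one:
 * movdqa/movdqu map to _mm_load_si128/_mm_loadu_si128, movntdq to
 * _mm_stream_si128, pmulhw to _mm_mulhi_epi16, packuswb to
 * _mm_packus_epi16, and SSE2_END's sfence to _mm_sfence(). */
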
#define SSE2_INIT_16_ALIGNED                \
    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
    xmm4 = _mm_setzero_si128();             \
    xmm6 = _mm_load_si128((__m128i *)p_y);

#define SSE2_INIT_16_UNALIGNED              \
    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
    xmm4 = _mm_setzero_si128();             \
    xmm6 = _mm_loadu_si128((__m128i *)p_y); \
    _mm_prefetch(p_buffer, _MM_HINT_NTA);

#define SSE2_INIT_32_ALIGNED                \
    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
    xmm4 = _mm_setzero_si128();             \
    xmm6 = _mm_load_si128((__m128i *)p_y);

#define SSE2_INIT_32_UNALIGNED              \
    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
    xmm4 = _mm_setzero_si128();             \
    xmm6 = _mm_loadu_si128((__m128i *)p_y); \
    _mm_prefetch(p_buffer, _MM_HINT_NTA);

#define SSE2_YUV_MUL                        \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm4);   \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm4);   \
    xmm5 = _mm_set1_epi32(0x00800080UL);    \
    xmm0 = _mm_subs_epi16(xmm0, xmm5);      \
    xmm1 = _mm_subs_epi16(xmm1, xmm5);      \
    xmm0 = _mm_slli_epi16(xmm0, 3);         \
    xmm1 = _mm_slli_epi16(xmm1, 3);         \
    xmm2 = xmm0;                            \
    xmm3 = xmm1;                            \
    xmm5 = _mm_set1_epi32(0xf37df37dUL);    \
    xmm2 = _mm_mulhi_epi16(xmm2, xmm5);     \
    xmm5 = _mm_set1_epi32(0xe5fce5fcUL);    \
    xmm3 = _mm_mulhi_epi16(xmm3, xmm5);     \
    xmm5 = _mm_set1_epi32(0x40934093UL);    \
    xmm0 = _mm_mulhi_epi16(xmm0, xmm5);     \
    xmm5 = _mm_set1_epi32(0x33123312UL);    \
    xmm1 = _mm_mulhi_epi16(xmm1, xmm5);     \
    xmm2 = _mm_adds_epi16(xmm2, xmm3);      \
                                            \
    xmm5 = _mm_set1_epi32(0x10101010UL);    \
    xmm6 = _mm_subs_epu8(xmm6, xmm5);       \
    xmm7 = xmm6;                            \
    xmm5 = _mm_set1_epi32(0x00ff00ffUL);    \
    xmm6 = _mm_and_si128(xmm6, xmm5);       \
    xmm7 = _mm_srli_epi16(xmm7, 8);         \
    xmm6 = _mm_slli_epi16(xmm6, 3);         \
    xmm7 = _mm_slli_epi16(xmm7, 3);         \
    xmm5 = _mm_set1_epi32(0x253f253fUL);    \
    xmm6 = _mm_mulhi_epi16(xmm6, xmm5);     \
    xmm7 = _mm_mulhi_epi16(xmm7, xmm5);

#define SSE2_YUV_ADD                        \
    xmm3 = xmm0;                            \
    xmm4 = xmm1;                            \
    xmm5 = xmm2;                            \
    xmm0 = _mm_adds_epi16(xmm0, xmm6);      \
    xmm3 = _mm_adds_epi16(xmm3, xmm7);      \
    xmm1 = _mm_adds_epi16(xmm1, xmm6);      \
    xmm4 = _mm_adds_epi16(xmm4, xmm7);      \
    xmm2 = _mm_adds_epi16(xmm2, xmm6);      \
    xmm5 = _mm_adds_epi16(xmm5, xmm7);      \
                                            \
    xmm0 = _mm_packus_epi16(xmm0, xmm0);    \
    xmm1 = _mm_packus_epi16(xmm1, xmm1);    \
    xmm2 = _mm_packus_epi16(xmm2, xmm2);    \
                                            \
    xmm3 = _mm_packus_epi16(xmm3, xmm3);    \
    xmm4 = _mm_packus_epi16(xmm4, xmm4);    \
    xmm5 = _mm_packus_epi16(xmm5, xmm5);    \
                                            \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm3);   \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm4);   \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm5);

#define SSE2_UNPACK_15_ALIGNED                      \
    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL);            \
    xmm0 = _mm_and_si128(xmm0, xmm5);               \
    xmm0 = _mm_srli_epi16(xmm0, 3);                 \
    xmm2 = _mm_and_si128(xmm2, xmm5);               \
    xmm1 = _mm_and_si128(xmm1, xmm5);               \
    xmm1 = _mm_srli_epi16(xmm1, 1);                 \
    xmm4 = _mm_setzero_si128();                     \
    xmm5 = xmm0;                                    \
    xmm7 = xmm2;                                    \
                                                    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4);           \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);           \
    xmm2 = _mm_slli_epi16(xmm2, 2);                 \
    xmm0 = _mm_or_si128(xmm0, xmm2);                \
    _mm_stream_si128((__m128i*)p_buffer, xmm0);     \
                                                    \
    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4);           \
    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1);           \
    xmm7 = _mm_slli_epi16(xmm7, 2);                 \
    xmm5 = _mm_or_si128(xmm5, xmm7);                \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm5);

#define SSE2_UNPACK_15_UNALIGNED                    \
    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL);            \
    xmm0 = _mm_and_si128(xmm0, xmm5);               \
    xmm0 = _mm_srli_epi16(xmm0, 3);                 \
    xmm2 = _mm_and_si128(xmm2, xmm5);               \
    xmm1 = _mm_and_si128(xmm1, xmm5);               \
    xmm1 = _mm_srli_epi16(xmm1, 1);                 \
    xmm4 = _mm_setzero_si128();                     \
    xmm5 = xmm0;                                    \
    xmm7 = xmm2;                                    \
                                                    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4);           \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);           \
    xmm2 = _mm_slli_epi16(xmm2, 2);                 \
    xmm0 = _mm_or_si128(xmm0, xmm2);                \
    _mm_storeu_si128((__m128i*)p_buffer, xmm0);     \
                                                    \
    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4);           \
    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1);           \
    xmm7 = _mm_slli_epi16(xmm7, 2);                 \
    xmm5 = _mm_or_si128(xmm5, xmm7);                \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5);

#define SSE2_UNPACK_16_ALIGNED                      \
    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL);            \
    xmm0 = _mm_and_si128(xmm0, xmm5);               \
    xmm1 = _mm_and_si128(xmm1, xmm5);               \
    xmm5 = _mm_set1_epi32(0xfcfcfcfcUL);            \
    xmm2 = _mm_and_si128(xmm2, xmm5);               \
    xmm0 = _mm_srli_epi16(xmm0, 3);                 \
    xmm4 = _mm_setzero_si128();                     \
    xmm5 = xmm0;                                    \
    xmm7 = xmm2;                                    \
                                                    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4);           \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);           \
    xmm2 = _mm_slli_epi16(xmm2, 3);                 \
    xmm0 = _mm_or_si128(xmm0, xmm2);                \
    _mm_stream_si128((__m128i*)p_buffer, xmm0);     \
                                                    \
    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4);           \
    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1);           \
    xmm7 = _mm_slli_epi16(xmm7, 3);                 \
    xmm5 = _mm_or_si128(xmm5, xmm7);                \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm5);

#define SSE2_UNPACK_16_UNALIGNED                    \
    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL);            \
    xmm0 = _mm_and_si128(xmm0, xmm5);               \
    xmm1 = _mm_and_si128(xmm1, xmm5);               \
    xmm5 = _mm_set1_epi32(0xfcfcfcfcUL);            \
    xmm2 = _mm_and_si128(xmm2, xmm5);               \
    xmm0 = _mm_srli_epi16(xmm0, 3);                 \
    xmm4 = _mm_setzero_si128();                     \
    xmm5 = xmm0;                                    \
    xmm7 = xmm2;                                    \
                                                    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4);           \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);           \
    xmm2 = _mm_slli_epi16(xmm2, 3);                 \
    xmm0 = _mm_or_si128(xmm0, xmm2);                \
    _mm_storeu_si128((__m128i*)p_buffer, xmm0);     \
                                                    \
    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4);           \
    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1);           \
    xmm7 = _mm_slli_epi16(xmm7, 3);                 \
    xmm5 = _mm_or_si128(xmm5, xmm7);                \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5);

#define SSE2_UNPACK_32_ARGB_ALIGNED                 \
    xmm3 = _mm_setzero_si128();                     \
    xmm4 = xmm0;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm2);           \
    xmm5 = xmm1;                                    \
    xmm5 = _mm_unpacklo_epi8(xmm5, xmm3);           \
    xmm6 = xmm4;                                    \
    xmm4 = _mm_unpacklo_epi16(xmm4, xmm5);          \
    _mm_stream_si128((__m128i*)(p_buffer), xmm4);   \
    xmm6 = _mm_unpackhi_epi16(xmm6, xmm5);          \
    _mm_stream_si128((__m128i*)(p_buffer+4), xmm6); \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm2);           \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3);           \
    xmm5 = xmm0;                                    \
    xmm5 = _mm_unpacklo_epi16(xmm5, xmm1);          \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm5); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm1);          \
    _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);

#define SSE2_UNPACK_32_ARGB_UNALIGNED               \
    xmm3 = _mm_setzero_si128();                     \
    xmm4 = xmm0;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm2);           \
    xmm5 = xmm1;                                    \
    xmm5 = _mm_unpacklo_epi8(xmm5, xmm3);           \
    xmm6 = xmm4;                                    \
    xmm4 = _mm_unpacklo_epi16(xmm4, xmm5);          \
    _mm_storeu_si128((__m128i*)(p_buffer), xmm4);   \
    xmm6 = _mm_unpackhi_epi16(xmm6, xmm5);          \
    _mm_storeu_si128((__m128i*)(p_buffer+4), xmm6); \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm2);           \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3);           \
    xmm5 = xmm0;                                    \
    xmm5 = _mm_unpacklo_epi16(xmm5, xmm1);          \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm1);          \
    _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);

#define SSE2_UNPACK_32_BGRA_ALIGNED                 \
    xmm3 = _mm_setzero_si128();                     \
    xmm4 = xmm2;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm0);           \
    xmm3 = _mm_unpacklo_epi8(xmm3, xmm1);           \
    xmm5 = xmm3;                                    \
    xmm3 = _mm_unpacklo_epi16(xmm3, xmm4);          \
    _mm_stream_si128((__m128i*)(p_buffer), xmm3);   \
    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4);          \
    _mm_stream_si128((__m128i*)(p_buffer+4), xmm5); \
    xmm6 = _mm_setzero_si128();                     \
    xmm2 = _mm_unpackhi_epi8(xmm2, xmm0);           \
    xmm6 = _mm_unpackhi_epi8(xmm6, xmm1);           \
    xmm0 = xmm6;                                    \
    xmm6 = _mm_unpacklo_epi16(xmm6, xmm2);          \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm6); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm2);          \
    _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);

#define SSE2_UNPACK_32_BGRA_UNALIGNED               \
    xmm3 = _mm_setzero_si128();                     \
    xmm4 = xmm2;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm0);           \
    xmm3 = _mm_unpacklo_epi8(xmm3, xmm1);           \
    xmm5 = xmm3;                                    \
    xmm3 = _mm_unpacklo_epi16(xmm3, xmm4);          \
    _mm_storeu_si128((__m128i*)(p_buffer), xmm3);   \
    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4);          \
    _mm_storeu_si128((__m128i*)(p_buffer+4), xmm5); \
    xmm6 = _mm_setzero_si128();                     \
    xmm2 = _mm_unpackhi_epi8(xmm2, xmm0);           \
    xmm6 = _mm_unpackhi_epi8(xmm6, xmm1);           \
    xmm0 = xmm6;                                    \
    xmm6 = _mm_unpacklo_epi16(xmm6, xmm2);          \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm6); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm2);          \
    _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);

/* The ABGR intrinsic variants are left empty here; only the assembly
 * versions above implement ABGR output. */
#define SSE2_UNPACK_32_ABGR_ALIGNED \
    ;

#define SSE2_UNPACK_32_ABGR_UNALIGNED \
    ;