1 /*****************************************************************************
2  * i420_rgb_mmx.h: MMX/SSE2 YUV transformation assembly
3  *****************************************************************************
4  * Copyright (C) 1999-2007 the VideoLAN team
5  * $Id$
6  *
7  * Authors: Olie Lho <ollie@sis.com.tw>
8  *          Gaël Hendryckx <jimmy@via.ecp.fr>
9  *          Samuel Hocevar <sam@zoy.org>
10  *          Damien Fouilleul <damienf@videolan.org>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  * You should have received a copy of the GNU General Public License
23  * along with this program; if not, write to the Free Software
24  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
25  *****************************************************************************/
26
27 #ifdef MODULE_NAME_IS_i420_rgb_mmx
28
29 /* hope these constant values are cache line aligned */
30 static const uint64_t mmx_80w     = 0x0080008000800080ULL; /* Will be referenced as %4 in inline asm */
31 static const uint64_t mmx_10w     = 0x1010101010101010ULL; /* -- as %5 */
32 static const uint64_t mmx_00ffw   = 0x00ff00ff00ff00ffULL; /* -- as %6 */
33 static const uint64_t mmx_Y_coeff = 0x253f253f253f253fULL; /* -- as %7 */
34
35 static const uint64_t mmx_U_green = 0xf37df37df37df37dULL; /* -- as %8 */
36 static const uint64_t mmx_U_blue  = 0x4093409340934093ULL; /* -- as %9 */
37 static const uint64_t mmx_V_red   = 0x3312331233123312ULL; /* -- as %10 */
38 static const uint64_t mmx_V_green = 0xe5fce5fce5fce5fcULL; /* -- as %11 */
39
40 static const uint64_t mmx_mask_f8 = 0xf8f8f8f8f8f8f8f8ULL; /* -- as %12 */
41 static const uint64_t mmx_mask_fc = 0xfcfcfcfcfcfcfcfcULL; /* -- as %13 */
42
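/*
 * For reference only, a scalar sketch of the fixed point arithmetic these
 * constants encode (not used by the converters): chroma is centred with
 * mmx_80w, luma is offset by mmx_10w and masked with mmx_00ffw, everything
 * is shifted left by 3 and multiplied with pmulhw, which keeps the high 16
 * bits of the signed product, i.e. computes x * coeff / 8192.
 */
static inline void i420_rgb_reference_pixel( int y, int u, int v,
                                             int *r, int *g, int *b )
{
    /* Hypothetical helper, not part of the module.  0x253f/8192 ~ 1.164,
     * 0x4093/8192 ~ 2.018, 0x3312/8192 ~ 1.596; 0xf37d and 0xe5fc are the
     * negative green coefficients (-0.391 and -0.813).  Assumes arithmetic
     * right shift of negative values, which is what pmulhw effectively does;
     * the final 0..255 clamping (packuswb in the asm) is omitted here. */
    int c = (((y - 16) << 3) * 0x253f) >> 16;                /* luma        */
    int d = (u - 128) << 3;
    int e = (v - 128) << 3;
    *b = c + ((d * 0x4093) >> 16);                           /* mmx_U_blue  */
    *g = c + ((d * (int16_t)0xf37d) >> 16)
           + ((e * (int16_t)0xe5fc) >> 16);                  /* U/V green   */
    *r = c + ((e * 0x3312) >> 16);                           /* mmx_V_red   */
}
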
43 #if defined(CAN_COMPILE_MMX)
44
45 /* MMX assembly */
46  
47 #define MMX_CALL(MMX_INSTRUCTIONS)      \
48     do {                                \
49     __asm__ __volatile__(               \
50         ".p2align 3 \n\t"               \
51         MMX_INSTRUCTIONS                \
52         :                               \
53         : "r" (p_y), "r" (p_u),         \
54           "r" (p_v), "r" (p_buffer),    \
55           "m" (mmx_80w), "m" (mmx_10w), \
56           "m" (mmx_00ffw), "m" (mmx_Y_coeff), \
57           "m" (mmx_U_green), "m" (mmx_U_blue), \
58           "m" (mmx_V_red), "m" (mmx_V_green), \
59           "m" (mmx_mask_f8), "m" (mmx_mask_fc) \
60         : "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7" );  \
61     } while(0)
62
63 #define MMX_END __asm__ __volatile__ ( "emms" )
64
65 #define MMX_INIT_16 "                                                       \n\
66 movd       (%1), %%mm0      # Load 4 Cb       00 00 00 00 u3 u2 u1 u0       \n\
67 movd       (%2), %%mm1      # Load 4 Cr       00 00 00 00 v3 v2 v1 v0       \n\
68 pxor      %%mm4, %%mm4      # zero mm4                                      \n\
69 movq       (%0), %%mm6      # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
70 "
71
72 #define MMX_INIT_16_GRAY "                                                  \n\
73 movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
74 #movl      $0, (%3)         # cache preload for image                       \n\
75 "
76
77 #define MMX_INIT_32 "                                                       \n\
78 movd      (%1), %%mm0       # Load 4 Cb       00 00 00 00 u3 u2 u1 u0       \n\
79 movl        $0, (%3)        # cache preload for image                       \n\
80 movd      (%2), %%mm1       # Load 4 Cr       00 00 00 00 v3 v2 v1 v0       \n\
81 pxor     %%mm4, %%mm4       # zero mm4                                      \n\
82 movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
83 "
84
85 /*
86  * Do the multiply part of the conversion for even and odd pixels,
87  * register usage:
88  * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
89  * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd  pixels,
90  * mm6 -> Y even, mm7 -> Y odd
91  */
92
93 #define MMX_YUV_MUL "                                                       \n\
94 # convert the chroma part                                                   \n\
95 punpcklbw %%mm4, %%mm0          # scatter 4 Cb    00 u3 00 u2 00 u1 00 u0   \n\
96 punpcklbw %%mm4, %%mm1          # scatter 4 Cr    00 v3 00 v2 00 v1 00 v0   \n\
97 psubsw    %4, %%mm0     # Cb -= 128                                 \n\
98 psubsw    %4, %%mm1     # Cr -= 128                                 \n\
99 psllw     $3, %%mm0             # Promote precision                         \n\
100 psllw     $3, %%mm1             # Promote precision                         \n\
101 movq      %%mm0, %%mm2          # Copy 4 Cb       00 u3 00 u2 00 u1 00 u0   \n\
102 movq      %%mm1, %%mm3          # Copy 4 Cr       00 v3 00 v2 00 v1 00 v0   \n\
103 pmulhw    %8, %%mm2 # Mul Cb with green coeff -> Cb green       \n\
104 pmulhw    %11, %%mm3 # Mul Cr with green coeff -> Cr green       \n\
105 pmulhw    %9, %%mm0  # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0   \n\
106 pmulhw    %10, %%mm1   # Mul Cr -> Cred  00 r3 00 r2 00 r1 00 r0   \n\
107 paddsw    %%mm3, %%mm2          # Cb green + Cr green -> Cgreen             \n\
108                                                                             \n\
109 # convert the luma part                                                     \n\
110 psubusb   %5, %%mm6     # Y -= 16                                   \n\
111 movq      %%mm6, %%mm7          # Copy 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
112 pand      %6, %%mm6   # get Y even      00 Y6 00 Y4 00 Y2 00 Y0   \n\
113 psrlw     $8, %%mm7             # get Y odd       00 Y7 00 Y5 00 Y3 00 Y1   \n\
114 psllw     $3, %%mm6             # Promote precision                         \n\
115 psllw     $3, %%mm7             # Promote precision                         \n\
116 pmulhw    %7, %%mm6 # Mul 4 Y even    00 y6 00 y4 00 y2 00 y0   \n\
117 pmulhw    %7, %%mm7 # Mul 4 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
118 "
119
120 /*
121  * Do the addition part of the conversion for even and odd pixels,
122  * register usage:
123  * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
124  * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd  pixels,
125  * mm6 -> Y even, mm7 -> Y odd
126  */
127
128 #define MMX_YUV_ADD "                                                       \n\
129 # Do horizontal and vertical scaling                                        \n\
130 movq      %%mm0, %%mm3          # Copy Cblue                                \n\
131 movq      %%mm1, %%mm4          # Copy Cred                                 \n\
132 movq      %%mm2, %%mm5          # Copy Cgreen                               \n\
133 paddsw    %%mm6, %%mm0          # Y even + Cblue  00 B6 00 B4 00 B2 00 B0   \n\
134 paddsw    %%mm7, %%mm3          # Y odd  + Cblue  00 B7 00 B5 00 B3 00 B1   \n\
135 paddsw    %%mm6, %%mm1          # Y even + Cred   00 R6 00 R4 00 R2 00 R0   \n\
136 paddsw    %%mm7, %%mm4          # Y odd  + Cred   00 R7 00 R5 00 R3 00 R1   \n\
137 paddsw    %%mm6, %%mm2          # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0   \n\
138 paddsw    %%mm7, %%mm5          # Y odd  + Cgreen 00 G7 00 G5 00 G3 00 G1   \n\
139                                                                             \n\
140 # Limit RGB even to 0..255                                                  \n\
141 packuswb  %%mm0, %%mm0          # B6 B4 B2 B0 / B6 B4 B2 B0                 \n\
142 packuswb  %%mm1, %%mm1          # R6 R4 R2 R0 / R6 R4 R2 R0                 \n\
143 packuswb  %%mm2, %%mm2          # G6 G4 G2 G0 / G6 G4 G2 G0                 \n\
144                                                                             \n\
145 # Limit RGB odd to 0..255                                                   \n\
146 packuswb  %%mm3, %%mm3          # B7 B5 B3 B1 / B7 B5 B3 B1                 \n\
147 packuswb  %%mm4, %%mm4          # R7 R5 R3 R1 / R7 R5 R3 R1                 \n\
148 packuswb  %%mm5, %%mm5          # G7 G5 G3 G1 / G7 G5 G3 G1                 \n\
149                                                                             \n\
150 # Interleave RGB even and odd                                               \n\
151 punpcklbw %%mm3, %%mm0          #                 B7 B6 B5 B4 B3 B2 B1 B0   \n\
152 punpcklbw %%mm4, %%mm1          #                 R7 R6 R5 R4 R3 R2 R1 R0   \n\
153 punpcklbw %%mm5, %%mm2          #                 G7 G6 G5 G4 G3 G2 G1 G0   \n\
154 "
155
156 /*
157  * Grayscale case, only use Y
158  */
159
160 #define MMX_YUV_GRAY "                                                      \n\
161 # convert the luma part                                                     \n\
162 psubusb   %5, %%mm6                                                 \n\
163 movq      %%mm6, %%mm7                                                      \n\
164 pand      %6, %%mm6                                               \n\
165 psrlw     $8, %%mm7                                                         \n\
166 psllw     $3, %%mm6                                                         \n\
167 psllw     $3, %%mm7                                                         \n\
168 pmulhw    %7, %%mm6                                             \n\
169 pmulhw    %7, %%mm7                                             \n\
170 packuswb  %%mm6, %%mm6                                                      \n\
171 packuswb  %%mm7, %%mm7                                                      \n\
172 punpcklbw %%mm7, %%mm6                                                      \n\
173 "
174
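/*
 * Pack the 8 grey values left interleaved in mm6 by MMX_YUV_GRAY as RGB565
 * pixels (R = G = B), store them as pixels 0-3 and 4-7 at the output, and
 * preload the next 8 Y samples into mm6.
 */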
175 #define MMX_UNPACK_16_GRAY "                                                \n\
176 movq      %%mm6, %%mm5                                                      \n\
177 pand      %12, %%mm6                                             \n\
178 pand      %13, %%mm5                                             \n\
179 movq      %%mm6, %%mm7                                                      \n\
180 psrlw     $3, %%mm7                                                         \n\
181 pxor      %%mm3, %%mm3                                                      \n\
182 movq      %%mm7, %%mm2                                                      \n\
183 movq      %%mm5, %%mm0                                                      \n\
184 punpcklbw %%mm3, %%mm5                                                      \n\
185 punpcklbw %%mm6, %%mm7                                                      \n\
186 psllw     $3, %%mm5                                                         \n\
187 por       %%mm5, %%mm7                                                      \n\
188 movq      %%mm7, (%3)                                                       \n\
189 punpckhbw %%mm3, %%mm0                                                      \n\
190 punpckhbw %%mm6, %%mm2                                                      \n\
191 psllw     $3, %%mm0                                                         \n\
192 movq      8(%0), %%mm6                                                      \n\
193 por       %%mm0, %%mm2                                                      \n\
194 movq      %%mm2, 8(%3)                                                      \n\
195 "
196
197
198 /*
199  * convert RGB plane to RGB 15 bits,
200  * mm0 -> B, mm1 -> R, mm2 -> G,
201  * mm4 -> GB, mm5 -> AR pixel 4-7,
202  * mm6 -> GB, mm7 -> AR pixel 0-3
203  */
204
205 #define MMX_UNPACK_15 "                                                     \n\
206 # mask unneeded bits off                                                    \n\
207 pand      %12, %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
208 psrlw     $3,%%mm0              # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
209 pand      %12, %%mm2 # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
210 pand      %12, %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
211 psrlw     $1,%%mm1              # __r7r6r5 r4r3____ __r7r6r5 r4r3____       \n\
212 pxor      %%mm4, %%mm4          # zero mm4                                  \n\
213 movq      %%mm0, %%mm5          # Copy B7-B0                                \n\
214 movq      %%mm2, %%mm7          # Copy G7-G0                                \n\
215                                                                             \n\
216 # convert rgb24 plane to rgb15 pack for pixel 0-3                           \n\
217 punpcklbw %%mm4, %%mm2          # ________ ________ g7g6g5g4 g3______       \n\
218 punpcklbw %%mm1, %%mm0          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
219 psllw     $2,%%mm2              # ________ ____g7g6 g5g4g3__ ________       \n\
220 por       %%mm2, %%mm0          # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
221 movq      8(%0), %%mm6          # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
222 movq      %%mm0, (%3)           # store pixel 0-3                           \n\
223                                                                             \n\
224 # convert rgb24 plane to rgb15 pack for pixel 4-7                           \n\
225 punpckhbw %%mm4, %%mm7          # ________ ________ g7g6g5g4 g3______       \n\
226 punpckhbw %%mm1, %%mm5          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
227 psllw     $2,%%mm7              # ________ ____g7g6 g5g4g3__ ________       \n\
228 movd      4(%1), %%mm0          # Load 4 Cb       __ __ __ __ u3 u2 u1 u0   \n\
229 por       %%mm7, %%mm5          # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
230 movd      4(%2), %%mm1          # Load 4 Cr       __ __ __ __ v3 v2 v1 v0   \n\
231 movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
232 "
233
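/*
 * The same 15 bit packing for a single pixel, as plain C (illustrative
 * sketch only, with a hypothetical helper name the module does not use):
 */
static inline uint16_t i420_rgb_pack_rv15( int r, int g, int b )
{
    return ((r & 0xf8) << 7) | ((g & 0xf8) << 2) | ((b & 0xf8) >> 3);
}
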
234 /*
235  * convert RGB plane to RGB 16 bits,
236  * mm0 -> B, mm1 -> R, mm2 -> G,
237  * mm4 -> GB, mm5 -> AR pixel 4-7,
238  * mm6 -> GB, mm7 -> AR pixel 0-3
239  */
240
241 #define MMX_UNPACK_16 "                                                     \n\
242 # mask unneeded bits off                                                    \n\
243 pand      %12, %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
244 pand      %13, %%mm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
245 pand      %12, %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
246 psrlw     $3,%%mm0              # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
247 pxor      %%mm4, %%mm4          # zero mm4                                  \n\
248 movq      %%mm0, %%mm5          # Copy B7-B0                                \n\
249 movq      %%mm2, %%mm7          # Copy G7-G0                                \n\
250                                                                             \n\
251 # convert rgb24 plane to rgb16 pack for pixel 0-3                           \n\
252 punpcklbw %%mm4, %%mm2          # ________ ________ g7g6g5g4 g3g2____       \n\
253 punpcklbw %%mm1, %%mm0          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
254 psllw     $3,%%mm2              # ________ __g7g6g5 g4g3g2__ ________       \n\
255 por       %%mm2, %%mm0          # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
256 movq      8(%0), %%mm6          # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
257 movq      %%mm0, (%3)           # store pixel 0-3                           \n\
258                                                                             \n\
259 # convert rgb24 plane to rgb16 pack for pixel 4-7                           \n\
260 punpckhbw %%mm4, %%mm7          # ________ ________ g7g6g5g4 g3g2____       \n\
261 punpckhbw %%mm1, %%mm5          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
262 psllw     $3,%%mm7              # ________ __g7g6g5 g4g3g2__ ________       \n\
263 movd      4(%1), %%mm0          # Load 4 Cb       __ __ __ __ u3 u2 u1 u0   \n\
264 por       %%mm7, %%mm5          # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
265 movd      4(%2), %%mm1          # Load 4 Cr       __ __ __ __ v3 v2 v1 v0   \n\
266 movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
267 "
268
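/*
 * And the 16 bit (RGB565) packing for a single pixel in plain C (again a
 * sketch with a hypothetical helper name, not used by the build):
 */
static inline uint16_t i420_rgb_pack_rv16( int r, int g, int b )
{
    return ((r & 0xf8) << 8) | ((g & 0xfc) << 3) | ((b & 0xf8) >> 3);
}
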
269 /*
270  * convert RGB plane to RGB packed format,
271  * mm0 -> B, mm1 -> R, mm2 -> G
272  */
273
274 #define MMX_UNPACK_32_ARGB "                                                \n\
275 pxor      %%mm3, %%mm3  # zero mm3                                          \n\
276 movq      %%mm0, %%mm4  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
277 punpcklbw %%mm2, %%mm4  #                 G3 B3 G2 B2 G1 B1 G0 B0           \n\
278 movq      %%mm1, %%mm5  #                 R7 R6 R5 R4 R3 R2 R1 R0           \n\
279 punpcklbw %%mm3, %%mm5  #                 00 R3 00 R2 00 R1 00 R0           \n\
280 movq      %%mm4, %%mm6  #                 G3 B3 G2 B2 G1 B1 G0 B0           \n\
281 punpcklwd %%mm5, %%mm4  #                 00 R1 G1 B1 00 R0 G0 B0           \n\
282 movq      %%mm4, (%3)   # Store ARGB1 ARGB0                                 \n\
283 punpckhwd %%mm5, %%mm6  #                 00 R3 G3 B3 00 R2 G2 B2           \n\
284 movq      %%mm6, 8(%3)  # Store ARGB3 ARGB2                                 \n\
285 punpckhbw %%mm2, %%mm0  #                 G7 B7 G6 B6 G5 B5 G4 B4           \n\
286 punpckhbw %%mm3, %%mm1  #                 00 R7 00 R6 00 R5 00 R4           \n\
287 movq      %%mm0, %%mm5  #                 G7 B7 G6 B6 G5 B5 G4 B4           \n\
288 punpcklwd %%mm1, %%mm5  #                 00 R5 G5 B5 00 R4 G4 B4           \n\
289 movq      %%mm5, 16(%3) # Store ARGB5 ARGB4                                 \n\
290 punpckhwd %%mm1, %%mm0  #                 00 R7 G7 B7 00 R6 G6 B6           \n\
291 movq      %%mm0, 24(%3) # Store ARGB7 ARGB6                                 \n\
292 "
293
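/*
 * Each 32 bit ARGB pixel is written as the bytes B, G, R, 0x00, i.e. the
 * little endian value 0x00RRGGBB; the alpha byte is left at zero.  Scalar
 * equivalent for one pixel (sketch only, hypothetical helper):
 */
static inline uint32_t i420_rgb_pack_argb( uint8_t r, uint8_t g, uint8_t b )
{
    return ((uint32_t)r << 16) | ((uint32_t)g << 8) | (uint32_t)b;
}
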
294 #define MMX_UNPACK_32_RGBA "                                                \n\
295 pxor      %%mm3, %%mm3  # zero mm3                                          \n\
296 movq      %%mm2, %%mm4  #                 G7 G6 G5 G4 G3 G2 G1 G0           \n\
297 punpcklbw %%mm1, %%mm4  #                 R3 G3 R2 G2 R1 G1 R0 G0           \n\
298 punpcklbw %%mm0, %%mm3  #                 B3 00 B2 00 B1 00 B0 00           \n\
299 movq      %%mm3, %%mm5  #                 B3 00 B2 00 B1 00 B0 00           \n\
300 punpcklwd %%mm4, %%mm3  #                 R1 G1 B1 00 R0 G0 B0 00           \n\
301 movq      %%mm3, (%3)   # Store RGBA1 RGBA0                                 \n\
302 punpckhwd %%mm4, %%mm5  #                 R3 G3 B3 00 R2 G2 B2 00           \n\
303 movq      %%mm5, 8(%3)  # Store RGBA3 RGBA2                                 \n\
304 pxor      %%mm6, %%mm6  # zero mm6                                          \n\
305 punpckhbw %%mm1, %%mm2  #                 R7 G7 R6 G6 R5 G5 R4 G4           \n\
306 punpckhbw %%mm0, %%mm6  #                 B7 00 B6 00 B5 00 B4 00           \n\
307 movq      %%mm6, %%mm0  #                 B7 00 B6 00 B5 00 B4 00           \n\
308 punpcklwd %%mm2, %%mm6  #                 R5 G5 B5 00 R4 G4 B4 00           \n\
309 movq      %%mm6, 16(%3) # Store RGBA5 RGBA4                                 \n\
310 punpckhwd %%mm2, %%mm0  #                 R7 G7 B7 00 R6 G6 B6 00           \n\
311 movq      %%mm0, 24(%3) # Store RGBA7 RGBA6                                 \n\
312 "
313
314 #define MMX_UNPACK_32_BGRA "                                                \n\
315 pxor      %%mm3, %%mm3  # zero mm3                                          \n\
316 movq      %%mm2, %%mm4  #                 G7 G6 G5 G4 G3 G2 G1 G0           \n\
317 punpcklbw %%mm0, %%mm4  #                 B3 G3 B2 G2 B1 G1 B0 G0           \n\
318 punpcklbw %%mm1, %%mm3  #                 R3 00 R2 00 R1 00 R0 00           \n\
319 movq      %%mm3, %%mm5  #                 R3 00 R2 00 R1 00 R0 00           \n\
320 punpcklwd %%mm4, %%mm3  #                 B1 G1 R1 00 B0 G0 R0 00           \n\
321 movq      %%mm3, (%3)   # Store BGRA1 BGRA0                                 \n\
322 punpckhwd %%mm4, %%mm5  #                 B3 G3 R3 00 B2 G2 R2 00           \n\
323 movq      %%mm5, 8(%3)  # Store BGRA3 BGRA2                                 \n\
324 pxor      %%mm6, %%mm6  # zero mm6                                          \n\
325 punpckhbw %%mm0, %%mm2  #                 B7 G7 B6 G6 B5 G5 B4 G4           \n\
326 punpckhbw %%mm1, %%mm6  #                 R7 00 R6 00 R5 00 R4 00           \n\
327 movq      %%mm6, %%mm0  #                 R7 00 R6 00 R5 00 R4 00           \n\
328 punpcklwd %%mm2, %%mm6  #                 B5 G5 R5 00 B4 G4 R4 00           \n\
329 movq      %%mm6, 16(%3) # Store BGRA5 BGRA4                                 \n\
330 punpckhwd %%mm2, %%mm0  #                 B7 G7 R7 00 B6 G6 R6 00           \n\
331 movq      %%mm0, 24(%3) # Store BGRA7 BGRA6                                 \n\
332 "
333
334 #define MMX_UNPACK_32_ABGR "                                                \n\
335 pxor      %%mm3, %%mm3  # zero mm3                                          \n\
336 movq      %%mm1, %%mm4  #                 R7 R6 R5 R4 R3 R2 R1 R0           \n\
337 punpcklbw %%mm2, %%mm4  #                 G3 R3 G2 R2 G1 R1 G0 R0           \n\
338 movq      %%mm0, %%mm5  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
339 punpcklbw %%mm3, %%mm5  #                 00 B3 00 B2 00 B1 00 B0           \n\
340 movq      %%mm4, %%mm6  #                 G3 R3 G2 R2 G1 R1 G0 R0           \n\
341 punpcklwd %%mm5, %%mm4  #                 00 B1 G1 R1 00 B0 G0 R0           \n\
342 movq      %%mm4, (%3)   # Store ABGR1 ABGR0                                 \n\
343 punpckhwd %%mm5, %%mm6  #                 00 B3 G3 R3 00 B2 G2 R2           \n\
344 movq      %%mm6, 8(%3)  # Store ABGR3 ABGR2                                 \n\
345 punpckhbw %%mm2, %%mm1  #                 G7 R7 G6 R6 G5 R5 G4 R4           \n\
346 punpckhbw %%mm3, %%mm0  #                 00 B7 00 B6 00 B5 00 B4           \n\
347 movq      %%mm1, %%mm2  #                 G7 R7 G6 R6 G5 R5 G4 R4           \n\
348 punpcklwd %%mm0, %%mm1  #                 00 B5 G5 R5 00 B4 G4 R4           \n\
349 movq      %%mm1, 16(%3) # Store ABGR5 ABGR4                                 \n\
350 punpckhwd %%mm0, %%mm2  #                 00 B7 G7 R7 00 B6 G6 R6           \n\
351 movq      %%mm2, 24(%3) # Store ABGR7 ABGR6                                 \n\
352 "
353
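/*
 * How a conversion loop is expected to combine the fragments above, shown
 * here as a sketch with a hypothetical helper name (the real i420_rgb
 * conversion loops drive the macros directly and handle their own pointer
 * arithmetic); note that MMX_UNPACK_16 already preloads Y and chroma for a
 * following iteration, so at least 16 Y and 8 chroma bytes must be readable:
 */
static inline void i420_rgb_mmx_sketch_8px_rv16( uint8_t *p_y, uint8_t *p_u,
                                                 uint8_t *p_v,
                                                 uint16_t *p_buffer )
{
    MMX_CALL(               /* one asm block converting 8 pixels           */
        MMX_INIT_16         /* load 8 Y, 4 Cb, 4 Cr                        */
        MMX_YUV_MUL         /* fixed point multiplies                      */
        MMX_YUV_ADD         /* add, saturate, interleave even/odd          */
        MMX_UNPACK_16       /* pack to RGB565 and store 16 bytes           */
    );
    MMX_END;                /* emms before any FPU code runs again         */
}
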
354 #elif defined(HAVE_MMX_INTRINSICS)
355
356 /* MMX intrinsics */
357
358 #include <mmintrin.h>
359
360 #define MMX_CALL(MMX_INSTRUCTIONS)  \
361     do {                            \
362         __m64 mm0, mm1, mm2, mm3,   \
363               mm4, mm5, mm6, mm7;   \
364         MMX_INSTRUCTIONS            \
365     } while(0)
366
367 #define MMX_END _mm_empty()
368  
369 #define MMX_INIT_16                     \
370     mm0 = _mm_cvtsi32_si64(*(int*)p_u); \
371     mm1 = _mm_cvtsi32_si64(*(int*)p_v); \
372     mm4 = _mm_setzero_si64();           \
373     mm6 = (__m64)*(uint64_t *)p_y;
374
375 #define MMX_INIT_32                     \
376     mm0 = _mm_cvtsi32_si64(*(int*)p_u); \
377     *(uint16_t *)p_buffer = 0;          \
378     mm1 = _mm_cvtsi32_si64(*(int*)p_v); \
379     mm4 = _mm_setzero_si64();           \
380     mm6 = (__m64)*(uint64_t *)p_y;
381
382 #define MMX_YUV_MUL                                 \
383     mm0 = _mm_unpacklo_pi8(mm0, mm4);               \
384     mm1 = _mm_unpacklo_pi8(mm1, mm4);               \
385     mm0 = _mm_subs_pi16(mm0, (__m64)mmx_80w);       \
386     mm1 = _mm_subs_pi16(mm1, (__m64)mmx_80w);       \
387     mm0 = _mm_slli_pi16(mm0, 3);                    \
388     mm1 = _mm_slli_pi16(mm1, 3);                    \
389     mm2 = mm0;                                      \
390     mm3 = mm1;                                      \
391     mm2 = _mm_mulhi_pi16(mm2, (__m64)mmx_U_green);  \
392     mm3 = _mm_mulhi_pi16(mm3, (__m64)mmx_V_green);  \
393     mm0 = _mm_mulhi_pi16(mm0, (__m64)mmx_U_blue);   \
394     mm1 = _mm_mulhi_pi16(mm1, (__m64)mmx_V_red);    \
395     mm2 = _mm_adds_pi16(mm2, mm3);                  \
396     \
397     mm6 = _mm_subs_pu8(mm6, (__m64)mmx_10w);        \
398     mm7 = mm6;                                      \
399     mm6 = _mm_and_si64(mm6, (__m64)mmx_00ffw);      \
400     mm7 = _mm_srli_pi16(mm7, 8);                    \
401     mm6 = _mm_slli_pi16(mm6, 3);                    \
402     mm7 = _mm_slli_pi16(mm7, 3);                    \
403     mm6 = _mm_mulhi_pi16(mm6, (__m64)mmx_Y_coeff);  \
404     mm7 = _mm_mulhi_pi16(mm7, (__m64)mmx_Y_coeff);
405
406 #define MMX_YUV_ADD                     \
407     mm3 = mm0;                          \
408     mm4 = mm1;                          \
409     mm5 = mm2;                          \
410     mm0 = _mm_adds_pi16(mm0, mm6);      \
411     mm3 = _mm_adds_pi16(mm3, mm7);      \
412     mm1 = _mm_adds_pi16(mm1, mm6);      \
413     mm4 = _mm_adds_pi16(mm4, mm7);      \
414     mm2 = _mm_adds_pi16(mm2, mm6);      \
415     mm5 = _mm_adds_pi16(mm5, mm7);      \
416     \
417     mm0 = _mm_packs_pu16(mm0, mm0);     \
418     mm1 = _mm_packs_pu16(mm1, mm1);     \
419     mm2 = _mm_packs_pu16(mm2, mm2);     \
420     \
421     mm3 = _mm_packs_pu16(mm3, mm3);     \
422     mm4 = _mm_packs_pu16(mm4, mm4);     \
423     mm5 = _mm_packs_pu16(mm5, mm5);     \
424     \
425     mm0 = _mm_unpacklo_pi8(mm0, mm3);   \
426     mm1 = _mm_unpacklo_pi8(mm1, mm4);   \
427     mm2 = _mm_unpacklo_pi8(mm2, mm5);
428
429 #define MMX_UNPACK_15                               \
430     mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8);    \
431     mm0 = _mm_srli_pi16(mm0, 3);                    \
432     mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_f8);    \
433     mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8);    \
434     mm1 = _mm_srli_pi16(mm1, 1);                    \
435     mm4 = _mm_setzero_si64();                       \
436     mm5 = mm0;                                      \
437     mm7 = mm2;                                      \
438     \
439     mm2 = _mm_unpacklo_pi8(mm2, mm4);               \
440     mm0 = _mm_unpacklo_pi8(mm0, mm1);               \
441     mm2 = _mm_slli_pi16(mm2, 2);                    \
442     mm0 = _mm_or_si64(mm0, mm2);                    \
443     mm6 = (__m64)*(uint64_t *)(p_y + 8);            \
444     *(uint64_t *)p_buffer = (uint64_t)mm0;          \
445     \
446     mm7 = _mm_unpackhi_pi8(mm7, mm4);               \
447     mm5 = _mm_unpackhi_pi8(mm5, mm1);               \
448     mm7 = _mm_slli_pi16(mm7, 2);                    \
449     mm0 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_u + 4)); \
450     mm5 = _mm_or_si64(mm5, mm7);                    \
451     mm1 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_v + 4)); \
452     *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;
453
454 #define MMX_UNPACK_16                               \
455     mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8);    \
456     mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_fc);    \
457     mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8);    \
458     mm0 = _mm_srli_pi16(mm0, 3);                    \
459     mm4 = _mm_setzero_si64();                       \
460     mm5 = mm0;                                      \
461     mm7 = mm2;                                      \
462     \
463     mm2 = _mm_unpacklo_pi8(mm2, mm4);               \
464     mm0 = _mm_unpacklo_pi8(mm0, mm1);               \
465     mm2 = _mm_slli_pi16(mm2, 3);                    \
466     mm0 = _mm_or_si64(mm0, mm2);                    \
467     mm6 = (__m64)*(uint64_t *)(p_y + 8);            \
468     *(uint64_t *)p_buffer = (uint64_t)mm0;          \
469     \
470     mm7 = _mm_unpackhi_pi8(mm7, mm4);               \
471     mm5 = _mm_unpackhi_pi8(mm5, mm1);               \
472     mm7 = _mm_slli_pi16(mm7, 3);                    \
473     mm0 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_u + 4)); \
474     mm5 = _mm_or_si64(mm5, mm7);                    \
475     mm1 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_v + 4)); \
476     *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;
477
478 #define MMX_UNPACK_32_ARGB                      \
479     mm3 = _mm_setzero_si64();                   \
480     mm4 = mm0;                                  \
481     mm4 = _mm_unpacklo_pi8(mm4, mm2);           \
482     mm5 = mm1;                                  \
483     mm5 = _mm_unpacklo_pi8(mm5, mm3);           \
484     mm6 = mm4;                                  \
485     mm4 = _mm_unpacklo_pi16(mm4, mm5);          \
486     *(uint64_t *)p_buffer = (uint64_t)mm4;      \
487     mm6 = _mm_unpackhi_pi16(mm6, mm5);          \
488     *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6;\
489     mm0 = _mm_unpackhi_pi8(mm0, mm2);           \
490     mm1 = _mm_unpackhi_pi8(mm1, mm3);           \
491     mm5 = mm0;                                  \
492     mm5 = _mm_unpacklo_pi16(mm5, mm1);          \
493     *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;\
494     mm0 = _mm_unpackhi_pi16(mm0, mm1);          \
495     *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;
496
497 #define MMX_UNPACK_32_RGBA                      \
498     mm3 = _mm_setzero_si64();                   \
499     mm4 = mm2;                                  \
500     mm4 = _mm_unpacklo_pi8(mm4, mm1);           \
501     mm3 = _mm_unpacklo_pi8(mm3, mm0);           \
502     mm5 = mm3;                                  \
503     mm3 = _mm_unpacklo_pi16(mm3, mm4);          \
504     *(uint64_t *)p_buffer = (uint64_t)mm3;      \
505     mm5 = _mm_unpackhi_pi16(mm5, mm4);          \
506     *(uint64_t *)(p_buffer + 2) = (uint64_t)mm5;\
507     mm6 = _mm_setzero_si64();                   \
508     mm2 = _mm_unpackhi_pi8(mm2, mm1);           \
509     mm6 = _mm_unpackhi_pi8(mm6, mm0);           \
510     mm0 = mm6;                                  \
511     mm6 = _mm_unpacklo_pi16(mm6, mm2);          \
512     *(uint64_t *)(p_buffer + 4) = (uint64_t)mm6;\
513     mm0 = _mm_unpackhi_pi16(mm0, mm2);          \
514     *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;
515
516 #define MMX_UNPACK_32_BGRA                      \
517     mm3 = _mm_setzero_si64();                   \
518     mm4 = mm2;                                  \
519     mm4 = _mm_unpacklo_pi8(mm4, mm0);           \
520     mm3 = _mm_unpacklo_pi8(mm3, mm1);           \
521     mm5 = mm3;                                  \
522     mm3 = _mm_unpacklo_pi16(mm3, mm4);          \
523     *(uint64_t *)p_buffer = (uint64_t)mm3;      \
524     mm5 = _mm_unpackhi_pi16(mm5, mm4);          \
525     *(uint64_t *)(p_buffer + 2) = (uint64_t)mm5;\
526     mm6 = _mm_setzero_si64();                   \
527     mm2 = _mm_unpackhi_pi8(mm2, mm0);           \
528     mm6 = _mm_unpackhi_pi8(mm6, mm1);           \
529     mm0 = mm6;                                  \
530     mm6 = _mm_unpacklo_pi16(mm6, mm2);          \
531     *(uint64_t *)(p_buffer + 4) = (uint64_t)mm6;\
532     mm0 = _mm_unpackhi_pi16(mm0, mm2);          \
533     *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;
534
535 #define MMX_UNPACK_32_ABGR                      \
536     mm3 = _mm_setzero_si64();                   \
537     mm4 = mm1;                                  \
538     mm4 = _mm_unpacklo_pi8(mm4, mm2);           \
539     mm5 = mm0;                                  \
540     mm5 = _mm_unpacklo_pi8(mm5, mm3);           \
541     mm6 = mm4;                                  \
542     mm4 = _mm_unpacklo_pi16(mm4, mm5);          \
543     *(uint64_t *)p_buffer = (uint64_t)mm4;      \
544     mm6 = _mm_unpackhi_pi16(mm6, mm5);          \
545     *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6;\
546     mm1 = _mm_unpackhi_pi8(mm1, mm2);           \
547     mm0 = _mm_unpackhi_pi8(mm0, mm3);           \
548     mm2 = mm1;                                  \
549     mm1 = _mm_unpacklo_pi16(mm1, mm0);          \
550     *(uint64_t *)(p_buffer + 4) = (uint64_t)mm1;\
551     mm2 = _mm_unpackhi_pi16(mm2, mm0);          \
552     *(uint64_t *)(p_buffer + 6) = (uint64_t)mm2;
553
554 #endif
555
556 #elif defined( MODULE_NAME_IS_i420_rgb_sse2 )
557
558 #if defined(CAN_COMPILE_SSE2)
559
560 /* SSE2 assembly */
561
562 #define SSE2_CALL(SSE2_INSTRUCTIONS)    \
563     do {                                \
564     __asm__ __volatile__(               \
565         ".p2align 3 \n\t"               \
566         SSE2_INSTRUCTIONS               \
567         :                               \
568         : "r" (p_y), "r" (p_u),         \
569           "r" (p_v), "r" (p_buffer)     \
570         : "eax", "xmm0", "xmm1", "xmm2", "xmm3", \
571                  "xmm4", "xmm5", "xmm6", "xmm7" ); \
572     } while(0)
573
574 #define SSE2_END  __asm__ __volatile__ ( "sfence" ::: "memory" )
575
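/*
 * The *_ALIGNED unpack variants below store with movntdq (non temporal
 * stores), so SSE2_END issues an sfence to make those stores globally
 * visible before ordinary code touches the buffer again.  The fragments
 * also build their constants through %eax and pshufd instead of memory
 * operands, which is why SSE2_CALL lists "eax" as clobbered.
 */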
576 #define SSE2_INIT_16_ALIGNED "                                              \n\
577 movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
578 movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
579 pxor      %%xmm4, %%xmm4    # zero mm4                                      \n\
580 movdqa      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
581 "
582
583 #define SSE2_INIT_16_UNALIGNED "                                            \n\
584 movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
585 movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
586 pxor      %%xmm4, %%xmm4    # zero mm4                                      \n\
587 movdqu      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
588 prefetchnta (%3)            # Tell CPU not to cache output RGB data         \n\
589 "
590
591 #define SSE2_INIT_32_ALIGNED "                                              \n\
592 movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
593 movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
594 pxor      %%xmm4, %%xmm4    # zero mm4                                      \n\
595 movdqa      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
596 "
597
598 #define SSE2_INIT_32_UNALIGNED "                                            \n\
599 movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
600 movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
601 pxor      %%xmm4, %%xmm4    # zero mm4                                      \n\
602 movdqu      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
603 prefetchnta (%3)            # Tell CPU not to cache output RGB data         \n\
604 "
605
606 #define SSE2_YUV_MUL "                                                      \n\
607 # convert the chroma part                                                   \n\
608 punpcklbw %%xmm4, %%xmm0        # scatter 8 Cb    00 u3 00 u2 00 u1 00 u0   \n\
609 punpcklbw %%xmm4, %%xmm1        # scatter 8 Cr    00 v3 00 v2 00 v1 00 v0   \n\
610 movl      $0x00800080, %%eax    #                                           \n\
611 movd      %%eax, %%xmm5         #                                           \n\
612 pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     0080 0080 ... 0080 0080   \n\
613 psubsw    %%xmm5, %%xmm0        # Cb -= 128                                 \n\
614 psubsw    %%xmm5, %%xmm1        # Cr -= 128                                 \n\
615 psllw     $3, %%xmm0            # Promote precision                         \n\
616 psllw     $3, %%xmm1            # Promote precision                         \n\
617 movdqa    %%xmm0, %%xmm2        # Copy 8 Cb       00 u3 00 u2 00 u1 00 u0   \n\
618 movdqa    %%xmm1, %%xmm3        # Copy 8 Cr       00 v3 00 v2 00 v1 00 v0   \n\
619 movl      $0xf37df37d, %%eax    #                                           \n\
620 movd      %%eax, %%xmm5         #                                           \n\
621 pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     f37d f37d ... f37d f37d   \n\
622 pmulhw    %%xmm5, %%xmm2        # Mul Cb with green coeff -> Cb green       \n\
623 movl      $0xe5fce5fc, %%eax    #                                           \n\
624 movd      %%eax, %%xmm5         #                                           \n\
625 pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     e5fc e5fc ... e5fc e5fc   \n\
626 pmulhw    %%xmm5, %%xmm3        # Mul Cr with green coeff -> Cr green       \n\
627 movl      $0x40934093, %%eax    #                                           \n\
628 movd      %%eax, %%xmm5         #                                           \n\
629 pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     4093 4093 ... 4093 4093   \n\
630 pmulhw    %%xmm5, %%xmm0        # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0   \n\
631 movl      $0x33123312, %%eax    #                                           \n\
632 movd      %%eax, %%xmm5         #                                           \n\
633 pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     3312 3312 ... 3312 3312   \n\
634 pmulhw    %%xmm5, %%xmm1        # Mul Cr -> Cred  00 r3 00 r2 00 r1 00 r0   \n\
635 paddsw    %%xmm3, %%xmm2        # Cb green + Cr green -> Cgreen             \n\
636                                                                             \n\
637 # convert the luma part                                                     \n\
638 movl      $0x10101010, %%eax    #                                           \n\
639 movd      %%eax, %%xmm5         #                                           \n\
640 pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to   1010 1010 ... 1010 1010     \n\
641 psubusb   %%xmm5, %%xmm6        # Y -= 16                                   \n\
642 movdqa    %%xmm6, %%xmm7        # Copy 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
643 movl      $0x00ff00ff, %%eax    #                                           \n\
644 movd      %%eax, %%xmm5         #                                           \n\
645 pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     00ff 00ff ... 00ff 00ff   \n\
646 pand      %%xmm5, %%xmm6        # get Y even      00 Y6 00 Y4 00 Y2 00 Y0   \n\
647 psrlw     $8, %%xmm7            # get Y odd       00 Y7 00 Y5 00 Y3 00 Y1   \n\
648 psllw     $3, %%xmm6            # Promote precision                         \n\
649 psllw     $3, %%xmm7            # Promote precision                         \n\
650 movl      $0x253f253f, %%eax    #                                           \n\
651 movd      %%eax, %%xmm5         #                                           \n\
652 pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     253f 253f ... 253f 253f   \n\
653 pmulhw    %%xmm5, %%xmm6        # Mul 8 Y even    00 y6 00 y4 00 y2 00 y0   \n\
654 pmulhw    %%xmm5, %%xmm7        # Mul 8 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
655 "
656
657 #define SSE2_YUV_ADD "                                                      \n\
658 # Do horizontal and vertical scaling                                        \n\
659 movdqa    %%xmm0, %%xmm3        # Copy Cblue                                \n\
660 movdqa    %%xmm1, %%xmm4        # Copy Cred                                 \n\
661 movdqa    %%xmm2, %%xmm5        # Copy Cgreen                               \n\
662 paddsw    %%xmm6, %%xmm0        # Y even + Cblue  00 B6 00 B4 00 B2 00 B0   \n\
663 paddsw    %%xmm7, %%xmm3        # Y odd  + Cblue  00 B7 00 B5 00 B3 00 B1   \n\
664 paddsw    %%xmm6, %%xmm1        # Y even + Cred   00 R6 00 R4 00 R2 00 R0   \n\
665 paddsw    %%xmm7, %%xmm4        # Y odd  + Cred   00 R7 00 R5 00 R3 00 R1   \n\
666 paddsw    %%xmm6, %%xmm2        # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0   \n\
667 paddsw    %%xmm7, %%xmm5        # Y odd  + Cgreen 00 G7 00 G5 00 G3 00 G1   \n\
668                                                                             \n\
669 # Limit RGB even to 0..255                                                  \n\
670 packuswb  %%xmm0, %%xmm0        # B6 B4 B2 B0 / B6 B4 B2 B0                 \n\
671 packuswb  %%xmm1, %%xmm1        # R6 R4 R2 R0 / R6 R4 R2 R0                 \n\
672 packuswb  %%xmm2, %%xmm2        # G6 G4 G2 G0 / G6 G4 G2 G0                 \n\
673                                                                             \n\
674 # Limit RGB odd to 0..255                                                   \n\
675 packuswb  %%xmm3, %%xmm3        # B7 B5 B3 B1 / B7 B5 B3 B1                 \n\
676 packuswb  %%xmm4, %%xmm4        # R7 R5 R3 R1 / R7 R5 R3 R1                 \n\
677 packuswb  %%xmm5, %%xmm5        # G7 G5 G3 G1 / G7 G5 G3 G1                 \n\
678                                                                             \n\
679 # Interleave RGB even and odd                                               \n\
680 punpcklbw %%xmm3, %%xmm0        #                 B7 B6 B5 B4 B3 B2 B1 B0   \n\
681 punpcklbw %%xmm4, %%xmm1        #                 R7 R6 R5 R4 R3 R2 R1 R0   \n\
682 punpcklbw %%xmm5, %%xmm2        #                 G7 G6 G5 G4 G3 G2 G1 G0   \n\
683 "
684
685 #define SSE2_UNPACK_15_ALIGNED "                                            \n\
686 # mask unneeded bits off                                                    \n\
687 movl      $0xf8f8f8f8, %%eax    #                                           \n\
688 movd      %%eax, %%xmm5         #                                           \n\
689 pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
690 pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
691 psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
692 pand      %%xmm5, %%xmm2        # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
693 pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
694 psrlw     $1,%%xmm1             # __r7r6r5 r4r3____ __r7r6r5 r4r3____       \n\
695 pxor      %%xmm4, %%xmm4        # zero mm4                                  \n\
696 movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
697 movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
698                                                                             \n\
699 # convert rgb24 plane to rgb15 pack for pixel 0-7                           \n\
700 punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3______       \n\
701 punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
702 psllw     $2,%%xmm2             # ________ ____g7g6 g5g4g3__ ________       \n\
703 por       %%xmm2, %%xmm0        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
704 movntdq   %%xmm0, (%3)          # store pixel 0-7                           \n\
705                                                                             \n\
706 # convert rgb24 plane to rgb15 pack for pixel 8-15                          \n\
707 punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3______       \n\
708 punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
709 psllw     $2,%%xmm7             # ________ ____g7g6 g5g4g3__ ________       \n\
710 por       %%xmm7, %%xmm5        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
711 movntdq   %%xmm5, 16(%3)        # store pixel 8-15                          \n\
712 "
713
714 #define SSE2_UNPACK_15_UNALIGNED "                                          \n\
715 # mask unneeded bits off                                                    \n\
716 movl      $0xf8f8f8f8, %%eax    #                                           \n\
717 movd      %%eax, %%xmm5         #                                           \n\
718 pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
719 pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
720 psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
721 pand      %%xmm5, %%xmm2        # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
722 pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
723 psrlw     $1,%%xmm1             # __r7r6r5 r4r3____ __r7r6r5 r4r3____       \n\
724 pxor      %%xmm4, %%xmm4        # zero mm4                                  \n\
725 movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
726 movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
727                                                                             \n\
728 # convert rgb24 plane to rgb15 pack for pixel 0-7                           \n\
729 punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3______       \n\
730 punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
731 psllw     $2,%%xmm2             # ________ ____g7g6 g5g4g3__ ________       \n\
732 por       %%xmm2, %%xmm0        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
733 movdqu    %%xmm0, (%3)          # store pixel 0-7                           \n\
734                                                                             \n\
735 # convert rgb24 plane to rgb15 pack for pixel 8-15                          \n\
736 punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3______       \n\
737 punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
738 psllw     $2,%%xmm7             # ________ ____g7g6 g5g4g3__ ________       \n\
739 por       %%xmm7, %%xmm5        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
740 movdqu    %%xmm5, 16(%3)        # store pixel 8-15                          \n\
741 "
742
743 #define SSE2_UNPACK_16_ALIGNED "                                            \n\
744 # mask unneeded bits off                                                    \n\
745 movl      $0xf8f8f8f8, %%eax    #                                           \n\
746 movd      %%eax, %%xmm5         #                                           \n\
747 pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
748 pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
749 pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
750 movl      $0xfcfcfcfc, %%eax    #                                           \n\
751 movd      %%eax, %%xmm5         #                                           \n\
752 pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     fcfc fcfc ... fcfc fcfc   \n\
753 pand      %%xmm5, %%xmm2        # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
754 psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
755 pxor      %%xmm4, %%xmm4        # zero mm4                                  \n\
756 movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
757 movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
758                                                                             \n\
759 # convert rgb24 plane to rgb16 pack for pixel 0-7                           \n\
760 punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3g2____       \n\
761 punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
762 psllw     $3,%%xmm2             # ________ __g7g6g5 g4g3g2__ ________       \n\
763 por       %%xmm2, %%xmm0        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
764 movntdq   %%xmm0, (%3)          # store pixel 0-7                           \n\
765                                                                             \n\
766 # convert rgb24 plane to rgb16 pack for pixel 8-15                          \n\
767 punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3g2____       \n\
768 punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
769 psllw     $3,%%xmm7             # ________ __g7g6g5 g4g3g2__ ________       \n\
770 por       %%xmm7, %%xmm5        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
771 movntdq   %%xmm5, 16(%3)        # store pixel 8-15                          \n\
772 "
773
774 #define SSE2_UNPACK_16_UNALIGNED "                                          \n\
775 # mask unneeded bits off                                                    \n\
776 movl      $0xf8f8f8f8, %%eax    #                                           \n\
777 movd      %%eax, %%xmm5         #                                           \n\
778 pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
779 pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
780 pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
781 movl      $0xfcfcfcfc, %%eax    #                                           \n\
782 movd      %%eax, %%xmm5         #                                           \n\
783 pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     fcfc fcfc ... fcfc fcfc   \n\
784 pand      %%xmm5, %%xmm2        # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
785 psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
786 pxor      %%xmm4, %%xmm4        # zero mm4                                  \n\
787 movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
788 movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
789                                                                             \n\
790 # convert rgb24 plane to rgb16 pack for pixel 0-7                           \n\
791 punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3g2____       \n\
792 punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
793 psllw     $3,%%xmm2             # ________ __g7g6g5 g4g3g2__ ________       \n\
794 por       %%xmm2, %%xmm0        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
795 movdqu    %%xmm0, (%3)          # store pixel 0-7                           \n\
796                                                                             \n\
797 # convert rgb24 plane to rgb16 pack for pixel 8-15                          \n\
798 punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3g2____       \n\
799 punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
800 psllw     $3,%%xmm7             # ________ __g7g6g5 g4g3g2__ ________       \n\
801 por       %%xmm7, %%xmm5        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
802 movdqu    %%xmm5, 16(%3)        # store pixel 8-15                          \n\
803 "
804
805 #define SSE2_UNPACK_32_ARGB_ALIGNED "                                       \n\
806 pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
807 movdqa    %%xmm0, %%xmm4  #               B7 B6 B5 B4 B3 B2 B1 B0           \n\
808 punpcklbw %%xmm2, %%xmm4  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
809 movdqa    %%xmm1, %%xmm5  #               R7 R6 R5 R4 R3 R2 R1 R0           \n\
810 punpcklbw %%xmm3, %%xmm5  #               00 R3 00 R2 00 R1 00 R0           \n\
811 movdqa    %%xmm4, %%xmm6  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
812 punpcklwd %%xmm5, %%xmm4  #               00 R1 G1 B1 00 R0 G0 B0           \n\
813 movntdq   %%xmm4, (%3)    # Store ARGB3 ARGB2 ARGB1 ARGB0                   \n\
814 punpckhwd %%xmm5, %%xmm6  #               00 R3 G3 B3 00 R2 G2 B2           \n\
815 movntdq   %%xmm6, 16(%3)  # Store ARGB7 ARGB6 ARGB5 ARGB4                   \n\
816 punpckhbw %%xmm2, %%xmm0  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
817 punpckhbw %%xmm3, %%xmm1  #               00 R7 00 R6 00 R5 00 R4           \n\
818 movdqa    %%xmm0, %%xmm5  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
819 punpcklwd %%xmm1, %%xmm5  #               00 R5 G5 B5 00 R4 G4 B4           \n\
820 movntdq   %%xmm5, 32(%3)  # Store ARGB11 ARGB10 ARGB9 ARGB8                 \n\
821 punpckhwd %%xmm1, %%xmm0  #               00 R7 G7 B7 00 R6 G6 B6           \n\
822 movntdq   %%xmm0, 48(%3)  # Store ARGB15 ARGB14 ARGB13 ARGB12               \n\
823 "
824
825 #define SSE2_UNPACK_32_ARGB_UNALIGNED "                                     \n\
826 pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
827 movdqa    %%xmm0, %%xmm4  #               B7 B6 B5 B4 B3 B2 B1 B0           \n\
828 punpcklbw %%xmm2, %%xmm4  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
829 movdqa    %%xmm1, %%xmm5  #               R7 R6 R5 R4 R3 R2 R1 R0           \n\
830 punpcklbw %%xmm3, %%xmm5  #               00 R3 00 R2 00 R1 00 R0           \n\
831 movdqa    %%xmm4, %%xmm6  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
832 punpcklwd %%xmm5, %%xmm4  #               00 R1 G1 B1 00 R0 G0 B0           \n\
833 movdqu    %%xmm4, (%3)    # Store ARGB3 ARGB2 ARGB1 ARGB0                   \n\
834 punpckhwd %%xmm5, %%xmm6  #               00 R3 G3 B3 00 R2 G2 B2           \n\
835 movdqu    %%xmm6, 16(%3)  # Store ARGB7 ARGB6 ARGB5 ARGB4                   \n\
836 punpckhbw %%xmm2, %%xmm0  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
837 punpckhbw %%xmm3, %%xmm1  #               00 R7 00 R6 00 R5 00 R4           \n\
838 movdqa    %%xmm0, %%xmm5  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
839 punpcklwd %%xmm1, %%xmm5  #               00 R5 G5 B5 00 R4 G4 B4           \n\
840 movdqu    %%xmm5, 32(%3)  # Store ARGB11 ARGB10 ARGB9 ARGB8                 \n\
841 punpckhwd %%xmm1, %%xmm0  #               00 R7 G7 B7 00 R6 G6 B6           \n\
842 movdqu    %%xmm0, 48(%3)  # Store ARGB15 ARGB14 ARGB13 ARGB12               \n\
843 "
844
845 #define SSE2_UNPACK_32_RGBA_ALIGNED "                                       \n\
846 pxor      %%xmm3, %%xmm3  # zero mm3                                        \n\
847 movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
848 punpcklbw %%xmm1, %%xmm4  #                 R3 G3 R2 G2 R1 G1 R0 G0         \n\
849 punpcklbw %%xmm0, %%xmm3  #                 B3 00 B2 00 B1 00 B0 00         \n\
850 movdqa    %%xmm3, %%xmm5  #                 B3 00 B2 00 B1 00 B0 00         \n\
851 punpcklwd %%xmm4, %%xmm3  #                 R1 G1 B1 00 R0 G0 B0 00         \n\
852 movntdq   %%xmm3, (%3)    # Store RGBA3 RGBA2 RGBA1 RGBA0                   \n\
853 punpckhwd %%xmm4, %%xmm5  #                 R3 G3 B3 00 R2 G2 B2 00         \n\
854 movntdq   %%xmm5, 16(%3)  # Store RGBA7 RGBA6 RGBA5 RGBA4                   \n\
855 pxor      %%xmm6, %%xmm6  # zero mm6                                        \n\
856 punpckhbw %%xmm1, %%xmm2  #                 R7 G7 R6 G6 R5 G5 R4 G4         \n\
857 punpckhbw %%xmm0, %%xmm6  #                 B7 00 B6 00 B5 00 B4 00         \n\
858 movdqa    %%xmm6, %%xmm0  #                 B7 00 B6 00 B5 00 B4 00         \n\
859 punpcklwd %%xmm2, %%xmm6  #                 R5 G5 B5 00 R4 G4 B4 00         \n\
860 movntdq   %%xmm6, 32(%3)  # Store RGBA11 RGBA10 RGBA9 RGBA8                 \n\
861 punpckhwd %%xmm2, %%xmm0  #                 R7 G7 B7 00 R6 G6 B6 00         \n\
862 movntdq   %%xmm0, 48(%3)  # Store RGBA15 RGBA14 RGBA13 RGBA12               \n\
863 "
864
865 #define SSE2_UNPACK_32_RGBA_UNALIGNED "                                     \n\
866 pxor      %%xmm3, %%xmm3  # zero mm3                                        \n\
867 movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
868 punpcklbw %%xmm1, %%xmm4  #                 R3 G3 R2 G2 R1 G1 R0 G0         \n\
869 punpcklbw %%xmm0, %%xmm3  #                 B3 00 B2 00 B1 00 B0 00         \n\
870 movdqa    %%xmm3, %%xmm5  #                 B3 00 B2 00 B1 00 B0 00         \n\
871 punpcklwd %%xmm4, %%xmm3  #                 R1 G1 B1 00 R0 G0 B0 00         \n\
872 movdqu    %%xmm3, (%3)    # Store RGBA3 RGBA2 RGBA1 RGBA0                   \n\
873 punpckhwd %%xmm4, %%xmm5  #                 R3 G3 B3 00 R2 G2 B2 00         \n\
874 movdqu    %%xmm5, 16(%3)  # Store RGBA7 RGBA6 RGBA5 RGBA4                   \n\
875 pxor      %%xmm6, %%xmm6  # zero mm6                                        \n\
876 punpckhbw %%xmm1, %%xmm2  #                 R7 G7 R6 G6 R5 G5 R4 G4         \n\
877 punpckhbw %%xmm0, %%xmm6  #                 B7 00 B6 00 B5 00 B4 00         \n\
878 movdqa    %%xmm6, %%xmm0  #                 B7 00 B6 00 B5 00 B4 00         \n\
879 punpcklwd %%xmm2, %%xmm6  #                 R5 G5 B5 00 R4 G4 B4 00         \n\
880 movdqu    %%xmm6, 32(%3)  # Store RGBA11 RGBA10 RGBA9 RGBA8                 \n\
881 punpckhwd %%xmm2, %%xmm0  #                 R7 G7 B7 00 R6 G6 B6 00         \n\
882 movdqu    %%xmm0, 48(%3)  # Store RGBA15 RGBA14 RGBA13 RGBA12               \n\
883 "
884
885 #define SSE2_UNPACK_32_BGRA_ALIGNED "                                       \n\
886 pxor      %%xmm3, %%xmm3  # zero mm3                                        \n\
887 movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
888 punpcklbw %%xmm0, %%xmm4  #                 B3 G3 B2 G2 B1 G1 B0 G0         \n\
889 punpcklbw %%xmm1, %%xmm3  #                 R3 00 R2 00 R1 00 R0 00         \n\
890 movdqa    %%xmm3, %%xmm5  #                 R3 00 R2 00 R1 00 R0 00         \n\
891 punpcklwd %%xmm4, %%xmm3  #                 B1 G1 R1 00 B0 G0 R0 00         \n\
892 movntdq   %%xmm3, (%3)    # Store BGRA3 BGRA2 BGRA1 BGRA0                   \n\
893 punpckhwd %%xmm4, %%xmm5  #                 B3 G3 R3 00 B2 G2 R2 00         \n\
894 movntdq   %%xmm5, 16(%3)  # Store BGRA7 BGRA6 BGRA5 BGRA4                   \n\
895 pxor      %%xmm6, %%xmm6  # zero mm6                                        \n\
896 punpckhbw %%xmm0, %%xmm2  #                 B7 G7 B6 G6 B5 G5 B4 G4         \n\
897 punpckhbw %%xmm1, %%xmm6  #                 R7 00 R6 00 R5 00 R4 00         \n\
898 movdqa    %%xmm6, %%xmm0  #                 R7 00 R6 00 R5 00 R4 00         \n\
899 punpcklwd %%xmm2, %%xmm6  #                 B5 G5 R5 00 B4 G4 R4 00         \n\
900 movntdq   %%xmm6, 32(%3)  # Store BGRA11 BGRA10 BGRA9 BGRA8                 \n\
901 punpckhwd %%xmm2, %%xmm0  #                 B7 G7 R7 00 B6 G6 R6 00         \n\
902 movntdq   %%xmm0, 48(%3)  # Store BGRA15 BGRA14 BGRA13 BGRA12               \n\
903 "
904
905 #define SSE2_UNPACK_32_BGRA_UNALIGNED "                                     \n\
906 pxor      %%xmm3, %%xmm3  # zero mm3                                        \n\
907 movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
908 punpcklbw %%xmm0, %%xmm4  #                 B3 G3 B2 G2 B1 G1 B0 G0         \n\
909 punpcklbw %%xmm1, %%xmm3  #                 R3 00 R2 00 R1 00 R0 00         \n\
910 movdqa    %%xmm3, %%xmm5  #                 R3 00 R2 00 R1 00 R0 00         \n\
911 punpcklwd %%xmm4, %%xmm3  #                 B1 G1 R1 00 B0 G0 R0 00         \n\
912 movdqu    %%xmm3, (%3)    # Store BGRA3 BGRA2 BGRA1 BGRA0                   \n\
913 punpckhwd %%xmm4, %%xmm5  #                 B3 G3 R3 00 B2 G2 R2 00         \n\
914 movdqu    %%xmm5, 16(%3)  # Store BGRA7 BGRA6 BGRA5 BGRA4                   \n\
915 pxor      %%xmm6, %%xmm6  # zero mm6                                        \n\
916 punpckhbw %%xmm0, %%xmm2  #                 B7 G7 B6 G6 B5 G5 B4 G4         \n\
917 punpckhbw %%xmm1, %%xmm6  #                 R7 00 R6 00 R5 00 R4 00         \n\
918 movdqa    %%xmm6, %%xmm0  #                 R7 00 R6 00 R5 00 R4 00         \n\
919 punpcklwd %%xmm2, %%xmm6  #                 B5 G5 R5 00 B4 G4 R4 00         \n\
920 movdqu    %%xmm6, 32(%3)  # Store BGRA11 BGRA10 BGRA9 BGRA8                 \n\
921 punpckhwd %%xmm2, %%xmm0  #                 B7 G7 R7 00 B6 G6 R6 00         \n\
922 movdqu    %%xmm0, 48(%3)  # Store BGRA15 BGRA14 BGRA13 BGRA12               \n\
923 "
924
925 #define SSE2_UNPACK_32_ABGR_ALIGNED "                                       \n\
926 pxor      %%xmm3, %%xmm3  # zero mm3                                        \n\
927 movdqa    %%xmm1, %%xmm4  #                 R7 R6 R5 R4 R3 R2 R1 R0         \n\
928 punpcklbw %%xmm2, %%xmm4  #                 G3 R3 G2 R2 G1 R1 G0 R0         \n\
929 movdqa    %%xmm0, %%xmm5  #                 B7 B6 B5 B4 B3 B2 B1 B0         \n\
930 punpcklbw %%xmm3, %%xmm5  #                 00 B3 00 B2 00 B1 00 B0         \n\
931 movdqa    %%xmm4, %%xmm6  #                 G3 R3 G2 R2 G1 R1 G0 R0         \n\
932 punpcklwd %%xmm5, %%xmm4  #                 00 B1 G1 R1 00 B0 G0 R0         \n\
933 movntdq   %%xmm4, (%3)    # Store ABGR3 ABGR2 ABGR1 ABGR0                   \n\
934 punpckhwd %%xmm5, %%xmm6  #                 00 B3 G3 R3 00 B2 G2 R2         \n\
935 movntdq   %%xmm6, 16(%3)  # Store ABGR7 ABGR6 ABGR5 ABGR4                   \n\
936 punpckhbw %%xmm2, %%xmm1  #                 G7 R7 G6 R6 G5 R5 G4 R4         \n\
937 punpckhbw %%xmm3, %%xmm0  #                 00 B7 00 B6 00 B5 00 B4         \n\
938 movdqa    %%xmm1, %%xmm2  #                 G7 R7 G6 R6 G5 R5 G4 R4         \n\
939 punpcklwd %%xmm0, %%xmm1  #                 00 B5 G5 R5 00 B4 G4 R4         \n\
940 movntdq   %%xmm1, 32(%3)  # Store ABGR11 ABGR10 ABGR9 ABGR8                 \n\
941 punpckhwd %%xmm0, %%xmm2  #                 00 B7 G7 R7 00 B6 G6 R6         \n\
942 movntdq   %%xmm2, 48(%3)  # Store ABGR15 ABGR14 ABGR13 ABGR12               \n\
943 "
944
945 #define SSE2_UNPACK_32_ABGR_UNALIGNED "                                     \n\
946 pxor      %%xmm3, %%xmm3  # zero mm3                                        \n\
947 movdqa    %%xmm1, %%xmm4  #                 R7 R6 R5 R4 R3 R2 R1 R0         \n\
948 punpcklbw %%xmm2, %%xmm4  #                 G3 R3 G2 R2 G1 R1 G0 R0         \n\
949 movdqa    %%xmm0, %%xmm5  #                 B7 B6 B5 B4 B3 B2 B1 B0         \n\
950 punpcklbw %%xmm3, %%xmm5  #                 00 B3 00 B2 00 B1 00 B0         \n\
951 movdqa    %%xmm4, %%xmm6  #                 G3 R3 G2 R2 G1 R1 G0 R0         \n\
952 punpcklwd %%xmm5, %%xmm4  #                 00 B1 G1 R1 00 B0 G0 R0         \n\
953 movdqu    %%xmm4, (%3)    # Store ABGR3 ABGR2 ABGR1 ABGR0                   \n\
954 punpckhwd %%xmm5, %%xmm6  #                 00 B3 G3 R3 00 B2 G2 R2         \n\
955 movdqu    %%xmm6, 16(%3)  # Store ABGR7 ABGR6 ABGR5 ABGR4                   \n\
956 punpckhbw %%xmm2, %%xmm1  #                 G7 R7 G6 R6 G5 R5 G4 R4         \n\
957 punpckhbw %%xmm3, %%xmm0  #                 00 B7 00 B6 00 B5 00 B4         \n\
958 movdqa    %%xmm1, %%xmm2  #                 G7 R7 G6 R6 G5 R5 G4 R4         \n\
959 punpcklwd %%xmm0, %%xmm1  #                 00 B5 G5 R5 00 B4 G4 R4         \n\
960 movdqu    %%xmm1, 32(%3)  # Store ABGR11 ABGR10 ABGR9 ABGR8                 \n\
961 punpckhwd %%xmm0, %%xmm2  #                 00 B7 G7 R7 00 B6 G6 R6         \n\
962 movdqu    %%xmm2, 48(%3)  # Store ABGR15 ABGR14 ABGR13 ABGR12               \n\
963 "
964
965 #elif defined(HAVE_SSE2_INTRINSICS)
966
967 /* SSE2 intrinsics */
968
969 #include <emmintrin.h>
970
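/* The intrinsics below mirror the inline-assembly macros above: each
 * SSE2_CALL invocation processes one group of 16 pixels and expects the
 * pointers p_y, p_u, p_v and p_buffer to be in scope at the call site.
 *
 * Illustrative only (the actual conversion loops live outside this header):
 * a 32bpp caller is expected to combine the macros roughly as follows,
 * assuming the pointers have already been advanced to the current group:
 *
 *     SSE2_CALL (
 *         SSE2_INIT_32_ALIGNED
 *         SSE2_YUV_MUL
 *         SSE2_YUV_ADD
 *         SSE2_UNPACK_32_ARGB_ALIGNED
 *     );
 *     ...
 *     SSE2_END;
 */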
971 #define SSE2_CALL(SSE2_INSTRUCTIONS)        \
972     do {                                    \
973         __m128i xmm0, xmm1, xmm2, xmm3,     \
974                 xmm4, xmm5, xmm6, xmm7;     \
975         SSE2_INSTRUCTIONS                   \
976     } while(0)
977
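/* The non-temporal stores used by the *_ALIGNED macros bypass the cache;
 * the sfence below drains them so the buffer is fully written before it
 * is consumed. */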
978 #define SSE2_END  _mm_sfence()
979
980 #define SSE2_INIT_16_ALIGNED                \
981     xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
982     xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
983     xmm4 = _mm_setzero_si128();             \
984     xmm6 = _mm_load_si128((__m128i *)p_y);
985
986 #define SSE2_INIT_16_UNALIGNED              \
987     xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
988     xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
989     xmm4 = _mm_setzero_si128();             \
990     xmm6 = _mm_loadu_si128((__m128i *)p_y); \
991     _mm_prefetch(p_buffer, _MM_HINT_NTA);
992
993 #define SSE2_INIT_32_ALIGNED                \
994     xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
995     xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
996     xmm4 = _mm_setzero_si128();             \
997     xmm6 = _mm_load_si128((__m128i *)p_y);
998
999 #define SSE2_INIT_32_UNALIGNED              \
1000     xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
1001     xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
1002     xmm4 = _mm_setzero_si128();             \
1003     xmm6 = _mm_loadu_si128((__m128i *)p_y); \
1004     _mm_prefetch(p_buffer, _MM_HINT_NTA);
1005
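/* Fixed-point YUV->RGB multiply: remove the chroma bias (128) and the luma
 * offset (16), shift left by 3 for extra precision, then scale with
 * _mm_mulhi_epi16.  The 16-bit coefficients (0x253f, 0x4093, 0x3312,
 * 0xf37d, 0xe5fc) are the same values used by the assembly path. */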
1006 #define SSE2_YUV_MUL                        \
1007     xmm0 = _mm_unpacklo_epi8(xmm0, xmm4);   \
1008     xmm1 = _mm_unpacklo_epi8(xmm1, xmm4);   \
1009     xmm5 = _mm_set1_epi32(0x00800080UL);    \
1010     xmm0 = _mm_subs_epi16(xmm0, xmm5);      \
1011     xmm1 = _mm_subs_epi16(xmm1, xmm5);      \
1012     xmm0 = _mm_slli_epi16(xmm0, 3);         \
1013     xmm1 = _mm_slli_epi16(xmm1, 3);         \
1014     xmm2 = xmm0;                            \
1015     xmm3 = xmm1;                            \
1016     xmm5 = _mm_set1_epi32(0xf37df37dUL);    \
1017     xmm2 = _mm_mulhi_epi16(xmm2, xmm5);     \
1018     xmm5 = _mm_set1_epi32(0xe5fce5fcUL);    \
1019     xmm3 = _mm_mulhi_epi16(xmm3, xmm5);     \
1020     xmm5 = _mm_set1_epi32(0x40934093UL);    \
1021     xmm0 = _mm_mulhi_epi16(xmm0, xmm5);     \
1022     xmm5 = _mm_set1_epi32(0x33123312UL);    \
1023     xmm1 = _mm_mulhi_epi16(xmm1, xmm5);     \
1024     xmm2 = _mm_adds_epi16(xmm2, xmm3);      \
1025     \
1026     xmm5 = _mm_set1_epi32(0x10101010UL);    \
1027     xmm6 = _mm_subs_epu8(xmm6, xmm5);       \
1028     xmm7 = xmm6;                            \
1029     xmm5 = _mm_set1_epi32(0x00ff00ffUL);    \
1030     xmm6 = _mm_and_si128(xmm6, xmm5);       \
1031     xmm7 = _mm_srli_epi16(xmm7, 8);         \
1032     xmm6 = _mm_slli_epi16(xmm6, 3);         \
1033     xmm7 = _mm_slli_epi16(xmm7, 3);         \
1034     xmm5 = _mm_set1_epi32(0x253f253fUL);    \
1035     xmm6 = _mm_mulhi_epi16(xmm6, xmm5);     \
1036     xmm7 = _mm_mulhi_epi16(xmm7, xmm5);
1037
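/* Add the chroma terms to the scaled luma of the even (xmm6) and odd
 * (xmm7) pixels, saturate to unsigned 8 bits, then re-interleave so that
 * xmm0, xmm1 and xmm2 hold the blue, red and green bytes respectively. */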
1038 #define SSE2_YUV_ADD                        \
1039     xmm3 = xmm0;                            \
1040     xmm4 = xmm1;                            \
1041     xmm5 = xmm2;                            \
1042     xmm0 = _mm_adds_epi16(xmm0, xmm6);      \
1043     xmm3 = _mm_adds_epi16(xmm3, xmm7);      \
1044     xmm1 = _mm_adds_epi16(xmm1, xmm6);      \
1045     xmm4 = _mm_adds_epi16(xmm4, xmm7);      \
1046     xmm2 = _mm_adds_epi16(xmm2, xmm6);      \
1047     xmm5 = _mm_adds_epi16(xmm5, xmm7);      \
1048     \
1049     xmm0 = _mm_packus_epi16(xmm0, xmm0);    \
1050     xmm1 = _mm_packus_epi16(xmm1, xmm1);    \
1051     xmm2 = _mm_packus_epi16(xmm2, xmm2);    \
1052     \
1053     xmm3 = _mm_packus_epi16(xmm3, xmm3);    \
1054     xmm4 = _mm_packus_epi16(xmm4, xmm4);    \
1055     xmm5 = _mm_packus_epi16(xmm5, xmm5);    \
1056     \
1057     xmm0 = _mm_unpacklo_epi8(xmm0, xmm3);   \
1058     xmm1 = _mm_unpacklo_epi8(xmm1, xmm4);   \
1059     xmm2 = _mm_unpacklo_epi8(xmm2, xmm5);
1060
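/* Pack to 15bpp RGB555: blue in bits 0-4, green in bits 5-9, red in bits
 * 10-14, bit 15 left clear.  Each variant writes 16 output pixels as two
 * 16-byte stores; the _ALIGNED one streams past the cache and requires a
 * 16-byte-aligned p_buffer. */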
1061 #define SSE2_UNPACK_15_ALIGNED                      \
1062     xmm5 = _mm_set1_epi32(0xf8f8f8f8UL);            \
1063     xmm0 = _mm_and_si128(xmm0, xmm5);               \
1064     xmm0 = _mm_srli_epi16(xmm0, 3);                 \
1065     xmm2 = _mm_and_si128(xmm2, xmm5);               \
1066     xmm1 = _mm_and_si128(xmm1, xmm5);               \
1067     xmm1 = _mm_srli_epi16(xmm1, 1);                 \
1068     xmm4 = _mm_setzero_si128();                     \
1069     xmm5 = xmm0;                                    \
1070     xmm7 = xmm2;                                    \
1071     \
1072     xmm2 = _mm_unpacklo_epi8(xmm2, xmm4);           \
1073     xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);           \
1074     xmm2 = _mm_slli_epi16(xmm2, 2);                 \
1075     xmm0 = _mm_or_si128(xmm0, xmm2);                \
1076     _mm_stream_si128((__m128i*)p_buffer, xmm0);     \
1077     \
1078     xmm7 = _mm_unpackhi_epi8(xmm7, xmm4);           \
1079     xmm5 = _mm_unpackhi_epi8(xmm5, xmm1);           \
1080     xmm7 = _mm_slli_epi16(xmm7, 2);                 \
1081     xmm5 = _mm_or_si128(xmm5, xmm7);                \
1082     _mm_stream_si128((__m128i*)(p_buffer+8), xmm5);
1083
1084 #define SSE2_UNPACK_15_UNALIGNED                    \
1085     xmm5 = _mm_set1_epi32(0xf8f8f8f8UL);            \
1086     xmm0 = _mm_and_si128(xmm0, xmm5);               \
1087     xmm0 = _mm_srli_epi16(xmm0, 3);                 \
1088     xmm2 = _mm_and_si128(xmm2, xmm5);               \
1089     xmm1 = _mm_and_si128(xmm1, xmm5);               \
1090     xmm1 = _mm_srli_epi16(xmm1, 1);                 \
1091     xmm4 = _mm_setzero_si128();                     \
1092     xmm5 = xmm0;                                    \
1093     xmm7 = xmm2;                                    \
1094     \
1095     xmm2 = _mm_unpacklo_epi8(xmm2, xmm4);           \
1096     xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);           \
1097     xmm2 = _mm_slli_epi16(xmm2, 2);                 \
1098     xmm0 = _mm_or_si128(xmm0, xmm2);                \
1099     _mm_storeu_si128((__m128i*)p_buffer, xmm0);     \
1100     \
1101     xmm7 = _mm_unpackhi_epi8(xmm7, xmm4);           \
1102     xmm5 = _mm_unpackhi_epi8(xmm5, xmm1);           \
1103     xmm7 = _mm_slli_epi16(xmm7, 2);                 \
1104     xmm5 = _mm_or_si128(xmm5, xmm7);                \
1105     _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5);
1106
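/* Pack to 16bpp RGB565: blue in bits 0-4, green in bits 5-10 (6 bits,
 * hence the 0xfc mask and shift by 3), red in bits 11-15.  The store
 * layout matches the 15bpp case above. */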
1107 #define SSE2_UNPACK_16_ALIGNED                      \
1108     xmm5 = _mm_set1_epi32(0xf8f8f8f8UL);            \
1109     xmm0 = _mm_and_si128(xmm0, xmm5);               \
1110     xmm1 = _mm_and_si128(xmm1, xmm5);               \
1111     xmm5 = _mm_set1_epi32(0xfcfcfcfcUL);            \
1112     xmm2 = _mm_and_si128(xmm2, xmm5);               \
1113     xmm0 = _mm_srli_epi16(xmm0, 3);                 \
1114     xmm4 = _mm_setzero_si128();                     \
1115     xmm5 = xmm0;                                    \
1116     xmm7 = xmm2;                                    \
1117     \
1118     xmm2 = _mm_unpacklo_epi8(xmm2, xmm4);           \
1119     xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);           \
1120     xmm2 = _mm_slli_epi16(xmm2, 3);                 \
1121     xmm0 = _mm_or_si128(xmm0, xmm2);                \
1122     _mm_stream_si128((__m128i*)p_buffer, xmm0);     \
1123     \
1124     xmm7 = _mm_unpackhi_epi8(xmm7, xmm4);           \
1125     xmm5 = _mm_unpackhi_epi8(xmm5, xmm1);           \
1126     xmm7 = _mm_slli_epi16(xmm7, 3);                 \
1127     xmm5 = _mm_or_si128(xmm5, xmm7);                \
1128     _mm_stream_si128((__m128i*)(p_buffer+8), xmm5);
1129
1130 #define SSE2_UNPACK_16_UNALIGNED                    \
1131     xmm5 = _mm_set1_epi32(0xf8f8f8f8UL);            \
1132     xmm0 = _mm_and_si128(xmm0, xmm5);               \
1133     xmm1 = _mm_and_si128(xmm1, xmm5);               \
1134     xmm5 = _mm_set1_epi32(0xfcfcfcfcUL);            \
1135     xmm2 = _mm_and_si128(xmm2, xmm5);               \
1136     xmm0 = _mm_srli_epi16(xmm0, 3);                 \
1137     xmm4 = _mm_setzero_si128();                     \
1138     xmm5 = xmm0;                                    \
1139     xmm7 = xmm2;                                    \
1140     \
1141     xmm2 = _mm_unpacklo_epi8(xmm2, xmm4);           \
1142     xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);           \
1143     xmm2 = _mm_slli_epi16(xmm2, 3);                 \
1144     xmm0 = _mm_or_si128(xmm0, xmm2);                \
1145     _mm_storeu_si128((__m128i*)p_buffer, xmm0);     \
1146     \
1147     xmm7 = _mm_unpackhi_epi8(xmm7, xmm4);           \
1148     xmm5 = _mm_unpackhi_epi8(xmm5, xmm1);           \
1149     xmm7 = _mm_slli_epi16(xmm7, 3);                 \
1150     xmm5 = _mm_or_si128(xmm5, xmm7);                \
1151     _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5);
1152
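/* 32bpp unpack: interleave the B, G and R byte planes with a zero (alpha)
 * byte and write 16 pixels (64 bytes) as four 16-byte stores.  The RGBA,
 * BGRA and ABGR variants below permute the channel order in the same way
 * as their assembly counterparts. */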
1153 #define SSE2_UNPACK_32_ARGB_ALIGNED                 \
1154     xmm3 = _mm_setzero_si128();                     \
1155     xmm4 = xmm0;                                    \
1156     xmm4 = _mm_unpacklo_epi8(xmm4, xmm2);           \
1157     xmm5 = xmm1;                                    \
1158     xmm5 = _mm_unpacklo_epi8(xmm5, xmm3);           \
1159     xmm6 = xmm4;                                    \
1160     xmm4 = _mm_unpacklo_epi16(xmm4, xmm5);          \
1161     _mm_stream_si128((__m128i*)(p_buffer), xmm4);   \
1162     xmm6 = _mm_unpackhi_epi16(xmm6, xmm5);          \
1163     _mm_stream_si128((__m128i*)(p_buffer+4), xmm6); \
1164     xmm0 = _mm_unpackhi_epi8(xmm0, xmm2);           \
1165     xmm1 = _mm_unpackhi_epi8(xmm1, xmm3);           \
1166     xmm5 = xmm0;                                    \
1167     xmm5 = _mm_unpacklo_epi16(xmm5, xmm1);          \
1168     _mm_stream_si128((__m128i*)(p_buffer+8), xmm5); \
1169     xmm0 = _mm_unpackhi_epi16(xmm0, xmm1);          \
1170     _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);
1171
1172 #define SSE2_UNPACK_32_ARGB_UNALIGNED               \
1173     xmm3 = _mm_setzero_si128();                     \
1174     xmm4 = xmm0;                                    \
1175     xmm4 = _mm_unpacklo_epi8(xmm4, xmm2);           \
1176     xmm5 = xmm1;                                    \
1177     xmm5 = _mm_unpacklo_epi8(xmm5, xmm3);           \
1178     xmm6 = xmm4;                                    \
1179     xmm4 = _mm_unpacklo_epi16(xmm4, xmm5);          \
1180     _mm_storeu_si128((__m128i*)(p_buffer), xmm4);   \
1181     xmm6 = _mm_unpackhi_epi16(xmm6, xmm5);          \
1182     _mm_storeu_si128((__m128i*)(p_buffer+4), xmm6); \
1183     xmm0 = _mm_unpackhi_epi8(xmm0, xmm2);           \
1184     xmm1 = _mm_unpackhi_epi8(xmm1, xmm3);           \
1185     xmm5 = xmm0;                                    \
1186     xmm5 = _mm_unpacklo_epi16(xmm5, xmm1);          \
1187     _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5); \
1188     xmm0 = _mm_unpackhi_epi16(xmm0, xmm1);          \
1189     _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);
1190
1191 #define SSE2_UNPACK_32_RGBA_ALIGNED                 \
1192     xmm3 = _mm_setzero_si128();                     \
1193     xmm4 = xmm2;                                    \
1194     xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);           \
1195     xmm3 = _mm_unpacklo_epi8(xmm3, xmm0);           \
1196     xmm5 = xmm3;                                    \
1197     xmm3 = _mm_unpacklo_epi16(xmm3, xmm4);          \
1198     _mm_stream_si128((__m128i*)(p_buffer), xmm3);   \
1199     xmm5 = _mm_unpackhi_epi16(xmm5, xmm4);          \
1200     _mm_stream_si128((__m128i*)(p_buffer+4), xmm5); \
1201     xmm6 = _mm_setzero_si128();                     \
1202     xmm2 = _mm_unpackhi_epi8(xmm2, xmm1);           \
1203     xmm6 = _mm_unpackhi_epi8(xmm6, xmm0);           \
1204     xmm0 = xmm6;                                    \
1205     xmm6 = _mm_unpacklo_epi16(xmm6, xmm2);          \
1206     _mm_stream_si128((__m128i*)(p_buffer+8), xmm6); \
1207     xmm0 = _mm_unpackhi_epi16(xmm0, xmm2);          \
1208     _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);
1209
1210 #define SSE2_UNPACK_32_RGBA_UNALIGNED               \
1211     xmm3 = _mm_setzero_si128();                     \
1212     xmm4 = xmm2;                                    \
1213     xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);           \
1214     xmm3 = _mm_unpacklo_epi8(xmm3, xmm0);           \
1215     xmm5 = xmm3;                                    \
1216     xmm3 = _mm_unpacklo_epi16(xmm3, xmm4);          \
1217     _mm_storeu_si128((__m128i*)(p_buffer), xmm3);   \
1218     xmm5 = _mm_unpackhi_epi16(xmm5, xmm4);          \
1219     _mm_storeu_si128((__m128i*)(p_buffer+4), xmm5); \
1220     xmm6 = _mm_setzero_si128();                     \
1221     xmm2 = _mm_unpackhi_epi8(xmm2, xmm1);           \
1222     xmm6 = _mm_unpackhi_epi8(xmm6, xmm0);           \
1223     xmm0 = xmm6;                                    \
1224     xmm6 = _mm_unpacklo_epi16(xmm6, xmm2);          \
1225     _mm_storeu_si128((__m128i*)(p_buffer+8), xmm6); \
1226     xmm0 = _mm_unpackhi_epi16(xmm0, xmm2);          \
1227     _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);
1228
1229 #define SSE2_UNPACK_32_BGRA_ALIGNED                 \
1230     xmm3 = _mm_setzero_si128();                     \
1231     xmm4 = xmm2;                                    \
1232     xmm4 = _mm_unpacklo_epi8(xmm4, xmm0);           \
1233     xmm3 = _mm_unpacklo_epi8(xmm3, xmm1);           \
1234     xmm5 = xmm3;                                    \
1235     xmm3 = _mm_unpacklo_epi16(xmm3, xmm4);          \
1236     _mm_stream_si128((__m128i*)(p_buffer), xmm3);   \
1237     xmm5 = _mm_unpackhi_epi16(xmm5, xmm4);          \
1238     _mm_stream_si128((__m128i*)(p_buffer+4), xmm5); \
1239     xmm6 = _mm_setzero_si128();                     \
1240     xmm2 = _mm_unpackhi_epi8(xmm2, xmm0);           \
1241     xmm6 = _mm_unpackhi_epi8(xmm6, xmm1);           \
1242     xmm0 = xmm6;                                    \
1243     xmm6 = _mm_unpacklo_epi16(xmm6, xmm2);          \
1244     _mm_stream_si128((__m128i*)(p_buffer+8), xmm6); \
1245     xmm0 = _mm_unpackhi_epi16(xmm0, xmm2);          \
1246     _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);
1247
1248 #define SSE2_UNPACK_32_BGRA_UNALIGNED               \
1249     xmm3 = _mm_setzero_si128();                     \
1250     xmm4 = xmm2;                                    \
1251     xmm4 = _mm_unpacklo_epi8(xmm4, xmm0);           \
1252     xmm3 = _mm_unpacklo_epi8(xmm3, xmm1);           \
1253     xmm5 = xmm3;                                    \
1254     xmm3 = _mm_unpacklo_epi16(xmm3, xmm4);          \
1255     _mm_storeu_si128((__m128i*)(p_buffer), xmm3);   \
1256     xmm5 = _mm_unpackhi_epi16(xmm5, xmm4);          \
1257     _mm_storeu_si128((__m128i*)(p_buffer+4), xmm5); \
1258     xmm6 = _mm_setzero_si128();                     \
1259     xmm2 = _mm_unpackhi_epi8(xmm2, xmm0);           \
1260     xmm6 = _mm_unpackhi_epi8(xmm6, xmm1);           \
1261     xmm0 = xmm6;                                    \
1262     xmm6 = _mm_unpacklo_epi16(xmm6, xmm2);          \
1263     _mm_storeu_si128((__m128i*)(p_buffer+8), xmm6); \
1264     xmm0 = _mm_unpackhi_epi16(xmm0, xmm2);          \
1265     _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);
1266
1267 #define SSE2_UNPACK_32_ABGR_ALIGNED                 \
1268     xmm3 = _mm_setzero_si128();                     \
1269     xmm4 = xmm1;                                    \
1270     xmm4 = _mm_unpacklo_epi8(xmm4, xmm2);           \
1271     xmm5 = xmm0;                                    \
1272     xmm5 = _mm_unpacklo_epi8(xmm5, xmm3);           \
1273     xmm6 = xmm4;                                    \
1274     xmm4 = _mm_unpacklo_epi16(xmm4, xmm5);          \
1275     _mm_stream_si128((__m128i*)(p_buffer), xmm4);   \
1276     xmm6 = _mm_unpackhi_epi16(xmm6, xmm5);          \
1277     _mm_stream_si128((__m128i*)(p_buffer+4), xmm6); \
1278     xmm1 = _mm_unpackhi_epi8(xmm1, xmm2);           \
1279     xmm0 = _mm_unpackhi_epi8(xmm0, xmm3);           \
1280     xmm2 = xmm1;                                    \
1281     xmm1 = _mm_unpacklo_epi16(xmm1, xmm0);          \
1282     _mm_stream_si128((__m128i*)(p_buffer+8), xmm1); \
1283     xmm2 = _mm_unpackhi_epi16(xmm2, xmm0);          \
1284     _mm_stream_si128((__m128i*)(p_buffer+12), xmm2);
1285
1286 #define SSE2_UNPACK_32_ABGR_UNALIGNED               \
1287     xmm3 = _mm_setzero_si128();                     \
1288     xmm4 = xmm1;                                    \
1289     xmm4 = _mm_unpacklo_epi8(xmm4, xmm2);           \
1290     xmm5 = xmm0;                                    \
1291     xmm5 = _mm_unpacklo_epi8(xmm5, xmm3);           \
1292     xmm6 = xmm4;                                    \
1293     xmm4 = _mm_unpacklo_epi16(xmm4, xmm5);          \
1294     _mm_storeu_si128((__m128i*)(p_buffer), xmm4);   \
1295     xmm6 = _mm_unpackhi_epi16(xmm6, xmm5);          \
1296     _mm_storeu_si128((__m128i*)(p_buffer+4), xmm6); \
1297     xmm1 = _mm_unpackhi_epi8(xmm1, xmm2);           \
1298     xmm0 = _mm_unpackhi_epi8(xmm0, xmm3);           \
1299     xmm2 = xmm1;                                    \
1300     xmm1 = _mm_unpacklo_epi16(xmm1, xmm0);          \
1301     _mm_storeu_si128((__m128i*)(p_buffer+8), xmm1); \
1302     xmm2 = _mm_unpackhi_epi16(xmm2, xmm0);          \
1303     _mm_storeu_si128((__m128i*)(p_buffer+12), xmm2);
1304
1305 #endif
1306
1307 #endif