/*****************************************************************************
 * i420_rgb_mmx.h: MMX YUV transformation assembly
 *****************************************************************************
 * Copyright (C) 1999-2007 the VideoLAN team
 * $Id$
 *
 * Authors: Olie Lho <ollie@sis.com.tw>
 *          Gaël Hendryckx <jimmy@via.ecp.fr>
 *          Samuel Hocevar <sam@zoy.org>
 *          Damien Fouilleul <damienf@videolan.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/

#ifdef MODULE_NAME_IS_i420_rgb_mmx

/* hope these constant values are cache line aligned */
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)
#define USED_U64(foo) \
    static const uint64_t foo __asm__ (#foo) __attribute__((used))
#else
#define USED_U64(foo) \
    static const uint64_t foo __asm__ (#foo) __attribute__((unused))
#endif
USED_U64(mmx_80w)     = 0x0080008000800080ULL;
USED_U64(mmx_10w)     = 0x1010101010101010ULL;
USED_U64(mmx_00ffw)   = 0x00ff00ff00ff00ffULL;
USED_U64(mmx_Y_coeff) = 0x253f253f253f253fULL;

USED_U64(mmx_U_green) = 0xf37df37df37df37dULL;
USED_U64(mmx_U_blue)  = 0x4093409340934093ULL;
USED_U64(mmx_V_red)   = 0x3312331233123312ULL;
USED_U64(mmx_V_green) = 0xe5fce5fce5fce5fcULL;

USED_U64(mmx_mask_f8) = 0xf8f8f8f8f8f8f8f8ULL;
USED_U64(mmx_mask_fc) = 0xfcfcfcfcfcfcfcfcULL;
#undef USED_U64

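/*
 * The constants above are the BT.601 YUV->RGB coefficients in 16-bit fixed
 * point.  Both luma and chroma are promoted by psllw $3 before pmulhw (which
 * keeps the high 16 bits of the product), so each word encodes its
 * coefficient times 8192.  A rough scalar rendering of the same arithmetic
 * (illustration only, not part of the build; variable names are made up):
 *
 *     static inline int clamp255( int i ) { return i < 0 ? 0 : i > 255 ? 255 : i; }
 *
 *     int y = (luma > 16 ? luma - 16 : 0) << 3;        // psubusb mmx_10w ; psllw $3
 *     int u = (cb - 128) << 3;                         // psubsw  mmx_80w ; psllw $3
 *     int v = (cr - 128) << 3;
 *     int Y = (y * 0x253f) >> 16;                      // mmx_Y_coeff : 9535/8192  ~  1.164
 *     int B = clamp255( Y + ((u * 16531) >> 16) );     // mmx_U_blue  : 0x4093     ~  2.018
 *     int R = clamp255( Y + ((v * 13074) >> 16) );     // mmx_V_red   : 0x3312     ~  1.596
 *     int G = clamp255( Y + ((u * -3203) >> 16)        // mmx_U_green : 0xf37d     ~ -0.391
 *                         + ((v * -6660) >> 16) );     // mmx_V_green : 0xe5fc     ~ -0.813
 *
 * mmx_00ffw separates even and odd luma samples, and mmx_mask_f8 / mmx_mask_fc
 * keep the 5 (respectively 6) significant bits used by the 15/16 bpp packers.
 */
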
#if defined(CAN_COMPILE_MMX)

/* MMX assembly */

#define MMX_CALL(MMX_INSTRUCTIONS)      \
    do {                                \
    __asm__ __volatile__(               \
        ".p2align 3 \n\t"               \
        MMX_INSTRUCTIONS                \
        :                               \
        : "r" (p_y), "r" (p_u),         \
          "r" (p_v), "r" (p_buffer) );  \
    } while(0)

#define MMX_END __asm__ __volatile__ ( "emms" )

/* Use RIP-relative code in PIC mode on amd64 */
#if defined(__x86_64__) && defined(__PIC__)
#   define G "(%%rip)"
#else
#   define G
#endif
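
/*
 * The instruction blocks below are plain string literals, so appending G
 * relies on compile-time string concatenation.  For instance (illustrative
 * expansion, after the compiler has turned %% into %):
 *
 *     psubsw    mmx_80w"G", %%mm0
 *
 * is assembled as "psubsw mmx_80w(%rip), %mm0" when building PIC on x86-64,
 * and as "psubsw mmx_80w, %mm0" everywhere else.
 */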

#define MMX_INIT_16 "                                                       \n\
movd       (%1), %%mm0      # Load 4 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movd       (%2), %%mm1      # Load 4 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor      %%mm4, %%mm4      # zero mm4                                      \n\
movq       (%0), %%mm6      # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
"

#define MMX_INIT_16_GRAY "                                                  \n\
movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
#movl      $0, (%3)         # cache preload for image                       \n\
"

#define MMX_INIT_32 "                                                       \n\
movd      (%1), %%mm0       # Load 4 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movl        $0, (%3)        # cache preload for image                       \n\
movd      (%2), %%mm1       # Load 4 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor     %%mm4, %%mm4       # zero mm4                                      \n\
movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
"

/*
 * Do the multiply part of the conversion for even and odd pixels,
 * register usage:
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd  pixels,
 * mm6 -> Y even, mm7 -> Y odd
 */

#define MMX_YUV_MUL "                                                       \n\
# convert the chroma part                                                   \n\
punpcklbw %%mm4, %%mm0          # scatter 4 Cb    00 u3 00 u2 00 u1 00 u0   \n\
punpcklbw %%mm4, %%mm1          # scatter 4 Cr    00 v3 00 v2 00 v1 00 v0   \n\
psubsw    mmx_80w"G", %%mm0     # Cb -= 128                                 \n\
psubsw    mmx_80w"G", %%mm1     # Cr -= 128                                 \n\
psllw     $3, %%mm0             # Promote precision                         \n\
psllw     $3, %%mm1             # Promote precision                         \n\
movq      %%mm0, %%mm2          # Copy 4 Cb       00 u3 00 u2 00 u1 00 u0   \n\
movq      %%mm1, %%mm3          # Copy 4 Cr       00 v3 00 v2 00 v1 00 v0   \n\
pmulhw    mmx_U_green"G", %%mm2 # Mul Cb with green coeff -> Cb green       \n\
pmulhw    mmx_V_green"G", %%mm3 # Mul Cr with green coeff -> Cr green       \n\
pmulhw    mmx_U_blue"G", %%mm0  # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0   \n\
pmulhw    mmx_V_red"G", %%mm1   # Mul Cr -> Cred  00 r3 00 r2 00 r1 00 r0   \n\
paddsw    %%mm3, %%mm2          # Cb green + Cr green -> Cgreen             \n\
                                                                            \n\
# convert the luma part                                                     \n\
psubusb   mmx_10w"G", %%mm6     # Y -= 16                                   \n\
movq      %%mm6, %%mm7          # Copy 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
pand      mmx_00ffw"G", %%mm6   # get Y even      00 Y6 00 Y4 00 Y2 00 Y0   \n\
psrlw     $8, %%mm7             # get Y odd       00 Y7 00 Y5 00 Y3 00 Y1   \n\
psllw     $3, %%mm6             # Promote precision                         \n\
psllw     $3, %%mm7             # Promote precision                         \n\
pmulhw    mmx_Y_coeff"G", %%mm6 # Mul 4 Y even    00 y6 00 y4 00 y2 00 y0   \n\
pmulhw    mmx_Y_coeff"G", %%mm7 # Mul 4 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
"

/*
 * Do the addition part of the conversion for even and odd pixels,
 * register usage:
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd  pixels,
 * mm6 -> Y even, mm7 -> Y odd
 */

#define MMX_YUV_ADD "                                                       \n\
# Do horizontal and vertical scaling                                        \n\
movq      %%mm0, %%mm3          # Copy Cblue                                \n\
movq      %%mm1, %%mm4          # Copy Cred                                 \n\
movq      %%mm2, %%mm5          # Copy Cgreen                               \n\
paddsw    %%mm6, %%mm0          # Y even + Cblue  00 B6 00 B4 00 B2 00 B0   \n\
paddsw    %%mm7, %%mm3          # Y odd  + Cblue  00 B7 00 B5 00 B3 00 B1   \n\
paddsw    %%mm6, %%mm1          # Y even + Cred   00 R6 00 R4 00 R2 00 R0   \n\
paddsw    %%mm7, %%mm4          # Y odd  + Cred   00 R7 00 R5 00 R3 00 R1   \n\
paddsw    %%mm6, %%mm2          # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0   \n\
paddsw    %%mm7, %%mm5          # Y odd  + Cgreen 00 G7 00 G5 00 G3 00 G1   \n\
                                                                            \n\
# Limit RGB even to 0..255                                                  \n\
packuswb  %%mm0, %%mm0          # B6 B4 B2 B0 / B6 B4 B2 B0                 \n\
packuswb  %%mm1, %%mm1          # R6 R4 R2 R0 / R6 R4 R2 R0                 \n\
packuswb  %%mm2, %%mm2          # G6 G4 G2 G0 / G6 G4 G2 G0                 \n\
                                                                            \n\
# Limit RGB odd to 0..255                                                   \n\
packuswb  %%mm3, %%mm3          # B7 B5 B3 B1 / B7 B5 B3 B1                 \n\
packuswb  %%mm4, %%mm4          # R7 R5 R3 R1 / R7 R5 R3 R1                 \n\
packuswb  %%mm5, %%mm5          # G7 G5 G3 G1 / G7 G5 G3 G1                 \n\
                                                                            \n\
# Interleave RGB even and odd                                               \n\
punpcklbw %%mm3, %%mm0          #                 B7 B6 B5 B4 B3 B2 B1 B0   \n\
punpcklbw %%mm4, %%mm1          #                 R7 R6 R5 R4 R3 R2 R1 R0   \n\
punpcklbw %%mm5, %%mm2          #                 G7 G6 G5 G4 G3 G2 G1 G0   \n\
"

/*
 * Grayscale case, only use Y
 */

#define MMX_YUV_GRAY "                                                      \n\
# convert the luma part                                                     \n\
psubusb   mmx_10w"G", %%mm6                                                 \n\
movq      %%mm6, %%mm7                                                      \n\
pand      mmx_00ffw"G", %%mm6                                               \n\
psrlw     $8, %%mm7                                                         \n\
psllw     $3, %%mm6                                                         \n\
psllw     $3, %%mm7                                                         \n\
pmulhw    mmx_Y_coeff"G", %%mm6                                             \n\
pmulhw    mmx_Y_coeff"G", %%mm7                                             \n\
packuswb  %%mm6, %%mm6                                                      \n\
packuswb  %%mm7, %%mm7                                                      \n\
punpcklbw %%mm7, %%mm6                                                      \n\
"

#define MMX_UNPACK_16_GRAY "                                                \n\
movq      %%mm6, %%mm5                                                      \n\
pand      mmx_mask_f8"G", %%mm6                                             \n\
pand      mmx_mask_fc"G", %%mm5                                             \n\
movq      %%mm6, %%mm7                                                      \n\
psrlw     $3, %%mm7                                                         \n\
pxor      %%mm3, %%mm3                                                      \n\
movq      %%mm7, %%mm2                                                      \n\
movq      %%mm5, %%mm0                                                      \n\
punpcklbw %%mm3, %%mm5                                                      \n\
punpcklbw %%mm6, %%mm7                                                      \n\
psllw     $3, %%mm5                                                         \n\
por       %%mm5, %%mm7                                                      \n\
movq      %%mm7, (%3)                                                       \n\
punpckhbw %%mm3, %%mm0                                                      \n\
punpckhbw %%mm6, %%mm2                                                      \n\
psllw     $3, %%mm0                                                         \n\
movq      8(%0), %%mm6                                                      \n\
por       %%mm0, %%mm2                                                      \n\
movq      %%mm2, 8(%3)                                                      \n\
"

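/*
 * The grayscale pair scales Y alone and replicates it into a 5-6-5 word.
 * Scalar equivalent for one pixel (illustration only):
 *
 *     int Y = ((luma > 16 ? luma - 16 : 0) << 3) * 0x253f >> 16;   // psubusb / psllw / pmulhw
 *     if( Y > 255 ) Y = 255;                                       // packuswb saturation
 *     uint16_t gray = (uint16_t)(((Y >> 3) << 11) | ((Y >> 2) << 5) | (Y >> 3));
 */
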
/*
 * convert RGB plane to RGB 15 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */

#define MMX_UNPACK_15 "                                                     \n\
# mask unneeded bits off                                                    \n\
pand      mmx_mask_f8"G", %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
psrlw     $3,%%mm0              # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pand      mmx_mask_f8"G", %%mm2 # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
pand      mmx_mask_f8"G", %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
psrlw     $1,%%mm1              # __r7r6r5 r4r3____ __r7r6r5 r4r3____       \n\
pxor      %%mm4, %%mm4          # zero mm4                                  \n\
movq      %%mm0, %%mm5          # Copy B7-B0                                \n\
movq      %%mm2, %%mm7          # Copy G7-G0                                \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 0-3                           \n\
punpcklbw %%mm4, %%mm2          # ________ ________ g7g6g5g4 g3______       \n\
punpcklbw %%mm1, %%mm0          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $2,%%mm2              # ________ ____g7g6 g5g4g3__ ________       \n\
por       %%mm2, %%mm0          # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
movq      8(%0), %%mm6          # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
movq      %%mm0, (%3)           # store pixel 0-3                           \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 4-7                           \n\
punpckhbw %%mm4, %%mm7          # ________ ________ g7g6g5g4 g3______       \n\
punpckhbw %%mm1, %%mm5          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $2,%%mm7              # ________ ____g7g6 g5g4g3__ ________       \n\
movd      4(%1), %%mm0          # Load 4 Cb       __ __ __ __ u3 u2 u1 u0   \n\
por       %%mm7, %%mm5          # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
movd      4(%2), %%mm1          # Load 4 Cr       __ __ __ __ v3 v2 v1 v0   \n\
movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
"
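
/*
 * Net effect of MMX_UNPACK_15 on one pixel, in scalar form (illustration
 * only): the psrlw $1 on red and the psllw $2 on green line the fields up
 * inside the interleaved byte pairs.
 *
 *     uint16_t rgb15 = (uint16_t)(((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3));
 */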

/*
 * convert RGB plane to RGB 16 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */

#define MMX_UNPACK_16 "                                                     \n\
# mask unneeded bits off                                                    \n\
pand      mmx_mask_f8"G", %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
pand      mmx_mask_fc"G", %%mm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
pand      mmx_mask_f8"G", %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
psrlw     $3,%%mm0              # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pxor      %%mm4, %%mm4          # zero mm4                                  \n\
movq      %%mm0, %%mm5          # Copy B7-B0                                \n\
movq      %%mm2, %%mm7          # Copy G7-G0                                \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 0-3                           \n\
punpcklbw %%mm4, %%mm2          # ________ ________ g7g6g5g4 g3g2____       \n\
punpcklbw %%mm1, %%mm0          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%mm2              # ________ __g7g6g5 g4g3g2__ ________       \n\
por       %%mm2, %%mm0          # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movq      8(%0), %%mm6          # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
movq      %%mm0, (%3)           # store pixel 0-3                           \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 4-7                           \n\
punpckhbw %%mm4, %%mm7          # ________ ________ g7g6g5g4 g3g2____       \n\
punpckhbw %%mm1, %%mm5          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%mm7              # ________ __g7g6g5 g4g3g2__ ________       \n\
movd      4(%1), %%mm0          # Load 4 Cb       __ __ __ __ u3 u2 u1 u0   \n\
por       %%mm7, %%mm5          # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movd      4(%2), %%mm1          # Load 4 Cr       __ __ __ __ v3 v2 v1 v0   \n\
movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
"
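
/*
 * Scalar equivalent of the 16 bpp packing above (illustration only), with
 * 6 bits kept for green thanks to mmx_mask_fc:
 *
 *     uint16_t rgb16 = (uint16_t)(((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
 */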

/*
 * convert RGB plane to RGB packed format,
 * mm0 -> B, mm1 -> R, mm2 -> G
 */

#define MMX_UNPACK_32_ARGB "                                                \n\
pxor      %%mm3, %%mm3  # zero mm3                                          \n\
movq      %%mm0, %%mm4  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
punpcklbw %%mm2, %%mm4  #                 G3 B3 G2 B2 G1 B1 G0 B0           \n\
movq      %%mm1, %%mm5  #                 R7 R6 R5 R4 R3 R2 R1 R0           \n\
punpcklbw %%mm3, %%mm5  #                 00 R3 00 R2 00 R1 00 R0           \n\
movq      %%mm4, %%mm6  #                 G3 B3 G2 B2 G1 B1 G0 B0           \n\
punpcklwd %%mm5, %%mm4  #                 00 R1 B1 G1 00 R0 B0 G0           \n\
movq      %%mm4, (%3)   # Store ARGB1 ARGB0                                 \n\
punpckhwd %%mm5, %%mm6  #                 00 R3 B3 G3 00 R2 B2 G2           \n\
movq      %%mm6, 8(%3)  # Store ARGB3 ARGB2                                 \n\
punpckhbw %%mm2, %%mm0  #                 G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpckhbw %%mm3, %%mm1  #                 00 R7 00 R6 00 R5 00 R4           \n\
movq      %%mm0, %%mm5  #                 G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpcklwd %%mm1, %%mm5  #                 00 R5 B5 G5 00 R4 B4 G4           \n\
movq      %%mm5, 16(%3) # Store ARGB5 ARGB4                                 \n\
punpckhwd %%mm1, %%mm0  #                 00 R7 B7 G7 00 R6 B6 G6           \n\
movq      %%mm0, 24(%3) # Store ARGB7 ARGB6                                 \n\
"
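
/*
 * For the 32 bpp variants each pixel ends up as one little-endian word; for
 * MMX_UNPACK_32_ARGB the bytes land in memory as B, G, R, 0x00, i.e.
 * (illustration only):
 *
 *     uint32_t argb = ((uint32_t)r << 16) | ((uint32_t)g << 8) | (uint32_t)b;
 *
 * The RGBA, BGRA and ABGR variants below differ only in the unpack order.
 */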

#define MMX_UNPACK_32_RGBA "                                                \n\
pxor      %%mm3, %%mm3  # zero mm3                                          \n\
movq      %%mm2, %%mm4  #                 G7 G6 G5 G4 G3 G2 G1 G0           \n\
punpcklbw %%mm1, %%mm4  #                 R3 G3 R2 G2 R1 G1 R0 G0           \n\
punpcklbw %%mm0, %%mm3  #                 B3 00 B2 00 B1 00 B0 00           \n\
movq      %%mm3, %%mm5  #                 B3 00 B2 00 B1 00 B0 00           \n\
punpcklwd %%mm4, %%mm3  #                 R1 G1 B1 00 R0 G0 B0 00           \n\
movq      %%mm3, (%3)   # Store RGBA1 RGBA0                                 \n\
punpckhwd %%mm4, %%mm5  #                 R3 G3 B3 00 R2 G2 B2 00           \n\
movq      %%mm5, 8(%3)  # Store RGBA3 RGBA2                                 \n\
pxor      %%mm6, %%mm6  # zero mm6                                          \n\
punpckhbw %%mm1, %%mm2  #                 R7 G7 R6 G6 R5 G5 R4 G4           \n\
punpckhbw %%mm0, %%mm6  #                 B7 00 B6 00 B5 00 B4 00           \n\
movq      %%mm6, %%mm0  #                 B7 00 B6 00 B5 00 B4 00           \n\
punpcklwd %%mm2, %%mm6  #                 R5 G5 B5 00 R4 G4 B4 00           \n\
movq      %%mm6, 16(%3) # Store RGBA5 RGBA4                                 \n\
punpckhwd %%mm2, %%mm0  #                 R7 G7 B7 00 R6 G6 B6 00           \n\
movq      %%mm0, 24(%3) # Store RGBA7 RGBA6                                 \n\
"

#define MMX_UNPACK_32_BGRA "                                                \n\
pxor      %%mm3, %%mm3  # zero mm3                                          \n\
movq      %%mm2, %%mm4  #                 G7 G6 G5 G4 G3 G2 G1 G0           \n\
punpcklbw %%mm0, %%mm4  #                 B3 G3 B2 G2 B1 G1 B0 G0           \n\
punpcklbw %%mm1, %%mm3  #                 R3 00 R2 00 R1 00 R0 00           \n\
movq      %%mm3, %%mm5  #                 R3 00 R2 00 R1 00 R0 00           \n\
punpcklwd %%mm4, %%mm3  #                 B1 G1 R1 00 B0 G0 R0 00           \n\
movq      %%mm3, (%3)   # Store BGRA1 BGRA0                                 \n\
punpckhwd %%mm4, %%mm5  #                 B3 G3 R3 00 B2 G2 R2 00           \n\
movq      %%mm5, 8(%3)  # Store BGRA3 BGRA2                                 \n\
pxor      %%mm6, %%mm6  # zero mm6                                          \n\
punpckhbw %%mm0, %%mm2  #                 B7 G7 B6 G6 B5 G5 B4 G4           \n\
punpckhbw %%mm1, %%mm6  #                 R7 00 R6 00 R5 00 R4 00           \n\
movq      %%mm6, %%mm0  #                 R7 00 R6 00 R5 00 R4 00           \n\
punpcklwd %%mm2, %%mm6  #                 B5 G5 R5 00 B4 G4 R4 00           \n\
movq      %%mm6, 16(%3) # Store BGRA5 BGRA4                                 \n\
punpckhwd %%mm2, %%mm0  #                 B7 G7 R7 00 B6 G6 R6 00           \n\
movq      %%mm0, 24(%3) # Store BGRA7 BGRA6                                 \n\
"

#define MMX_UNPACK_32_ABGR "                                                \n\
pxor      %%mm3, %%mm3  # zero mm3                                          \n\
movq      %%mm1, %%mm4  #                 R7 R6 R5 R4 R3 R2 R1 R0           \n\
punpcklbw %%mm2, %%mm4  #                 G3 R3 G2 R2 G1 R1 G0 R0           \n\
movq      %%mm0, %%mm5  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
punpcklbw %%mm3, %%mm5  #                 00 B3 00 B2 00 B1 00 B0           \n\
movq      %%mm4, %%mm6  #                 G3 R3 G2 R2 G1 R1 G0 R0           \n\
punpcklwd %%mm5, %%mm4  #                 00 B1 G1 R1 00 B0 G0 R0           \n\
movq      %%mm4, (%3)   # Store ABGR1 ABGR0                                 \n\
punpckhwd %%mm5, %%mm6  #                 00 B3 G3 R3 00 B2 G2 R2           \n\
movq      %%mm6, 8(%3)  # Store ABGR3 ABGR2                                 \n\
punpckhbw %%mm2, %%mm1  #                 G7 R7 G6 R6 G5 R5 G4 R4           \n\
punpckhbw %%mm3, %%mm0  #                 00 B7 00 B6 00 B5 00 B4           \n\
movq      %%mm1, %%mm2  #                 G7 R7 G6 R6 G5 R5 G4 R4           \n\
punpcklwd %%mm0, %%mm1  #                 00 B5 G5 R5 00 B4 G4 R4           \n\
movq      %%mm1, 16(%3) # Store ABGR5 ABGR4                                 \n\
punpckhwd %%mm0, %%mm2  #                 00 B7 G7 R7 00 B6 G6 R6           \n\
movq      %%mm2, 24(%3) # Store ABGR7 ABGR6                                 \n\
"

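/*
 * The fragments above are meant to be concatenated inside a single MMX_CALL.
 * A caller converting 8 pixels per iteration to RGB565 might look roughly
 * like this (sketch only; the real loops live in the i420_rgb* converters
 * and also deal with line margins):
 *
 *     for( i_x = i_width / 8; i_x--; )
 *     {
 *         MMX_CALL( MMX_INIT_16
 *                   MMX_YUV_MUL
 *                   MMX_YUV_ADD
 *                   MMX_UNPACK_16 );
 *         p_y += 8; p_u += 4; p_v += 4; p_buffer += 8;
 *     }
 *     ...
 *     MMX_END;   // emms, before any following FPU code
 */
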
#elif defined(HAVE_MMX_INTRINSICS)

/* MMX intrinsics */

#include <mmintrin.h>

#define MMX_CALL(MMX_INSTRUCTIONS)  \
    do {                            \
        __m64 mm0, mm1, mm2, mm3,   \
              mm4, mm5, mm6, mm7;   \
        MMX_INSTRUCTIONS            \
    } while(0)

#define MMX_END _mm_empty()

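/*
 * With the intrinsics flavour the same call sites still compile: each MMX_*
 * macro below is a statement sequence instead of a string, and MMX_CALL
 * merely opens a scope holding the eight __m64 temporaries.  For example
 * (illustrative expansion):
 *
 *     MMX_CALL( MMX_INIT_16 MMX_YUV_MUL MMX_YUV_ADD MMX_UNPACK_15 );
 *
 * becomes a do { __m64 mm0, ..., mm7; mm0 = _mm_cvtsi32_si64(...); ... } while(0) block.
 */
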
#define MMX_INIT_16                     \
    mm0 = _mm_cvtsi32_si64(*(int*)p_u); \
    mm1 = _mm_cvtsi32_si64(*(int*)p_v); \
    mm4 = _mm_setzero_si64();           \
    mm6 = (__m64)*(uint64_t *)p_y;

#define MMX_INIT_32                     \
    mm0 = _mm_cvtsi32_si64(*(int*)p_u); \
    *(uint16_t *)p_buffer = 0;          \
    mm1 = _mm_cvtsi32_si64(*(int*)p_v); \
    mm4 = _mm_setzero_si64();           \
    mm6 = (__m64)*(uint64_t *)p_y;

#define MMX_YUV_MUL                                 \
    mm0 = _mm_unpacklo_pi8(mm0, mm4);               \
    mm1 = _mm_unpacklo_pi8(mm1, mm4);               \
    mm0 = _mm_subs_pi16(mm0, (__m64)mmx_80w);       \
    mm1 = _mm_subs_pi16(mm1, (__m64)mmx_80w);       \
    mm0 = _mm_slli_pi16(mm0, 3);                    \
    mm1 = _mm_slli_pi16(mm1, 3);                    \
    mm2 = mm0;                                      \
    mm3 = mm1;                                      \
    mm2 = _mm_mulhi_pi16(mm2, (__m64)mmx_U_green);  \
    mm3 = _mm_mulhi_pi16(mm3, (__m64)mmx_V_green);  \
    mm0 = _mm_mulhi_pi16(mm0, (__m64)mmx_U_blue);   \
    mm1 = _mm_mulhi_pi16(mm1, (__m64)mmx_V_red);    \
    mm2 = _mm_adds_pi16(mm2, mm3);                  \
    \
    mm6 = _mm_subs_pu8(mm6, (__m64)mmx_10w);        \
    mm7 = mm6;                                      \
    mm6 = _mm_and_si64(mm6, (__m64)mmx_00ffw);      \
    mm7 = _mm_srli_pi16(mm7, 8);                    \
    mm6 = _mm_slli_pi16(mm6, 3);                    \
    mm7 = _mm_slli_pi16(mm7, 3);                    \
    mm6 = _mm_mulhi_pi16(mm6, (__m64)mmx_Y_coeff);  \
    mm7 = _mm_mulhi_pi16(mm7, (__m64)mmx_Y_coeff);

#define MMX_YUV_ADD                     \
    mm3 = mm0;                          \
    mm4 = mm1;                          \
    mm5 = mm2;                          \
    mm0 = _mm_adds_pi16(mm0, mm6);      \
    mm3 = _mm_adds_pi16(mm3, mm7);      \
    mm1 = _mm_adds_pi16(mm1, mm6);      \
    mm4 = _mm_adds_pi16(mm4, mm7);      \
    mm2 = _mm_adds_pi16(mm2, mm6);      \
    mm5 = _mm_adds_pi16(mm5, mm7);      \
    \
    mm0 = _mm_packs_pu16(mm0, mm0);     \
    mm1 = _mm_packs_pu16(mm1, mm1);     \
    mm2 = _mm_packs_pu16(mm2, mm2);     \
    \
    mm3 = _mm_packs_pu16(mm3, mm3);     \
    mm4 = _mm_packs_pu16(mm4, mm4);     \
    mm5 = _mm_packs_pu16(mm5, mm5);     \
    \
    mm0 = _mm_unpacklo_pi8(mm0, mm3);   \
    mm1 = _mm_unpacklo_pi8(mm1, mm4);   \
    mm2 = _mm_unpacklo_pi8(mm2, mm5);

#define MMX_UNPACK_15                               \
    mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8);    \
    mm0 = _mm_srli_pi16(mm0, 3);                    \
    mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_f8);    \
    mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8);    \
    mm1 = _mm_srli_pi16(mm1, 1);                    \
    mm4 = _mm_setzero_si64();                       \
    mm5 = mm0;                                      \
    mm7 = mm2;                                      \
    \
    mm2 = _mm_unpacklo_pi8(mm2, mm4);               \
    mm0 = _mm_unpacklo_pi8(mm0, mm1);               \
    mm2 = _mm_slli_pi16(mm2, 2);                    \
    mm0 = _mm_or_si64(mm0, mm2);                    \
    mm6 = (__m64)*(uint64_t *)(p_y + 8);            \
    *(uint64_t *)p_buffer = (uint64_t)mm0;          \
    \
    mm7 = _mm_unpackhi_pi8(mm7, mm4);               \
    mm5 = _mm_unpackhi_pi8(mm5, mm1);               \
    mm7 = _mm_slli_pi16(mm7, 2);                    \
    mm0 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_u + 4)); \
    mm5 = _mm_or_si64(mm5, mm7);                    \
    mm1 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_v + 4)); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;

#define MMX_UNPACK_16                               \
    mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8);    \
    mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_fc);    \
    mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8);    \
    mm0 = _mm_srli_pi16(mm0, 3);                    \
    mm4 = _mm_setzero_si64();                       \
    mm5 = mm0;                                      \
    mm7 = mm2;                                      \
    \
    mm2 = _mm_unpacklo_pi8(mm2, mm4);               \
    mm0 = _mm_unpacklo_pi8(mm0, mm1);               \
    mm2 = _mm_slli_pi16(mm2, 3);                    \
    mm0 = _mm_or_si64(mm0, mm2);                    \
    mm6 = (__m64)*(uint64_t *)(p_y + 8);            \
    *(uint64_t *)p_buffer = (uint64_t)mm0;          \
    \
    mm7 = _mm_unpackhi_pi8(mm7, mm4);               \
    mm5 = _mm_unpackhi_pi8(mm5, mm1);               \
    mm7 = _mm_slli_pi16(mm7, 3);                    \
    mm0 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_u + 4)); \
    mm5 = _mm_or_si64(mm5, mm7);                    \
    mm1 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_v + 4)); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;

#define MMX_UNPACK_32_ARGB                      \
    mm3 = _mm_setzero_si64();                   \
    mm4 = mm0;                                  \
    mm4 = _mm_unpacklo_pi8(mm4, mm2);           \
    mm5 = mm1;                                  \
    mm5 = _mm_unpacklo_pi8(mm5, mm3);           \
    mm6 = mm4;                                  \
    mm4 = _mm_unpacklo_pi16(mm4, mm5);          \
    *(uint64_t *)p_buffer = (uint64_t)mm4;      \
    mm6 = _mm_unpackhi_pi16(mm6, mm5);          \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6;\
    mm0 = _mm_unpackhi_pi8(mm0, mm2);           \
    mm1 = _mm_unpackhi_pi8(mm1, mm3);           \
    mm5 = mm0;                                  \
    mm5 = _mm_unpacklo_pi16(mm5, mm1);          \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;\
    mm0 = _mm_unpackhi_pi16(mm0, mm1);          \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;

#define MMX_UNPACK_32_RGBA                      \
    mm3 = _mm_setzero_si64();                   \
    mm4 = mm2;                                  \
    mm4 = _mm_unpacklo_pi8(mm4, mm1);           \
    mm3 = _mm_unpacklo_pi8(mm3, mm0);           \
    mm5 = mm3;                                  \
    mm3 = _mm_unpacklo_pi16(mm3, mm4);          \
    *(uint64_t *)p_buffer = (uint64_t)mm3;      \
    mm5 = _mm_unpackhi_pi16(mm5, mm4);          \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm5;\
    mm6 = _mm_setzero_si64();                   \
    mm2 = _mm_unpackhi_pi8(mm2, mm1);           \
    mm6 = _mm_unpackhi_pi8(mm6, mm0);           \
    mm0 = mm6;                                  \
    mm6 = _mm_unpacklo_pi16(mm6, mm2);          \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm6;\
    mm0 = _mm_unpackhi_pi16(mm0, mm2);          \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;

#define MMX_UNPACK_32_BGRA                      \
    mm3 = _mm_setzero_si64();                   \
    mm4 = mm2;                                  \
    mm4 = _mm_unpacklo_pi8(mm4, mm0);           \
    mm3 = _mm_unpacklo_pi8(mm3, mm1);           \
    mm5 = mm3;                                  \
    mm3 = _mm_unpacklo_pi16(mm3, mm4);          \
    *(uint64_t *)p_buffer = (uint64_t)mm3;      \
    mm5 = _mm_unpackhi_pi16(mm5, mm4);          \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm5;\
    mm6 = _mm_setzero_si64();                   \
    mm2 = _mm_unpackhi_pi8(mm2, mm0);           \
    mm6 = _mm_unpackhi_pi8(mm6, mm1);           \
    mm0 = mm6;                                  \
    mm6 = _mm_unpacklo_pi16(mm6, mm2);          \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm6;\
    mm0 = _mm_unpackhi_pi16(mm0, mm2);          \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;

#define MMX_UNPACK_32_ABGR                      \
    mm3 = _mm_setzero_si64();                   \
    mm4 = mm1;                                  \
    mm4 = _mm_unpacklo_pi8(mm4, mm2);           \
    mm5 = mm0;                                  \
    mm5 = _mm_unpacklo_pi8(mm5, mm3);           \
    mm6 = mm4;                                  \
    mm4 = _mm_unpacklo_pi16(mm4, mm5);          \
    *(uint64_t *)p_buffer = (uint64_t)mm4;      \
    mm6 = _mm_unpackhi_pi16(mm6, mm5);          \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6;\
    mm1 = _mm_unpackhi_pi8(mm1, mm2);           \
    mm0 = _mm_unpackhi_pi8(mm0, mm3);           \
    mm2 = mm1;                                  \
    mm1 = _mm_unpacklo_pi16(mm1, mm0);          \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm1;\
    mm2 = _mm_unpackhi_pi16(mm2, mm0);          \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm2;

#endif

#elif defined( MODULE_NAME_IS_i420_rgb_sse2 )

#if defined(CAN_COMPILE_SSE2)

/* SSE2 assembly */

#define SSE2_CALL(SSE2_INSTRUCTIONS)    \
    do {                                \
    __asm__ __volatile__(               \
        ".p2align 3 \n\t"               \
        SSE2_INSTRUCTIONS               \
        :                               \
        : "r" (p_y), "r" (p_u),         \
          "r" (p_v), "r" (p_buffer)     \
        : "eax" );                      \
    } while(0)

#define SSE2_END  __asm__ __volatile__ ( "sfence" ::: "memory" )

#define SSE2_INIT_16_ALIGNED "                                              \n\
movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor      %%xmm4, %%xmm4    # zero xmm4                                     \n\
movdqa      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
"

#define SSE2_INIT_16_UNALIGNED "                                            \n\
movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor      %%xmm4, %%xmm4    # zero xmm4                                     \n\
movdqu      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
prefetchnta (%3)            # Tell CPU not to cache output RGB data         \n\
"

#define SSE2_INIT_32_ALIGNED "                                              \n\
movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor      %%xmm4, %%xmm4    # zero xmm4                                     \n\
movdqa      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
"

#define SSE2_INIT_32_UNALIGNED "                                            \n\
movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor      %%xmm4, %%xmm4    # zero xmm4                                     \n\
movdqu      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
prefetchnta (%3)            # Tell CPU not to cache output RGB data         \n\
"

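/*
 * Every SSE2 stage exists in an _ALIGNED and an _UNALIGNED variant because
 * movdqa and movntdq fault on misaligned addresses.  Callers are expected to
 * test alignment once and then stick to one variant, roughly like this
 * (sketch only; the real test lives in the i420_rgb* converters):
 *
 *     if( 0 == (15 & ((uintptr_t)p_y | (uintptr_t)p_buffer)) )
 *         SSE2_CALL( SSE2_INIT_16_ALIGNED
 *                    SSE2_YUV_MUL
 *                    SSE2_YUV_ADD
 *                    SSE2_UNPACK_16_ALIGNED );
 *     else
 *         SSE2_CALL( SSE2_INIT_16_UNALIGNED
 *                    SSE2_YUV_MUL
 *                    SSE2_YUV_ADD
 *                    SSE2_UNPACK_16_UNALIGNED );
 *     ...
 *     SSE2_END;   // sfence, so the movntdq streaming stores become globally visible
 */
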
#define SSE2_YUV_MUL "                                                      \n\
# convert the chroma part                                                   \n\
punpcklbw %%xmm4, %%xmm0        # scatter 8 Cb    00 u3 00 u2 00 u1 00 u0   \n\
punpcklbw %%xmm4, %%xmm1        # scatter 8 Cr    00 v3 00 v2 00 v1 00 v0   \n\
movl      $0x00800080, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     0080 0080 ... 0080 0080   \n\
psubsw    %%xmm5, %%xmm0        # Cb -= 128                                 \n\
psubsw    %%xmm5, %%xmm1        # Cr -= 128                                 \n\
psllw     $3, %%xmm0            # Promote precision                         \n\
psllw     $3, %%xmm1            # Promote precision                         \n\
movdqa    %%xmm0, %%xmm2        # Copy 8 Cb       00 u3 00 u2 00 u1 00 u0   \n\
movdqa    %%xmm1, %%xmm3        # Copy 8 Cr       00 v3 00 v2 00 v1 00 v0   \n\
movl      $0xf37df37d, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     f37d f37d ... f37d f37d   \n\
pmulhw    %%xmm5, %%xmm2        # Mul Cb with green coeff -> Cb green       \n\
movl      $0xe5fce5fc, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     e5fc e5fc ... e5fc e5fc   \n\
pmulhw    %%xmm5, %%xmm3        # Mul Cr with green coeff -> Cr green       \n\
movl      $0x40934093, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     4093 4093 ... 4093 4093   \n\
pmulhw    %%xmm5, %%xmm0        # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0   \n\
movl      $0x33123312, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     3312 3312 ... 3312 3312   \n\
pmulhw    %%xmm5, %%xmm1        # Mul Cr -> Cred  00 r3 00 r2 00 r1 00 r0   \n\
paddsw    %%xmm3, %%xmm2        # Cb green + Cr green -> Cgreen             \n\
                                                                            \n\
# convert the luma part                                                     \n\
movl      $0x10101010, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to   1010 1010 ... 1010 1010     \n\
psubusb   %%xmm5, %%xmm6        # Y -= 16                                   \n\
movdqa    %%xmm6, %%xmm7        # Copy 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
movl      $0x00ff00ff, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     00ff 00ff ... 00ff 00ff   \n\
pand      %%xmm5, %%xmm6        # get Y even      00 Y6 00 Y4 00 Y2 00 Y0   \n\
psrlw     $8, %%xmm7            # get Y odd       00 Y7 00 Y5 00 Y3 00 Y1   \n\
psllw     $3, %%xmm6            # Promote precision                         \n\
psllw     $3, %%xmm7            # Promote precision                         \n\
movl      $0x253f253f, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     253f 253f ... 253f 253f   \n\
pmulhw    %%xmm5, %%xmm6        # Mul 8 Y even    00 y6 00 y4 00 y2 00 y0   \n\
pmulhw    %%xmm5, %%xmm7        # Mul 8 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
"

#define SSE2_YUV_ADD "                                                      \n\
# Do horizontal and vertical scaling                                        \n\
movdqa    %%xmm0, %%xmm3        # Copy Cblue                                \n\
movdqa    %%xmm1, %%xmm4        # Copy Cred                                 \n\
movdqa    %%xmm2, %%xmm5        # Copy Cgreen                               \n\
paddsw    %%xmm6, %%xmm0        # Y even + Cblue  00 B6 00 B4 00 B2 00 B0   \n\
paddsw    %%xmm7, %%xmm3        # Y odd  + Cblue  00 B7 00 B5 00 B3 00 B1   \n\
paddsw    %%xmm6, %%xmm1        # Y even + Cred   00 R6 00 R4 00 R2 00 R0   \n\
paddsw    %%xmm7, %%xmm4        # Y odd  + Cred   00 R7 00 R5 00 R3 00 R1   \n\
paddsw    %%xmm6, %%xmm2        # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0   \n\
paddsw    %%xmm7, %%xmm5        # Y odd  + Cgreen 00 G7 00 G5 00 G3 00 G1   \n\
                                                                            \n\
# Limit RGB even to 0..255                                                  \n\
packuswb  %%xmm0, %%xmm0        # B6 B4 B2 B0 / B6 B4 B2 B0                 \n\
packuswb  %%xmm1, %%xmm1        # R6 R4 R2 R0 / R6 R4 R2 R0                 \n\
packuswb  %%xmm2, %%xmm2        # G6 G4 G2 G0 / G6 G4 G2 G0                 \n\
                                                                            \n\
# Limit RGB odd to 0..255                                                   \n\
packuswb  %%xmm3, %%xmm3        # B7 B5 B3 B1 / B7 B5 B3 B1                 \n\
packuswb  %%xmm4, %%xmm4        # R7 R5 R3 R1 / R7 R5 R3 R1                 \n\
packuswb  %%xmm5, %%xmm5        # G7 G5 G3 G1 / G7 G5 G3 G1                 \n\
                                                                            \n\
# Interleave RGB even and odd                                               \n\
punpcklbw %%xmm3, %%xmm0        #                 B7 B6 B5 B4 B3 B2 B1 B0   \n\
punpcklbw %%xmm4, %%xmm1        #                 R7 R6 R5 R4 R3 R2 R1 R0   \n\
punpcklbw %%xmm5, %%xmm2        #                 G7 G6 G5 G4 G3 G2 G1 G0   \n\
"

#define SSE2_UNPACK_15_ALIGNED "                                            \n\
# mask unneeded bits off                                                    \n\
movl      $0xf8f8f8f8, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pand      %%xmm5, %%xmm2        # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
psrlw     $1,%%xmm1             # __r7r6r5 r4r3____ __r7r6r5 r4r3____       \n\
pxor      %%xmm4, %%xmm4        # zero xmm4                                 \n\
movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 0-7                           \n\
punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3______       \n\
punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $2,%%xmm2             # ________ ____g7g6 g5g4g3__ ________       \n\
por       %%xmm2, %%xmm0        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
movntdq   %%xmm0, (%3)          # store pixel 0-7                           \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 8-15                          \n\
punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3______       \n\
punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $2,%%xmm7             # ________ ____g7g6 g5g4g3__ ________       \n\
por       %%xmm7, %%xmm5        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
movntdq   %%xmm5, 16(%3)        # store pixel 8-15                          \n\
"

#define SSE2_UNPACK_15_UNALIGNED "                                          \n\
# mask unneeded bits off                                                    \n\
movl      $0xf8f8f8f8, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pand      %%xmm5, %%xmm2        # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
psrlw     $1,%%xmm1             # __r7r6r5 r4r3____ __r7r6r5 r4r3____       \n\
pxor      %%xmm4, %%xmm4        # zero xmm4                                 \n\
movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 0-7                           \n\
punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3______       \n\
punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $2,%%xmm2             # ________ ____g7g6 g5g4g3__ ________       \n\
por       %%xmm2, %%xmm0        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
movdqu    %%xmm0, (%3)          # store pixel 0-7                           \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 8-15                          \n\
punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3______       \n\
punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $2,%%xmm7             # ________ ____g7g6 g5g4g3__ ________       \n\
por       %%xmm7, %%xmm5        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
movdqu    %%xmm5, 16(%3)        # store pixel 8-15                          \n\
"

#define SSE2_UNPACK_16_ALIGNED "                                            \n\
# mask unneeded bits off                                                    \n\
movl      $0xf8f8f8f8, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
movl      $0xfcfcfcfc, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     fcfc fcfc ... fcfc fcfc   \n\
pand      %%xmm5, %%xmm2        # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pxor      %%xmm4, %%xmm4        # zero xmm4                                 \n\
movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 0-7                           \n\
punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3g2____       \n\
punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%xmm2             # ________ __g7g6g5 g4g3g2__ ________       \n\
por       %%xmm2, %%xmm0        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movntdq   %%xmm0, (%3)          # store pixel 0-7                           \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 8-15                          \n\
punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3g2____       \n\
punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%xmm7             # ________ __g7g6g5 g4g3g2__ ________       \n\
por       %%xmm7, %%xmm5        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movntdq   %%xmm5, 16(%3)        # store pixel 8-15                          \n\
"

#define SSE2_UNPACK_16_UNALIGNED "                                          \n\
# mask unneeded bits off                                                    \n\
movl      $0xf8f8f8f8, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
movl      $0xfcfcfcfc, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     fcfc fcfc ... fcfc fcfc   \n\
pand      %%xmm5, %%xmm2        # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pxor      %%xmm4, %%xmm4        # zero xmm4                                 \n\
movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 0-7                           \n\
punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3g2____       \n\
punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%xmm2             # ________ __g7g6g5 g4g3g2__ ________       \n\
por       %%xmm2, %%xmm0        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movdqu    %%xmm0, (%3)          # store pixel 0-7                           \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 8-15                          \n\
punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3g2____       \n\
punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%xmm7             # ________ __g7g6g5 g4g3g2__ ________       \n\
por       %%xmm7, %%xmm5        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movdqu    %%xmm5, 16(%3)        # store pixel 8-15                          \n\
"

#define SSE2_UNPACK_32_ARGB_ALIGNED "                                       \n\
pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
movdqa    %%xmm0, %%xmm4  #               B7 B6 B5 B4 B3 B2 B1 B0           \n\
punpcklbw %%xmm2, %%xmm4  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
movdqa    %%xmm1, %%xmm5  #               R7 R6 R5 R4 R3 R2 R1 R0           \n\
punpcklbw %%xmm3, %%xmm5  #               00 R3 00 R2 00 R1 00 R0           \n\
movdqa    %%xmm4, %%xmm6  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
punpcklwd %%xmm5, %%xmm4  #               00 R1 B1 G1 00 R0 B0 G0           \n\
movntdq   %%xmm4, (%3)    # Store ARGB3 ARGB2 ARGB1 ARGB0                   \n\
punpckhwd %%xmm5, %%xmm6  #               00 R3 B3 G3 00 R2 B2 G2           \n\
movntdq   %%xmm6, 16(%3)  # Store ARGB7 ARGB6 ARGB5 ARGB4                   \n\
punpckhbw %%xmm2, %%xmm0  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpckhbw %%xmm3, %%xmm1  #               00 R7 00 R6 00 R5 00 R4           \n\
movdqa    %%xmm0, %%xmm5  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpcklwd %%xmm1, %%xmm5  #               00 R5 B5 G5 00 R4 B4 G4           \n\
movntdq   %%xmm5, 32(%3)  # Store ARGB11 ARGB10 ARGB9 ARGB8                 \n\
punpckhwd %%xmm1, %%xmm0  #               00 R7 B7 G7 00 R6 B6 G6           \n\
movntdq   %%xmm0, 48(%3)  # Store ARGB15 ARGB14 ARGB13 ARGB12               \n\
"

#define SSE2_UNPACK_32_ARGB_UNALIGNED "                                     \n\
pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
movdqa    %%xmm0, %%xmm4  #               B7 B6 B5 B4 B3 B2 B1 B0           \n\
punpcklbw %%xmm2, %%xmm4  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
movdqa    %%xmm1, %%xmm5  #               R7 R6 R5 R4 R3 R2 R1 R0           \n\
punpcklbw %%xmm3, %%xmm5  #               00 R3 00 R2 00 R1 00 R0           \n\
movdqa    %%xmm4, %%xmm6  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
punpcklwd %%xmm5, %%xmm4  #               00 R1 B1 G1 00 R0 B0 G0           \n\
movdqu    %%xmm4, (%3)    # Store ARGB3 ARGB2 ARGB1 ARGB0                   \n\
punpckhwd %%xmm5, %%xmm6  #               00 R3 B3 G3 00 R2 B2 G2           \n\
movdqu    %%xmm6, 16(%3)  # Store ARGB7 ARGB6 ARGB5 ARGB4                   \n\
punpckhbw %%xmm2, %%xmm0  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpckhbw %%xmm3, %%xmm1  #               00 R7 00 R6 00 R5 00 R4           \n\
movdqa    %%xmm0, %%xmm5  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpcklwd %%xmm1, %%xmm5  #               00 R5 B5 G5 00 R4 B4 G4           \n\
movdqu    %%xmm5, 32(%3)  # Store ARGB11 ARGB10 ARGB9 ARGB8                 \n\
punpckhwd %%xmm1, %%xmm0  #               00 R7 B7 G7 00 R6 B6 G6           \n\
movdqu    %%xmm0, 48(%3)  # Store ARGB15 ARGB14 ARGB13 ARGB12               \n\
"

#define SSE2_UNPACK_32_RGBA_ALIGNED "                                       \n\
pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
punpcklbw %%xmm1, %%xmm4  #                 R3 G3 R2 G2 R1 G1 R0 G0         \n\
punpcklbw %%xmm0, %%xmm3  #                 B3 00 B2 00 B1 00 B0 00         \n\
movdqa    %%xmm3, %%xmm5  #                 B3 00 B2 00 B1 00 B0 00         \n\
punpcklwd %%xmm4, %%xmm3  #                 R1 G1 B1 00 R0 G0 B0 00         \n\
movntdq   %%xmm3, (%3)    # Store RGBA3 RGBA2 RGBA1 RGBA0                   \n\
punpckhwd %%xmm4, %%xmm5  #                 R3 G3 B3 00 R2 G2 B2 00         \n\
movntdq   %%xmm5, 16(%3)  # Store RGBA7 RGBA6 RGBA5 RGBA4                   \n\
pxor      %%xmm6, %%xmm6  # zero xmm6                                       \n\
punpckhbw %%xmm1, %%xmm2  #                 R7 G7 R6 G6 R5 G5 R4 G4         \n\
punpckhbw %%xmm0, %%xmm6  #                 B7 00 B6 00 B5 00 B4 00         \n\
movdqa    %%xmm6, %%xmm0  #                 B7 00 B6 00 B5 00 B4 00         \n\
punpcklwd %%xmm2, %%xmm6  #                 R5 G5 B5 00 R4 G4 B4 00         \n\
movntdq   %%xmm6, 32(%3)  # Store RGBA11 RGBA10 RGBA9 RGBA8                 \n\
punpckhwd %%xmm2, %%xmm0  #                 R7 G7 B7 00 R6 G6 B6 00         \n\
movntdq   %%xmm0, 48(%3)  # Store RGBA15 RGBA14 RGBA13 RGBA12               \n\
"

#define SSE2_UNPACK_32_RGBA_UNALIGNED "                                     \n\
pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
punpcklbw %%xmm1, %%xmm4  #                 R3 G3 R2 G2 R1 G1 R0 G0         \n\
punpcklbw %%xmm0, %%xmm3  #                 B3 00 B2 00 B1 00 B0 00         \n\
movdqa    %%xmm3, %%xmm5  #                 B3 00 B2 00 B1 00 B0 00         \n\
punpcklwd %%xmm4, %%xmm3  #                 R1 G1 B1 00 R0 G0 B0 00         \n\
movdqu    %%xmm3, (%3)    # Store RGBA3 RGBA2 RGBA1 RGBA0                   \n\
punpckhwd %%xmm4, %%xmm5  #                 R3 G3 B3 00 R2 G2 B2 00         \n\
movdqu    %%xmm5, 16(%3)  # Store RGBA7 RGBA6 RGBA5 RGBA4                   \n\
pxor      %%xmm6, %%xmm6  # zero xmm6                                       \n\
punpckhbw %%xmm1, %%xmm2  #                 R7 G7 R6 G6 R5 G5 R4 G4         \n\
punpckhbw %%xmm0, %%xmm6  #                 B7 00 B6 00 B5 00 B4 00         \n\
movdqa    %%xmm6, %%xmm0  #                 B7 00 B6 00 B5 00 B4 00         \n\
punpcklwd %%xmm2, %%xmm6  #                 R5 G5 B5 00 R4 G4 B4 00         \n\
movdqu    %%xmm6, 32(%3)  # Store RGBA11 RGBA10 RGBA9 RGBA8                 \n\
punpckhwd %%xmm2, %%xmm0  #                 R7 G7 B7 00 R6 G6 B6 00         \n\
movdqu    %%xmm0, 48(%3)  # Store RGBA15 RGBA14 RGBA13 RGBA12               \n\
"

893 #define SSE2_UNPACK_32_BGRA_ALIGNED "                                       \n\
894 pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
895 movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
896 punpcklbw %%xmm0, %%xmm4  #                 B3 G3 B2 G2 B1 G1 B0 G0         \n\
897 punpcklbw %%xmm1, %%xmm3  #                 R3 00 R2 00 R1 00 R0 00         \n\
898 movdqa    %%xmm3, %%xmm5  #                 R3 00 R2 00 R1 00 R0 00         \n\
899 punpcklwd %%xmm4, %%xmm3  #                 B1 G1 R1 00 B0 G0 R0 00         \n\
900 movntdq   %%xmm3, (%3)    # Store BGRA3 BGRA2 BGRA1 BGRA0                   \n\
901 punpckhwd %%xmm4, %%xmm5  #                 B3 G3 R3 00 B2 G2 R2 00         \n\
902 movntdq   %%xmm5, 16(%3)  # Store BGRA7 BGRA6 BGRA5 BGRA4                   \n\
903 pxor      %%xmm6, %%xmm6  # zero xmm6                                       \n\
904 punpckhbw %%xmm0, %%xmm2  #                 B7 G7 B6 G6 B5 G5 B4 G4         \n\
905 punpckhbw %%xmm1, %%xmm6  #                 R7 00 R6 00 R5 00 R4 00         \n\
906 movdqa    %%xmm6, %%xmm0  #                 R7 00 R6 00 R5 00 R4 00         \n\
907 punpcklwd %%xmm2, %%xmm6  #                 B5 G5 R5 00 B4 G4 R4 00         \n\
908 movntdq   %%xmm6, 32(%3)  # Store BGRA11 BGRA10 BGRA9 BGRA8                 \n\
909 punpckhwd %%xmm2, %%xmm0  #                 B7 G7 R7 00 B6 G6 R6 00         \n\
910 movntdq   %%xmm0, 48(%3)  # Store BGRA15 BGRA14 BGRA13 BGRA12               \n\
911 "
912
913 #define SSE2_UNPACK_32_BGRA_UNALIGNED "                                     \n\
914 pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
915 movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
916 punpcklbw %%xmm0, %%xmm4  #                 B3 G3 B2 G2 B1 G1 B0 G0         \n\
917 punpcklbw %%xmm1, %%xmm3  #                 R3 00 R2 00 R1 00 R0 00         \n\
918 movdqa    %%xmm3, %%xmm5  #                 R3 00 R2 00 R1 00 R0 00         \n\
919 punpcklwd %%xmm4, %%xmm3  #                 B1 G1 R1 00 B0 G0 R0 00         \n\
920 movdqu    %%xmm3, (%3)    # Store BGRA3 BGRA2 BGRA1 BGRA0                   \n\
921 punpckhwd %%xmm4, %%xmm5  #                 B3 G3 R3 00 B2 G2 R2 00         \n\
922 movdqu    %%xmm5, 16(%3)  # Store BGRA7 BGRA6 BGRA5 BGRA4                   \n\
923 pxor      %%xmm6, %%xmm6  # zero xmm6                                       \n\
924 punpckhbw %%xmm0, %%xmm2  #                 B7 G7 B6 G6 B5 G5 B4 G4         \n\
925 punpckhbw %%xmm1, %%xmm6  #                 R7 00 R6 00 R5 00 R4 00         \n\
926 movdqa    %%xmm6, %%xmm0  #                 R7 00 R6 00 R5 00 R4 00         \n\
927 punpcklwd %%xmm2, %%xmm6  #                 B5 G5 R5 00 B4 G4 R4 00         \n\
928 movdqu    %%xmm6, 32(%3)  # Store BGRA11 BGRA10 BGRA9 BGRA8                 \n\
929 punpckhwd %%xmm2, %%xmm0  #                 B7 G7 R7 00 B6 G6 R6 00         \n\
930 movdqu    %%xmm0, 48(%3)  # Store BGRA15 BGRA14 BGRA13 BGRA12               \n\
931 "
932
933 #define SSE2_UNPACK_32_ABGR_ALIGNED "                                       \n\
934 pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
935 movdqa    %%xmm1, %%xmm4  #                 R7 R6 R5 R4 R3 R2 R1 R0         \n\
936 punpcklbw %%xmm2, %%xmm4  #                 G3 R3 G2 R2 G1 R1 G0 R0         \n\
937 movdqa    %%xmm0, %%xmm5  #                 B7 B6 B5 B4 B3 B2 B1 B0         \n\
938 punpcklbw %%xmm3, %%xmm5  #                 00 B3 00 B2 00 B1 00 B0         \n\
939 movdqa    %%xmm4, %%xmm6  #                 G3 R3 G2 R2 G1 R1 G0 R0         \n\
940 punpcklwd %%xmm5, %%xmm4  #                 00 B1 G1 R1 00 B0 G0 R0         \n\
941 movntdq   %%xmm4, (%3)    # Store ABGR3 ABGR2 ABGR1 ABGR0                   \n\
942 punpckhwd %%xmm5, %%xmm6  #                 00 B3 G3 R3 00 B2 G2 R2         \n\
943 movntdq   %%xmm6, 16(%3)  # Store ABGR7 ABGR6 ABGR5 ABGR4                   \n\
944 punpckhbw %%xmm2, %%xmm1  #                 G7 R7 G6 R6 G5 R5 G4 R4         \n\
945 punpckhbw %%xmm3, %%xmm0  #                 00 B7 00 B6 00 B5 00 B4         \n\
946 movdqa    %%xmm1, %%xmm2  #                 G7 R7 G6 R6 G5 R5 G4 R4         \n\
947 punpcklwd %%xmm0, %%xmm1  #                 00 B5 G5 R5 00 B4 G4 R4         \n\
948 movntdq   %%xmm1, 32(%3)  # Store ABGR11 ABGR10 ABGR9 ABGR8                 \n\
949 punpckhwd %%xmm0, %%xmm2  #                 00 B7 G7 R7 00 B6 G6 R6         \n\
950 movntdq   %%xmm2, 48(%3)  # Store ABGR15 ABGR14 ABGR13 ABGR12               \n\
951 "
952
953 #define SSE2_UNPACK_32_ABGR_UNALIGNED "                                     \n\
954 pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
955 movdqa    %%xmm1, %%xmm4  #                 R7 R6 R5 R4 R3 R2 R1 R0         \n\
956 punpcklbw %%xmm2, %%xmm4  #                 G3 R3 G2 R2 G1 R1 G0 R0         \n\
957 movdqa    %%xmm0, %%xmm5  #                 B7 B6 B5 B4 B3 B2 B1 B0         \n\
958 punpcklbw %%xmm3, %%xmm5  #                 00 B3 00 B2 00 B1 00 B0         \n\
959 movdqa    %%xmm4, %%xmm6  #                 G3 R3 G2 R2 G1 R1 G0 R0         \n\
960 punpcklwd %%xmm5, %%xmm4  #                 00 B1 G1 R1 00 B0 G0 R0         \n\
961 movdqu    %%xmm4, (%3)    # Store ABGR3 ABGR2 ABGR1 ABGR0                   \n\
962 punpckhwd %%xmm5, %%xmm6  #                 00 B3 G3 R3 00 B2 G2 R2         \n\
963 movdqu    %%xmm6, 16(%3)  # Store ABGR7 ABGR6 ABGR5 ABGR4                   \n\
964 punpckhbw %%xmm2, %%xmm1  #                 G7 R7 G6 R6 G5 R5 G4 R4         \n\
965 punpckhbw %%xmm3, %%xmm0  #                 00 B7 00 B6 00 B5 00 B4         \n\
966 movdqa    %%xmm1, %%xmm2  #                 G7 R7 G6 R6 G5 R5 G4 R4         \n\
967 punpcklwd %%xmm0, %%xmm1  #                 00 B5 G5 R5 00 B4 G4 R4         \n\
968 movdqu    %%xmm1, 32(%3)  # Store ABGR11 ABGR10 ABGR9 ABGR8                 \n\
969 punpckhwd %%xmm0, %%xmm2  #                 00 B7 G7 R7 00 B6 G6 R6         \n\
970 movdqu    %%xmm2, 48(%3)  # Store ABGR15 ABGR14 ABGR13 ABGR12               \n\
971 "
972
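/*
 * All of the unpack macros above expect the packed 8-bit results of the
 * YUV -> RGB stage in the same registers, as the per-line comments show:
 * xmm0 holds the 16 blue samples, xmm1 the 16 red samples and xmm2 the
 * 16 green samples, with the remaining xmm registers used as scratch.
 * The SSE2 intrinsic fallbacks below follow the same convention.
 */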
973 #elif defined(HAVE_SSE2_INTRINSICS)
974
975 /* SSE2 intrinsics */
976
977 #include <emmintrin.h>
978
979 #define SSE2_CALL(SSE2_INSTRUCTIONS)        \
980     do {                                    \
981         __m128i xmm0, xmm1, xmm2, xmm3,     \
982                 xmm4, xmm5, xmm6, xmm7;     \
983         SSE2_INSTRUCTIONS                   \
984     } while(0)
985
986 #define SSE2_END  _mm_sfence()
987
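/*
 * Intended use (a sketch, not a definition from this header): the caller is
 * expected to expand one macro of each stage per block of 16 pixels and to
 * fence once after the last block, for instance
 *
 *     SSE2_CALL (
 *         SSE2_INIT_32_ALIGNED
 *         SSE2_YUV_MUL
 *         SSE2_YUV_ADD
 *         SSE2_UNPACK_32_ARGB_ALIGNED
 *     );
 *     p_y += 16; p_u += 8; p_v += 8; p_buffer += 16;
 *     ...
 *     SSE2_END;   (sfence, so the non-temporal stores are globally visible)
 */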
988 #define SSE2_INIT_16_ALIGNED                \
989     xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
990     xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
991     xmm4 = _mm_setzero_si128();             \
992     xmm6 = _mm_load_si128((__m128i *)p_y);
993
994 #define SSE2_INIT_16_UNALIGNED              \
995     xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
996     xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
997     xmm4 = _mm_setzero_si128();             \
998     xmm6 = _mm_loadu_si128((__m128i *)p_y); \
999     _mm_prefetch(p_buffer, _MM_HINT_NTA);
1000
1001 #define SSE2_INIT_32_ALIGNED                \
1002     xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
1003     xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
1004     xmm4 = _mm_setzero_si128();             \
1005     xmm6 = _mm_load_si128((__m128i *)p_y);
1006
1007 #define SSE2_INIT_32_UNALIGNED              \
1008     xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
1009     xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
1010     xmm4 = _mm_setzero_si128();             \
1011     xmm6 = _mm_loadu_si128((__m128i *)p_y); \
1012     _mm_prefetch(p_buffer, _MM_HINT_NTA);
1013
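/*
 * The SSE2_INIT_* macros load one block of input: 16 luma bytes into xmm6
 * and the 8 corresponding Cb/Cr bytes into the low halves of xmm0 and xmm1
 * (4:2:0 chroma is subsampled 2:1 horizontally), and clear xmm4 for the
 * byte unpacking that follows.  The _UNALIGNED variants additionally
 * prefetch the output buffer with a non-temporal hint.
 */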
1014 #define SSE2_YUV_MUL                        \
1015     xmm0 = _mm_unpacklo_epi8(xmm0, xmm4);   \
1016     xmm1 = _mm_unpacklo_epi8(xmm1, xmm4);   \
1017     xmm5 = _mm_set1_epi32(0x00800080UL);    \
1018     xmm0 = _mm_subs_epi16(xmm0, xmm5);      \
1019     xmm1 = _mm_subs_epi16(xmm1, xmm5);      \
1020     xmm0 = _mm_slli_epi16(xmm0, 3);         \
1021     xmm1 = _mm_slli_epi16(xmm1, 3);         \
1022     xmm2 = xmm0;                            \
1023     xmm3 = xmm1;                            \
1024     xmm5 = _mm_set1_epi32(0xf37df37dUL);    \
1025     xmm2 = _mm_mulhi_epi16(xmm2, xmm5);     \
1026     xmm5 = _mm_set1_epi32(0xe5fce5fcUL);    \
1027     xmm3 = _mm_mulhi_epi16(xmm3, xmm5);     \
1028     xmm5 = _mm_set1_epi32(0x40934093UL);    \
1029     xmm0 = _mm_mulhi_epi16(xmm0, xmm5);     \
1030     xmm5 = _mm_set1_epi32(0x33123312UL);    \
1031     xmm1 = _mm_mulhi_epi16(xmm1, xmm5);     \
1032     xmm2 = _mm_adds_epi16(xmm2, xmm3);      \
1033     \
1034     xmm5 = _mm_set1_epi32(0x10101010UL);    \
1035     xmm6 = _mm_subs_epu8(xmm6, xmm5);       \
1036     xmm7 = xmm6;                            \
1037     xmm5 = _mm_set1_epi32(0x00ff00ffUL);    \
1038     xmm6 = _mm_and_si128(xmm6, xmm5);       \
1039     xmm7 = _mm_srli_epi16(xmm7, 8);         \
1040     xmm6 = _mm_slli_epi16(xmm6, 3);         \
1041     xmm7 = _mm_slli_epi16(xmm7, 3);         \
1042     xmm5 = _mm_set1_epi32(0x253f253fUL);    \
1043     xmm6 = _mm_mulhi_epi16(xmm6, xmm5);     \
1044     xmm7 = _mm_mulhi_epi16(xmm7, xmm5);
1045
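/*
 * SSE2_YUV_MUL is a fixed-point version of the usual ITU-R BT.601
 * conversion: 128 is subtracted from each chroma word, 16 from each luma
 * byte (the saturating 0x10101010 subtraction), both are pre-scaled by 8
 * with the left shifts, and _mm_mulhi_epi16 keeps the upper 16 bits of each
 * product, so every constant below is the real coefficient times 8192:
 *
 *     0x253f =  9535 ~  1.164 * 8192   (luma gain)
 *     0x4093 = 16531 ~  2.018 * 8192   (Cb -> blue)
 *     0x3312 = 13074 ~  1.596 * 8192   (Cr -> red)
 *     0xf37d = -3203 ~ -0.391 * 8192   (Cb -> green)
 *     0xe5fc = -6660 ~ -0.813 * 8192   (Cr -> green)
 *
 * which corresponds to the usual scalar formulas
 *
 *     R = 1.164 * (Y - 16)                      + 1.596 * (Cr - 128)
 *     G = 1.164 * (Y - 16) - 0.391 * (Cb - 128) - 0.813 * (Cr - 128)
 *     B = 1.164 * (Y - 16) + 2.018 * (Cb - 128)
 */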
1046 #define SSE2_YUV_ADD                        \
1047     xmm3 = xmm0;                            \
1048     xmm4 = xmm1;                            \
1049     xmm5 = xmm2;                            \
1050     xmm0 = _mm_adds_epi16(xmm0, xmm6);      \
1051     xmm3 = _mm_adds_epi16(xmm3, xmm7);      \
1052     xmm1 = _mm_adds_epi16(xmm1, xmm6);      \
1053     xmm4 = _mm_adds_epi16(xmm4, xmm7);      \
1054     xmm2 = _mm_adds_epi16(xmm2, xmm6);      \
1055     xmm5 = _mm_adds_epi16(xmm5, xmm7);      \
1056     \
1057     xmm0 = _mm_packus_epi16(xmm0, xmm0);    \
1058     xmm1 = _mm_packus_epi16(xmm1, xmm1);    \
1059     xmm2 = _mm_packus_epi16(xmm2, xmm2);    \
1060     \
1061     xmm3 = _mm_packus_epi16(xmm3, xmm3);    \
1062     xmm4 = _mm_packus_epi16(xmm4, xmm4);    \
1063     xmm5 = _mm_packus_epi16(xmm5, xmm5);    \
1064     \
1065     xmm0 = _mm_unpacklo_epi8(xmm0, xmm3);   \
1066     xmm1 = _mm_unpacklo_epi8(xmm1, xmm4);   \
1067     xmm2 = _mm_unpacklo_epi8(xmm2, xmm5);
1068
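/*
 * SSE2_YUV_ADD adds the scaled even (xmm6) and odd (xmm7) luma words to the
 * three chroma terms, clamps every result to 0..255 with the saturating
 * _mm_packus_epi16 packs, and re-interleaves the even/odd bytes so that
 * xmm0, xmm1 and xmm2 leave the stage holding the 16 blue, red and green
 * samples in pixel order, ready for the unpack macros.
 */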
1069 #define SSE2_UNPACK_15_ALIGNED                      \
1070     xmm5 = _mm_set1_epi32(0xf8f8f8f8UL);            \
1071     xmm0 = _mm_and_si128(xmm0, xmm5);               \
1072     xmm0 = _mm_srli_epi16(xmm0, 3);                 \
1073     xmm2 = _mm_and_si128(xmm2, xmm5);               \
1074     xmm1 = _mm_and_si128(xmm1, xmm5);               \
1075     xmm1 = _mm_srli_epi16(xmm1, 1);                 \
1076     xmm4 = _mm_setzero_si128();                     \
1077     xmm5 = xmm0;                                    \
1078     xmm7 = xmm2;                                    \
1079     \
1080     xmm2 = _mm_unpacklo_epi8(xmm2, xmm4);           \
1081     xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);           \
1082     xmm2 = _mm_slli_epi16(xmm2, 2);                 \
1083     xmm0 = _mm_or_si128(xmm0, xmm2);                \
1084     _mm_stream_si128((__m128i*)p_buffer, xmm0);     \
1085     \
1086     xmm7 = _mm_unpackhi_epi8(xmm7, xmm4);           \
1087     xmm5 = _mm_unpackhi_epi8(xmm5, xmm1);           \
1088     xmm7 = _mm_slli_epi16(xmm7, 2);                 \
1089     xmm5 = _mm_or_si128(xmm5, xmm7);                \
1090     _mm_stream_si128((__m128i*)(p_buffer+8), xmm5);
1091
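/*
 * 15-bit packing, in scalar terms: with blue in xmm0, red in xmm1 and green
 * in xmm2, each channel is masked to its top 5 bits (0xf8); blue ends up in
 * bits 4-0, red (already shifted right by 1) lands in the high byte of the
 * interleaved word, i.e. bits 14-10, and green is widened to 16 bits and
 * shifted up to bits 9-5, so the OR produces an X1R5G5B5 pixel:
 *
 *     pixel = ((R & 0xf8) << 7) | ((G & 0xf8) << 2) | (B >> 3)
 */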
1092 #define SSE2_UNPACK_15_UNALIGNED                    \
1093     xmm5 = _mm_set1_epi32(0xf8f8f8f8UL);            \
1094     xmm0 = _mm_and_si128(xmm0, xmm5);               \
1095     xmm0 = _mm_srli_epi16(xmm0, 3);                 \
1096     xmm2 = _mm_and_si128(xmm2, xmm5);               \
1097     xmm1 = _mm_and_si128(xmm1, xmm5);               \
1098     xmm1 = _mm_srli_epi16(xmm1, 1);                 \
1099     xmm4 = _mm_setzero_si128();                     \
1100     xmm5 = xmm0;                                    \
1101     xmm7 = xmm2;                                    \
1102     \
1103     xmm2 = _mm_unpacklo_epi8(xmm2, xmm4);           \
1104     xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);           \
1105     xmm2 = _mm_slli_epi16(xmm2, 2);                 \
1106     xmm0 = _mm_or_si128(xmm0, xmm2);                \
1107     _mm_storeu_si128((__m128i*)p_buffer, xmm0);     \
1108     \
1109     xmm7 = _mm_unpackhi_epi8(xmm7, xmm4);           \
1110     xmm5 = _mm_unpackhi_epi8(xmm5, xmm1);           \
1111     xmm7 = _mm_slli_epi16(xmm7, 2);                 \
1112     xmm5 = _mm_or_si128(xmm5, xmm7);                \
1113     _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5);
1114
1115 #define SSE2_UNPACK_16_ALIGNED                      \
1116     xmm5 = _mm_set1_epi32(0xf8f8f8f8UL);            \
1117     xmm0 = _mm_and_si128(xmm0, xmm5);               \
1118     xmm1 = _mm_and_si128(xmm1, xmm5);               \
1119     xmm5 = _mm_set1_epi32(0xfcfcfcfcUL);            \
1120     xmm2 = _mm_and_si128(xmm2, xmm5);               \
1121     xmm0 = _mm_srli_epi16(xmm0, 3);                 \
1122     xmm4 = _mm_setzero_si128();                     \
1123     xmm5 = xmm0;                                    \
1124     xmm7 = xmm2;                                    \
1125     \
1126     xmm2 = _mm_unpacklo_epi8(xmm2, xmm4);           \
1127     xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);           \
1128     xmm2 = _mm_slli_epi16(xmm2, 3);                 \
1129     xmm0 = _mm_or_si128(xmm0, xmm2);                \
1130     _mm_stream_si128((__m128i*)p_buffer, xmm0);     \
1131     \
1132     xmm7 = _mm_unpackhi_epi8(xmm7, xmm4);           \
1133     xmm5 = _mm_unpackhi_epi8(xmm5, xmm1);           \
1134     xmm7 = _mm_slli_epi16(xmm7, 3);                 \
1135     xmm5 = _mm_or_si128(xmm5, xmm7);                \
1136     _mm_stream_si128((__m128i*)(p_buffer+8), xmm5);
1137
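/*
 * The 16-bit variant differs only in precision: green keeps 6 bits (0xfc
 * mask) and red stays unshifted in the high byte, giving an R5G6B5 pixel:
 *
 *     pixel = ((R & 0xf8) << 8) | ((G & 0xfc) << 3) | (B >> 3)
 */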
1138 #define SSE2_UNPACK_16_UNALIGNED                    \
1139     xmm5 = _mm_set1_epi32(0xf8f8f8f8UL);            \
1140     xmm0 = _mm_and_si128(xmm0, xmm5);               \
1141     xmm1 = _mm_and_si128(xmm1, xmm5);               \
1142     xmm5 = _mm_set1_epi32(0xfcfcfcfcUL);            \
1143     xmm2 = _mm_and_si128(xmm2, xmm5);               \
1144     xmm0 = _mm_srli_epi16(xmm0, 3);                 \
1145     xmm4 = _mm_setzero_si128();                     \
1146     xmm5 = xmm0;                                    \
1147     xmm7 = xmm2;                                    \
1148     \
1149     xmm2 = _mm_unpacklo_epi8(xmm2, xmm4);           \
1150     xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);           \
1151     xmm2 = _mm_slli_epi16(xmm2, 3);                 \
1152     xmm0 = _mm_or_si128(xmm0, xmm2);                \
1153     _mm_storeu_si128((__m128i*)p_buffer, xmm0);     \
1154     \
1155     xmm7 = _mm_unpackhi_epi8(xmm7, xmm4);           \
1156     xmm5 = _mm_unpackhi_epi8(xmm5, xmm1);           \
1157     xmm7 = _mm_slli_epi16(xmm7, 3);                 \
1158     xmm5 = _mm_or_si128(xmm5, xmm7);                \
1159     _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5);
1160
1161 #define SSE2_UNPACK_32_ARGB_ALIGNED                 \
1162     xmm3 = _mm_setzero_si128();                     \
1163     xmm4 = xmm0;                                    \
1164     xmm4 = _mm_unpacklo_epi8(xmm4, xmm2);           \
1165     xmm5 = xmm1;                                    \
1166     xmm5 = _mm_unpacklo_epi8(xmm5, xmm3);           \
1167     xmm6 = xmm4;                                    \
1168     xmm4 = _mm_unpacklo_epi16(xmm4, xmm5);          \
1169     _mm_stream_si128((__m128i*)(p_buffer), xmm4);   \
1170     xmm6 = _mm_unpackhi_epi16(xmm6, xmm5);          \
1171     _mm_stream_si128((__m128i*)(p_buffer+4), xmm6); \
1172     xmm0 = _mm_unpackhi_epi8(xmm0, xmm2);           \
1173     xmm1 = _mm_unpackhi_epi8(xmm1, xmm3);           \
1174     xmm5 = xmm0;                                    \
1175     xmm5 = _mm_unpacklo_epi16(xmm5, xmm1);          \
1176     _mm_stream_si128((__m128i*)(p_buffer+8), xmm5); \
1177     xmm0 = _mm_unpackhi_epi16(xmm0, xmm1);          \
1178     _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);
1179
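/*
 * In the 32-bit unpack intrinsics, p_buffer is assumed to point at 32-bit
 * pixels, so the p_buffer+4 / +8 / +12 offsets advance by 16 bytes per
 * store, matching the 16(%3), 32(%3) and 48(%3) displacements used by the
 * assembly versions above.
 */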
1180 #define SSE2_UNPACK_32_ARGB_UNALIGNED               \
1181     xmm3 = _mm_setzero_si128();                     \
1182     xmm4 = xmm0;                                    \
1183     xmm4 = _mm_unpacklo_epi8(xmm4, xmm2);           \
1184     xmm5 = xmm1;                                    \
1185     xmm5 = _mm_unpacklo_epi8(xmm5, xmm3);           \
1186     xmm6 = xmm4;                                    \
1187     xmm4 = _mm_unpacklo_epi16(xmm4, xmm5);          \
1188     _mm_storeu_si128((__m128i*)(p_buffer), xmm4);   \
1189     xmm6 = _mm_unpackhi_epi16(xmm6, xmm5);          \
1190     _mm_storeu_si128((__m128i*)(p_buffer+4), xmm6); \
1191     xmm0 = _mm_unpackhi_epi8(xmm0, xmm2);           \
1192     xmm1 = _mm_unpackhi_epi8(xmm1, xmm3);           \
1193     xmm5 = xmm0;                                    \
1194     xmm5 = _mm_unpacklo_epi16(xmm5, xmm1);          \
1195     _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5); \
1196     xmm0 = _mm_unpackhi_epi16(xmm0, xmm1);          \
1197     _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);
1198
1199 #define SSE2_UNPACK_32_RGBA_ALIGNED                 \
1200     xmm3 = _mm_setzero_si128();                     \
1201     xmm4 = xmm2;                                    \
1202     xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);           \
1203     xmm3 = _mm_unpacklo_epi8(xmm3, xmm0);           \
1204     xmm5 = xmm3;                                    \
1205     xmm3 = _mm_unpacklo_epi16(xmm3, xmm4);          \
1206     _mm_stream_si128((__m128i*)(p_buffer), xmm3);   \
1207     xmm5 = _mm_unpackhi_epi16(xmm5, xmm4);          \
1208     _mm_stream_si128((__m128i*)(p_buffer+4), xmm5); \
1209     xmm6 = _mm_setzero_si128();                     \
1210     xmm2 = _mm_unpackhi_epi8(xmm2, xmm1);           \
1211     xmm6 = _mm_unpackhi_epi8(xmm6, xmm0);           \
1212     xmm0 = xmm6;                                    \
1213     xmm6 = _mm_unpacklo_epi16(xmm6, xmm2);          \
1214     _mm_stream_si128((__m128i*)(p_buffer+8), xmm6); \
1215     xmm0 = _mm_unpackhi_epi16(xmm0, xmm2);          \
1216     _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);
1217
1218 #define SSE2_UNPACK_32_RGBA_UNALIGNED               \
1219     xmm3 = _mm_setzero_si128();                     \
1220     xmm4 = xmm2;                                    \
1221     xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);           \
1222     xmm3 = _mm_unpacklo_epi8(xmm3, xmm0);           \
1223     xmm5 = xmm3;                                    \
1224     xmm3 = _mm_unpacklo_epi16(xmm3, xmm4);          \
1225     _mm_storeu_si128((__m128i*)(p_buffer), xmm3);   \
1226     xmm5 = _mm_unpackhi_epi16(xmm5, xmm4);          \
1227     _mm_storeu_si128((__m128i*)(p_buffer+4), xmm5); \
1228     xmm6 = _mm_setzero_si128();                     \
1229     xmm2 = _mm_unpackhi_epi8(xmm2, xmm1);           \
1230     xmm6 = _mm_unpackhi_epi8(xmm6, xmm0);           \
1231     xmm0 = xmm6;                                    \
1232     xmm6 = _mm_unpacklo_epi16(xmm6, xmm2);          \
1233     _mm_storeu_si128((__m128i*)(p_buffer+8), xmm6); \
1234     xmm0 = _mm_unpackhi_epi16(xmm0, xmm2);          \
1235     _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);
1236
1237 #define SSE2_UNPACK_32_BGRA_ALIGNED                 \
1238     xmm3 = _mm_setzero_si128();                     \
1239     xmm4 = xmm2;                                    \
1240     xmm4 = _mm_unpacklo_epi8(xmm4, xmm0);           \
1241     xmm3 = _mm_unpacklo_epi8(xmm3, xmm1);           \
1242     xmm5 = xmm3;                                    \
1243     xmm3 = _mm_unpacklo_epi16(xmm3, xmm4);          \
1244     _mm_stream_si128((__m128i*)(p_buffer), xmm3);   \
1245     xmm5 = _mm_unpackhi_epi16(xmm5, xmm4);          \
1246     _mm_stream_si128((__m128i*)(p_buffer+4), xmm5); \
1247     xmm6 = _mm_setzero_si128();                     \
1248     xmm2 = _mm_unpackhi_epi8(xmm2, xmm0);           \
1249     xmm6 = _mm_unpackhi_epi8(xmm6, xmm1);           \
1250     xmm0 = xmm6;                                    \
1251     xmm6 = _mm_unpacklo_epi16(xmm6, xmm2);          \
1252     _mm_stream_si128((__m128i*)(p_buffer+8), xmm6); \
1253     xmm0 = _mm_unpackhi_epi16(xmm0, xmm2);          \
1254     _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);
1255
1256 #define SSE2_UNPACK_32_BGRA_UNALIGNED               \
1257     xmm3 = _mm_setzero_si128();                     \
1258     xmm4 = xmm2;                                    \
1259     xmm4 = _mm_unpacklo_epi8(xmm4, xmm0);           \
1260     xmm3 = _mm_unpacklo_epi8(xmm3, xmm1);           \
1261     xmm5 = xmm3;                                    \
1262     xmm3 = _mm_unpacklo_epi16(xmm3, xmm4);          \
1263     _mm_storeu_si128((__m128i*)(p_buffer), xmm3);   \
1264     xmm5 = _mm_unpackhi_epi16(xmm5, xmm4);          \
1265     _mm_storeu_si128((__m128i*)(p_buffer+4), xmm5); \
1266     xmm6 = _mm_setzero_si128();                     \
1267     xmm2 = _mm_unpackhi_epi8(xmm2, xmm0);           \
1268     xmm6 = _mm_unpackhi_epi8(xmm6, xmm1);           \
1269     xmm0 = xmm6;                                    \
1270     xmm6 = _mm_unpacklo_epi16(xmm6, xmm2);          \
1271     _mm_storeu_si128((__m128i*)(p_buffer+8), xmm6); \
1272     xmm0 = _mm_unpackhi_epi16(xmm0, xmm2);          \
1273     _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);
1274
1275 #define SSE2_UNPACK_32_ABGR_ALIGNED                 \
1276     xmm3 = _mm_setzero_si128();                     \
1277     xmm4 = xmm1;                                    \
1278     xmm4 = _mm_unpacklo_epi8(xmm4, xmm2);           \
1279     xmm5 = xmm0;                                    \
1280     xmm5 = _mm_unpacklo_epi8(xmm5, xmm3);           \
1281     xmm6 = xmm4;                                    \
1282     xmm4 = _mm_unpacklo_epi16(xmm4, xmm5);          \
1283     _mm_stream_si128((__m128i*)(p_buffer), xmm4);   \
1284     xmm6 = _mm_unpackhi_epi16(xmm6, xmm5);          \
1285     _mm_stream_si128((__m128i*)(p_buffer+4), xmm6); \
1286     xmm1 = _mm_unpackhi_epi8(xmm1, xmm2);           \
1287     xmm0 = _mm_unpackhi_epi8(xmm0, xmm3);           \
1288     xmm2 = xmm1;                                    \
1289     xmm1 = _mm_unpacklo_epi16(xmm1, xmm0);          \
1290     _mm_stream_si128((__m128i*)(p_buffer+8), xmm1); \
1291     xmm2 = _mm_unpackhi_epi16(xmm2, xmm0);          \
1292     _mm_stream_si128((__m128i*)(p_buffer+12), xmm2);
1293
1294 #define SSE2_UNPACK_32_ABGR_UNALIGNED               \
1295     xmm3 = _mm_setzero_si128();                     \
1296     xmm4 = xmm1;                                    \
1297     xmm4 = _mm_unpacklo_epi8(xmm4, xmm2);           \
1298     xmm5 = xmm0;                                    \
1299     xmm5 = _mm_unpacklo_epi8(xmm5, xmm3);           \
1300     xmm6 = xmm4;                                    \
1301     xmm4 = _mm_unpacklo_epi16(xmm4, xmm5);          \
1302     _mm_storeu_si128((__m128i*)(p_buffer), xmm4);   \
1303     xmm6 = _mm_unpackhi_epi16(xmm6, xmm5);          \
1304     _mm_storeu_si128((__m128i*)(p_buffer+4), xmm6); \
1305     xmm1 = _mm_unpackhi_epi8(xmm1, xmm2);           \
1306     xmm0 = _mm_unpackhi_epi8(xmm0, xmm3);           \
1307     xmm2 = xmm1;                                    \
1308     xmm1 = _mm_unpacklo_epi16(xmm1, xmm0);          \
1309     _mm_storeu_si128((__m128i*)(p_buffer+8), xmm1); \
1310     xmm2 = _mm_unpackhi_epi16(xmm2, xmm0);          \
1311     _mm_storeu_si128((__m128i*)(p_buffer+12), xmm2);
1312
1313 #endif
1314
1315 #endif