/*****************************************************************************
 * i420_rgb_sse2.h: SSE2 YUV transformation assembly
 *****************************************************************************
 * Copyright (C) 1999-2012 VLC authors and VideoLAN
 *
 * Authors: Damien Fouilleul <damienf@videolan.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/
#if defined(CAN_COMPILE_SSE2)

/* SSE2 assembly */

#define SSE2_CALL(SSE2_INSTRUCTIONS)    \
    do {                                \
    __asm__ __volatile__(               \
        ".p2align 3 \n\t"               \
        SSE2_INSTRUCTIONS               \
        :                               \
        : "r" (p_y), "r" (p_u),         \
          "r" (p_v), "r" (p_buffer)     \
        : "eax", "xmm0", "xmm1", "xmm2", "xmm3", \
                 "xmm4", "xmm5", "xmm6", "xmm7" ); \
    } while(0)

#define SSE2_END  __asm__ __volatile__ ( "sfence" ::: "memory" )

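/*
 * Usage sketch (illustrative; the actual call sites live in the companion
 * .c conversion loops): the string fragments below are expected to be
 * pasted together into a single asm statement per chunk of 16 pixels,
 * roughly as follows:
 *
 *     SSE2_CALL (
 *         SSE2_INIT_32_ALIGNED
 *         SSE2_YUV_MUL
 *         SSE2_YUV_ADD
 *         SSE2_UNPACK_32_ARGB_ALIGNED
 *     );
 *     SSE2_END;
 *
 * p_y, p_u, p_v and p_buffer are the plane / output pointers referenced
 * as %0..%3 by the fragments.
 */
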
#define SSE2_INIT_16_ALIGNED "                                              \n\
movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor      %%xmm4, %%xmm4    # zero xmm4                                     \n\
movdqa      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
"

#define SSE2_INIT_16_UNALIGNED "                                            \n\
movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor      %%xmm4, %%xmm4    # zero xmm4                                     \n\
movdqu      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
prefetchnta (%3)            # Tell CPU not to cache output RGB data         \n\
"

#define SSE2_INIT_32_ALIGNED "                                              \n\
movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor      %%xmm4, %%xmm4    # zero xmm4                                     \n\
movdqa      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
"

#define SSE2_INIT_32_UNALIGNED "                                            \n\
movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor      %%xmm4, %%xmm4    # zero xmm4                                     \n\
movdqu      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
prefetchnta (%3)            # Tell CPU not to cache output RGB data         \n\
"

#define SSE2_YUV_MUL "                                                      \n\
# convert the chroma part                                                   \n\
punpcklbw %%xmm4, %%xmm0        # scatter 8 Cb    00 u3 00 u2 00 u1 00 u0   \n\
punpcklbw %%xmm4, %%xmm1        # scatter 8 Cr    00 v3 00 v2 00 v1 00 v0   \n\
movl      $0x00800080, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     0080 0080 ... 0080 0080   \n\
psubsw    %%xmm5, %%xmm0        # Cb -= 128                                 \n\
psubsw    %%xmm5, %%xmm1        # Cr -= 128                                 \n\
psllw     $3, %%xmm0            # Promote precision                         \n\
psllw     $3, %%xmm1            # Promote precision                         \n\
movdqa    %%xmm0, %%xmm2        # Copy 8 Cb       00 u3 00 u2 00 u1 00 u0   \n\
movdqa    %%xmm1, %%xmm3        # Copy 8 Cr       00 v3 00 v2 00 v1 00 v0   \n\
movl      $0xf37df37d, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     f37d f37d ... f37d f37d   \n\
pmulhw    %%xmm5, %%xmm2        # Mul Cb with green coeff -> Cb green       \n\
movl      $0xe5fce5fc, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     e5fc e5fc ... e5fc e5fc   \n\
pmulhw    %%xmm5, %%xmm3        # Mul Cr with green coeff -> Cr green       \n\
movl      $0x40934093, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     4093 4093 ... 4093 4093   \n\
pmulhw    %%xmm5, %%xmm0        # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0   \n\
movl      $0x33123312, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     3312 3312 ... 3312 3312   \n\
pmulhw    %%xmm5, %%xmm1        # Mul Cr -> Cred  00 r3 00 r2 00 r1 00 r0   \n\
paddsw    %%xmm3, %%xmm2        # Cb green + Cr green -> Cgreen             \n\
                                                                            \n\
# convert the luma part                                                     \n\
movl      $0x10101010, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to   1010 1010 ... 1010 1010     \n\
psubusb   %%xmm5, %%xmm6        # Y -= 16                                   \n\
movdqa    %%xmm6, %%xmm7        # Copy 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
movl      $0x00ff00ff, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     00ff 00ff ... 00ff 00ff   \n\
pand      %%xmm5, %%xmm6        # get Y even      00 Y6 00 Y4 00 Y2 00 Y0   \n\
psrlw     $8, %%xmm7            # get Y odd       00 Y7 00 Y5 00 Y3 00 Y1   \n\
psllw     $3, %%xmm6            # Promote precision                         \n\
psllw     $3, %%xmm7            # Promote precision                         \n\
movl      $0x253f253f, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     253f 253f ... 253f 253f   \n\
pmulhw    %%xmm5, %%xmm6        # Mul 8 Y even    00 y6 00 y4 00 y2 00 y0   \n\
pmulhw    %%xmm5, %%xmm7        # Mul 8 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
"

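/*
 * Note on the constants above: the chroma and luma words are pre-shifted
 * left by 3, and pmulhw keeps the high 16 bits of the signed product, so
 * each 16-bit constant acts as a multiplier of value/8192.  The values
 * appear to be the usual ITU-R BT.601 limited-range coefficients:
 *
 *     0x253f =  9535  ->  9535/8192 ~  1.164  (Y scale, 255/219)
 *     0x4093 = 16531  -> 16531/8192 ~  2.018  (Cb contribution to B)
 *     0x3312 = 13074  -> 13074/8192 ~  1.596  (Cr contribution to R)
 *     0xf37d = -3203  -> -3203/8192 ~ -0.391  (Cb contribution to G)
 *     0xe5fc = -6660  -> -6660/8192 ~ -0.813  (Cr contribution to G)
 */
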
#define SSE2_YUV_ADD "                                                      \n\
# Do horizontal and vertical scaling                                        \n\
movdqa    %%xmm0, %%xmm3        # Copy Cblue                                \n\
movdqa    %%xmm1, %%xmm4        # Copy Cred                                 \n\
movdqa    %%xmm2, %%xmm5        # Copy Cgreen                               \n\
paddsw    %%xmm6, %%xmm0        # Y even + Cblue  00 B6 00 B4 00 B2 00 B0   \n\
paddsw    %%xmm7, %%xmm3        # Y odd  + Cblue  00 B7 00 B5 00 B3 00 B1   \n\
paddsw    %%xmm6, %%xmm1        # Y even + Cred   00 R6 00 R4 00 R2 00 R0   \n\
paddsw    %%xmm7, %%xmm4        # Y odd  + Cred   00 R7 00 R5 00 R3 00 R1   \n\
paddsw    %%xmm6, %%xmm2        # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0   \n\
paddsw    %%xmm7, %%xmm5        # Y odd  + Cgreen 00 G7 00 G5 00 G3 00 G1   \n\
                                                                            \n\
# Limit RGB even to 0..255                                                  \n\
packuswb  %%xmm0, %%xmm0        # B6 B4 B2 B0 / B6 B4 B2 B0                 \n\
packuswb  %%xmm1, %%xmm1        # R6 R4 R2 R0 / R6 R4 R2 R0                 \n\
packuswb  %%xmm2, %%xmm2        # G6 G4 G2 G0 / G6 G4 G2 G0                 \n\
                                                                            \n\
# Limit RGB odd to 0..255                                                   \n\
packuswb  %%xmm3, %%xmm3        # B7 B5 B3 B1 / B7 B5 B3 B1                 \n\
packuswb  %%xmm4, %%xmm4        # R7 R5 R3 R1 / R7 R5 R3 R1                 \n\
packuswb  %%xmm5, %%xmm5        # G7 G5 G3 G1 / G7 G5 G3 G1                 \n\
                                                                            \n\
# Interleave RGB even and odd                                               \n\
punpcklbw %%xmm3, %%xmm0        #                 B7 B6 B5 B4 B3 B2 B1 B0   \n\
punpcklbw %%xmm4, %%xmm1        #                 R7 R6 R5 R4 R3 R2 R1 R0   \n\
punpcklbw %%xmm5, %%xmm2        #                 G7 G6 G5 G4 G3 G2 G1 G0   \n\
"

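/*
 * The macros below pack the interleaved B/G/R bytes into 16 bits per
 * pixel: the *_15 variants produce X1R5G5B5 and the *_16 variants
 * R5G6B5, 16 pixels (32 bytes of output) per call.  The aligned variants
 * use non-temporal stores (movntdq), hence the sfence in SSE2_END.
 */
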
#define SSE2_UNPACK_15_ALIGNED "                                            \n\
# mask unneeded bits off                                                    \n\
movl      $0xf8f8f8f8, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pand      %%xmm5, %%xmm2        # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
psrlw     $1,%%xmm1             # __r7r6r5 r4r3____ __r7r6r5 r4r3____       \n\
pxor      %%xmm4, %%xmm4        # zero xmm4                                 \n\
movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 0-7                           \n\
punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3______       \n\
punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $2,%%xmm2             # ________ ____g7g6 g5g4g3__ ________       \n\
por       %%xmm2, %%xmm0        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
movntdq   %%xmm0, (%3)          # store pixel 0-7                           \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 8-15                          \n\
punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3______       \n\
punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $2,%%xmm7             # ________ ____g7g6 g5g4g3__ ________       \n\
por       %%xmm7, %%xmm5        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
movntdq   %%xmm5, 16(%3)        # store pixel 8-15                          \n\
"

#define SSE2_UNPACK_15_UNALIGNED "                                          \n\
# mask unneeded bits off                                                    \n\
movl      $0xf8f8f8f8, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pand      %%xmm5, %%xmm2        # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
psrlw     $1,%%xmm1             # __r7r6r5 r4r3____ __r7r6r5 r4r3____       \n\
pxor      %%xmm4, %%xmm4        # zero xmm4                                 \n\
movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 0-7                           \n\
punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3______       \n\
punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $2,%%xmm2             # ________ ____g7g6 g5g4g3__ ________       \n\
por       %%xmm2, %%xmm0        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
movdqu    %%xmm0, (%3)          # store pixel 0-7                           \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 8-15                          \n\
punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3______       \n\
punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $2,%%xmm7             # ________ ____g7g6 g5g4g3__ ________       \n\
por       %%xmm7, %%xmm5        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
movdqu    %%xmm5, 16(%3)        # store pixel 8-15                          \n\
"

#define SSE2_UNPACK_16_ALIGNED "                                            \n\
# mask unneeded bits off                                                    \n\
movl      $0xf8f8f8f8, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
movl      $0xfcfcfcfc, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     fcfc fcfc ... fcfc fcfc   \n\
pand      %%xmm5, %%xmm2        # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pxor      %%xmm4, %%xmm4        # zero xmm4                                 \n\
movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 0-7                           \n\
punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3g2____       \n\
punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%xmm2             # ________ __g7g6g5 g4g3g2__ ________       \n\
por       %%xmm2, %%xmm0        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movntdq   %%xmm0, (%3)          # store pixel 0-7                           \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 8-15                          \n\
punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3g2____       \n\
punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%xmm7             # ________ __g7g6g5 g4g3g2__ ________       \n\
por       %%xmm7, %%xmm5        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movntdq   %%xmm5, 16(%3)        # store pixel 8-15                          \n\
"

#define SSE2_UNPACK_16_UNALIGNED "                                          \n\
# mask unneeded bits off                                                    \n\
movl      $0xf8f8f8f8, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
movl      $0xfcfcfcfc, %%eax    #                                           \n\
movd      %%eax, %%xmm5         #                                           \n\
pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     fcfc fcfc ... fcfc fcfc   \n\
pand      %%xmm5, %%xmm2        # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pxor      %%xmm4, %%xmm4        # zero xmm4                                 \n\
movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 0-7                           \n\
punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3g2____       \n\
punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%xmm2             # ________ __g7g6g5 g4g3g2__ ________       \n\
por       %%xmm2, %%xmm0        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movdqu    %%xmm0, (%3)          # store pixel 0-7                           \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 8-15                          \n\
punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3g2____       \n\
punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%xmm7             # ________ __g7g6g5 g4g3g2__ ________       \n\
por       %%xmm7, %%xmm5        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movdqu    %%xmm5, 16(%3)        # store pixel 8-15                          \n\
"

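/*
 * The 32-bit unpack macros below interleave the B, G and R byte vectors
 * (plus a zeroed register standing in for the alpha byte, which is thus
 * written as 0) into the byte order indicated by the macro name,
 * 16 pixels (64 bytes of output) per call.
 */
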
#define SSE2_UNPACK_32_ARGB_ALIGNED "                                       \n\
pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
movdqa    %%xmm0, %%xmm4  #               B7 B6 B5 B4 B3 B2 B1 B0           \n\
punpcklbw %%xmm2, %%xmm4  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
movdqa    %%xmm1, %%xmm5  #               R7 R6 R5 R4 R3 R2 R1 R0           \n\
punpcklbw %%xmm3, %%xmm5  #               00 R3 00 R2 00 R1 00 R0           \n\
movdqa    %%xmm4, %%xmm6  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
punpcklwd %%xmm5, %%xmm4  #               00 R1 G1 B1 00 R0 G0 B0           \n\
movntdq   %%xmm4, (%3)    # Store ARGB3 ARGB2 ARGB1 ARGB0                   \n\
punpckhwd %%xmm5, %%xmm6  #               00 R3 G3 B3 00 R2 G2 B2           \n\
movntdq   %%xmm6, 16(%3)  # Store ARGB7 ARGB6 ARGB5 ARGB4                   \n\
punpckhbw %%xmm2, %%xmm0  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpckhbw %%xmm3, %%xmm1  #               00 R7 00 R6 00 R5 00 R4           \n\
movdqa    %%xmm0, %%xmm5  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpcklwd %%xmm1, %%xmm5  #               00 R5 G5 B5 00 R4 G4 B4           \n\
movntdq   %%xmm5, 32(%3)  # Store ARGB11 ARGB10 ARGB9 ARGB8                 \n\
punpckhwd %%xmm1, %%xmm0  #               00 R7 G7 B7 00 R6 G6 B6           \n\
movntdq   %%xmm0, 48(%3)  # Store ARGB15 ARGB14 ARGB13 ARGB12               \n\
"

#define SSE2_UNPACK_32_ARGB_UNALIGNED "                                     \n\
pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
movdqa    %%xmm0, %%xmm4  #               B7 B6 B5 B4 B3 B2 B1 B0           \n\
punpcklbw %%xmm2, %%xmm4  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
movdqa    %%xmm1, %%xmm5  #               R7 R6 R5 R4 R3 R2 R1 R0           \n\
punpcklbw %%xmm3, %%xmm5  #               00 R3 00 R2 00 R1 00 R0           \n\
movdqa    %%xmm4, %%xmm6  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
punpcklwd %%xmm5, %%xmm4  #               00 R1 G1 B1 00 R0 G0 B0           \n\
movdqu    %%xmm4, (%3)    # Store ARGB3 ARGB2 ARGB1 ARGB0                   \n\
punpckhwd %%xmm5, %%xmm6  #               00 R3 G3 B3 00 R2 G2 B2           \n\
movdqu    %%xmm6, 16(%3)  # Store ARGB7 ARGB6 ARGB5 ARGB4                   \n\
punpckhbw %%xmm2, %%xmm0  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpckhbw %%xmm3, %%xmm1  #               00 R7 00 R6 00 R5 00 R4           \n\
movdqa    %%xmm0, %%xmm5  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpcklwd %%xmm1, %%xmm5  #               00 R5 G5 B5 00 R4 G4 B4           \n\
movdqu    %%xmm5, 32(%3)  # Store ARGB11 ARGB10 ARGB9 ARGB8                 \n\
punpckhwd %%xmm1, %%xmm0  #               00 R7 G7 B7 00 R6 G6 B6           \n\
movdqu    %%xmm0, 48(%3)  # Store ARGB15 ARGB14 ARGB13 ARGB12               \n\
"

#define SSE2_UNPACK_32_RGBA_ALIGNED "                                       \n\
pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
punpcklbw %%xmm1, %%xmm4  #                 R3 G3 R2 G2 R1 G1 R0 G0         \n\
punpcklbw %%xmm0, %%xmm3  #                 B3 00 B2 00 B1 00 B0 00         \n\
movdqa    %%xmm3, %%xmm5  #                 B3 00 B2 00 B1 00 B0 00         \n\
punpcklwd %%xmm4, %%xmm3  #                 R1 G1 B1 00 R0 G0 B0 00         \n\
movntdq   %%xmm3, (%3)    # Store RGBA3 RGBA2 RGBA1 RGBA0                   \n\
punpckhwd %%xmm4, %%xmm5  #                 R3 G3 B3 00 R2 G2 B2 00         \n\
movntdq   %%xmm5, 16(%3)  # Store RGBA7 RGBA6 RGBA5 RGBA4                   \n\
pxor      %%xmm6, %%xmm6  # zero xmm6                                       \n\
punpckhbw %%xmm1, %%xmm2  #                 R7 G7 R6 G6 R5 G5 R4 G4         \n\
punpckhbw %%xmm0, %%xmm6  #                 B7 00 B6 00 B5 00 B4 00         \n\
movdqa    %%xmm6, %%xmm0  #                 B7 00 B6 00 B5 00 B4 00         \n\
punpcklwd %%xmm2, %%xmm6  #                 R5 G5 B5 00 R4 G4 B4 00         \n\
movntdq   %%xmm6, 32(%3)  # Store RGBA11 RGBA10 RGBA9 RGBA8                 \n\
punpckhwd %%xmm2, %%xmm0  #                 R7 G7 B7 00 R6 G6 B6 00         \n\
movntdq   %%xmm0, 48(%3)  # Store RGBA15 RGBA14 RGBA13 RGBA12               \n\
"

#define SSE2_UNPACK_32_RGBA_UNALIGNED "                                     \n\
pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
punpcklbw %%xmm1, %%xmm4  #                 R3 G3 R2 G2 R1 G1 R0 G0         \n\
punpcklbw %%xmm0, %%xmm3  #                 B3 00 B2 00 B1 00 B0 00         \n\
movdqa    %%xmm3, %%xmm5  #                 B3 00 B2 00 B1 00 B0 00         \n\
punpcklwd %%xmm4, %%xmm3  #                 R1 G1 B1 00 R0 G0 B0 00         \n\
movdqu    %%xmm3, (%3)    # Store RGBA3 RGBA2 RGBA1 RGBA0                   \n\
punpckhwd %%xmm4, %%xmm5  #                 R3 G3 B3 00 R2 G2 B2 00         \n\
movdqu    %%xmm5, 16(%3)  # Store RGBA7 RGBA6 RGBA5 RGBA4                   \n\
pxor      %%xmm6, %%xmm6  # zero xmm6                                       \n\
punpckhbw %%xmm1, %%xmm2  #                 R7 G7 R6 G6 R5 G5 R4 G4         \n\
punpckhbw %%xmm0, %%xmm6  #                 B7 00 B6 00 B5 00 B4 00         \n\
movdqa    %%xmm6, %%xmm0  #                 B7 00 B6 00 B5 00 B4 00         \n\
punpcklwd %%xmm2, %%xmm6  #                 R5 G5 B5 00 R4 G4 B4 00         \n\
movdqu    %%xmm6, 32(%3)  # Store RGBA11 RGBA10 RGBA9 RGBA8                 \n\
punpckhwd %%xmm2, %%xmm0  #                 R7 G7 B7 00 R6 G6 B6 00         \n\
movdqu    %%xmm0, 48(%3)  # Store RGBA15 RGBA14 RGBA13 RGBA12               \n\
"

#define SSE2_UNPACK_32_BGRA_ALIGNED "                                       \n\
pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
punpcklbw %%xmm0, %%xmm4  #                 B3 G3 B2 G2 B1 G1 B0 G0         \n\
punpcklbw %%xmm1, %%xmm3  #                 R3 00 R2 00 R1 00 R0 00         \n\
movdqa    %%xmm3, %%xmm5  #                 R3 00 R2 00 R1 00 R0 00         \n\
punpcklwd %%xmm4, %%xmm3  #                 B1 G1 R1 00 B0 G0 R0 00         \n\
movntdq   %%xmm3, (%3)    # Store BGRA3 BGRA2 BGRA1 BGRA0                   \n\
punpckhwd %%xmm4, %%xmm5  #                 B3 G3 R3 00 B2 G2 R2 00         \n\
movntdq   %%xmm5, 16(%3)  # Store BGRA7 BGRA6 BGRA5 BGRA4                   \n\
pxor      %%xmm6, %%xmm6  # zero xmm6                                       \n\
punpckhbw %%xmm0, %%xmm2  #                 B7 G7 B6 G6 B5 G5 B4 G4         \n\
punpckhbw %%xmm1, %%xmm6  #                 R7 00 R6 00 R5 00 R4 00         \n\
movdqa    %%xmm6, %%xmm0  #                 R7 00 R6 00 R5 00 R4 00         \n\
punpcklwd %%xmm2, %%xmm6  #                 B5 G5 R5 00 B4 G4 R4 00         \n\
movntdq   %%xmm6, 32(%3)  # Store BGRA11 BGRA10 BGRA9 BGRA8                 \n\
punpckhwd %%xmm2, %%xmm0  #                 B7 G7 R7 00 B6 G6 R6 00         \n\
movntdq   %%xmm0, 48(%3)  # Store BGRA15 BGRA14 BGRA13 BGRA12               \n\
"

#define SSE2_UNPACK_32_BGRA_UNALIGNED "                                     \n\
pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
punpcklbw %%xmm0, %%xmm4  #                 B3 G3 B2 G2 B1 G1 B0 G0         \n\
punpcklbw %%xmm1, %%xmm3  #                 R3 00 R2 00 R1 00 R0 00         \n\
movdqa    %%xmm3, %%xmm5  #                 R3 00 R2 00 R1 00 R0 00         \n\
punpcklwd %%xmm4, %%xmm3  #                 B1 G1 R1 00 B0 G0 R0 00         \n\
movdqu    %%xmm3, (%3)    # Store BGRA3 BGRA2 BGRA1 BGRA0                   \n\
punpckhwd %%xmm4, %%xmm5  #                 B3 G3 R3 00 B2 G2 R2 00         \n\
movdqu    %%xmm5, 16(%3)  # Store BGRA7 BGRA6 BGRA5 BGRA4                   \n\
pxor      %%xmm6, %%xmm6  # zero xmm6                                       \n\
punpckhbw %%xmm0, %%xmm2  #                 B7 G7 B6 G6 B5 G5 B4 G4         \n\
punpckhbw %%xmm1, %%xmm6  #                 R7 00 R6 00 R5 00 R4 00         \n\
movdqa    %%xmm6, %%xmm0  #                 R7 00 R6 00 R5 00 R4 00         \n\
punpcklwd %%xmm2, %%xmm6  #                 B5 G5 R5 00 B4 G4 R4 00         \n\
movdqu    %%xmm6, 32(%3)  # Store BGRA11 BGRA10 BGRA9 BGRA8                 \n\
punpckhwd %%xmm2, %%xmm0  #                 B7 G7 R7 00 B6 G6 R6 00         \n\
movdqu    %%xmm0, 48(%3)  # Store BGRA15 BGRA14 BGRA13 BGRA12               \n\
"

#define SSE2_UNPACK_32_ABGR_ALIGNED "                                       \n\
pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
movdqa    %%xmm1, %%xmm4  #                 R7 R6 R5 R4 R3 R2 R1 R0         \n\
punpcklbw %%xmm2, %%xmm4  #                 G3 R3 G2 R2 G1 R1 G0 R0         \n\
movdqa    %%xmm0, %%xmm5  #                 B7 B6 B5 B4 B3 B2 B1 B0         \n\
punpcklbw %%xmm3, %%xmm5  #                 00 B3 00 B2 00 B1 00 B0         \n\
movdqa    %%xmm4, %%xmm6  #                 G3 R3 G2 R2 G1 R1 G0 R0         \n\
punpcklwd %%xmm5, %%xmm4  #                 00 B1 G1 R1 00 B0 G0 R0         \n\
movntdq   %%xmm4, (%3)    # Store ABGR3 ABGR2 ABGR1 ABGR0                   \n\
punpckhwd %%xmm5, %%xmm6  #                 00 B3 G3 R3 00 B2 G2 R2         \n\
movntdq   %%xmm6, 16(%3)  # Store ABGR7 ABGR6 ABGR5 ABGR4                   \n\
punpckhbw %%xmm2, %%xmm1  #                 G7 R7 G6 R6 G5 R5 G4 R4         \n\
punpckhbw %%xmm3, %%xmm0  #                 00 B7 00 B6 00 B5 00 B4         \n\
movdqa    %%xmm1, %%xmm2  #                 G7 R7 G6 R6 G5 R5 G4 R4         \n\
punpcklwd %%xmm0, %%xmm1  #                 00 B5 G5 R5 00 B4 G4 R4         \n\
movntdq   %%xmm1, 32(%3)  # Store ABGR11 ABGR10 ABGR9 ABGR8                 \n\
punpckhwd %%xmm0, %%xmm2  #                 00 B7 G7 R7 00 B6 G6 R6         \n\
movntdq   %%xmm2, 48(%3)  # Store ABGR15 ABGR14 ABGR13 ABGR12               \n\
"

#define SSE2_UNPACK_32_ABGR_UNALIGNED "                                     \n\
pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
movdqa    %%xmm1, %%xmm4  #                 R7 R6 R5 R4 R3 R2 R1 R0         \n\
punpcklbw %%xmm2, %%xmm4  #                 G3 R3 G2 R2 G1 R1 G0 R0         \n\
movdqa    %%xmm0, %%xmm5  #                 B7 B6 B5 B4 B3 B2 B1 B0         \n\
punpcklbw %%xmm3, %%xmm5  #                 00 B3 00 B2 00 B1 00 B0         \n\
movdqa    %%xmm4, %%xmm6  #                 G3 R3 G2 R2 G1 R1 G0 R0         \n\
punpcklwd %%xmm5, %%xmm4  #                 00 B1 G1 R1 00 B0 G0 R0         \n\
movdqu    %%xmm4, (%3)    # Store ABGR3 ABGR2 ABGR1 ABGR0                   \n\
punpckhwd %%xmm5, %%xmm6  #                 00 B3 G3 R3 00 B2 G2 R2         \n\
movdqu    %%xmm6, 16(%3)  # Store ABGR7 ABGR6 ABGR5 ABGR4                   \n\
punpckhbw %%xmm2, %%xmm1  #                 G7 R7 G6 R6 G5 R5 G4 R4         \n\
punpckhbw %%xmm3, %%xmm0  #                 00 B7 00 B6 00 B5 00 B4         \n\
movdqa    %%xmm1, %%xmm2  #                 G7 R7 G6 R6 G5 R5 G4 R4         \n\
punpcklwd %%xmm0, %%xmm1  #                 00 B5 G5 R5 00 B4 G4 R4         \n\
movdqu    %%xmm1, 32(%3)  # Store ABGR11 ABGR10 ABGR9 ABGR8                 \n\
punpckhwd %%xmm0, %%xmm2  #                 00 B7 G7 R7 00 B6 G6 R6         \n\
movdqu    %%xmm2, 48(%3)  # Store ABGR15 ABGR14 ABGR13 ABGR12               \n\
"

#elif defined(HAVE_SSE2_INTRINSICS)

/* SSE2 intrinsics */

#include <emmintrin.h>

#define SSE2_CALL(SSE2_INSTRUCTIONS)        \
    do {                                    \
        __m128i xmm0, xmm1, xmm2, xmm3,     \
                xmm4, xmm5, xmm6, xmm7;     \
        SSE2_INSTRUCTIONS                   \
    } while(0)

#define SSE2_END  _mm_sfence()

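/*
 * Usage sketch (illustrative; the actual call sites live in the companion
 * .c conversion loops): with intrinsics the fragments below are plain C
 * statements operating on the local __m128i variables declared by
 * SSE2_CALL, so the same composition as in the assembly path applies:
 *
 *     SSE2_CALL (
 *         SSE2_INIT_32_ALIGNED
 *         SSE2_YUV_MUL
 *         SSE2_YUV_ADD
 *         SSE2_UNPACK_32_ARGB_ALIGNED
 *     );
 *     SSE2_END;
 */
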
#define SSE2_INIT_16_ALIGNED                \
    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
    xmm4 = _mm_setzero_si128();             \
    xmm6 = _mm_load_si128((__m128i *)p_y);

#define SSE2_INIT_16_UNALIGNED              \
    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
    xmm4 = _mm_setzero_si128();             \
    xmm6 = _mm_loadu_si128((__m128i *)p_y); \
    _mm_prefetch(p_buffer, _MM_HINT_NTA);

#define SSE2_INIT_32_ALIGNED                \
    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
    xmm4 = _mm_setzero_si128();             \
    xmm6 = _mm_load_si128((__m128i *)p_y);

#define SSE2_INIT_32_UNALIGNED              \
    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
    xmm4 = _mm_setzero_si128();             \
    xmm6 = _mm_loadu_si128((__m128i *)p_y); \
    _mm_prefetch(p_buffer, _MM_HINT_NTA);

#define SSE2_YUV_MUL                        \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm4);   \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm4);   \
    xmm5 = _mm_set1_epi32(0x00800080UL);    \
    xmm0 = _mm_subs_epi16(xmm0, xmm5);      \
    xmm1 = _mm_subs_epi16(xmm1, xmm5);      \
    xmm0 = _mm_slli_epi16(xmm0, 3);         \
    xmm1 = _mm_slli_epi16(xmm1, 3);         \
    xmm2 = xmm0;                            \
    xmm3 = xmm1;                            \
    xmm5 = _mm_set1_epi32(0xf37df37dUL);    \
    xmm2 = _mm_mulhi_epi16(xmm2, xmm5);     \
    xmm5 = _mm_set1_epi32(0xe5fce5fcUL);    \
    xmm3 = _mm_mulhi_epi16(xmm3, xmm5);     \
    xmm5 = _mm_set1_epi32(0x40934093UL);    \
    xmm0 = _mm_mulhi_epi16(xmm0, xmm5);     \
    xmm5 = _mm_set1_epi32(0x33123312UL);    \
    xmm1 = _mm_mulhi_epi16(xmm1, xmm5);     \
    xmm2 = _mm_adds_epi16(xmm2, xmm3);      \
    \
    xmm5 = _mm_set1_epi32(0x10101010UL);    \
    xmm6 = _mm_subs_epu8(xmm6, xmm5);       \
    xmm7 = xmm6;                            \
    xmm5 = _mm_set1_epi32(0x00ff00ffUL);    \
    xmm6 = _mm_and_si128(xmm6, xmm5);       \
    xmm7 = _mm_srli_epi16(xmm7, 8);         \
    xmm6 = _mm_slli_epi16(xmm6, 3);         \
    xmm7 = _mm_slli_epi16(xmm7, 3);         \
    xmm5 = _mm_set1_epi32(0x253f253fUL);    \
    xmm6 = _mm_mulhi_epi16(xmm6, xmm5);     \
    xmm7 = _mm_mulhi_epi16(xmm7, xmm5);

#define SSE2_YUV_ADD                        \
    xmm3 = xmm0;                            \
    xmm4 = xmm1;                            \
    xmm5 = xmm2;                            \
    xmm0 = _mm_adds_epi16(xmm0, xmm6);      \
    xmm3 = _mm_adds_epi16(xmm3, xmm7);      \
    xmm1 = _mm_adds_epi16(xmm1, xmm6);      \
    xmm4 = _mm_adds_epi16(xmm4, xmm7);      \
    xmm2 = _mm_adds_epi16(xmm2, xmm6);      \
    xmm5 = _mm_adds_epi16(xmm5, xmm7);      \
    \
    xmm0 = _mm_packus_epi16(xmm0, xmm0);    \
    xmm1 = _mm_packus_epi16(xmm1, xmm1);    \
    xmm2 = _mm_packus_epi16(xmm2, xmm2);    \
    \
    xmm3 = _mm_packus_epi16(xmm3, xmm3);    \
    xmm4 = _mm_packus_epi16(xmm4, xmm4);    \
    xmm5 = _mm_packus_epi16(xmm5, xmm5);    \
    \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm3);   \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm4);   \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm5);

#define SSE2_UNPACK_15_ALIGNED                      \
    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL);            \
    xmm0 = _mm_and_si128(xmm0, xmm5);               \
    xmm0 = _mm_srli_epi16(xmm0, 3);                 \
    xmm2 = _mm_and_si128(xmm2, xmm5);               \
    xmm1 = _mm_and_si128(xmm1, xmm5);               \
    xmm1 = _mm_srli_epi16(xmm1, 1);                 \
    xmm4 = _mm_setzero_si128();                     \
    xmm5 = xmm0;                                    \
    xmm7 = xmm2;                                    \
    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4);           \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);           \
    xmm2 = _mm_slli_epi16(xmm2, 2);                 \
    xmm0 = _mm_or_si128(xmm0, xmm2);                \
    _mm_stream_si128((__m128i*)p_buffer, xmm0);     \
    \
    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4);           \
    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1);           \
    xmm7 = _mm_slli_epi16(xmm7, 2);                 \
    xmm5 = _mm_or_si128(xmm5, xmm7);                \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm5);

#define SSE2_UNPACK_15_UNALIGNED                    \
    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL);            \
    xmm0 = _mm_and_si128(xmm0, xmm5);               \
    xmm0 = _mm_srli_epi16(xmm0, 3);                 \
    xmm2 = _mm_and_si128(xmm2, xmm5);               \
    xmm1 = _mm_and_si128(xmm1, xmm5);               \
    xmm1 = _mm_srli_epi16(xmm1, 1);                 \
    xmm4 = _mm_setzero_si128();                     \
    xmm5 = xmm0;                                    \
    xmm7 = xmm2;                                    \
    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4);           \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);           \
    xmm2 = _mm_slli_epi16(xmm2, 2);                 \
    xmm0 = _mm_or_si128(xmm0, xmm2);                \
    _mm_storeu_si128((__m128i*)p_buffer, xmm0);     \
    \
    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4);           \
    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1);           \
    xmm7 = _mm_slli_epi16(xmm7, 2);                 \
    xmm5 = _mm_or_si128(xmm5, xmm7);                \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5);

#define SSE2_UNPACK_16_ALIGNED                      \
    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL);            \
    xmm0 = _mm_and_si128(xmm0, xmm5);               \
    xmm1 = _mm_and_si128(xmm1, xmm5);               \
    xmm5 = _mm_set1_epi32(0xfcfcfcfcUL);            \
    xmm2 = _mm_and_si128(xmm2, xmm5);               \
    xmm0 = _mm_srli_epi16(xmm0, 3);                 \
    xmm4 = _mm_setzero_si128();                     \
    xmm5 = xmm0;                                    \
    xmm7 = xmm2;                                    \
    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4);           \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);           \
    xmm2 = _mm_slli_epi16(xmm2, 3);                 \
    xmm0 = _mm_or_si128(xmm0, xmm2);                \
    _mm_stream_si128((__m128i*)p_buffer, xmm0);     \
    \
    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4);           \
    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1);           \
    xmm7 = _mm_slli_epi16(xmm7, 3);                 \
    xmm5 = _mm_or_si128(xmm5, xmm7);                \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm5);

#define SSE2_UNPACK_16_UNALIGNED                    \
    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL);            \
    xmm0 = _mm_and_si128(xmm0, xmm5);               \
    xmm1 = _mm_and_si128(xmm1, xmm5);               \
    xmm5 = _mm_set1_epi32(0xfcfcfcfcUL);            \
    xmm2 = _mm_and_si128(xmm2, xmm5);               \
    xmm0 = _mm_srli_epi16(xmm0, 3);                 \
    xmm4 = _mm_setzero_si128();                     \
    xmm5 = xmm0;                                    \
    xmm7 = xmm2;                                    \
    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4);           \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);           \
    xmm2 = _mm_slli_epi16(xmm2, 3);                 \
    xmm0 = _mm_or_si128(xmm0, xmm2);                \
    _mm_storeu_si128((__m128i*)p_buffer, xmm0);     \
    \
    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4);           \
    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1);           \
    xmm7 = _mm_slli_epi16(xmm7, 3);                 \
    xmm5 = _mm_or_si128(xmm5, xmm7);                \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5);

#define SSE2_UNPACK_32_ARGB_ALIGNED                 \
    xmm3 = _mm_setzero_si128();                     \
    xmm4 = xmm0;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm2);           \
    xmm5 = xmm1;                                    \
    xmm5 = _mm_unpacklo_epi8(xmm5, xmm3);           \
    xmm6 = xmm4;                                    \
    xmm4 = _mm_unpacklo_epi16(xmm4, xmm5);          \
    _mm_stream_si128((__m128i*)(p_buffer), xmm4);   \
    xmm6 = _mm_unpackhi_epi16(xmm6, xmm5);          \
    _mm_stream_si128((__m128i*)(p_buffer+4), xmm6); \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm2);           \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3);           \
    xmm5 = xmm0;                                    \
    xmm5 = _mm_unpacklo_epi16(xmm5, xmm1);          \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm5); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm1);          \
    _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);

#define SSE2_UNPACK_32_ARGB_UNALIGNED               \
    xmm3 = _mm_setzero_si128();                     \
    xmm4 = xmm0;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm2);           \
    xmm5 = xmm1;                                    \
    xmm5 = _mm_unpacklo_epi8(xmm5, xmm3);           \
    xmm6 = xmm4;                                    \
    xmm4 = _mm_unpacklo_epi16(xmm4, xmm5);          \
    _mm_storeu_si128((__m128i*)(p_buffer), xmm4);   \
    xmm6 = _mm_unpackhi_epi16(xmm6, xmm5);          \
    _mm_storeu_si128((__m128i*)(p_buffer+4), xmm6); \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm2);           \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3);           \
    xmm5 = xmm0;                                    \
    xmm5 = _mm_unpacklo_epi16(xmm5, xmm1);          \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm1);          \
    _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);

#define SSE2_UNPACK_32_RGBA_ALIGNED                 \
    xmm3 = _mm_setzero_si128();                     \
    xmm4 = xmm2;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);           \
    xmm3 = _mm_unpacklo_epi8(xmm3, xmm0);           \
    xmm5 = xmm3;                                    \
    xmm3 = _mm_unpacklo_epi16(xmm3, xmm4);          \
    _mm_stream_si128((__m128i*)(p_buffer), xmm3);   \
    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4);          \
    _mm_stream_si128((__m128i*)(p_buffer+4), xmm5); \
    xmm6 = _mm_setzero_si128();                     \
    xmm2 = _mm_unpackhi_epi8(xmm2, xmm1);           \
    xmm6 = _mm_unpackhi_epi8(xmm6, xmm0);           \
    xmm0 = xmm6;                                    \
    xmm6 = _mm_unpacklo_epi16(xmm6, xmm2);          \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm6); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm2);          \
    _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);

#define SSE2_UNPACK_32_RGBA_UNALIGNED               \
    xmm3 = _mm_setzero_si128();                     \
    xmm4 = xmm2;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);           \
    xmm3 = _mm_unpacklo_epi8(xmm3, xmm0);           \
    xmm5 = xmm3;                                    \
    xmm3 = _mm_unpacklo_epi16(xmm3, xmm4);          \
    _mm_storeu_si128((__m128i*)(p_buffer), xmm3);   \
    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4);          \
    _mm_storeu_si128((__m128i*)(p_buffer+4), xmm5); \
    xmm6 = _mm_setzero_si128();                     \
    xmm2 = _mm_unpackhi_epi8(xmm2, xmm1);           \
    xmm6 = _mm_unpackhi_epi8(xmm6, xmm0);           \
    xmm0 = xmm6;                                    \
    xmm6 = _mm_unpacklo_epi16(xmm6, xmm2);          \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm6); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm2);          \
    _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);

#define SSE2_UNPACK_32_BGRA_ALIGNED                 \
    xmm3 = _mm_setzero_si128();                     \
    xmm4 = xmm2;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm0);           \
    xmm3 = _mm_unpacklo_epi8(xmm3, xmm1);           \
    xmm5 = xmm3;                                    \
    xmm3 = _mm_unpacklo_epi16(xmm3, xmm4);          \
    _mm_stream_si128((__m128i*)(p_buffer), xmm3);   \
    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4);          \
    _mm_stream_si128((__m128i*)(p_buffer+4), xmm5); \
    xmm6 = _mm_setzero_si128();                     \
    xmm2 = _mm_unpackhi_epi8(xmm2, xmm0);           \
    xmm6 = _mm_unpackhi_epi8(xmm6, xmm1);           \
    xmm0 = xmm6;                                    \
    xmm6 = _mm_unpacklo_epi16(xmm6, xmm2);          \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm6); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm2);          \
    _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);

#define SSE2_UNPACK_32_BGRA_UNALIGNED               \
    xmm3 = _mm_setzero_si128();                     \
    xmm4 = xmm2;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm0);           \
    xmm3 = _mm_unpacklo_epi8(xmm3, xmm1);           \
    xmm5 = xmm3;                                    \
    xmm3 = _mm_unpacklo_epi16(xmm3, xmm4);          \
    _mm_storeu_si128((__m128i*)(p_buffer), xmm3);   \
    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4);          \
    _mm_storeu_si128((__m128i*)(p_buffer+4), xmm5); \
    xmm6 = _mm_setzero_si128();                     \
    xmm2 = _mm_unpackhi_epi8(xmm2, xmm0);           \
    xmm6 = _mm_unpackhi_epi8(xmm6, xmm1);           \
    xmm0 = xmm6;                                    \
    xmm6 = _mm_unpacklo_epi16(xmm6, xmm2);          \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm6); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm2);          \
    _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);

#define SSE2_UNPACK_32_ABGR_ALIGNED                 \
    xmm3 = _mm_setzero_si128();                     \
    xmm4 = xmm1;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm2);           \
    xmm5 = xmm0;                                    \
    xmm5 = _mm_unpacklo_epi8(xmm5, xmm3);           \
    xmm6 = xmm4;                                    \
    xmm4 = _mm_unpacklo_epi16(xmm4, xmm5);          \
    _mm_stream_si128((__m128i*)(p_buffer), xmm4);   \
    xmm6 = _mm_unpackhi_epi16(xmm6, xmm5);          \
    _mm_stream_si128((__m128i*)(p_buffer+4), xmm6); \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm2);           \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm3);           \
    xmm2 = xmm1;                                    \
    xmm1 = _mm_unpacklo_epi16(xmm1, xmm0);          \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm1); \
    xmm2 = _mm_unpackhi_epi16(xmm2, xmm0);          \
    _mm_stream_si128((__m128i*)(p_buffer+12), xmm2);

#define SSE2_UNPACK_32_ABGR_UNALIGNED               \
    xmm3 = _mm_setzero_si128();                     \
    xmm4 = xmm1;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm2);           \
    xmm5 = xmm0;                                    \
    xmm5 = _mm_unpacklo_epi8(xmm5, xmm3);           \
    xmm6 = xmm4;                                    \
    xmm4 = _mm_unpacklo_epi16(xmm4, xmm5);          \
    _mm_storeu_si128((__m128i*)(p_buffer), xmm4);   \
    xmm6 = _mm_unpackhi_epi16(xmm6, xmm5);          \
    _mm_storeu_si128((__m128i*)(p_buffer+4), xmm6); \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm2);           \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm3);           \
    xmm2 = xmm1;                                    \
    xmm1 = _mm_unpacklo_epi16(xmm1, xmm0);          \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm1); \
    xmm2 = _mm_unpackhi_epi16(xmm2, xmm0);          \
    _mm_storeu_si128((__m128i*)(p_buffer+12), xmm2);

#endif