/*****************************************************************************
 * i420_rgb_mmx.h: MMX YUV transformation assembly and intrinsics
 *****************************************************************************
 * Copyright (C) 1999-2004 the VideoLAN team
 * $Id$
 *
 * Authors: Olie Lho <ollie@sis.com.tw>
 *          Gaël Hendryckx <jimmy@via.ecp.fr>
 *          Samuel Hocevar <sam@zoy.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111, USA.
 *****************************************************************************/

/* hope these constant values are cache line aligned */
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)
#define USED_U64(foo) \
    static const uint64_t foo __asm__ (#foo) __attribute__((used))
#else
#define USED_U64(foo) \
    static const uint64_t foo __asm__ (#foo) __attribute__((unused))
#endif
USED_U64(mmx_80w)     = 0x0080008000800080ULL;
USED_U64(mmx_10w)     = 0x1010101010101010ULL;
USED_U64(mmx_00ffw)   = 0x00ff00ff00ff00ffULL;
USED_U64(mmx_Y_coeff) = 0x253f253f253f253fULL;

USED_U64(mmx_U_green) = 0xf37df37df37df37dULL;
USED_U64(mmx_U_blue)  = 0x4093409340934093ULL;
USED_U64(mmx_V_red)   = 0x3312331233123312ULL;
USED_U64(mmx_V_green) = 0xe5fce5fce5fce5fcULL;

USED_U64(mmx_mask_f8) = 0xf8f8f8f8f8f8f8f8ULL;
USED_U64(mmx_mask_fc) = 0xfcfcfcfcfcfcfcfcULL;
#undef USED_U64

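/*
 * The constants above are the usual BT.601 YUV -> RGB coefficients in fixed
 * point.  The code below promotes each sample with "psllw $3" and multiplies
 * with "pmulhw", which keeps only the upper 16 bits of the 32-bit product,
 * so every constant is the real coefficient scaled by 2^13 (= 65536 / 8):
 *
 *   mmx_Y_coeff  0x253f =  9535 ~  1.164 * 8192    luma gain on (Y - 16)
 *   mmx_U_blue   0x4093 = 16531 ~  2.018 * 8192    B += 2.018 * (U - 128)
 *   mmx_V_red    0x3312 = 13074 ~  1.596 * 8192    R += 1.596 * (V - 128)
 *   mmx_U_green  0xf37d = -3203 ~ -0.391 * 8192    G -= 0.391 * (U - 128)
 *   mmx_V_green  0xe5fc = -6660 ~ -0.813 * 8192    G -= 0.813 * (V - 128)
 *
 * A minimal scalar sketch of the same fixed-point math, kept for reference
 * only (the function name is an illustrative assumption and nothing in this
 * block is compiled):
 */
#if 0
static inline void i420_rgb_reference_pixel( uint8_t y, uint8_t u, uint8_t v,
                                             int *r, int *g, int *b )
{
    /* psubusb mmx_10w / psllw $3 / pmulhw mmx_Y_coeff */
    int y_ = (((y > 16 ? y - 16 : 0) << 3) * 0x253f) >> 16;
    int u_ = (u - 128) << 3;
    int v_ = (v - 128) << 3;

    *b = y_ + ((u_ * 0x4093) >> 16);                 /* ~  2.018 * Cb */
    *r = y_ + ((v_ * 0x3312) >> 16);                 /* ~  1.596 * Cr */
    *g = y_ + ((u_ * (int16_t)0xf37d) >> 16)         /* ~ -0.391 * Cb */
            + ((v_ * (int16_t)0xe5fc) >> 16);        /* ~ -0.813 * Cr */
    /* the MMX code clamps the results to 0..255 afterwards with packuswb */
}
#endif
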
#define MMX_INIT_16 "                                                       \n\
movd      (%1), %%mm0       # Load 4 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movd      (%2), %%mm1       # Load 4 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor      %%mm4, %%mm4      # zero mm4                                      \n\
movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
#movl      $0, (%3)         # cache preload for image                       \n\
"

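/*
 * Operand conventions used throughout this file: in the inline assembly
 * fragments %0 is the Y plane pointer, %1 the Cb (U) pointer, %2 the Cr (V)
 * pointer and %3 the output buffer, as the load comments above show.  The
 * INTRINSICS_* variants likewise expect the caller to provide pointers named
 * p_y, p_u, p_v and p_buffer, __m64 temporaries mm0..mm7, and to include
 * <mmintrin.h> for the _mm_* intrinsics.
 */
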
#define INTRINSICS_INIT_16 \
    mm0 = (__m64)(uint64_t)*(uint32_t *)p_u; \
    mm1 = (__m64)(uint64_t)*(uint32_t *)p_v; \
    mm4 = (__m64)(uint64_t)0; \
    mm6 = (__m64)*(uint64_t *)p_y; \
    /* *(uint16_t *)p_buffer = 0; */

#define MMX_INIT_16_GRAY "                                                  \n\
movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
#movl      $0, (%3)         # cache preload for image                       \n\
"

#define MMX_INIT_32 "                                                       \n\
movd      (%1), %%mm0       # Load 4 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movl      $0, (%3)          # cache preload for image                       \n\
movd      (%2), %%mm1       # Load 4 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor      %%mm4, %%mm4      # zero mm4                                      \n\
movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
"

#define INTRINSICS_INIT_32 \
    mm0 = (__m64)(uint64_t)*(uint32_t *)p_u; \
    *(uint16_t *)p_buffer = 0; \
    mm1 = (__m64)(uint64_t)*(uint32_t *)p_v; \
    mm4 = (__m64)(uint64_t)0; \
    mm6 = (__m64)*(uint64_t *)p_y;

/*
 * Do the multiply part of the conversion for even and odd pixels,
 * register usage:
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd  pixels,
 * mm6 -> Y even, mm7 -> Y odd
 */

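/*
 * Note on the even/odd split used below: I420 chroma is subsampled 2:1
 * horizontally, so each (Cb, Cr) pair applies to two neighbouring luma
 * samples.  The chroma contributions are therefore computed once
 * (mm0/mm1/mm2) while the 8 Y samples are split into even (mm6) and odd
 * (mm7) halves, so the same chroma products can be added to both.
 */
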
#define MMX_YUV_MUL "                                                       \n\
# convert the chroma part                                                   \n\
punpcklbw %%mm4, %%mm0          # scatter 4 Cb    00 u3 00 u2 00 u1 00 u0   \n\
punpcklbw %%mm4, %%mm1          # scatter 4 Cr    00 v3 00 v2 00 v1 00 v0   \n\
psubsw    mmx_80w, %%mm0        # Cb -= 128                                 \n\
psubsw    mmx_80w, %%mm1        # Cr -= 128                                 \n\
psllw     $3, %%mm0             # Promote precision                         \n\
psllw     $3, %%mm1             # Promote precision                         \n\
movq      %%mm0, %%mm2          # Copy 4 Cb       00 u3 00 u2 00 u1 00 u0   \n\
movq      %%mm1, %%mm3          # Copy 4 Cr       00 v3 00 v2 00 v1 00 v0   \n\
pmulhw    mmx_U_green, %%mm2    # Mul Cb with green coeff -> Cb green       \n\
pmulhw    mmx_V_green, %%mm3    # Mul Cr with green coeff -> Cr green       \n\
pmulhw    mmx_U_blue, %%mm0     # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0   \n\
pmulhw    mmx_V_red, %%mm1      # Mul Cr -> Cred  00 r3 00 r2 00 r1 00 r0   \n\
paddsw    %%mm3, %%mm2          # Cb green + Cr green -> Cgreen             \n\
                                                                            \n\
# convert the luma part                                                     \n\
psubusb   mmx_10w, %%mm6        # Y -= 16                                   \n\
movq      %%mm6, %%mm7          # Copy 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
pand      mmx_00ffw, %%mm6      # get Y even      00 Y6 00 Y4 00 Y2 00 Y0   \n\
psrlw     $8, %%mm7             # get Y odd       00 Y7 00 Y5 00 Y3 00 Y1   \n\
psllw     $3, %%mm6             # Promote precision                         \n\
psllw     $3, %%mm7             # Promote precision                         \n\
pmulhw    mmx_Y_coeff, %%mm6    # Mul 4 Y even    00 y6 00 y4 00 y2 00 y0   \n\
pmulhw    mmx_Y_coeff, %%mm7    # Mul 4 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
"

#define INTRINSICS_YUV_MUL \
    mm0 = _mm_unpacklo_pi8(mm0, mm4); \
    mm1 = _mm_unpacklo_pi8(mm1, mm4); \
    mm0 = _mm_subs_pi16(mm0, (__m64)mmx_80w); \
    mm1 = _mm_subs_pi16(mm1, (__m64)mmx_80w); \
    mm0 = _mm_slli_pi16(mm0, 3); \
    mm1 = _mm_slli_pi16(mm1, 3); \
    mm2 = mm0; \
    mm3 = mm1; \
    mm2 = _mm_mulhi_pi16(mm2, (__m64)mmx_U_green); \
    mm3 = _mm_mulhi_pi16(mm3, (__m64)mmx_V_green); \
    mm0 = _mm_mulhi_pi16(mm0, (__m64)mmx_U_blue); \
    mm1 = _mm_mulhi_pi16(mm1, (__m64)mmx_V_red); \
    mm2 = _mm_adds_pi16(mm2, mm3); \
    \
    mm6 = _mm_subs_pu8(mm6, (__m64)mmx_10w); \
    mm7 = mm6; \
    mm6 = _mm_and_si64(mm6, (__m64)mmx_00ffw); \
    mm7 = _mm_srli_pi16(mm7, 8); \
    mm6 = _mm_slli_pi16(mm6, 3); \
    mm7 = _mm_slli_pi16(mm7, 3); \
    mm6 = _mm_mulhi_pi16(mm6, (__m64)mmx_Y_coeff); \
    mm7 = _mm_mulhi_pi16(mm7, (__m64)mmx_Y_coeff);

/*
 * Do the addition part of the conversion for even and odd pixels,
 * register usage:
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd  pixels,
 * mm6 -> Y even, mm7 -> Y odd
 */

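/*
 * The additions below produce signed 16-bit R/G/B values that may fall
 * outside 0..255; packuswb both saturates them to 0..255 and narrows them
 * to bytes, and the final punpcklbw re-interleaves the even and odd pixels
 * back into their original left-to-right order.
 */
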
#define MMX_YUV_ADD "                                                       \n\
# Do horizontal and vertical scaling                                        \n\
movq      %%mm0, %%mm3          # Copy Cblue                                \n\
movq      %%mm1, %%mm4          # Copy Cred                                 \n\
movq      %%mm2, %%mm5          # Copy Cgreen                               \n\
paddsw    %%mm6, %%mm0          # Y even + Cblue  00 B6 00 B4 00 B2 00 B0   \n\
paddsw    %%mm7, %%mm3          # Y odd  + Cblue  00 B7 00 B5 00 B3 00 B1   \n\
paddsw    %%mm6, %%mm1          # Y even + Cred   00 R6 00 R4 00 R2 00 R0   \n\
paddsw    %%mm7, %%mm4          # Y odd  + Cred   00 R7 00 R5 00 R3 00 R1   \n\
paddsw    %%mm6, %%mm2          # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0   \n\
paddsw    %%mm7, %%mm5          # Y odd  + Cgreen 00 G7 00 G5 00 G3 00 G1   \n\
                                                                            \n\
# Limit RGB even to 0..255                                                  \n\
packuswb  %%mm0, %%mm0          # B6 B4 B2 B0 / B6 B4 B2 B0                 \n\
packuswb  %%mm1, %%mm1          # R6 R4 R2 R0 / R6 R4 R2 R0                 \n\
packuswb  %%mm2, %%mm2          # G6 G4 G2 G0 / G6 G4 G2 G0                 \n\
                                                                            \n\
# Limit RGB odd to 0..255                                                   \n\
packuswb  %%mm3, %%mm3          # B7 B5 B3 B1 / B7 B5 B3 B1                 \n\
packuswb  %%mm4, %%mm4          # R7 R5 R3 R1 / R7 R5 R3 R1                 \n\
packuswb  %%mm5, %%mm5          # G7 G5 G3 G1 / G7 G5 G3 G1                 \n\
                                                                            \n\
# Interleave RGB even and odd                                               \n\
punpcklbw %%mm3, %%mm0          #                 B7 B6 B5 B4 B3 B2 B1 B0   \n\
punpcklbw %%mm4, %%mm1          #                 R7 R6 R5 R4 R3 R2 R1 R0   \n\
punpcklbw %%mm5, %%mm2          #                 G7 G6 G5 G4 G3 G2 G1 G0   \n\
"

#define INTRINSICS_YUV_ADD \
    mm3 = mm0; \
    mm4 = mm1; \
    mm5 = mm2; \
    mm0 = _mm_adds_pi16(mm0, mm6); \
    mm3 = _mm_adds_pi16(mm3, mm7); \
    mm1 = _mm_adds_pi16(mm1, mm6); \
    mm4 = _mm_adds_pi16(mm4, mm7); \
    mm2 = _mm_adds_pi16(mm2, mm6); \
    mm5 = _mm_adds_pi16(mm5, mm7); \
    \
    mm0 = _mm_packs_pu16(mm0, mm0); \
    mm1 = _mm_packs_pu16(mm1, mm1); \
    mm2 = _mm_packs_pu16(mm2, mm2); \
    \
    mm3 = _mm_packs_pu16(mm3, mm3); \
    mm4 = _mm_packs_pu16(mm4, mm4); \
    mm5 = _mm_packs_pu16(mm5, mm5); \
    \
    mm0 = _mm_unpacklo_pi8(mm0, mm3); \
    mm1 = _mm_unpacklo_pi8(mm1, mm4); \
    mm2 = _mm_unpacklo_pi8(mm2, mm5);

/*
 * Grayscale case, only use Y
 */

#define MMX_YUV_GRAY "                                                      \n\
# convert the luma part                                                     \n\
psubusb   mmx_10w, %%mm6                                                    \n\
movq      %%mm6, %%mm7                                                      \n\
pand      mmx_00ffw, %%mm6                                                  \n\
psrlw     $8, %%mm7                                                         \n\
psllw     $3, %%mm6                                                         \n\
psllw     $3, %%mm7                                                         \n\
pmulhw    mmx_Y_coeff, %%mm6                                                \n\
pmulhw    mmx_Y_coeff, %%mm7                                                \n\
packuswb  %%mm6, %%mm6                                                      \n\
packuswb  %%mm7, %%mm7                                                      \n\
punpcklbw %%mm7, %%mm6                                                      \n\
"

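/*
 * The unpack below turns each gray byte g of mm6 into an RGB565 pixel with
 * R = G = B = g, i.e. ((g & 0xf8) << 8) | ((g & 0xfc) << 3) | (g >> 3),
 * and also preloads the next 8 Y samples into mm6 while storing.
 */
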
#define MMX_UNPACK_16_GRAY "                                                \n\
movq      %%mm6, %%mm5                                                      \n\
pand      mmx_mask_f8, %%mm6                                                \n\
pand      mmx_mask_fc, %%mm5                                                \n\
movq      %%mm6, %%mm7                                                      \n\
psrlw     $3, %%mm7                                                         \n\
pxor      %%mm3, %%mm3                                                      \n\
movq      %%mm7, %%mm2                                                      \n\
movq      %%mm5, %%mm0                                                      \n\
punpcklbw %%mm3, %%mm5                                                      \n\
punpcklbw %%mm6, %%mm7                                                      \n\
psllw     $3, %%mm5                                                         \n\
por       %%mm5, %%mm7                                                      \n\
movq      %%mm7, (%3)                                                       \n\
punpckhbw %%mm3, %%mm0                                                      \n\
punpckhbw %%mm6, %%mm2                                                      \n\
psllw     $3, %%mm0                                                         \n\
movq      8(%0), %%mm6                                                      \n\
por       %%mm0, %%mm2                                                      \n\
movq      %%mm2, 8(%3)                                                      \n\
"


/*
 * convert RGB plane to RGB 15 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */

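/*
 * Packing below: each RGB555 pixel ends up as
 * ((r & 0xf8) << 7) | ((g & 0xf8) << 2) | (b >> 3), with bit 15 unused.
 * The fragment also preloads the Y/Cb/Cr samples of the next group of 8
 * pixels so the loads overlap with the packing work.
 */
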
#define MMX_UNPACK_15 "                                                     \n\
# mask unneeded bits off                                                    \n\
pand      mmx_mask_f8, %%mm0    # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
psrlw     $3,%%mm0              # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pand      mmx_mask_f8, %%mm2    # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
pand      mmx_mask_f8, %%mm1    # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
psrlw     $1,%%mm1              # __r7r6r5 r4r3____ __r7r6r5 r4r3____       \n\
pxor      %%mm4, %%mm4          # zero mm4                                  \n\
movq      %%mm0, %%mm5          # Copy B7-B0                                \n\
movq      %%mm2, %%mm7          # Copy G7-G0                                \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 0-3                           \n\
punpcklbw %%mm4, %%mm2          # ________ ________ g7g6g5g4 g3______       \n\
punpcklbw %%mm1, %%mm0          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $2,%%mm2              # ________ ____g7g6 g5g4g3__ ________       \n\
por       %%mm2, %%mm0          # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
movq      8(%0), %%mm6          # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
movq      %%mm0, (%3)           # store pixel 0-3                           \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 4-7                           \n\
punpckhbw %%mm4, %%mm7          # ________ ________ g7g6g5g4 g3______       \n\
punpckhbw %%mm1, %%mm5          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $2,%%mm7              # ________ ____g7g6 g5g4g3__ ________       \n\
movd      4(%1), %%mm0          # Load 4 Cb       __ __ __ __ u3 u2 u1 u0   \n\
por       %%mm7, %%mm5          # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
movd      4(%2), %%mm1          # Load 4 Cr       __ __ __ __ v3 v2 v1 v0   \n\
movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
"

#define INTRINSICS_UNPACK_15 \
    mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
    mm0 = _mm_srli_pi16(mm0, 3); \
    mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_f8); \
    mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
    mm1 = _mm_srli_pi16(mm1, 1); \
    mm4 = (__m64)(uint64_t)0; \
    mm5 = mm0; \
    mm7 = mm2; \
    \
    mm2 = _mm_unpacklo_pi8(mm2, mm4); \
    mm0 = _mm_unpacklo_pi8(mm0, mm1); \
    mm2 = _mm_slli_pi16(mm2, 2); \
    mm0 = _mm_or_si64(mm0, mm2); \
    mm6 = (__m64)*(uint64_t *)(p_y + 8); \
    *(uint64_t *)p_buffer = (uint64_t)mm0; \
    \
    mm7 = _mm_unpackhi_pi8(mm7, mm4); \
    mm5 = _mm_unpackhi_pi8(mm5, mm1); \
    mm7 = _mm_slli_pi16(mm7, 2); \
    mm0 = (__m64)(uint64_t)*(uint32_t *)(p_u + 4); \
    mm5 = _mm_or_si64(mm5, mm7); \
    mm1 = (__m64)(uint64_t)*(uint32_t *)(p_v + 4); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;

/*
 * convert RGB plane to RGB 16 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */

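/*
 * Packing below: each RGB565 pixel ends up as
 * ((r & 0xf8) << 8) | ((g & 0xfc) << 3) | (b >> 3).
 * As in the 15-bit case, the Y/Cb/Cr samples of the next group of 8 pixels
 * are preloaded while the result is being stored.
 */
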
#define MMX_UNPACK_16 "                                                     \n\
# mask unneeded bits off                                                    \n\
pand      mmx_mask_f8, %%mm0    # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
pand      mmx_mask_fc, %%mm2    # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
pand      mmx_mask_f8, %%mm1    # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
psrlw     $3,%%mm0              # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pxor      %%mm4, %%mm4          # zero mm4                                  \n\
movq      %%mm0, %%mm5          # Copy B7-B0                                \n\
movq      %%mm2, %%mm7          # Copy G7-G0                                \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 0-3                           \n\
punpcklbw %%mm4, %%mm2          # ________ ________ g7g6g5g4 g3g2____       \n\
punpcklbw %%mm1, %%mm0          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%mm2              # ________ __g7g6g5 g4g3g2__ ________       \n\
por       %%mm2, %%mm0          # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movq      8(%0), %%mm6          # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
movq      %%mm0, (%3)           # store pixel 0-3                           \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 4-7                           \n\
punpckhbw %%mm4, %%mm7          # ________ ________ g7g6g5g4 g3g2____       \n\
punpckhbw %%mm1, %%mm5          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%mm7              # ________ __g7g6g5 g4g3g2__ ________       \n\
movd      4(%1), %%mm0          # Load 4 Cb       __ __ __ __ u3 u2 u1 u0   \n\
por       %%mm7, %%mm5          # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movd      4(%2), %%mm1          # Load 4 Cr       __ __ __ __ v3 v2 v1 v0   \n\
movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
"

#define INTRINSICS_UNPACK_16 \
    mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
    mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_fc); \
    mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
    mm0 = _mm_srli_pi16(mm0, 3); \
    mm4 = (__m64)(uint64_t)0; \
    mm5 = mm0; \
    mm7 = mm2; \
    \
    mm2 = _mm_unpacklo_pi8(mm2, mm4); \
    mm0 = _mm_unpacklo_pi8(mm0, mm1); \
    mm2 = _mm_slli_pi16(mm2, 3); \
    mm0 = _mm_or_si64(mm0, mm2); \
    mm6 = (__m64)*(uint64_t *)(p_y + 8); \
    *(uint64_t *)p_buffer = (uint64_t)mm0; \
    \
    mm7 = _mm_unpackhi_pi8(mm7, mm4); \
    mm5 = _mm_unpackhi_pi8(mm5, mm1); \
    mm7 = _mm_slli_pi16(mm7, 3); \
    mm0 = (__m64)(uint64_t)*(uint32_t *)(p_u + 4); \
    mm5 = _mm_or_si64(mm5, mm7); \
    mm1 = (__m64)(uint64_t)*(uint32_t *)(p_v + 4); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;

/*
 * convert RGB plane to RGB packed format,
 * mm0 -> B, mm1 -> R, mm2 -> G, mm3 -> 0,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */

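/*
 * Packing below: each pixel is stored as 4 bytes B, G, R, 0 (a little
 * endian 0x00RRGGBB word), 8 pixels per iteration.  Unlike the 15/16-bit
 * variants, the preload of the next Y/Cb/Cr samples is left commented out
 * at the end of the fragment.
 */
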
#define MMX_UNPACK_32 "                                                     \n\
pxor      %%mm3, %%mm3  # zero mm3                                          \n\
movq      %%mm0, %%mm6  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
movq      %%mm1, %%mm7  #                 R7 R6 R5 R4 R3 R2 R1 R0           \n\
movq      %%mm0, %%mm4  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
movq      %%mm1, %%mm5  #                 R7 R6 R5 R4 R3 R2 R1 R0           \n\
punpcklbw %%mm2, %%mm6  #                 G3 B3 G2 B2 G1 B1 G0 B0           \n\
punpcklbw %%mm3, %%mm7  #                 00 R3 00 R2 00 R1 00 R0           \n\
punpcklwd %%mm7, %%mm6  #                 00 R1 G1 B1 00 R0 G0 B0           \n\
movq      %%mm6, (%3)   # Store ARGB1 ARGB0                                 \n\
movq      %%mm0, %%mm6  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
punpcklbw %%mm2, %%mm6  #                 G3 B3 G2 B2 G1 B1 G0 B0           \n\
punpckhwd %%mm7, %%mm6  #                 00 R3 G3 B3 00 R2 G2 B2           \n\
movq      %%mm6, 8(%3)  # Store ARGB3 ARGB2                                 \n\
punpckhbw %%mm2, %%mm4  #                 G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpckhbw %%mm3, %%mm5  #                 00 R7 00 R6 00 R5 00 R4           \n\
punpcklwd %%mm5, %%mm4  #                 00 R5 G5 B5 00 R4 G4 B4           \n\
movq      %%mm4, 16(%3) # Store ARGB5 ARGB4                                 \n\
movq      %%mm0, %%mm4  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
punpckhbw %%mm2, %%mm4  #                 G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpckhwd %%mm5, %%mm4  #                 00 R7 G7 B7 00 R6 G6 B6           \n\
movq      %%mm4, 24(%3) # Store ARGB7 ARGB6                                 \n\
                                                                            \n\
#movd      4(%1), %%mm0  # Load 4 Cb       00 00 00 00 u3 u2 u1 u0           \n\
#movd      4(%2), %%mm1  # Load 4 Cr       00 00 00 00 v3 v2 v1 v0           \n\
#pxor      %%mm4, %%mm4  # zero mm4                                          \n\
#movq      8(%0), %%mm6  # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0           \n\
"

#define INTRINSICS_UNPACK_32 \
    mm3 = (__m64)(uint64_t)0; \
    mm6 = mm0; \
    mm7 = mm1; \
    mm4 = mm0; \
    mm5 = mm1; \
    mm6 = _mm_unpacklo_pi8(mm6, mm2); \
    mm7 = _mm_unpacklo_pi8(mm7, mm3); \
    mm6 = _mm_unpacklo_pi16(mm6, mm7); \
    *(uint64_t *)p_buffer = (uint64_t)mm6; \
    mm6 = mm0; \
    mm6 = _mm_unpacklo_pi8(mm6, mm2); \
    mm6 = _mm_unpackhi_pi16(mm6, mm7); \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6; \
    mm4 = _mm_unpackhi_pi8(mm4, mm2); \
    mm5 = _mm_unpackhi_pi8(mm5, mm3); \
    mm4 = _mm_unpacklo_pi16(mm4, mm5); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm4; \
    mm4 = mm0; \
    mm4 = _mm_unpackhi_pi8(mm4, mm2); \
    mm4 = _mm_unpackhi_pi16(mm4, mm5); \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm4;

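/*
 * A hedged sketch of how these fragments might fit together for one row of
 * the 16 bpp conversion, using the intrinsics variants.  The real drivers
 * live in the i420_rgb*.c modules of this directory; the function name and
 * loop structure below are illustrative assumptions only, and the block
 * would additionally need <mmintrin.h>:
 */
#if 0
static void yuv420_row_to_rgb565_sketch( uint8_t *p_y, uint8_t *p_u,
                                         uint8_t *p_v, uint16_t *p_buffer,
                                         int i_width )
{
    __m64 mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7;
    int i;

    for( i = 0; i < i_width / 8; i++ )
    {
        INTRINSICS_INIT_16          /* load 8 Y, 4 Cb, 4 Cr */
        INTRINSICS_YUV_MUL          /* fixed-point coefficient multiplies */
        INTRINSICS_YUV_ADD          /* add luma, clamp, interleave */
        INTRINSICS_UNPACK_16        /* pack to RGB565 and store 8 pixels */

        /* note: INTRINSICS_UNPACK_16 already preloads the next group's
         * Y/Cb/Cr, so a real loop must take care not to read past the
         * end of the row */
        p_y += 8;
        p_u += 4;
        p_v += 4;
        p_buffer += 8;
    }
    _mm_empty();                    /* emms: leave MMX state */
}
#endif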