/*****************************************************************************
 * i420_rgb_mmx.h: MMX YUV transformation assembly
 *****************************************************************************
 * Copyright (C) 1999-2004 the VideoLAN team
 * $Id$
 *
 * Authors: Olie Lho <ollie@sis.com.tw>
 *          Gaël Hendryckx <jimmy@via.ecp.fr>
 *          Samuel Hocevar <sam@zoy.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *****************************************************************************/

/* hope these constant values are cache line aligned */
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)
#define USED_U64(foo) \
    static const uint64_t foo __asm__ (#foo) __attribute__((used))
#else
#define USED_U64(foo) \
    static const uint64_t foo __asm__ (#foo) __attribute__((unused))
#endif
USED_U64(mmx_80w)     = 0x0080008000800080ULL;
USED_U64(mmx_10w)     = 0x1010101010101010ULL;
USED_U64(mmx_00ffw)   = 0x00ff00ff00ff00ffULL;
USED_U64(mmx_Y_coeff) = 0x253f253f253f253fULL;

USED_U64(mmx_U_green) = 0xf37df37df37df37dULL;
USED_U64(mmx_U_blue)  = 0x4093409340934093ULL;
USED_U64(mmx_V_red)   = 0x3312331233123312ULL;
USED_U64(mmx_V_green) = 0xe5fce5fce5fce5fcULL;

USED_U64(mmx_mask_f8) = 0xf8f8f8f8f8f8f8f8ULL;
USED_U64(mmx_mask_fc) = 0xfcfcfcfcfcfcfcfcULL;
#undef USED_U64

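/*
 * These look like the usual ITU-R BT.601 conversion factors in 2^13
 * fixed point: each operand is promoted with "psllw $3" before pmulhw
 * keeps the high 16 bits of the product, so the effective arithmetic is
 * (x << 3) * c >> 16 == (x * c) >> 13, i.e. c = round(factor * 8192):
 *
 *   0x253f =  9535 ~  1.164 * 8192   (Y scale, 255/219)
 *   0x4093 = 16531 ~  2.018 * 8192   (Cb -> blue)
 *   0xf37d = -3203 ~ -0.391 * 8192   (Cb -> green)
 *   0x3312 = 13074 ~  1.596 * 8192   (Cr -> red)
 *   0xe5fc = -6660 ~ -0.813 * 8192   (Cr -> green)
 */
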
/* Use RIP-relative code in PIC mode on amd64 */
#if defined(__x86_64__) && defined(__PIC__)
#   define G "(%%rip)"
#else
#   define G
#endif

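/*
 * Illustration of the token pasting (assumed expansion, not extra code):
 * with G defined as "(%%rip)", an operand written as  mmx_80w"G"  in the
 * strings below becomes  mmx_80w(%%rip), a RIP-relative reference, and
 * collapses to a plain absolute  mmx_80w  when G is empty.
 */
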
#define MMX_INIT_16 "                                                       \n\
movd      (%1), %%mm0       # Load 4 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movd      (%2), %%mm1       # Load 4 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor      %%mm4, %%mm4      # zero mm4                                      \n\
movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
#movl      $0, (%3)         # cache preload for image                       \n\
"

#define INTRINSICS_INIT_16 \
    tmp64 = *(uint32_t *)p_u; \
    mm0 = (__m64)tmp64; \
    tmp64 = *(uint32_t *)p_v; \
    mm1 = (__m64)tmp64; \
    mm4 = (__m64)(uint64_t)0; \
    mm6 = (__m64)*(uint64_t *)p_y; \
    /* *(uint16_t *)p_buffer = 0; */

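/*
 * Usage sketch, for orientation only: the real loops live in the
 * i420_rgb* modules (e.g. i420_rgb16.c) and run after all of the macros
 * below are defined. Operands %0..%3 are the Y, Cb, Cr and output
 * pointers; each iteration converts 8 pixels:
 *
 *     for( i_x = i_width / 8; i_x--; )
 *     {
 *         __asm__( MMX_INIT_16
 *                  : : "r" (p_y), "r" (p_u), "r" (p_v), "r" (p_buffer) );
 *         __asm__( MMX_YUV_MUL
 *                  MMX_YUV_ADD
 *                  MMX_UNPACK_16
 *                  : : "r" (p_y), "r" (p_u), "r" (p_v), "r" (p_buffer) );
 *         p_y += 8; p_u += 4; p_v += 4; p_buffer += 8;
 *     }
 */
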
#define MMX_INIT_16_GRAY "                                                  \n\
movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
#movl      $0, (%3)         # cache preload for image                       \n\
"

#define MMX_INIT_32 "                                                       \n\
movd      (%1), %%mm0       # Load 4 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movl      $0, (%3)          # cache preload for image                       \n\
movd      (%2), %%mm1       # Load 4 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor      %%mm4, %%mm4      # zero mm4                                      \n\
movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
"

#define INTRINSICS_INIT_32 \
    tmp64 = *(uint32_t *)p_u; \
    mm0 = (__m64)tmp64; \
    *(uint16_t *)p_buffer = 0; \
    tmp64 = *(uint32_t *)p_v; \
    mm1 = (__m64)tmp64; \
    mm4 = (__m64)(uint64_t)0; \
    mm6 = (__m64)*(uint64_t *)p_y;

/*
 * Do the multiply part of the conversion for even and odd pixels,
 * register usage:
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd  pixels,
 * mm6 -> Y even, mm7 -> Y odd
 */

#define MMX_YUV_MUL "                                                       \n\
# convert the chroma part                                                   \n\
punpcklbw %%mm4, %%mm0          # scatter 4 Cb    00 u3 00 u2 00 u1 00 u0   \n\
punpcklbw %%mm4, %%mm1          # scatter 4 Cr    00 v3 00 v2 00 v1 00 v0   \n\
psubsw    mmx_80w"G", %%mm0     # Cb -= 128                                 \n\
psubsw    mmx_80w"G", %%mm1     # Cr -= 128                                 \n\
psllw     $3, %%mm0             # Promote precision                         \n\
psllw     $3, %%mm1             # Promote precision                         \n\
movq      %%mm0, %%mm2          # Copy 4 Cb       00 u3 00 u2 00 u1 00 u0   \n\
movq      %%mm1, %%mm3          # Copy 4 Cr       00 v3 00 v2 00 v1 00 v0   \n\
pmulhw    mmx_U_green"G", %%mm2 # Mul Cb with green coeff -> Cb green       \n\
pmulhw    mmx_V_green"G", %%mm3 # Mul Cr with green coeff -> Cr green       \n\
pmulhw    mmx_U_blue"G", %%mm0  # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0   \n\
pmulhw    mmx_V_red"G", %%mm1   # Mul Cr -> Cred  00 r3 00 r2 00 r1 00 r0   \n\
paddsw    %%mm3, %%mm2          # Cb green + Cr green -> Cgreen             \n\
                                                                            \n\
# convert the luma part                                                     \n\
psubusb   mmx_10w"G", %%mm6     # Y -= 16                                   \n\
movq      %%mm6, %%mm7          # Copy 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
pand      mmx_00ffw"G", %%mm6   # get Y even      00 Y6 00 Y4 00 Y2 00 Y0   \n\
psrlw     $8, %%mm7             # get Y odd       00 Y7 00 Y5 00 Y3 00 Y1   \n\
psllw     $3, %%mm6             # Promote precision                         \n\
psllw     $3, %%mm7             # Promote precision                         \n\
pmulhw    mmx_Y_coeff"G", %%mm6 # Mul 4 Y even    00 y6 00 y4 00 y2 00 y0   \n\
pmulhw    mmx_Y_coeff"G", %%mm7 # Mul 4 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
"

#define INTRINSICS_YUV_MUL \
    mm0 = _mm_unpacklo_pi8(mm0, mm4); \
    mm1 = _mm_unpacklo_pi8(mm1, mm4); \
    mm0 = _mm_subs_pi16(mm0, (__m64)mmx_80w); \
    mm1 = _mm_subs_pi16(mm1, (__m64)mmx_80w); \
    mm0 = _mm_slli_pi16(mm0, 3); \
    mm1 = _mm_slli_pi16(mm1, 3); \
    mm2 = mm0; \
    mm3 = mm1; \
    mm2 = _mm_mulhi_pi16(mm2, (__m64)mmx_U_green); \
    mm3 = _mm_mulhi_pi16(mm3, (__m64)mmx_V_green); \
    mm0 = _mm_mulhi_pi16(mm0, (__m64)mmx_U_blue); \
    mm1 = _mm_mulhi_pi16(mm1, (__m64)mmx_V_red); \
    mm2 = _mm_adds_pi16(mm2, mm3); \
    \
    mm6 = _mm_subs_pu8(mm6, (__m64)mmx_10w); \
    mm7 = mm6; \
    mm6 = _mm_and_si64(mm6, (__m64)mmx_00ffw); \
    mm7 = _mm_srli_pi16(mm7, 8); \
    mm6 = _mm_slli_pi16(mm6, 3); \
    mm7 = _mm_slli_pi16(mm7, 3); \
    mm6 = _mm_mulhi_pi16(mm6, (__m64)mmx_Y_coeff); \
    mm7 = _mm_mulhi_pi16(mm7, (__m64)mmx_Y_coeff);

/*
 * Do the addition part of the conversion for even and odd pixels,
 * register usage:
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd  pixels,
 * mm6 -> Y even, mm7 -> Y odd
 */

#define MMX_YUV_ADD "                                                       \n\
# Do horizontal and vertical scaling                                        \n\
movq      %%mm0, %%mm3          # Copy Cblue                                \n\
movq      %%mm1, %%mm4          # Copy Cred                                 \n\
movq      %%mm2, %%mm5          # Copy Cgreen                               \n\
paddsw    %%mm6, %%mm0          # Y even + Cblue  00 B6 00 B4 00 B2 00 B0   \n\
paddsw    %%mm7, %%mm3          # Y odd  + Cblue  00 B7 00 B5 00 B3 00 B1   \n\
paddsw    %%mm6, %%mm1          # Y even + Cred   00 R6 00 R4 00 R2 00 R0   \n\
paddsw    %%mm7, %%mm4          # Y odd  + Cred   00 R7 00 R5 00 R3 00 R1   \n\
paddsw    %%mm6, %%mm2          # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0   \n\
paddsw    %%mm7, %%mm5          # Y odd  + Cgreen 00 G7 00 G5 00 G3 00 G1   \n\
                                                                            \n\
# Limit RGB even to 0..255                                                  \n\
packuswb  %%mm0, %%mm0          # B6 B4 B2 B0 / B6 B4 B2 B0                 \n\
packuswb  %%mm1, %%mm1          # R6 R4 R2 R0 / R6 R4 R2 R0                 \n\
packuswb  %%mm2, %%mm2          # G6 G4 G2 G0 / G6 G4 G2 G0                 \n\
                                                                            \n\
# Limit RGB odd to 0..255                                                   \n\
packuswb  %%mm3, %%mm3          # B7 B5 B3 B1 / B7 B5 B3 B1                 \n\
packuswb  %%mm4, %%mm4          # R7 R5 R3 R1 / R7 R5 R3 R1                 \n\
packuswb  %%mm5, %%mm5          # G7 G5 G3 G1 / G7 G5 G3 G1                 \n\
                                                                            \n\
# Interleave RGB even and odd                                               \n\
punpcklbw %%mm3, %%mm0          #                 B7 B6 B5 B4 B3 B2 B1 B0   \n\
punpcklbw %%mm4, %%mm1          #                 R7 R6 R5 R4 R3 R2 R1 R0   \n\
punpcklbw %%mm5, %%mm2          #                 G7 G6 G5 G4 G3 G2 G1 G0   \n\
"

#define INTRINSICS_YUV_ADD \
    mm3 = mm0; \
    mm4 = mm1; \
    mm5 = mm2; \
    mm0 = _mm_adds_pi16(mm0, mm6); \
    mm3 = _mm_adds_pi16(mm3, mm7); \
    mm1 = _mm_adds_pi16(mm1, mm6); \
    mm4 = _mm_adds_pi16(mm4, mm7); \
    mm2 = _mm_adds_pi16(mm2, mm6); \
    mm5 = _mm_adds_pi16(mm5, mm7); \
    \
    mm0 = _mm_packs_pu16(mm0, mm0); \
    mm1 = _mm_packs_pu16(mm1, mm1); \
    mm2 = _mm_packs_pu16(mm2, mm2); \
    \
    mm3 = _mm_packs_pu16(mm3, mm3); \
    mm4 = _mm_packs_pu16(mm4, mm4); \
    mm5 = _mm_packs_pu16(mm5, mm5); \
    \
    mm0 = _mm_unpacklo_pi8(mm0, mm3); \
    mm1 = _mm_unpacklo_pi8(mm1, mm4); \
    mm2 = _mm_unpacklo_pi8(mm2, mm5);

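/*
 * Scalar restatement of YUV_MUL + YUV_ADD for one pixel, a reference
 * sketch for clarity; clamp8 and yuv_to_rgb_ref are illustrative
 * helpers, not VLC functions, and the SIMD paths do not call them.
 */
static inline uint8_t clamp8(int v)
{
    return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
}

static inline void yuv_to_rgb_ref(uint8_t y, uint8_t u, uint8_t v,
                                  uint8_t *r, uint8_t *g, uint8_t *b)
{
    int luma = ((y < 16 ? 0 : y - 16) * 9535) >> 13;        /* mmx_Y_coeff */
    int cb = u - 128, cr = v - 128;
    *b = clamp8(luma + ((cb * 16531) >> 13));               /* mmx_U_blue  */
    *g = clamp8(luma + ((cb * -3203 + cr * -6660) >> 13));  /* *_green     */
    *r = clamp8(luma + ((cr * 13074) >> 13));               /* mmx_V_red   */
}
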
/*
 * Grayscale case, only use Y
 */

#define MMX_YUV_GRAY "                                                      \n\
# convert the luma part                                                     \n\
psubusb   mmx_10w"G", %%mm6     # Y -= 16                                   \n\
movq      %%mm6, %%mm7          # Copy 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
pand      mmx_00ffw"G", %%mm6   # get Y even      00 Y6 00 Y4 00 Y2 00 Y0   \n\
psrlw     $8, %%mm7             # get Y odd       00 Y7 00 Y5 00 Y3 00 Y1   \n\
psllw     $3, %%mm6             # Promote precision                         \n\
psllw     $3, %%mm7             # Promote precision                         \n\
pmulhw    mmx_Y_coeff"G", %%mm6 # Mul 4 Y even    00 y6 00 y4 00 y2 00 y0   \n\
pmulhw    mmx_Y_coeff"G", %%mm7 # Mul 4 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
packuswb  %%mm6, %%mm6          # Limit Y even to 0..255                    \n\
packuswb  %%mm7, %%mm7          # Limit Y odd  to 0..255                    \n\
punpcklbw %%mm7, %%mm6          # Interleave      y7 y6 y5 y4 y3 y2 y1 y0   \n\
"

#define MMX_UNPACK_16_GRAY "                                                \n\
movq      %%mm6, %%mm5          # Copy 8 gray     g7 g6 g5 g4 g3 g2 g1 g0   \n\
pand      mmx_mask_f8"G", %%mm6 # keep 5 MSBs for red and blue              \n\
pand      mmx_mask_fc"G", %%mm5 # keep 6 MSBs for green                     \n\
movq      %%mm6, %%mm7          # Copy masked gray                          \n\
psrlw     $3, %%mm7             # 5-bit gray      g >> 3 in each byte       \n\
pxor      %%mm3, %%mm3          # zero mm3                                  \n\
movq      %%mm7, %%mm2          # Copy 5-bit gray                           \n\
movq      %%mm5, %%mm0          # Copy green part                           \n\
punpcklbw %%mm3, %%mm5          # expand green,   pixel 0-3                 \n\
punpcklbw %%mm6, %%mm7          # red and blue,   pixel 0-3                 \n\
psllw     $3, %%mm5             # align green to bits 10..5                 \n\
por       %%mm5, %%mm7          # assemble RGB16, pixel 0-3                 \n\
movq      %%mm7, (%3)           # store pixel 0-3                           \n\
punpckhbw %%mm3, %%mm0          # expand green,   pixel 4-7                 \n\
punpckhbw %%mm6, %%mm2          # red and blue,   pixel 4-7                 \n\
psllw     $3, %%mm0             # align green to bits 10..5                 \n\
movq      8(%0), %%mm6          # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
por       %%mm0, %%mm2          # assemble RGB16, pixel 4-7                 \n\
movq      %%mm2, 8(%3)          # store pixel 4-7                           \n\
"

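/*
 * In scalar terms the two grayscale macros reduce to replicating the
 * scaled luma into all three RGB16 channels. A reference sketch for
 * clarity (gray_to_rgb565 is an illustrative helper, not a VLC
 * function):
 */
static inline uint16_t gray_to_rgb565(uint8_t y)
{
    int g = ((y < 16 ? 0 : y - 16) * 9535) >> 13;   /* (Y - 16) * mmx_Y_coeff */
    if (g > 255) g = 255;                           /* packuswb saturation    */
    return (uint16_t)(((g & 0xf8) << 8)             /* red,   bits 15..11     */
                    | ((g & 0xfc) << 3)             /* green, bits 10..5      */
                    |  (g >> 3));                   /* blue,  bits  4..0      */
}
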
/*
 * convert RGB plane to RGB 15 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */

#define MMX_UNPACK_15 "                                                     \n\
# mask unneeded bits off                                                    \n\
pand      mmx_mask_f8"G", %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
psrlw     $3,%%mm0              # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pand      mmx_mask_f8"G", %%mm2 # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
pand      mmx_mask_f8"G", %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
psrlw     $1,%%mm1              # __r7r6r5 r4r3____ __r7r6r5 r4r3____       \n\
pxor      %%mm4, %%mm4          # zero mm4                                  \n\
movq      %%mm0, %%mm5          # Copy B7-B0                                \n\
movq      %%mm2, %%mm7          # Copy G7-G0                                \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 0-3                           \n\
punpcklbw %%mm4, %%mm2          # ________ ________ g7g6g5g4 g3______       \n\
punpcklbw %%mm1, %%mm0          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $2,%%mm2              # ________ ____g7g6 g5g4g3__ ________       \n\
por       %%mm2, %%mm0          # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
movq      8(%0), %%mm6          # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
movq      %%mm0, (%3)           # store pixel 0-3                           \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 4-7                           \n\
punpckhbw %%mm4, %%mm7          # ________ ________ g7g6g5g4 g3______       \n\
punpckhbw %%mm1, %%mm5          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $2,%%mm7              # ________ ____g7g6 g5g4g3__ ________       \n\
movd      4(%1), %%mm0          # Load 4 Cb       __ __ __ __ u3 u2 u1 u0   \n\
por       %%mm7, %%mm5          # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
movd      4(%2), %%mm1          # Load 4 Cr       __ __ __ __ v3 v2 v1 v0   \n\
movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
"

#define INTRINSICS_UNPACK_15 \
    mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
    mm0 = _mm_srli_pi16(mm0, 3); \
    mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_f8); \
    mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
    mm1 = _mm_srli_pi16(mm1, 1); \
    mm4 = (__m64)(uint64_t)0; \
    mm5 = mm0; \
    mm7 = mm2; \
    \
    mm2 = _mm_unpacklo_pi8(mm2, mm4); \
    mm0 = _mm_unpacklo_pi8(mm0, mm1); \
    mm2 = _mm_slli_pi16(mm2, 2); \
    mm0 = _mm_or_si64(mm0, mm2); \
    tmp64 = *(uint64_t *)(p_y + 8); \
    mm6 = (__m64)tmp64; \
    *(uint64_t *)p_buffer = (uint64_t)mm0; \
    \
    mm7 = _mm_unpackhi_pi8(mm7, mm4); \
    mm5 = _mm_unpackhi_pi8(mm5, mm1); \
    mm7 = _mm_slli_pi16(mm7, 2); \
    tmp64 = (uint64_t)*(uint32_t *)(p_u + 4); \
    mm0 = (__m64)tmp64; \
    mm5 = _mm_or_si64(mm5, mm7); \
    tmp64 = (uint64_t)*(uint32_t *)(p_v + 4); \
    mm1 = (__m64)tmp64; \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;

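/*
 * Bit layout produced by MMX_UNPACK_15, restated per pixel in plain C
 * (pack_rgb555 is an illustrative helper, not a VLC function; r, g, b
 * are the saturated 8-bit channels from YUV_ADD):
 */
static inline uint16_t pack_rgb555(uint8_t r, uint8_t g, uint8_t b)
{
    return (uint16_t)(((r & 0xf8) << 7)   /* r7..r3 -> bits 14..10 */
                    | ((g & 0xf8) << 2)   /* g7..g3 -> bits  9..5  */
                    |  (b >> 3));         /* b7..b3 -> bits  4..0  */
}
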
/*
 * convert RGB plane to RGB 16 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */

#define MMX_UNPACK_16 "                                                     \n\
# mask unneeded bits off                                                    \n\
pand      mmx_mask_f8"G", %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
pand      mmx_mask_fc"G", %%mm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
pand      mmx_mask_f8"G", %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
psrlw     $3,%%mm0              # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pxor      %%mm4, %%mm4          # zero mm4                                  \n\
movq      %%mm0, %%mm5          # Copy B7-B0                                \n\
movq      %%mm2, %%mm7          # Copy G7-G0                                \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 0-3                           \n\
punpcklbw %%mm4, %%mm2          # ________ ________ g7g6g5g4 g3g2____       \n\
punpcklbw %%mm1, %%mm0          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%mm2              # ________ __g7g6g5 g4g3g2__ ________       \n\
por       %%mm2, %%mm0          # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movq      8(%0), %%mm6          # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
movq      %%mm0, (%3)           # store pixel 0-3                           \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 4-7                           \n\
punpckhbw %%mm4, %%mm7          # ________ ________ g7g6g5g4 g3g2____       \n\
punpckhbw %%mm1, %%mm5          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%mm7              # ________ __g7g6g5 g4g3g2__ ________       \n\
movd      4(%1), %%mm0          # Load 4 Cb       __ __ __ __ u3 u2 u1 u0   \n\
por       %%mm7, %%mm5          # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movd      4(%2), %%mm1          # Load 4 Cr       __ __ __ __ v3 v2 v1 v0   \n\
movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
"

#define INTRINSICS_UNPACK_16 \
    mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
    mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_fc); \
    mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
    mm0 = _mm_srli_pi16(mm0, 3); \
    mm4 = (__m64)(uint64_t)0; \
    mm5 = mm0; \
    mm7 = mm2; \
    \
    mm2 = _mm_unpacklo_pi8(mm2, mm4); \
    mm0 = _mm_unpacklo_pi8(mm0, mm1); \
    mm2 = _mm_slli_pi16(mm2, 3); \
    mm0 = _mm_or_si64(mm0, mm2); \
    tmp64 = *(uint64_t *)(p_y + 8); \
    mm6 = (__m64)tmp64; \
    *(uint64_t *)p_buffer = (uint64_t)mm0; \
    \
    mm7 = _mm_unpackhi_pi8(mm7, mm4); \
    mm5 = _mm_unpackhi_pi8(mm5, mm1); \
    mm7 = _mm_slli_pi16(mm7, 3); \
    tmp64 = (uint64_t)*(uint32_t *)(p_u + 4); \
    mm0 = (__m64)tmp64; \
    mm5 = _mm_or_si64(mm5, mm7); \
    tmp64 = (uint64_t)*(uint32_t *)(p_v + 4); \
    mm1 = (__m64)tmp64; \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;

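/*
 * Same idea for RGB16: the layout produced by MMX_UNPACK_16, restated
 * per pixel (pack_rgb565 is an illustrative helper, not a VLC function):
 */
static inline uint16_t pack_rgb565(uint8_t r, uint8_t g, uint8_t b)
{
    return (uint16_t)(((r & 0xf8) << 8)   /* r7..r3 -> bits 15..11 */
                    | ((g & 0xfc) << 3)   /* g7..g2 -> bits 10..5  */
                    |  (b >> 3));         /* b7..b3 -> bits  4..0  */
}
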
/*
 * convert RGB plane to RGB packed format,
 * mm0 -> B, mm1 -> R, mm2 -> G, mm3 -> 0,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */

#define MMX_UNPACK_32 "                                                     \n\
pxor      %%mm3, %%mm3  # zero mm3                                          \n\
movq      %%mm0, %%mm6  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
movq      %%mm1, %%mm7  #                 R7 R6 R5 R4 R3 R2 R1 R0           \n\
movq      %%mm0, %%mm4  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
movq      %%mm1, %%mm5  #                 R7 R6 R5 R4 R3 R2 R1 R0           \n\
punpcklbw %%mm2, %%mm6  #                 G3 B3 G2 B2 G1 B1 G0 B0           \n\
punpcklbw %%mm3, %%mm7  #                 00 R3 00 R2 00 R1 00 R0           \n\
punpcklwd %%mm7, %%mm6  #                 00 R1 G1 B1 00 R0 G0 B0           \n\
movq      %%mm6, (%3)   # Store ARGB1 ARGB0                                 \n\
movq      %%mm0, %%mm6  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
punpcklbw %%mm2, %%mm6  #                 G3 B3 G2 B2 G1 B1 G0 B0           \n\
punpckhwd %%mm7, %%mm6  #                 00 R3 G3 B3 00 R2 G2 B2           \n\
movq      %%mm6, 8(%3)  # Store ARGB3 ARGB2                                 \n\
punpckhbw %%mm2, %%mm4  #                 G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpckhbw %%mm3, %%mm5  #                 00 R7 00 R6 00 R5 00 R4           \n\
punpcklwd %%mm5, %%mm4  #                 00 R5 G5 B5 00 R4 G4 B4           \n\
movq      %%mm4, 16(%3) # Store ARGB5 ARGB4                                 \n\
movq      %%mm0, %%mm4  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
punpckhbw %%mm2, %%mm4  #                 G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpckhwd %%mm5, %%mm4  #                 00 R7 G7 B7 00 R6 G6 B6           \n\
movq      %%mm4, 24(%3) # Store ARGB7 ARGB6                                 \n\
                                                                            \n\
#movd      4(%1), %%mm0  # Load 4 Cb       00 00 00 00 u3 u2 u1 u0           \n\
#movd      4(%2), %%mm1  # Load 4 Cr       00 00 00 00 v3 v2 v1 v0           \n\
#pxor      %%mm4, %%mm4  # zero mm4                                          \n\
#movq      8(%0), %%mm6  # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0           \n\
"

#define INTRINSICS_UNPACK_32 \
    mm3 = (__m64)(uint64_t)0; \
    mm6 = mm0; \
    mm7 = mm1; \
    mm4 = mm0; \
    mm5 = mm1; \
    mm6 = _mm_unpacklo_pi8(mm6, mm2); \
    mm7 = _mm_unpacklo_pi8(mm7, mm3); \
    mm6 = _mm_unpacklo_pi16(mm6, mm7); \
    *(uint64_t *)p_buffer = (uint64_t)mm6; \
    mm6 = mm0; \
    mm6 = _mm_unpacklo_pi8(mm6, mm2); \
    mm6 = _mm_unpackhi_pi16(mm6, mm7); \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6; \
    mm4 = _mm_unpackhi_pi8(mm4, mm2); \
    mm5 = _mm_unpackhi_pi8(mm5, mm3); \
    mm4 = _mm_unpacklo_pi16(mm4, mm5); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm4; \
    mm4 = mm0; \
    mm4 = _mm_unpackhi_pi8(mm4, mm2); \
    mm4 = _mm_unpackhi_pi16(mm4, mm5); \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm4;

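/*
 * The 32-bit layout produced by MMX_UNPACK_32, restated per pixel: each
 * pixel lands in memory as B, G, R, 00, i.e. 0x00RRGGBB little-endian
 * (pack_rgb32 is an illustrative helper, not a VLC function):
 */
static inline uint32_t pack_rgb32(uint8_t r, uint8_t g, uint8_t b)
{
    return ((uint32_t)r << 16) | ((uint32_t)g << 8) | (uint32_t)b;
}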