/*****************************************************************************
 * transforms_yuvmmx.h: MMX YUV transformation assembly
 *****************************************************************************
 * Copyright (C) 1999-2004 the VideoLAN team
 *
 * Authors: Olie Lho <ollie@sis.com.tw>
 *          Gaël Hendryckx <jimmy@via.ecp.fr>
 *          Samuel Hocevar <sam@zoy.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
 *****************************************************************************/
/* hope these constant values are cache line aligned */
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)
#define USED_U64(foo) \
    static const uint64_t foo __asm__ (#foo) __attribute__((used))
#else
#define USED_U64(foo) \
    static const uint64_t foo __asm__ (#foo) __attribute__((unused))
#endif
USED_U64(mmx_80w)     = 0x0080008000800080ULL;
USED_U64(mmx_10w)     = 0x1010101010101010ULL;
USED_U64(mmx_00ffw)   = 0x00ff00ff00ff00ffULL;
USED_U64(mmx_Y_coeff) = 0x253f253f253f253fULL;

USED_U64(mmx_U_green) = 0xf37df37df37df37dULL;
USED_U64(mmx_U_blue)  = 0x4093409340934093ULL;
USED_U64(mmx_V_red)   = 0x3312331233123312ULL;
USED_U64(mmx_V_green) = 0xe5fce5fce5fce5fcULL;

USED_U64(mmx_mask_f8) = 0xf8f8f8f8f8f8f8f8ULL;
USED_U64(mmx_mask_fc) = 0xfcfcfcfcfcfcfcfcULL;
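/*
 * These constants appear to be ITU-R BT.601 conversion coefficients in
 * signed fixed point: the inputs are pre-shifted left by 3 and pmulhw keeps
 * the high 16 bits of the 32-bit product, so a coefficient stored as
 * round(c * 8192) multiplies by roughly c:
 *
 *   mmx_Y_coeff = 0x253f =  9535 ->  9535 / 8192 ~  1.164  (luma scale)
 *   mmx_V_red   = 0x3312 = 13074 -> 13074 / 8192 ~  1.596  (Cr -> R)
 *   mmx_U_green = 0xf37d = -3203 -> -3203 / 8192 ~ -0.391  (Cb -> G)
 *   mmx_V_green = 0xe5fc = -6660 -> -6660 / 8192 ~ -0.813  (Cr -> G)
 *   mmx_U_blue  = 0x4093 = 16531 -> 16531 / 8192 ~  2.018  (Cb -> B)
 *
 * which matches the usual equations
 *   R = 1.164 * (Y - 16) + 1.596 * (Cr - 128)
 *   G = 1.164 * (Y - 16) - 0.813 * (Cr - 128) - 0.391 * (Cb - 128)
 *   B = 1.164 * (Y - 16) + 2.018 * (Cb - 128)
 * mmx_80w and mmx_10w are the 128 and 16 biases, mmx_00ffw selects the even
 * luma bytes, and mmx_mask_f8 / mmx_mask_fc keep the top 5 / 6 bits of a
 * channel for the 15/16-bit packing below.
 */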
#define MMX_INIT_16 " \n\
movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
#movl $0, (%3) # cache preload for image \n\
"
#define INTRINSICS_INIT_16 \
    tmp64 = *(uint32_t *)p_u; \
    mm0 = (__m64)tmp64; \
    tmp64 = *(uint32_t *)p_v; \
    mm1 = (__m64)tmp64; \
    mm4 = (__m64)(uint64_t)0; \
    mm6 = (__m64)*(uint64_t *)p_y; \
    /* *(uint16_t *)p_buffer = 0; */
#define MMX_INIT_16_GRAY " \n\
movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
#movl $0, (%3) # cache preload for image \n\
"
#define MMX_INIT_32 " \n\
movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movl $0, (%3) # cache preload for image \n\
movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"
#define INTRINSICS_INIT_32 \
    tmp64 = *(uint32_t *)p_u; \
    mm0 = (__m64)tmp64; \
    *(uint16_t *)p_buffer = 0; \
    tmp64 = *(uint32_t *)p_v; \
    mm1 = (__m64)tmp64; \
    mm4 = (__m64)(uint64_t)0; \
    mm6 = (__m64)*(uint64_t *)p_y;
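/*
 * Each INIT fragment loads 8 luma samples but only 4 Cb and 4 Cr samples,
 * so one pass over the macros below converts 8 horizontally adjacent
 * pixels, with every chroma pair shared by two neighbouring pixels.  A
 * caller would therefore be expected to advance its pointers roughly as
 *
 *     p_y += 8; p_u += 4; p_v += 4;    (per batch of 8 output pixels)
 *
 * which is consistent with the 8(%0), 4(%1) and 4(%2) reloads done in the
 * UNPACK fragments further down.
 */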
/*
 * Do the multiply part of the conversion for even and odd pixels,
 * register usage:
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
 * mm6 -> Y even, mm7 -> Y odd
 */
#define MMX_YUV_MUL " \n\
# convert the chroma part \n\
punpcklbw %%mm4, %%mm0 # scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 \n\
punpcklbw %%mm4, %%mm1 # scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 \n\
psubsw mmx_80w, %%mm0 # Cb -= 128 \n\
psubsw mmx_80w, %%mm1 # Cr -= 128 \n\
psllw $3, %%mm0 # Promote precision \n\
psllw $3, %%mm1 # Promote precision \n\
movq %%mm0, %%mm2 # Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 \n\
movq %%mm1, %%mm3 # Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 \n\
pmulhw mmx_U_green, %%mm2 # Mul Cb with green coeff -> Cb green \n\
pmulhw mmx_V_green, %%mm3 # Mul Cr with green coeff -> Cr green \n\
pmulhw mmx_U_blue, %%mm0 # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 \n\
pmulhw mmx_V_red, %%mm1 # Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 \n\
paddsw %%mm3, %%mm2 # Cb green + Cr green -> Cgreen \n\
\n\
# convert the luma part \n\
psubusb mmx_10w, %%mm6 # Y -= 16 \n\
movq %%mm6, %%mm7 # Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
pand mmx_00ffw, %%mm6 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0 \n\
psrlw $8, %%mm7 # get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 \n\
psllw $3, %%mm6 # Promote precision \n\
psllw $3, %%mm7 # Promote precision \n\
pmulhw mmx_Y_coeff, %%mm6 # Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 \n\
pmulhw mmx_Y_coeff, %%mm7 # Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 \n\
"
#define INTRINSICS_YUV_MUL \
    mm0 = _mm_unpacklo_pi8(mm0, mm4); \
    mm1 = _mm_unpacklo_pi8(mm1, mm4); \
    mm0 = _mm_subs_pi16(mm0, (__m64)mmx_80w); \
    mm1 = _mm_subs_pi16(mm1, (__m64)mmx_80w); \
    mm0 = _mm_slli_pi16(mm0, 3); \
    mm1 = _mm_slli_pi16(mm1, 3); \
    mm2 = mm0; \
    mm3 = mm1; \
    mm2 = _mm_mulhi_pi16(mm2, (__m64)mmx_U_green); \
    mm3 = _mm_mulhi_pi16(mm3, (__m64)mmx_V_green); \
    mm0 = _mm_mulhi_pi16(mm0, (__m64)mmx_U_blue); \
    mm1 = _mm_mulhi_pi16(mm1, (__m64)mmx_V_red); \
    mm2 = _mm_adds_pi16(mm2, mm3); \
    \
    mm6 = _mm_subs_pu8(mm6, (__m64)mmx_10w); \
    mm7 = mm6; \
    mm6 = _mm_and_si64(mm6, (__m64)mmx_00ffw); \
    mm7 = _mm_srli_pi16(mm7, 8); \
    mm6 = _mm_slli_pi16(mm6, 3); \
    mm7 = _mm_slli_pi16(mm7, 3); \
    mm6 = _mm_mulhi_pi16(mm6, (__m64)mmx_Y_coeff); \
    mm7 = _mm_mulhi_pi16(mm7, (__m64)mmx_Y_coeff);
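#if 0
/* Scalar sketch of the multiply stage above, kept for reference only and
 * not compiled; the function and parameter names are illustrative, not
 * part of this header.  It assumes the BT.601 coefficients noted next to
 * the constant table. */
static inline void yuv_mul_sketch( int y_even, int y_odd, int u, int v,
                                   int *p_cblue, int *p_cred, int *p_cgreen,
                                   int *p_y_even, int *p_y_odd )
{
    /* pmulhw keeps the high 16 bits of the product; inputs are shifted << 3 */
    *p_cblue  = ( (u - 128) * 8 * 16531 ) >> 16;              /* mmx_U_blue  */
    *p_cred   = ( (v - 128) * 8 * 13074 ) >> 16;              /* mmx_V_red   */
    *p_cgreen = ( ( (u - 128) * 8 * -3203 ) >> 16 )           /* mmx_U_green */
              + ( ( (v - 128) * 8 * -6660 ) >> 16 );          /* mmx_V_green */
    *p_y_even = ( (y_even - 16) * 8 * 9535 ) >> 16;           /* mmx_Y_coeff */
    *p_y_odd  = ( (y_odd  - 16) * 8 * 9535 ) >> 16;
}
#endif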
/*
 * Do the addition part of the conversion for even and odd pixels,
 * register usage:
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
 * mm6 -> Y even, mm7 -> Y odd
 */
#define MMX_YUV_ADD " \n\
# Do horizontal and vertical scaling \n\
movq %%mm0, %%mm3 # Copy Cblue \n\
movq %%mm1, %%mm4 # Copy Cred \n\
movq %%mm2, %%mm5 # Copy Cgreen \n\
paddsw %%mm6, %%mm0 # Y even + Cblue 00 B6 00 B4 00 B2 00 B0 \n\
paddsw %%mm7, %%mm3 # Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 \n\
paddsw %%mm6, %%mm1 # Y even + Cred 00 R6 00 R4 00 R2 00 R0 \n\
paddsw %%mm7, %%mm4 # Y odd + Cred 00 R7 00 R5 00 R3 00 R1 \n\
paddsw %%mm6, %%mm2 # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 \n\
paddsw %%mm7, %%mm5 # Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 \n\
\n\
# Limit RGB even to 0..255 \n\
packuswb %%mm0, %%mm0 # B6 B4 B2 B0 / B6 B4 B2 B0 \n\
packuswb %%mm1, %%mm1 # R6 R4 R2 R0 / R6 R4 R2 R0 \n\
packuswb %%mm2, %%mm2 # G6 G4 G2 G0 / G6 G4 G2 G0 \n\
\n\
# Limit RGB odd to 0..255 \n\
packuswb %%mm3, %%mm3 # B7 B5 B3 B1 / B7 B5 B3 B1 \n\
packuswb %%mm4, %%mm4 # R7 R5 R3 R1 / R7 R5 R3 R1 \n\
packuswb %%mm5, %%mm5 # G7 G5 G3 G1 / G7 G5 G3 G1 \n\
\n\
# Interleave RGB even and odd \n\
punpcklbw %%mm3, %%mm0 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%mm4, %%mm1 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%mm5, %%mm2 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
"
#define INTRINSICS_YUV_ADD \
    mm3 = mm0; \
    mm4 = mm1; \
    mm5 = mm2; \
    mm0 = _mm_adds_pi16(mm0, mm6); \
    mm3 = _mm_adds_pi16(mm3, mm7); \
    mm1 = _mm_adds_pi16(mm1, mm6); \
    mm4 = _mm_adds_pi16(mm4, mm7); \
    mm2 = _mm_adds_pi16(mm2, mm6); \
    mm5 = _mm_adds_pi16(mm5, mm7); \
    \
    mm0 = _mm_packs_pu16(mm0, mm0); \
    mm1 = _mm_packs_pu16(mm1, mm1); \
    mm2 = _mm_packs_pu16(mm2, mm2); \
    \
    mm3 = _mm_packs_pu16(mm3, mm3); \
    mm4 = _mm_packs_pu16(mm4, mm4); \
    mm5 = _mm_packs_pu16(mm5, mm5); \
    \
    mm0 = _mm_unpacklo_pi8(mm0, mm3); \
    mm1 = _mm_unpacklo_pi8(mm1, mm4); \
    mm2 = _mm_unpacklo_pi8(mm2, mm5);
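#if 0
/* Scalar sketch of the add/saturate stage above, not compiled; clamp_u8()
 * and the variable names are illustrative, not part of this header.  The
 * chroma terms are added to both the even and the odd luma value, each
 * channel is clamped to 0..255 (what packuswb does), and even/odd results
 * are interleaved back into pixel order by the punpcklbw steps. */
static inline uint8_t clamp_u8( int i_x )
{
    return i_x < 0 ? 0 : i_x > 255 ? 255 : (uint8_t)i_x;
}

/* for one even/odd pixel pair sharing cblue/cred/cgreen:                    */
/*   b_even = clamp_u8( y_even + cblue  );  b_odd = clamp_u8( y_odd + cblue  ); */
/*   r_even = clamp_u8( y_even + cred   );  r_odd = clamp_u8( y_odd + cred   ); */
/*   g_even = clamp_u8( y_even + cgreen );  g_odd = clamp_u8( y_odd + cgreen ); */
#endif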
/*
 * Grayscale case, only use Y
 */
#define MMX_YUV_GRAY " \n\
# convert the luma part \n\
psubusb mmx_10w, %%mm6 # Y -= 16 \n\
movq %%mm6, %%mm7 # Copy 8 Y \n\
pand mmx_00ffw, %%mm6 # get Y even \n\
psrlw $8, %%mm7 # get Y odd \n\
psllw $3, %%mm6 # Promote precision \n\
psllw $3, %%mm7 # Promote precision \n\
pmulhw mmx_Y_coeff, %%mm6 # Mul 4 Y even \n\
pmulhw mmx_Y_coeff, %%mm7 # Mul 4 Y odd \n\
packuswb %%mm6, %%mm6 # Clamp Y even to 0..255 \n\
packuswb %%mm7, %%mm7 # Clamp Y odd to 0..255 \n\
punpcklbw %%mm7, %%mm6 # Interleave Y even and odd \n\
"
#define MMX_UNPACK_16_GRAY " \n\
movq %%mm6, %%mm5 # Copy 8 gray values \n\
pand mmx_mask_f8, %%mm6 # keep top 5 bits (red/blue field) \n\
pand mmx_mask_fc, %%mm5 # keep top 6 bits (green field) \n\
movq %%mm6, %%mm7 \n\
psrlw $3, %%mm7 # blue field, per byte \n\
pxor %%mm3, %%mm3 # zero mm3 \n\
movq %%mm7, %%mm2 \n\
movq %%mm5, %%mm0 \n\
punpcklbw %%mm3, %%mm5 \n\
punpcklbw %%mm6, %%mm7 \n\
psllw $3, %%mm5 # green field into place \n\
por %%mm5, %%mm7 \n\
movq %%mm7, (%3) # store pixel 0-3 \n\
punpckhbw %%mm3, %%mm0 \n\
punpckhbw %%mm6, %%mm2 \n\
psllw $3, %%mm0 # green field into place \n\
movq 8(%0), %%mm6 # Load 8 Y \n\
por %%mm0, %%mm2 \n\
movq %%mm2, 8(%3) # store pixel 4-7 \n\
"
/*
 * convert RGB plane to RGB 15 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */
#define MMX_UNPACK_15 " \n\
# mask unneeded bits off \n\
pand mmx_mask_f8, %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
psrlw $3,%%mm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pand mmx_mask_f8, %%mm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
pand mmx_mask_f8, %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $1,%%mm1 # __r7r6r5 r4r3____ __r7r6r5 r4r3____ \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq %%mm0, %%mm5 # Copy B7-B0 \n\
movq %%mm2, %%mm7 # Copy G7-G0 \n\
\n\
# convert rgb24 plane to rgb15 pack for pixel 0-3 \n\
punpcklbw %%mm4, %%mm2 # ________ ________ g7g6g5g4 g3______ \n\
punpcklbw %%mm1, %%mm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%mm2 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%mm2, %%mm0 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movq 8(%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
movq %%mm0, (%3) # store pixel 0-3 \n\
\n\
# convert rgb24 plane to rgb15 pack for pixel 4-7 \n\
punpckhbw %%mm4, %%mm7 # ________ ________ g7g6g5g4 g3______ \n\
punpckhbw %%mm1, %%mm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%mm7 # ________ ____g7g6 g5g4g3__ ________ \n\
movd 4(%1), %%mm0 # Load 4 Cb __ __ __ __ u3 u2 u1 u0 \n\
por %%mm7, %%mm5 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movd 4(%2), %%mm1 # Load 4 Cr __ __ __ __ v3 v2 v1 v0 \n\
movq %%mm5, 8(%3) # store pixel 4-7 \n\
"
#define INTRINSICS_UNPACK_15 \
    mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
    mm0 = _mm_srli_pi16(mm0, 3); \
    mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_f8); \
    mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
    mm1 = _mm_srli_pi16(mm1, 1); \
    mm4 = (__m64)(uint64_t)0; \
    mm5 = mm0; \
    mm7 = mm2; \
    \
    mm2 = _mm_unpacklo_pi8(mm2, mm4); \
    mm0 = _mm_unpacklo_pi8(mm0, mm1); \
    mm2 = _mm_slli_pi16(mm2, 2); \
    mm0 = _mm_or_si64(mm0, mm2); \
    tmp64 = *(uint64_t *)(p_y + 8); \
    mm6 = (__m64)tmp64; \
    *(uint64_t *)p_buffer = (uint64_t)mm0; \
    \
    mm7 = _mm_unpackhi_pi8(mm7, mm4); \
    mm5 = _mm_unpackhi_pi8(mm5, mm1); \
    mm7 = _mm_slli_pi16(mm7, 2); \
    tmp64 = (uint64_t)*(uint32_t *)(p_u + 4); \
    mm0 = (__m64)tmp64; \
    mm5 = _mm_or_si64(mm5, mm7); \
    tmp64 = (uint64_t)*(uint32_t *)(p_v + 4); \
    mm1 = (__m64)tmp64; \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;
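/*
 * The words stored above are standard xRGB1555: for 8-bit channels the
 * pixel built here is essentially
 *
 *     rgb15 = ((r & 0xf8) << 7) | ((g & 0xf8) << 2) | (b >> 3)
 *
 * B>>3 and R>>1 already sit in separate byte lanes, G is shifted into
 * place with a left shift by 2, and the three fields are ORed together
 * before two pixels at a time are written out.
 */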
/*
 * convert RGB plane to RGB 16 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */
#define MMX_UNPACK_16 " \n\
# mask unneeded bits off \n\
pand mmx_mask_f8, %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
pand mmx_mask_fc, %%mm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
pand mmx_mask_f8, %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $3,%%mm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq %%mm0, %%mm5 # Copy B7-B0 \n\
movq %%mm2, %%mm7 # Copy G7-G0 \n\
\n\
# convert rgb24 plane to rgb16 pack for pixel 0-3 \n\
punpcklbw %%mm4, %%mm2 # ________ ________ g7g6g5g4 g3g2____ \n\
punpcklbw %%mm1, %%mm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%mm2 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%mm2, %%mm0 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movq 8(%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
movq %%mm0, (%3) # store pixel 0-3 \n\
\n\
# convert rgb24 plane to rgb16 pack for pixel 4-7 \n\
punpckhbw %%mm4, %%mm7 # ________ ________ g7g6g5g4 g3g2____ \n\
punpckhbw %%mm1, %%mm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%mm7 # ________ __g7g6g5 g4g3g2__ ________ \n\
movd 4(%1), %%mm0 # Load 4 Cb __ __ __ __ u3 u2 u1 u0 \n\
por %%mm7, %%mm5 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movd 4(%2), %%mm1 # Load 4 Cr __ __ __ __ v3 v2 v1 v0 \n\
movq %%mm5, 8(%3) # store pixel 4-7 \n\
"
#define INTRINSICS_UNPACK_16 \
    mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
    mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_fc); \
    mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
    mm0 = _mm_srli_pi16(mm0, 3); \
    mm4 = (__m64)(uint64_t)0; \
    mm5 = mm0; \
    mm7 = mm2; \
    \
    mm2 = _mm_unpacklo_pi8(mm2, mm4); \
    mm0 = _mm_unpacklo_pi8(mm0, mm1); \
    mm2 = _mm_slli_pi16(mm2, 3); \
    mm0 = _mm_or_si64(mm0, mm2); \
    mm6 = (__m64)*(uint64_t *)(p_y + 8); \
    *(uint64_t *)p_buffer = (uint64_t)mm0; \
    \
    mm7 = _mm_unpackhi_pi8(mm7, mm4); \
    mm5 = _mm_unpackhi_pi8(mm5, mm1); \
    mm7 = _mm_slli_pi16(mm7, 3); \
    mm0 = (__m64)(uint64_t)*(uint32_t *)(p_u + 4); \
    mm5 = _mm_or_si64(mm5, mm7); \
    mm1 = (__m64)(uint64_t)*(uint32_t *)(p_v + 4); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;
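/*
 * Same idea for RGB565: the pixel built above is essentially
 *     rgb16 = ((r & 0xf8) << 8) | ((g & 0xfc) << 3) | (b >> 3)
 */

#if 0
/* Minimal usage sketch, not compiled and not part of this header: the
 * fragments are meant to be concatenated inside a single inline-asm
 * statement, with the operand order implied by the comments above
 * (%0 = Y, %1 = Cb, %2 = Cr, %3 = output buffer).  Loop control, the MMX
 * register clobber list and the trailing emms are left out here. */
static inline void yuv_to_rgb565_8pix_sketch( uint8_t *p_y, uint8_t *p_u,
                                              uint8_t *p_v, uint16_t *p_buffer )
{
    __asm__ __volatile__( MMX_INIT_16
                          MMX_YUV_MUL
                          MMX_YUV_ADD
                          MMX_UNPACK_16
                          : : "r" (p_y), "r" (p_u), "r" (p_v), "r" (p_buffer)
                          : "memory" );
}
#endif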
/*
 * convert RGB plane to RGB packed format,
 * mm0 -> B, mm1 -> R, mm2 -> G, mm3 -> 0,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */
#define MMX_UNPACK_32 " \n\
pxor %%mm3, %%mm3 # zero mm3 \n\
movq %%mm0, %%mm6 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
movq %%mm1, %%mm7 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
movq %%mm0, %%mm4 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
movq %%mm1, %%mm5 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%mm2, %%mm6 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
punpcklbw %%mm3, %%mm7 # 00 R3 00 R2 00 R1 00 R0 \n\
punpcklwd %%mm7, %%mm6 # 00 R1 G1 B1 00 R0 G0 B0 \n\
movq %%mm6, (%3) # Store ARGB1 ARGB0 \n\
movq %%mm0, %%mm6 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%mm2, %%mm6 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
punpckhwd %%mm7, %%mm6 # 00 R3 G3 B3 00 R2 G2 B2 \n\
movq %%mm6, 8(%3) # Store ARGB3 ARGB2 \n\
punpckhbw %%mm2, %%mm4 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpckhbw %%mm3, %%mm5 # 00 R7 00 R6 00 R5 00 R4 \n\
punpcklwd %%mm5, %%mm4 # 00 R5 G5 B5 00 R4 G4 B4 \n\
movq %%mm4, 16(%3) # Store ARGB5 ARGB4 \n\
movq %%mm0, %%mm4 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpckhbw %%mm2, %%mm4 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpckhwd %%mm5, %%mm4 # 00 R7 G7 B7 00 R6 G6 B6 \n\
movq %%mm4, 24(%3) # Store ARGB7 ARGB6 \n\
\n\
#movd 4(%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
#movd 4(%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
#pxor %%mm4, %%mm4 # zero mm4 \n\
#movq 8(%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"
#define INTRINSICS_UNPACK_32 \
    mm3 = (__m64)(uint64_t)0; \
    mm6 = mm0; \
    mm7 = mm1; \
    mm4 = mm0; \
    mm5 = mm1; \
    mm6 = _mm_unpacklo_pi8(mm6, mm2); \
    mm7 = _mm_unpacklo_pi8(mm7, mm3); \
    mm6 = _mm_unpacklo_pi16(mm6, mm7); \
    *(uint64_t *)p_buffer = (uint64_t)mm6; \
    mm6 = mm0; \
    mm6 = _mm_unpacklo_pi8(mm6, mm2); \
    mm6 = _mm_unpackhi_pi16(mm6, mm7); \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6; \
    mm4 = _mm_unpackhi_pi8(mm4, mm2); \
    mm5 = _mm_unpackhi_pi8(mm5, mm3); \
    mm4 = _mm_unpacklo_pi16(mm4, mm5); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm4; \
    mm4 = mm0; \
    mm4 = _mm_unpackhi_pi8(mm4, mm2); \
    mm4 = _mm_unpackhi_pi16(mm4, mm5); \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm4; \