/*****************************************************************************
 * transforms_yuvmmx.h: MMX YUV transformation assembly
 *****************************************************************************
 * Copyright (C) 1999-2007 the VideoLAN team
 *
 * Authors: Olie Lho <ollie@sis.com.tw>
 *          Gaël Hendryckx <jimmy@via.ecp.fr>
 *          Samuel Hocevar <sam@zoy.org>
 *          Damien Fouilleul <damienf@videolan.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/
/* hope these constant values are cache line aligned */
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)
#define USED_U64(foo) \
    static const uint64_t foo __asm__ (#foo) __attribute__((used))
#else
#define USED_U64(foo) \
    static const uint64_t foo __asm__ (#foo) __attribute__((unused))
#endif
USED_U64(mmx_80w)     = 0x0080008000800080ULL;
USED_U64(mmx_10w)     = 0x1010101010101010ULL;
USED_U64(mmx_00ffw)   = 0x00ff00ff00ff00ffULL;
USED_U64(mmx_Y_coeff) = 0x253f253f253f253fULL;

USED_U64(mmx_U_green) = 0xf37df37df37df37dULL;
USED_U64(mmx_U_blue)  = 0x4093409340934093ULL;
USED_U64(mmx_V_red)   = 0x3312331233123312ULL;
USED_U64(mmx_V_green) = 0xe5fce5fce5fce5fcULL;

USED_U64(mmx_mask_f8) = 0xf8f8f8f8f8f8f8f8ULL;
USED_U64(mmx_mask_fc) = 0xfcfcfcfcfcfcfcfcULL;
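/* For reference: the constants above are the ITU-R BT.601 coefficients in
 * signed 16-bit fixed point, scaled so that pmulhw (which keeps the high 16
 * bits of the 32-bit product) yields the intended term once the inputs have
 * been promoted by psllw $3, i.e. an effective division by 8192.  A minimal
 * scalar sketch of the same arithmetic follows; the function name and the
 * plain-int arithmetic are illustrative assumptions only, nothing in this
 * header uses them.  0x0c83 and 0x1a04 are the magnitudes of the negative
 * coefficients mmx_U_green (0xf37d) and mmx_V_green (0xe5fc). */
static inline void yuv_to_rgb_scalar_ref( int y, int u, int v,
                                          int *r, int *g, int *b )
{
    int luma = (y - 16) * 0x253f >> 13;                    /* ~ 1.164 * (Y-16) */
    int cb   = u - 128;
    int cr   = v - 128;

    *r = luma + (cr * 0x3312 >> 13);                       /* + 1.596 Cr */
    *g = luma - (cb * 0x0c83 >> 13) - (cr * 0x1a04 >> 13); /* - 0.391 Cb - 0.813 Cr */
    *b = luma + (cb * 0x4093 >> 13);                       /* + 2.018 Cb */
    /* still needs clamping to 0..255, which the MMX/SSE2 code below gets
     * for free from packuswb */
}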
/* Use RIP-relative code in PIC mode on amd64 */
#if defined(__x86_64__) && defined(__PIC__)
#   define G "(%%rip)"
#else
#   define G
#endif
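/* With G defined as above, an operand written as mmx_80w"G" in the asm
 * templates below pastes into "mmx_80w(%rip)" for PIC builds on x86-64 and
 * into plain "mmx_80w" otherwise (shown for illustration only, assuming the
 * definition of G above). */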
#define MMX_INIT_16 " \n\
movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"

#define SSE2_INIT_16_ALIGNED " \n\
prefetcht1 (%3) # cache preload for image \n\
movq (%1), %%xmm0 # Load 8 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movq (%2), %%xmm1 # Load 8 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%xmm4, %%xmm4 # zero mm4 \n\
movdqa (%0), %%xmm6 # Load 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"

#define SSE2_INIT_16_UNALIGNED " \n\
prefetcht1 (%3) # cache preload for image \n\
movq (%1), %%xmm0 # Load 8 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movq (%2), %%xmm1 # Load 8 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%xmm4, %%xmm4 # zero mm4 \n\
movdqu (%0), %%xmm6 # Load 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"
#define MMX_INTRINSICS_INIT_16 \
    tmp64 = *(uint32_t *)p_u; \
    mm0 = (__m64)tmp64; \
    tmp64 = *(uint32_t *)p_v; \
    mm1 = (__m64)tmp64; \
    mm4 = _mm_setzero_si64(); \
    mm6 = (__m64)*(uint64_t *)p_y;

#define SSE2_INTRINSICS_INIT_16_ALIGNED \
    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
    xmm4 = _mm_setzero_si128(); \
    xmm6 = _mm_load_si128((__m128i *)p_y);

#define SSE2_INTRINSICS_INIT_16_UNALIGNED \
    _mm_prefetch(p_buffer, _MM_HINT_T1); \
    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
    xmm4 = _mm_setzero_si128(); \
    xmm6 = _mm_loadu_si128((__m128i *)p_y);
#define MMX_INIT_16_GRAY " \n\
movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
#movl $0, (%3) # cache preload for image \n\
"

#define MMX_INIT_32 " \n\
movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movl $0, (%3) # cache preload for image \n\
movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"
#define SSE2_INIT_32_ALIGNED " \n\
movq (%1), %%xmm0 # Load 8 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movq (%2), %%xmm1 # Load 8 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%xmm4, %%xmm4 # zero mm4 \n\
movdqa (%0), %%xmm6 # Load 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"

#define SSE2_INIT_32_UNALIGNED " \n\
prefetcht1 (%3) # cache preload for image \n\
movq (%1), %%xmm0 # Load 8 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movq (%2), %%xmm1 # Load 8 Cr 00 00 00 00 v3 v2 v1 v0 \n\
pxor %%xmm4, %%xmm4 # zero mm4 \n\
movdqu (%0), %%xmm6 # Load 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"
#define MMX_INTRINSICS_INIT_32 \
    tmp64 = *(uint32_t *)p_u; \
    mm0 = (__m64)tmp64; \
    *(uint16_t *)p_buffer = 0; \
    tmp64 = *(uint32_t *)p_v; \
    mm1 = (__m64)tmp64; \
    mm4 = _mm_setzero_si64(); \
    mm6 = (__m64)*(uint64_t *)p_y;

#define SSE2_INTRINSICS_INIT_32_ALIGNED \
    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
    xmm4 = _mm_setzero_si128(); \
    xmm6 = _mm_load_si128((__m128i *)p_y);

#define SSE2_INTRINSICS_INIT_32_UNALIGNED \
    _mm_prefetch(p_buffer, _MM_HINT_T1); \
    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
    xmm4 = _mm_setzero_si128(); \
    xmm6 = _mm_loadu_si128((__m128i *)p_y);
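/* Note on operands: in all of the asm fragments in this file %0 is the Y
 * plane pointer, %1 the Cb/U pointer, %2 the Cr/V pointer and %3 the output
 * pixel buffer; the *_INTRINSICS_* twins expect the same data in local
 * variables named p_y, p_u, p_v and p_buffer, plus tmp64 and the mm0-mm7 /
 * xmm0-xmm7 temporaries.  The fragments only assume that the including
 * converter supplies those operands and names; nothing here allocates them. */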
/*****************************************************************************
 * Do the multiply part of the conversion for even and odd pixels,
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
 * mm6 -> Y even, mm7 -> Y odd
 *****************************************************************************/
#define MMX_YUV_MUL " \n\
# convert the chroma part \n\
punpcklbw %%mm4, %%mm0 # scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 \n\
punpcklbw %%mm4, %%mm1 # scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 \n\
psubsw mmx_80w"G", %%mm0 # Cb -= 128 \n\
psubsw mmx_80w"G", %%mm1 # Cr -= 128 \n\
psllw $3, %%mm0 # Promote precision \n\
psllw $3, %%mm1 # Promote precision \n\
movq %%mm0, %%mm2 # Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 \n\
movq %%mm1, %%mm3 # Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 \n\
pmulhw mmx_U_green"G", %%mm2 # Mul Cb with green coeff -> Cb green \n\
pmulhw mmx_V_green"G", %%mm3 # Mul Cr with green coeff -> Cr green \n\
pmulhw mmx_U_blue"G", %%mm0 # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 \n\
pmulhw mmx_V_red"G", %%mm1 # Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 \n\
paddsw %%mm3, %%mm2 # Cb green + Cr green -> Cgreen \n\
# convert the luma part \n\
psubusb mmx_10w"G", %%mm6 # Y -= 16 \n\
movq %%mm6, %%mm7 # Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
pand mmx_00ffw"G", %%mm6 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0 \n\
psrlw $8, %%mm7 # get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 \n\
psllw $3, %%mm6 # Promote precision \n\
psllw $3, %%mm7 # Promote precision \n\
pmulhw mmx_Y_coeff"G", %%mm6 # Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 \n\
pmulhw mmx_Y_coeff"G", %%mm7 # Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 \n\
"
#define SSE2_YUV_MUL " \n\
# convert the chroma part \n\
punpcklbw %%xmm4, %%xmm0 # scatter 8 Cb 00 u3 00 u2 00 u1 00 u0 \n\
punpcklbw %%xmm4, %%xmm1 # scatter 8 Cr 00 v3 00 v2 00 v1 00 v0 \n\
movl $0x00800080, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to 0080 0080 ... 0080 0080 \n\
psubsw %%xmm5, %%xmm0 # Cb -= 128 \n\
psubsw %%xmm5, %%xmm1 # Cr -= 128 \n\
psllw $3, %%xmm0 # Promote precision \n\
psllw $3, %%xmm1 # Promote precision \n\
movdqa %%xmm0, %%xmm2 # Copy 8 Cb 00 u3 00 u2 00 u1 00 u0 \n\
movdqa %%xmm1, %%xmm3 # Copy 8 Cr 00 v3 00 v2 00 v1 00 v0 \n\
movl $0xf37df37d, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to f37d f37d ... f37d f37d \n\
pmulhw %%xmm5, %%xmm2 # Mul Cb with green coeff -> Cb green \n\
movl $0xe5fce5fc, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to e5fc e5fc ... e5fc e5fc \n\
pmulhw %%xmm5, %%xmm3 # Mul Cr with green coeff -> Cr green \n\
movl $0x40934093, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to 4093 4093 ... 4093 4093 \n\
pmulhw %%xmm5, %%xmm0 # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 \n\
movl $0x33123312, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to 3312 3312 ... 3312 3312 \n\
pmulhw %%xmm5, %%xmm1 # Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 \n\
paddsw %%xmm3, %%xmm2 # Cb green + Cr green -> Cgreen \n\
# convert the luma part \n\
movl $0x10101010, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to 1010 1010 ... 1010 1010 \n\
psubusb %%xmm5, %%xmm6 # Y -= 16 \n\
movdqa %%xmm6, %%xmm7 # Copy 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
movl $0x00ff00ff, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to 00ff 00ff ... 00ff 00ff \n\
pand %%xmm5, %%xmm6 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0 \n\
psrlw $8, %%xmm7 # get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 \n\
psllw $3, %%xmm6 # Promote precision \n\
psllw $3, %%xmm7 # Promote precision \n\
movl $0x253f253f, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to 253f 253f ... 253f 253f \n\
pmulhw %%xmm5, %%xmm6 # Mul 8 Y even 00 y6 00 y4 00 y2 00 y0 \n\
pmulhw %%xmm5, %%xmm7 # Mul 8 Y odd 00 y7 00 y5 00 y3 00 y1 \n\
"
#define MMX_INTRINSICS_YUV_MUL \
    mm0 = _mm_unpacklo_pi8(mm0, mm4); \
    mm1 = _mm_unpacklo_pi8(mm1, mm4); \
    mm0 = _mm_subs_pi16(mm0, (__m64)mmx_80w); \
    mm1 = _mm_subs_pi16(mm1, (__m64)mmx_80w); \
    mm0 = _mm_slli_pi16(mm0, 3); \
    mm1 = _mm_slli_pi16(mm1, 3); \
    mm2 = mm0; \
    mm3 = mm1; \
    mm2 = _mm_mulhi_pi16(mm2, (__m64)mmx_U_green); \
    mm3 = _mm_mulhi_pi16(mm3, (__m64)mmx_V_green); \
    mm0 = _mm_mulhi_pi16(mm0, (__m64)mmx_U_blue); \
    mm1 = _mm_mulhi_pi16(mm1, (__m64)mmx_V_red); \
    mm2 = _mm_adds_pi16(mm2, mm3); \
    mm6 = _mm_subs_pu8(mm6, (__m64)mmx_10w); \
    mm7 = mm6; \
    mm6 = _mm_and_si64(mm6, (__m64)mmx_00ffw); \
    mm7 = _mm_srli_pi16(mm7, 8); \
    mm6 = _mm_slli_pi16(mm6, 3); \
    mm7 = _mm_slli_pi16(mm7, 3); \
    mm6 = _mm_mulhi_pi16(mm6, (__m64)mmx_Y_coeff); \
    mm7 = _mm_mulhi_pi16(mm7, (__m64)mmx_Y_coeff);
#define SSE2_INTRINSICS_YUV_MUL \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm4); \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm4); \
    xmm5 = _mm_set1_epi32(0x00800080UL); \
    xmm0 = _mm_subs_epi16(xmm0, xmm5); \
    xmm1 = _mm_subs_epi16(xmm1, xmm5); \
    xmm0 = _mm_slli_epi16(xmm0, 3); \
    xmm1 = _mm_slli_epi16(xmm1, 3); \
    xmm2 = xmm0; \
    xmm3 = xmm1; \
    xmm5 = _mm_set1_epi32(0xf37df37dUL); \
    xmm2 = _mm_mulhi_epi16(xmm2, xmm5); \
    xmm5 = _mm_set1_epi32(0xe5fce5fcUL); \
    xmm3 = _mm_mulhi_epi16(xmm3, xmm5); \
    xmm5 = _mm_set1_epi32(0x40934093UL); \
    xmm0 = _mm_mulhi_epi16(xmm0, xmm5); \
    xmm5 = _mm_set1_epi32(0x33123312UL); \
    xmm1 = _mm_mulhi_epi16(xmm1, xmm5); \
    xmm2 = _mm_adds_epi16(xmm2, xmm3); \
    xmm5 = _mm_set1_epi32(0x10101010UL); \
    xmm6 = _mm_subs_epu8(xmm6, xmm5); \
    xmm7 = xmm6; \
    xmm5 = _mm_set1_epi32(0x00ff00ffUL); \
    xmm6 = _mm_and_si128(xmm6, xmm5); \
    xmm7 = _mm_srli_epi16(xmm7, 8); \
    xmm6 = _mm_slli_epi16(xmm6, 3); \
    xmm7 = _mm_slli_epi16(xmm7, 3); \
    xmm5 = _mm_set1_epi32(0x253f253fUL); \
    xmm6 = _mm_mulhi_epi16(xmm6, xmm5); \
    xmm7 = _mm_mulhi_epi16(xmm7, xmm5);
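/* Worked example of the fixed-point scheme above (illustrative numbers):
 * for Cb = 240 the chroma term is (240 - 128) << 3 = 896, and pmulhw with
 * mmx_U_blue gives (896 * 0x4093) >> 16 = 226, i.e. the expected
 * 2.018 * (Cb - 128).  The same promote-by-8 / keep-high-16-bits pattern is
 * what every pmulhw in MMX_YUV_MUL and SSE2_YUV_MUL relies on. */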
/*****************************************************************************
 * Do the addition part of the conversion for even and odd pixels,
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
 * mm6 -> Y even, mm7 -> Y odd
 *****************************************************************************/
#define MMX_YUV_ADD " \n\
# Do horizontal and vertical scaling \n\
movq %%mm0, %%mm3 # Copy Cblue \n\
movq %%mm1, %%mm4 # Copy Cred \n\
movq %%mm2, %%mm5 # Copy Cgreen \n\
paddsw %%mm6, %%mm0 # Y even + Cblue 00 B6 00 B4 00 B2 00 B0 \n\
paddsw %%mm7, %%mm3 # Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 \n\
paddsw %%mm6, %%mm1 # Y even + Cred 00 R6 00 R4 00 R2 00 R0 \n\
paddsw %%mm7, %%mm4 # Y odd + Cred 00 R7 00 R5 00 R3 00 R1 \n\
paddsw %%mm6, %%mm2 # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 \n\
paddsw %%mm7, %%mm5 # Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 \n\
# Limit RGB even to 0..255 \n\
packuswb %%mm0, %%mm0 # B6 B4 B2 B0 / B6 B4 B2 B0 \n\
packuswb %%mm1, %%mm1 # R6 R4 R2 R0 / R6 R4 R2 R0 \n\
packuswb %%mm2, %%mm2 # G6 G4 G2 G0 / G6 G4 G2 G0 \n\
# Limit RGB odd to 0..255 \n\
packuswb %%mm3, %%mm3 # B7 B5 B3 B1 / B7 B5 B3 B1 \n\
packuswb %%mm4, %%mm4 # R7 R5 R3 R1 / R7 R5 R3 R1 \n\
packuswb %%mm5, %%mm5 # G7 G5 G3 G1 / G7 G5 G3 G1 \n\
# Interleave RGB even and odd \n\
punpcklbw %%mm3, %%mm0 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%mm4, %%mm1 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%mm5, %%mm2 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
"
#define SSE2_YUV_ADD " \n\
# Do horizontal and vertical scaling \n\
movdqa %%xmm0, %%xmm3 # Copy Cblue \n\
movdqa %%xmm1, %%xmm4 # Copy Cred \n\
movdqa %%xmm2, %%xmm5 # Copy Cgreen \n\
paddsw %%xmm6, %%xmm0 # Y even + Cblue 00 B6 00 B4 00 B2 00 B0 \n\
paddsw %%xmm7, %%xmm3 # Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 \n\
paddsw %%xmm6, %%xmm1 # Y even + Cred 00 R6 00 R4 00 R2 00 R0 \n\
paddsw %%xmm7, %%xmm4 # Y odd + Cred 00 R7 00 R5 00 R3 00 R1 \n\
paddsw %%xmm6, %%xmm2 # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 \n\
paddsw %%xmm7, %%xmm5 # Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 \n\
# Limit RGB even to 0..255 \n\
packuswb %%xmm0, %%xmm0 # B6 B4 B2 B0 / B6 B4 B2 B0 \n\
packuswb %%xmm1, %%xmm1 # R6 R4 R2 R0 / R6 R4 R2 R0 \n\
packuswb %%xmm2, %%xmm2 # G6 G4 G2 G0 / G6 G4 G2 G0 \n\
# Limit RGB odd to 0..255 \n\
packuswb %%xmm3, %%xmm3 # B7 B5 B3 B1 / B7 B5 B3 B1 \n\
packuswb %%xmm4, %%xmm4 # R7 R5 R3 R1 / R7 R5 R3 R1 \n\
packuswb %%xmm5, %%xmm5 # G7 G5 G3 G1 / G7 G5 G3 G1 \n\
# Interleave RGB even and odd \n\
punpcklbw %%xmm3, %%xmm0 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%xmm4, %%xmm1 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%xmm5, %%xmm2 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
"
#define MMX_INTRINSICS_YUV_ADD \
    mm3 = mm0; \
    mm4 = mm1; \
    mm5 = mm2; \
    mm0 = _mm_adds_pi16(mm0, mm6); \
    mm3 = _mm_adds_pi16(mm3, mm7); \
    mm1 = _mm_adds_pi16(mm1, mm6); \
    mm4 = _mm_adds_pi16(mm4, mm7); \
    mm2 = _mm_adds_pi16(mm2, mm6); \
    mm5 = _mm_adds_pi16(mm5, mm7); \
    mm0 = _mm_packs_pu16(mm0, mm0); \
    mm1 = _mm_packs_pu16(mm1, mm1); \
    mm2 = _mm_packs_pu16(mm2, mm2); \
    mm3 = _mm_packs_pu16(mm3, mm3); \
    mm4 = _mm_packs_pu16(mm4, mm4); \
    mm5 = _mm_packs_pu16(mm5, mm5); \
    mm0 = _mm_unpacklo_pi8(mm0, mm3); \
    mm1 = _mm_unpacklo_pi8(mm1, mm4); \
    mm2 = _mm_unpacklo_pi8(mm2, mm5);
#define SSE2_INTRINSICS_YUV_ADD \
    xmm3 = xmm0; \
    xmm4 = xmm1; \
    xmm5 = xmm2; \
    xmm0 = _mm_adds_epi16(xmm0, xmm6); \
    xmm3 = _mm_adds_epi16(xmm3, xmm7); \
    xmm1 = _mm_adds_epi16(xmm1, xmm6); \
    xmm4 = _mm_adds_epi16(xmm4, xmm7); \
    xmm2 = _mm_adds_epi16(xmm2, xmm6); \
    xmm5 = _mm_adds_epi16(xmm5, xmm7); \
    xmm0 = _mm_packus_epi16(xmm0, xmm0); \
    xmm1 = _mm_packus_epi16(xmm1, xmm1); \
    xmm2 = _mm_packus_epi16(xmm2, xmm2); \
    xmm3 = _mm_packus_epi16(xmm3, xmm3); \
    xmm4 = _mm_packus_epi16(xmm4, xmm4); \
    xmm5 = _mm_packus_epi16(xmm5, xmm5); \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm3); \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm4); \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm5);
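/* Note on the ADD step: paddsw keeps each per-component sum in signed
 * 16-bit range and packuswb then saturates it to 0..255 while packing back
 * to bytes, so no explicit clamping code is needed.  For example a
 * luma+chroma sum of 300 is stored as 255 and a sum of -20 as 0. */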
/*****************************************************************************
 * Grayscale case, only use Y
 *****************************************************************************/
#define MMX_YUV_GRAY " \n\
# convert the luma part \n\
psubusb mmx_10w"G", %%mm6 \n\
movq %%mm6, %%mm7 \n\
pand mmx_00ffw"G", %%mm6 \n\
psrlw $8, %%mm7 \n\
psllw $3, %%mm6 \n\
psllw $3, %%mm7 \n\
pmulhw mmx_Y_coeff"G", %%mm6 \n\
pmulhw mmx_Y_coeff"G", %%mm7 \n\
packuswb %%mm6, %%mm6 \n\
packuswb %%mm7, %%mm7 \n\
punpcklbw %%mm7, %%mm6 \n\
"
#define MMX_UNPACK_16_GRAY " \n\
movq %%mm6, %%mm5 \n\
pand mmx_mask_f8"G", %%mm6 \n\
pand mmx_mask_fc"G", %%mm5 \n\
movq %%mm6, %%mm7 \n\
pxor %%mm3, %%mm3 \n\
movq %%mm7, %%mm2 \n\
movq %%mm5, %%mm0 \n\
punpcklbw %%mm3, %%mm5 \n\
punpcklbw %%mm6, %%mm7 \n\
punpckhbw %%mm3, %%mm0 \n\
punpckhbw %%mm6, %%mm2 \n\
movq 8(%0), %%mm6 \n\
movq %%mm2, 8(%3) \n\
"
/*****************************************************************************
 * convert RGB plane to RGB 15 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 *****************************************************************************/
#define MMX_UNPACK_15 " \n\
# mask unneeded bits off \n\
pand mmx_mask_f8"G", %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
psrlw $3,%%mm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pand mmx_mask_f8"G", %%mm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
pand mmx_mask_f8"G", %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $1,%%mm1 # __r7r6r5 r4r3____ __r7r6r5 r4r3____ \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq %%mm0, %%mm5 # Copy B7-B0 \n\
movq %%mm2, %%mm7 # Copy G7-G0 \n\
# convert rgb24 plane to rgb15 pack for pixel 0-3 \n\
punpcklbw %%mm4, %%mm2 # ________ ________ g7g6g5g4 g3______ \n\
punpcklbw %%mm1, %%mm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%mm2 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%mm2, %%mm0 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movq 8(%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
movq %%mm0, (%3) # store pixel 0-3 \n\
# convert rgb24 plane to rgb15 pack for pixel 4-7 \n\
punpckhbw %%mm4, %%mm7 # ________ ________ g7g6g5g4 g3______ \n\
punpckhbw %%mm1, %%mm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%mm7 # ________ ____g7g6 g5g4g3__ ________ \n\
movd 4(%1), %%mm0 # Load 4 Cb __ __ __ __ u3 u2 u1 u0 \n\
por %%mm7, %%mm5 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movd 4(%2), %%mm1 # Load 4 Cr __ __ __ __ v3 v2 v1 v0 \n\
movq %%mm5, 8(%3) # store pixel 4-7 \n\
"
#define SSE2_UNPACK_15_ALIGNED " \n\
# mask unneeded bits off \n\
movl $0xf8f8f8f8, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to f8f8 f8f8 ... f8f8 f8f8 \n\
pand %%xmm5, %%xmm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
psrlw $3,%%xmm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pand %%xmm5, %%xmm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
pand %%xmm5, %%xmm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $1,%%xmm1 # __r7r6r5 r4r3____ __r7r6r5 r4r3____ \n\
pxor %%xmm4, %%xmm4 # zero mm4 \n\
movdqa %%xmm0, %%xmm5 # Copy B15-B0 \n\
movdqa %%xmm2, %%xmm7 # Copy G15-G0 \n\
# convert rgb24 plane to rgb15 pack for pixel 0-7 \n\
punpcklbw %%xmm4, %%xmm2 # ________ ________ g7g6g5g4 g3______ \n\
punpcklbw %%xmm1, %%xmm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%xmm2 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%xmm2, %%xmm0 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movntdq %%xmm0, (%3) # store pixel 0-7 \n\
# convert rgb24 plane to rgb15 pack for pixel 8-15 \n\
punpckhbw %%xmm4, %%xmm7 # ________ ________ g7g6g5g4 g3______ \n\
punpckhbw %%xmm1, %%xmm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%xmm7 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%xmm7, %%xmm5 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movntdq %%xmm5, 16(%3) # store pixel 8-15 \n\
"
#define SSE2_UNPACK_15_UNALIGNED " \n\
# mask unneeded bits off \n\
movl $0xf8f8f8f8, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to f8f8 f8f8 ... f8f8 f8f8 \n\
pand %%xmm5, %%xmm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
psrlw $3,%%xmm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pand %%xmm5, %%xmm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
pand %%xmm5, %%xmm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $1,%%xmm1 # __r7r6r5 r4r3____ __r7r6r5 r4r3____ \n\
pxor %%xmm4, %%xmm4 # zero mm4 \n\
movdqa %%xmm0, %%xmm5 # Copy B15-B0 \n\
movdqa %%xmm2, %%xmm7 # Copy G15-G0 \n\
# convert rgb24 plane to rgb15 pack for pixel 0-7 \n\
punpcklbw %%xmm4, %%xmm2 # ________ ________ g7g6g5g4 g3______ \n\
punpcklbw %%xmm1, %%xmm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%xmm2 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%xmm2, %%xmm0 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movdqu %%xmm0, (%3) # store pixel 0-7 \n\
# convert rgb24 plane to rgb15 pack for pixel 8-15 \n\
punpckhbw %%xmm4, %%xmm7 # ________ ________ g7g6g5g4 g3______ \n\
punpckhbw %%xmm1, %%xmm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $2,%%xmm7 # ________ ____g7g6 g5g4g3__ ________ \n\
por %%xmm7, %%xmm5 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
movdqu %%xmm5, 16(%3) # store pixel 8-15 \n\
"
#define MMX_INTRINSICS_UNPACK_15 \
    mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
    mm0 = _mm_srli_pi16(mm0, 3); \
    mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_f8); \
    mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
    mm1 = _mm_srli_pi16(mm1, 1); \
    mm4 = _mm_setzero_si64(); \
    mm5 = mm0; \
    mm7 = mm2; \
    mm2 = _mm_unpacklo_pi8(mm2, mm4); \
    mm0 = _mm_unpacklo_pi8(mm0, mm1); \
    mm2 = _mm_slli_pi16(mm2, 2); \
    mm0 = _mm_or_si64(mm0, mm2); \
    tmp64 = *(uint64_t *)(p_y + 8); \
    mm6 = (__m64)tmp64; \
    *(uint64_t *)p_buffer = (uint64_t)mm0; \
    mm7 = _mm_unpackhi_pi8(mm7, mm4); \
    mm5 = _mm_unpackhi_pi8(mm5, mm1); \
    mm7 = _mm_slli_pi16(mm7, 2); \
    tmp64 = (uint64_t)*(uint32_t *)(p_u + 4); \
    mm0 = (__m64)tmp64; \
    mm5 = _mm_or_si64(mm5, mm7); \
    tmp64 = (uint64_t)*(uint32_t *)(p_v + 4); \
    mm1 = (__m64)tmp64; \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;
#define SSE2_INTRINSICS_UNPACK_15_ALIGNED \
    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL); \
    xmm0 = _mm_and_si128(xmm0, xmm5); \
    xmm0 = _mm_srli_epi16(xmm0, 3); \
    xmm2 = _mm_and_si128(xmm2, xmm5); \
    xmm1 = _mm_and_si128(xmm1, xmm5); \
    xmm1 = _mm_srli_epi16(xmm1, 1); \
    xmm4 = _mm_setzero_si128(); \
    xmm5 = xmm0; \
    xmm7 = xmm2; \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4); \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
    xmm2 = _mm_slli_epi16(xmm2, 2); \
    xmm0 = _mm_or_si128(xmm0, xmm2); \
    _mm_stream_si128((__m128i*)p_buffer, xmm0); \
    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4); \
    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1); \
    xmm7 = _mm_slli_epi16(xmm7, 2); \
    xmm5 = _mm_or_si128(xmm5, xmm7); \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm5);
#define SSE2_INTRINSICS_UNPACK_15_UNALIGNED \
    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL); \
    xmm0 = _mm_and_si128(xmm0, xmm5); \
    xmm0 = _mm_srli_epi16(xmm0, 3); \
    xmm2 = _mm_and_si128(xmm2, xmm5); \
    xmm1 = _mm_and_si128(xmm1, xmm5); \
    xmm1 = _mm_srli_epi16(xmm1, 1); \
    xmm4 = _mm_setzero_si128(); \
    xmm5 = xmm0; \
    xmm7 = xmm2; \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4); \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
    xmm2 = _mm_slli_epi16(xmm2, 2); \
    xmm0 = _mm_or_si128(xmm0, xmm2); \
    _mm_storeu_si128((__m128i*)p_buffer, xmm0); \
    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4); \
    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1); \
    xmm7 = _mm_slli_epi16(xmm7, 2); \
    xmm5 = _mm_or_si128(xmm5, xmm7); \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5);
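/* The 15-bit unpack variants above produce one little-endian 16-bit word
 * per pixel with bit 15 unused and 5 bits each for R (bits 14-10),
 * G (bits 9-5) and B (bits 4-0), i.e. xRGB1555; that is what the f8 masks
 * and the shifts by 3, 1 and 2 amount to. */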
/*****************************************************************************
 * convert RGB plane to RGB 16 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 *****************************************************************************/
#define MMX_UNPACK_16 " \n\
# mask unneeded bits off \n\
pand mmx_mask_f8"G", %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
pand mmx_mask_fc"G", %%mm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
pand mmx_mask_f8"G", %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $3,%%mm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq %%mm0, %%mm5 # Copy B7-B0 \n\
movq %%mm2, %%mm7 # Copy G7-G0 \n\
# convert rgb24 plane to rgb16 pack for pixel 0-3 \n\
punpcklbw %%mm4, %%mm2 # ________ ________ g7g6g5g4 g3g2____ \n\
punpcklbw %%mm1, %%mm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%mm2 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%mm2, %%mm0 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movq 8(%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
movq %%mm0, (%3) # store pixel 0-3 \n\
# convert rgb24 plane to rgb16 pack for pixel 4-7 \n\
punpckhbw %%mm4, %%mm7 # ________ ________ g7g6g5g4 g3g2____ \n\
punpckhbw %%mm1, %%mm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%mm7 # ________ __g7g6g5 g4g3g2__ ________ \n\
movd 4(%1), %%mm0 # Load 4 Cb __ __ __ __ u3 u2 u1 u0 \n\
por %%mm7, %%mm5 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movd 4(%2), %%mm1 # Load 4 Cr __ __ __ __ v3 v2 v1 v0 \n\
movq %%mm5, 8(%3) # store pixel 4-7 \n\
"
#define SSE2_UNPACK_16_ALIGNED " \n\
# mask unneeded bits off \n\
movl $0xf8f8f8f8, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to f8f8 f8f8 ... f8f8 f8f8 \n\
pand %%xmm5, %%xmm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
pand %%xmm5, %%xmm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
movl $0xfcfcfcfc, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to fcfc fcfc ... fcfc fcfc \n\
pand %%xmm5, %%xmm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
psrlw $3,%%xmm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pxor %%xmm4, %%xmm4 # zero mm4 \n\
movdqa %%xmm0, %%xmm5 # Copy B15-B0 \n\
movdqa %%xmm2, %%xmm7 # Copy G15-G0 \n\
# convert rgb24 plane to rgb16 pack for pixel 0-7 \n\
punpcklbw %%xmm4, %%xmm2 # ________ ________ g7g6g5g4 g3g2____ \n\
punpcklbw %%xmm1, %%xmm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%xmm2 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%xmm2, %%xmm0 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movntdq %%xmm0, (%3) # store pixel 0-7 \n\
# convert rgb24 plane to rgb16 pack for pixel 8-15 \n\
punpckhbw %%xmm4, %%xmm7 # ________ ________ g7g6g5g4 g3g2____ \n\
punpckhbw %%xmm1, %%xmm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%xmm7 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%xmm7, %%xmm5 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movntdq %%xmm5, 16(%3) # store pixel 8-15 \n\
"
#define SSE2_UNPACK_16_UNALIGNED " \n\
# mask unneeded bits off \n\
movl $0xf8f8f8f8, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to f8f8 f8f8 ... f8f8 f8f8 \n\
pand %%xmm5, %%xmm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
pand %%xmm5, %%xmm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
movl $0xfcfcfcfc, %%eax # \n\
movd %%eax, %%xmm5 # \n\
pshufd $0, %%xmm5, %%xmm5 # set xmm5 to fcfc fcfc ... fcfc fcfc \n\
pand %%xmm5, %%xmm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
psrlw $3,%%xmm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pxor %%xmm4, %%xmm4 # zero mm4 \n\
movdqa %%xmm0, %%xmm5 # Copy B15-B0 \n\
movdqa %%xmm2, %%xmm7 # Copy G15-G0 \n\
# convert rgb24 plane to rgb16 pack for pixel 0-7 \n\
punpcklbw %%xmm4, %%xmm2 # ________ ________ g7g6g5g4 g3g2____ \n\
punpcklbw %%xmm1, %%xmm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%xmm2 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%xmm2, %%xmm0 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movdqu %%xmm0, (%3) # store pixel 0-7 \n\
# convert rgb24 plane to rgb16 pack for pixel 8-15 \n\
punpckhbw %%xmm4, %%xmm7 # ________ ________ g7g6g5g4 g3g2____ \n\
punpckhbw %%xmm1, %%xmm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
psllw $3,%%xmm7 # ________ __g7g6g5 g4g3g2__ ________ \n\
por %%xmm7, %%xmm5 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
movdqu %%xmm5, 16(%3) # store pixel 8-15 \n\
"
#define MMX_INTRINSICS_UNPACK_16 \
    mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
    mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_fc); \
    mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
    mm0 = _mm_srli_pi16(mm0, 3); \
    mm4 = _mm_setzero_si64(); \
    mm5 = mm0; \
    mm7 = mm2; \
    mm2 = _mm_unpacklo_pi8(mm2, mm4); \
    mm0 = _mm_unpacklo_pi8(mm0, mm1); \
    mm2 = _mm_slli_pi16(mm2, 3); \
    mm0 = _mm_or_si64(mm0, mm2); \
    tmp64 = *(uint64_t *)(p_y + 8); \
    mm6 = (__m64)tmp64; \
    *(uint64_t *)p_buffer = (uint64_t)mm0; \
    mm7 = _mm_unpackhi_pi8(mm7, mm4); \
    mm5 = _mm_unpackhi_pi8(mm5, mm1); \
    mm7 = _mm_slli_pi16(mm7, 3); \
    tmp64 = (uint64_t)*(uint32_t *)(p_u + 4); \
    mm0 = (__m64)tmp64; \
    mm5 = _mm_or_si64(mm5, mm7); \
    tmp64 = (uint64_t)*(uint32_t *)(p_v + 4); \
    mm1 = (__m64)tmp64; \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;
#define SSE2_INTRINSICS_UNPACK_16_ALIGNED \
    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL); \
    xmm0 = _mm_and_si128(xmm0, xmm5); \
    xmm1 = _mm_and_si128(xmm1, xmm5); \
    xmm5 = _mm_set1_epi32(0xfcfcfcfcUL); \
    xmm2 = _mm_and_si128(xmm2, xmm5); \
    xmm0 = _mm_srli_epi16(xmm0, 3); \
    xmm4 = _mm_setzero_si128(); \
    xmm5 = xmm0; \
    xmm7 = xmm2; \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4); \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
    xmm2 = _mm_slli_epi16(xmm2, 3); \
    xmm0 = _mm_or_si128(xmm0, xmm2); \
    _mm_stream_si128((__m128i*)p_buffer, xmm0); \
    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4); \
    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1); \
    xmm7 = _mm_slli_epi16(xmm7, 3); \
    xmm5 = _mm_or_si128(xmm5, xmm7); \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm5);
#define SSE2_INTRINSICS_UNPACK_16_UNALIGNED \
    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL); \
    xmm0 = _mm_and_si128(xmm0, xmm5); \
    xmm1 = _mm_and_si128(xmm1, xmm5); \
    xmm5 = _mm_set1_epi32(0xfcfcfcfcUL); \
    xmm2 = _mm_and_si128(xmm2, xmm5); \
    xmm0 = _mm_srli_epi16(xmm0, 3); \
    xmm4 = _mm_setzero_si128(); \
    xmm5 = xmm0; \
    xmm7 = xmm2; \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4); \
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
    xmm2 = _mm_slli_epi16(xmm2, 3); \
    xmm0 = _mm_or_si128(xmm0, xmm2); \
    _mm_storeu_si128((__m128i*)p_buffer, xmm0); \
    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4); \
    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1); \
    xmm7 = _mm_slli_epi16(xmm7, 3); \
    xmm5 = _mm_or_si128(xmm5, xmm7); \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5);
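/* Illustrative only: a minimal sketch of how the fragments above are meant
 * to be chained for one batch of 8 pixels of RGB565 output (one 16-bit word
 * per pixel, rrrrrggggggbbbbb).  The function name, the bare "r" constraints
 * and the trailing emms are assumptions made for the example; the real
 * converters that include this header drive the macros from their own
 * scan-line loops, and MMX_UNPACK_16 already starts loading the next
 * batch's Y/Cb/Cr, so p_y/p_u/p_v must have at least one more batch
 * available. */
static inline void i420_to_rgb565_8px_sketch( uint8_t *p_y, uint8_t *p_u,
                                              uint8_t *p_v,
                                              uint16_t *p_buffer )
{
    __asm__ volatile( MMX_INIT_16      /* load 8 Y, 4 Cb, 4 Cr              */
                      MMX_YUV_MUL      /* fixed-point multiplies            */
                      MMX_YUV_ADD      /* add, saturate, interleave         */
                      MMX_UNPACK_16    /* pack to RGB565, store 16 bytes    */
                      "emms \n"        /* leave MMX state (sketch only)     */
                      : : "r" (p_y), "r" (p_u), "r" (p_v), "r" (p_buffer)
                      : "memory" );
}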
/*****************************************************************************
 * convert RGB plane to RGB packed format,
 * mm0 -> B, mm1 -> R, mm2 -> G
 *****************************************************************************/
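/* Byte-order note for the 32-bit paths below: the *_ARGB variants write
 * each pixel as B, G, R, 0 in memory order (the little-endian value
 * 0x00RRGGBB, alpha left at zero), while the *_BGRA variants write the
 * reversed order 0, R, G, B, as the per-instruction comments indicate. */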
#define MMX_UNPACK_32_ARGB " \n\
pxor %%mm3, %%mm3 # zero mm3 \n\
movq %%mm0, %%mm4 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%mm2, %%mm4 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
movq %%mm1, %%mm5 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%mm3, %%mm5 # 00 R3 00 R2 00 R1 00 R0 \n\
movq %%mm4, %%mm6 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
punpcklwd %%mm5, %%mm4 # 00 R1 B1 G1 00 R0 B0 G0 \n\
movq %%mm4, (%3) # Store ARGB1 ARGB0 \n\
punpckhwd %%mm5, %%mm6 # 00 R3 B3 G3 00 R2 B2 G2 \n\
movq %%mm6, 8(%3) # Store ARGB3 ARGB2 \n\
punpckhbw %%mm2, %%mm0 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpckhbw %%mm3, %%mm1 # 00 R7 00 R6 00 R5 00 R4 \n\
movq %%mm0, %%mm5 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpcklwd %%mm1, %%mm5 # 00 R5 B5 G5 00 R4 B4 G4 \n\
movq %%mm5, 16(%3) # Store ARGB5 ARGB4 \n\
punpckhwd %%mm1, %%mm0 # 00 R7 B7 G7 00 R6 B6 G6 \n\
movq %%mm0, 24(%3) # Store ARGB7 ARGB6 \n\
"
#define SSE2_UNPACK_32_ARGB_ALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero xmm3 \n\
movdqa %%xmm0, %%xmm4 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%xmm2, %%xmm4 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
movdqa %%xmm1, %%xmm5 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%xmm3, %%xmm5 # 00 R3 00 R2 00 R1 00 R0 \n\
movdqa %%xmm4, %%xmm6 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
punpcklwd %%xmm5, %%xmm4 # 00 R1 B1 G1 00 R0 B0 G0 \n\
movntdq %%xmm4, (%3) # Store ARGB3 ARGB2 ARGB1 ARGB0 \n\
punpckhwd %%xmm5, %%xmm6 # 00 R3 B3 G3 00 R2 B2 G2 \n\
movntdq %%xmm6, 16(%3) # Store ARGB7 ARGB6 ARGB5 ARGB4 \n\
punpckhbw %%xmm2, %%xmm0 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpckhbw %%xmm3, %%xmm1 # 00 R7 00 R6 00 R5 00 R4 \n\
movdqa %%xmm0, %%xmm5 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpcklwd %%xmm1, %%xmm5 # 00 R5 B5 G5 00 R4 B4 G4 \n\
movntdq %%xmm5, 32(%3) # Store ARGB11 ARGB10 ARGB9 ARGB8 \n\
punpckhwd %%xmm1, %%xmm0 # 00 R7 B7 G7 00 R6 B6 G6 \n\
movntdq %%xmm0, 48(%3) # Store ARGB15 ARGB14 ARGB13 ARGB12 \n\
"
#define SSE2_UNPACK_32_ARGB_UNALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero xmm3 \n\
movdqa %%xmm0, %%xmm4 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
punpcklbw %%xmm2, %%xmm4 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
movdqa %%xmm1, %%xmm5 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
punpcklbw %%xmm3, %%xmm5 # 00 R3 00 R2 00 R1 00 R0 \n\
movdqa %%xmm4, %%xmm6 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
punpcklwd %%xmm5, %%xmm4 # 00 R1 B1 G1 00 R0 B0 G0 \n\
movdqu %%xmm4, (%3) # Store ARGB3 ARGB2 ARGB1 ARGB0 \n\
punpckhwd %%xmm5, %%xmm6 # 00 R3 B3 G3 00 R2 B2 G2 \n\
movdqu %%xmm6, 16(%3) # Store ARGB7 ARGB6 ARGB5 ARGB4 \n\
punpckhbw %%xmm2, %%xmm0 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpckhbw %%xmm3, %%xmm1 # 00 R7 00 R6 00 R5 00 R4 \n\
movdqa %%xmm0, %%xmm5 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
punpcklwd %%xmm1, %%xmm5 # 00 R5 B5 G5 00 R4 B4 G4 \n\
movdqu %%xmm5, 32(%3) # Store ARGB11 ARGB10 ARGB9 ARGB8 \n\
punpckhwd %%xmm1, %%xmm0 # 00 R7 B7 G7 00 R6 B6 G6 \n\
movdqu %%xmm0, 48(%3) # Store ARGB15 ARGB14 ARGB13 ARGB12 \n\
"
#define MMX_INTRINSICS_UNPACK_32_ARGB \
    mm3 = _mm_setzero_si64(); \
    mm4 = mm0; \
    mm4 = _mm_unpacklo_pi8(mm4, mm2); \
    mm5 = mm1; \
    mm5 = _mm_unpacklo_pi8(mm5, mm3); \
    mm6 = mm4; \
    mm4 = _mm_unpacklo_pi16(mm4, mm5); \
    *(uint64_t *)p_buffer = (uint64_t)mm4; \
    mm6 = _mm_unpackhi_pi16(mm6, mm5); \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6; \
    mm0 = _mm_unpackhi_pi8(mm0, mm2); \
    mm1 = _mm_unpackhi_pi8(mm1, mm3); \
    mm5 = mm0; \
    mm5 = _mm_unpacklo_pi16(mm5, mm1); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5; \
    mm0 = _mm_unpackhi_pi16(mm0, mm1); \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;
#define SSE2_INTRINSICS_UNPACK_32_ARGB_ALIGNED \
    xmm3 = _mm_setzero_si128(); \
    xmm4 = xmm0; \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm2); \
    xmm5 = xmm1; \
    xmm5 = _mm_unpacklo_epi8(xmm5, xmm3); \
    xmm6 = xmm4; \
    xmm4 = _mm_unpacklo_epi16(xmm4, xmm5); \
    _mm_stream_si128((__m128i*)(p_buffer), xmm4); \
    xmm6 = _mm_unpackhi_epi16(xmm6, xmm5); \
    _mm_stream_si128((__m128i*)(p_buffer+4), xmm6); \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm2); \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3); \
    xmm5 = xmm0; \
    xmm5 = _mm_unpacklo_epi16(xmm5, xmm1); \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm5); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm1); \
    _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);
#define SSE2_INTRINSICS_UNPACK_32_ARGB_UNALIGNED \
    xmm3 = _mm_setzero_si128(); \
    xmm4 = xmm0; \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm2); \
    xmm5 = xmm1; \
    xmm5 = _mm_unpacklo_epi8(xmm5, xmm3); \
    xmm6 = xmm4; \
    xmm4 = _mm_unpacklo_epi16(xmm4, xmm5); \
    _mm_storeu_si128((__m128i*)(p_buffer), xmm4); \
    xmm6 = _mm_unpackhi_epi16(xmm6, xmm5); \
    _mm_storeu_si128((__m128i*)(p_buffer+4), xmm6); \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm2); \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3); \
    xmm5 = xmm0; \
    xmm5 = _mm_unpacklo_epi16(xmm5, xmm1); \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5); \
    xmm0 = _mm_unpackhi_epi16(xmm0, xmm1); \
    _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);
#define MMX_UNPACK_32_BGRA " \n\
pxor %%mm3, %%mm3 # zero mm3 \n\
movq %%mm2, %%mm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
punpcklbw %%mm0, %%mm4 # B3 G3 B2 G2 B1 G1 B0 G0 \n\
punpcklbw %%mm1, %%mm3 # R3 00 R2 00 R1 00 R0 00 \n\
movq %%mm3, %%mm5 # R3 00 R2 00 R1 00 R0 00 \n\
punpcklwd %%mm4, %%mm3 # B1 G1 R1 00 B0 G0 R0 00 \n\
movq %%mm3, (%3) # Store BGRA1 BGRA0 \n\
punpckhwd %%mm4, %%mm5 # B3 G3 R3 00 B2 G2 R2 00 \n\
movq %%mm5, 8(%3) # Store BGRA3 BGRA2 \n\
pxor %%mm3, %%mm3 # zero mm3 \n\
movq %%mm2, %%mm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
punpckhbw %%mm0, %%mm4 # B7 G7 B6 G6 B5 G5 B4 G4 \n\
punpckhbw %%mm1, %%mm3 # R7 00 R6 00 R5 00 R4 00 \n\
movq %%mm3, %%mm5 # R7 00 R6 00 R5 00 R4 00 \n\
punpcklwd %%mm4, %%mm3 # B5 G5 R5 00 B4 G4 R4 00 \n\
movq %%mm3, 16(%3) # Store BGRA5 BGRA4 \n\
punpckhwd %%mm4, %%mm5 # B7 G7 R7 00 B6 G6 R6 00 \n\
movq %%mm5, 24(%3) # Store BGRA7 BGRA6 \n\
"
#define SSE2_UNPACK_32_BGRA_ALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero mm3 \n\
movdqa %%xmm2, %%xmm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
punpcklbw %%xmm0, %%xmm4 # B3 G3 B2 G2 B1 G1 B0 G0 \n\
punpcklbw %%xmm1, %%xmm3 # R3 00 R2 00 R1 00 R0 00 \n\
movdqa %%xmm3, %%xmm5 # R3 00 R2 00 R1 00 R0 00 \n\
punpcklwd %%xmm4, %%xmm3 # B1 G1 R1 00 B0 G0 R0 00 \n\
movntdq %%xmm3, (%3) # Store BGRA3 BGRA2 BGRA1 BGRA0 \n\
punpckhwd %%xmm4, %%xmm5 # B3 G3 R3 00 B2 G2 R2 00 \n\
movntdq %%xmm5, 16(%3) # Store BGRA7 BGRA6 BGRA5 BGRA4 \n\
pxor %%xmm3, %%xmm3 # zero mm3 \n\
movdqa %%xmm2, %%xmm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
punpckhbw %%xmm0, %%xmm4 # B7 G7 B6 G6 B5 G5 B4 G4 \n\
punpckhbw %%xmm1, %%xmm3 # R7 00 R6 00 R5 00 R4 00 \n\
movdqa %%xmm3, %%xmm5 # R7 00 R6 00 R5 00 R4 00 \n\
punpcklwd %%xmm4, %%xmm3 # B5 G5 R5 00 B4 G4 R4 00 \n\
movntdq %%xmm3, 32(%3) # Store BGRA11 BGRA10 BGRA9 BGRA8 \n\
punpckhwd %%xmm4, %%xmm5 # B7 G7 R7 00 B6 G6 R6 00 \n\
movntdq %%xmm5, 48(%3) # Store BGRA15 BGRA14 BGRA13 BGRA12 \n\
"
#define SSE2_UNPACK_32_BGRA_UNALIGNED " \n\
pxor %%xmm3, %%xmm3 # zero mm3 \n\
movdqa %%xmm2, %%xmm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
punpcklbw %%xmm0, %%xmm4 # B3 G3 B2 G2 B1 G1 B0 G0 \n\
punpcklbw %%xmm1, %%xmm3 # R3 00 R2 00 R1 00 R0 00 \n\
movdqa %%xmm3, %%xmm5 # R3 00 R2 00 R1 00 R0 00 \n\
punpcklwd %%xmm4, %%xmm3 # B1 G1 R1 00 B0 G0 R0 00 \n\
movdqu %%xmm3, (%3) # Store BGRA3 BGRA2 BGRA1 BGRA0 \n\
punpckhwd %%xmm4, %%xmm5 # B3 G3 R3 00 B2 G2 R2 00 \n\
movdqu %%xmm5, 16(%3) # Store BGRA7 BGRA6 BGRA5 BGRA4 \n\
pxor %%xmm3, %%xmm3 # zero mm3 \n\
movdqa %%xmm2, %%xmm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
punpckhbw %%xmm0, %%xmm4 # B7 G7 B6 G6 B5 G5 B4 G4 \n\
punpckhbw %%xmm1, %%xmm3 # R7 00 R6 00 R5 00 R4 00 \n\
movdqa %%xmm3, %%xmm5 # R7 00 R6 00 R5 00 R4 00 \n\
punpcklwd %%xmm4, %%xmm3 # B5 G5 R5 00 B4 G4 R4 00 \n\
movdqu %%xmm3, 32(%3) # Store BGRA11 BGRA10 BGRA9 BGRA8 \n\
punpckhwd %%xmm4, %%xmm5 # B7 G7 R7 00 B6 G6 R6 00 \n\
movdqu %%xmm5, 48(%3) # Store BGRA15 BGRA14 BGRA13 BGRA12 \n\
"
#define MMX_INTRINSICS_UNPACK_32_BGRA \
    mm3 = _mm_setzero_si64(); \
    mm4 = _mm_unpacklo_pi8(mm4, mm0); \
    mm1 = _mm_unpacklo_pi8(mm1, mm3); \
    mm3 = _mm_unpacklo_pi16(mm3, mm4); \
    *(uint64_t *)p_buffer = (uint64_t)mm3; \
    mm5 = _mm_unpackhi_pi16(mm5, mm4); \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm5; \
    mm3 = _mm_setzero_si64(); \
    mm0 = _mm_unpackhi_pi8(mm0, mm4); \
    mm1 = _mm_unpackhi_pi8(mm1, mm3); \
    mm3 = _mm_unpacklo_pi16(mm3, mm1); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm3; \
    mm5 = _mm_unpackhi_pi16(mm5, mm4); \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm5;
#define SSE2_INTRINSICS_UNPACK_32_BGRA_ALIGNED \
    xmm3 = _mm_setzero_si128(); \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm0); \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm3); \
    xmm3 = _mm_unpacklo_epi16(xmm3, xmm4); \
    _mm_stream_si128((__m128i*)(p_buffer), xmm3); \
    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4); \
    _mm_stream_si128((__m128i*)(p_buffer+4), xmm5); \
    xmm3 = _mm_setzero_si128(); \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm4); \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3); \
    xmm3 = _mm_unpacklo_epi16(xmm3, xmm1); \
    _mm_stream_si128((__m128i*)(p_buffer+8), xmm3); \
    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4); \
    _mm_stream_si128((__m128i*)(p_buffer+12), xmm5);
#define SSE2_INTRINSICS_UNPACK_32_BGRA_UNALIGNED \
    xmm3 = _mm_setzero_si128(); \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm0); \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm3); \
    xmm3 = _mm_unpacklo_epi16(xmm3, xmm4); \
    _mm_storeu_si128((__m128i*)(p_buffer), xmm3); \
    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4); \
    _mm_storeu_si128((__m128i*)(p_buffer+4), xmm5); \
    xmm3 = _mm_setzero_si128(); \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm4); \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3); \
    xmm3 = _mm_unpacklo_epi16(xmm3, xmm1); \
    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm3); \
    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4); \
    _mm_storeu_si128((__m128i*)(p_buffer+12), xmm5);