/*****************************************************************************
 * i420_yuy2.h : YUV to YUV conversion module for vlc
 *****************************************************************************
 * Copyright (C) 2000, 2001 the VideoLAN team
 * $Id$
 *
 * Authors: Samuel Hocevar <sam@zoy.org>
 *          Damien Fouilleul <damien@videolan.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/

#ifdef MODULE_NAME_IS_i420_yuy2_mmx

#if defined(CAN_COMPILE_MMX)

/* MMX assembly */

#define MMX_CALL(MMX_INSTRUCTIONS)          \
    do {                                    \
    __asm__ __volatile__(                   \
        ".p2align 3 \n\t"                   \
        MMX_INSTRUCTIONS                    \
        :                                   \
        : "r" (p_line1), "r" (p_line2),     \
          "r" (p_y1), "r" (p_y2),           \
          "r" (p_u), "r" (p_v) );           \
        p_line1 += 16; p_line2 += 16;       \
        p_y1 += 8; p_y2 += 8;               \
        p_u += 4; p_v += 4;                 \
    } while(0)

#define MMX_END __asm__ __volatile__ ( "emms" )

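/*
 * Usage sketch (illustrative only, not part of this header): the conversion
 * loops in i420_yuy2.c are expected to wrap one of the instruction-string
 * macros below in MMX_CALL(), which emits the inline assembly and then
 * advances the six pointers by one 8-pixel step (16 output bytes and 8 luma
 * bytes per line, plus 4 bytes of each chroma plane).  Names other than the
 * pointers used by MMX_CALL() are hypothetical here:
 *
 *     for( i_x = i_width / 8 ; i_x-- ; )
 *     {
 *         MMX_CALL( MMX_YUV420_YUYV );
 *     }
 *     MMX_END;
 */
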
#define MMX_YUV420_YUYV "                                                 \n\
movd       (%4), %%mm1  # Load 4 Cb           00 00 00 00 u3 u2 u1 u0     \n\
movd       (%5), %%mm2  # Load 4 Cr           00 00 00 00 v3 v2 v1 v0     \n\
movq       (%2), %%mm0  # Load 8 Y            y7 y6 y5 y4 y3 y2 y1 y0     \n\
movq       (%3), %%mm3  # Load 8 Y            Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
punpcklbw %%mm2, %%mm1  #                     v3 u3 v2 u2 v1 u1 v0 u0     \n\
movq      %%mm0, %%mm2  #                     y7 y6 y5 y4 y3 y2 y1 y0     \n\
punpcklbw %%mm1, %%mm2  #                     v1 y3 u1 y2 v0 y1 u0 y0     \n\
movq      %%mm2, (%0)   # Store low YUYV                                  \n\
punpckhbw %%mm1, %%mm0  #                     v3 y7 u3 y6 v2 y5 u2 y4     \n\
movq      %%mm0, 8(%0)  # Store high YUYV                                 \n\
movq      %%mm3, %%mm4  #                     Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
punpcklbw %%mm1, %%mm4  #                     v1 Y3 u1 Y2 v0 Y1 u0 Y0     \n\
movq      %%mm4, (%1)   # Store low YUYV                                  \n\
punpckhbw %%mm1, %%mm3  #                     v3 Y7 u3 Y6 v2 Y5 u2 Y4     \n\
movq      %%mm3, 8(%1)  # Store high YUYV                                 \n\
"

#define MMX_YUV420_YVYU "                                                 \n\
movd       (%4), %%mm2  # Load 4 Cb           00 00 00 00 u3 u2 u1 u0     \n\
movd       (%5), %%mm1  # Load 4 Cr           00 00 00 00 v3 v2 v1 v0     \n\
movq       (%2), %%mm0  # Load 8 Y            y7 y6 y5 y4 y3 y2 y1 y0     \n\
movq       (%3), %%mm3  # Load 8 Y            Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
punpcklbw %%mm2, %%mm1  #                     u3 v3 u2 v2 u1 v1 u0 v0     \n\
movq      %%mm0, %%mm2  #                     y7 y6 y5 y4 y3 y2 y1 y0     \n\
punpcklbw %%mm1, %%mm2  #                     u1 y3 v1 y2 u0 y1 v0 y0     \n\
movq      %%mm2, (%0)   # Store low YUYV                                  \n\
punpckhbw %%mm1, %%mm0  #                     u3 y7 v3 y6 u2 y5 v2 y4     \n\
movq      %%mm0, 8(%0)  # Store high YUYV                                 \n\
movq      %%mm3, %%mm4  #                     Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
punpcklbw %%mm1, %%mm4  #                     u1 Y3 v1 Y2 u0 Y1 v0 Y0     \n\
movq      %%mm4, (%1)   # Store low YUYV                                  \n\
punpckhbw %%mm1, %%mm3  #                     u3 Y7 v3 Y6 u2 Y5 v2 Y4     \n\
movq      %%mm3, 8(%1)  # Store high YUYV                                 \n\
"

#define MMX_YUV420_UYVY "                                                 \n\
movd       (%4), %%mm1  # Load 4 Cb           00 00 00 00 u3 u2 u1 u0     \n\
movd       (%5), %%mm2  # Load 4 Cr           00 00 00 00 v3 v2 v1 v0     \n\
movq       (%2), %%mm0  # Load 8 Y            y7 y6 y5 y4 y3 y2 y1 y0     \n\
movq       (%3), %%mm3  # Load 8 Y            Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
punpcklbw %%mm2, %%mm1  #                     v3 u3 v2 u2 v1 u1 v0 u0     \n\
movq      %%mm1, %%mm2  #                     v3 u3 v2 u2 v1 u1 v0 u0     \n\
punpcklbw %%mm0, %%mm2  #                     y3 v1 y2 u1 y1 v0 y0 u0     \n\
movq      %%mm2, (%0)   # Store low UYVY                                  \n\
movq      %%mm1, %%mm2  #                     v3 u3 v2 u2 v1 u1 v0 u0     \n\
punpckhbw %%mm0, %%mm2  #                     y7 v3 y6 u3 y5 v2 y4 u2     \n\
movq      %%mm2, 8(%0)  # Store high UYVY                                 \n\
movq      %%mm1, %%mm4  #                     v3 u3 v2 u2 v1 u1 v0 u0     \n\
punpcklbw %%mm3, %%mm4  #                     Y3 v1 Y2 u1 Y1 v0 Y0 u0     \n\
movq      %%mm4, (%1)   # Store low UYVY                                  \n\
punpckhbw %%mm3, %%mm1  #                     Y7 v3 Y6 u3 Y5 v2 Y4 u2     \n\
movq      %%mm1, 8(%1)  # Store high UYVY                                 \n\
"

/* FIXME: this code does not work ! Chroma seems to be wrong. */
#define MMX_YUV420_Y211 "                                                 \n\
movq       (%2), %%mm0  # Load 8 Y            y7 y6 y5 y4 y3 y2 y1 y0     \n\
movq       (%3), %%mm1  # Load 8 Y            Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
movd       (%4), %%mm2  # Load 4 Cb           00 00 00 00 u3 u2 u1 u0     \n\
movd       (%5), %%mm3  # Load 4 Cr           00 00 00 00 v3 v2 v1 v0     \n\
pand    i_00ffw, %%mm0  # get Y even          00 Y6 00 Y4 00 Y2 00 Y0     \n\
packuswb  %%mm0, %%mm0  # pack Y              y6 y4 y2 y0 y6 y4 y2 y0     \n\
pand    i_00ffw, %%mm2  # get U even          00 u6 00 u4 00 u2 00 u0     \n\
packuswb  %%mm2, %%mm2  # pack U              00 00 u2 u0 00 00 u2 u0     \n\
pand    i_00ffw, %%mm3  # get V even          00 v6 00 v4 00 v2 00 v0     \n\
packuswb  %%mm3, %%mm3  # pack V              00 00 v2 v0 00 00 v2 v0     \n\
punpcklbw %%mm3, %%mm2  #                     00 00 00 00 v2 u2 v0 u0     \n\
psubsw    i_80w, %%mm2  # U,V -= 128                                      \n\
punpcklbw %%mm2, %%mm0  #                     v2 y6 u2 y4 v0 y2 u0 y0     \n\
movq      %%mm0, (%0)   # Store YUYV                                      \n\
pand    i_00ffw, %%mm1  # get Y even          00 Y6 00 Y4 00 Y2 00 Y0     \n\
packuswb  %%mm1, %%mm1  # pack Y              Y6 Y4 Y2 Y0 Y6 Y4 Y2 Y0     \n\
punpcklbw %%mm2, %%mm1  #                     v2 Y6 u2 Y4 v0 Y2 u0 Y0     \n\
movq      %%mm1, (%1)   # Store YUYV                                      \n\
"
#elif defined(HAVE_MMX_INTRINSICS)

/* MMX intrinsics */

#include <mmintrin.h>

#define MMX_CALL(MMX_INSTRUCTIONS)          \
    do {                                    \
        __m64 mm0, mm1, mm2, mm3, mm4;      \
        MMX_INSTRUCTIONS                    \
        p_line1 += 16; p_line2 += 16;       \
        p_y1 += 8; p_y2 += 8;               \
        p_u += 4; p_v += 4;                 \
    } while(0)

#define MMX_END _mm_empty()

#define MMX_YUV420_YUYV                         \
    mm1 = _mm_cvtsi32_si64(*(int*)p_u);         \
    mm2 = _mm_cvtsi32_si64(*(int*)p_v);         \
    mm0 = (__m64)*(uint64_t*)p_y1;              \
    mm3 = (__m64)*(uint64_t*)p_y2;              \
    mm1 = _mm_unpacklo_pi8(mm1, mm2);           \
    mm2 = mm0;                                  \
    mm2 = _mm_unpacklo_pi8(mm2, mm1);           \
    *(uint64_t*)p_line1 = (uint64_t)mm2;        \
    mm0 = _mm_unpackhi_pi8(mm0, mm1);           \
    *(uint64_t*)(p_line1 + 8) = (uint64_t)mm0;  \
    mm4 = mm3;                                  \
    mm4 = _mm_unpacklo_pi8(mm4, mm1);           \
    *(uint64_t*)p_line2 = (uint64_t)mm4;        \
    mm3 = _mm_unpackhi_pi8(mm3, mm1);           \
    *(uint64_t*)(p_line2 + 8) = (uint64_t)mm3;

#define MMX_YUV420_YVYU                         \
    mm2 = _mm_cvtsi32_si64(*(int*)p_u);         \
    mm1 = _mm_cvtsi32_si64(*(int*)p_v);         \
    mm0 = (__m64)*(uint64_t*)p_y1;              \
    mm3 = (__m64)*(uint64_t*)p_y2;              \
    mm1 = _mm_unpacklo_pi8(mm1, mm2);           \
    mm2 = mm0;                                  \
    mm2 = _mm_unpacklo_pi8(mm2, mm1);           \
    *(uint64_t*)p_line1 = (uint64_t)mm2;        \
    mm0 = _mm_unpackhi_pi8(mm0, mm1);           \
    *(uint64_t*)(p_line1 + 8) = (uint64_t)mm0;  \
    mm4 = mm3;                                  \
    mm4 = _mm_unpacklo_pi8(mm4, mm1);           \
    *(uint64_t*)p_line2 = (uint64_t)mm4;        \
    mm3 = _mm_unpackhi_pi8(mm3, mm1);           \
    *(uint64_t*)(p_line2 + 8) = (uint64_t)mm3;

#define MMX_YUV420_UYVY                         \
    mm1 = _mm_cvtsi32_si64(*(int*)p_u);         \
    mm2 = _mm_cvtsi32_si64(*(int*)p_v);         \
    mm0 = (__m64)*(uint64_t*)p_y1;              \
    mm3 = (__m64)*(uint64_t*)p_y2;              \
    mm1 = _mm_unpacklo_pi8(mm1, mm2);           \
    mm2 = mm1;                                  \
    mm2 = _mm_unpacklo_pi8(mm2, mm0);           \
    *(uint64_t*)p_line1 = (uint64_t)mm2;        \
    mm2 = mm1;                                  \
    mm2 = _mm_unpackhi_pi8(mm2, mm0);           \
    *(uint64_t*)(p_line1 + 8) = (uint64_t)mm2;  \
    mm4 = mm1;                                  \
    mm4 = _mm_unpacklo_pi8(mm4, mm3);           \
    *(uint64_t*)p_line2 = (uint64_t)mm4;        \
    mm1 = _mm_unpackhi_pi8(mm1, mm3);           \
    *(uint64_t*)(p_line2 + 8) = (uint64_t)mm1;

#endif

#elif defined( MODULE_NAME_IS_i420_yuy2_sse2 )

#if defined(CAN_COMPILE_SSE2)

/* SSE2 assembly */

#define SSE2_CALL(SSE2_INSTRUCTIONS)    \
    do {                                \
    __asm__ __volatile__(               \
        ".p2align 3 \n\t"               \
        SSE2_INSTRUCTIONS               \
        :                               \
        : "r" (p_line1), "r" (p_line2), \
          "r" (p_y1),  "r" (p_y2),      \
          "r" (p_u), "r" (p_v) );       \
        p_line1 += 32; p_line2 += 32;   \
        p_y1 += 16; p_y2 += 16;         \
        p_u += 8; p_v += 8;             \
    } while(0)

#define SSE2_END  __asm__ __volatile__ ( "sfence" ::: "memory" )

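/*
 * The _ALIGNED variants below use movdqa loads and movntdq non-temporal
 * stores, which both require 16-byte aligned addresses; the _UNALIGNED
 * variants fall back to movdqu plus prefetchnta hints.  A caller is
 * therefore expected to select the variant from the pointer alignment,
 * roughly as in this illustrative sketch (not the actual dispatch code):
 *
 *     if( !( ((intptr_t)p_line1 | (intptr_t)p_y1) & 15 ) )
 *         SSE2_CALL( SSE2_YUV420_YUYV_ALIGNED );
 *     else
 *         SSE2_CALL( SSE2_YUV420_YUYV_UNALIGNED );
 */
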
#define SSE2_YUV420_YUYV_ALIGNED "                                        \n\
movq        (%4), %%xmm1  # Load 8 Cb         u7 u6 u5 u4 u3 u2 u1 u0     \n\
movq        (%5), %%xmm2  # Load 8 Cr         v7 v6 v5 v4 v3 v2 v1 v0     \n\
movdqa      (%2), %%xmm0  # Load 16 Y         y15 y14 y13 .. y2 y1 y0     \n\
movdqa      (%3), %%xmm3  # Load 16 Y         Y15 Y14 Y13 .. Y2 Y1 Y0     \n\
punpcklbw %%xmm2, %%xmm1  #                   v7 u7 v6 u6 .. u1 v0 u0     \n\
movdqa    %%xmm0, %%xmm2  #                   y15 y14 y13 .. y2 y1 y0     \n\
punpcklbw %%xmm1, %%xmm2  #                   v3 y7 u3 .. v0 y1 u0 y0     \n\
movntdq   %%xmm2, (%0)    # Store low YUYV                                \n\
punpckhbw %%xmm1, %%xmm0  #                   v3 y7 u3 y6 v2 y5 u2 y4     \n\
movntdq   %%xmm0, 16(%0)  # Store high YUYV                               \n\
movdqa    %%xmm3, %%xmm4  #                   Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
punpcklbw %%xmm1, %%xmm4  #                   v1 Y3 u1 Y2 v0 Y1 u0 Y0     \n\
movntdq   %%xmm4, (%1)    # Store low YUYV                                \n\
punpckhbw %%xmm1, %%xmm3  #                   v3 Y7 u3 Y6 v2 Y5 u2 Y4     \n\
movntdq   %%xmm3, 16(%1)  # Store high YUYV                               \n\
"

#define SSE2_YUV420_YUYV_UNALIGNED "                                      \n\
movq        (%4), %%xmm1  # Load 8 Cb         u7 u6 u5 u4 u3 u2 u1 u0     \n\
movq        (%5), %%xmm2  # Load 8 Cr         v7 v6 v5 v4 v3 v2 v1 v0     \n\
movdqu      (%2), %%xmm0  # Load 16 Y         y15 y14 y13 .. y2 y1 y0     \n\
movdqu      (%3), %%xmm3  # Load 16 Y         Y15 Y14 Y13 .. Y2 Y1 Y0     \n\
prefetchnta (%0)          # Tell CPU not to cache output YUYV data        \n\
prefetchnta (%1)          # Tell CPU not to cache output YUYV data        \n\
punpcklbw %%xmm2, %%xmm1  #                   v3 u3 v2 u2 v1 u1 v0 u0     \n\
movdqa    %%xmm0, %%xmm2  #                   y7 y6 y5 y4 y3 y2 y1 y0     \n\
punpcklbw %%xmm1, %%xmm2  #                   v1 y3 u1 y2 v0 y1 u0 y0     \n\
movdqu    %%xmm2, (%0)    # Store low YUYV                                \n\
punpckhbw %%xmm1, %%xmm0  #                   v3 y7 u3 y6 v2 y5 u2 y4     \n\
movdqu    %%xmm0, 16(%0)  # Store high YUYV                               \n\
movdqa    %%xmm3, %%xmm4  #                   Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
punpcklbw %%xmm1, %%xmm4  #                   v1 Y3 u1 Y2 v0 Y1 u0 Y0     \n\
movdqu    %%xmm4, (%1)    # Store low YUYV                                \n\
punpckhbw %%xmm1, %%xmm3  #                   v3 Y7 u3 Y6 v2 Y5 u2 Y4     \n\
movdqu    %%xmm3, 16(%1)  # Store high YUYV                               \n\
"

#define SSE2_YUV420_YVYU_ALIGNED "                                        \n\
movq        (%4), %%xmm2  # Load 8 Cb           u7 u6 u5 u4 u3 u2 u1 u0   \n\
movq        (%5), %%xmm1  # Load 8 Cr           v7 v6 v5 v4 v3 v2 v1 v0   \n\
movdqa      (%2), %%xmm0  # Load 16 Y           y15 y14 y13 .. y2 y1 y0   \n\
movdqa      (%3), %%xmm3  # Load 16 Y           Y15 Y14 Y13 .. Y2 Y1 Y0   \n\
punpcklbw %%xmm2, %%xmm1  #                     u3 v3 u2 v2 u1 v1 u0 v0   \n\
movdqa    %%xmm0, %%xmm2  #                     y7 y6 y5 y4 y3 y2 y1 y0   \n\
punpcklbw %%xmm1, %%xmm2  #                     u1 y3 v1 y2 u0 y1 v0 y0   \n\
movntdq   %%xmm2, (%0)    # Store low YUYV                                \n\
punpckhbw %%xmm1, %%xmm0  #                     u3 y7 v3 y6 u2 y5 v2 y4   \n\
movntdq   %%xmm0, 16(%0)  # Store high YUYV                               \n\
movdqa    %%xmm3, %%xmm4  #                     Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
punpcklbw %%xmm1, %%xmm4  #                     u1 Y3 v1 Y2 u0 Y1 v0 Y0   \n\
movntdq   %%xmm4, (%1)    # Store low YUYV                                \n\
punpckhbw %%xmm1, %%xmm3  #                     u3 Y7 v3 Y6 u2 Y5 v2 Y4   \n\
movntdq   %%xmm3, 16(%1)  # Store high YUYV                               \n\
"

#define SSE2_YUV420_YVYU_UNALIGNED "                                      \n\
movq        (%4), %%xmm2  # Load 8 Cb           u7 u6 u5 u4 u3 u2 u1 u0   \n\
movq        (%5), %%xmm1  # Load 8 Cr           v7 v6 v5 v4 v3 v2 v1 v0   \n\
movdqu      (%2), %%xmm0  # Load 16 Y           y15 y14 y13 .. y2 y1 y0   \n\
movdqu      (%3), %%xmm3  # Load 16 Y           Y15 Y14 Y13 .. Y2 Y1 Y0   \n\
prefetchnta (%0)          # Tell CPU not to cache output YVYU data        \n\
prefetchnta (%1)          # Tell CPU not to cache output YVYU data        \n\
punpcklbw %%xmm2, %%xmm1  #                     u3 v3 u2 v2 u1 v1 u0 v0   \n\
movdqu    %%xmm0, %%xmm2  #                     y7 y6 y5 y4 y3 y2 y1 y0   \n\
punpcklbw %%xmm1, %%xmm2  #                     u1 y3 v1 y2 u0 y1 v0 y0   \n\
movdqu    %%xmm2, (%0)    # Store low YUYV                                \n\
punpckhbw %%xmm1, %%xmm0  #                     u3 y7 v3 y6 u2 y5 v2 y4   \n\
movdqu    %%xmm0, 16(%0)  # Store high YUYV                               \n\
movdqu    %%xmm3, %%xmm4  #                     Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
punpcklbw %%xmm1, %%xmm4  #                     u1 Y3 v1 Y2 u0 Y1 v0 Y0   \n\
movdqu    %%xmm4, (%1)    # Store low YUYV                                \n\
punpckhbw %%xmm1, %%xmm3  #                     u3 Y7 v3 Y6 u2 Y5 v2 Y4   \n\
movdqu    %%xmm3, 16(%1)  # Store high YUYV                               \n\
"

#define SSE2_YUV420_UYVY_ALIGNED "                                        \n\
movq        (%4), %%xmm1  # Load 8 Cb           u7 u6 u5 u4 u3 u2 u1 u0   \n\
movq        (%5), %%xmm2  # Load 8 Cr           v7 v6 v5 v4 v3 v2 v1 v0   \n\
movdqa      (%2), %%xmm0  # Load 16 Y           y15 y14 y13 .. y2 y1 y0   \n\
movdqa      (%3), %%xmm3  # Load 16 Y           Y15 Y14 Y13 .. Y2 Y1 Y0   \n\
punpcklbw %%xmm2, %%xmm1  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
movdqa    %%xmm1, %%xmm2  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
punpcklbw %%xmm0, %%xmm2  #                     y3 v1 y2 u1 y1 v0 y0 u0   \n\
movntdq   %%xmm2, (%0)    # Store low UYVY                                \n\
movdqa    %%xmm1, %%xmm2  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
punpckhbw %%xmm0, %%xmm2  #                     y7 v3 y6 u3 y5 v2 y4 u2   \n\
movntdq   %%xmm2, 16(%0)  # Store high UYVY                               \n\
movdqa    %%xmm1, %%xmm4  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
punpcklbw %%xmm3, %%xmm4  #                     Y3 v1 Y2 u1 Y1 v0 Y0 u0   \n\
movntdq   %%xmm4, (%1)    # Store low UYVY                                \n\
punpckhbw %%xmm3, %%xmm1  #                     Y7 v3 Y6 u3 Y5 v2 Y4 u2   \n\
movntdq   %%xmm1, 16(%1)  # Store high UYVY                               \n\
"

#define SSE2_YUV420_UYVY_UNALIGNED "                                      \n\
movq        (%4), %%xmm1  # Load 8 Cb           u7 u6 u5 u4 u3 u2 u1 u0   \n\
movq        (%5), %%xmm2  # Load 8 Cr           v7 v6 v5 v4 v3 v2 v1 v0   \n\
movdqu      (%2), %%xmm0  # Load 16 Y           y15 y14 y13 .. y2 y1 y0   \n\
movdqu      (%3), %%xmm3  # Load 16 Y           Y15 Y14 Y13 .. Y2 Y1 Y0   \n\
prefetchnta (%0)          # Tell CPU not to cache output UYVY data        \n\
prefetchnta (%1)          # Tell CPU not to cache output UYVY data        \n\
punpcklbw %%xmm2, %%xmm1  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
movdqu    %%xmm1, %%xmm2  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
punpcklbw %%xmm0, %%xmm2  #                     y3 v1 y2 u1 y1 v0 y0 u0   \n\
movdqu    %%xmm2, (%0)    # Store low UYVY                                \n\
movdqu    %%xmm1, %%xmm2  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
punpckhbw %%xmm0, %%xmm2  #                     y7 v3 y6 u3 y5 v2 y4 u2   \n\
movdqu    %%xmm2, 16(%0)  # Store high UYVY                               \n\
movdqu    %%xmm1, %%xmm4  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
punpcklbw %%xmm3, %%xmm4  #                     Y3 v1 Y2 u1 Y1 v0 Y0 u0   \n\
movdqu    %%xmm4, (%1)    # Store low UYVY                                \n\
punpckhbw %%xmm3, %%xmm1  #                     Y7 v3 Y6 u3 Y5 v2 Y4 u2   \n\
movdqu    %%xmm1, 16(%1)  # Store high UYVY                               \n\
"

#elif defined(HAVE_SSE2_INTRINSICS)

/* SSE2 intrinsics */

#include <emmintrin.h>

#define SSE2_CALL(SSE2_INSTRUCTIONS)            \
    do {                                        \
        __m128i xmm0, xmm1, xmm2, xmm3, xmm4;   \
        SSE2_INSTRUCTIONS                       \
        p_line1 += 32; p_line2 += 32;           \
        p_y1 += 16; p_y2 += 16;                 \
        p_u += 8; p_v += 8;                     \
    } while(0)

#define SSE2_END  _mm_sfence()

#define SSE2_YUV420_YUYV_ALIGNED                    \
    xmm1 = _mm_loadl_epi64((__m128i *)p_u);         \
    xmm2 = _mm_loadl_epi64((__m128i *)p_v);         \
    xmm0 = _mm_load_si128((__m128i *)p_y1);         \
    xmm3 = _mm_load_si128((__m128i *)p_y2);         \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);           \
    xmm2 = xmm0;                                    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);           \
    _mm_stream_si128((__m128i*)(p_line1), xmm2);    \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);           \
    _mm_stream_si128((__m128i*)(p_line1+16), xmm0); \
    xmm4 = xmm3;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);           \
    _mm_stream_si128((__m128i*)(p_line2), xmm4);    \
    xmm3 = _mm_unpackhi_epi8(xmm3, xmm1);           \
    _mm_stream_si128((__m128i*)(p_line2+16), xmm3);

#define SSE2_YUV420_YUYV_UNALIGNED                  \
    xmm1 = _mm_loadl_epi64((__m128i *)p_u);         \
    xmm2 = _mm_loadl_epi64((__m128i *)p_v);         \
    xmm0 = _mm_loadu_si128((__m128i *)p_y1);        \
    xmm3 = _mm_loadu_si128((__m128i *)p_y2);        \
    _mm_prefetch(p_line1, _MM_HINT_NTA);            \
    _mm_prefetch(p_line2, _MM_HINT_NTA);            \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);           \
    xmm2 = xmm0;                                    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);           \
    _mm_storeu_si128((__m128i*)(p_line1), xmm2);    \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);           \
    _mm_storeu_si128((__m128i*)(p_line1+16), xmm0); \
    xmm4 = xmm3;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);           \
    _mm_storeu_si128((__m128i*)(p_line2), xmm4);    \
    xmm3 = _mm_unpackhi_epi8(xmm3, xmm1);           \
    _mm_storeu_si128((__m128i*)(p_line2+16), xmm3);

#define SSE2_YUV420_YVYU_ALIGNED                    \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v);         \
    xmm2 = _mm_loadl_epi64((__m128i *)p_u);         \
    xmm0 = _mm_load_si128((__m128i *)p_y1);         \
    xmm3 = _mm_load_si128((__m128i *)p_y2);         \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);           \
    xmm2 = xmm0;                                    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);           \
    _mm_stream_si128((__m128i*)(p_line1), xmm2);    \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);           \
    _mm_stream_si128((__m128i*)(p_line1+16), xmm0); \
    xmm4 = xmm3;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);           \
    _mm_stream_si128((__m128i*)(p_line2), xmm4);    \
    xmm3 = _mm_unpackhi_epi8(xmm3, xmm1);           \
    _mm_stream_si128((__m128i*)(p_line2+16), xmm3);

#define SSE2_YUV420_YVYU_UNALIGNED                  \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v);         \
    xmm2 = _mm_loadl_epi64((__m128i *)p_u);         \
    xmm0 = _mm_loadu_si128((__m128i *)p_y1);        \
    xmm3 = _mm_loadu_si128((__m128i *)p_y2);        \
    _mm_prefetch(p_line1, _MM_HINT_NTA);            \
    _mm_prefetch(p_line2, _MM_HINT_NTA);            \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);           \
    xmm2 = xmm0;                                    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);           \
    _mm_storeu_si128((__m128i*)(p_line1), xmm2);    \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);           \
    _mm_storeu_si128((__m128i*)(p_line1+16), xmm0); \
    xmm4 = xmm3;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);           \
    _mm_storeu_si128((__m128i*)(p_line2), xmm4);    \
    xmm3 = _mm_unpackhi_epi8(xmm3, xmm1);           \
    _mm_storeu_si128((__m128i*)(p_line2+16), xmm3);

#define SSE2_YUV420_UYVY_ALIGNED                    \
    xmm1 = _mm_loadl_epi64((__m128i *)p_u);         \
    xmm2 = _mm_loadl_epi64((__m128i *)p_v);         \
    xmm0 = _mm_load_si128((__m128i *)p_y1);         \
    xmm3 = _mm_load_si128((__m128i *)p_y2);         \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);           \
    xmm2 = xmm1;                                    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm0);           \
    _mm_stream_si128((__m128i*)(p_line1), xmm2);    \
    xmm2 = xmm1;                                    \
    xmm2 = _mm_unpackhi_epi8(xmm2, xmm0);           \
    _mm_stream_si128((__m128i*)(p_line1+16), xmm2); \
    xmm4 = xmm1;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm3);           \
    _mm_stream_si128((__m128i*)(p_line2), xmm4);    \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3);           \
    _mm_stream_si128((__m128i*)(p_line2+16), xmm1);

#define SSE2_YUV420_UYVY_UNALIGNED                  \
    xmm1 = _mm_loadl_epi64((__m128i *)p_u);         \
    xmm2 = _mm_loadl_epi64((__m128i *)p_v);         \
    xmm0 = _mm_loadu_si128((__m128i *)p_y1);        \
    xmm3 = _mm_loadu_si128((__m128i *)p_y2);        \
    _mm_prefetch(p_line1, _MM_HINT_NTA);            \
    _mm_prefetch(p_line2, _MM_HINT_NTA);            \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);           \
    xmm2 = xmm1;                                    \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm0);           \
    _mm_storeu_si128((__m128i*)(p_line1), xmm2);    \
    xmm2 = xmm1;                                    \
    xmm2 = _mm_unpackhi_epi8(xmm2, xmm0);           \
    _mm_storeu_si128((__m128i*)(p_line1+16), xmm2); \
    xmm4 = xmm1;                                    \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm3);           \
    _mm_storeu_si128((__m128i*)(p_line2), xmm4);    \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3);           \
    _mm_storeu_si128((__m128i*)(p_line2+16), xmm1);

#endif

#endif

/* Used in both accelerated and C modules */

#define C_YUV420_YVYU( )                                                    \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
    *(p_line1)++ =            *(p_line2)++ = *(p_v)++;                      \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
    *(p_line1)++ =            *(p_line2)++ = *(p_u)++;                      \

#define C_YUV420_Y211( )                                                    \
    *(p_line1)++ = *(p_y1); p_y1 += 2;                                      \
    *(p_line2)++ = *(p_y2); p_y2 += 2;                                      \
    *(p_line1)++ = *(p_line2)++ = *(p_u) - 0x80; p_u += 2;                  \
    *(p_line1)++ = *(p_y1); p_y1 += 2;                                      \
    *(p_line2)++ = *(p_y2); p_y2 += 2;                                      \
    *(p_line1)++ = *(p_line2)++ = *(p_v) - 0x80; p_v += 2;                  \


#define C_YUV420_YUYV( )                                                    \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
    *(p_line1)++ =            *(p_line2)++ = *(p_u)++;                      \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
    *(p_line1)++ =            *(p_line2)++ = *(p_v)++;                      \

#define C_YUV420_UYVY( )                                                    \
    *(p_line1)++ =            *(p_line2)++ = *(p_u)++;                      \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
    *(p_line1)++ =            *(p_line2)++ = *(p_v)++;                      \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \

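/*
 * Illustrative scalar usage (not part of this header): the 4:2:2 packing
 * macros (YUYV, YVYU, UYVY) each consume two luma samples from each of the
 * two source lines plus one Cb and one Cr sample, and write four bytes to
 * each output line, so a plain C conversion loop steps through half the
 * width per line pair.  The loop bound below is a hypothetical example:
 *
 *     for( i_x = i_width / 2 ; i_x-- ; )
 *     {
 *         C_YUV420_YUYV( );
 *     }
 */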