/*****************************************************************************
 * i420_rgb_mmx.h: MMX YUV transformation assembly
 *****************************************************************************
 * Copyright (C) 1999-2007 the VideoLAN team
 * $Id$
 *
 * Authors: Olie Lho <ollie@sis.com.tw>
 *          Gaël Hendryckx <jimmy@via.ecp.fr>
 *          Samuel Hocevar <sam@zoy.org>
 *          Damien Fouilleul <damienf@videolan.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/

/* hope these constant values are cache line aligned */
static const uint64_t mmx_80w     = 0x0080008000800080ULL; /* Will be referenced as %4 in inline asm */
static const uint64_t mmx_10w     = 0x1010101010101010ULL; /* -- as %5 */
static const uint64_t mmx_00ffw   = 0x00ff00ff00ff00ffULL; /* -- as %6 */
static const uint64_t mmx_Y_coeff = 0x253f253f253f253fULL; /* -- as %7 */

static const uint64_t mmx_U_green = 0xf37df37df37df37dULL; /* -- as %8 */
static const uint64_t mmx_U_blue  = 0x4093409340934093ULL; /* -- as %9 */
static const uint64_t mmx_V_red   = 0x3312331233123312ULL; /* -- as %10 */
static const uint64_t mmx_V_green = 0xe5fce5fce5fce5fcULL; /* -- as %11 */

static const uint64_t mmx_mask_f8 = 0xf8f8f8f8f8f8f8f8ULL; /* -- as %12 */
static const uint64_t mmx_mask_fc = 0xfcfcfcfcfcfcfcfcULL; /* -- as %13 */

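/*
 * These constants appear to encode the usual ITU-R BT.601 YCbCr -> RGB
 * equations in signed fixed point:
 *
 *     R = 1.164 * (Y - 16)                     + 1.596 * (V - 128)
 *     G = 1.164 * (Y - 16) - 0.391 * (U - 128) - 0.813 * (V - 128)
 *     B = 1.164 * (Y - 16) + 2.018 * (U - 128)
 *
 * Since pmulhw computes (a * b) >> 16 and the inputs are first promoted
 * with psllw $3, each coefficient c is stored as round(c * 8192), e.g.
 * 0x253f = 9535 ~ 1.164 * 8192 and 0xf37d = -3203 ~ -0.391 * 8192.
 */
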
#if defined(CAN_COMPILE_MMX)

/* MMX assembly */

#define MMX_CALL(MMX_INSTRUCTIONS)      \
    do {                                \
    __asm__ __volatile__(               \
        ".p2align 3 \n\t"               \
        MMX_INSTRUCTIONS                \
        :                               \
        : "r" (p_y), "r" (p_u),         \
          "r" (p_v), "r" (p_buffer),    \
          "m" (mmx_80w), "m" (mmx_10w), \
          "m" (mmx_00ffw), "m" (mmx_Y_coeff), \
          "m" (mmx_U_green), "m" (mmx_U_blue), \
          "m" (mmx_V_red), "m" (mmx_V_green), \
          "m" (mmx_mask_f8), "m" (mmx_mask_fc) \
        : "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7" );  \
    } while(0)

#define MMX_END __asm__ __volatile__ ( "emms" )

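/*
 * The string fragments below are meant to be concatenated into a single
 * asm statement.  A sketch of a typical call site (the loop bounds and
 * pointer bookkeeping are illustrative only; the real conversion loops
 * live in the i420_rgb*.c files):
 *
 *     for( i_x = i_width / 8; i_x--; )
 *     {
 *         MMX_CALL( MMX_INIT_16 MMX_YUV_MUL MMX_YUV_ADD MMX_UNPACK_16 );
 *         p_y += 8; p_u += 4; p_v += 4; p_buffer += 8;
 *     }
 *     MMX_END;
 */
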
#define MMX_INIT_16 "                                                       \n\
movd       (%1), %%mm0      # Load 4 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movd       (%2), %%mm1      # Load 4 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor      %%mm4, %%mm4      # zero mm4                                      \n\
movq       (%0), %%mm6      # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
"

#define MMX_INIT_16_GRAY "                                                  \n\
movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
#movl      $0, (%3)         # cache preload for image                       \n\
"

#define MMX_INIT_32 "                                                       \n\
movd      (%1), %%mm0       # Load 4 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movl        $0, (%3)        # cache preload for image                       \n\
movd      (%2), %%mm1       # Load 4 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor     %%mm4, %%mm4       # zero mm4                                      \n\
movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
"

/*
 * Do the multiply part of the conversion for even and odd pixels,
 * register usage:
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd  pixels,
 * mm6 -> Y even, mm7 -> Y odd
 */

#define MMX_YUV_MUL "                                                       \n\
# convert the chroma part                                                   \n\
punpcklbw %%mm4, %%mm0          # scatter 4 Cb    00 u3 00 u2 00 u1 00 u0   \n\
punpcklbw %%mm4, %%mm1          # scatter 4 Cr    00 v3 00 v2 00 v1 00 v0   \n\
psubsw    %4, %%mm0             # Cb -= 128                                 \n\
psubsw    %4, %%mm1             # Cr -= 128                                 \n\
psllw     $3, %%mm0             # Promote precision                         \n\
psllw     $3, %%mm1             # Promote precision                         \n\
movq      %%mm0, %%mm2          # Copy 4 Cb       00 u3 00 u2 00 u1 00 u0   \n\
movq      %%mm1, %%mm3          # Copy 4 Cr       00 v3 00 v2 00 v1 00 v0   \n\
pmulhw    %8, %%mm2             # Mul Cb with green coeff -> Cb green       \n\
pmulhw    %11, %%mm3            # Mul Cr with green coeff -> Cr green       \n\
pmulhw    %9, %%mm0             # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0   \n\
pmulhw    %10, %%mm1            # Mul Cr -> Cred  00 r3 00 r2 00 r1 00 r0   \n\
paddsw    %%mm3, %%mm2          # Cb green + Cr green -> Cgreen             \n\
                                                                            \n\
# convert the luma part                                                     \n\
psubusb   %5, %%mm6             # Y -= 16                                   \n\
movq      %%mm6, %%mm7          # Copy 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
pand      %6, %%mm6             # get Y even      00 Y6 00 Y4 00 Y2 00 Y0   \n\
psrlw     $8, %%mm7             # get Y odd       00 Y7 00 Y5 00 Y3 00 Y1   \n\
psllw     $3, %%mm6             # Promote precision                         \n\
psllw     $3, %%mm7             # Promote precision                         \n\
pmulhw    %7, %%mm6             # Mul 4 Y even    00 y6 00 y4 00 y2 00 y0   \n\
pmulhw    %7, %%mm7             # Mul 4 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
"

/*
 * Do the addition part of the conversion for even and odd pixels,
 * register usage:
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd  pixels,
 * mm6 -> Y even, mm7 -> Y odd
 */

#define MMX_YUV_ADD "                                                       \n\
# Do horizontal and vertical scaling                                        \n\
movq      %%mm0, %%mm3          # Copy Cblue                                \n\
movq      %%mm1, %%mm4          # Copy Cred                                 \n\
movq      %%mm2, %%mm5          # Copy Cgreen                               \n\
paddsw    %%mm6, %%mm0          # Y even + Cblue  00 B6 00 B4 00 B2 00 B0   \n\
paddsw    %%mm7, %%mm3          # Y odd  + Cblue  00 B7 00 B5 00 B3 00 B1   \n\
paddsw    %%mm6, %%mm1          # Y even + Cred   00 R6 00 R4 00 R2 00 R0   \n\
paddsw    %%mm7, %%mm4          # Y odd  + Cred   00 R7 00 R5 00 R3 00 R1   \n\
paddsw    %%mm6, %%mm2          # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0   \n\
paddsw    %%mm7, %%mm5          # Y odd  + Cgreen 00 G7 00 G5 00 G3 00 G1   \n\
                                                                            \n\
# Limit RGB even to 0..255                                                  \n\
packuswb  %%mm0, %%mm0          # B6 B4 B2 B0 / B6 B4 B2 B0                 \n\
packuswb  %%mm1, %%mm1          # R6 R4 R2 R0 / R6 R4 R2 R0                 \n\
packuswb  %%mm2, %%mm2          # G6 G4 G2 G0 / G6 G4 G2 G0                 \n\
                                                                            \n\
# Limit RGB odd to 0..255                                                   \n\
packuswb  %%mm3, %%mm3          # B7 B5 B3 B1 / B7 B5 B3 B1                 \n\
packuswb  %%mm4, %%mm4          # R7 R5 R3 R1 / R7 R5 R3 R1                 \n\
packuswb  %%mm5, %%mm5          # G7 G5 G3 G1 / G7 G5 G3 G1                 \n\
                                                                            \n\
# Interleave RGB even and odd                                               \n\
punpcklbw %%mm3, %%mm0          #                 B7 B6 B5 B4 B3 B2 B1 B0   \n\
punpcklbw %%mm4, %%mm1          #                 R7 R6 R5 R4 R3 R2 R1 R0   \n\
punpcklbw %%mm5, %%mm2          #                 G7 G6 G5 G4 G3 G2 G1 G0   \n\
"

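/*
 * Taken together, MMX_YUV_MUL and MMX_YUV_ADD compute, per pixel, the
 * following (a scalar sketch of the fixed-point pipeline, not part of
 * the build; clamp() stands for saturation to 0..255):
 *
 *     int y  = ((y8 > 16 ? y8 - 16 : 0) * 9535) >> 13;
 *     int cb = u8 - 128, cr = v8 - 128;
 *     uint8_t r = clamp( y + ((cr * 13074) >> 13) );
 *     uint8_t g = clamp( y + ((cb * -3203) >> 13) + ((cr * -6660) >> 13) );
 *     uint8_t b = clamp( y + ((cb * 16531) >> 13) );
 */
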
/*
 * Grayscale case, only use Y
 */

#define MMX_YUV_GRAY "                                                      \n\
# convert the luma part                                                     \n\
psubusb   %5, %%mm6             # Y -= 16                                   \n\
movq      %%mm6, %%mm7          # Copy 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
pand      %6, %%mm6             # get Y even      00 Y6 00 Y4 00 Y2 00 Y0   \n\
psrlw     $8, %%mm7             # get Y odd       00 Y7 00 Y5 00 Y3 00 Y1   \n\
psllw     $3, %%mm6             # Promote precision                         \n\
psllw     $3, %%mm7             # Promote precision                         \n\
pmulhw    %7, %%mm6             # Mul 4 Y even    00 y6 00 y4 00 y2 00 y0   \n\
pmulhw    %7, %%mm7             # Mul 4 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
packuswb  %%mm6, %%mm6          # Limit Y even to 0..255                    \n\
packuswb  %%mm7, %%mm7          # Limit Y odd  to 0..255                    \n\
punpcklbw %%mm7, %%mm6          # Interleave      y7 y6 y5 y4 y3 y2 y1 y0   \n\
"

#define MMX_UNPACK_16_GRAY "                                                \n\
# convert 8 gray pixels to 8 rgb16 pixels (gray copied into R, G and B)     \n\
movq      %%mm6, %%mm5          # Copy 8 gray     g7 g6 g5 g4 g3 g2 g1 g0   \n\
pand      %12, %%mm6            # g7g6g5g4 g3______ (5 significant bits)    \n\
pand      %13, %%mm5            # g7g6g5g4 g3g2____ (6 significant bits)    \n\
movq      %%mm6, %%mm7          # Copy masked gray                          \n\
psrlw     $3, %%mm7             # shift blue into place                     \n\
pxor      %%mm3, %%mm3          # zero mm3                                  \n\
movq      %%mm7, %%mm2          # Copy blue for pixel 4-7                   \n\
movq      %%mm5, %%mm0          # Copy green for pixel 4-7                  \n\
punpcklbw %%mm3, %%mm5          # expand green, pixel 0-3                   \n\
punpcklbw %%mm6, %%mm7          # red and blue, pixel 0-3                   \n\
psllw     $3, %%mm5             # shift green into place                    \n\
por       %%mm5, %%mm7          # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movq      %%mm7, (%3)           # store pixel 0-3                           \n\
punpckhbw %%mm3, %%mm0          # expand green, pixel 4-7                   \n\
punpckhbw %%mm6, %%mm2          # red and blue, pixel 4-7                   \n\
psllw     $3, %%mm0             # shift green into place                    \n\
movq      8(%0), %%mm6          # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
por       %%mm0, %%mm2          # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movq      %%mm2, 8(%3)          # store pixel 4-7                           \n\
"

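/*
 * Per gray pixel the above amounts to (scalar sketch):
 *
 *     uint16_t rgb565 = ((g & 0xf8) << 8) | ((g & 0xfc) << 3) | (g >> 3);
 */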

/*
 * convert RGB plane to RGB 15 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */

#define MMX_UNPACK_15 "                                                     \n\
# mask unneeded bits off                                                    \n\
pand      %12, %%mm0            # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
psrlw     $3,%%mm0              # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pand      %12, %%mm2            # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
pand      %12, %%mm1            # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
psrlw     $1,%%mm1              # __r7r6r5 r4r3____ __r7r6r5 r4r3____       \n\
pxor      %%mm4, %%mm4          # zero mm4                                  \n\
movq      %%mm0, %%mm5          # Copy B7-B0                                \n\
movq      %%mm2, %%mm7          # Copy G7-G0                                \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 0-3                           \n\
punpcklbw %%mm4, %%mm2          # ________ ________ g7g6g5g4 g3______       \n\
punpcklbw %%mm1, %%mm0          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $2,%%mm2              # ________ ____g7g6 g5g4g3__ ________       \n\
por       %%mm2, %%mm0          # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
movq      8(%0), %%mm6          # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
movq      %%mm0, (%3)           # store pixel 0-3                           \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 4-7                           \n\
punpckhbw %%mm4, %%mm7          # ________ ________ g7g6g5g4 g3______       \n\
punpckhbw %%mm1, %%mm5          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $2,%%mm7              # ________ ____g7g6 g5g4g3__ ________       \n\
movd      4(%1), %%mm0          # Load 4 Cb       __ __ __ __ u3 u2 u1 u0   \n\
por       %%mm7, %%mm5          # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
movd      4(%2), %%mm1          # Load 4 Cr       __ __ __ __ v3 v2 v1 v0   \n\
movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
"

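/*
 * Per pixel the above packs to rgb15 (scalar sketch):
 *
 *     uint16_t rgb555 = ((r & 0xf8) << 7) | ((g & 0xf8) << 2) | (b >> 3);
 */
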
/*
 * convert RGB plane to RGB 16 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */

#define MMX_UNPACK_16 "                                                     \n\
# mask unneeded bits off                                                    \n\
pand      %12, %%mm0            # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
pand      %13, %%mm2            # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
pand      %12, %%mm1            # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
psrlw     $3,%%mm0              # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pxor      %%mm4, %%mm4          # zero mm4                                  \n\
movq      %%mm0, %%mm5          # Copy B7-B0                                \n\
movq      %%mm2, %%mm7          # Copy G7-G0                                \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 0-3                           \n\
punpcklbw %%mm4, %%mm2          # ________ ________ g7g6g5g4 g3g2____       \n\
punpcklbw %%mm1, %%mm0          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%mm2              # ________ __g7g6g5 g4g3g2__ ________       \n\
por       %%mm2, %%mm0          # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movq      8(%0), %%mm6          # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
movq      %%mm0, (%3)           # store pixel 0-3                           \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 4-7                           \n\
punpckhbw %%mm4, %%mm7          # ________ ________ g7g6g5g4 g3g2____       \n\
punpckhbw %%mm1, %%mm5          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%mm7              # ________ __g7g6g5 g4g3g2__ ________       \n\
movd      4(%1), %%mm0          # Load 4 Cb       __ __ __ __ u3 u2 u1 u0   \n\
por       %%mm7, %%mm5          # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movd      4(%2), %%mm1          # Load 4 Cr       __ __ __ __ v3 v2 v1 v0   \n\
movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
"

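/*
 * Per pixel the above packs to rgb16 (scalar sketch):
 *
 *     uint16_t rgb565 = ((r & 0xf8) << 8) | ((g & 0xfc) << 3) | (b >> 3);
 */
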
/*
 * convert RGB plane to RGB packed format,
 * mm0 -> B, mm1 -> R, mm2 -> G
 */

#define MMX_UNPACK_32_ARGB "                                                \n\
pxor      %%mm3, %%mm3  # zero mm3                                          \n\
movq      %%mm0, %%mm4  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
punpcklbw %%mm2, %%mm4  #                 G3 B3 G2 B2 G1 B1 G0 B0           \n\
movq      %%mm1, %%mm5  #                 R7 R6 R5 R4 R3 R2 R1 R0           \n\
punpcklbw %%mm3, %%mm5  #                 00 R3 00 R2 00 R1 00 R0           \n\
movq      %%mm4, %%mm6  #                 G3 B3 G2 B2 G1 B1 G0 B0           \n\
punpcklwd %%mm5, %%mm4  #                 00 R1 G1 B1 00 R0 G0 B0           \n\
movq      %%mm4, (%3)   # Store ARGB1 ARGB0                                 \n\
punpckhwd %%mm5, %%mm6  #                 00 R3 G3 B3 00 R2 G2 B2           \n\
movq      %%mm6, 8(%3)  # Store ARGB3 ARGB2                                 \n\
punpckhbw %%mm2, %%mm0  #                 G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpckhbw %%mm3, %%mm1  #                 00 R7 00 R6 00 R5 00 R4           \n\
movq      %%mm0, %%mm5  #                 G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpcklwd %%mm1, %%mm5  #                 00 R5 G5 B5 00 R4 G4 B4           \n\
movq      %%mm5, 16(%3) # Store ARGB5 ARGB4                                 \n\
punpckhwd %%mm1, %%mm0  #                 00 R7 G7 B7 00 R6 G6 B6           \n\
movq      %%mm0, 24(%3) # Store ARGB7 ARGB6                                 \n\
"

#define MMX_UNPACK_32_RGBA "                                                \n\
pxor      %%mm3, %%mm3  # zero mm3                                          \n\
movq      %%mm2, %%mm4  #                 G7 G6 G5 G4 G3 G2 G1 G0           \n\
punpcklbw %%mm1, %%mm4  #                 R3 G3 R2 G2 R1 G1 R0 G0           \n\
punpcklbw %%mm0, %%mm3  #                 B3 00 B2 00 B1 00 B0 00           \n\
movq      %%mm3, %%mm5  #                 B3 00 B2 00 B1 00 B0 00           \n\
punpcklwd %%mm4, %%mm3  #                 R1 G1 B1 00 R0 G0 B0 00           \n\
movq      %%mm3, (%3)   # Store RGBA1 RGBA0                                 \n\
punpckhwd %%mm4, %%mm5  #                 R3 G3 B3 00 R2 G2 B2 00           \n\
movq      %%mm5, 8(%3)  # Store RGBA3 RGBA2                                 \n\
pxor      %%mm6, %%mm6  # zero mm6                                          \n\
punpckhbw %%mm1, %%mm2  #                 R7 G7 R6 G6 R5 G5 R4 G4           \n\
punpckhbw %%mm0, %%mm6  #                 B7 00 B6 00 B5 00 B4 00           \n\
movq      %%mm6, %%mm0  #                 B7 00 B6 00 B5 00 B4 00           \n\
punpcklwd %%mm2, %%mm6  #                 R5 G5 B5 00 R4 G4 B4 00           \n\
movq      %%mm6, 16(%3) # Store RGBA5 RGBA4                                 \n\
punpckhwd %%mm2, %%mm0  #                 R7 G7 B7 00 R6 G6 B6 00           \n\
movq      %%mm0, 24(%3) # Store RGBA7 RGBA6                                 \n\
"

#define MMX_UNPACK_32_BGRA "                                                \n\
pxor      %%mm3, %%mm3  # zero mm3                                          \n\
movq      %%mm2, %%mm4  #                 G7 G6 G5 G4 G3 G2 G1 G0           \n\
punpcklbw %%mm0, %%mm4  #                 B3 G3 B2 G2 B1 G1 B0 G0           \n\
punpcklbw %%mm1, %%mm3  #                 R3 00 R2 00 R1 00 R0 00           \n\
movq      %%mm3, %%mm5  #                 R3 00 R2 00 R1 00 R0 00           \n\
punpcklwd %%mm4, %%mm3  #                 B1 G1 R1 00 B0 G0 R0 00           \n\
movq      %%mm3, (%3)   # Store BGRA1 BGRA0                                 \n\
punpckhwd %%mm4, %%mm5  #                 B3 G3 R3 00 B2 G2 R2 00           \n\
movq      %%mm5, 8(%3)  # Store BGRA3 BGRA2                                 \n\
pxor      %%mm6, %%mm6  # zero mm6                                          \n\
punpckhbw %%mm0, %%mm2  #                 B7 G7 B6 G6 B5 G5 B4 G4           \n\
punpckhbw %%mm1, %%mm6  #                 R7 00 R6 00 R5 00 R4 00           \n\
movq      %%mm6, %%mm0  #                 R7 00 R6 00 R5 00 R4 00           \n\
punpcklwd %%mm2, %%mm6  #                 B5 G5 R5 00 B4 G4 R4 00           \n\
movq      %%mm6, 16(%3) # Store BGRA5 BGRA4                                 \n\
punpckhwd %%mm2, %%mm0  #                 B7 G7 R7 00 B6 G6 R6 00           \n\
movq      %%mm0, 24(%3) # Store BGRA7 BGRA6                                 \n\
"

#define MMX_UNPACK_32_ABGR "                                                \n\
pxor      %%mm3, %%mm3  # zero mm3                                          \n\
movq      %%mm1, %%mm4  #                 R7 R6 R5 R4 R3 R2 R1 R0           \n\
punpcklbw %%mm2, %%mm4  #                 G3 R3 G2 R2 G1 R1 G0 R0           \n\
movq      %%mm0, %%mm5  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
punpcklbw %%mm3, %%mm5  #                 00 B3 00 B2 00 B1 00 B0           \n\
movq      %%mm4, %%mm6  #                 G3 R3 G2 R2 G1 R1 G0 R0           \n\
punpcklwd %%mm5, %%mm4  #                 00 B1 G1 R1 00 B0 G0 R0           \n\
movq      %%mm4, (%3)   # Store ABGR1 ABGR0                                 \n\
punpckhwd %%mm5, %%mm6  #                 00 B3 G3 R3 00 B2 G2 R2           \n\
movq      %%mm6, 8(%3)  # Store ABGR3 ABGR2                                 \n\
punpckhbw %%mm2, %%mm1  #                 G7 R7 G6 R6 G5 R5 G4 R4           \n\
punpckhbw %%mm3, %%mm0  #                 00 B7 00 B6 00 B5 00 B4           \n\
movq      %%mm1, %%mm2  #                 G7 R7 G6 R6 G5 R5 G4 R4           \n\
punpcklwd %%mm0, %%mm1  #                 00 B5 G5 R5 00 B4 G4 R4           \n\
movq      %%mm1, 16(%3) # Store ABGR5 ABGR4                                 \n\
punpckhwd %%mm0, %%mm2  #                 00 B7 G7 R7 00 B6 G6 R6           \n\
movq      %%mm2, 24(%3) # Store ABGR7 ABGR6                                 \n\
"

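/*
 * Read back from little-endian memory as a uint32_t, the four variants
 * above store, per pixel (scalar sketch, alpha left at zero):
 *
 *     ARGB: (r << 16) | (g <<  8) |  b
 *     RGBA: (r << 24) | (g << 16) | (b <<  8)
 *     BGRA: (b << 24) | (g << 16) | (r <<  8)
 *     ABGR: (b << 16) | (g <<  8) |  r
 */
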
#elif defined(HAVE_MMX_INTRINSICS)

/* MMX intrinsics */

#include <mmintrin.h>

#define MMX_CALL(MMX_INSTRUCTIONS)  \
    do {                            \
        __m64 mm0, mm1, mm2, mm3,   \
              mm4, mm5, mm6, mm7;   \
        MMX_INSTRUCTIONS            \
    } while(0)

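/* The intrinsics below mirror the asm macros above one for one (see the
 * per-instruction commentary there), so call sites compose them the same
 * way, e.g. MMX_CALL( MMX_INIT_16 MMX_YUV_MUL MMX_YUV_ADD MMX_UNPACK_16 ); */
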
#define MMX_END _mm_empty()

#define MMX_INIT_16                     \
    mm0 = _mm_cvtsi32_si64(*(int*)p_u); \
    mm1 = _mm_cvtsi32_si64(*(int*)p_v); \
    mm4 = _mm_setzero_si64();           \
    mm6 = (__m64)*(uint64_t *)p_y;

#define MMX_INIT_32                     \
    mm0 = _mm_cvtsi32_si64(*(int*)p_u); \
    *(uint16_t *)p_buffer = 0;          \
    mm1 = _mm_cvtsi32_si64(*(int*)p_v); \
    mm4 = _mm_setzero_si64();           \
    mm6 = (__m64)*(uint64_t *)p_y;

#define MMX_YUV_MUL                                 \
    mm0 = _mm_unpacklo_pi8(mm0, mm4);               \
    mm1 = _mm_unpacklo_pi8(mm1, mm4);               \
    mm0 = _mm_subs_pi16(mm0, (__m64)mmx_80w);       \
    mm1 = _mm_subs_pi16(mm1, (__m64)mmx_80w);       \
    mm0 = _mm_slli_pi16(mm0, 3);                    \
    mm1 = _mm_slli_pi16(mm1, 3);                    \
    mm2 = mm0;                                      \
    mm3 = mm1;                                      \
    mm2 = _mm_mulhi_pi16(mm2, (__m64)mmx_U_green);  \
    mm3 = _mm_mulhi_pi16(mm3, (__m64)mmx_V_green);  \
    mm0 = _mm_mulhi_pi16(mm0, (__m64)mmx_U_blue);   \
    mm1 = _mm_mulhi_pi16(mm1, (__m64)mmx_V_red);    \
    mm2 = _mm_adds_pi16(mm2, mm3);                  \
    \
    mm6 = _mm_subs_pu8(mm6, (__m64)mmx_10w);        \
    mm7 = mm6;                                      \
    mm6 = _mm_and_si64(mm6, (__m64)mmx_00ffw);      \
    mm7 = _mm_srli_pi16(mm7, 8);                    \
    mm6 = _mm_slli_pi16(mm6, 3);                    \
    mm7 = _mm_slli_pi16(mm7, 3);                    \
    mm6 = _mm_mulhi_pi16(mm6, (__m64)mmx_Y_coeff);  \
    mm7 = _mm_mulhi_pi16(mm7, (__m64)mmx_Y_coeff);

#define MMX_YUV_ADD                     \
    mm3 = mm0;                          \
    mm4 = mm1;                          \
    mm5 = mm2;                          \
    mm0 = _mm_adds_pi16(mm0, mm6);      \
    mm3 = _mm_adds_pi16(mm3, mm7);      \
    mm1 = _mm_adds_pi16(mm1, mm6);      \
    mm4 = _mm_adds_pi16(mm4, mm7);      \
    mm2 = _mm_adds_pi16(mm2, mm6);      \
    mm5 = _mm_adds_pi16(mm5, mm7);      \
    \
    mm0 = _mm_packs_pu16(mm0, mm0);     \
    mm1 = _mm_packs_pu16(mm1, mm1);     \
    mm2 = _mm_packs_pu16(mm2, mm2);     \
    \
    mm3 = _mm_packs_pu16(mm3, mm3);     \
    mm4 = _mm_packs_pu16(mm4, mm4);     \
    mm5 = _mm_packs_pu16(mm5, mm5);     \
    \
    mm0 = _mm_unpacklo_pi8(mm0, mm3);   \
    mm1 = _mm_unpacklo_pi8(mm1, mm4);   \
    mm2 = _mm_unpacklo_pi8(mm2, mm5);

#define MMX_UNPACK_15                               \
    mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8);    \
    mm0 = _mm_srli_pi16(mm0, 3);                    \
    mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_f8);    \
    mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8);    \
    mm1 = _mm_srli_pi16(mm1, 1);                    \
    mm4 = _mm_setzero_si64();                       \
    mm5 = mm0;                                      \
    mm7 = mm2;                                      \
    \
    mm2 = _mm_unpacklo_pi8(mm2, mm4);               \
    mm0 = _mm_unpacklo_pi8(mm0, mm1);               \
    mm2 = _mm_slli_pi16(mm2, 2);                    \
    mm0 = _mm_or_si64(mm0, mm2);                    \
    mm6 = (__m64)*(uint64_t *)(p_y + 8);            \
    *(uint64_t *)p_buffer = (uint64_t)mm0;          \
    \
    mm7 = _mm_unpackhi_pi8(mm7, mm4);               \
    mm5 = _mm_unpackhi_pi8(mm5, mm1);               \
    mm7 = _mm_slli_pi16(mm7, 2);                    \
    mm0 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_u + 4)); \
    mm5 = _mm_or_si64(mm5, mm7);                    \
    mm1 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_v + 4)); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;

#define MMX_UNPACK_16                               \
    mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8);    \
    mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_fc);    \
    mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8);    \
    mm0 = _mm_srli_pi16(mm0, 3);                    \
    mm4 = _mm_setzero_si64();                       \
    mm5 = mm0;                                      \
    mm7 = mm2;                                      \
    \
    mm2 = _mm_unpacklo_pi8(mm2, mm4);               \
    mm0 = _mm_unpacklo_pi8(mm0, mm1);               \
    mm2 = _mm_slli_pi16(mm2, 3);                    \
    mm0 = _mm_or_si64(mm0, mm2);                    \
    mm6 = (__m64)*(uint64_t *)(p_y + 8);            \
    *(uint64_t *)p_buffer = (uint64_t)mm0;          \
    \
    mm7 = _mm_unpackhi_pi8(mm7, mm4);               \
    mm5 = _mm_unpackhi_pi8(mm5, mm1);               \
    mm7 = _mm_slli_pi16(mm7, 3);                    \
    mm0 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_u + 4)); \
    mm5 = _mm_or_si64(mm5, mm7);                    \
    mm1 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_v + 4)); \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;

#define MMX_UNPACK_32_ARGB                      \
    mm3 = _mm_setzero_si64();                   \
    mm4 = mm0;                                  \
    mm4 = _mm_unpacklo_pi8(mm4, mm2);           \
    mm5 = mm1;                                  \
    mm5 = _mm_unpacklo_pi8(mm5, mm3);           \
    mm6 = mm4;                                  \
    mm4 = _mm_unpacklo_pi16(mm4, mm5);          \
    *(uint64_t *)p_buffer = (uint64_t)mm4;      \
    mm6 = _mm_unpackhi_pi16(mm6, mm5);          \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6;\
    mm0 = _mm_unpackhi_pi8(mm0, mm2);           \
    mm1 = _mm_unpackhi_pi8(mm1, mm3);           \
    mm5 = mm0;                                  \
    mm5 = _mm_unpacklo_pi16(mm5, mm1);          \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;\
    mm0 = _mm_unpackhi_pi16(mm0, mm1);          \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;

#define MMX_UNPACK_32_RGBA                      \
    mm3 = _mm_setzero_si64();                   \
    mm4 = mm2;                                  \
    mm4 = _mm_unpacklo_pi8(mm4, mm1);           \
    mm3 = _mm_unpacklo_pi8(mm3, mm0);           \
    mm5 = mm3;                                  \
    mm3 = _mm_unpacklo_pi16(mm3, mm4);          \
    *(uint64_t *)p_buffer = (uint64_t)mm3;      \
    mm5 = _mm_unpackhi_pi16(mm5, mm4);          \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm5;\
    mm6 = _mm_setzero_si64();                   \
    mm2 = _mm_unpackhi_pi8(mm2, mm1);           \
    mm6 = _mm_unpackhi_pi8(mm6, mm0);           \
    mm0 = mm6;                                  \
    mm6 = _mm_unpacklo_pi16(mm6, mm2);          \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm6;\
    mm0 = _mm_unpackhi_pi16(mm0, mm2);          \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;

#define MMX_UNPACK_32_BGRA                      \
    mm3 = _mm_setzero_si64();                   \
    mm4 = mm2;                                  \
    mm4 = _mm_unpacklo_pi8(mm4, mm0);           \
    mm3 = _mm_unpacklo_pi8(mm3, mm1);           \
    mm5 = mm3;                                  \
    mm3 = _mm_unpacklo_pi16(mm3, mm4);          \
    *(uint64_t *)p_buffer = (uint64_t)mm3;      \
    mm5 = _mm_unpackhi_pi16(mm5, mm4);          \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm5;\
    mm6 = _mm_setzero_si64();                   \
    mm2 = _mm_unpackhi_pi8(mm2, mm0);           \
    mm6 = _mm_unpackhi_pi8(mm6, mm1);           \
    mm0 = mm6;                                  \
    mm6 = _mm_unpacklo_pi16(mm6, mm2);          \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm6;\
    mm0 = _mm_unpackhi_pi16(mm0, mm2);          \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;

#define MMX_UNPACK_32_ABGR                      \
    mm3 = _mm_setzero_si64();                   \
    mm4 = mm1;                                  \
    mm4 = _mm_unpacklo_pi8(mm4, mm2);           \
    mm5 = mm0;                                  \
    mm5 = _mm_unpacklo_pi8(mm5, mm3);           \
    mm6 = mm4;                                  \
    mm4 = _mm_unpacklo_pi16(mm4, mm5);          \
    *(uint64_t *)p_buffer = (uint64_t)mm4;      \
    mm6 = _mm_unpackhi_pi16(mm6, mm5);          \
    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6;\
    mm1 = _mm_unpackhi_pi8(mm1, mm2);           \
    mm0 = _mm_unpackhi_pi8(mm0, mm3);           \
    mm2 = mm1;                                  \
    mm1 = _mm_unpacklo_pi16(mm1, mm0);          \
    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm1;\
    mm2 = _mm_unpackhi_pi16(mm2, mm0);          \
    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm2;

#endif