/*****************************************************************************
 * i420_yuy2.h : YUV to YUV conversion module for vlc
 *****************************************************************************
 * Copyright (C) 2000, 2001 VideoLAN
 * $Id: i420_yuy2.h,v 1.7 2002/06/01 11:08:24 sam Exp $
 *
 * Authors: Samuel Hocevar <sam@zoy.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111, USA.
 *****************************************************************************/

#ifdef MODULE_NAME_IS_chroma_i420_yuy2_mmx

#define UNUSED_LONGLONG(foo) \
    unsigned long long foo __asm__ (#foo) __attribute__((unused))
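/* The __asm__ (#foo) alias gives each constant below a predictable
 * assembler symbol, so the MMX strings can reference it by name; the
 * unused attribute silences warnings when a constant is not referenced. */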
UNUSED_LONGLONG(woo_00ffw) = 0x00ff00ff00ff00ff;
UNUSED_LONGLONG(woo_80w)   = 0x0000000080808080;

#define MMX_LOAD "                                                        \n\
movl         %9,%%ebx                                                     \n\
"

#define MMX_SAVE "                                                        \n\
movl      %%ebx,%9                                                        \n\
"

#define MMX_INC "                                                         \n\
addl         $16, %0                                                      \n\
addl         $16, %1                                                      \n\
addl         $8, %2                                                       \n\
addl         $8, %3                                                       \n\
addl         $4, %%eax                                                    \n\
addl         $4, %%ebx                                                    \n\
"

#define MMX_CALL(MMX_INSTRUCTIONS)                                  \
    __asm__ __volatile__(                                           \
        MMX_LOAD                                                    \
        ".align 8 \n\t"                                             \
        MMX_INSTRUCTIONS                                            \
        MMX_INC                                                     \
        MMX_SAVE                                                    \
        : "=c" (p_line1), "=d" (p_line2), "=D" (p_y1), "=S" (p_y2)  \
        :  "c" (p_line1),  "d" (p_line2),  "D" (p_y1),  "S" (p_y2), \
          "a" (p_u), "m" (p_v)                                      \
        : "ebx", "memory" );

#define MMX_YUV420_YUYV "                                                 \n\
movq       (%2), %%mm0  # Load 8 Y            y7 y6 y5 y4 y3 y2 y1 y0     \n\
movd    (%%eax), %%mm1  # Load 4 Cb           00 00 00 00 u3 u2 u1 u0     \n\
movd    (%%ebx), %%mm2  # Load 4 Cr           00 00 00 00 v3 v2 v1 v0     \n\
punpcklbw %%mm2, %%mm1  #                     v3 u3 v2 u2 v1 u1 v0 u0     \n\
movq      %%mm0, %%mm2  #                     y7 y6 y5 y4 y3 y2 y1 y0     \n\
punpcklbw %%mm1, %%mm2  #                     v1 y3 u1 y2 v0 y1 u0 y0     \n\
movq      %%mm2, (%0)   # Store low YUYV                                  \n\
punpckhbw %%mm1, %%mm0  #                     v3 y7 u3 y6 v2 y5 u2 y4     \n\
movq      %%mm0, 8(%0)  # Store high YUYV                                 \n\
movq       (%3), %%mm0  # Load 8 Y            Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
movq      %%mm0, %%mm2  #                     Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
punpcklbw %%mm1, %%mm2  #                     v1 Y3 u1 Y2 v0 Y1 u0 Y0     \n\
movq      %%mm2, (%1)   # Store low YUYV                                  \n\
punpckhbw %%mm1, %%mm0  #                     v3 Y7 u3 Y6 v2 Y5 u2 Y4     \n\
movq      %%mm0, 8(%1)  # Store high YUYV                                 \n\
"

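/*
 * Illustrative sketch only, not part of the original header: roughly how
 * MMX_CALL is expected to be driven by the converter in i420_yuy2.c. The
 * variables p_line1, p_line2, p_y1, p_y2, p_u and p_v carry the names the
 * macro requires; the function name, the u8 typedef (u16 is already used
 * below), the tightly packed planes (no row padding) and the width being a
 * multiple of 8 are assumptions made here for brevity, so the block is
 * disabled with #if 0.
 */
#if 0
static void I420_YUYV_sketch( u8 *p_dst, u8 *p_src_y, u8 *p_src_u,
                              u8 *p_src_v, int i_width, int i_height )
{
    u8 *p_line1, *p_line2 = p_dst;      /* two output rows per pass */
    u8 *p_y1, *p_y2 = p_src_y;          /* two luma rows per pass */
    u8 *p_u = p_src_u;                  /* one chroma row serves both */
    u8 *p_v = p_src_v;
    int i_x, i_y;

    for( i_y = i_height / 2 ; i_y-- ; )
    {
        p_line1 = p_line2;              /* the previous pass left p_line2 */
        p_line2 += i_width * 2;         /* and p_y2 at this row pair      */
        p_y1 = p_y2;
        p_y2 += i_width;

        for( i_x = i_width / 8 ; i_x-- ; )
        {
            MMX_CALL( MMX_YUV420_YUYV );    /* 8 pixels from each row */
        }
    }
}
#endif
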
#define MMX_YUV420_YVYU "                                                 \n\
movq       (%2), %%mm0  # Load 8 Y            y7 y6 y5 y4 y3 y2 y1 y0     \n\
movd    (%%eax), %%mm2  # Load 4 Cb           00 00 00 00 u3 u2 u1 u0     \n\
movd    (%%ebx), %%mm1  # Load 4 Cr           00 00 00 00 v3 v2 v1 v0     \n\
punpcklbw %%mm2, %%mm1  #                     u3 v3 u2 v2 u1 v1 u0 v0     \n\
movq      %%mm0, %%mm2  #                     y7 y6 y5 y4 y3 y2 y1 y0     \n\
punpcklbw %%mm1, %%mm2  #                     u1 y3 v1 y2 u0 y1 v0 y0     \n\
movq      %%mm2, (%0)   # Store low YVYU                                  \n\
punpckhbw %%mm1, %%mm0  #                     u3 y7 v3 y6 u2 y5 v2 y4     \n\
movq      %%mm0, 8(%0)  # Store high YVYU                                 \n\
movq       (%3), %%mm0  # Load 8 Y            Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
movq      %%mm0, %%mm2  #                     Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
punpcklbw %%mm1, %%mm2  #                     u1 Y3 v1 Y2 u0 Y1 v0 Y0     \n\
movq      %%mm2, (%1)   # Store low YVYU                                  \n\
punpckhbw %%mm1, %%mm0  #                     u3 Y7 v3 Y6 u2 Y5 v2 Y4     \n\
movq      %%mm0, 8(%1)  # Store high YVYU                                 \n\
"

#define MMX_YUV420_UYVY "                                                 \n\
movq       (%2), %%mm0  # Load 8 Y            y7 y6 y5 y4 y3 y2 y1 y0     \n\
movq       (%3), %%mm3  # Load 8 Y            Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
movd    (%%eax), %%mm1  # Load 4 Cb           00 00 00 00 u3 u2 u1 u0     \n\
movd    (%%ebx), %%mm2  # Load 4 Cr           00 00 00 00 v3 v2 v1 v0     \n\
punpcklbw %%mm2, %%mm1  #                     v3 u3 v2 u2 v1 u1 v0 u0     \n\
movq      %%mm1, %%mm2  #                     v3 u3 v2 u2 v1 u1 v0 u0     \n\
punpcklbw %%mm0, %%mm2  #                     y3 v1 y2 u1 y1 v0 y0 u0     \n\
movq      %%mm2, (%0)   # Store low UYVY                                  \n\
movq      %%mm1, %%mm2  #                     v3 u3 v2 u2 v1 u1 v0 u0     \n\
punpckhbw %%mm0, %%mm2  #                     y7 v3 y6 u3 y5 v2 y4 u2     \n\
movq      %%mm2, 8(%0)  # Store high UYVY                                 \n\
movq      %%mm1, %%mm2  #                     v3 u3 v2 u2 v1 u1 v0 u0     \n\
punpcklbw %%mm3, %%mm2  #                     Y3 v1 Y2 u1 Y1 v0 Y0 u0     \n\
movq      %%mm2, (%1)   # Store low UYVY                                  \n\
punpckhbw %%mm3, %%mm1  #                     Y7 v3 Y6 u3 Y5 v2 Y4 u2     \n\
movq      %%mm1, 8(%1)  # Store high UYVY                                 \n\
"

/* FIXME: this code does not work! The chroma seems to be wrong. */
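/* A likely culprit (unverified): the psubsw below is a word-wise, signed,
 * saturating subtraction, so the borrow and the saturation of each 16-bit
 * lane leak across the adjacent U/V byte pair; a byte-wise psubb against
 * 0x80 in each chroma byte is probably what was intended. */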
#define MMX_YUV420_Y211 "                                                 \n\
movq       (%2), %%mm0  # Load 8 Y            y7 y6 y5 y4 y3 y2 y1 y0     \n\
movq       (%3), %%mm1  # Load 8 Y            Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
movd    (%%eax), %%mm2  # Load 4 Cb           00 00 00 00 u3 u2 u1 u0     \n\
movd    (%%ebx), %%mm3  # Load 4 Cr           00 00 00 00 v3 v2 v1 v0     \n\
pand  woo_00ffw, %%mm0  # get Y even          00 y6 00 y4 00 y2 00 y0     \n\
packuswb  %%mm0, %%mm0  # pack Y              y6 y4 y2 y0 y6 y4 y2 y0     \n\
pand  woo_00ffw, %%mm2  # get U even          00 00 00 00 00 u2 00 u0     \n\
packuswb  %%mm2, %%mm2  # pack U              00 00 u2 u0 00 00 u2 u0     \n\
pand  woo_00ffw, %%mm3  # get V even          00 00 00 00 00 v2 00 v0     \n\
packuswb  %%mm3, %%mm3  # pack V              00 00 v2 v0 00 00 v2 v0     \n\
punpcklbw %%mm3, %%mm2  #                     00 00 00 00 v2 u2 v0 u0     \n\
psubsw  woo_80w, %%mm2  # U,V -= 128                                      \n\
punpcklbw %%mm2, %%mm0  #                     v2 y6 u2 y4 v0 y2 u0 y0     \n\
movq      %%mm0, (%0)   # Store Y211                                      \n\
pand  woo_00ffw, %%mm1  # get Y even          00 Y6 00 Y4 00 Y2 00 Y0     \n\
packuswb  %%mm1, %%mm1  # pack Y              Y6 Y4 Y2 Y0 Y6 Y4 Y2 Y0     \n\
punpcklbw %%mm2, %%mm1  #                     v2 Y6 u2 Y4 v0 Y2 u0 Y0     \n\
movq      %%mm1, (%1)   # Store Y211                                      \n\
"

#else

#define C_YUV420_YUYV( )                                                    \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
    *(p_line1)++ =            *(p_line2)++ = *(p_u)++;                      \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
    *(p_line1)++ =            *(p_line2)++ = *(p_v)++;                      \

#define C_YUV420_YVYU( )                                                    \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
    *(p_line1)++ =            *(p_line2)++ = *(p_v)++;                      \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
    *(p_line1)++ =            *(p_line2)++ = *(p_u)++;                      \

#define C_YUV420_UYVY( )                                                    \
    *(p_line1)++ =            *(p_line2)++ = *(p_u)++;                      \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \
    *(p_line1)++ =            *(p_line2)++ = *(p_v)++;                      \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;                     \

#define C_YUV420_Y211( )                                                    \
    *(p_line1)++ = *(p_y1); p_y1 += 2;                                      \
    *(p_line2)++ = *(p_y2); p_y2 += 2;                                      \
    *(p_line1)++ = *(p_line2)++ = *(p_u) - 0x80; p_u += 2;                  \
    *(p_line1)++ = *(p_y1); p_y1 += 2;                                      \
    *(p_line2)++ = *(p_y2); p_y2 += 2;                                      \
    *(p_line1)++ = *(p_line2)++ = *(p_v) - 0x80; p_v += 2;                  \

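/*
 * Illustrative sketch only, not part of the original header: the scalar
 * fallback is presumably driven the same way as the MMX path, except that
 * each C_YUV420_* invocation emits 2 pixels per row instead of 8. The
 * function name, the u8 typedef and the tightly packed planes with a width
 * that is a multiple of 2 are assumptions, so the block is disabled with
 * #if 0.
 */
#if 0
static void I420_YUYV_c_sketch( u8 *p_dst, u8 *p_src_y, u8 *p_src_u,
                                u8 *p_src_v, int i_width, int i_height )
{
    u8 *p_line1, *p_line2 = p_dst;
    u8 *p_y1, *p_y2 = p_src_y;
    u8 *p_u = p_src_u;
    u8 *p_v = p_src_v;
    int i_x, i_y;

    for( i_y = i_height / 2 ; i_y-- ; )
    {
        p_line1 = p_line2;
        p_line2 += i_width * 2;
        p_y1 = p_y2;
        p_y2 += i_width;

        for( i_x = i_width / 2 ; i_x-- ; )
        {
            C_YUV420_YUYV( );           /* 2 pixels from each row */
        }
    }
}
#endif
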
#endif