diff --git a/modules/video_chroma/i422_yuy2.h b/modules/video_chroma/i422_yuy2.h
index 1b0405bb2750f66bd47138a00bcd7ccbda64b67e..7d2c469128180e8a7721c90f6cb8bf6044da96f6 100644
--- a/modules/video_chroma/i422_yuy2.h
+++ b/modules/video_chroma/i422_yuy2.h
@@ -1,25 +1,25 @@
 /*****************************************************************************
  * i422_yuy2.h : YUV to YUV conversion module for vlc
  *****************************************************************************
- * Copyright (C) 2002 the VideoLAN team
+ * Copyright (C) 2002 VLC authors and VideoLAN
  * $Id$
  *
  * Authors: Samuel Hocevar <sam@zoy.org>
  *          Damien Fouilleul <damienf@videolan.org>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or
  * (at your option) any later version.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
  *****************************************************************************/
 
 #ifdef MODULE_NAME_IS_i422_yuy2_mmx
@@ -35,7 +35,8 @@
         MMX_INSTRUCTIONS                    \
         :                                   \
         : "r" (p_line), "r" (p_y),          \
-          "r" (p_u), "r" (p_v) );           \
+          "r" (p_u), "r" (p_v)              \
+        : "mm0", "mm1", "mm2" );            \
         p_line += 16; p_y += 8;             \
         p_u += 4; p_v += 4;                 \
     } while(0)
@@ -107,7 +108,7 @@ movq      %%mm1, 8(%0)  # Store high UYVY                                 \n\
     *(uint64_t*)p_line = (uint64_t)mm2;     \
     mm0 = _mm_unpackhi_pi8(mm0, mm1);       \
     *(uint64_t*)(p_line+8) = (uint64_t)mm0;
-    
+
 #define MMX_YUV422_YVYU                     \
     mm0 = (__m64)*(uint64_t*)p_y;           \
     mm2 = _mm_cvtsi32_si64(*(int*)p_u);     \
@@ -145,7 +146,8 @@ movq      %%mm1, 8(%0)  # Store high UYVY                                 \n\
         MMX_INSTRUCTIONS                    \
         :                                   \
         : "r" (p_line), "r" (p_y),          \
-          "r" (p_u), "r" (p_v) );           \
+          "r" (p_u), "r" (p_v)              \
+        : "xmm0", "xmm1", "xmm2" );         \
         p_line += 32; p_y += 16;            \
         p_u += 8; p_v += 8;                 \
     } while(0)
@@ -233,9 +235,82 @@ movdqu    %%xmm1, 16(%0)  # Store high UYVY                               \n\
 
 #include <emmintrin.h>
 
+#define SSE2_CALL(SSE2_INSTRUCTIONS)    \
+    do {                                \
+        __m128i xmm0, xmm1, xmm2;       \
+        SSE2_INSTRUCTIONS               \
+        p_line += 32; p_y += 16;        \
+        p_u += 8; p_v += 8;             \
+    } while(0)
 
 #define SSE2_END  _mm_sfence()
 
+#define SSE2_YUV422_YUYV_ALIGNED                \
+    xmm0 = _mm_load_si128((__m128i *)p_y);      \
+    xmm1 = _mm_loadl_epi64((__m128i *)p_u);     \
+    xmm2 = _mm_loadl_epi64((__m128i *)p_v);     \
+    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
+    xmm2 = xmm0;                                \
+    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);       \
+    _mm_stream_si128((__m128i*)(p_line), xmm2); \
+    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);       \
+    _mm_stream_si128((__m128i*)(p_line+16), xmm0);
+
+#define SSE2_YUV422_YUYV_UNALIGNED              \
+    xmm0 = _mm_loadu_si128((__m128i *)p_y);     \
+    xmm1 = _mm_loadl_epi64((__m128i *)p_u);     \
+    xmm2 = _mm_loadl_epi64((__m128i *)p_v);     \
+    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
+    xmm2 = xmm0;                                \
+    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);       \
+    _mm_storeu_si128((__m128i*)(p_line), xmm2); \
+    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);       \
+    _mm_storeu_si128((__m128i*)(p_line+16), xmm0);
+
+#define SSE2_YUV422_YVYU_ALIGNED                \
+    xmm0 = _mm_load_si128((__m128i *)p_y);      \
+    xmm2 = _mm_loadl_epi64((__m128i *)p_u);     \
+    xmm1 = _mm_loadl_epi64((__m128i *)p_v);     \
+    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
+    xmm2 = xmm0;                                \
+    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);       \
+    _mm_stream_si128((__m128i*)(p_line), xmm2); \
+    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);       \
+    _mm_stream_si128((__m128i*)(p_line+16), xmm0);
+
+#define SSE2_YUV422_YVYU_UNALIGNED              \
+    xmm0 = _mm_loadu_si128((__m128i *)p_y);     \
+    xmm2 = _mm_loadl_epi64((__m128i *)p_u);     \
+    xmm1 = _mm_loadl_epi64((__m128i *)p_v);     \
+    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
+    xmm2 = xmm0;                                \
+    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);       \
+    _mm_storeu_si128((__m128i*)(p_line), xmm2); \
+    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);       \
+    _mm_storeu_si128((__m128i*)(p_line+16), xmm0);
+
+#define SSE2_YUV422_UYVY_ALIGNED                \
+    xmm0 = _mm_load_si128((__m128i *)p_y);      \
+    xmm1 = _mm_loadl_epi64((__m128i *)p_u);     \
+    xmm2 = _mm_loadl_epi64((__m128i *)p_v);     \
+    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
+    xmm2 = xmm1;                                \
+    xmm2 = _mm_unpacklo_epi8(xmm2, xmm0);       \
+    _mm_stream_si128((__m128i*)(p_line), xmm2); \
+    xmm1 = _mm_unpackhi_epi8(xmm1, xmm0);       \
+    _mm_stream_si128((__m128i*)(p_line+16), xmm1);
+
+#define SSE2_YUV422_UYVY_UNALIGNED              \
+    xmm0 = _mm_loadu_si128((__m128i *)p_y);     \
+    xmm1 = _mm_loadl_epi64((__m128i *)p_u);     \
+    xmm2 = _mm_loadl_epi64((__m128i *)p_v);     \
+    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
+    xmm2 = xmm1;                                \
+    xmm2 = _mm_unpacklo_epi8(xmm2, xmm0);       \
+    _mm_storeu_si128((__m128i*)(p_line), xmm2); \
+    xmm1 = _mm_unpackhi_epi8(xmm1, xmm0);       \
+    _mm_storeu_si128((__m128i*)(p_line+16), xmm1);
+
 #endif
 
 #endif
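
For reference, outside the patch itself: the inline-asm hunks add the MMX/XMM registers the assembly writes ("mm0"-"mm2", "xmm0"-"xmm2") to the asm clobber lists, so the compiler no longer assumes those registers survive the block. The new SSE2 intrinsic macros interleave one row of planar 4:2:2 samples into packed output: each iteration loads 16 Y bytes plus 8 U and 8 V bytes, interleaves U with V, then unpacks the result against Y to emit 32 output bytes. Below is a minimal standalone sketch of the aligned YUYV case, using only intrinsics the patch introduces; the function name, signature, and loop bounds are illustrative assumptions, not VLC's API.

#include <emmintrin.h>
#include <stdint.h>

/* Illustrative sketch (not VLC code): pack one row of planar 4:2:2
 * Y/U/V into YUYV, 16 pixels (32 output bytes) per iteration, the
 * same pattern as SSE2_YUV422_YUYV_ALIGNED above.  Assumes 16-byte
 * aligned pointers and a width that is a multiple of 16. */
static void i422_row_to_yuyv(uint8_t *p_line, const uint8_t *p_y,
                             const uint8_t *p_u, const uint8_t *p_v,
                             int i_width)
{
    for (int x = 0; x < i_width; x += 16)
    {
        __m128i xmm0 = _mm_load_si128((const __m128i *)p_y);  /* Y0..Y15 */
        __m128i xmm1 = _mm_loadl_epi64((const __m128i *)p_u); /* U0..U7  */
        __m128i xmm2 = _mm_loadl_epi64((const __m128i *)p_v); /* V0..V7  */
        xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);         /* U0 V0 U1 V1 ... */
        xmm2 = _mm_unpacklo_epi8(xmm0, xmm1);         /* Y0 U0 Y1 V0 ... */
        _mm_stream_si128((__m128i *)p_line, xmm2);
        xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);         /* Y8 U4 Y9 V4 ... */
        _mm_stream_si128((__m128i *)(p_line + 16), xmm0);
        p_line += 32; p_y += 16; p_u += 8; p_v += 8;
    }
    _mm_sfence(); /* fence the non-temporal stores, cf. SSE2_END */
}

The _mm_stream_si128 stores are non-temporal (they bypass the cache, matching the non-temporal store path of the asm variant), which is why they must be followed by the store fence that SSE2_END provides.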