git.sesse.net Git - vlc/commitdiff
video chromas: finalize SSE2 improvements
author    Damien Fouilleul <damienf@videolan.org>
          Wed, 15 Aug 2007 16:15:45 +0000 (16:15 +0000)
committer Damien Fouilleul <damienf@videolan.org>
          Wed, 15 Aug 2007 16:15:45 +0000 (16:15 +0000)
AUTHORS
NEWS
modules/video_chroma/i420_yuy2.h
modules/video_chroma/i422_yuy2.c
modules/video_chroma/i422_yuy2.h

diff --git a/AUTHORS b/AUTHORS
index cf8e02eef7d9f765084dc0498f42bc3ce51df42a..d9595ecdb4f32be5e4242060868c1799c6f60e55 100644 (file)
--- a/AUTHORS
+++ b/AUTHORS
@@ -190,6 +190,9 @@ E: Damien.Fouilleul@laposte.net
 C: Quovodis
 D: ActiveX control
 D: Safari/Firefox plugin for MacOS X
+D: Direct3D Video output
+D: SSE2 chroma converters
+D: improved MMX chroma converters
 S: Ireland
 
 N: Alexis Guillard
diff --git a/NEWS b/NEWS
index d0cf308cd40d6de7d01c4bbdadb99d117713c578..a12068786da722145977126bc0f89a407fff654e 100644 (file)
--- a/NEWS
+++ b/NEWS
@@ -81,6 +81,8 @@ Video output and filters:
    was previously part of the mosaic module.
  * Fix random characters problem in RSS filter.
  * Add rotate-deciangle for more precision on rotate filter
+ * Support for Intel SSE2 instruction set in chroma converters
+ * Improved use of Intel MMX instruction set in chroma converters
 
 Audio output
  * Replay gain support.
diff --git a/modules/video_chroma/i420_yuy2.h b/modules/video_chroma/i420_yuy2.h
index 1f35a3061f19fc3407dafc8e8922fadcffa323a3..a630157dc2e8c34012d8a2784ecff0b69cf09ae0 100644 (file)
--- a/modules/video_chroma/i420_yuy2.h
+++ b/modules/video_chroma/i420_yuy2.h
@@ -366,8 +366,8 @@ movdqu    %%xmm1, 16(%1)  # Store high UYVY                               \n\
 #define SSE2_YUV420_YUYV_UNALIGNED                  \
     xmm1 = _mm_loadl_epi64((__m128i *)p_u);         \
     xmm2 = _mm_loadl_epi64((__m128i *)p_v);         \
-    xmm0 = _mm_load_si128((__m128i *)p_y1);         \
-    xmm3 = _mm_load_si128((__m128i *)p_y2);         \
+    xmm0 = _mm_loadu_si128((__m128i *)p_y1);        \
+    xmm3 = _mm_loadu_si128((__m128i *)p_y2);        \
     _mm_prefetch(p_line1, _MM_HINT_NTA);            \
     _mm_prefetch(p_line2, _MM_HINT_NTA);            \
     xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);           \
@@ -402,8 +402,8 @@ movdqu    %%xmm1, 16(%1)  # Store high UYVY                               \n\
 #define SSE2_YUV420_YVYU_UNALIGNED                  \
     xmm1 = _mm_loadl_epi64((__m128i *)p_v);         \
     xmm2 = _mm_loadl_epi64((__m128i *)p_u);         \
-    xmm0 = _mm_load_si128((__m128i *)p_y1);         \
-    xmm3 = _mm_load_si128((__m128i *)p_y2);         \
+    xmm0 = _mm_loadu_si128((__m128i *)p_y1);        \
+    xmm3 = _mm_loadu_si128((__m128i *)p_y2);        \
     _mm_prefetch(p_line1, _MM_HINT_NTA);            \
     _mm_prefetch(p_line2, _MM_HINT_NTA);            \
     xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);           \
@@ -439,8 +439,8 @@ movdqu    %%xmm1, 16(%1)  # Store high UYVY                               \n\
 #define SSE2_YUV420_UYVY_UNALIGNED                  \
     xmm1 = _mm_loadl_epi64((__m128i *)p_u);         \
     xmm2 = _mm_loadl_epi64((__m128i *)p_v);         \
-    xmm0 = _mm_load_si128((__m128i *)p_y1);         \
-    xmm3 = _mm_load_si128((__m128i *)p_y2);         \
+    xmm0 = _mm_loadu_si128((__m128i *)p_y1);        \
+    xmm3 = _mm_loadu_si128((__m128i *)p_y2);        \
     _mm_prefetch(p_line1, _MM_HINT_NTA);            \
     _mm_prefetch(p_line2, _MM_HINT_NTA);            \
     xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);           \
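
The hunks above only touch the *_UNALIGNED variants of the i420 macros: the 16-byte Y-plane fetches switch from _mm_load_si128 to _mm_loadu_si128, since _mm_load_si128 requires a 16-byte-aligned address and faults otherwise, while _mm_loadu_si128 accepts any address. A minimal standalone sketch of that distinction, not part of this commit (the copy16 name and buffers are hypothetical):

#include <emmintrin.h>
#include <stdint.h>

/* Copy n bytes with SSE2; src/dst may start mid-line, so 16-byte alignment
 * is not guaranteed and the unaligned load/store forms are used. */
static void copy16( uint8_t *dst, const uint8_t *src, int n )
{
    int i;
    for( i = 0; i + 16 <= n; i += 16 )
    {
        __m128i v = _mm_loadu_si128( (const __m128i *)(src + i) );
        _mm_storeu_si128( (__m128i *)(dst + i), v );
    }
    for( ; i < n; i++ )          /* scalar tail for the remainder */
        dst[i] = src[i];
}
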
diff --git a/modules/video_chroma/i422_yuy2.c b/modules/video_chroma/i422_yuy2.c
index c255079f318d7b669b90bab84ec0749d1802b307..84eaf90aac2049ae494166537e9eb5e1269cd2a7 100644 (file)
--- a/modules/video_chroma/i422_yuy2.c
+++ b/modules/video_chroma/i422_yuy2.c
@@ -442,6 +442,61 @@ static void I422_cyuv( vout_thread_t *p_vout, picture_t *p_source,
 
     int i_x, i_y;
 
+    const int i_source_margin = p_source->p[0].i_pitch
+                                 - p_source->p[0].i_visible_pitch;
+    const int i_source_margin_c = p_source->p[1].i_pitch
+                                 - p_source->p[1].i_visible_pitch;
+    const int i_dest_margin = p_dest->p->i_pitch
+                               - p_dest->p->i_visible_pitch;
+
+#if defined (MODULE_NAME_IS_i422_yuy2_sse2)
+
+    if( 0 == (15 & (p_source->p[Y_PLANE].i_pitch|p_dest->p->i_pitch|
+        ((int)p_line|(int)p_y))) )
+    {
+        /* use faster SSE2 aligned fetch and store */
+        for( i_y = p_vout->render.i_height ; i_y-- ; )
+        {
+            p_line -= 2 * p_dest->p->i_pitch;
+
+            for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+            {
+                SSE2_CALL( SSE2_YUV422_UYVY_ALIGNED );
+            }
+            for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+            {
+                C_YUV422_UYVY( p_line, p_y, p_u, p_v );
+            }
+            p_y += i_source_margin;
+            p_u += i_source_margin_c;
+            p_v += i_source_margin_c;
+            p_line += i_dest_margin;
+        }
+    }
+    else {
+        /* use slower SSE2 unaligned fetch and store */
+        for( i_y = p_vout->render.i_height ; i_y-- ; )
+        {
+            p_line -= 2 * p_dest->p->i_pitch;
+
+            for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+            {
+                SSE2_CALL( SSE2_YUV422_UYVY_UNALIGNED );
+            }
+            for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+            {
+                C_YUV422_UYVY( p_line, p_y, p_u, p_v );
+            }
+            p_y += i_source_margin;
+            p_u += i_source_margin_c;
+            p_v += i_source_margin_c;
+            p_line += i_dest_margin;
+        }
+    }
+    SSE2_END;
+
+#else
+
     for( i_y = p_vout->render.i_height ; i_y-- ; )
     {
         for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
@@ -457,12 +512,18 @@ static void I422_cyuv( vout_thread_t *p_vout, picture_t *p_source,
             MMX_CALL( MMX_YUV422_UYVY );
 #endif
         }
+        p_y += i_source_margin;
+        p_u += i_source_margin_c;
+        p_v += i_source_margin_c;
+        p_line += i_dest_margin;
     }
 #if defined (MODULE_NAME_IS_i422_yuy2_mmx)
     MMX_END;
 #elif defined (MODULE_NAME_IS_i422_yuy2_sse2)
     SSE2_END;
 #endif
+
+#endif
 }
 
 /*****************************************************************************
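
The I422_cyuv changes above add per-row margin handling and a runtime choice between the aligned and unaligned SSE2 paths: the source and destination pitches and the start pointers are OR-ed together, and if no bit in the low 4 bits is set, every row stays 16-byte aligned for the whole picture. A minimal sketch of that test, not part of this commit (the helper name is hypothetical):

#include <stdint.h>

static int rows_are_16byte_aligned( const uint8_t *p_src, int i_src_pitch,
                                    const uint8_t *p_dst, int i_dst_pitch )
{
    /* uintptr_t avoids truncating the pointers on 64-bit targets; the
     * committed code casts to int, which is sufficient on 32-bit x86. */
    uintptr_t bits = (uintptr_t)p_src | (uintptr_t)p_dst
                   | (uintptr_t)i_src_pitch | (uintptr_t)i_dst_pitch;
    return ( bits & 15 ) == 0;
}
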
diff --git a/modules/video_chroma/i422_yuy2.h b/modules/video_chroma/i422_yuy2.h
index 1b0405bb2750f66bd47138a00bcd7ccbda64b67e..68057c7c04431ba3928d1e9c7aebd6faba271d5e 100644 (file)
--- a/modules/video_chroma/i422_yuy2.h
+++ b/modules/video_chroma/i422_yuy2.h
@@ -233,9 +233,82 @@ movdqu    %%xmm1, 16(%0)  # Store high UYVY                               \n\
 
 #include <emmintrin.h>
 
+#define SSE2_CALL(SSE2_INSTRUCTIONS)    \
+    do {                                \
+        __m128i xmm0, xmm1, xmm2;        \
+        SSE2_INSTRUCTIONS               \
+        p_line += 32; p_y += 16;        \
+        p_u += 8; p_v += 8;             \
+    } while(0)
 
 #define SSE2_END  _mm_sfence()
 
+#define SSE2_YUV422_YUYV_ALIGNED                \
+    xmm0 = _mm_load_si128((__m128i *)p_y);      \
+    xmm1 = _mm_loadl_epi64((__m128i *)p_u);     \
+    xmm2 = _mm_loadl_epi64((__m128i *)p_v);     \
+    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
+    xmm2 = xmm0;                                \
+    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);       \
+    _mm_stream_si128((__m128i*)(p_line), xmm2); \
+    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);       \
+    _mm_stream_si128((__m128i*)(p_line+16), xmm0);
+#define SSE2_YUV422_YUYV_UNALIGNED              \
+    xmm0 = _mm_loadu_si128((__m128i *)p_y);     \
+    xmm1 = _mm_loadl_epi64((__m128i *)p_u);     \
+    xmm2 = _mm_loadl_epi64((__m128i *)p_v);     \
+    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
+    xmm2 = xmm0;                                \
+    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);       \
+    _mm_storeu_si128((__m128i*)(p_line), xmm2); \
+    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);       \
+    _mm_storeu_si128((__m128i*)(p_line+16), xmm0);
+#define SSE2_YUV422_YVYU_ALIGNED                \
+    xmm0 = _mm_load_si128((__m128i *)p_y);      \
+    xmm2 = _mm_loadl_epi64((__m128i *)p_u);     \
+    xmm1 = _mm_loadl_epi64((__m128i *)p_v);     \
+    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
+    xmm2 = xmm0;                                \
+    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);       \
+    _mm_stream_si128((__m128i*)(p_line), xmm2); \
+    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);       \
+    _mm_stream_si128((__m128i*)(p_line+16), xmm0);
+
+#define SSE2_YUV422_YVYU_UNALIGNED              \
+    xmm0 = _mm_loadu_si128((__m128i *)p_y);     \
+    xmm2 = _mm_loadl_epi64((__m128i *)p_u);     \
+    xmm1 = _mm_loadl_epi64((__m128i *)p_v);     \
+    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
+    xmm2 = xmm0;                                \
+    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);       \
+    _mm_storeu_si128((__m128i*)(p_line), xmm2); \
+    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);       \
+    _mm_storeu_si128((__m128i*)(p_line+16), xmm0);
+
+#define SSE2_YUV422_UYVY_ALIGNED                \
+    xmm0 = _mm_load_si128((__m128i *)p_y);      \
+    xmm1 = _mm_loadl_epi64((__m128i *)p_u);     \
+    xmm2 = _mm_loadl_epi64((__m128i *)p_v);     \
+    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
+    xmm2 = xmm1;                                \
+    xmm2 = _mm_unpacklo_epi8(xmm2, xmm0);       \
+    _mm_stream_si128((__m128i*)(p_line), xmm2); \
+    xmm1 = _mm_unpackhi_epi8(xmm1, xmm0);       \
+    _mm_stream_si128((__m128i*)(p_line+16), xmm1);
+
+#define SSE2_YUV422_UYVY_UNALIGNED              \
+    xmm0 = _mm_loadu_si128((__m128i *)p_y);     \
+    xmm1 = _mm_loadl_epi64((__m128i *)p_u);     \
+    xmm2 = _mm_loadl_epi64((__m128i *)p_v);     \
+    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
+    xmm2 = xmm1;                                \
+    xmm2 = _mm_unpacklo_epi8(xmm2, xmm0);       \
+    _mm_storeu_si128((__m128i*)(p_line), xmm2); \
+    xmm1 = _mm_unpackhi_epi8(xmm1, xmm0);       \
+    _mm_storeu_si128((__m128i*)(p_line+16), xmm1);
+
 #endif
 
 #endif
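
For reference, a plain C version of what one SSE2_CALL(SSE2_YUV422_YUYV_ALIGNED) invocation above computes: interleaving 16 Y, 8 U and 8 V samples into 32 bytes of YUYV and advancing the pointers the same way SSE2_CALL does. This is only an illustration, not part of the commit, and the function name is hypothetical:

#include <stdint.h>

static void yuv422_to_yuyv_16px( uint8_t **pp_line, const uint8_t **pp_y,
                                 const uint8_t **pp_u, const uint8_t **pp_v )
{
    uint8_t *p_line = *pp_line;
    const uint8_t *p_y = *pp_y, *p_u = *pp_u, *p_v = *pp_v;
    int i;

    for( i = 0; i < 8; i++ )      /* 8 pixel pairs = 16 pixels */
    {
        *p_line++ = *p_y++;       /* Y0 */
        *p_line++ = *p_u++;       /* U  */
        *p_line++ = *p_y++;       /* Y1 */
        *p_line++ = *p_v++;       /* V  */
    }

    *pp_line = p_line; *pp_y = p_y; *pp_u = p_u; *pp_v = p_v;
}
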