+#elif defined(HAVE_SSE2_INTRINSICS)
+
+/* SSE2 intrinsics */
+
+#include <emmintrin.h>
+
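+/*
+ * Each macro below converts 16 pixels on two successive lines at once:
+ * 16 luma bytes from p_y1 and p_y2 are interleaved with the 8 U and 8 V
+ * bytes shared by both lines (4:2:0 subsamples chroma in both directions).
+ * Every output format comes in an _ALIGNED variant (aligned loads,
+ * non-temporal stores) and an _UNALIGNED variant (unaligned loads and
+ * stores plus a non-temporal prefetch of the destination).
+ */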
+#define SSE2_CALL(SSE2_INSTRUCTIONS) \
+ do { \
+ __m128i xmm0, xmm1, xmm2, xmm3, xmm4; \
+ SSE2_INSTRUCTIONS \
+ p_line1 += 32; p_line2 += 32; \
+ p_y1 += 16; p_y2 += 16; \
+ p_u += 8; p_v += 8; \
+ } while(0)
+
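+/* Fence so the non-temporal stores above are globally visible before the
+ * converted picture is consumed. */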
+#define SSE2_END _mm_sfence()
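+
+/* Usage sketch (illustrative only: the loop bounds and pointer setup are
+ * the caller's responsibility, not defined in this header):
+ *
+ *     for( int i_x = i_width / 16; i_x--; )
+ *         SSE2_CALL( SSE2_YUV420_YUYV_ALIGNED );
+ *     SSE2_END;
+ */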
+
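+/* I420 -> YUYV: packs as Y0 U0 Y1 V0 Y2 U1 Y3 V1 ... */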
+#define SSE2_YUV420_YUYV_ALIGNED \
+ xmm1 = _mm_loadl_epi64((__m128i *)p_u); \
+ xmm2 = _mm_loadl_epi64((__m128i *)p_v); \
+ xmm0 = _mm_load_si128((__m128i *)p_y1); \
+ xmm3 = _mm_load_si128((__m128i *)p_y2); \
+ xmm1 = _mm_unpacklo_epi8(xmm1, xmm2); \
+ xmm2 = xmm0; \
+ xmm2 = _mm_unpacklo_epi8(xmm2, xmm1); \
+ _mm_stream_si128((__m128i*)(p_line1), xmm2); \
+ xmm0 = _mm_unpackhi_epi8(xmm0, xmm1); \
+ _mm_stream_si128((__m128i*)(p_line1+16), xmm0); \
+ xmm4 = xmm3; \
+ xmm4 = _mm_unpacklo_epi8(xmm4, xmm1); \
+ _mm_stream_si128((__m128i*)(p_line2), xmm4); \
+ xmm3 = _mm_unpackhi_epi8(xmm3, xmm1); \
+ _mm_stream_si128((__m128i*)(p_line2+16), xmm3);
+
+#define SSE2_YUV420_YUYV_UNALIGNED \
+ xmm1 = _mm_loadl_epi64((__m128i *)p_u); \
+ xmm2 = _mm_loadl_epi64((__m128i *)p_v); \
+ xmm0 = _mm_loadu_si128((__m128i *)p_y1); \
+ xmm3 = _mm_loadu_si128((__m128i *)p_y2); \
+ _mm_prefetch((char const *)p_line1, _MM_HINT_NTA); \
+ _mm_prefetch((char const *)p_line2, _MM_HINT_NTA); \
+ xmm1 = _mm_unpacklo_epi8(xmm1, xmm2); \
+ xmm2 = xmm0; \
+ xmm2 = _mm_unpacklo_epi8(xmm2, xmm1); \
+ _mm_storeu_si128((__m128i*)(p_line1), xmm2); \
+ xmm0 = _mm_unpackhi_epi8(xmm0, xmm1); \
+ _mm_storeu_si128((__m128i*)(p_line1+16), xmm0); \
+ xmm4 = xmm3; \
+ xmm4 = _mm_unpacklo_epi8(xmm4, xmm1); \
+ _mm_storeu_si128((__m128i*)(p_line2), xmm4); \
+ xmm3 = _mm_unpackhi_epi8(xmm3, xmm1); \
+ _mm_storeu_si128((__m128i*)(p_line2+16), xmm3);
+
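+/* I420 -> YVYU: as YUYV with the U and V planes swapped. */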
+#define SSE2_YUV420_YVYU_ALIGNED \
+ xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
+ xmm2 = _mm_loadl_epi64((__m128i *)p_u); \
+ xmm0 = _mm_load_si128((__m128i *)p_y1); \
+ xmm3 = _mm_load_si128((__m128i *)p_y2); \
+ xmm1 = _mm_unpacklo_epi8(xmm1, xmm2); \
+ xmm2 = xmm0; \
+ xmm2 = _mm_unpacklo_epi8(xmm2, xmm1); \
+ _mm_stream_si128((__m128i*)(p_line1), xmm2); \
+ xmm0 = _mm_unpackhi_epi8(xmm0, xmm1); \
+ _mm_stream_si128((__m128i*)(p_line1+16), xmm0); \
+ xmm4 = xmm3; \
+ xmm4 = _mm_unpacklo_epi8(xmm4, xmm1); \
+ _mm_stream_si128((__m128i*)(p_line2), xmm4); \
+ xmm3 = _mm_unpackhi_epi8(xmm3, xmm1); \
+ _mm_stream_si128((__m128i*)(p_line2+16), xmm3);
+
+#define SSE2_YUV420_YVYU_UNALIGNED \
+ xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
+ xmm2 = _mm_loadl_epi64((__m128i *)p_u); \
+ xmm0 = _mm_loadu_si128((__m128i *)p_y1); \
+ xmm3 = _mm_loadu_si128((__m128i *)p_y2); \
+ _mm_prefetch((char const *)p_line1, _MM_HINT_NTA); \
+ _mm_prefetch((char const *)p_line2, _MM_HINT_NTA); \
+ xmm1 = _mm_unpacklo_epi8(xmm1, xmm2); \
+ xmm2 = xmm0; \
+ xmm2 = _mm_unpacklo_epi8(xmm2, xmm1); \
+ _mm_storeu_si128((__m128i*)(p_line1), xmm2); \
+ xmm0 = _mm_unpackhi_epi8(xmm0, xmm1); \
+ _mm_storeu_si128((__m128i*)(p_line1+16), xmm0); \
+ xmm4 = xmm3; \
+ xmm4 = _mm_unpacklo_epi8(xmm4, xmm1); \
+ _mm_storeu_si128((__m128i*)(p_line2), xmm4); \
+ xmm3 = _mm_unpackhi_epi8(xmm3, xmm1); \
+ _mm_storeu_si128((__m128i*)(p_line2+16), xmm3);
+
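+/* I420 -> UYVY: packs as U0 Y0 V0 Y1 U1 Y2 V1 Y3 ..., so the chroma
+ * register is unpacked first. */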
+#define SSE2_YUV420_UYVY_ALIGNED \
+ xmm1 = _mm_loadl_epi64((__m128i *)p_u); \
+ xmm2 = _mm_loadl_epi64((__m128i *)p_v); \
+ xmm0 = _mm_load_si128((__m128i *)p_y1); \
+ xmm3 = _mm_load_si128((__m128i *)p_y2); \
+ xmm1 = _mm_unpacklo_epi8(xmm1, xmm2); \
+ xmm2 = xmm1; \
+ xmm2 = _mm_unpacklo_epi8(xmm2, xmm0); \
+ _mm_stream_si128((__m128i*)(p_line1), xmm2); \
+ xmm2 = xmm1; \
+ xmm2 = _mm_unpackhi_epi8(xmm2, xmm0); \
+ _mm_stream_si128((__m128i*)(p_line1+16), xmm2); \
+ xmm4 = xmm1; \
+ xmm4 = _mm_unpacklo_epi8(xmm4, xmm3); \
+ _mm_stream_si128((__m128i*)(p_line2), xmm4); \
+ xmm1 = _mm_unpackhi_epi8(xmm1, xmm3); \
+ _mm_stream_si128((__m128i*)(p_line2+16), xmm1);
+
+#define SSE2_YUV420_UYVY_UNALIGNED \
+ xmm1 = _mm_loadl_epi64((__m128i *)p_u); \
+ xmm2 = _mm_loadl_epi64((__m128i *)p_v); \
+ xmm0 = _mm_loadu_si128((__m128i *)p_y1); \
+ xmm3 = _mm_loadu_si128((__m128i *)p_y2); \
+ _mm_prefetch((char const *)p_line1, _MM_HINT_NTA); \
+ _mm_prefetch((char const *)p_line2, _MM_HINT_NTA); \
+ xmm1 = _mm_unpacklo_epi8(xmm1, xmm2); \
+ xmm2 = xmm1; \
+ xmm2 = _mm_unpacklo_epi8(xmm2, xmm0); \
+ _mm_storeu_si128((__m128i*)(p_line1), xmm2); \
+ xmm2 = xmm1; \
+ xmm2 = _mm_unpackhi_epi8(xmm2, xmm0); \
+ _mm_storeu_si128((__m128i*)(p_line1+16), xmm2); \
+ xmm4 = xmm1; \
+ xmm4 = _mm_unpacklo_epi8(xmm4, xmm3); \
+ _mm_storeu_si128((__m128i*)(p_line2), xmm4); \
+ xmm1 = _mm_unpackhi_epi8(xmm1, xmm3); \
+ _mm_storeu_si128((__m128i*)(p_line2+16), xmm1);
+
+#endif
+