git.sesse.net Git - vlc/blobdiff - modules/video_chroma/i420_rgb_mmx.h
video_chromas: more SSE2 and MMX support and optimization, added SSE2 i420 -> RGB...
index 1c2f9bb5b02e1ecd676e8288cd41967ade9d7381..42b33d412c0526ea5a4e7d2da1b55c6b84fcd4b9 100644
@@ -1,12 +1,13 @@
 /*****************************************************************************
  * transforms_yuvmmx.h: MMX YUV transformation assembly
  *****************************************************************************
- * Copyright (C) 1999-2004 the VideoLAN team
+ * Copyright (C) 1999-2007 the VideoLAN team
  * $Id$
  *
  * Authors: Olie Lho <ollie@sis.com.tw>
  *          Gaël Hendryckx <jimmy@via.ecp.fr>
  *          Samuel Hocevar <sam@zoy.org>
+ *          Damien Fouilleul <damienf@videolan.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -53,21 +54,48 @@ USED_U64(mmx_mask_fc) = 0xfcfcfcfcfcfcfcfcULL;
 #endif
 
 #define MMX_INIT_16 "                                                       \n\
-movd      (%1), %%mm0       # Load 4 Cb       00 00 00 00 u3 u2 u1 u0       \n\
-movd      (%2), %%mm1       # Load 4 Cr       00 00 00 00 v3 v2 v1 v0       \n\
+movd       (%1), %%mm0      # Load 4 Cb       00 00 00 00 u3 u2 u1 u0       \n\
+movd       (%2), %%mm1      # Load 4 Cr       00 00 00 00 v3 v2 v1 v0       \n\
 pxor      %%mm4, %%mm4      # zero mm4                                      \n\
-movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
-#movl      $0, (%3)         # cache preload for image                       \n\
+movq       (%0), %%mm6      # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
 "
 
-#define INTRINSICS_INIT_16 \
-    tmp64 = *(uint32_t *)p_u; \
-    mm0 = (__m64)tmp64; \
-    tmp64 = *(uint32_t *)p_v; \
-    mm1 = (__m64)tmp64; \
-    mm4 = (__m64)(uint64_t)0; \
-    mm6 = (__m64)*(uint64_t *)p_y; \
-    /* *(uint16_t *)p_buffer = 0; */
+#define SSE2_INIT_16_ALIGNED "                                              \n\
+prefetcht1  (%3)            # cache preload for image                       \n\
+movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
+movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
+pxor      %%xmm4, %%xmm4    # zero mm4                                      \n\
+movdqa      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
+"
+
+#define SSE2_INIT_16_UNALIGNED "                                            \n\
+prefetcht1  (%3)            # cache preload for image                       \n\
+movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
+movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
+pxor      %%xmm4, %%xmm4    # zero mm4                                      \n\
+movdqu      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
+"
+
+#define MMX_INTRINSICS_INIT_16      \
+    tmp64 = *(uint32_t *)p_u;       \
+    mm0 = (__m64)tmp64;             \
+    tmp64 = *(uint32_t *)p_v;       \
+    mm1 = (__m64)tmp64;             \
+    mm4 = _mm_setzero_si64();       \
+    mm6 = (__m64)*(uint64_t *)p_y;  \
+
+#define SSE2_INTRINSICS_INIT_16_ALIGNED     \
+    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
+    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
+    xmm4 = _mm_setzero_si128();             \
+    xmm6 = _mm_load_si128((__m128i *)p_y);  \
+
+#define SSE2_INTRINSICS_INIT_16_UNALIGNED   \
+    _mm_prefetch(p_buffer, _MM_HINT_T1);    \
+    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
+    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
+    xmm4 = _mm_setzero_si128();             \
+    xmm6 = _mm_loadu_si128((__m128i *)p_y); \
 
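The _ALIGNED / _UNALIGNED pairs exist because movdqa / _mm_load_si128 and the non-temporal movntdq / _mm_stream_si128 stores fault on addresses that are not 16-byte aligned, while movdqu / _mm_loadu_si128 and _mm_storeu_si128 accept any address. A minimal sketch of the alignment test a caller could use to pick a variant per plane; the helper name is illustrative, not taken from the i420_rgb*.c converters:

#include <stdint.h>

/* Illustrative helper: true when p satisfies the 16-byte alignment that
 * movdqa/_mm_load_si128 and movntdq/_mm_stream_si128 require. */
static inline int is_aligned_16( const void *p )
{
    return ( (uintptr_t)p & 0x0f ) == 0;
}

/* A caller would typically test the Y source and the RGB destination once
 * per row and then expand either the *_ALIGNED or the *_UNALIGNED macros
 * for the whole row. */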
 #define MMX_INIT_16_GRAY "                                                  \n\
 movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
@@ -76,21 +104,49 @@ movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
 
 #define MMX_INIT_32 "                                                       \n\
 movd      (%1), %%mm0       # Load 4 Cb       00 00 00 00 u3 u2 u1 u0       \n\
-movl      $0, (%3)          # cache preload for image                       \n\
+movl        $0, (%3)        # cache preload for image                       \n\
 movd      (%2), %%mm1       # Load 4 Cr       00 00 00 00 v3 v2 v1 v0       \n\
-pxor      %%mm4, %%mm4      # zero mm4                                      \n\
+pxor     %%mm4, %%mm4       # zero mm4                                      \n\
 movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
 "
 
-#define INTRINSICS_INIT_32 \
-    tmp64 = *(uint32_t *)p_u; \
-    mm0 = (__m64)tmp64; \
-    *(uint16_t *)p_buffer = 0; \
-    tmp64 = *(uint32_t *)p_v; \
-    mm1 = (__m64)tmp64; \
-    mm4 = (__m64)(uint64_t)0; \
+#define SSE2_INIT_32_ALIGNED "                                              \n\
+movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
+movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
+pxor      %%xmm4, %%xmm4    # zero mm4                                      \n\
+movdqa      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
+"
+
+#define SSE2_INIT_32_UNALIGNED "                                            \n\
+prefetcht1  (%3)            # cache preload for image                       \n\
+movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
+movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
+pxor      %%xmm4, %%xmm4    # zero mm4                                      \n\
+movdqu      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
+"
+
+#define MMX_INTRINSICS_INIT_32      \
+    tmp64 = *(uint32_t *)p_u;       \
+    mm0 = (__m64)tmp64;             \
+    *(uint16_t *)p_buffer = 0;      \
+    tmp64 = *(uint32_t *)p_v;       \
+    mm1 = (__m64)tmp64;             \
+    mm4 = _mm_setzero_si64();       \
     mm6 = (__m64)*(uint64_t *)p_y;
 
+#define SSE2_INTRINSICS_INIT_32_ALIGNED      \
+    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
+    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
+    xmm4 = _mm_setzero_si128();             \
+    xmm6 = _mm_load_si128((__m128i *)p_y);  \
+
+#define SSE2_INTRINSICS_INIT_32_UNALIGNED    \
+    _mm_prefetch(p_buffer, _MM_HINT_T1);     \
+    xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
+    xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
+    xmm4 = _mm_setzero_si128();             \
+    xmm6 = _mm_loadu_si128((__m128i *)p_y); \
+
 /*
  * Do the multiply part of the conversion for even and odd pixels,
  * register usage:
@@ -126,7 +182,58 @@ pmulhw    mmx_Y_coeff"G", %%mm6 # Mul 4 Y even    00 y6 00 y4 00 y2 00 y0   \n\
 pmulhw    mmx_Y_coeff"G", %%mm7 # Mul 4 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
 "
 
-#define INTRINSICS_YUV_MUL \
+#define SSE2_YUV_MUL "                                                      \n\
+# convert the chroma part                                                   \n\
+punpcklbw %%xmm4, %%xmm0        # scatter 8 Cb    00 u3 00 u2 00 u1 00 u0   \n\
+punpcklbw %%xmm4, %%xmm1        # scatter 8 Cr    00 v3 00 v2 00 v1 00 v0   \n\
+movl      $0x00800080, %%eax    #                                           \n\
+movd      %%eax, %%xmm5         #                                           \n\
+pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     0080 0080 ... 0080 0080   \n\
+psubsw    %%xmm5, %%xmm0        # Cb -= 128                                 \n\
+psubsw    %%xmm5, %%xmm1        # Cr -= 128                                 \n\
+psllw     $3, %%xmm0            # Promote precision                         \n\
+psllw     $3, %%xmm1            # Promote precision                         \n\
+movdqa    %%xmm0, %%xmm2        # Copy 8 Cb       00 u3 00 u2 00 u1 00 u0   \n\
+movdqa    %%xmm1, %%xmm3        # Copy 8 Cr       00 v3 00 v2 00 v1 00 v0   \n\
+movl      $0xf37df37d, %%eax    #                                           \n\
+movd      %%eax, %%xmm5         #                                           \n\
+pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     f37d f37d ... f37d f37d   \n\
+pmulhw    %%xmm5, %%xmm2        # Mul Cb with green coeff -> Cb green       \n\
+movl      $0xe5fce5fc, %%eax    #                                           \n\
+movd      %%eax, %%xmm5         #                                           \n\
+pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     e5fc e5fc ... e5fc e5fc   \n\
+pmulhw    %%xmm5, %%xmm3        # Mul Cr with green coeff -> Cr green       \n\
+movl      $0x40934093, %%eax    #                                           \n\
+movd      %%eax, %%xmm5         #                                           \n\
+pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     4093 4093 ... 4093 4093   \n\
+pmulhw    %%xmm5, %%xmm0        # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0   \n\
+movl      $0x33123312, %%eax    #                                           \n\
+movd      %%eax, %%xmm5         #                                           \n\
+pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     3312 3312 ... 3312 3312   \n\
+pmulhw    %%xmm5, %%xmm1        # Mul Cr -> Cred  00 r3 00 r2 00 r1 00 r0   \n\
+paddsw    %%xmm3, %%xmm2        # Cb green + Cr green -> Cgreen             \n\
+                                                                            \n\
+# convert the luma part                                                     \n\
+movl      $0x10101010, %%eax    #                                           \n\
+movd      %%eax, %%xmm5         #                                           \n\
+pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to   1010 1010 ... 1010 1010     \n\
+psubusb   %%xmm5, %%xmm6        # Y -= 16                                   \n\
+movdqa    %%xmm6, %%xmm7        # Copy 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
+movl      $0x00ff00ff, %%eax    #                                           \n\
+movd      %%eax, %%xmm5         #                                           \n\
+pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     00ff 00ff ... 00ff 00ff   \n\
+pand      %%xmm5, %%xmm6        # get Y even      00 Y6 00 Y4 00 Y2 00 Y0   \n\
+psrlw     $8, %%xmm7            # get Y odd       00 Y7 00 Y5 00 Y3 00 Y1   \n\
+psllw     $3, %%xmm6            # Promote precision                         \n\
+psllw     $3, %%xmm7            # Promote precision                         \n\
+movl      $0x253f253f, %%eax    #                                           \n\
+movd      %%eax, %%xmm5         #                                           \n\
+pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     253f 253f ... 253f 253f   \n\
+pmulhw    %%xmm5, %%xmm6        # Mul 8 Y even    00 y6 00 y4 00 y2 00 y0   \n\
+pmulhw    %%xmm5, %%xmm7        # Mul 8 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
+"
+
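The immediates broadcast into xmm5 above are the same fixed-point factors as the MMX mmx_*_coeff constants: 0x0080 is the 128 chroma bias, 0x10101010 holds the per-byte 16 luma offset for psubusb, and each coefficient word is the BT.601 factor scaled by 2^13 (the inputs are shifted left by 3 and pmulhw keeps the high 16 bits of the product). A small self-contained check, assuming only standard C and the usual 1.164 / 2.018 / 1.596 / -0.391 / -0.813 factors:

#include <stdio.h>

/* Each coefficient word is round(factor * 2^13); with the <<3 pre-shift and
 * pmulhw's implicit >>16, the net multiply is by the factor itself. */
static unsigned fix13( double factor )
{
    int c = (int)( factor * 8192.0 + ( factor >= 0 ? 0.5 : -0.5 ) );
    return (unsigned)c & 0xffffu;
}

int main( void )
{
    printf( "Y  scale  %04x\n", fix13(  1.164 ) );  /* 253f */
    printf( "Cb->blue  %04x\n", fix13(  2.018 ) );  /* 4093 */
    printf( "Cr->red   %04x\n", fix13(  1.596 ) );  /* 3312 */
    printf( "Cb->green %04x\n", fix13( -0.391 ) );  /* f37d */
    printf( "Cr->green %04x\n", fix13( -0.813 ) );  /* e5fc */
    return 0;
}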
+#define MMX_INTRINSICS_YUV_MUL \
     mm0 = _mm_unpacklo_pi8(mm0, mm4); \
     mm1 = _mm_unpacklo_pi8(mm1, mm4); \
     mm0 = _mm_subs_pi16(mm0, (__m64)mmx_80w); \
@@ -150,6 +257,38 @@ pmulhw    mmx_Y_coeff"G", %%mm7 # Mul 4 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
     mm6 = _mm_mulhi_pi16(mm6, (__m64)mmx_Y_coeff); \
     mm7 = _mm_mulhi_pi16(mm7, (__m64)mmx_Y_coeff);
 
+#define SSE2_INTRINSICS_YUV_MUL \
+    xmm0 = _mm_unpacklo_epi8(xmm0, xmm4); \
+    xmm1 = _mm_unpacklo_epi8(xmm1, xmm4); \
+    xmm5 = _mm_set1_epi32(0x00800080UL); \
+    xmm0 = _mm_subs_epi16(xmm0, xmm5); \
+    xmm1 = _mm_subs_epi16(xmm1, xmm5); \
+    xmm0 = _mm_slli_epi16(xmm0, 3); \
+    xmm1 = _mm_slli_epi16(xmm1, 3); \
+    xmm2 = xmm0; \
+    xmm3 = xmm1; \
+    xmm5 = _mm_set1_epi32(0xf37df37dUL); \
+    xmm2 = _mm_mulhi_epi16(xmm2, xmm5); \
+    xmm5 = _mm_set1_epi32(0xe5fce5fcUL); \
+    xmm3 = _mm_mulhi_epi16(xmm3, xmm5); \
+    xmm5 = _mm_set1_epi32(0x40934093UL); \
+    xmm0 = _mm_mulhi_epi16(xmm0, xmm5); \
+    xmm5 = _mm_set1_epi32(0x33123312UL); \
+    xmm1 = _mm_mulhi_epi16(xmm1, xmm5); \
+    xmm2 = _mm_adds_epi16(xmm2, xmm3); \
+    \
+    xmm5 = _mm_set1_epi32(0x10101010UL); \
+    xmm6 = _mm_subs_epu8(xmm6, xmm5); \
+    xmm7 = xmm6; \
+    xmm5 = _mm_set1_epi32(0x00ff00ffUL); \
+    xmm6 = _mm_and_si128(xmm6, xmm5); \
+    xmm7 = _mm_srli_epi16(xmm7, 8); \
+    xmm6 = _mm_slli_epi16(xmm6, 3); \
+    xmm7 = _mm_slli_epi16(xmm7, 3); \
+    xmm5 = _mm_set1_epi32(0x253f253fUL); \
+    xmm6 = _mm_mulhi_epi16(xmm6, xmm5); \
+    xmm7 = _mm_mulhi_epi16(xmm7, xmm5);
+
 /*
  * Do the addition part of the conversion for even and odd pixels,
  * register usage:
@@ -186,7 +325,35 @@ punpcklbw %%mm4, %%mm1          #                 R7 R6 R5 R4 R3 R2 R1 R0   \n\
 punpcklbw %%mm5, %%mm2          #                 G7 G6 G5 G4 G3 G2 G1 G0   \n\
 "
 
-#define INTRINSICS_YUV_ADD \
+#define SSE2_YUV_ADD "                                                      \n\
+# Do horizontal and vertical scaling                                        \n\
+movdqa    %%xmm0, %%xmm3        # Copy Cblue                                \n\
+movdqa    %%xmm1, %%xmm4        # Copy Cred                                 \n\
+movdqa    %%xmm2, %%xmm5        # Copy Cgreen                               \n\
+paddsw    %%xmm6, %%xmm0        # Y even + Cblue  00 B6 00 B4 00 B2 00 B0   \n\
+paddsw    %%xmm7, %%xmm3        # Y odd  + Cblue  00 B7 00 B5 00 B3 00 B1   \n\
+paddsw    %%xmm6, %%xmm1        # Y even + Cred   00 R6 00 R4 00 R2 00 R0   \n\
+paddsw    %%xmm7, %%xmm4        # Y odd  + Cred   00 R7 00 R5 00 R3 00 R1   \n\
+paddsw    %%xmm6, %%xmm2        # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0   \n\
+paddsw    %%xmm7, %%xmm5        # Y odd  + Cgreen 00 G7 00 G5 00 G3 00 G1   \n\
+                                                                            \n\
+# Limit RGB even to 0..255                                                  \n\
+packuswb  %%xmm0, %%xmm0        # B6 B4 B2 B0 / B6 B4 B2 B0                 \n\
+packuswb  %%xmm1, %%xmm1        # R6 R4 R2 R0 / R6 R4 R2 R0                 \n\
+packuswb  %%xmm2, %%xmm2        # G6 G4 G2 G0 / G6 G4 G2 G0                 \n\
+                                                                            \n\
+# Limit RGB odd to 0..255                                                   \n\
+packuswb  %%xmm3, %%xmm3        # B7 B5 B3 B1 / B7 B5 B3 B1                 \n\
+packuswb  %%xmm4, %%xmm4        # R7 R5 R3 R1 / R7 R5 R3 R1                 \n\
+packuswb  %%xmm5, %%xmm5        # G7 G5 G3 G1 / G7 G5 G3 G1                 \n\
+                                                                            \n\
+# Interleave RGB even and odd                                               \n\
+punpcklbw %%xmm3, %%xmm0        #                 B7 B6 B5 B4 B3 B2 B1 B0   \n\
+punpcklbw %%xmm4, %%xmm1        #                 R7 R6 R5 R4 R3 R2 R1 R0   \n\
+punpcklbw %%xmm5, %%xmm2        #                 G7 G6 G5 G4 G3 G2 G1 G0   \n\
+"
+
+#define MMX_INTRINSICS_YUV_ADD \
     mm3 = mm0; \
     mm4 = mm1; \
     mm5 = mm2; \
@@ -209,6 +376,29 @@ punpcklbw %%mm5, %%mm2          #                 G7 G6 G5 G4 G3 G2 G1 G0   \n\
     mm1 = _mm_unpacklo_pi8(mm1, mm4); \
     mm2 = _mm_unpacklo_pi8(mm2, mm5);
 
+#define SSE2_INTRINSICS_YUV_ADD \
+    xmm3 = xmm0; \
+    xmm4 = xmm1; \
+    xmm5 = xmm2; \
+    xmm0 = _mm_adds_epi16(xmm0, xmm6); \
+    xmm3 = _mm_adds_epi16(xmm3, xmm7); \
+    xmm1 = _mm_adds_epi16(xmm1, xmm6); \
+    xmm4 = _mm_adds_epi16(xmm4, xmm7); \
+    xmm2 = _mm_adds_epi16(xmm2, xmm6); \
+    xmm5 = _mm_adds_epi16(xmm5, xmm7); \
+    \
+    xmm0 = _mm_packus_epi16(xmm0, xmm0); \
+    xmm1 = _mm_packus_epi16(xmm1, xmm1); \
+    xmm2 = _mm_packus_epi16(xmm2, xmm2); \
+    \
+    xmm3 = _mm_packus_epi16(xmm3, xmm3); \
+    xmm4 = _mm_packus_epi16(xmm4, xmm4); \
+    xmm5 = _mm_packus_epi16(xmm5, xmm5); \
+    \
+    xmm0 = _mm_unpacklo_epi8(xmm0, xmm3); \
+    xmm1 = _mm_unpacklo_epi8(xmm1, xmm4); \
+    xmm2 = _mm_unpacklo_epi8(xmm2, xmm5);
+
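Taken together, the MUL and ADD stages implement the usual BT.601 studio-swing conversion before the packing steps below. As a readability aid only, a scalar reference of the same arithmetic (not part of this header, and ignoring the exact fixed-point rounding) could look like:

#include <stdint.h>

/* Scalar reference of what SSE2_YUV_MUL + SSE2_YUV_ADD compute per pixel;
 * the 0..255 clamp corresponds to packuswb / _mm_packus_epi16 and the
 * (y - 16) saturation to psubusb. */
static inline uint8_t clamp255( int v )
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

static inline void yuv_to_rgb_ref( uint8_t y, uint8_t cb, uint8_t cr,
                                   uint8_t *r, uint8_t *g, uint8_t *b )
{
    int luma = (int)( 1.164 * ( y > 16 ? y - 16 : 0 ) );
    int u    = cb - 128;
    int v    = cr - 128;

    *r = clamp255( luma + (int)( 1.596 * v ) );
    *g = clamp255( luma - (int)( 0.391 * u + 0.813 * v ) );
    *b = clamp255( luma + (int)( 2.018 * u ) );
}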
 /*
  * Grayscale case, only use Y
  */
@@ -287,13 +477,71 @@ movd      4(%2), %%mm1          # Load 4 Cr       __ __ __ __ v3 v2 v1 v0   \n\
 movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
 "
 
-#define INTRINSICS_UNPACK_15 \
+#define SSE2_UNPACK_15_ALIGNED "                                            \n\
+# mask unneeded bits off                                                    \n\
+movl      $0xf8f8f8f8, %%eax    #                                           \n\
+movd      %%eax, %%xmm5         #                                           \n\
+pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
+pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
+psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
+pand      %%xmm5, %%xmm2        # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
+pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
+psrlw     $1,%%xmm1             # __r7r6r5 r4r3____ __r7r6r5 r4r3____       \n\
+pxor      %%xmm4, %%xmm4        # zero mm4                                  \n\
+movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
+movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
+                                                                            \n\
+# convert rgb24 plane to rgb15 pack for pixel 0-7                           \n\
+punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3______       \n\
+punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
+psllw     $2,%%xmm2             # ________ ____g7g6 g5g4g3__ ________       \n\
+por       %%xmm2, %%xmm0        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
+movntdq   %%xmm0, (%3)          # store pixel 0-7                           \n\
+                                                                            \n\
+# convert rgb24 plane to rgb15 pack for pixel 8-15                          \n\
+punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3______       \n\
+punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
+psllw     $2,%%xmm7             # ________ ____g7g6 g5g4g3__ ________       \n\
+por       %%xmm7, %%xmm5        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
+movntdq   %%xmm5, 16(%3)        # store pixel 8-15                          \n\
+"
+
+#define SSE2_UNPACK_15_UNALIGNED "                                          \n\
+# mask unneeded bits off                                                    \n\
+movl      $0xf8f8f8f8, %%eax    #                                           \n\
+movd      %%eax, %%xmm5         #                                           \n\
+pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
+pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
+psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
+pand      %%xmm5, %%xmm2        # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
+pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
+psrlw     $1,%%xmm1             # __r7r6r5 r4r3____ __r7r6r5 r4r3____       \n\
+pxor      %%xmm4, %%xmm4        # zero mm4                                  \n\
+movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
+movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
+                                                                            \n\
+# convert rgb24 plane to rgb15 pack for pixel 0-7                           \n\
+punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3______       \n\
+punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
+psllw     $2,%%xmm2             # ________ ____g7g6 g5g4g3__ ________       \n\
+por       %%xmm2, %%xmm0        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
+movdqu    %%xmm0, (%3)          # store pixel 0-7                           \n\
+                                                                            \n\
+# convert rgb24 plane to rgb15 pack for pixel 8-15                          \n\
+punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3______       \n\
+punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
+psllw     $2,%%xmm7             # ________ ____g7g6 g5g4g3__ ________       \n\
+por       %%xmm7, %%xmm5        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
+movdqu    %%xmm5, 16(%3)        # store pixel 8-15                          \n\
+"
+
+#define MMX_INTRINSICS_UNPACK_15 \
     mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
     mm0 = _mm_srli_pi16(mm0, 3); \
     mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_f8); \
     mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
     mm1 = _mm_srli_pi16(mm1, 1); \
-    mm4 = (__m64)(uint64_t)0; \
+    mm4 = _mm_setzero_si64(); \
     mm5 = mm0; \
     mm7 = mm2; \
     \
@@ -315,6 +563,52 @@ movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
     mm1 = (__m64)tmp64; \
     *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;
 
+#define SSE2_INTRINSICS_UNPACK_15_ALIGNED \
+    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL); \
+    xmm0 = _mm_and_si128(xmm0, xmm5); \
+    xmm0 = _mm_srli_epi16(xmm0, 3); \
+    xmm2 = _mm_and_si128(xmm2, xmm5); \
+    xmm1 = _mm_and_si128(xmm1, xmm5); \
+    xmm1 = _mm_srli_epi16(xmm1, 1); \
+    xmm4 = _mm_setzero_si128();          \
+    xmm5 = xmm0; \
+    xmm7 = xmm2; \
+    \
+    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4); \
+    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
+    xmm2 = _mm_slli_epi16(xmm2, 2); \
+    xmm0 = _mm_or_si128(xmm0, xmm2); \
+    _mm_stream_si128((__m128i*)p_buffer, xmm0); \
+    \
+    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4); \
+    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1); \
+    xmm7 = _mm_slli_epi16(xmm7, 2); \
+    xmm5 = _mm_or_si128(xmm5, xmm7); \
+    _mm_stream_si128((__m128i*)(p_buffer+8), xmm5);
+
+#define SSE2_INTRINSICS_UNPACK_15_UNALIGNED \
+    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL); \
+    xmm0 = _mm_and_si128(xmm0, xmm5); \
+    xmm0 = _mm_srli_epi16(xmm0, 3); \
+    xmm2 = _mm_and_si128(xmm2, xmm5); \
+    xmm1 = _mm_and_si128(xmm1, xmm5); \
+    xmm1 = _mm_srli_epi16(xmm1, 1); \
+    xmm4 = _mm_setzero_si128();          \
+    xmm5 = xmm0; \
+    xmm7 = xmm2; \
+    \
+    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4); \
+    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
+    xmm2 = _mm_slli_epi16(xmm2, 2); \
+    xmm0 = _mm_or_si128(xmm0, xmm2); \
+    _mm_storeu_si128((__m128i*)p_buffer, xmm0); \
+    \
+    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4); \
+    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1); \
+    xmm7 = _mm_slli_epi16(xmm7, 2); \
+    xmm5 = _mm_or_si128(xmm5, xmm7); \
+    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5);
+
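Per pixel, the 15-bit unpack above is equivalent to dropping the low 3 bits of each component and packing a 0RRRRRGGGGGBBBBB word. A scalar sketch (illustrative only):

#include <stdint.h>

/* Scalar equivalent of the RGB15 pack: B ends up in bits 0-4, G in 5-9 and
 * R in 10-14, matching the pand/psrlw/psllw/por sequence above. */
static inline uint16_t pack_rgb15( uint8_t r, uint8_t g, uint8_t b )
{
    return (uint16_t)( ( ( r & 0xf8 ) << 7 ) |
                       ( ( g & 0xf8 ) << 2 ) |
                       (   b          >> 3 ) );
}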
 /*
  * convert RGB plane to RGB 16 bits,
  * mm0 -> B, mm1 -> R, mm2 -> G,
@@ -350,12 +644,74 @@ movd      4(%2), %%mm1          # Load 4 Cr       __ __ __ __ v3 v2 v1 v0   \n\
 movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
 "
 
-#define INTRINSICS_UNPACK_16 \
+#define SSE2_UNPACK_16_ALIGNED "                                            \n\
+# mask unneeded bits off                                                    \n\
+movl      $0xf8f8f8f8, %%eax    #                                           \n\
+movd      %%eax, %%xmm5         #                                           \n\
+pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
+pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
+pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
+movl      $0xfcfcfcfc, %%eax    #                                           \n\
+movd      %%eax, %%xmm5         #                                           \n\
+pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     fcfc fcfc ... fcfc fcfc   \n\
+pand      %%xmm5, %%xmm2        # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
+psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
+pxor      %%xmm4, %%xmm4        # zero mm4                                  \n\
+movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
+movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
+                                                                            \n\
+# convert rgb24 plane to rgb16 pack for pixel 0-7                           \n\
+punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3g2____       \n\
+punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
+psllw     $3,%%xmm2             # ________ __g7g6g5 g4g3g2__ ________       \n\
+por       %%xmm2, %%xmm0        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
+movntdq   %%xmm0, (%3)          # store pixel 0-7                           \n\
+                                                                            \n\
+# convert rgb24 plane to rgb16 pack for pixel 8-15                          \n\
+punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3g2____       \n\
+punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
+psllw     $3,%%xmm7             # ________ __g7g6g5 g4g3g2__ ________       \n\
+por       %%xmm7, %%xmm5        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
+movntdq   %%xmm5, 16(%3)        # store pixel 8-15                          \n\
+"
+
+#define SSE2_UNPACK_16_UNALIGNED "                                          \n\
+# mask unneeded bits off                                                    \n\
+movl      $0xf8f8f8f8, %%eax    #                                           \n\
+movd      %%eax, %%xmm5         #                                           \n\
+pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
+pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
+pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
+movl      $0xfcfcfcfc, %%eax    #                                           \n\
+movd      %%eax, %%xmm5         #                                           \n\
+pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     fcfc fcfc ... fcfc fcfc   \n\
+pand      %%xmm5, %%xmm2        # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
+psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
+pxor      %%xmm4, %%xmm4        # zero mm4                                  \n\
+movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
+movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
+                                                                            \n\
+# convert rgb24 plane to rgb16 pack for pixel 0-7                           \n\
+punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3g2____       \n\
+punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
+psllw     $3,%%xmm2             # ________ __g7g6g5 g4g3g2__ ________       \n\
+por       %%xmm2, %%xmm0        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
+movdqu    %%xmm0, (%3)          # store pixel 0-7                           \n\
+                                                                            \n\
+# convert rgb24 plane to rgb16 pack for pixel 8-15                          \n\
+punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3g2____       \n\
+punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
+psllw     $3,%%xmm7             # ________ __g7g6g5 g4g3g2__ ________       \n\
+por       %%xmm7, %%xmm5        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
+movdqu    %%xmm5, 16(%3)        # store pixel 8-15                          \n\
+"
+
+#define MMX_INTRINSICS_UNPACK_16 \
     mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
     mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_fc); \
     mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
     mm0 = _mm_srli_pi16(mm0, 3); \
-    mm4 = (__m64)(uint64_t)0; \
+    mm4 = _mm_setzero_si64(); \
     mm5 = mm0; \
     mm7 = mm2; \
     \
@@ -377,62 +733,294 @@ movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
     mm1 = (__m64)tmp64; \
     *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;
 
+#define SSE2_INTRINSICS_UNPACK_16_ALIGNED \
+    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL); \
+    xmm0 = _mm_and_si128(xmm0, xmm5); \
+    xmm1 = _mm_and_si128(xmm1, xmm5); \
+    xmm5 = _mm_set1_epi32(0xfcfcfcfcUL); \
+    xmm2 = _mm_and_si128(xmm2, xmm5); \
+    xmm0 = _mm_srli_epi16(xmm0, 3); \
+    xmm4 = _mm_setzero_si128();          \
+    xmm5 = xmm0; \
+    xmm7 = xmm2; \
+    \
+    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4); \
+    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
+    xmm2 = _mm_slli_epi16(xmm2, 3); \
+    xmm0 = _mm_or_si128(xmm0, xmm2); \
+    _mm_stream_si128((__m128i*)p_buffer, xmm0); \
+    \
+    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4); \
+    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1); \
+    xmm7 = _mm_slli_epi16(xmm7, 3); \
+    xmm5 = _mm_or_si128(xmm5, xmm7); \
+    _mm_stream_si128((__m128i*)(p_buffer+8), xmm5);
+
+#define SSE2_INTRINSICS_UNPACK_16_UNALIGNED \
+    xmm5 = _mm_set1_epi32(0xf8f8f8f8UL); \
+    xmm0 = _mm_and_si128(xmm0, xmm5); \
+    xmm1 = _mm_and_si128(xmm1, xmm5); \
+    xmm5 = _mm_set1_epi32(0xfcfcfcfcUL); \
+    xmm2 = _mm_and_si128(xmm2, xmm5); \
+    xmm0 = _mm_srli_epi16(xmm0, 3); \
+    xmm4 = _mm_setzero_si128();          \
+    xmm5 = xmm0; \
+    xmm7 = xmm2; \
+    \
+    xmm2 = _mm_unpacklo_epi8(xmm2, xmm4); \
+    xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
+    xmm2 = _mm_slli_epi16(xmm2, 3); \
+    xmm0 = _mm_or_si128(xmm0, xmm2); \
+    _mm_storeu_si128((__m128i*)p_buffer, xmm0); \
+    \
+    xmm7 = _mm_unpackhi_epi8(xmm7, xmm4); \
+    xmm5 = _mm_unpackhi_epi8(xmm5, xmm1); \
+    xmm7 = _mm_slli_epi16(xmm7, 3); \
+    xmm5 = _mm_or_si128(xmm5, xmm7); \
+    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5);
+
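The 16 bpp pack differs from the 15 bpp one only in keeping 6 green bits, i.e. ((r & 0xf8) << 8) | ((g & 0xfc) << 3) | (b >> 3) per pixel. Note also that the _ALIGNED variants write through movntdq / _mm_stream_si128, non-temporal stores that bypass the cache, so whoever drives these macros is expected to issue a store fence once a picture (or batch of rows) is done and before the buffer is read elsewhere. A minimal sketch, assuming nothing beyond the SSE intrinsics headers:

#include <emmintrin.h>   /* includes <xmmintrin.h>, which declares _mm_sfence() */

/* After a loop built from the *_ALIGNED unpack macros (movntdq /
 * _mm_stream_si128), drain the write-combining buffers before anything
 * else reads the RGB output. */
static inline void flush_nt_stores( void )
{
    _mm_sfence();
}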
 /*
  * convert RGB plane to RGB packed format,
- * mm0 -> B, mm1 -> R, mm2 -> G, mm3 -> 0,
- * mm4 -> GB, mm5 -> AR pixel 4-7,
- * mm6 -> GB, mm7 -> AR pixel 0-3
+ * mm0 -> B, mm1 -> R, mm2 -> G
  */
 
-#define MMX_UNPACK_32 "                                                     \n\
+#define MMX_UNPACK_32_ARGB "                                                \n\
 pxor      %%mm3, %%mm3  # zero mm3                                          \n\
-movq      %%mm0, %%mm6  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
-movq      %%mm1, %%mm7  #                 R7 R6 R5 R4 R3 R2 R1 R0           \n\
 movq      %%mm0, %%mm4  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
+punpcklbw %%mm2, %%mm4  #                 G3 B3 G2 B2 G1 B1 G0 B0           \n\
 movq      %%mm1, %%mm5  #                 R7 R6 R5 R4 R3 R2 R1 R0           \n\
-punpcklbw %%mm2, %%mm6  #                 G3 B3 G2 B2 G1 B1 G0 B0           \n\
-punpcklbw %%mm3, %%mm7  #                 00 R3 00 R2 00 R1 00 R0           \n\
-punpcklwd %%mm7, %%mm6  #                 00 R1 B1 G1 00 R0 B0 G0           \n\
-movq      %%mm6, (%3)   # Store ARGB1 ARGB0                                 \n\
-movq      %%mm0, %%mm6  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
-punpcklbw %%mm2, %%mm6  #                 G3 B3 G2 B2 G1 B1 G0 B0           \n\
-punpckhwd %%mm7, %%mm6  #                 00 R3 G3 B3 00 R2 B3 G2           \n\
+punpcklbw %%mm3, %%mm5  #                 00 R3 00 R2 00 R1 00 R0           \n\
+movq      %%mm4, %%mm6  #                 G3 B3 G2 B2 G1 B1 G0 B0           \n\
+punpcklwd %%mm5, %%mm4  #                 00 R1 B1 G1 00 R0 B0 G0           \n\
+movq      %%mm4, (%3)   # Store ARGB1 ARGB0                                 \n\
+punpckhwd %%mm5, %%mm6  #                 00 R3 B3 G3 00 R2 B2 G2           \n\
 movq      %%mm6, 8(%3)  # Store ARGB3 ARGB2                                 \n\
-punpckhbw %%mm2, %%mm4  #                 G7 B7 G6 B6 G5 B5 G4 B4           \n\
-punpckhbw %%mm3, %%mm5  #                 00 R7 00 R6 00 R5 00 R4           \n\
-punpcklwd %%mm5, %%mm4  #                 00 R5 B5 G5 00 R4 B4 G4           \n\
-movq      %%mm4, 16(%3) # Store ARGB5 ARGB4                                 \n\
-movq      %%mm0, %%mm4  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
-punpckhbw %%mm2, %%mm4  #                 G7 B7 G6 B6 G5 B5 G4 B4           \n\
-punpckhwd %%mm5, %%mm4  #                 00 R7 G7 B7 00 R6 B6 G6           \n\
-movq      %%mm4, 24(%3) # Store ARGB7 ARGB6                                 \n\
-                                                                            \n\
-#movd      4(%1), %%mm0  # Load 4 Cb       00 00 00 00 u3 u2 u1 u0           \n\
-#movd      4(%2), %%mm1  # Load 4 Cr       00 00 00 00 v3 v2 v1 v0           \n\
-#pxor      %%mm4, %%mm4  # zero mm4                                          \n\
-#movq      8(%0), %%mm6  # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0           \n\
+punpckhbw %%mm2, %%mm0  #                 G7 B7 G6 B6 G5 B5 G4 B4           \n\
+punpckhbw %%mm3, %%mm1  #                 00 R7 00 R6 00 R5 00 R4           \n\
+movq      %%mm0, %%mm5  #                 G7 B7 G6 B6 G5 B5 G4 B4           \n\
+punpcklwd %%mm1, %%mm5  #                 00 R5 B5 G5 00 R4 B4 G4           \n\
+movq      %%mm5, 16(%3) # Store ARGB5 ARGB4                                 \n\
+punpckhwd %%mm1, %%mm0  #                 00 R7 B7 G7 00 R6 B6 G6           \n\
+movq      %%mm0, 24(%3) # Store ARGB7 ARGB6                                 \n\
+"
+
+#define SSE2_UNPACK_32_ARGB_ALIGNED "                                       \n\
+pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
+movdqa    %%xmm0, %%xmm4  #               B7 B6 B5 B4 B3 B2 B1 B0           \n\
+punpcklbw %%xmm2, %%xmm4  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
+movdqa    %%xmm1, %%xmm5  #               R7 R6 R5 R4 R3 R2 R1 R0           \n\
+punpcklbw %%xmm3, %%xmm5  #               00 R3 00 R2 00 R1 00 R0           \n\
+movdqa    %%xmm4, %%xmm6  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
+punpcklwd %%xmm5, %%xmm4  #               00 R1 B1 G1 00 R0 B0 G0           \n\
+movntdq   %%xmm4, (%3)    # Store ARGB3 ARGB2 ARGB1 ARGB0                   \n\
+punpckhwd %%xmm5, %%xmm6  #               00 R3 B3 G3 00 R2 B2 G2           \n\
+movntdq   %%xmm6, 16(%3)  # Store ARGB7 ARGB6 ARGB5 ARGB4                   \n\
+punpckhbw %%xmm2, %%xmm0  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
+punpckhbw %%xmm3, %%xmm1  #               00 R7 00 R6 00 R5 00 R4           \n\
+movdqa    %%xmm0, %%xmm5  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
+punpcklwd %%xmm1, %%xmm5  #               00 R5 B5 G5 00 R4 B4 G4           \n\
+movntdq   %%xmm5, 32(%3)  # Store ARGB11 ARGB10 ARGB9 ARGB8                 \n\
+punpckhwd %%xmm1, %%xmm0  #               00 R7 B7 G7 00 R6 B6 G6           \n\
+movntdq   %%xmm0, 48(%3)  # Store ARGB15 ARGB14 ARGB13 ARGB12               \n\
+"
+
+#define SSE2_UNPACK_32_ARGB_UNALIGNED "                                     \n\
+pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
+movdqa    %%xmm0, %%xmm4  #               B7 B6 B5 B4 B3 B2 B1 B0           \n\
+punpcklbw %%xmm2, %%xmm4  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
+movdqa    %%xmm1, %%xmm5  #               R7 R6 R5 R4 R3 R2 R1 R0           \n\
+punpcklbw %%xmm3, %%xmm5  #               00 R3 00 R2 00 R1 00 R0           \n\
+movdqa    %%xmm4, %%xmm6  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
+punpcklwd %%xmm5, %%xmm4  #               00 R1 B1 G1 00 R0 B0 G0           \n\
+movdqu    %%xmm4, (%3)    # Store ARGB3 ARGB2 ARGB1 ARGB0                   \n\
+punpckhwd %%xmm5, %%xmm6  #               00 R3 B3 G3 00 R2 B2 G2           \n\
+movdqu    %%xmm6, 16(%3)  # Store ARGB7 ARGB6 ARGB5 ARGB4                   \n\
+punpckhbw %%xmm2, %%xmm0  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
+punpckhbw %%xmm3, %%xmm1  #               00 R7 00 R6 00 R5 00 R4           \n\
+movdqa    %%xmm0, %%xmm5  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
+punpcklwd %%xmm1, %%xmm5  #               00 R5 B5 G5 00 R4 B4 G4           \n\
+movdqu    %%xmm5, 32(%3)  # Store ARGB11 ARGB10 ARGB9 ARGB8                 \n\
+punpckhwd %%xmm1, %%xmm0  #               00 R7 B7 G7 00 R6 B6 G6           \n\
+movdqu    %%xmm0, 48(%3)  # Store ARGB15 ARGB14 ARGB13 ARGB12               \n\
 "
 
-#define INTRINSICS_UNPACK_32 \
-    mm3 = (__m64)(uint64_t)0; \
-    mm6 = mm0; \
-    mm7 = mm1; \
+#define MMX_INTRINSICS_UNPACK_32_ARGB \
+    mm3 = _mm_setzero_si64(); \
     mm4 = mm0; \
+    mm4 = _mm_unpacklo_pi8(mm4, mm2); \
     mm5 = mm1; \
-    mm6 = _mm_unpacklo_pi8(mm6, mm2); \
-    mm7 = _mm_unpacklo_pi8(mm7, mm3); \
-    mm6 = _mm_unpacklo_pi16(mm6, mm7); \
-    *(uint64_t *)p_buffer = (uint64_t)mm6; \
-    mm6 = mm0; \
-    mm6 = _mm_unpacklo_pi8(mm6, mm2); \
-    mm6 = _mm_unpackhi_pi16(mm6, mm7); \
-    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6; \
-    mm4 = _mm_unpackhi_pi8(mm4, mm2); \
-    mm5 = _mm_unpackhi_pi8(mm5, mm3); \
+    mm5 = _mm_unpacklo_pi8(mm5, mm3); \
+    mm6 = mm4; \
     mm4 = _mm_unpacklo_pi16(mm4, mm5); \
-    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm4; \
-    mm4 = mm0; \
-    mm4 = _mm_unpackhi_pi8(mm4, mm2); \
-    mm4 = _mm_unpackhi_pi16(mm4, mm5); \
-    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm4; \
+    *(uint64_t *)p_buffer = (uint64_t)mm4; \
+    mm6 = _mm_unpackhi_pi16(mm6, mm5); \
+    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6; \
+    mm0 = _mm_unpackhi_pi8(mm0, mm2); \
+    mm1 = _mm_unpackhi_pi8(mm1, mm3); \
+    mm5 = mm0; \
+    mm5 = _mm_unpacklo_pi16(mm5, mm1); \
+    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5; \
+    mm0 = _mm_unpackhi_pi16(mm0, mm1); \
+    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;
+
+#define SSE2_INTRINSICS_UNPACK_32_ARGB_ALIGNED \
+    xmm3 = _mm_setzero_si128();          \
+    xmm4 = xmm0; \
+    xmm4 = _mm_unpacklo_epi8(xmm4, xmm2); \
+    xmm5 = xmm1; \
+    xmm5 = _mm_unpacklo_epi8(xmm5, xmm3); \
+    xmm6 = xmm4; \
+    xmm4 = _mm_unpacklo_epi16(xmm4, xmm5); \
+    _mm_stream_si128((__m128i*)(p_buffer), xmm4); \
+    xmm6 = _mm_unpackhi_epi16(xmm6, xmm5); \
+    _mm_stream_si128((__m128i*)(p_buffer+4), xmm6); \
+    xmm0 = _mm_unpackhi_epi8(xmm0, xmm2); \
+    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3); \
+    xmm5 = xmm0; \
+    xmm5 = _mm_unpacklo_epi16(xmm5, xmm1); \
+    _mm_stream_si128((__m128i*)(p_buffer+8), xmm5); \
+    xmm0 = _mm_unpackhi_epi16(xmm0, xmm1); \
+    _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);
+
+#define SSE2_INTRINSICS_UNPACK_32_ARGB_UNALIGNED \
+    xmm3 = _mm_setzero_si128();          \
+    xmm4 = xmm0; \
+    xmm4 = _mm_unpacklo_epi8(xmm4, xmm2); \
+    xmm5 = xmm1; \
+    xmm5 = _mm_unpacklo_epi8(xmm5, xmm3); \
+    xmm6 = xmm4; \
+    xmm4 = _mm_unpacklo_epi16(xmm4, xmm5); \
+    _mm_storeu_si128((__m128i*)(p_buffer), xmm4); \
+    xmm6 = _mm_unpackhi_epi16(xmm6, xmm5); \
+    _mm_storeu_si128((__m128i*)(p_buffer+4), xmm6); \
+    xmm0 = _mm_unpackhi_epi8(xmm0, xmm2); \
+    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3); \
+    xmm5 = xmm0; \
+    xmm5 = _mm_unpacklo_epi16(xmm5, xmm1); \
+    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5); \
+    xmm0 = _mm_unpackhi_epi16(xmm0, xmm1); \
+    _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);
+
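Read as little-endian 32-bit values, the ARGB unpack above produces 0x00RRGGBB pixels (bytes B, G, R, 0 in memory), while the BGRA unpack below produces 0xBBGGRR00 (bytes 0, R, G, B). A scalar sketch of the two layouts, for illustration only:

#include <stdint.h>

/* Scalar equivalents of the two 32 bpp packings, expressed as little-endian
 * 32-bit values; the SIMD code builds them with punpcklbw/punpcklwd instead. */
static inline uint32_t pack_xrgb( uint8_t r, uint8_t g, uint8_t b )
{
    return ( (uint32_t)r << 16 ) | ( (uint32_t)g << 8 ) | b;
}

static inline uint32_t pack_bgrx( uint8_t r, uint8_t g, uint8_t b )
{
    return ( (uint32_t)b << 24 ) | ( (uint32_t)g << 16 ) | ( (uint32_t)r << 8 );
}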
+#define MMX_UNPACK_32_BGRA "                                                \n\
+pxor      %%mm3, %%mm3  # zero mm3                                          \n\
+movq      %%mm2, %%mm4  #                 G7 G6 G5 G4 G3 G2 G1 G0           \n\
+punpcklbw %%mm0, %%mm4  #                 B3 G3 B2 G2 B1 G1 B0 G0           \n\
+punpcklbw %%mm1, %%mm3  #                 R3 00 R2 00 R1 00 R0 00           \n\
+movq      %%mm3, %%mm5  #                 R3 00 R2 00 R1 00 R0 00           \n\
+punpcklwd %%mm4, %%mm3  #                 B1 G1 R1 00 B0 G0 R0 00           \n\
+movq      %%mm3, (%3)   # Store BGRA1 BGRA0                                 \n\
+punpckhwd %%mm4, %%mm5  #                 B3 G3 R3 00 B2 G2 R2 00           \n\
+movq      %%mm5, 8(%3)  # Store BGRA3 BGRA2                                 \n\
+pxor      %%mm3, %%mm3  # zero mm3                                          \n\
+movq      %%mm2, %%mm4  #                 G7 G6 G5 G4 G3 G2 G1 G0           \n\
+punpckhbw %%mm0, %%mm4  #                 B7 G7 B6 G6 B5 G5 B4 G4           \n\
+punpckhbw %%mm1, %%mm3  #                 R7 00 R6 00 R5 00 R4 00           \n\
+movq      %%mm3, %%mm5  #                 R7 00 R6 00 R5 00 R4 00           \n\
+punpcklwd %%mm4, %%mm3  #                 B5 G5 R5 00 B4 G4 R4 00           \n\
+movq      %%mm3, 16(%3) # Store BGRA5 BGRA4                                 \n\
+punpckhwd %%mm4, %%mm5  #                 B7 G7 R7 00 B6 G6 R6 00           \n\
+movq      %%mm5, 24(%3) # Store BGRA7 BGRA6                                 \n\
+"
+
+#define SSE2_UNPACK_32_BGRA_ALIGNED "                                       \n\
+pxor      %%xmm3, %%xmm3  # zero mm3                                        \n\
+movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
+punpcklbw %%xmm0, %%xmm4  #                 B3 G3 B2 G2 B1 G1 B0 G0         \n\
+punpcklbw %%xmm1, %%xmm3  #                 R3 00 R2 00 R1 00 R0 00         \n\
+movdqa    %%xmm3, %%xmm5  #                 R3 00 R2 00 R1 00 R0 00         \n\
+punpcklwd %%xmm4, %%xmm3  #                 B1 G1 R1 00 B0 G0 R0 00         \n\
+movntdq   %%xmm3, (%3)    # Store BGRA3 BGRA2 BGRA1 BGRA0                   \n\
+punpckhwd %%xmm4, %%xmm5  #                 B3 G3 R3 00 B2 G2 R2 00         \n\
+movntdq   %%xmm5, 16(%3)  # Store BGRA7 BGRA6 BGRA5 BGRA4                   \n\
+pxor      %%xmm3, %%xmm3  # zero mm3                                        \n\
+movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
+punpckhbw %%xmm0, %%xmm4  #                 B7 G7 B6 G6 B5 G5 B4 G4         \n\
+punpckhbw %%xmm1, %%xmm3  #                 R7 00 R6 00 R5 00 R4 00         \n\
+movdqa    %%xmm3, %%xmm5  #                 R7 00 R6 00 R5 00 R4 00         \n\
+punpcklwd %%xmm4, %%xmm3  #                 B5 G5 R5 00 B4 G4 R4 00         \n\
+movntdq   %%xmm3, 32(%3)  # Store BGRA11 BGRA10 BGRA9 BGRA8                 \n\
+punpckhwd %%xmm4, %%xmm5  #                 B7 G7 R7 00 B6 G6 R6 00         \n\
+movntdq   %%xmm5, 48(%3)  # Store BGRA15 BGRA14 BGRA13 BGRA12               \n\
+"
+
+#define SSE2_UNPACK_32_BGRA_UNALIGNED "                                     \n\
+pxor      %%xmm3, %%xmm3  # zero mm3                                        \n\
+movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
+punpcklbw %%xmm0, %%xmm4  #                 B3 G3 B2 G2 B1 G1 B0 G0         \n\
+punpcklbw %%xmm1, %%xmm3  #                 R3 00 R2 00 R1 00 R0 00         \n\
+movdqa    %%xmm3, %%xmm5  #                 R3 00 R2 00 R1 00 R0 00         \n\
+punpcklwd %%xmm4, %%xmm3  #                 B1 G1 R1 00 B0 G0 R0 00         \n\
+movdqu    %%xmm3, (%3)    # Store BGRA3 BGRA2 BGRA1 BGRA0                   \n\
+punpckhwd %%xmm4, %%xmm5  #                 B3 G3 R3 00 B2 G2 R2 00         \n\
+movdqu    %%xmm5, 16(%3)  # Store BGRA7 BGRA6 BGRA5 BGRA4                   \n\
+pxor      %%xmm3, %%xmm3  # zero mm3                                        \n\
+movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
+punpckhbw %%xmm0, %%xmm4  #                 B7 G7 B6 G6 B5 G5 B4 G4         \n\
+punpckhbw %%xmm1, %%xmm3  #                 R7 00 R6 00 R5 00 R4 00         \n\
+movdqa    %%xmm3, %%xmm5  #                 R7 00 R6 00 R5 00 R4 00         \n\
+punpcklwd %%xmm4, %%xmm3  #                 B5 G5 R5 00 B4 G4 R4 00         \n\
+movdqu    %%xmm3, 32(%3)  # Store BGRA11 BGRA10 BGRA9 BGRA8                 \n\
+punpckhwd %%xmm4, %%xmm5  #                 B7 G7 R7 00 B6 G6 R6 00         \n\
+movdqu    %%xmm5, 48(%3)  # Store BGRA15 BGRA14 BGRA13 BGRA12               \n\
+"
+
+#define MMX_INTRINSICS_UNPACK_32_BGRA \
+    mm3 = _mm_setzero_si64(); \
+    mm4 = mm2; \
+    mm4 = _mm_unpacklo_pi8(mm4, mm0); \
+    mm3 = _mm_unpacklo_pi8(mm3, mm1); \
+    mm5 = mm3; \
+    mm3 = _mm_unpacklo_pi16(mm3, mm4); \
+    *(uint64_t *)p_buffer = (uint64_t)mm3; \
+    mm5 = _mm_unpackhi_pi16(mm5, mm4); \
+    *(uint64_t *)(p_buffer + 2) = (uint64_t)mm5; \
+    mm3 = _mm_setzero_si64(); \
+    mm4 = mm2; \
+    mm4 = _mm_unpackhi_pi8(mm4, mm0); \
+    mm3 = _mm_unpackhi_pi8(mm3, mm1); \
+    mm5 = mm3; \
+    mm3 = _mm_unpacklo_pi16(mm3, mm4); \
+    *(uint64_t *)(p_buffer + 4) = (uint64_t)mm3; \
+    mm5 = _mm_unpackhi_pi16(mm5, mm4); \
+    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm5;
+
+#define SSE2_INTRINSICS_UNPACK_32_BGRA_ALIGNED \
+    xmm3 = _mm_setzero_si128(); \
+    xmm4 = xmm2; \
+    xmm4 = _mm_unpacklo_epi8(xmm4, xmm0); \
+    xmm3 = _mm_unpacklo_epi8(xmm3, xmm1); \
+    xmm5 = xmm3; \
+    xmm3 = _mm_unpacklo_epi16(xmm3, xmm4); \
+    _mm_stream_si128((__m128i*)(p_buffer), xmm3); \
+    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4); \
+    _mm_stream_si128((__m128i*)(p_buffer+4), xmm5); \
+    xmm3 = _mm_setzero_si128(); \
+    xmm4 = xmm2; \
+    xmm4 = _mm_unpackhi_epi8(xmm4, xmm0); \
+    xmm3 = _mm_unpackhi_epi8(xmm3, xmm1); \
+    xmm5 = xmm3; \
+    xmm3 = _mm_unpacklo_epi16(xmm3, xmm4); \
+    _mm_stream_si128((__m128i*)(p_buffer+8), xmm3); \
+    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4); \
+    _mm_stream_si128((__m128i*)(p_buffer+12), xmm5);
+
+#define SSE2_INTRINSICS_UNPACK_32_BGRA_UNALIGNED \
+    xmm3 = _mm_setzero_si128(); \
+    xmm4 = xmm2; \
+    xmm4 = _mm_unpacklo_epi8(xmm4, xmm0); \
+    xmm3 = _mm_unpacklo_epi8(xmm3, xmm1); \
+    xmm5 = xmm3; \
+    xmm3 = _mm_unpacklo_epi16(xmm3, xmm4); \
+    _mm_storeu_si128((__m128i*)(p_buffer), xmm3); \
+    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4); \
+    _mm_storeu_si128((__m128i*)(p_buffer+4), xmm5); \
+    xmm3 = _mm_setzero_si128(); \
+    xmm4 = xmm2; \
+    xmm4 = _mm_unpackhi_epi8(xmm4, xmm0); \
+    xmm3 = _mm_unpackhi_epi8(xmm3, xmm1); \
+    xmm5 = xmm3; \
+    xmm3 = _mm_unpacklo_epi16(xmm3, xmm4); \
+    _mm_storeu_si128((__m128i*)(p_buffer+8), xmm3); \
+    xmm5 = _mm_unpackhi_epi16(xmm5, xmm4); \
+    _mm_storeu_si128((__m128i*)(p_buffer+12), xmm5);
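For orientation only: the intrinsics macros above are meant to be expanded back-to-back inside the row loops of the i420_rgb*.c converters. A hedged sketch of such a loop for the 16 bpp case, with an illustrative function name and the margin/alignment handling of the real converters left out:

#include <emmintrin.h>
#include <stdint.h>

/* Illustrative sketch only: convert one row whose width is a multiple of 16
 * and whose pointers are all 16-byte aligned, using the intrinsics macros
 * declared above (they expect the names p_y, p_u, p_v and p_buffer). */
static void i420_row_to_rgb565_sketch( uint8_t *p_y, uint8_t *p_u, uint8_t *p_v,
                                       uint16_t *p_buffer, int i_width )
{
    __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;

    for( int i = 0; i < i_width; i += 16 )
    {
        SSE2_INTRINSICS_INIT_16_ALIGNED
        SSE2_INTRINSICS_YUV_MUL
        SSE2_INTRINSICS_YUV_ADD
        SSE2_INTRINSICS_UNPACK_16_ALIGNED

        p_y += 16; p_u += 8; p_v += 8; p_buffer += 16;
    }
    _mm_sfence();   /* the *_ALIGNED unpack used non-temporal stores */
}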