Bump dates to 2016
diff --git a/common/x86/predict-c.c b/common/x86/predict-c.c
index 1c40cfc4bfd9382b39826a164d62058085fdd4df..b5a8b45c1874cf786d48ebaea3a61d8d942371fd 100644
--- a/common/x86/predict-c.c
+++ b/common/x86/predict-c.c
@@ -1,10 +1,11 @@
 /*****************************************************************************
- * predict.c: h264 encoder
+ * predict-c.c: intra prediction
  *****************************************************************************
- * Copyright (C) 2003 Laurent Aimar
- * $Id: predict.c,v 1.1 2004/06/03 19:27:07 fenrir Exp $
+ * Copyright (C) 2003-2016 x264 project
  *
  * Authors: Laurent Aimar <fenrir@via.ecp.fr>
+ *          Loren Merritt <lorenm@u.washington.edu>
+ *          Fiona Glaser <fiona@x264.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111, USA.
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at licensing@x264.com.
  *****************************************************************************/
 
 #include "common/common.h"
 #include "predict.h"
 #include "pixel.h"
 
-extern void predict_16x16_v_mmx( uint8_t *src );
-extern void predict_16x16_dc_core_mmxext( uint8_t *src, int i_dc_left );
-extern void predict_16x16_dc_top_mmxext( uint8_t *src );
-extern void predict_16x16_p_core_mmxext( uint8_t *src, int i00, int b, int c );
-extern void predict_8x8c_p_core_mmxext( uint8_t *src, int i00, int b, int c );
-extern void predict_8x8c_dc_core_mmxext( uint8_t *src, int s2, int s3 );
-extern void predict_8x8c_v_mmx( uint8_t *src );
-extern void predict_8x8_v_mmxext( uint8_t *src, uint8_t edge[33] );
-extern void predict_8x8_dc_mmxext( uint8_t *src, uint8_t edge[33] );
-extern void predict_8x8_dc_top_mmxext( uint8_t *src, uint8_t edge[33] );
-extern void predict_8x8_dc_left_mmxext( uint8_t *src, uint8_t edge[33] );
-extern void predict_8x8_ddl_mmxext( uint8_t *src, uint8_t edge[33] );
-extern void predict_8x8_ddr_mmxext( uint8_t *src, uint8_t edge[33] );
-extern void predict_8x8_ddl_sse2( uint8_t *src, uint8_t edge[33] );
-extern void predict_8x8_ddr_sse2( uint8_t *src, uint8_t edge[33] );
-extern void predict_8x8_vl_sse2( uint8_t *src, uint8_t edge[33] );
-extern void predict_8x8_vr_core_mmxext( uint8_t *src, uint8_t edge[33] );
-extern void predict_4x4_ddl_mmxext( uint8_t *src );
-extern void predict_4x4_vl_mmxext( uint8_t *src );
-extern void predict_16x16_dc_top_sse2( uint8_t *src );
-extern void predict_16x16_dc_core_sse2( uint8_t *src, int i_dc_left );
-extern void predict_16x16_v_sse2( uint8_t *src );
-extern void predict_16x16_p_core_sse2( uint8_t *src, int i00, int b, int c );
-
-#define PREDICT_16x16_P(name)\
-static void predict_16x16_p_##name( uint8_t *src )\
+#define PREDICT_16x16_DC(name)\
+void x264_predict_16x16_dc_##name( pixel *src )\
 {\
-    int a, b, c, i;\
-    int H = 0;\
-    int V = 0;\
-    int i00;\
-    for( i = 1; i <= 8; i++ )\
+    uint32_t dc = 16;\
+    for( int i = 0; i < 16; i += 2 )\
     {\
-        H += i * ( src[7+i - FDEC_STRIDE ]  - src[7-i - FDEC_STRIDE ] );\
-        V += i * ( src[(7+i)*FDEC_STRIDE -1] - src[(7-i)*FDEC_STRIDE -1] );\
+        dc += src[-1 + i * FDEC_STRIDE];\
+        dc += src[-1 + (i+1) * FDEC_STRIDE];\
     }\
-    a = 16 * ( src[15*FDEC_STRIDE -1] + src[15 - FDEC_STRIDE] );\
-    b = ( 5 * H + 32 ) >> 6;\
-    c = ( 5 * V + 32 ) >> 6;\
-    i00 = a - b * 7 - c * 7 + 16;\
-    predict_16x16_p_core_##name( src, i00, b, c );\
+    x264_predict_16x16_dc_core_##name( src, dc );\
 }
 
-PREDICT_16x16_P( mmxext )
-PREDICT_16x16_P( sse2   )
-
-static void predict_8x8c_p_mmxext( uint8_t *src )
-{
-    int a, b, c, i;
-    int H = 0;
-    int V = 0;
-    int i00;
-
-    for( i = 1; i <= 4; i++ )
-    {
-        H += i * ( src[3+i - FDEC_STRIDE] - src[3-i - FDEC_STRIDE] );
-        V += i * ( src[(3+i)*FDEC_STRIDE -1] - src[(3-i)*FDEC_STRIDE -1] );
-    }
-
-    a = 16 * ( src[7*FDEC_STRIDE -1] + src[7 - FDEC_STRIDE] );
-    b = ( 17 * H + 16 ) >> 5;
-    c = ( 17 * V + 16 ) >> 5;
-    i00 = a -3*b -3*c + 16;
+PREDICT_16x16_DC( mmx2 )
+PREDICT_16x16_DC( sse2 )
+PREDICT_16x16_DC( avx2 )
 
-    predict_8x8c_p_core_mmxext( src, i00, b, c );
-}
-
-#define PREDICT_16x16_DC(name)\
-static void predict_16x16_dc_##name( uint8_t *src )\
+#define PREDICT_16x16_DC_LEFT(name)\
+static void x264_predict_16x16_dc_left_##name( pixel *src )\
 {\
-    uint32_t dc=16;\
-    int i;\
-    for( i = 0; i < 16; i+=2 )\
+    uint32_t dc = 8;\
+    for( int i = 0; i < 16; i += 2 )\
     {\
         dc += src[-1 + i * FDEC_STRIDE];\
         dc += src[-1 + (i+1) * FDEC_STRIDE];\
     }\
-    predict_16x16_dc_core_##name( src, dc );\
+    x264_predict_16x16_dc_left_core_##name( src, dc>>4 );\
 }
 
-PREDICT_16x16_DC( mmxext )
-PREDICT_16x16_DC( sse2   )
-
-static void predict_8x8c_dc_mmxext( uint8_t *src )
-{
-    int s2 = 4
-       + src[-1 + 0*FDEC_STRIDE]
-       + src[-1 + 1*FDEC_STRIDE]
-       + src[-1 + 2*FDEC_STRIDE]
-       + src[-1 + 3*FDEC_STRIDE];
-
-    int s3 = 2
-       + src[-1 + 4*FDEC_STRIDE]
-       + src[-1 + 5*FDEC_STRIDE]
-       + src[-1 + 6*FDEC_STRIDE]
-       + src[-1 + 7*FDEC_STRIDE];
-
-    predict_8x8c_dc_core_mmxext( src, s2, s3 );
+PREDICT_16x16_DC_LEFT( mmx2 )
+PREDICT_16x16_DC_LEFT( sse2 )
+PREDICT_16x16_DC_LEFT( avx2 )
+
+#define PREDICT_P_SUM(j,i)\
+    H += i * ( src[j+i - FDEC_STRIDE ]  - src[j-i - FDEC_STRIDE ] );\
+    V += i * ( src[(j+i)*FDEC_STRIDE -1] - src[(j-i)*FDEC_STRIDE -1] );
+
+#if HAVE_X86_INLINE_ASM
+#if HIGH_BIT_DEPTH
+ALIGNED_16( static const int16_t pw_12345678[8] ) = {1,2,3,4,5,6,7,8};
+ALIGNED_16( static const int16_t pw_m87654321[8] ) = {-8,-7,-6,-5,-4,-3,-2,-1};
+ALIGNED_16( static const int16_t pw_m32101234[8] ) = {-3,-2,-1,0,1,2,3,4};
+#else // !HIGH_BIT_DEPTH
+ALIGNED_8( static const int8_t pb_12345678[8] ) = {1,2,3,4,5,6,7,8};
+ALIGNED_8( static const int8_t pb_m87654321[8] ) = {-8,-7,-6,-5,-4,-3,-2,-1};
+ALIGNED_8( static const int8_t pb_m32101234[8] ) = {-3,-2,-1,0,1,2,3,4};
+#endif // HIGH_BIT_DEPTH
+#endif // HAVE_X86_INLINE_ASM
+
+#define PREDICT_16x16_P_CORE\
+    int H = 0;\
+    int V = 0;\
+    PREDICT_P_SUM(7,1)\
+    PREDICT_P_SUM(7,2)\
+    PREDICT_P_SUM(7,3)\
+    PREDICT_P_SUM(7,4)\
+    PREDICT_P_SUM(7,5)\
+    PREDICT_P_SUM(7,6)\
+    PREDICT_P_SUM(7,7)\
+    PREDICT_P_SUM(7,8)
+
+#define PREDICT_16x16_P_END(name)\
+    int a = 16 * ( src[15*FDEC_STRIDE -1] + src[15 - FDEC_STRIDE] );\
+    int b = ( 5 * H + 32 ) >> 6;\
+    int c = ( 5 * V + 32 ) >> 6;\
+    int i00 = a - b * 7 - c * 7 + 16;\
+    /* b*15 + c*15 can overflow: it's easier to just branch away in this rare case
+     * than to try to consider it in the asm. */\
+    if( BIT_DEPTH > 8 && (i00 > 0x7fff || abs(b) > 1092 || abs(c) > 1092) )\
+        x264_predict_16x16_p_c( src );\
+    else\
+        x264_predict_16x16_p_core_##name( src, i00, b, c );
+
+#define PREDICT_16x16_P(name, name2)\
+static void x264_predict_16x16_p_##name( pixel *src )\
+{\
+    PREDICT_16x16_P_CORE\
+    PREDICT_16x16_P_END(name2)\
 }
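
For reference: PREDICT_16x16_P_CORE and PREDICT_16x16_P_END compute the H.264 16x16 plane-mode parameters, with i00 = a - 7*b - 7*c + 16 being the unshifted predictor value at pixel (0,0). A minimal plain-C sketch of what the x264_predict_16x16_p_core_* routines then evaluate (8-bit case, assuming the x264_clip_uint8 helper from common/common.h) is:

/* Sketch only: i00, b and c are the values produced by PREDICT_16x16_P_END;
 * the real _core routines vectorize these loops. */
static void predict_16x16_p_reference( uint8_t *src, int i00, int b, int c )
{
    for( int y = 0; y < 16; y++ )
    {
        int pix = i00;
        for( int x = 0; x < 16; x++ )
        {
            src[x] = x264_clip_uint8( pix >> 5 );
            pix += b;
        }
        src += FDEC_STRIDE;
        i00 += c;
    }
}

This is also why the BIT_DEPTH > 8 check above falls back to x264_predict_16x16_p_c: for large i00, b or c the asm's intermediate sums could overflow, as the comment in the macro notes.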
 
-#ifdef ARCH_X86_64
-static void predict_16x16_h( uint8_t *src )
-{
-    int y;
-    for( y = 0; y < 16; y++ )
-    {
-        const uint64_t v = 0x0101010101010101ULL * src[-1];
-        uint64_t *p = (uint64_t*)src;
-        p[0] = p[1] = v;
-        src += FDEC_STRIDE;
-    }
+#if HAVE_X86_INLINE_ASM
+#if HIGH_BIT_DEPTH
+#define PREDICT_16x16_P_ASM\
+    asm (\
+        "movdqu           %1, %%xmm1 \n"\
+        "movdqa           %2, %%xmm0 \n"\
+        "pmaddwd          %3, %%xmm0 \n"\
+        "pmaddwd          %4, %%xmm1 \n"\
+        "paddd        %%xmm1, %%xmm0 \n"\
+        "movhlps      %%xmm0, %%xmm1 \n"\
+        "paddd        %%xmm1, %%xmm0 \n"\
+        "pshuflw $14, %%xmm0, %%xmm1 \n"\
+        "paddd        %%xmm1, %%xmm0 \n"\
+        "movd         %%xmm0, %0     \n"\
+        :"=r"(H)\
+        :"m"(src[-FDEC_STRIDE-1]), "m"(src[-FDEC_STRIDE+8]),\
+         "m"(*pw_12345678), "m"(*pw_m87654321)\
+    );
+#else // !HIGH_BIT_DEPTH
+#define PREDICT_16x16_P_ASM\
+    asm (\
+        "movq           %1, %%mm1 \n"\
+        "movq           %2, %%mm0 \n"\
+        "palignr $7,    %3, %%mm1 \n"\
+        "pmaddubsw      %4, %%mm0 \n"\
+        "pmaddubsw      %5, %%mm1 \n"\
+        "paddw       %%mm1, %%mm0 \n"\
+        "pshufw $14, %%mm0, %%mm1 \n"\
+        "paddw       %%mm1, %%mm0 \n"\
+        "pshufw  $1, %%mm0, %%mm1 \n"\
+        "paddw       %%mm1, %%mm0 \n"\
+        "movd        %%mm0, %0    \n"\
+        "movswl        %w0, %0    \n"\
+        :"=r"(H)\
+        :"m"(src[-FDEC_STRIDE]), "m"(src[-FDEC_STRIDE+8]),\
+         "m"(src[-FDEC_STRIDE-8]), "m"(*pb_12345678), "m"(*pb_m87654321)\
+    );
+#endif // HIGH_BIT_DEPTH
+
+#define PREDICT_16x16_P_CORE_INLINE\
+    int H, V;\
+    PREDICT_16x16_P_ASM\
+    V = 8 * ( src[15*FDEC_STRIDE-1] - src[-1*FDEC_STRIDE-1] )\
+      + 7 * ( src[14*FDEC_STRIDE-1] - src[ 0*FDEC_STRIDE-1] )\
+      + 6 * ( src[13*FDEC_STRIDE-1] - src[ 1*FDEC_STRIDE-1] )\
+      + 5 * ( src[12*FDEC_STRIDE-1] - src[ 2*FDEC_STRIDE-1] )\
+      + 4 * ( src[11*FDEC_STRIDE-1] - src[ 3*FDEC_STRIDE-1] )\
+      + 3 * ( src[10*FDEC_STRIDE-1] - src[ 4*FDEC_STRIDE-1] )\
+      + 2 * ( src[ 9*FDEC_STRIDE-1] - src[ 5*FDEC_STRIDE-1] )\
+      + 1 * ( src[ 8*FDEC_STRIDE-1] - src[ 6*FDEC_STRIDE-1] );
+
+#define PREDICT_16x16_P_INLINE(name, name2)\
+static void x264_predict_16x16_p_##name( pixel *src )\
+{\
+    PREDICT_16x16_P_CORE_INLINE\
+    PREDICT_16x16_P_END(name2)\
 }
-
-static void predict_8x8c_h( uint8_t *src )
-{
-    int y;
-    for( y = 0; y < 8; y++ )
-    {
-        *(uint64_t*)src = 0x0101010101010101ULL * src[-1];
-        src += FDEC_STRIDE;
-    }
+#else // !HAVE_X86_INLINE_ASM
+#define PREDICT_16x16_P_INLINE(name, name2) PREDICT_16x16_P(name, name2)
+#endif // HAVE_X86_INLINE_ASM
+
+#if HIGH_BIT_DEPTH
+PREDICT_16x16_P_INLINE( sse2, sse2 )
+#else // !HIGH_BIT_DEPTH
+#if !ARCH_X86_64
+PREDICT_16x16_P( mmx2, mmx2 )
+#endif // !ARCH_X86_64
+PREDICT_16x16_P( sse2, sse2 )
+#if HAVE_X86_INLINE_ASM
+PREDICT_16x16_P_INLINE( ssse3, sse2 )
+#endif // HAVE_X86_INLINE_ASM
+PREDICT_16x16_P_INLINE( avx, avx )
+#endif // HIGH_BIT_DEPTH
+PREDICT_16x16_P_INLINE( avx2, avx2 )
+
+#define PREDICT_8x16C_P_CORE\
+    int H = 0, V = 0;\
+    for( int i = 0; i < 4; i++ )\
+        H += ( i + 1 ) * ( src[4 + i - FDEC_STRIDE] - src[2 - i - FDEC_STRIDE] );\
+    for( int i = 0; i < 8; i++ )\
+        V += ( i + 1 ) * ( src[-1 + (i+8)*FDEC_STRIDE] - src[-1 + (6-i)*FDEC_STRIDE] );
+
+#if HIGH_BIT_DEPTH
+#define PREDICT_8x16C_P_END(name)\
+    int a = 16 * ( src[-1 + 15*FDEC_STRIDE] + src[7 - FDEC_STRIDE] );\
+    int b = ( 17 * H + 16 ) >> 5;\
+    int c = ( 5 * V + 32 ) >> 6;\
+    x264_predict_8x16c_p_core_##name( src, a, b, c );
+#else // !HIGH_BIT_DEPTH
+#define PREDICT_8x16C_P_END(name)\
+    int a = 16 * ( src[-1 + 15*FDEC_STRIDE] + src[7 - FDEC_STRIDE] );\
+    int b = ( 17 * H + 16 ) >> 5;\
+    int c = ( 5 * V + 32 ) >> 6;\
+    int i00 = a -3*b -7*c + 16;\
+    x264_predict_8x16c_p_core_##name( src, i00, b, c );
+#endif // HIGH_BIT_DEPTH
+
+#define PREDICT_8x16C_P(name)\
+static void x264_predict_8x16c_p_##name( pixel *src )\
+{\
+    PREDICT_8x16C_P_CORE\
+    PREDICT_8x16C_P_END(name)\
 }
 
-static void predict_16x16_dc_left( uint8_t *src )
-{
-    uint32_t s = 0;
-    uint64_t dc; 
-    int y;
-    
-    for( y = 0; y < 16; y++ )
-    {
-        s += src[-1 + y * FDEC_STRIDE];
-    }   
-    dc = (( s + 8 ) >> 4) * 0x0101010101010101ULL;
-    
-    for( y = 0; y < 16; y++ )
-    {
-        uint64_t *p = (uint64_t*)src;
-        p[0] = p[1] = dc;
-        src += FDEC_STRIDE;
-    }
+#if !ARCH_X86_64 && !HIGH_BIT_DEPTH
+PREDICT_8x16C_P( mmx2 )
+#endif // !ARCH_X86_64 && !HIGH_BIT_DEPTH
+PREDICT_8x16C_P( sse2 )
+PREDICT_8x16C_P( avx )
+PREDICT_8x16C_P( avx2 )
+
+#define PREDICT_8x8C_P_CORE\
+    int H = 0;\
+    int V = 0;\
+    PREDICT_P_SUM(3,1)\
+    PREDICT_P_SUM(3,2)\
+    PREDICT_P_SUM(3,3)\
+    PREDICT_P_SUM(3,4)
+
+#if HIGH_BIT_DEPTH
+#define PREDICT_8x8C_P_END(name)\
+    int a = 16 * ( src[7*FDEC_STRIDE -1] + src[7 - FDEC_STRIDE] );\
+    int b = ( 17 * H + 16 ) >> 5;\
+    int c = ( 17 * V + 16 ) >> 5;\
+    x264_predict_8x8c_p_core_##name( src, a, b, c );
+#else // !HIGH_BIT_DEPTH
+#define PREDICT_8x8C_P_END(name)\
+    int a = 16 * ( src[7*FDEC_STRIDE -1] + src[7 - FDEC_STRIDE] );\
+    int b = ( 17 * H + 16 ) >> 5;\
+    int c = ( 17 * V + 16 ) >> 5;\
+    int i00 = a -3*b -3*c + 16;\
+    x264_predict_8x8c_p_core_##name( src, i00, b, c );
+#endif // HIGH_BIT_DEPTH
+
+#define PREDICT_8x8C_P(name, name2)\
+static void x264_predict_8x8c_p_##name( pixel *src )\
+{\
+    PREDICT_8x8C_P_CORE\
+    PREDICT_8x8C_P_END(name2)\
 }
 
-static void predict_8x8c_dc_left( uint8_t *src )
+#if HAVE_X86_INLINE_ASM
+#if HIGH_BIT_DEPTH
+#define PREDICT_8x8C_P_ASM\
+    asm (\
+        "movdqa           %1, %%xmm0 \n"\
+        "pmaddwd          %2, %%xmm0 \n"\
+        "movhlps      %%xmm0, %%xmm1 \n"\
+        "paddd        %%xmm1, %%xmm0 \n"\
+        "pshuflw $14, %%xmm0, %%xmm1 \n"\
+        "paddd        %%xmm1, %%xmm0 \n"\
+        "movd         %%xmm0, %0     \n"\
+        :"=r"(H)\
+        :"m"(src[-FDEC_STRIDE]), "m"(*pw_m32101234)\
+    );
+#else // !HIGH_BIT_DEPTH
+#define PREDICT_8x8C_P_ASM\
+    asm (\
+        "movq           %1, %%mm0 \n"\
+        "pmaddubsw      %2, %%mm0 \n"\
+        "pshufw $14, %%mm0, %%mm1 \n"\
+        "paddw       %%mm1, %%mm0 \n"\
+        "pshufw  $1, %%mm0, %%mm1 \n"\
+        "paddw       %%mm1, %%mm0 \n"\
+        "movd        %%mm0, %0    \n"\
+        "movswl        %w0, %0    \n"\
+        :"=r"(H)\
+        :"m"(src[-FDEC_STRIDE]), "m"(*pb_m32101234)\
+    );
+#endif // HIGH_BIT_DEPTH
+
+#define PREDICT_8x8C_P_CORE_INLINE\
+    int H, V;\
+    PREDICT_8x8C_P_ASM\
+    V = 1 * ( src[4*FDEC_STRIDE -1] - src[ 2*FDEC_STRIDE -1] )\
+      + 2 * ( src[5*FDEC_STRIDE -1] - src[ 1*FDEC_STRIDE -1] )\
+      + 3 * ( src[6*FDEC_STRIDE -1] - src[ 0*FDEC_STRIDE -1] )\
+      + 4 * ( src[7*FDEC_STRIDE -1] - src[-1*FDEC_STRIDE -1] );\
+    H += -4 * src[-1*FDEC_STRIDE -1];
+
+#define PREDICT_8x8C_P_INLINE(name, name2)\
+static void x264_predict_8x8c_p_##name( pixel *src )\
+{\
+    PREDICT_8x8C_P_CORE_INLINE\
+    PREDICT_8x8C_P_END(name2)\
+}
+#else // !HAVE_X86_INLINE_ASM
+#define PREDICT_8x8C_P_INLINE(name, name2) PREDICT_8x8C_P(name, name2)
+#endif // HAVE_X86_INLINE_ASM
+
+#if HIGH_BIT_DEPTH
+PREDICT_8x8C_P_INLINE( sse2, sse2 )
+#else  //!HIGH_BIT_DEPTH
+#if !ARCH_X86_64
+PREDICT_8x8C_P( mmx2, mmx2 )
+#endif // !ARCH_X86_64
+PREDICT_8x8C_P( sse2, sse2 )
+#if HAVE_X86_INLINE_ASM
+PREDICT_8x8C_P_INLINE( ssse3, sse2 )
+#endif // HAVE_X86_INLINE_ASM
+#endif // HIGH_BIT_DEPTH
+PREDICT_8x8C_P_INLINE( avx, avx )
+PREDICT_8x8C_P_INLINE( avx2, avx2 )
+
+#if ARCH_X86_64 && !HIGH_BIT_DEPTH
+static void x264_predict_8x8c_dc_left( uint8_t *src )
 {
     int y;
     uint32_t s0 = 0, s1 = 0;
@@ -184,385 +329,316 @@ static void predict_8x8c_dc_left( uint8_t *src )
 
     for( y = 0; y < 4; y++ )
     {
-        *(uint64_t*)src = dc0;
+        M64( src ) = dc0;
         src += FDEC_STRIDE;
     }
     for( y = 0; y < 4; y++ )
     {
-        *(uint64_t*)src = dc1;
+        M64( src ) = dc1;
         src += FDEC_STRIDE;
     }
-
 }
+#endif // ARCH_X86_64 && !HIGH_BIT_DEPTH
 
-static void predict_8x8c_dc_top( uint8_t *src )
+/****************************************************************************
+ * Exported functions:
+ ****************************************************************************/
+void x264_predict_16x16_init_mmx( int cpu, x264_predict_t pf[7] )
 {
-    int y, x;
-    uint32_t s0 = 0, s1 = 0;
-    uint64_t dc;
-
-    for( x = 0; x < 4; x++ )
-    {
-        s0 += src[x     - FDEC_STRIDE];
-        s1 += src[x + 4 - FDEC_STRIDE];
-    }
-    dc = (( s0 + 2 ) >> 2) * 0x01010101
-       + (( s1 + 2 ) >> 2) * 0x0101010100000000ULL;
+    if( !(cpu&X264_CPU_MMX2) )
+        return;
+    pf[I_PRED_16x16_DC]      = x264_predict_16x16_dc_mmx2;
+    pf[I_PRED_16x16_DC_TOP]  = x264_predict_16x16_dc_top_mmx2;
+    pf[I_PRED_16x16_DC_LEFT] = x264_predict_16x16_dc_left_mmx2;
+    pf[I_PRED_16x16_V]       = x264_predict_16x16_v_mmx2;
+    pf[I_PRED_16x16_H]       = x264_predict_16x16_h_mmx2;
+#if HIGH_BIT_DEPTH
+    if( !(cpu&X264_CPU_SSE) )
+        return;
+    pf[I_PRED_16x16_V]       = x264_predict_16x16_v_sse;
+    if( !(cpu&X264_CPU_SSE2) )
+        return;
+    pf[I_PRED_16x16_DC]      = x264_predict_16x16_dc_sse2;
+    pf[I_PRED_16x16_DC_TOP]  = x264_predict_16x16_dc_top_sse2;
+    pf[I_PRED_16x16_DC_LEFT] = x264_predict_16x16_dc_left_sse2;
+    pf[I_PRED_16x16_H]       = x264_predict_16x16_h_sse2;
+    pf[I_PRED_16x16_P]       = x264_predict_16x16_p_sse2;
+    if( !(cpu&X264_CPU_AVX) )
+        return;
+    pf[I_PRED_16x16_V]       = x264_predict_16x16_v_avx;
+    if( !(cpu&X264_CPU_AVX2) )
+        return;
+    pf[I_PRED_16x16_H]       = x264_predict_16x16_h_avx2;
+#else
+#if !ARCH_X86_64
+    pf[I_PRED_16x16_P]       = x264_predict_16x16_p_mmx2;
+#endif
+    if( !(cpu&X264_CPU_SSE) )
+        return;
+    pf[I_PRED_16x16_V]       = x264_predict_16x16_v_sse;
+    if( !(cpu&X264_CPU_SSE2) )
+        return;
+    pf[I_PRED_16x16_DC]      = x264_predict_16x16_dc_sse2;
+    if( cpu&X264_CPU_SSE2_IS_SLOW )
+        return;
+    pf[I_PRED_16x16_DC_TOP]  = x264_predict_16x16_dc_top_sse2;
+    pf[I_PRED_16x16_DC_LEFT] = x264_predict_16x16_dc_left_sse2;
+    pf[I_PRED_16x16_P]       = x264_predict_16x16_p_sse2;
+    if( !(cpu&X264_CPU_SSSE3) )
+        return;
+    if( !(cpu&X264_CPU_SLOW_PSHUFB) )
+        pf[I_PRED_16x16_H]       = x264_predict_16x16_h_ssse3;
+#if HAVE_X86_INLINE_ASM
+    pf[I_PRED_16x16_P]       = x264_predict_16x16_p_ssse3;
+#endif
+    if( !(cpu&X264_CPU_AVX) )
+        return;
+    pf[I_PRED_16x16_P]       = x264_predict_16x16_p_avx;
+#endif // HIGH_BIT_DEPTH
 
-    for( y = 0; y < 8; y++ )
+    if( cpu&X264_CPU_AVX2 )
     {
-        *(uint64_t*)src = dc;
-        src += FDEC_STRIDE;
+        pf[I_PRED_16x16_P]       = x264_predict_16x16_p_avx2;
+        pf[I_PRED_16x16_DC]      = x264_predict_16x16_dc_avx2;
+        pf[I_PRED_16x16_DC_TOP]  = x264_predict_16x16_dc_top_avx2;
+        pf[I_PRED_16x16_DC_LEFT] = x264_predict_16x16_dc_left_avx2;
     }
 }
-#endif
-
-/* Diagonals */
-
-#define PREDICT_4x4_LOAD_LEFT \
-    const int l0 = src[-1+0*FDEC_STRIDE];   \
-    const int l1 = src[-1+1*FDEC_STRIDE];   \
-    const int l2 = src[-1+2*FDEC_STRIDE];   \
-    UNUSED const int l3 = src[-1+3*FDEC_STRIDE];
-
-#define PREDICT_4x4_LOAD_TOP \
-    const int t0 = src[0-1*FDEC_STRIDE];   \
-    const int t1 = src[1-1*FDEC_STRIDE];   \
-    const int t2 = src[2-1*FDEC_STRIDE];   \
-    UNUSED const int t3 = src[3-1*FDEC_STRIDE];
 
-#define PREDICT_4x4_LOAD_TOP_RIGHT \
-    const int t4 = src[4-1*FDEC_STRIDE];   \
-    const int t5 = src[5-1*FDEC_STRIDE];   \
-    const int t6 = src[6-1*FDEC_STRIDE];   \
-    UNUSED const int t7 = src[7-1*FDEC_STRIDE];
-
-#define F1(a,b)   (((a)+(b)+1)>>1)
-#define F2(a,b,c) (((a)+2*(b)+(c)+2)>>2)
-
-#ifdef ARCH_X86_64 // slower on x86
-#if 0
-static void predict_4x4_ddl( uint8_t *src )
+void x264_predict_8x8c_init_mmx( int cpu, x264_predict_t pf[7] )
 {
-    PREDICT_4x4_LOAD_TOP
-    PREDICT_4x4_LOAD_TOP_RIGHT
-    uint32_t vec = (F2(t3,t4,t5)<< 0)
-                 + (F2(t4,t5,t6)<< 8)
-                 + (F2(t5,t6,t7)<<16)
-                 + (F2(t6,t7,t7)<<24);
-    *(uint32_t*)&src[3*FDEC_STRIDE] = vec;
-    *(uint32_t*)&src[2*FDEC_STRIDE] = vec = (vec<<8) + F2(t2,t3,t4);
-    *(uint32_t*)&src[1*FDEC_STRIDE] = vec = (vec<<8) + F2(t1,t2,t3);
-    *(uint32_t*)&src[0*FDEC_STRIDE] = vec = (vec<<8) + F2(t0,t1,t2);
-}
+    if( !(cpu&X264_CPU_MMX) )
+        return;
+#if HIGH_BIT_DEPTH
+    pf[I_PRED_CHROMA_V]       = x264_predict_8x8c_v_mmx;
+    if( !(cpu&X264_CPU_MMX2) )
+        return;
+    pf[I_PRED_CHROMA_DC]      = x264_predict_8x8c_dc_mmx2;
+    pf[I_PRED_CHROMA_H]       = x264_predict_8x8c_h_mmx2;
+    if( !(cpu&X264_CPU_SSE) )
+        return;
+    pf[I_PRED_CHROMA_V]       = x264_predict_8x8c_v_sse;
+    if( !(cpu&X264_CPU_SSE2) )
+        return;
+    pf[I_PRED_CHROMA_DC]      = x264_predict_8x8c_dc_sse2;
+    pf[I_PRED_CHROMA_DC_TOP]  = x264_predict_8x8c_dc_top_sse2;
+    pf[I_PRED_CHROMA_H]       = x264_predict_8x8c_h_sse2;
+    pf[I_PRED_CHROMA_P]       = x264_predict_8x8c_p_sse2;
+    if( !(cpu&X264_CPU_AVX) )
+        return;
+    pf[I_PRED_CHROMA_P]       = x264_predict_8x8c_p_avx;
+    if( !(cpu&X264_CPU_AVX2) )
+        return;
+    pf[I_PRED_CHROMA_H]   = x264_predict_8x8c_h_avx2;
+#else
+#if ARCH_X86_64
+    pf[I_PRED_CHROMA_DC_LEFT] = x264_predict_8x8c_dc_left;
 #endif
-
-static void predict_4x4_ddr( uint8_t *src )
-{
-    const int lt = src[-1-FDEC_STRIDE];
-    PREDICT_4x4_LOAD_LEFT
-    PREDICT_4x4_LOAD_TOP
-    uint32_t vec = (F2(l0,lt,t0)<< 0)
-                 + (F2(lt,t0,t1)<< 8)
-                 + (F2(t0,t1,t2)<<16)
-                 + (F2(t1,t2,t3)<<24);
-    *(uint32_t*)&src[0*FDEC_STRIDE] = vec;
-    *(uint32_t*)&src[1*FDEC_STRIDE] = vec = (vec<<8) + F2(l1,l0,lt);
-    *(uint32_t*)&src[2*FDEC_STRIDE] = vec = (vec<<8) + F2(l2,l1,l0);
-    *(uint32_t*)&src[3*FDEC_STRIDE] = vec = (vec<<8) + F2(l3,l2,l1);
-}
-
-static void predict_4x4_vr( uint8_t *src )
-{
-    const int lt = src[-1-FDEC_STRIDE];
-    PREDICT_4x4_LOAD_LEFT
-    PREDICT_4x4_LOAD_TOP
-    const int ltt0 = lt + t0 + 1;
-    const int t0t1 = t0 + t1 + 1;
-    const int t1t2 = t1 + t2 + 1;
-    const int t2t3 = t2 + t3 + 1;
-    const int l0lt = l0 + lt + 1;
-    const int l1l0 = l1 + l0 + 1;
-    const int l2l1 = l2 + l1 + 1;
-
-    src[0*FDEC_STRIDE+0]=
-    src[2*FDEC_STRIDE+1]= ltt0 >> 1;
-
-    src[0*FDEC_STRIDE+1]=
-    src[2*FDEC_STRIDE+2]= t0t1 >> 1;
-
-    src[0*FDEC_STRIDE+2]=
-    src[2*FDEC_STRIDE+3]= t1t2 >> 1;
-
-    src[0*FDEC_STRIDE+3]= t2t3 >> 1;
-
-    src[1*FDEC_STRIDE+0]=
-    src[3*FDEC_STRIDE+1]= (l0lt + ltt0) >> 2;
-
-    src[1*FDEC_STRIDE+1]=
-    src[3*FDEC_STRIDE+2]= (ltt0 + t0t1) >> 2;
-
-    src[1*FDEC_STRIDE+2]=
-    src[3*FDEC_STRIDE+3]= (t0t1 + t1t2) >> 2;
-
-    src[1*FDEC_STRIDE+3]= (t1t2 + t2t3) >> 2;
-    src[2*FDEC_STRIDE+0]= (l1l0 + l0lt) >> 2;
-    src[3*FDEC_STRIDE+0]= (l2l1 + l1l0) >> 2;
-}
-
-static void predict_4x4_hd( uint8_t *src )
-{
-    const int lt= src[-1-1*FDEC_STRIDE];
-    PREDICT_4x4_LOAD_LEFT
-    PREDICT_4x4_LOAD_TOP
-    const int ltt0 = lt + t0 + 1;
-    const int t0t1 = t0 + t1 + 1;
-    const int t1t2 = t1 + t2 + 1;
-    const int l0lt = l0 + lt + 1;
-    const int l1l0 = l1 + l0 + 1;
-    const int l2l1 = l2 + l1 + 1;
-    const int l3l2 = l3 + l2 + 1;
-
-    src[0*FDEC_STRIDE+0]=
-    src[1*FDEC_STRIDE+2]= l0lt >> 1;
-    src[0*FDEC_STRIDE+1]=
-    src[1*FDEC_STRIDE+3]= (l0lt + ltt0) >> 2;
-    src[0*FDEC_STRIDE+2]= (ltt0 + t0t1) >> 2;
-    src[0*FDEC_STRIDE+3]= (t0t1 + t1t2) >> 2;
-    src[1*FDEC_STRIDE+0]=
-    src[2*FDEC_STRIDE+2]= l1l0 >> 1;
-    src[1*FDEC_STRIDE+1]=
-    src[2*FDEC_STRIDE+3]= (l0lt + l1l0) >> 2;
-    src[2*FDEC_STRIDE+0]=
-    src[3*FDEC_STRIDE+2]= l2l1 >> 1;
-    src[2*FDEC_STRIDE+1]=
-    src[3*FDEC_STRIDE+3]= (l1l0 + l2l1) >> 2;
-    src[3*FDEC_STRIDE+0]= l3l2 >> 1;
-    src[3*FDEC_STRIDE+1]= (l2l1 + l3l2) >> 2;
-}
-
-#if 0
-static void predict_4x4_vl( uint8_t *src )
-{
-    PREDICT_4x4_LOAD_TOP
-    PREDICT_4x4_LOAD_TOP_RIGHT
-    const int t0t1 = t0 + t1 + 1;
-    const int t1t2 = t1 + t2 + 1;
-    const int t2t3 = t2 + t3 + 1;
-    const int t3t4 = t3 + t4 + 1;
-    const int t4t5 = t4 + t5 + 1;
-    const int t5t6 = t5 + t6 + 1;
-
-    src[0*FDEC_STRIDE+0]= t0t1 >> 1;
-    src[0*FDEC_STRIDE+1]=
-    src[2*FDEC_STRIDE+0]= t1t2 >> 1;
-    src[0*FDEC_STRIDE+2]=
-    src[2*FDEC_STRIDE+1]= t2t3 >> 1;
-    src[0*FDEC_STRIDE+3]=
-    src[2*FDEC_STRIDE+2]= t3t4 >> 1;
-    src[2*FDEC_STRIDE+3]= t4t5 >> 1;
-    src[1*FDEC_STRIDE+0]= (t0t1 + t1t2) >> 2;
-    src[1*FDEC_STRIDE+1]=
-    src[3*FDEC_STRIDE+0]= (t1t2 + t2t3) >> 2;
-    src[1*FDEC_STRIDE+2]=
-    src[3*FDEC_STRIDE+1]= (t2t3 + t3t4) >> 2;
-    src[1*FDEC_STRIDE+3]=
-    src[3*FDEC_STRIDE+2]= (t3t4 + t4t5) >> 2;
-    src[3*FDEC_STRIDE+3]= (t4t5 + t5t6) >> 2;
-}
+    pf[I_PRED_CHROMA_V]       = x264_predict_8x8c_v_mmx;
+    if( !(cpu&X264_CPU_MMX2) )
+        return;
+    pf[I_PRED_CHROMA_DC_TOP]  = x264_predict_8x8c_dc_top_mmx2;
+    pf[I_PRED_CHROMA_H]       = x264_predict_8x8c_h_mmx2;
+#if !ARCH_X86_64
+    pf[I_PRED_CHROMA_P]       = x264_predict_8x8c_p_mmx2;
 #endif
-
-static void predict_4x4_hu( uint8_t *src )
-{
-    PREDICT_4x4_LOAD_LEFT
-    const int l1l0 = l1 + l0 + 1;
-    const int l2l1 = l2 + l1 + 1;
-    const int l3l2 = l3 + l2 + 1;
-
-    src[0*FDEC_STRIDE+0]= l1l0 >> 1;
-    src[0*FDEC_STRIDE+1]= (l1l0 + l2l1) >> 2;
-
-    src[0*FDEC_STRIDE+2]=
-    src[1*FDEC_STRIDE+0]= l2l1 >> 1;
-
-    src[0*FDEC_STRIDE+3]=
-    src[1*FDEC_STRIDE+1]= (l2l1 + l3l2) >> 2;
-
-    src[1*FDEC_STRIDE+2]=
-    src[2*FDEC_STRIDE+0]= l3l2 >> 1;
-
-    src[1*FDEC_STRIDE+3]=
-    src[2*FDEC_STRIDE+1]= (l2 + 3*l3 + 2) >> 2;
-
-    src[2*FDEC_STRIDE+3]=
-    src[3*FDEC_STRIDE+1]=
-    src[3*FDEC_STRIDE+0]=
-    src[2*FDEC_STRIDE+2]=
-    src[3*FDEC_STRIDE+2]=
-    src[3*FDEC_STRIDE+3]= l3;
-}
+    pf[I_PRED_CHROMA_DC]      = x264_predict_8x8c_dc_mmx2;
+    if( !(cpu&X264_CPU_SSE2) )
+        return;
+    pf[I_PRED_CHROMA_P]       = x264_predict_8x8c_p_sse2;
+    if( !(cpu&X264_CPU_SSSE3) )
+        return;
+    pf[I_PRED_CHROMA_H]       = x264_predict_8x8c_h_ssse3;
+#if HAVE_X86_INLINE_ASM
+    pf[I_PRED_CHROMA_P]       = x264_predict_8x8c_p_ssse3;
 #endif
+    if( !(cpu&X264_CPU_AVX) )
+        return;
+    pf[I_PRED_CHROMA_P]       = x264_predict_8x8c_p_avx;
+#endif // HIGH_BIT_DEPTH
 
-/****************************************************************************
- * 8x8 prediction for intra luma block
- ****************************************************************************/
-
-#define PL(y) \
-    UNUSED int l##y = edge[14-y];
-#define PT(x) \
-    UNUSED int t##x = edge[16+x];
-#define PREDICT_8x8_LOAD_TOPLEFT \
-    int lt = edge[15];
-#define PREDICT_8x8_LOAD_LEFT \
-    PL(0) PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) PL(7)
-#define PREDICT_8x8_LOAD_TOP \
-    PT(0) PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) PT(7)
-
-#define PREDICT_8x8_DC(v) \
-    int y; \
-    for( y = 0; y < 8; y++ ) { \
-        ((uint32_t*)src)[0] = \
-        ((uint32_t*)src)[1] = v; \
-        src += FDEC_STRIDE; \
-    }
-
-#define SRC(x,y) src[(x)+(y)*FDEC_STRIDE]
-
-static void predict_8x8_vr_mmxext( uint8_t *src, uint8_t edge[33] )
-{
-    predict_8x8_vr_core_mmxext( src, edge );
+    if( cpu&X264_CPU_AVX2 )
     {
-        PREDICT_8x8_LOAD_TOPLEFT
-        PREDICT_8x8_LOAD_LEFT
-        SRC(0,2)=SRC(1,4)=SRC(2,6)= (l1 + 2*l0 + lt + 2) >> 2;
-        SRC(0,3)=SRC(1,5)=SRC(2,7)= (l2 + 2*l1 + l0 + 2) >> 2;
-        SRC(0,4)=SRC(1,6)= (l3 + 2*l2 + l1 + 2) >> 2;
-        SRC(0,5)=SRC(1,7)= (l4 + 2*l3 + l2 + 2) >> 2;
-        SRC(0,6)= (l5 + 2*l4 + l3 + 2) >> 2;
-        SRC(0,7)= (l6 + 2*l5 + l4 + 2) >> 2;
+        pf[I_PRED_CHROMA_P]   = x264_predict_8x8c_p_avx2;
     }
 }
 
-#define SUMSUB(a,b,c,d,e,f,g,h)\
-    t=a; a+=b; b-=t;\
-    t=c; c+=d; d-=t;\
-    t=e; e+=f; f-=t;\
-    t=g; g+=h; h-=t;
-
-#define INTRA_SA8D_X3(cpu) \
-void x264_intra_sa8d_x3_8x8_##cpu( uint8_t *fenc, uint8_t edge[33], int res[3] )\
-{\
-    PREDICT_8x8_LOAD_TOP\
-    PREDICT_8x8_LOAD_LEFT\
-    int t;\
-    DECLARE_ALIGNED( int16_t, sa8d_1d[2][8], 16 );\
-    SUMSUB(l0,l4,l1,l5,l2,l6,l3,l7);\
-    SUMSUB(l0,l2,l1,l3,l4,l6,l5,l7);\
-    SUMSUB(l0,l1,l2,l3,l4,l5,l6,l7);\
-    sa8d_1d[0][0] = l0;\
-    sa8d_1d[0][1] = l1;\
-    sa8d_1d[0][2] = l2;\
-    sa8d_1d[0][3] = l3;\
-    sa8d_1d[0][4] = l4;\
-    sa8d_1d[0][5] = l5;\
-    sa8d_1d[0][6] = l6;\
-    sa8d_1d[0][7] = l7;\
-    SUMSUB(t0,t4,t1,t5,t2,t6,t3,t7);\
-    SUMSUB(t0,t2,t1,t3,t4,t6,t5,t7);\
-    SUMSUB(t0,t1,t2,t3,t4,t5,t6,t7);\
-    sa8d_1d[1][0] = t0;\
-    sa8d_1d[1][1] = t1;\
-    sa8d_1d[1][2] = t2;\
-    sa8d_1d[1][3] = t3;\
-    sa8d_1d[1][4] = t4;\
-    sa8d_1d[1][5] = t5;\
-    sa8d_1d[1][6] = t6;\
-    sa8d_1d[1][7] = t7;\
-    x264_intra_sa8d_x3_8x8_core_##cpu( fenc, sa8d_1d, res );\
-}
-
-#ifdef ARCH_X86_64
-INTRA_SA8D_X3(sse2)
-#ifdef HAVE_SSE3
-INTRA_SA8D_X3(ssse3)
-#endif
-#else
-INTRA_SA8D_X3(mmxext)
-#endif
-
-/****************************************************************************
- * Exported functions:
- ****************************************************************************/
-void x264_predict_16x16_init_mmx( int cpu, x264_predict_t pf[7] )
+void x264_predict_8x16c_init_mmx( int cpu, x264_predict_t pf[7] )
 {
     if( !(cpu&X264_CPU_MMX) )
         return;
-#ifdef ARCH_X86_64
-    pf[I_PRED_16x16_H]       = predict_16x16_h;
-    pf[I_PRED_16x16_DC_LEFT] = predict_16x16_dc_left;
-#endif
-    pf[I_PRED_16x16_V]       = predict_16x16_v_mmx;
-    if( !(cpu&X264_CPU_MMXEXT) )
-        return;
-    pf[I_PRED_16x16_DC]      = predict_16x16_dc_mmxext;
-    pf[I_PRED_16x16_DC_TOP]  = predict_16x16_dc_top_mmxext;
-    pf[I_PRED_16x16_P]       = predict_16x16_p_mmxext;
-    if( !(cpu&X264_CPU_SSE2) || (cpu&X264_CPU_3DNOW) )
-        return;
-    pf[I_PRED_16x16_DC]     = predict_16x16_dc_sse2;
-    pf[I_PRED_16x16_DC_TOP] = predict_16x16_dc_top_sse2;
-    pf[I_PRED_16x16_V]      = predict_16x16_v_sse2;
-    pf[I_PRED_16x16_P]      = predict_16x16_p_sse2;
-}
-
-void x264_predict_8x8c_init_mmx( int cpu, x264_predict_t pf[7] )
-{
-    if( !(cpu&X264_CPU_MMX) )
+#if HIGH_BIT_DEPTH
+    if( !(cpu&X264_CPU_MMX2) )
+        return;
+    pf[I_PRED_CHROMA_DC]      = x264_predict_8x16c_dc_mmx2;
+    pf[I_PRED_CHROMA_H]       = x264_predict_8x16c_h_mmx2;
+    if( !(cpu&X264_CPU_SSE) )
         return;
-#ifdef ARCH_X86_64
-    pf[I_PRED_CHROMA_H]       = predict_8x8c_h;
-    pf[I_PRED_CHROMA_DC_LEFT] = predict_8x8c_dc_left;
-    pf[I_PRED_CHROMA_DC_TOP]  = predict_8x8c_dc_top;
+    pf[I_PRED_CHROMA_V]       = x264_predict_8x16c_v_sse;
+    if( !(cpu&X264_CPU_SSE2) )
+        return;
+    pf[I_PRED_CHROMA_DC_TOP]  = x264_predict_8x16c_dc_top_sse2;
+    pf[I_PRED_CHROMA_DC]      = x264_predict_8x16c_dc_sse2;
+    pf[I_PRED_CHROMA_H]       = x264_predict_8x16c_h_sse2;
+    pf[I_PRED_CHROMA_P]       = x264_predict_8x16c_p_sse2;
+    if( !(cpu&X264_CPU_AVX) )
+        return;
+    pf[I_PRED_CHROMA_P]       = x264_predict_8x16c_p_avx;
+    if( !(cpu&X264_CPU_AVX2) )
+        return;
+    pf[I_PRED_CHROMA_H]   = x264_predict_8x16c_h_avx2;
+#else
+    pf[I_PRED_CHROMA_V]       = x264_predict_8x16c_v_mmx;
+    if( !(cpu&X264_CPU_MMX2) )
+        return;
+    pf[I_PRED_CHROMA_DC_TOP]  = x264_predict_8x16c_dc_top_mmx2;
+    pf[I_PRED_CHROMA_DC]      = x264_predict_8x16c_dc_mmx2;
+    pf[I_PRED_CHROMA_H]       = x264_predict_8x16c_h_mmx2;
+#if !ARCH_X86_64
+    pf[I_PRED_CHROMA_P]       = x264_predict_8x16c_p_mmx2;
 #endif
-    pf[I_PRED_CHROMA_V]       = predict_8x8c_v_mmx;
-    if( !(cpu&X264_CPU_MMXEXT) )
+    if( !(cpu&X264_CPU_SSE2) )
+        return;
+    pf[I_PRED_CHROMA_P]       = x264_predict_8x16c_p_sse2;
+    if( !(cpu&X264_CPU_SSSE3) )
+        return;
+    pf[I_PRED_CHROMA_H]       = x264_predict_8x16c_h_ssse3;
+    if( !(cpu&X264_CPU_AVX) )
         return;
-    pf[I_PRED_CHROMA_P]       = predict_8x8c_p_mmxext;
-    pf[I_PRED_CHROMA_DC]      = predict_8x8c_dc_mmxext;
+    pf[I_PRED_CHROMA_P]       = x264_predict_8x16c_p_avx;
+#endif // HIGH_BIT_DEPTH
+
+    if( cpu&X264_CPU_AVX2 )
+    {
+        pf[I_PRED_CHROMA_P]   = x264_predict_8x16c_p_avx2;
+    }
 }
 
-void x264_predict_8x8_init_mmx( int cpu, x264_predict8x8_t pf[12] )
+void x264_predict_8x8_init_mmx( int cpu, x264_predict8x8_t pf[12], x264_predict_8x8_filter_t *predict_8x8_filter )
 {
-    if( !(cpu&X264_CPU_MMXEXT) )
-        return;
-    pf[I_PRED_8x8_V]   = predict_8x8_v_mmxext;
-    pf[I_PRED_8x8_DC]  = predict_8x8_dc_mmxext;
-    pf[I_PRED_8x8_DC_TOP] = predict_8x8_dc_top_mmxext;
-    pf[I_PRED_8x8_DC_LEFT]= predict_8x8_dc_left_mmxext;
-    pf[I_PRED_8x8_VR]  = predict_8x8_vr_mmxext;
-#ifdef ARCH_X86
-    pf[I_PRED_8x8_DDL] = predict_8x8_ddl_mmxext;
-    pf[I_PRED_8x8_DDR] = predict_8x8_ddr_mmxext;
+    if( !(cpu&X264_CPU_MMX2) )
+        return;
+#if HIGH_BIT_DEPTH
+    if( !(cpu&X264_CPU_SSE) )
+        return;
+    pf[I_PRED_8x8_V]      = x264_predict_8x8_v_sse;
+    if( !(cpu&X264_CPU_SSE2) )
+        return;
+    pf[I_PRED_8x8_H]      = x264_predict_8x8_h_sse2;
+    pf[I_PRED_8x8_DC]     = x264_predict_8x8_dc_sse2;
+    pf[I_PRED_8x8_DC_TOP] = x264_predict_8x8_dc_top_sse2;
+    pf[I_PRED_8x8_DC_LEFT]= x264_predict_8x8_dc_left_sse2;
+    pf[I_PRED_8x8_DDL]    = x264_predict_8x8_ddl_sse2;
+    pf[I_PRED_8x8_DDR]    = x264_predict_8x8_ddr_sse2;
+    pf[I_PRED_8x8_VL]     = x264_predict_8x8_vl_sse2;
+    pf[I_PRED_8x8_VR]     = x264_predict_8x8_vr_sse2;
+    pf[I_PRED_8x8_HD]     = x264_predict_8x8_hd_sse2;
+    pf[I_PRED_8x8_HU]     = x264_predict_8x8_hu_sse2;
+    *predict_8x8_filter   = x264_predict_8x8_filter_sse2;
+    if( !(cpu&X264_CPU_SSSE3) )
+        return;
+    pf[I_PRED_8x8_DDL]    = x264_predict_8x8_ddl_ssse3;
+    pf[I_PRED_8x8_DDR]    = x264_predict_8x8_ddr_ssse3;
+    pf[I_PRED_8x8_HD]     = x264_predict_8x8_hd_ssse3;
+    pf[I_PRED_8x8_HU]     = x264_predict_8x8_hu_ssse3;
+    pf[I_PRED_8x8_VL]     = x264_predict_8x8_vl_ssse3;
+    pf[I_PRED_8x8_VR]     = x264_predict_8x8_vr_ssse3;
+    *predict_8x8_filter   = x264_predict_8x8_filter_ssse3;
+    if( cpu&X264_CPU_CACHELINE_64 )
+    {
+        pf[I_PRED_8x8_DDL]= x264_predict_8x8_ddl_ssse3_cache64;
+        pf[I_PRED_8x8_DDR]= x264_predict_8x8_ddr_ssse3_cache64;
+    }
+    if( !(cpu&X264_CPU_AVX) )
+        return;
+    pf[I_PRED_8x8_HD]     = x264_predict_8x8_hd_avx;
+    pf[I_PRED_8x8_HU]     = x264_predict_8x8_hu_avx;
+    pf[I_PRED_8x8_VL]     = x264_predict_8x8_vl_avx;
+    pf[I_PRED_8x8_VR]     = x264_predict_8x8_vr_avx;
+    *predict_8x8_filter   = x264_predict_8x8_filter_avx;
+#else
+    pf[I_PRED_8x8_V]      = x264_predict_8x8_v_mmx2;
+    pf[I_PRED_8x8_H]      = x264_predict_8x8_h_mmx2;
+    pf[I_PRED_8x8_DC]     = x264_predict_8x8_dc_mmx2;
+    pf[I_PRED_8x8_DC_TOP] = x264_predict_8x8_dc_top_mmx2;
+    pf[I_PRED_8x8_DC_LEFT]= x264_predict_8x8_dc_left_mmx2;
+    pf[I_PRED_8x8_HD]     = x264_predict_8x8_hd_mmx2;
+    pf[I_PRED_8x8_VL]     = x264_predict_8x8_vl_mmx2;
+    *predict_8x8_filter   = x264_predict_8x8_filter_mmx2;
+#if ARCH_X86
+    pf[I_PRED_8x8_DDL]  = x264_predict_8x8_ddl_mmx2;
+    pf[I_PRED_8x8_DDR]  = x264_predict_8x8_ddr_mmx2;
+    pf[I_PRED_8x8_VR]   = x264_predict_8x8_vr_mmx2;
+    pf[I_PRED_8x8_HU]   = x264_predict_8x8_hu_mmx2;
 #endif
     if( !(cpu&X264_CPU_SSE2) )
         return;
-    pf[I_PRED_8x8_DDL] = predict_8x8_ddl_sse2;
-    pf[I_PRED_8x8_VL]  = predict_8x8_vl_sse2;
-    pf[I_PRED_8x8_DDR] = predict_8x8_ddr_sse2;
+    pf[I_PRED_8x8_DDL]  = x264_predict_8x8_ddl_sse2;
+    pf[I_PRED_8x8_VL]   = x264_predict_8x8_vl_sse2;
+    pf[I_PRED_8x8_VR]   = x264_predict_8x8_vr_sse2;
+    pf[I_PRED_8x8_DDR]  = x264_predict_8x8_ddr_sse2;
+    pf[I_PRED_8x8_HD]   = x264_predict_8x8_hd_sse2;
+    pf[I_PRED_8x8_HU]   = x264_predict_8x8_hu_sse2;
+    if( !(cpu&X264_CPU_SSSE3) )
+        return;
+    if( !(cpu&X264_CPU_SLOW_PALIGNR) )
+    {
+        pf[I_PRED_8x8_DDL]  = x264_predict_8x8_ddl_ssse3;
+        pf[I_PRED_8x8_VR]   = x264_predict_8x8_vr_ssse3;
+    }
+    pf[I_PRED_8x8_HU]   = x264_predict_8x8_hu_ssse3;
+    *predict_8x8_filter = x264_predict_8x8_filter_ssse3;
+    if( !(cpu&X264_CPU_AVX) )
+        return;
+    pf[I_PRED_8x8_DDL]  = x264_predict_8x8_ddl_avx;
+    pf[I_PRED_8x8_DDR]  = x264_predict_8x8_ddr_avx;
+    pf[I_PRED_8x8_VL]   = x264_predict_8x8_vl_avx;
+    pf[I_PRED_8x8_VR]   = x264_predict_8x8_vr_avx;
+    pf[I_PRED_8x8_HD]   = x264_predict_8x8_hd_avx;
+#endif // HIGH_BIT_DEPTH
 }
 
 void x264_predict_4x4_init_mmx( int cpu, x264_predict_t pf[12] )
 {
-    if( !(cpu&X264_CPU_MMX) )
+    if( !(cpu&X264_CPU_MMX2) )
         return;
-#ifdef ARCH_X86_64
-    pf[I_PRED_4x4_DDR] = predict_4x4_ddr;
-    pf[I_PRED_4x4_VR]  = predict_4x4_vr;
-    pf[I_PRED_4x4_HD]  = predict_4x4_hd;
-    pf[I_PRED_4x4_HU]  = predict_4x4_hu;
-#endif
-    if( !(cpu&X264_CPU_MMXEXT) )
+    pf[I_PRED_4x4_DC]  = x264_predict_4x4_dc_mmx2;
+    pf[I_PRED_4x4_DDL] = x264_predict_4x4_ddl_mmx2;
+    pf[I_PRED_4x4_DDR] = x264_predict_4x4_ddr_mmx2;
+    pf[I_PRED_4x4_VL]  = x264_predict_4x4_vl_mmx2;
+    pf[I_PRED_4x4_HD]  = x264_predict_4x4_hd_mmx2;
+    pf[I_PRED_4x4_HU]  = x264_predict_4x4_hu_mmx2;
+#if HIGH_BIT_DEPTH
+    if( !(cpu&X264_CPU_SSE2) )
+        return;
+    pf[I_PRED_4x4_DDL] = x264_predict_4x4_ddl_sse2;
+    pf[I_PRED_4x4_DDR] = x264_predict_4x4_ddr_sse2;
+    pf[I_PRED_4x4_HD]  = x264_predict_4x4_hd_sse2;
+    pf[I_PRED_4x4_VL]  = x264_predict_4x4_vl_sse2;
+    pf[I_PRED_4x4_VR]  = x264_predict_4x4_vr_sse2;
+    if( !(cpu&X264_CPU_SSSE3) )
+        return;
+    pf[I_PRED_4x4_DDR] = x264_predict_4x4_ddr_ssse3;
+    pf[I_PRED_4x4_VR]  = x264_predict_4x4_vr_ssse3;
+    pf[I_PRED_4x4_HD]  = x264_predict_4x4_hd_ssse3;
+    if( !(cpu&X264_CPU_AVX) )
+        return;
+    pf[I_PRED_4x4_DDL] = x264_predict_4x4_ddl_avx;
+    pf[I_PRED_4x4_DDR] = x264_predict_4x4_ddr_avx;
+    pf[I_PRED_4x4_HD]  = x264_predict_4x4_hd_avx;
+    pf[I_PRED_4x4_VL]  = x264_predict_4x4_vl_avx;
+    pf[I_PRED_4x4_VR]  = x264_predict_4x4_vr_avx;
+    if( !(cpu&X264_CPU_AVX2) )
+        return;
+    pf[I_PRED_4x4_H]  = x264_predict_4x4_h_avx2;
+#else
+    pf[I_PRED_4x4_VR]  = x264_predict_4x4_vr_mmx2;
+    if( !(cpu&X264_CPU_SSSE3) )
         return;
-    pf[I_PRED_4x4_DDL] = predict_4x4_ddl_mmxext;
-    pf[I_PRED_4x4_VL]  = predict_4x4_vl_mmxext;
+    pf[I_PRED_4x4_DDR] = x264_predict_4x4_ddr_ssse3;
+    pf[I_PRED_4x4_VR]  = x264_predict_4x4_vr_ssse3;
+    pf[I_PRED_4x4_HD]  = x264_predict_4x4_hd_ssse3;
+    if( cpu&X264_CPU_CACHELINE_64 )
+        pf[I_PRED_4x4_VR] = x264_predict_4x4_vr_ssse3_cache64;
+#endif // HIGH_BIT_DEPTH
 }
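
All of the init functions above follow the same runtime-dispatch idiom: the plain-C defaults are installed elsewhere (in common/predict.c) before these x86 init functions run, and each function then overwrites pf[] entries with progressively better versions, returning early as soon as a required CPU flag is missing. A self-contained sketch of that idiom, with hypothetical names rather than the x264 API, is:

#include <stdio.h>

typedef void (*predict_fn)( unsigned char *src );

/* Hypothetical stand-ins for the _c/_mmx2/_sse2 variants. */
static void pred_dc_c( unsigned char *src )    { (void)src; puts( "dc_c" ); }
static void pred_dc_mmx2( unsigned char *src ) { (void)src; puts( "dc_mmx2" ); }
static void pred_dc_sse2( unsigned char *src ) { (void)src; puts( "dc_sse2" ); }

#define CPU_MMX2 0x1
#define CPU_SSE2 0x2

/* Same shape as x264_predict_16x16_init_mmx: later flags overwrite earlier
 * entries, and a missing flag short-circuits the rest of the function. */
static void predict_init( int cpu, predict_fn *pf )
{
    *pf = pred_dc_c;
    if( !(cpu & CPU_MMX2) )
        return;
    *pf = pred_dc_mmx2;
    if( !(cpu & CPU_SSE2) )
        return;
    *pf = pred_dc_sse2;
}

int main( void )
{
    predict_fn pf;
    predict_init( CPU_MMX2|CPU_SSE2, &pf );
    pf( (unsigned char[16]){0} );  /* selects the SSE2 stand-in */
    return 0;
}

Calling predict_init with only CPU_MMX2 set would leave the MMX2 stand-in installed; with no flags set, the caller keeps the plain-C version.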