#ifdef ARCH_PPC
# include "ppc/pixel.h"
#endif
+#ifdef ARCH_ARM
+# include "arm/pixel.h"
+#endif
#ifdef ARCH_UltraSparc
# include "sparc/pixel.h"
#endif
{
int64_t i_ssd = 0;
int x, y;
- int align = !(((long)pix1 | (long)pix2 | i_pix1 | i_pix2) & 15);
+ int align = !(((intptr_t)pix1 | (intptr_t)pix2 | i_pix1 | i_pix2) & 15);
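+    /* nonzero iff both base pointers and both strides are 16-byte aligned */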
#define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
pix2 + y*i_pix2 + x, i_pix2 );
}
-static inline void pixel_sub_wxh( int16_t *diff, int i_size,
- uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
-{
- int y, x;
- for( y = 0; y < i_size; y++ )
- {
- for( x = 0; x < i_size; x++ )
- {
- diff[x + y*i_size] = pix1[x] - pix2[x];
- }
- pix1 += i_pix1;
- pix2 += i_pix2;
- }
-}
-
-
/****************************************************************************
* pixel_var_wxh
****************************************************************************/
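+// The rewritten kernel returns both accumulators packed in one uint64_t:
+// the pixel sum in the low 32 bits and the sum of squares in the high 32.
+// Computing the variance (sqr - (sum*sum >> shift)) is left to the caller.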
-#define PIXEL_VAR_C( name, w, shift ) \
-static int name( uint8_t *pix, int i_stride, uint32_t *sad ) \
+#define PIXEL_VAR_C( name, w ) \
+static uint64_t name( uint8_t *pix, int i_stride ) \
{ \
- uint32_t var = 0, sum = 0, sqr = 0; \
+ uint32_t sum = 0, sqr = 0; \
int x, y; \
for( y = 0; y < w; y++ ) \
    { \
        for( x = 0; x < w; x++ ) \
        { \
            sum += pix[x]; \
            sqr += pix[x] * pix[x]; \
        } \
        pix += i_stride; \
    } \
- var = sqr - (sum * sum >> shift); \
- *sad = sum; \
- return var; \
+ return sum + ((uint64_t)sqr << 32); \
}
-PIXEL_VAR_C( x264_pixel_var_16x16, 16, 8 )
-PIXEL_VAR_C( x264_pixel_var_8x8, 8, 6 )
-
-
+PIXEL_VAR_C( x264_pixel_var_16x16, 16 )
+PIXEL_VAR_C( x264_pixel_var_8x8, 8 )
/****************************************************************************
- * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
+ * pixel_var2_wxh
****************************************************************************/
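+// Variance of the difference between two 8x8 blocks; the SSD of the
+// difference is also returned through *ssd.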
-static int pixel_satd_wxh( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2, int i_width, int i_height )
+static int pixel_var2_8x8( uint8_t *pix1, int i_stride1, uint8_t *pix2, int i_stride2, int *ssd )
{
- int16_t tmp[4][4];
- int16_t diff[4][4];
+ uint32_t var = 0, sum = 0, sqr = 0;
int x, y;
- int i_satd = 0;
-
- for( y = 0; y < i_height; y += 4 )
+ for( y = 0; y < 8; y++ )
{
- for( x = 0; x < i_width; x += 4 )
+ for( x = 0; x < 8; x++ )
{
- int d;
-
- pixel_sub_wxh( (int16_t*)diff, 4, &pix1[x], i_pix1, &pix2[x], i_pix2 );
-
- for( d = 0; d < 4; d++ )
- {
- int s01, s23;
- int d01, d23;
+ int diff = pix1[x] - pix2[x];
+ sum += diff;
+ sqr += diff * diff;
+ }
+ pix1 += i_stride1;
+ pix2 += i_stride2;
+ }
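+    /* var = SSD - sum^2/64, i.e. 64 times the variance of the difference;
+     * the signed sum may have wrapped negative in the uint32_t, so take
+     * its magnitude before squaring */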
+ sum = abs(sum);
+ var = sqr - (sum * sum >> 6);
+ *ssd = sqr;
+ return var;
+}
- s01 = diff[d][0] + diff[d][1]; s23 = diff[d][2] + diff[d][3];
- d01 = diff[d][0] - diff[d][1]; d23 = diff[d][2] - diff[d][3];
- tmp[d][0] = s01 + s23;
- tmp[d][1] = s01 - s23;
- tmp[d][2] = d01 - d23;
- tmp[d][3] = d01 + d23;
- }
- for( d = 0; d < 4; d++ )
- {
- int s01, s23;
- int d01, d23;
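+// 4-point Hadamard butterfly (two add/sub stages). Since it only adds and
+// subtracts, it works both on plain ints and on the packed two-lane values
+// used throughout this file.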
+#define HADAMARD4(d0,d1,d2,d3,s0,s1,s2,s3) {\
+ int t0 = s0 + s1;\
+ int t1 = s0 - s1;\
+ int t2 = s2 + s3;\
+ int t3 = s2 - s3;\
+ d0 = t0 + t2;\
+ d2 = t0 - t2;\
+ d1 = t1 + t3;\
+ d3 = t1 - t3;\
+}
- s01 = tmp[0][d] + tmp[1][d]; s23 = tmp[2][d] + tmp[3][d];
- d01 = tmp[0][d] - tmp[1][d]; d23 = tmp[2][d] - tmp[3][d];
+// in: a pseudo-simd number of the form x+(y<<16)
+// return: abs(x)+(abs(y)<<16)
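+// e.g. x=3, y=-2 is packed as 0xfffe0003 and comes back as 0x00020003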
+static ALWAYS_INLINE uint32_t abs2( uint32_t a )
+{
+ uint32_t s = ((a>>15)&0x10001)*0xffff;
+ return (a+s)^s;
+}
- i_satd += abs( s01 + s23 ) + abs( s01 - s23 ) + abs( d01 - d23 ) + abs( d01 + d23 );
- }
+/****************************************************************************
+ * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
+ ****************************************************************************/
- }
- pix1 += 4 * i_pix1;
- pix2 += 4 * i_pix2;
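+// Pseudo-SIMD in plain C: each uint32_t carries two 16-bit lanes, x+(y<<16).
+// Additions and subtractions are exact on this representation, and abs2()
+// recovers the per-lane magnitudes at the end, provided every intermediate
+// fits in signed 16 bits (a 4x4 Hadamard of 8-bit differences stays well
+// under 2^15).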
+static NOINLINE int x264_pixel_satd_4x4( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
+{
+ uint32_t tmp[4][2];
+ uint32_t a0,a1,a2,a3,b0,b1;
+ int sum=0, i;
+ for( i=0; i<4; i++, pix1+=i_pix1, pix2+=i_pix2 )
+ {
+ a0 = pix1[0] - pix2[0];
+ a1 = pix1[1] - pix2[1];
+ b0 = (a0+a1) + ((a0-a1)<<16);
+ a2 = pix1[2] - pix2[2];
+ a3 = pix1[3] - pix2[3];
+ b1 = (a2+a3) + ((a2-a3)<<16);
+ tmp[i][0] = b0 + b1;
+ tmp[i][1] = b0 - b1;
+ }
+ for( i=0; i<2; i++ )
+ {
+ HADAMARD4( a0,a1,a2,a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
+ a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
+ sum += ((uint16_t)a0) + (a0>>16);
}
+ return sum >> 1;
+}
- return i_satd / 2;
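+// 8x4 SATD: columns x and x+4 of the difference share one packed word, so
+// each HADAMARD4 pass transforms the two 4-wide halves simultaneously.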
+static NOINLINE int x264_pixel_satd_8x4( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
+{
+ uint32_t tmp[4][4];
+ uint32_t a0,a1,a2,a3;
+ int sum=0, i;
+ for( i=0; i<4; i++, pix1+=i_pix1, pix2+=i_pix2 )
+ {
+ a0 = (pix1[0] - pix2[0]) + ((pix1[4] - pix2[4]) << 16);
+ a1 = (pix1[1] - pix2[1]) + ((pix1[5] - pix2[5]) << 16);
+ a2 = (pix1[2] - pix2[2]) + ((pix1[6] - pix2[6]) << 16);
+ a3 = (pix1[3] - pix2[3]) + ((pix1[7] - pix2[7]) << 16);
+ HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
+ }
+ for( i=0; i<4; i++ )
+ {
+ HADAMARD4( a0,a1,a2,a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
+ sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
+ }
+ return (((uint16_t)sum) + ((uint32_t)sum>>16)) >> 1;
}
-#define PIXEL_SATD_C( name, width, height ) \
-static int name( uint8_t *pix1, int i_stride_pix1, \
- uint8_t *pix2, int i_stride_pix2 ) \
-{ \
- return pixel_satd_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, width, height ); \
+
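+// w and h are literal constants, so the compiler folds the ifs away and
+// each instantiation becomes a straight-line sequence of calls to the
+// given 8x4/4x4 kernel.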
+#define PIXEL_SATD_C( w, h, sub )\
+static int x264_pixel_satd_##w##x##h( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )\
+{\
+ int sum = sub( pix1, i_pix1, pix2, i_pix2 )\
+ + sub( pix1+4*i_pix1, i_pix1, pix2+4*i_pix2, i_pix2 );\
+ if( w==16 )\
+ sum+= sub( pix1+8, i_pix1, pix2+8, i_pix2 )\
+ + sub( pix1+8+4*i_pix1, i_pix1, pix2+8+4*i_pix2, i_pix2 );\
+ if( h==16 )\
+ sum+= sub( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )\
+ + sub( pix1+12*i_pix1, i_pix1, pix2+12*i_pix2, i_pix2 );\
+ if( w==16 && h==16 )\
+ sum+= sub( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 )\
+ + sub( pix1+8+12*i_pix1, i_pix1, pix2+8+12*i_pix2, i_pix2 );\
+ return sum;\
}
-PIXEL_SATD_C( x264_pixel_satd_16x16, 16, 16 )
-PIXEL_SATD_C( x264_pixel_satd_16x8, 16, 8 )
-PIXEL_SATD_C( x264_pixel_satd_8x16, 8, 16 )
-PIXEL_SATD_C( x264_pixel_satd_8x8, 8, 8 )
-PIXEL_SATD_C( x264_pixel_satd_8x4, 8, 4 )
-PIXEL_SATD_C( x264_pixel_satd_4x8, 4, 8 )
-PIXEL_SATD_C( x264_pixel_satd_4x4, 4, 4 )
+PIXEL_SATD_C( 16, 16, x264_pixel_satd_8x4 )
+PIXEL_SATD_C( 16, 8, x264_pixel_satd_8x4 )
+PIXEL_SATD_C( 8, 16, x264_pixel_satd_8x4 )
+PIXEL_SATD_C( 8, 8, x264_pixel_satd_8x4 )
+PIXEL_SATD_C( 4, 8, x264_pixel_satd_4x4 )
-/****************************************************************************
- * pixel_sa8d_WxH: sum of 8x8 Hadamard transformed differences
- ****************************************************************************/
-#define SA8D_1D {\
- const int a0 = SRC(0) + SRC(4);\
- const int a4 = SRC(0) - SRC(4);\
- const int a1 = SRC(1) + SRC(5);\
- const int a5 = SRC(1) - SRC(5);\
- const int a2 = SRC(2) + SRC(6);\
- const int a6 = SRC(2) - SRC(6);\
- const int a3 = SRC(3) + SRC(7);\
- const int a7 = SRC(3) - SRC(7);\
- const int b0 = a0 + a2;\
- const int b2 = a0 - a2;\
- const int b1 = a1 + a3;\
- const int b3 = a1 - a3;\
- const int b4 = a4 + a6;\
- const int b6 = a4 - a6;\
- const int b5 = a5 + a7;\
- const int b7 = a5 - a7;\
- DST(0, b0 + b1);\
- DST(1, b0 - b1);\
- DST(2, b2 + b3);\
- DST(3, b2 - b3);\
- DST(4, b4 + b5);\
- DST(5, b4 - b5);\
- DST(6, b6 + b7);\
- DST(7, b6 - b7);\
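+// 8x8 Hadamard of the difference, with the same lane packing as satd: the
+// horizontal pass folds its first butterfly into the packing, and the
+// vertical pass finishes the 8-point transform with two HADAMARD4s plus a
+// final add/sub stage across the two half-blocks.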
+static NOINLINE int sa8d_8x8( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
+{
+ uint32_t tmp[8][4];
+ uint32_t a0,a1,a2,a3,a4,a5,a6,a7,b0,b1,b2,b3;
+ int sum=0, i;
+ for( i=0; i<8; i++, pix1+=i_pix1, pix2+=i_pix2 )
+ {
+ a0 = pix1[0] - pix2[0];
+ a1 = pix1[1] - pix2[1];
+ b0 = (a0+a1) + ((a0-a1)<<16);
+ a2 = pix1[2] - pix2[2];
+ a3 = pix1[3] - pix2[3];
+ b1 = (a2+a3) + ((a2-a3)<<16);
+ a4 = pix1[4] - pix2[4];
+ a5 = pix1[5] - pix2[5];
+ b2 = (a4+a5) + ((a4-a5)<<16);
+ a6 = pix1[6] - pix2[6];
+ a7 = pix1[7] - pix2[7];
+ b3 = (a6+a7) + ((a6-a7)<<16);
+ HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0,b1,b2,b3 );
+ }
+ for( i=0; i<4; i++ )
+ {
+ HADAMARD4( a0,a1,a2,a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
+ HADAMARD4( a4,a5,a6,a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i] );
+ b0 = abs2(a0+a4) + abs2(a0-a4);
+ b0 += abs2(a1+a5) + abs2(a1-a5);
+ b0 += abs2(a2+a6) + abs2(a2-a6);
+ b0 += abs2(a3+a7) + abs2(a3-a7);
+ sum += (uint16_t)b0 + (b0>>16);
+ }
+ return sum;
}
-static inline int pixel_sa8d_wxh( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2,
- int i_width, int i_height )
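+// sa8d is normalized by (sum+2)>>2 so its magnitude stays roughly
+// comparable to satd on the same block size.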
+static int x264_pixel_sa8d_8x8( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
- int16_t diff[8][8];
- int i_satd = 0;
- int x, y;
+ int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 );
+ return (sum+2)>>2;
+}
- for( y = 0; y < i_height; y += 8 )
+static int x264_pixel_sa8d_16x16( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
+{
+ int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 )
+ + sa8d_8x8( pix1+8, i_pix1, pix2+8, i_pix2 )
+ + sa8d_8x8( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )
+ + sa8d_8x8( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 );
+ return (sum+2)>>2;
+}
+
+
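+// Sum of absolute Hadamard coefficients of a source block (no reference):
+// the 4x4-transform total is returned in the low 32 bits and the
+// 8x8-transform total in the high 32, with the DC term subtracted from both.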
+static NOINLINE uint64_t pixel_hadamard_ac( uint8_t *pix, int stride )
+{
+ uint32_t tmp[32];
+ uint32_t a0,a1,a2,a3,dc;
+ int sum4=0, sum8=0, i;
+ for( i=0; i<8; i++, pix+=stride )
{
- for( x = 0; x < i_width; x += 8 )
- {
- int i;
- pixel_sub_wxh( (int16_t*)diff, 8, pix1+x, i_pix1, pix2+x, i_pix2 );
-
-#define SRC(x) diff[i][x]
-#define DST(x,rhs) diff[i][x] = (rhs)
- for( i = 0; i < 8; i++ )
- SA8D_1D
-#undef SRC
-#undef DST
-
-#define SRC(x) diff[x][i]
-#define DST(x,rhs) i_satd += abs(rhs)
- for( i = 0; i < 8; i++ )
- SA8D_1D
-#undef SRC
-#undef DST
- }
- pix1 += 8 * i_pix1;
- pix2 += 8 * i_pix2;
+ uint32_t *t = tmp + (i&3) + (i&4)*4;
+ a0 = (pix[0]+pix[1]) + ((pix[0]-pix[1])<<16);
+ a1 = (pix[2]+pix[3]) + ((pix[2]-pix[3])<<16);
+ t[0] = a0 + a1;
+ t[4] = a0 - a1;
+ a2 = (pix[4]+pix[5]) + ((pix[4]-pix[5])<<16);
+ a3 = (pix[6]+pix[7]) + ((pix[6]-pix[7])<<16);
+ t[8] = a2 + a3;
+ t[12] = a2 - a3;
}
-
- return i_satd;
+ for( i=0; i<8; i++ )
+ {
+ HADAMARD4( a0,a1,a2,a3, tmp[i*4+0], tmp[i*4+1], tmp[i*4+2], tmp[i*4+3] );
+ tmp[i*4+0] = a0;
+ tmp[i*4+1] = a1;
+ tmp[i*4+2] = a2;
+ tmp[i*4+3] = a3;
+ sum4 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
+ }
+ for( i=0; i<8; i++ )
+ {
+ HADAMARD4( a0,a1,a2,a3, tmp[i], tmp[8+i], tmp[16+i], tmp[24+i] );
+ sum8 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
+ }
+ dc = (uint16_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
+ sum4 = (uint16_t)sum4 + ((uint32_t)sum4>>16) - dc;
+ sum8 = (uint16_t)sum8 + ((uint32_t)sum8>>16) - dc;
+ return ((uint64_t)sum8<<32) + sum4;
}
-#define PIXEL_SA8D_C( width, height ) \
-static int x264_pixel_sa8d_##width##x##height( uint8_t *pix1, int i_stride_pix1, \
- uint8_t *pix2, int i_stride_pix2 ) \
-{ \
- return ( pixel_sa8d_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, width, height ) + 2 ) >> 2; \
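+// Accumulates the packed per-8x8 results, then normalizes both halves at
+// once: the 8x8 total (high half) ends up divided by 4 via (sum>>34)<<32,
+// and the 4x4 total (low half) by 2 via (uint32_t)sum>>1.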
+#define HADAMARD_AC(w,h) \
+static uint64_t x264_pixel_hadamard_ac_##w##x##h( uint8_t *pix, int stride )\
+{\
+ uint64_t sum = pixel_hadamard_ac( pix, stride );\
+ if( w==16 )\
+ sum += pixel_hadamard_ac( pix+8, stride );\
+ if( h==16 )\
+ sum += pixel_hadamard_ac( pix+8*stride, stride );\
+ if( w==16 && h==16 )\
+ sum += pixel_hadamard_ac( pix+8*stride+8, stride );\
+ return ((sum>>34)<<32) + ((uint32_t)sum>>1);\
}
-PIXEL_SA8D_C( 16, 16 )
-PIXEL_SA8D_C( 16, 8 )
-PIXEL_SA8D_C( 8, 16 )
-PIXEL_SA8D_C( 8, 8 )
+HADAMARD_AC( 16, 16 )
+HADAMARD_AC( 16, 8 )
+HADAMARD_AC( 8, 16 )
+HADAMARD_AC( 8, 8 )
+
/****************************************************************************
* pixel_sad_x4
scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
scores[3] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix3, i_stride );\
}
-#define SATD_X_DECL5( cpu )\
+#define SATD_X_DECL6( cpu )\
SATD_X( 16x16, cpu )\
SATD_X( 16x8, cpu )\
SATD_X( 8x16, cpu )\
SATD_X( 8x8, cpu )\
-SATD_X( 8x4, cpu )
+SATD_X( 8x4, cpu )\
+SATD_X( 4x8, cpu )
#define SATD_X_DECL7( cpu )\
-SATD_X_DECL5( cpu )\
-SATD_X( 4x8, cpu )\
+SATD_X_DECL6( cpu )\
SATD_X( 4x4, cpu )
SATD_X_DECL7()
#ifdef HAVE_MMX
SATD_X_DECL7( _mmxext )
-SATD_X_DECL5( _sse2 )
+SATD_X_DECL6( _sse2 )
SATD_X_DECL7( _ssse3 )
-SATD_X_DECL5( _ssse3_phadd )
+SATD_X_DECL7( _sse4 )
+#endif
+
+#ifdef HAVE_ARMV6
+SATD_X_DECL7( _neon )
#endif
/****************************************************************************
float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
uint8_t *pix1, int stride1,
uint8_t *pix2, int stride2,
- int width, int height )
+ int width, int height, void *buf )
{
int x, y, z;
float ssim = 0.0;
- int (*sum0)[4] = x264_malloc(4 * (width/4+3) * sizeof(int));
- int (*sum1)[4] = x264_malloc(4 * (width/4+3) * sizeof(int));
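+    /* scratch space is now provided by the caller: room for 2*(width/4+3)
+     * int[4] partial sums, rather than two mallocs per call */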
+ int (*sum0)[4] = buf;
+ int (*sum1)[4] = sum0 + width/4+3;
width >>= 2;
height >>= 2;
z = 0;
for( x = 0; x < width-1; x += 4 )
ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
}
- x264_free(sum0);
- x264_free(sum1);
return ssim;
}
#define INIT5_NAME( name1, name2, cpu ) \
INIT4_NAME( name1, name2, cpu ) \
pixf->name1[PIXEL_8x4] = x264_pixel_##name2##_8x4##cpu;
-#define INIT7_NAME( name1, name2, cpu ) \
+#define INIT6_NAME( name1, name2, cpu ) \
INIT5_NAME( name1, name2, cpu ) \
- pixf->name1[PIXEL_4x8] = x264_pixel_##name2##_4x8##cpu;\
+ pixf->name1[PIXEL_4x8] = x264_pixel_##name2##_4x8##cpu;
+#define INIT7_NAME( name1, name2, cpu ) \
+ INIT6_NAME( name1, name2, cpu ) \
pixf->name1[PIXEL_4x4] = x264_pixel_##name2##_4x4##cpu;
#define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
#define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
#define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
+#define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
#define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )
#define INIT_ADS( cpu ) \
INIT7( satd, );
INIT7( satd_x3, );
INIT7( satd_x4, );
- INIT4( sa8d, );
+ INIT4( hadamard_ac, );
INIT_ADS( );
+ pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16;
+ pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8;
pixf->var[PIXEL_16x16] = x264_pixel_var_16x16;
pixf->var[PIXEL_8x8] = x264_pixel_var_8x8;
pixf->ssim_4x4x2_core = ssim_4x4x2_core;
pixf->ssim_end4 = ssim_end4;
+ pixf->var2_8x8 = pixel_var2_8x8;
#ifdef HAVE_MMX
if( cpu&X264_CPU_MMX )
INIT7( satd, _mmxext );
INIT7( satd_x3, _mmxext );
INIT7( satd_x4, _mmxext );
+ INIT4( hadamard_ac, _mmxext );
INIT_ADS( _mmxext );
pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmxext;
pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_mmxext;
pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_mmxext;
pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmxext;
pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_mmxext;
+ pixf->var2_8x8 = x264_pixel_var2_8x8_mmxext;
if( cpu&X264_CPU_CACHELINE_32 )
{
}
#endif
pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmxext;
+ pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_mmxext;
pixf->intra_satd_x3_8x8c = x264_intra_satd_x3_8x8c_mmxext;
+ pixf->intra_sad_x3_8x8c = x264_intra_sad_x3_8x8c_mmxext;
+ pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_mmxext;
pixf->intra_satd_x3_4x4 = x264_intra_satd_x3_4x4_mmxext;
+ pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_mmxext;
+ }
+
+ if( cpu&X264_CPU_SSE2 )
+ {
+ INIT5( ssd, _sse2slow );
+ INIT2_NAME( sad_aligned, sad, _sse2_aligned );
+ pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
+ pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_sse2;
+ pixf->ssim_end4 = x264_pixel_ssim_end4_sse2;
+ pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
+ pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_sse2;
+#ifdef ARCH_X86_64
+ pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
+#endif
+ pixf->var2_8x8 = x264_pixel_var2_8x8_sse2;
}
if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
INIT2( sad, _sse2 );
INIT2( sad_x3, _sse2 );
INIT2( sad_x4, _sse2 );
+ INIT6( satd, _sse2 );
+ INIT6( satd_x3, _sse2 );
+ INIT6( satd_x4, _sse2 );
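+    /* the hadamard_ac kernels keep aligned temporaries on the stack, so
+     * skip them when the stack is only guaranteed 4-byte alignment */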
+ if( !(cpu&X264_CPU_STACK_MOD4) )
+ {
+ INIT4( hadamard_ac, _sse2 );
+ }
INIT_ADS( _sse2 );
pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
-
-#ifdef ARCH_X86
+ pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
if( cpu&X264_CPU_CACHELINE_64 )
{
+            INIT2( ssd, _sse2 ); /* faster for width 16 on p4 */
+#ifdef ARCH_X86
INIT2( sad, _cache64_sse2 );
INIT2( sad_x3, _cache64_sse2 );
INIT2( sad_x4, _cache64_sse2 );
- }
#endif
+ if( cpu&X264_CPU_SSE2_IS_FAST )
+ {
+ pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_sse2;
+ pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_sse2;
+ }
+ }
+
+ if( cpu&X264_CPU_SSE_MISALIGN )
+ {
+ INIT2( sad_x3, _sse2_misalign );
+ INIT2( sad_x4, _sse2_misalign );
+ }
}
- if( cpu&X264_CPU_SSE2 )
+
+ if( cpu&X264_CPU_SSE2_IS_FAST && !(cpu&X264_CPU_CACHELINE_64) )
{
- INIT5( ssd, _sse2 );
- INIT5( satd, _sse2 );
- INIT5( satd_x3, _sse2 );
- INIT5( satd_x4, _sse2 );
- INIT2_NAME( sad_aligned, sad, _sse2_aligned );
- pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
- pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_sse2;
- pixf->ssim_end4 = x264_pixel_ssim_end4_sse2;
- pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
- pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_sse2;
-#ifdef ARCH_X86_64
- pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
-#endif
+ pixf->sad_aligned[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
+ pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
+ pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
+ pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_sse2;
+ pixf->sad_x3[PIXEL_8x4] = x264_pixel_sad_x3_8x4_sse2;
+ pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
+ pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_sse2;
+ pixf->sad_x4[PIXEL_8x4] = x264_pixel_sad_x4_8x4_sse2;
}
if( (cpu&X264_CPU_SSE3) && (cpu&X264_CPU_CACHELINE_64) )
if( cpu&X264_CPU_SSSE3 )
{
+ INIT7( ssd, _ssse3 );
INIT7( satd, _ssse3 );
INIT7( satd_x3, _ssse3 );
INIT7( satd_x4, _ssse3 );
+ if( !(cpu&X264_CPU_STACK_MOD4) )
+ {
+ INIT4( hadamard_ac, _ssse3 );
+ }
INIT_ADS( _ssse3 );
pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_ssse3;
pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
+ pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_ssse3;
pixf->intra_satd_x3_8x8c = x264_intra_satd_x3_8x8c_ssse3;
+ pixf->intra_sad_x3_8x8c = x264_intra_sad_x3_8x8c_ssse3;
pixf->intra_satd_x3_4x4 = x264_intra_satd_x3_4x4_ssse3;
#ifdef ARCH_X86_64
pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_ssse3;
#endif
+ pixf->var2_8x8 = x264_pixel_var2_8x8_ssse3;
if( cpu&X264_CPU_CACHELINE_64 )
{
INIT2( sad, _cache64_ssse3 );
INIT2( sad_x3, _cache64_ssse3 );
INIT2( sad_x4, _cache64_ssse3 );
}
- if( cpu&X264_CPU_PHADD_IS_FAST )
+ if( !(cpu&X264_CPU_SHUFFLE_IS_FAST) )
{
- INIT5( satd, _ssse3_phadd );
- INIT5( satd_x3, _ssse3_phadd );
- INIT5( satd_x4, _ssse3_phadd );
+ INIT5( ssd, _sse2 ); /* on conroe, sse2 is faster for width8/16 */
}
}
+
+ if( cpu&X264_CPU_SSE4 )
+ {
+ INIT7( satd, _sse4 );
+ INIT7( satd_x3, _sse4 );
+ INIT7( satd_x4, _sse4 );
+ if( !(cpu&X264_CPU_STACK_MOD4) )
+ {
+ INIT4( hadamard_ac, _sse4 );
+ }
+ pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
+ pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_sse4;
+ }
#endif //HAVE_MMX
+#ifdef HAVE_ARMV6
+ if( cpu&X264_CPU_ARMV6 )
+ {
+ pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
+ pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
+ pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
+ pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
+ }
+ if( cpu&X264_CPU_NEON )
+ {
+ INIT5( sad, _neon );
+ INIT5( sad_aligned, _neon );
+ INIT7( sad_x3, _neon );
+ INIT7( sad_x4, _neon );
+ INIT7( ssd, _neon );
+ INIT7( satd, _neon );
+ INIT7( satd_x3, _neon );
+ INIT7( satd_x4, _neon );
+ INIT4( hadamard_ac, _neon );
+ pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_neon;
+ pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
+ pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_neon;
+ pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_neon;
+ pixf->var2_8x8 = x264_pixel_var2_8x8_neon;
+
+ pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_neon;
+ pixf->ssim_end4 = x264_pixel_ssim_end4_neon;
+
+ if( cpu&X264_CPU_FAST_NEON_MRC )
+ {
+ pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_neon;
+ pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_neon;
+ pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_aligned_4x8_neon;
+ pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_aligned_4x4_neon;
+ }
+ else // really just scheduled for dual issue / A8
+ {
+ INIT5( sad_aligned, _neon_dual );
+ }
+ }
+#endif
#ifdef ARCH_PPC
if( cpu&X264_CPU_ALTIVEC )
{