/*****************************************************************************
- * pixel.c: h264 encoder
+ * pixel.c: pixel metrics
*****************************************************************************
- * Copyright (C) 2003-2008 x264 project
+ * Copyright (C) 2003-2012 x264 project
*
* Authors: Loren Merritt <lorenm@u.washington.edu>
* Laurent Aimar <fenrir@via.ecp.fr>
+ * Fiona Glaser <fiona@x264.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "common.h"
#if HAVE_MMX
# include "x86/pixel.h"
+# include "x86/predict.h"
#endif
#if ARCH_PPC
# include "ppc/pixel.h"
#if ARCH_ARM
# include "arm/pixel.h"
#endif
-#if ARCH_UltraSparc
+#if ARCH_UltraSPARC
# include "sparc/pixel.h"
#endif
* pixel_sad_WxH
****************************************************************************/
#define PIXEL_SAD_C( name, lx, ly ) \
-static int name( pixel *pix1, int i_stride_pix1, \
- pixel *pix2, int i_stride_pix2 ) \
+static int name( pixel *pix1, intptr_t i_stride_pix1, \
+ pixel *pix2, intptr_t i_stride_pix2 ) \
{ \
int i_sum = 0; \
for( int y = 0; y < ly; y++ ) \
PIXEL_SAD_C( x264_pixel_sad_8x16, 8, 16 )
PIXEL_SAD_C( x264_pixel_sad_8x8, 8, 8 )
PIXEL_SAD_C( x264_pixel_sad_8x4, 8, 4 )
+PIXEL_SAD_C( x264_pixel_sad_4x16, 4, 16 )
PIXEL_SAD_C( x264_pixel_sad_4x8, 4, 8 )
PIXEL_SAD_C( x264_pixel_sad_4x4, 4, 4 )
-
/****************************************************************************
* pixel_ssd_WxH
****************************************************************************/
#define PIXEL_SSD_C( name, lx, ly ) \
-static int name( pixel *pix1, int i_stride_pix1, \
- pixel *pix2, int i_stride_pix2 ) \
+static int name( pixel *pix1, intptr_t i_stride_pix1, \
+ pixel *pix2, intptr_t i_stride_pix2 ) \
{ \
int i_sum = 0; \
for( int y = 0; y < ly; y++ ) \
PIXEL_SSD_C( x264_pixel_ssd_8x16, 8, 16 )
PIXEL_SSD_C( x264_pixel_ssd_8x8, 8, 8 )
PIXEL_SSD_C( x264_pixel_ssd_8x4, 8, 4 )
+PIXEL_SSD_C( x264_pixel_ssd_4x16, 4, 16 )
PIXEL_SSD_C( x264_pixel_ssd_4x8, 4, 8 )
PIXEL_SSD_C( x264_pixel_ssd_4x4, 4, 4 )
-uint64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height )
+uint64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, pixel *pix1, intptr_t i_pix1,
+ pixel *pix2, intptr_t i_pix2, int i_width, int i_height )
{
uint64_t i_ssd = 0;
int y;
return i_ssd;
}
-static uint64_t pixel_ssd_nv12_core( pixel *pixuv1, int stride1, pixel *pixuv2, int stride2, int width, int height )
+static void pixel_ssd_nv12_core( pixel *pixuv1, intptr_t stride1, pixel *pixuv2, intptr_t stride2,
+ int width, int height, uint64_t *ssd_u, uint64_t *ssd_v )
{
- uint32_t ssd_u=0, ssd_v=0;
+ *ssd_u = 0, *ssd_v = 0;
for( int y = 0; y < height; y++, pixuv1+=stride1, pixuv2+=stride2 )
for( int x = 0; x < width; x++ )
{
int du = pixuv1[2*x] - pixuv2[2*x];
int dv = pixuv1[2*x+1] - pixuv2[2*x+1];
- ssd_u += du*du;
- ssd_v += dv*dv;
+ *ssd_u += du*du;
+ *ssd_v += dv*dv;
}
- return ssd_u + ((uint64_t)ssd_v<<32);
}
-// SSD in uint32 (i.e. packing two into uint64) can potentially overflow on
-// image widths >= 11008 (or 6604 if interlaced), since this is called on blocks
-// of height up to 12 (resp 20). Though it will probably take significantly more
-// than that at sane distortion levels.
-uint64_t x264_pixel_ssd_nv12( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height )
+void x264_pixel_ssd_nv12( x264_pixel_function_t *pf, pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2,
+ int i_width, int i_height, uint64_t *ssd_u, uint64_t *ssd_v )
{
- uint64_t ssd = pf->ssd_nv12_core( pix1, i_pix1, pix2, i_pix2, i_width&~7, i_height );
+ pf->ssd_nv12_core( pix1, i_pix1, pix2, i_pix2, i_width&~7, i_height, ssd_u, ssd_v );
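+ /* The core routine handles the width rounded down to a multiple of 8;
+ * a plain C pass below covers any leftover columns. */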
if( i_width&7 )
- ssd += pixel_ssd_nv12_core( pix1+(i_width&~7), i_pix1, pix2+(i_width&~7), i_pix2, i_width&7, i_height );
- return ssd;
+ {
+ uint64_t tmp[2];
+ pixel_ssd_nv12_core( pix1+(i_width&~7), i_pix1, pix2+(i_width&~7), i_pix2, i_width&7, i_height, &tmp[0], &tmp[1] );
+ *ssd_u += tmp[0];
+ *ssd_v += tmp[1];
+ }
}
/****************************************************************************
* pixel_var_wxh
****************************************************************************/
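+// Returns the pixel sum in the low 32 bits of the result and the sum of
+// squares in the high 32; callers derive the variance as sqr - sum*sum/(w*h).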
-#define PIXEL_VAR_C( name, w ) \
-static uint64_t name( pixel *pix, int i_stride ) \
+#define PIXEL_VAR_C( name, w, h ) \
+static uint64_t name( pixel *pix, intptr_t i_stride ) \
{ \
uint32_t sum = 0, sqr = 0; \
- for( int y = 0; y < w; y++ ) \
+ for( int y = 0; y < h; y++ ) \
{ \
for( int x = 0; x < w; x++ ) \
{ \
return sum + ((uint64_t)sqr << 32); \
}
-PIXEL_VAR_C( x264_pixel_var_16x16, 16 )
-PIXEL_VAR_C( x264_pixel_var_8x8, 8 )
+PIXEL_VAR_C( x264_pixel_var_16x16, 16, 16 )
+PIXEL_VAR_C( x264_pixel_var_8x16, 8, 16 )
+PIXEL_VAR_C( x264_pixel_var_8x8, 8, 8 )
/****************************************************************************
* pixel_var2_wxh
****************************************************************************/
-static int pixel_var2_8x8( pixel *pix1, int i_stride1, pixel *pix2, int i_stride2, int *ssd )
-{
- uint32_t var = 0, sum = 0, sqr = 0;
- for( int y = 0; y < 8; y++ )
- {
- for( int x = 0; x < 8; x++ )
- {
- int diff = pix1[x] - pix2[x];
- sum += diff;
- sqr += diff * diff;
- }
- pix1 += i_stride1;
- pix2 += i_stride2;
- }
- sum = abs(sum);
- var = sqr - ((uint64_t)sum * sum >> 6);
- *ssd = sqr;
- return var;
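+/* var2 = sqr - sum^2/(w*h); w*h is a power of two, so the instantiations
+ * below pass shift = log2(w*h): 6 for 8x8, 7 for 8x16. */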
+#define PIXEL_VAR2_C( name, w, h, shift ) \
+static int name( pixel *pix1, intptr_t i_stride1, pixel *pix2, intptr_t i_stride2, int *ssd ) \
+{ \
+ uint32_t var = 0, sum = 0, sqr = 0; \
+ for( int y = 0; y < h; y++ ) \
+ { \
+ for( int x = 0; x < w; x++ ) \
+ { \
+ int diff = pix1[x] - pix2[x]; \
+ sum += diff; \
+ sqr += diff * diff; \
+ } \
+ pix1 += i_stride1; \
+ pix2 += i_stride2; \
+ } \
+ sum = abs(sum); \
+ var = sqr - ((uint64_t)sum * sum >> shift); \
+ *ssd = sqr; \
+ return var; \
}
+PIXEL_VAR2_C( x264_pixel_var2_8x16, 8, 16, 7 )
+PIXEL_VAR2_C( x264_pixel_var2_8x8, 8, 8, 6 )
+
+#if BIT_DEPTH > 8
+ typedef uint32_t sum_t;
+ typedef uint64_t sum2_t;
+#else
+ typedef uint16_t sum_t;
+ typedef uint32_t sum2_t;
+#endif
+#define BITS_PER_SUM (8 * sizeof(sum_t))
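+// sum_t holds one difference sum; sum2_t packs two of them so the Hadamard
+// butterflies below transform two lanes per operation (pseudo-SIMD): two
+// 16-bit sums in a uint32_t at 8-bit depth, two 32-bit sums in a uint64_t at
+// high bit depth.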
#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) {\
- int t0 = s0 + s1;\
- int t1 = s0 - s1;\
- int t2 = s2 + s3;\
- int t3 = s2 - s3;\
+ sum2_t t0 = s0 + s1;\
+ sum2_t t1 = s0 - s1;\
+ sum2_t t2 = s2 + s3;\
+ sum2_t t3 = s2 - s3;\
d0 = t0 + t2;\
d2 = t0 - t2;\
d1 = t1 + t3;\
-// in: a pseudo-simd number of the form x+(y<<16)
-// return: abs(x)+(abs(y)<<16)
+// in: a pseudo-simd number of the form x+(y<<BITS_PER_SUM)
+// return: abs(x)+(abs(y)<<BITS_PER_SUM)
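+// e.g. at 8-bit depth (BITS_PER_SUM == 16): s replicates each half's sign bit
+// across that 16-bit half, and (a+s)^s is the usual two's-complement abs()
+// applied to both packed halves at once.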
-static ALWAYS_INLINE uint32_t abs2( uint32_t a )
+static ALWAYS_INLINE sum2_t abs2( sum2_t a )
{
- uint32_t s = ((a>>15)&0x10001)*0xffff;
+ sum2_t s = ((a>>(BITS_PER_SUM-1))&(((sum2_t)1<<BITS_PER_SUM)+1))*((sum_t)-1);
return (a+s)^s;
}
* pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
****************************************************************************/
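+// The first transform stage is folded into the load: b0 packs (a0+a1) in the
+// low half of a sum2_t and (a0-a1) in the high half, so each HADAMARD4 below
+// transforms two lanes at once. The final >>1 compensates the transform's
+// gain, keeping SATD roughly on the same scale as SAD.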
-static NOINLINE int x264_pixel_satd_4x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
+static NOINLINE int x264_pixel_satd_4x4( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
{
- uint32_t tmp[4][2];
- uint32_t a0, a1, a2, a3, b0, b1;
- int sum = 0;
+ sum2_t tmp[4][2];
+ sum2_t a0, a1, a2, a3, b0, b1;
+ sum2_t sum = 0;
for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
{
a0 = pix1[0] - pix2[0];
a1 = pix1[1] - pix2[1];
- b0 = (a0+a1) + ((a0-a1)<<16);
+ b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
a2 = pix1[2] - pix2[2];
a3 = pix1[3] - pix2[3];
- b1 = (a2+a3) + ((a2-a3)<<16);
+ b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
tmp[i][0] = b0 + b1;
tmp[i][1] = b0 - b1;
}
{
HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
- sum += ((uint16_t)a0) + (a0>>16);
+ sum += ((sum_t)a0) + (a0>>BITS_PER_SUM);
}
return sum >> 1;
}
-static NOINLINE int x264_pixel_satd_8x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
+static NOINLINE int x264_pixel_satd_8x4( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
{
- uint32_t tmp[4][4];
- uint32_t a0, a1, a2, a3;
- int sum = 0;
+ sum2_t tmp[4][4];
+ sum2_t a0, a1, a2, a3;
+ sum2_t sum = 0;
for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
{
- a0 = (pix1[0] - pix2[0]) + ((pix1[4] - pix2[4]) << 16);
- a1 = (pix1[1] - pix2[1]) + ((pix1[5] - pix2[5]) << 16);
- a2 = (pix1[2] - pix2[2]) + ((pix1[6] - pix2[6]) << 16);
- a3 = (pix1[3] - pix2[3]) + ((pix1[7] - pix2[7]) << 16);
+ a0 = (pix1[0] - pix2[0]) + ((sum2_t)(pix1[4] - pix2[4]) << BITS_PER_SUM);
+ a1 = (pix1[1] - pix2[1]) + ((sum2_t)(pix1[5] - pix2[5]) << BITS_PER_SUM);
+ a2 = (pix1[2] - pix2[2]) + ((sum2_t)(pix1[6] - pix2[6]) << BITS_PER_SUM);
+ a3 = (pix1[3] - pix2[3]) + ((sum2_t)(pix1[7] - pix2[7]) << BITS_PER_SUM);
HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
}
for( int i = 0; i < 4; i++ )
HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
}
- return (((uint16_t)sum) + ((uint32_t)sum>>16)) >> 1;
+ return (((sum_t)sum) + (sum>>BITS_PER_SUM)) >> 1;
}
#define PIXEL_SATD_C( w, h, sub )\
-static int x264_pixel_satd_##w##x##h( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )\
+static int x264_pixel_satd_##w##x##h( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )\
{\
int sum = sub( pix1, i_pix1, pix2, i_pix2 )\
+ sub( pix1+4*i_pix1, i_pix1, pix2+4*i_pix2, i_pix2 );\
PIXEL_SATD_C( 16, 8, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8, 16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8, 8, x264_pixel_satd_8x4 )
+PIXEL_SATD_C( 4, 16, x264_pixel_satd_4x4 )
PIXEL_SATD_C( 4, 8, x264_pixel_satd_4x4 )
-
-static NOINLINE int sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
+static NOINLINE int sa8d_8x8( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
{
- uint32_t tmp[8][4];
- uint32_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
- int sum = 0;
+ sum2_t tmp[8][4];
+ sum2_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
+ sum2_t sum = 0;
for( int i = 0; i < 8; i++, pix1 += i_pix1, pix2 += i_pix2 )
{
a0 = pix1[0] - pix2[0];
a1 = pix1[1] - pix2[1];
- b0 = (a0+a1) + ((a0-a1)<<16);
+ b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
a2 = pix1[2] - pix2[2];
a3 = pix1[3] - pix2[3];
- b1 = (a2+a3) + ((a2-a3)<<16);
+ b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
a4 = pix1[4] - pix2[4];
a5 = pix1[5] - pix2[5];
- b2 = (a4+a5) + ((a4-a5)<<16);
+ b2 = (a4+a5) + ((a4-a5)<<BITS_PER_SUM);
a6 = pix1[6] - pix2[6];
a7 = pix1[7] - pix2[7];
- b3 = (a6+a7) + ((a6-a7)<<16);
+ b3 = (a6+a7) + ((a6-a7)<<BITS_PER_SUM);
HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0,b1,b2,b3 );
}
for( int i = 0; i < 4; i++ )
b0 += abs2(a1+a5) + abs2(a1-a5);
b0 += abs2(a2+a6) + abs2(a2-a6);
b0 += abs2(a3+a7) + abs2(a3-a7);
- sum += (uint16_t)b0 + (b0>>16);
+ sum += (sum_t)b0 + (b0>>BITS_PER_SUM);
}
return sum;
}
-static int x264_pixel_sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
+static int x264_pixel_sa8d_8x8( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
{
int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 );
return (sum+2)>>2;
}
-static int x264_pixel_sa8d_16x16( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
+static int x264_pixel_sa8d_16x16( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
{
int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 )
+ sa8d_8x8( pix1+8, i_pix1, pix2+8, i_pix2 )
}
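+// Hadamard AC energy of an 8x8 block: the low 32 bits of the result hold the
+// summed 4x4 transform magnitudes, the high 32 bits the 8x8 ones, each with
+// its DC term subtracted.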
-static NOINLINE uint64_t pixel_hadamard_ac( pixel *pix, int stride )
+static NOINLINE uint64_t pixel_hadamard_ac( pixel *pix, intptr_t stride )
{
- uint32_t tmp[32];
- uint32_t a0, a1, a2, a3, dc;
- int sum4 = 0, sum8 = 0;
+ sum2_t tmp[32];
+ sum2_t a0, a1, a2, a3, dc;
+ sum2_t sum4 = 0, sum8 = 0;
for( int i = 0; i < 8; i++, pix+=stride )
{
- uint32_t *t = tmp + (i&3) + (i&4)*4;
- a0 = (pix[0]+pix[1]) + ((pix[0]-pix[1])<<16);
- a1 = (pix[2]+pix[3]) + ((pix[2]-pix[3])<<16);
+ sum2_t *t = tmp + (i&3) + (i&4)*4;
+ a0 = (pix[0]+pix[1]) + ((sum2_t)(pix[0]-pix[1])<<BITS_PER_SUM);
+ a1 = (pix[2]+pix[3]) + ((sum2_t)(pix[2]-pix[3])<<BITS_PER_SUM);
t[0] = a0 + a1;
t[4] = a0 - a1;
- a2 = (pix[4]+pix[5]) + ((pix[4]-pix[5])<<16);
- a3 = (pix[6]+pix[7]) + ((pix[6]-pix[7])<<16);
+ a2 = (pix[4]+pix[5]) + ((sum2_t)(pix[4]-pix[5])<<BITS_PER_SUM);
+ a3 = (pix[6]+pix[7]) + ((sum2_t)(pix[6]-pix[7])<<BITS_PER_SUM);
t[8] = a2 + a3;
t[12] = a2 - a3;
}
HADAMARD4( a0,a1,a2,a3, tmp[i], tmp[8+i], tmp[16+i], tmp[24+i] );
sum8 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
}
- dc = (uint16_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
- sum4 = (uint16_t)sum4 + ((uint32_t)sum4>>16) - dc;
- sum8 = (uint16_t)sum8 + ((uint32_t)sum8>>16) - dc;
+ dc = (sum_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
+ sum4 = (sum_t)sum4 + (sum4>>BITS_PER_SUM) - dc;
+ sum8 = (sum_t)sum8 + (sum8>>BITS_PER_SUM) - dc;
return ((uint64_t)sum8<<32) + sum4;
}
#define HADAMARD_AC(w,h) \
-static uint64_t x264_pixel_hadamard_ac_##w##x##h( pixel *pix, int stride )\
+static uint64_t x264_pixel_hadamard_ac_##w##x##h( pixel *pix, intptr_t stride )\
{\
uint64_t sum = pixel_hadamard_ac( pix, stride );\
if( w==16 )\
* pixel_sad_x4
****************************************************************************/
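+// The x3/x4 variants score one encode block against 3 or 4 candidate
+// references sharing a stride. These C versions simply call the scalar SAD;
+// asm implementations exist chiefly to share the fenc loads across candidates.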
#define SAD_X( size ) \
-static void x264_pixel_sad_x3_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
+static void x264_pixel_sad_x3_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2,\
+ intptr_t i_stride, int scores[3] )\
{\
scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
}\
-static void x264_pixel_sad_x4_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
+static void x264_pixel_sad_x4_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3,\
+ intptr_t i_stride, int scores[4] )\
{\
scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
SAD_X( 4x8 )
SAD_X( 4x4 )
-#if !X264_HIGH_BIT_DEPTH
-#if ARCH_UltraSparc
+#if !HIGH_BIT_DEPTH
+#if ARCH_UltraSPARC
SAD_X( 16x16_vis )
SAD_X( 16x8_vis )
SAD_X( 8x16_vis )
SAD_X( 8x8_vis )
#endif
-#endif // !X264_HIGH_BIT_DEPTH
+#endif // !HIGH_BIT_DEPTH
/****************************************************************************
* pixel_satd_x4
****************************************************************************/
#define SATD_X( size, cpu ) \
-static void x264_pixel_satd_x3_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
+static void x264_pixel_satd_x3_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2,\
+ intptr_t i_stride, int scores[3] )\
{\
scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
}\
-static void x264_pixel_satd_x4_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
+static void x264_pixel_satd_x4_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3,\
+ intptr_t i_stride, int scores[4] )\
{\
scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
SATD_X( 4x4, cpu )
SATD_X_DECL7()
-#if !X264_HIGH_BIT_DEPTH
#if HAVE_MMX
-SATD_X_DECL7( _mmxext )
+SATD_X_DECL7( _mmx2 )
+#if !HIGH_BIT_DEPTH
SATD_X_DECL6( _sse2 )
SATD_X_DECL7( _ssse3 )
SATD_X_DECL7( _sse4 )
+SATD_X_DECL7( _avx )
+SATD_X_DECL7( _xop )
+#endif // !HIGH_BIT_DEPTH
#endif
+#if !HIGH_BIT_DEPTH
#if HAVE_ARMV6
SATD_X_DECL7( _neon )
#endif
-#endif // !X264_HIGH_BIT_DEPTH
+#endif // !HIGH_BIT_DEPTH
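+// These macros expand to reference intra-analysis helpers: run each of three
+// prediction modes into a scratch block, then score it against fenc with the
+// given metric. cpu selects the pixel-metric variant, cpu2 the predictor.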
-#define INTRA_MBCMP_8x8( mbcmp )\
-void x264_intra_##mbcmp##_x3_8x8( pixel *fenc, pixel edge[33], int res[3] )\
+#define INTRA_MBCMP_8x8( mbcmp, cpu, cpu2 )\
+void x264_intra_##mbcmp##_x3_8x8##cpu( pixel *fenc, pixel edge[36], int res[3] )\
{\
- pixel pix[8*FDEC_STRIDE];\
- x264_predict_8x8_v_c( pix, edge );\
- res[0] = x264_pixel_##mbcmp##_8x8( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
- x264_predict_8x8_h_c( pix, edge );\
- res[1] = x264_pixel_##mbcmp##_8x8( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
- x264_predict_8x8_dc_c( pix, edge );\
- res[2] = x264_pixel_##mbcmp##_8x8( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
+ ALIGNED_ARRAY_16( pixel, pix, [8*FDEC_STRIDE] );\
+ x264_predict_8x8_v##cpu2( pix, edge );\
+ res[0] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
+ x264_predict_8x8_h##cpu2( pix, edge );\
+ res[1] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
+ x264_predict_8x8_dc##cpu2( pix, edge );\
+ res[2] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
}
-INTRA_MBCMP_8x8(sad)
-INTRA_MBCMP_8x8(sa8d)
+INTRA_MBCMP_8x8( sad,, _c )
+INTRA_MBCMP_8x8(sa8d,, _c )
+#if HIGH_BIT_DEPTH && HAVE_MMX
+INTRA_MBCMP_8x8( sad, _mmx2, _c )
+INTRA_MBCMP_8x8(sa8d, _sse2, _sse2 )
+#endif
-#define INTRA_MBCMP( mbcmp, size, pred1, pred2, pred3, chroma )\
-void x264_intra_##mbcmp##_x3_##size##x##size##chroma( pixel *fenc, pixel *fdec, int res[3] )\
+#define INTRA_MBCMP( mbcmp, size, pred1, pred2, pred3, chroma, cpu, cpu2 )\
+void x264_intra_##mbcmp##_x3_##size##chroma##cpu( pixel *fenc, pixel *fdec, int res[3] )\
{\
- x264_predict_##size##x##size##chroma##_##pred1##_c( fdec );\
- res[0] = x264_pixel_##mbcmp##_##size##x##size( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
- x264_predict_##size##x##size##chroma##_##pred2##_c( fdec );\
- res[1] = x264_pixel_##mbcmp##_##size##x##size( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
- x264_predict_##size##x##size##chroma##_##pred3##_c( fdec );\
- res[2] = x264_pixel_##mbcmp##_##size##x##size( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
+ x264_predict_##size##chroma##_##pred1##cpu2( fdec );\
+ res[0] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
+ x264_predict_##size##chroma##_##pred2##cpu2( fdec );\
+ res[1] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
+ x264_predict_##size##chroma##_##pred3##cpu2( fdec );\
+ res[2] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
}
-INTRA_MBCMP(sad, 4, v, h, dc, )
-INTRA_MBCMP(satd, 4, v, h, dc, )
-INTRA_MBCMP(sad, 8, dc, h, v, c )
-INTRA_MBCMP(satd, 8, dc, h, v, c )
-INTRA_MBCMP(sad, 16, v, h, dc, )
-INTRA_MBCMP(satd, 16, v, h, dc, )
+INTRA_MBCMP( sad, 4x4, v, h, dc, ,, _c )
+INTRA_MBCMP(satd, 4x4, v, h, dc, ,, _c )
+INTRA_MBCMP( sad, 8x8, dc, h, v, c,, _c )
+INTRA_MBCMP(satd, 8x8, dc, h, v, c,, _c )
+INTRA_MBCMP( sad, 8x16, dc, h, v, c,, _c )
+INTRA_MBCMP(satd, 8x16, dc, h, v, c,, _c )
+INTRA_MBCMP( sad, 16x16, v, h, dc, ,, _c )
+INTRA_MBCMP(satd, 16x16, v, h, dc, ,, _c )
+
+#if HAVE_MMX
+#if HIGH_BIT_DEPTH
+INTRA_MBCMP( sad, 4x4, v, h, dc, , _mmx2, _c )
+INTRA_MBCMP( sad, 8x8, dc, h, v, c, _mmx2, _c )
+INTRA_MBCMP( sad, 16x16, v, h, dc, , _mmx2, _mmx2 )
+INTRA_MBCMP( sad, 8x8, dc, h, v, c, _sse2, _sse2 )
+INTRA_MBCMP( sad, 16x16, v, h, dc, , _sse2, _sse2 )
+INTRA_MBCMP( sad, 8x8, dc, h, v, c, _ssse3, _sse2 )
+INTRA_MBCMP( sad, 16x16, v, h, dc, , _ssse3, _sse2 )
+#else
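+// There is no dedicated MMX2 version of predict_8x16c_v; alias the MMX one so
+// the macro expansion below finds the expected _mmx2 name.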
+#define x264_predict_8x16c_v_mmx2 x264_predict_8x16c_v_mmx
+INTRA_MBCMP( sad, 8x16, dc, h, v, c, _mmx2, _mmx2 )
+INTRA_MBCMP(satd, 8x16, dc, h, v, c, _mmx2, _mmx2 )
+INTRA_MBCMP( sad, 8x16, dc, h, v, c, _sse2, _mmx2 )
+INTRA_MBCMP(satd, 8x16, dc, h, v, c, _sse2, _mmx2 )
+INTRA_MBCMP(satd, 8x16, dc, h, v, c, _ssse3, _mmx2 )
+INTRA_MBCMP(satd, 8x16, dc, h, v, c, _sse4, _mmx2 )
+INTRA_MBCMP(satd, 8x16, dc, h, v, c, _avx, _mmx2 )
+INTRA_MBCMP(satd, 8x16, dc, h, v, c, _xop, _mmx2 )
+#endif
+#endif
+
+// No C implementation of intra_satd_x9. See checkasm for its behavior,
+// or see x264_mb_analyse_intra for the entirely different algorithm we
+// use when lacking an asm implementation of it.
+
+
/****************************************************************************
* structural similarity metric
****************************************************************************/
-static void ssim_4x4x2_core( const pixel *pix1, int stride1,
- const pixel *pix2, int stride2,
- int sums[2][4])
+static void ssim_4x4x2_core( const pixel *pix1, intptr_t stride1,
+ const pixel *pix2, intptr_t stride2,
+ int sums[2][4] )
{
for( int z = 0; z < 2; z++ )
{
static float ssim_end1( int s1, int s2, int ss, int s12 )
{
+/* Maximum value for 10-bit is: ss*64 = (2^10-1)^2*16*4*64 = 4286582784, which will overflow in some cases.
+ * s1*s1, s2*s2, and s1*s2 also obtain this value for edge cases: ((2^10-1)*16*4)^2 = 4286582784.
+ * Maximum value for 9-bit is: ss*64 = (2^9-1)^2*16*4*64 = 1069551616, which will not overflow. */
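+/* s1 = sum of pix1, s2 = sum of pix2, ss = sum of both squares, s12 = sum of
+ * products, accumulated over an 8x8 window (ssim_end4 combines four 4x4
+ * sub-blocks); this evaluates the usual SSIM product
+ * (2*mu1*mu2 + C1)(2*cov + C2) / ((mu1^2 + mu2^2 + C1)(var1 + var2 + C2))
+ * with the window scaling folded into the constants. */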
+#if BIT_DEPTH > 9
+#define type float
+ static const float ssim_c1 = .01*.01*PIXEL_MAX*PIXEL_MAX*64;
+ static const float ssim_c2 = .03*.03*PIXEL_MAX*PIXEL_MAX*64*63;
+#else
+#define type int
static const int ssim_c1 = (int)(.01*.01*PIXEL_MAX*PIXEL_MAX*64 + .5);
static const int ssim_c2 = (int)(.03*.03*PIXEL_MAX*PIXEL_MAX*64*63 + .5);
- int vars = ss*64 - s1*s1 - s2*s2;
- int covar = s12*64 - s1*s2;
- return (float)(2*s1*s2 + ssim_c1) * (float)(2*covar + ssim_c2)
- / ((float)(s1*s1 + s2*s2 + ssim_c1) * (float)(vars + ssim_c2));
+#endif
+ type fs1 = s1;
+ type fs2 = s2;
+ type fss = ss;
+ type fs12 = s12;
+ type vars = fss*64 - fs1*fs1 - fs2*fs2;
+ type covar = fs12*64 - fs1*fs2;
+ return (float)(2*fs1*fs2 + ssim_c1) * (float)(2*covar + ssim_c2)
+ / ((float)(fs1*fs1 + fs2*fs2 + ssim_c1) * (float)(vars + ssim_c2));
+#undef type
}
static float ssim_end4( int sum0[5][4], int sum1[5][4], int width )
}
float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
- pixel *pix1, int stride1,
- pixel *pix2, int stride2,
- int width, int height, void *buf )
+ pixel *pix1, intptr_t stride1,
+ pixel *pix2, intptr_t stride2,
+ int width, int height, void *buf, int *cnt )
{
int z = 0;
float ssim = 0.0;
for( int x = 0; x < width-1; x += 4 )
ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
}
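+ /* Number of 4x4 window positions accumulated above; callers divide the
+ * returned total by *cnt to obtain the mean SSIM. */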
+ *cnt = (height-1) * (width-1);
return ssim;
}
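+// Vertical SAD of a 16-pixel-wide strip: total absolute difference between
+// vertically adjacent rows. Interlaced content scores high frame-wise and low
+// field-wise, which x264_field_vsad below exploits for MBAFF decisions.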
+static int pixel_vsad( pixel *src, intptr_t stride, int height )
+{
+ int score = 0;
+ for( int i = 1; i < height; i++, src += stride )
+ for( int j = 0; j < 16; j++ )
+ score += abs(src[j] - src[j+stride]);
+ return score;
+}
+
+int x264_field_vsad( x264_t *h, int mb_x, int mb_y )
+{
+ int score_field, score_frame;
+ int stride = h->fenc->i_stride[0];
+ int mb_stride = h->mb.i_mb_stride;
+ pixel *fenc = h->fenc->plane[0] + 16 * (mb_x + mb_y * stride);
+ int mb_xy = mb_x + mb_y*mb_stride;
+
+ /* We don't want to analyze pixels outside the frame, as it gives inaccurate results. */
+ int mbpair_height = X264_MIN( h->param.i_height - mb_y * 16, 32 );
+ score_frame = h->pixf.vsad( fenc, stride, mbpair_height );
+ score_field = h->pixf.vsad( fenc, stride*2, mbpair_height >> 1 );
+ score_field += h->pixf.vsad( fenc+stride, stride*2, mbpair_height >> 1 );
+
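+ /* Bias the choice toward the field/frame decision already made for the left
+ * and top macroblock pairs, keeping MBAFF flags spatially coherent. */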
+ if( mb_x > 0 )
+ score_field += 512 - h->mb.field[mb_xy-1]*1024;
+ if( mb_y > 0 )
+ score_field += 512 - h->mb.field[mb_xy-mb_stride]*1024;
+
+ return (score_field < score_frame);
+}
+
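+/* Absolute sum of differences, i.e. abs(sum(pix1 - pix2)): unlike SAD this
+ * measures only the net DC offset between two 8-wide blocks, which the
+ * weighted-prediction analysis uses to detect brightness shifts. */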
+static int pixel_asd8( pixel *pix1, intptr_t stride1, pixel *pix2, intptr_t stride2, int height )
+{
+ int sum = 0;
+ for( int y = 0; y < height; y++, pix1 += stride1, pix2 += stride2 )
+ for( int x = 0; x < 8; x++ )
+ sum += pix1[x] - pix2[x];
+ return abs( sum );
+}
/****************************************************************************
* successive elimination
#define INIT7_NAME( name1, name2, cpu ) \
INIT6_NAME( name1, name2, cpu ) \
pixf->name1[PIXEL_4x4] = x264_pixel_##name2##_4x4##cpu;
+#define INIT8_NAME( name1, name2, cpu ) \
+ INIT7_NAME( name1, name2, cpu ) \
+ pixf->name1[PIXEL_4x16] = x264_pixel_##name2##_4x16##cpu;
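+// INIT8 extends INIT7 with the 4x16 partition size added for 4:2:2 support.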
#define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
#define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
#define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
#define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
#define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )
+#define INIT8( name, cpu ) INIT8_NAME( name, name, cpu )
#define INIT_ADS( cpu ) \
pixf->ads[PIXEL_16x16] = x264_pixel_ads4##cpu;\
pixf->ads[PIXEL_16x8] = x264_pixel_ads2##cpu;\
pixf->ads[PIXEL_8x8] = x264_pixel_ads1##cpu;
- INIT7( sad, );
- INIT7_NAME( sad_aligned, sad, );
+ INIT8( sad, );
+ INIT8_NAME( sad_aligned, sad, );
INIT7( sad_x3, );
INIT7( sad_x4, );
- INIT7( ssd, );
- INIT7( satd, );
+ INIT8( ssd, );
+ INIT8( satd, );
INIT7( satd_x3, );
INIT7( satd_x4, );
INIT4( hadamard_ac, );
pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16;
pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8;
pixf->var[PIXEL_16x16] = x264_pixel_var_16x16;
+ pixf->var[PIXEL_8x16] = x264_pixel_var_8x16;
pixf->var[PIXEL_8x8] = x264_pixel_var_8x8;
+ pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16;
+ pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8;
pixf->ssd_nv12_core = pixel_ssd_nv12_core;
pixf->ssim_4x4x2_core = ssim_4x4x2_core;
pixf->ssim_end4 = ssim_end4;
- pixf->var2_8x8 = pixel_var2_8x8;
+ pixf->vsad = pixel_vsad;
+ pixf->asd8 = pixel_asd8;
pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4;
pixf->intra_satd_x3_4x4 = x264_intra_satd_x3_4x4;
pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8;
pixf->intra_sad_x3_8x8c = x264_intra_sad_x3_8x8c;
pixf->intra_satd_x3_8x8c = x264_intra_satd_x3_8x8c;
+ pixf->intra_sad_x3_8x16c = x264_intra_sad_x3_8x16c;
+ pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c;
pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16;
pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16;
-#if !X264_HIGH_BIT_DEPTH
+#if HIGH_BIT_DEPTH
+#if HAVE_MMX
+ if( cpu&X264_CPU_MMX2 )
+ {
+ INIT7( sad, _mmx2 );
+ INIT7( sad_x3, _mmx2 );
+ INIT7( sad_x4, _mmx2 );
+ INIT8( satd, _mmx2 );
+ INIT7( satd_x3, _mmx2 );
+ INIT7( satd_x4, _mmx2 );
+ INIT4( hadamard_ac, _mmx2 );
+ INIT8( ssd, _mmx2 );
+ INIT_ADS( _mmx2 );
+
+ pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_mmx2;
+ pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmx2;
+ pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_mmx2;
+#if ARCH_X86
+ pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_mmx2;
+ pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_mmx2;
+#endif
+
+ pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_mmx2;
+ pixf->intra_satd_x3_4x4 = x264_intra_satd_x3_4x4_mmx2;
+ pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_mmx2;
+ pixf->intra_sad_x3_8x8c = x264_intra_sad_x3_8x8c_mmx2;
+ pixf->intra_satd_x3_8x8c = x264_intra_satd_x3_8x8c_mmx2;
+ pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_mmx2;
+ pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
+ }
+ if( cpu&X264_CPU_SSE2 )
+ {
+ INIT4_NAME( sad_aligned, sad, _sse2_aligned );
+ INIT5( ssd, _sse2 );
+
+ pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
+ pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_sse2;
+#if ARCH_X86_64
+ pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
+#endif
+ pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_sse2;
+ pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_sse2;
+ pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_sse2;
+ pixf->ssim_end4 = x264_pixel_ssim_end4_sse2;
+ pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
+ pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
+ pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_sse2;
+ pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_sse2;
+ pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_sse2;
+ }
+ if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
+ {
+ INIT5( sad, _sse2 );
+ INIT2( sad_x3, _sse2 );
+ INIT2( sad_x4, _sse2 );
+ INIT_ADS( _sse2 );
+
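+ /* The hadamard_ac implementations use aligned stack buffers; skip them when
+ * the stack is only guaranteed to be 4-byte aligned (X264_CPU_STACK_MOD4). */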
+ if( !(cpu&X264_CPU_STACK_MOD4) )
+ {
+ INIT4( hadamard_ac, _sse2 );
+ }
+ pixf->vsad = x264_pixel_vsad_sse2;
+ pixf->asd8 = x264_pixel_asd8_sse2;
+ pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_sse2;
+ pixf->intra_sad_x3_8x8c = x264_intra_sad_x3_8x8c_sse2;
+ pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
+ }
+ if( cpu&X264_CPU_SSE2_IS_FAST )
+ {
+ pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
+ pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
+ pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_sse2;
+ pixf->sad_x3[PIXEL_8x4] = x264_pixel_sad_x3_8x4_sse2;
+ pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
+ pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_sse2;
+ pixf->sad_x4[PIXEL_8x4] = x264_pixel_sad_x4_8x4_sse2;
+ }
+ if( cpu&X264_CPU_SSSE3 )
+ {
+ INIT4_NAME( sad_aligned, sad, _ssse3_aligned );
+ INIT7( sad, _ssse3 );
+ INIT7( sad_x3, _ssse3 );
+ INIT7( sad_x4, _ssse3 );
+ INIT_ADS( _ssse3 );
+
+ if( !(cpu&X264_CPU_STACK_MOD4) )
+ {
+ INIT4( hadamard_ac, _ssse3 );
+ }
+ pixf->vsad = x264_pixel_vsad_ssse3;
+ pixf->asd8 = x264_pixel_asd8_ssse3;
+ pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_ssse3;
+ pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
+ pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_ssse3;
+ pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_ssse3;
+ pixf->intra_sad_x3_8x8c = x264_intra_sad_x3_8x8c_ssse3;
+ pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_ssse3;
+ }
+ if( cpu&X264_CPU_SSE4 )
+ {
+ if( !(cpu&X264_CPU_STACK_MOD4) )
+ {
+ INIT4( hadamard_ac, _sse4 );
+ }
+ pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
+ pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_sse4;
+ }
+ if( cpu&X264_CPU_AVX )
+ {
+ INIT_ADS( _avx );
+ if( !(cpu&X264_CPU_STACK_MOD4) )
+ {
+ INIT4( hadamard_ac, _avx );
+ }
+ pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_avx;
+ pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_avx;
+ pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_avx;
+ pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
+ pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_avx;
+ pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_avx;
+ pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_avx;
+ pixf->ssim_end4 = x264_pixel_ssim_end4_avx;
+ }
+ if( cpu&X264_CPU_XOP )
+ {
+ pixf->vsad = x264_pixel_vsad_xop;
+ pixf->asd8 = x264_pixel_asd8_xop;
+ }
+#endif // HAVE_MMX
+#else // !HIGH_BIT_DEPTH
#if HAVE_MMX
if( cpu&X264_CPU_MMX )
{
- INIT7( ssd, _mmx );
+ INIT8( ssd, _mmx );
}
- if( cpu&X264_CPU_MMXEXT )
+ if( cpu&X264_CPU_MMX2 )
{
- INIT7( sad, _mmxext );
- INIT7_NAME( sad_aligned, sad, _mmxext );
- INIT7( sad_x3, _mmxext );
- INIT7( sad_x4, _mmxext );
- INIT7( satd, _mmxext );
- INIT7( satd_x3, _mmxext );
- INIT7( satd_x4, _mmxext );
- INIT4( hadamard_ac, _mmxext );
- INIT_ADS( _mmxext );
- pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmxext;
- pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_mmxext;
- pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_mmxext;
+ INIT8( sad, _mmx2 );
+ INIT8_NAME( sad_aligned, sad, _mmx2 );
+ INIT7( sad_x3, _mmx2 );
+ INIT7( sad_x4, _mmx2 );
+ INIT8( satd, _mmx2 );
+ INIT7( satd_x3, _mmx2 );
+ INIT7( satd_x4, _mmx2 );
+ INIT4( hadamard_ac, _mmx2 );
+ INIT_ADS( _mmx2 );
+ pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmx2;
+ pixf->var[PIXEL_8x16] = x264_pixel_var_8x16_mmx2;
+ pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_mmx2;
+ pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_mmx2;
#if ARCH_X86
- pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmxext;
- pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_mmxext;
- pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmxext;
- pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_mmxext;
- pixf->var2_8x8 = x264_pixel_var2_8x8_mmxext;
+ pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmx2;
+ pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_mmx2;
+ pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmx2;
+ pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_mmx2;
+ pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_mmx2;
+ pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_mmx2;
+ pixf->vsad = x264_pixel_vsad_mmx2;
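+ /* Cacheline-split-aware SAD variants: avoid loads that straddle a cacheline
+ * boundary, which is expensive on CPUs that set these flags. */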
if( cpu&X264_CPU_CACHELINE_32 )
{
- INIT5( sad, _cache32_mmxext );
- INIT4( sad_x3, _cache32_mmxext );
- INIT4( sad_x4, _cache32_mmxext );
+ INIT5( sad, _cache32_mmx2 );
+ INIT4( sad_x3, _cache32_mmx2 );
+ INIT4( sad_x4, _cache32_mmx2 );
}
else if( cpu&X264_CPU_CACHELINE_64 )
{
- INIT5( sad, _cache64_mmxext );
- INIT4( sad_x3, _cache64_mmxext );
- INIT4( sad_x4, _cache64_mmxext );
+ INIT5( sad, _cache64_mmx2 );
+ INIT4( sad_x3, _cache64_mmx2 );
+ INIT4( sad_x4, _cache64_mmx2 );
}
#else
if( cpu&X264_CPU_CACHELINE_64 )
{
- pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_cache64_mmxext;
- pixf->sad[PIXEL_8x8] = x264_pixel_sad_8x8_cache64_mmxext;
- pixf->sad[PIXEL_8x4] = x264_pixel_sad_8x4_cache64_mmxext;
- pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmxext;
- pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_cache64_mmxext;
- pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmxext;
- pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_cache64_mmxext;
+ pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_cache64_mmx2;
+ pixf->sad[PIXEL_8x8] = x264_pixel_sad_8x8_cache64_mmx2;
+ pixf->sad[PIXEL_8x4] = x264_pixel_sad_8x4_cache64_mmx2;
+ pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmx2;
+ pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_cache64_mmx2;
+ pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmx2;
+ pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_cache64_mmx2;
}
#endif
- pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmxext;
- pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_mmxext;
- pixf->intra_satd_x3_8x8c = x264_intra_satd_x3_8x8c_mmxext;
- pixf->intra_sad_x3_8x8c = x264_intra_sad_x3_8x8c_mmxext;
- pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_mmxext;
- pixf->intra_satd_x3_4x4 = x264_intra_satd_x3_4x4_mmxext;
- pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_mmxext;
+ pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
+ pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_mmx2;
+ pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_mmx2;
+ pixf->intra_sad_x3_8x16c = x264_intra_sad_x3_8x16c_mmx2;
+ pixf->intra_satd_x3_8x8c = x264_intra_satd_x3_8x8c_mmx2;
+ pixf->intra_sad_x3_8x8c = x264_intra_sad_x3_8x8c_mmx2;
+ pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_mmx2;
+ pixf->intra_satd_x3_4x4 = x264_intra_satd_x3_4x4_mmx2;
+ pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_mmx2;
}
if( cpu&X264_CPU_SSE2 )
#if ARCH_X86_64
pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
#endif
- pixf->var2_8x8 = x264_pixel_var2_8x8_sse2;
+ pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_sse2;
+ pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_sse2;
+ pixf->vsad = x264_pixel_vsad_sse2;
+ pixf->asd8 = x264_pixel_asd8_sse2;
}
if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
INIT2( sad_x3, _sse2 );
INIT2( sad_x4, _sse2 );
INIT6( satd, _sse2 );
+ pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_sse2;
INIT6( satd_x3, _sse2 );
INIT6( satd_x4, _sse2 );
if( !(cpu&X264_CPU_STACK_MOD4) )
}
INIT_ADS( _sse2 );
pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
+ pixf->var[PIXEL_8x16] = x264_pixel_var_8x16_sse2;
pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
+ pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_sse2;
+ pixf->intra_sad_x3_8x16c = x264_intra_sad_x3_8x16c_sse2;
if( cpu&X264_CPU_CACHELINE_64 )
{
INIT2( ssd, _sse2); /* faster for width 16 on p4 */
if( !(cpu&X264_CPU_STACK_MOD4) )
{
INIT4( hadamard_ac, _ssse3 );
+ pixf->intra_sad_x9_4x4 = x264_intra_sad_x9_4x4_ssse3;
+ pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_ssse3;
+ pixf->intra_sad_x9_8x8 = x264_intra_sad_x9_8x8_ssse3;
+#if ARCH_X86_64
+ pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_ssse3;
+#endif
}
INIT_ADS( _ssse3 );
if( !(cpu&X264_CPU_SLOW_ATOM) )
{
- INIT7( ssd, _ssse3 );
+ INIT8( ssd, _ssse3 );
pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_ssse3;
- INIT7( satd, _ssse3 );
+ INIT8( satd, _ssse3 );
INIT7( satd_x3, _ssse3 );
INIT7( satd_x4, _ssse3 );
}
pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_ssse3;
+ pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_ssse3;
pixf->intra_satd_x3_8x8c = x264_intra_satd_x3_8x8c_ssse3;
pixf->intra_sad_x3_8x8c = x264_intra_sad_x3_8x8c_ssse3;
- pixf->intra_satd_x3_4x4 = x264_intra_satd_x3_4x4_ssse3;
-#if ARCH_X86_64
- pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_ssse3;
-#endif
- pixf->var2_8x8 = x264_pixel_var2_8x8_ssse3;
+ pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_ssse3;
+ pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_ssse3;
+ pixf->asd8 = x264_pixel_asd8_ssse3;
if( cpu&X264_CPU_CACHELINE_64 )
{
INIT2( sad, _cache64_ssse3 );
if( cpu&X264_CPU_SSE4 )
{
- INIT7( satd, _sse4 );
+ INIT8( satd, _sse4 );
INIT7( satd_x3, _sse4 );
INIT7( satd_x4, _sse4 );
if( !(cpu&X264_CPU_STACK_MOD4) )
{
INIT4( hadamard_ac, _sse4 );
+ pixf->intra_sad_x9_4x4 = x264_intra_sad_x9_4x4_sse4;
+ pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_sse4;
+ pixf->intra_sad_x9_8x8 = x264_intra_sad_x9_8x8_sse4;
+#if ARCH_X86_64
+ pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_sse4;
+#endif
}
pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_sse4;
- pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_sse4;
- /* Slower on Conroe, so only enable under SSE4 */
- pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_ssse3;
+ pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_sse4;
+ }
+
+ if( cpu&X264_CPU_AVX )
+ {
+ INIT8( satd, _avx );
+ INIT7( satd_x3, _avx );
+ INIT7( satd_x4, _avx );
+ INIT_ADS( _avx );
+ if( !(cpu&X264_CPU_STACK_MOD4) )
+ {
+ INIT4( hadamard_ac, _avx );
+ pixf->intra_sad_x9_4x4 = x264_intra_sad_x9_4x4_avx;
+ pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_avx;
+ pixf->intra_sad_x9_8x8 = x264_intra_sad_x9_8x8_avx;
+#if ARCH_X86_64
+ pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_avx;
+#endif
+ }
+ INIT5( ssd, _avx );
+ pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_avx;
+ pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_avx;
+ pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_avx;
+ pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_avx;
+ pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
+ pixf->var[PIXEL_8x16] = x264_pixel_var_8x16_avx;
+ pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_avx;
+ pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_avx;
+ pixf->ssim_end4 = x264_pixel_ssim_end4_avx;
+ }
+
+ if( cpu&X264_CPU_XOP )
+ {
+ INIT7( satd, _xop );
+ INIT7( satd_x3, _xop );
+ INIT7( satd_x4, _xop );
+ if( !(cpu&X264_CPU_STACK_MOD4) )
+ {
+ INIT4( hadamard_ac, _xop );
+ pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_xop;
+ }
+ INIT5( ssd, _xop );
+ pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_xop;
+ pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_xop;
+ pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_xop;
+ pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_xop;
+ pixf->var[PIXEL_8x16] = x264_pixel_var_8x16_xop;
+ pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_xop;
+ pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_xop;
+ pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_xop;
}
#endif //HAVE_MMX
pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_neon;
pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_neon;
- pixf->var2_8x8 = x264_pixel_var2_8x8_neon;
+ pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_neon;
pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_neon;
pixf->ssim_end4 = x264_pixel_ssim_end4_neon;
}
}
#endif
-#endif // !X264_HIGH_BIT_DEPTH
+#endif // HIGH_BIT_DEPTH
#if HAVE_ALTIVEC
if( cpu&X264_CPU_ALTIVEC )
{
x264_pixel_altivec_init( pixf );
}
#endif
-#if !X264_HIGH_BIT_DEPTH
-#if ARCH_UltraSparc
+#if !HIGH_BIT_DEPTH
+#if ARCH_UltraSPARC
INIT4( sad, _vis );
INIT4( sad_x3, _vis );
INIT4( sad_x4, _vis );
#endif
-#endif // !X264_HIGH_BIT_DEPTH
+#endif // !HIGH_BIT_DEPTH
pixf->ads[PIXEL_8x16] =
pixf->ads[PIXEL_8x4] =