/*****************************************************************************
* checkasm.c: assembly check tool
*****************************************************************************
- * Copyright (C) 2003-2012 x264 project
+ * Copyright (C) 2003-2013 x264 project
*
* Authors: Loren Merritt <lorenm@u.washington.edu>
* Laurent Aimar <fenrir@via.ecp.fr>
{
void *pointer; // just for detecting duplicates
uint32_t cpu;
- uint32_t cycles;
+ uint64_t cycles;
uint32_t den;
} bench_t;
static void print_bench(void)
{
- uint16_t nops[10000] = {0};
+ uint16_t nops[10000];
int nfuncs, nop_time=0;
for( int i = 0; i < 10000; i++ )
{
- int t = read_time();
+ uint32_t t = read_time();
nops[i] = read_time() - t;
}
qsort( nops, 10000, sizeof(uint16_t), cmp_nop );
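+    /* sort so the overhead estimate can be drawn from typical samples rather than outliers (interrupts etc.) */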
if( k < j )
continue;
printf( "%s_%s%s: %"PRId64"\n", benchs[i].name,
+#if HAVE_MMX
+ b->cpu&X264_CPU_AVX2 && b->cpu&X264_CPU_FMA3 ? "avx2_fma3" :
+ b->cpu&X264_CPU_AVX2 ? "avx2" :
+ b->cpu&X264_CPU_FMA3 ? "fma3" :
b->cpu&X264_CPU_FMA4 ? "fma4" :
b->cpu&X264_CPU_XOP ? "xop" :
b->cpu&X264_CPU_AVX ? "avx" :
/* print sse2slow only if there's also an sse2fast version of the same func */
b->cpu&X264_CPU_SSE2_IS_SLOW && j<MAX_CPUS-1 && b[1].cpu&X264_CPU_SSE2_IS_FAST && !(b[1].cpu&X264_CPU_SSE3) ? "sse2slow" :
b->cpu&X264_CPU_SSE2 ? "sse2" :
+ b->cpu&X264_CPU_SSE ? "sse" :
b->cpu&X264_CPU_MMX ? "mmx" :
+#elif ARCH_PPC
b->cpu&X264_CPU_ALTIVEC ? "altivec" :
+#elif ARCH_ARM
b->cpu&X264_CPU_NEON ? "neon" :
- b->cpu&X264_CPU_ARMV6 ? "armv6" : "c",
+ b->cpu&X264_CPU_ARMV6 ? "armv6" :
+#endif
+ "c",
+#if HAVE_MMX
b->cpu&X264_CPU_CACHELINE_32 ? "_c32" :
+ b->cpu&X264_CPU_SLOW_ATOM && b->cpu&X264_CPU_CACHELINE_64 ? "_c64_atom" :
b->cpu&X264_CPU_CACHELINE_64 ? "_c64" :
- b->cpu&X264_CPU_SHUFFLE_IS_FAST && !(b->cpu&X264_CPU_SSE4) ? "_fastshuffle" :
+ b->cpu&X264_CPU_SLOW_SHUFFLE ? "_slowshuffle" :
b->cpu&X264_CPU_SSE_MISALIGN ? "_misalign" :
b->cpu&X264_CPU_LZCNT ? "_lzcnt" :
- b->cpu&X264_CPU_FAST_NEON_MRC ? "_fast_mrc" :
+ b->cpu&X264_CPU_BMI2 ? "_bmi2" :
+ b->cpu&X264_CPU_BMI1 ? "_bmi1" :
b->cpu&X264_CPU_SLOW_CTZ ? "_slow_ctz" :
- b->cpu&X264_CPU_SLOW_ATOM ? "_slow_atom" : "",
+ b->cpu&X264_CPU_SLOW_ATOM ? "_atom" :
+#elif ARCH_ARM
+ b->cpu&X264_CPU_FAST_NEON_MRC ? "_fast_mrc" :
+#endif
+ "",
((int64_t)10*b->cycles/b->den - nop_time)/4 );
}
}
#if ARCH_X86 || ARCH_X86_64
int x264_stack_pagealign( int (*func)(), int align );
+
+/* detect when callee-saved regs aren't saved.
+ * needs an explicit asm check because it only sometimes crashes in normal use. */
+intptr_t x264_checkasm_call( intptr_t (*func)(), int *ok, ... );
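+/* (the asm wrapper seeds the callee-saved registers with canary values before the call
+ * and verifies them afterwards, clearing *ok if any were clobbered) */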
#else
#define x264_stack_pagealign( func, align ) func()
#endif
#define call_c1(func,...) func(__VA_ARGS__)
-#if ARCH_X86 || defined(_WIN64)
-/* detect when callee-saved regs aren't saved.
- * needs an explicit asm check because it only sometimes crashes in normal use. */
-intptr_t x264_checkasm_call( intptr_t (*func)(), int *ok, ... );
-#define call_a1(func,...) x264_checkasm_call((intptr_t(*)())func, &ok, __VA_ARGS__)
+#if ARCH_X86_64
+/* Evil hack: detect incorrect assumptions that 32-bit ints are zero-extended to 64-bit.
+ * This is done by clobbering the stack with junk around the stack pointer and calling the
+ * assembly function through x264_checkasm_call with added dummy arguments which force all
+ * real arguments to be passed on the stack and not in registers. For 32-bit arguments the
+ * upper half of the 64-bit register location on the stack will now contain junk. Note that
+ * this is dependent on compiler behaviour and that interrupts etc. at the wrong time may
+ * overwrite the junk written to the stack, so there's no guarantee that it will always
+ * detect all functions that assume zero-extension.
+ */
+void x264_checkasm_stack_clobber( uint64_t clobber, ... );
+#define call_a1(func,...) ({ \
+ uint64_t r = (rand() & 0xffff) * 0x0001000100010001ULL; \
+ x264_checkasm_stack_clobber( r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r ); /* max_args+6 */ \
+ x264_checkasm_call(( intptr_t(*)())func, &ok, 0, 0, 0, 0, __VA_ARGS__ ); })
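+/* e.g. an asm function that indexes with the full 64-bit register for an int argument
+ * loaded from the stack will pick up the junk in the upper half and miscompare. */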
+#elif ARCH_X86
+#define call_a1(func,...) x264_checkasm_call( (intptr_t(*)())func, &ok, __VA_ARGS__ )
#else
#define call_a1 call_c1
#endif
#define call_bench(func,cpu,...)\
if( do_bench && !strncmp(func_name, bench_pattern, bench_pattern_len) )\
{\
- uint32_t tsum = 0;\
+ uint64_t tsum = 0;\
int tcount = 0;\
call_a1(func, __VA_ARGS__);\
for( int ti = 0; ti < (cpu?BENCH_RUNS:BENCH_RUNS/4); ti++ )\
func(__VA_ARGS__);\
func(__VA_ARGS__);\
t = read_time() - t;\
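+        /* accept a sample only if it's at most 4x the running average, rejecting outlier timings */\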
- if( t*tcount <= tsum*4 && ti > 0 )\
+ if( (uint64_t)t*tcount <= tsum*4 && ti > 0 )\
{\
tsum += t;\
tcount++;\
#define TEST_PIXEL( name, align ) \
ok = 1, used_asm = 0; \
- for( int i = 0; i < 8; i++ ) \
+ for( int i = 0; i < ARRAY_ELEMS(pixel_c.name); i++ ) \
{ \
int res_c, res_asm; \
if( pixel_asm.name[i] != pixel_ref.name[i] ) \
used_asm = 1; \
for( int j = 0; j < 64; j++ ) \
{ \
- res_c = call_c( pixel_c.name[i], pbuf1, 16, pbuf2+j*!align, 64 ); \
- res_asm = call_a( pixel_asm.name[i], pbuf1, 16, pbuf2+j*!align, 64 ); \
+ res_c = call_c( pixel_c.name[i], pbuf1, (intptr_t)16, pbuf2+j*!align, (intptr_t)64 ); \
+ res_asm = call_a( pixel_asm.name[i], pbuf1, (intptr_t)16, pbuf2+j*!align, (intptr_t)64 ); \
if( res_c != res_asm ) \
{ \
ok = 0; \
TEST_PIXEL( satd, 0 );
TEST_PIXEL( sa8d, 1 );
+ ok = 1, used_asm = 0;
+ if( pixel_asm.sa8d_satd[PIXEL_16x16] != pixel_ref.sa8d_satd[PIXEL_16x16] )
+ {
+ set_func_name( "sa8d_satd_%s", pixel_names[PIXEL_16x16] );
+ used_asm = 1;
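+        /* sa8d_satd computes both metrics in one pass: sa8d in the low 32 bits of the result, satd in the high 32 */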
+ for( int j = 0; j < 64; j++ )
+ {
+ uint32_t cost8_c = pixel_c.sa8d[PIXEL_16x16]( pbuf1, 16, pbuf2, 64 );
+ uint32_t cost4_c = pixel_c.satd[PIXEL_16x16]( pbuf1, 16, pbuf2, 64 );
+ uint64_t res_a = call_a( pixel_asm.sa8d_satd[PIXEL_16x16], pbuf1, (intptr_t)16, pbuf2, (intptr_t)64 );
+ uint32_t cost8_a = res_a;
+ uint32_t cost4_a = res_a >> 32;
+ if( cost8_a != cost8_c || cost4_a != cost4_c )
+ {
+ ok = 0;
+ fprintf( stderr, "sa8d_satd [%d]: (%d,%d) != (%d,%d) [FAILED]\n", PIXEL_16x16,
+ cost8_c, cost4_c, cost8_a, cost4_a );
+ break;
+ }
+ }
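+        /* second pass over different data to probe for overflow in the packed 32-bit sums */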
+        for( int j = 0; j < 0x1000 && ok; j += 256 )
+ {
+ uint32_t cost8_c = pixel_c.sa8d[PIXEL_16x16]( pbuf3+j, 16, pbuf4+j, 16 );
+ uint32_t cost4_c = pixel_c.satd[PIXEL_16x16]( pbuf3+j, 16, pbuf4+j, 16 );
+ uint64_t res_a = pixel_asm.sa8d_satd[PIXEL_16x16]( pbuf3+j, 16, pbuf4+j, 16 );
+ uint32_t cost8_a = res_a;
+ uint32_t cost4_a = res_a >> 32;
+ if( cost8_a != cost8_c || cost4_a != cost4_c )
+ {
+ ok = 0;
+ fprintf( stderr, "sa8d_satd [%d]: overflow (%d,%d) != (%d,%d) [FAILED]\n", PIXEL_16x16,
+ cost8_c, cost4_c, cost8_a, cost4_a );
+ }
+ }
+ }
+ report( "pixel sa8d_satd :" );
+
#define TEST_PIXEL_X( N ) \
ok = 1; used_asm = 0; \
for( int i = 0; i < 7; i++ ) \
{ \
- int res_c[4]={0}, res_asm[4]={0}; \
+ ALIGNED_16( int res_c[4] ) = {0}; \
+ ALIGNED_16( int res_asm[4] ) = {0}; \
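+    /* aligned so the asm can store all four results with a single 16-byte write */ \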
if( pixel_asm.sad_x##N[i] && pixel_asm.sad_x##N[i] != pixel_ref.sad_x##N[i] ) \
{ \
set_func_name( "sad_x%d_%s", N, pixel_names[i] ); \
for( int j = 0; j < 64; j++ ) \
{ \
pixel *pix2 = pbuf2+j; \
- res_c[0] = pixel_c.sad[i]( pbuf1, 16, pix2, 64 ); \
+ res_c[0] = pixel_c.sad[i]( pbuf1, 16, pix2, 64 ); \
res_c[1] = pixel_c.sad[i]( pbuf1, 16, pix2+6, 64 ); \
res_c[2] = pixel_c.sad[i]( pbuf1, 16, pix2+1, 64 ); \
if( N == 4 ) \
{ \
res_c[3] = pixel_c.sad[i]( pbuf1, 16, pix2+10, 64 ); \
- call_a( pixel_asm.sad_x4[i], pbuf1, pix2, pix2+6, pix2+1, pix2+10, 64, res_asm ); \
+ call_a( pixel_asm.sad_x4[i], pbuf1, pix2, pix2+6, pix2+1, pix2+10, (intptr_t)64, res_asm ); \
} \
else \
- call_a( pixel_asm.sad_x3[i], pbuf1, pix2, pix2+6, pix2+1, 64, res_asm ); \
+ call_a( pixel_asm.sad_x3[i], pbuf1, pix2, pix2+6, pix2+1, (intptr_t)64, res_asm ); \
if( memcmp(res_c, res_asm, sizeof(res_c)) ) \
{ \
ok = 0; \
res_asm[0], res_asm[1], res_asm[2], res_asm[3] ); \
} \
if( N == 4 ) \
- call_c2( pixel_c.sad_x4[i], pbuf1, pix2, pix2+6, pix2+1, pix2+10, 64, res_asm ); \
+ call_c2( pixel_c.sad_x4[i], pbuf1, pix2, pix2+6, pix2+1, pix2+10, (intptr_t)64, res_asm ); \
else \
- call_c2( pixel_c.sad_x3[i], pbuf1, pix2, pix2+6, pix2+1, 64, res_asm ); \
+ call_c2( pixel_c.sad_x3[i], pbuf1, pix2, pix2+6, pix2+1, (intptr_t)64, res_asm ); \
} \
} \
} \
set_func_name( "%s_%s", "var", pixel_names[i] ); \
used_asm = 1; \
    /* the abi-check wrapper can't return a uint64_t, so check the return value separately */ \
- call_c1( pixel_c.var[i], pbuf1, 16 ); \
- call_a1( pixel_asm.var[i], pbuf1, 16 ); \
+ call_c1( pixel_c.var[i], pbuf1, 16 ); \
+ call_a1( pixel_asm.var[i], pbuf1, (intptr_t)16 ); \
uint64_t res_c = pixel_c.var[i]( pbuf1, 16 ); \
uint64_t res_asm = pixel_asm.var[i]( pbuf1, 16 ); \
if( res_c != res_asm ) \
ok = 0; \
fprintf( stderr, "var[%d]: %d %d != %d %d [FAILED]\n", i, (int)res_c, (int)(res_c>>32), (int)res_asm, (int)(res_asm>>32) ); \
} \
- call_c2( pixel_c.var[i], pbuf1, 16 ); \
- call_a2( pixel_asm.var[i], pbuf1, 16 ); \
+ call_c2( pixel_c.var[i], pbuf1, (intptr_t)16 ); \
+ call_a2( pixel_asm.var[i], pbuf1, (intptr_t)16 ); \
}
ok = 1; used_asm = 0;
int res_c, res_asm, ssd_c, ssd_asm; \
set_func_name( "%s_%s", "var2", pixel_names[i] ); \
used_asm = 1; \
- res_c = call_c( pixel_c.var2[i], pbuf1, 16, pbuf2, 16, &ssd_c ); \
- res_asm = call_a( pixel_asm.var2[i], pbuf1, 16, pbuf2, 16, &ssd_asm ); \
+ res_c = call_c( pixel_c.var2[i], pbuf1, (intptr_t)16, pbuf2, (intptr_t)16, &ssd_c ); \
+ res_asm = call_a( pixel_asm.var2[i], pbuf1, (intptr_t)16, pbuf2, (intptr_t)16, &ssd_asm ); \
if( res_c != res_asm || ssd_c != ssd_asm ) \
{ \
ok = 0; \
for( int j = 0; j < 32; j++ )
{
pixel *pix = (j&16 ? pbuf1 : pbuf3) + (j&15)*256;
- call_c1( pixel_c.hadamard_ac[i], pbuf1, 16 );
- call_a1( pixel_asm.hadamard_ac[i], pbuf1, 16 );
+ call_c1( pixel_c.hadamard_ac[i], pbuf1, (intptr_t)16 );
+ call_a1( pixel_asm.hadamard_ac[i], pbuf1, (intptr_t)16 );
uint64_t rc = pixel_c.hadamard_ac[i]( pix, 16 );
uint64_t ra = pixel_asm.hadamard_ac[i]( pix, 16 );
if( rc != ra )
break;
}
}
- call_c2( pixel_c.hadamard_ac[i], pbuf1, 16 );
- call_a2( pixel_asm.hadamard_ac[i], pbuf1, 16 );
+ call_c2( pixel_c.hadamard_ac[i], pbuf1, (intptr_t)16 );
+ call_a2( pixel_asm.hadamard_ac[i], pbuf1, (intptr_t)16 );
}
report( "pixel hadamard_ac :" );
for( int j = 0; j < 2 && ok; j++ )
{
pixel *p = j ? pbuf4 : pbuf1;
- res_c = call_c( pixel_c.vsad, p, 16, h );
- res_asm = call_a( pixel_asm.vsad, p, 16, h );
+ res_c = call_c( pixel_c.vsad, p, (intptr_t)16, h );
+ res_asm = call_a( pixel_asm.vsad, p, (intptr_t)16, h );
if( res_c != res_asm )
{
ok = 0;
}
report( "pixel vsad :" );
+ ok = 1; used_asm = 0;
+ if( pixel_asm.asd8 != pixel_ref.asd8 )
+ {
+ set_func_name( "asd8" );
+ used_asm = 1;
+ int res_c = call_c( pixel_c.asd8, pbuf1, (intptr_t)8, pbuf2, (intptr_t)8, 16 );
+ int res_a = call_a( pixel_asm.asd8, pbuf1, (intptr_t)8, pbuf2, (intptr_t)8, 16 );
+ if( res_c != res_a )
+ {
+ ok = 0;
+ fprintf( stderr, "asd: %d != %d\n", res_c, res_a );
+ }
+ }
+ report( "pixel asd :" );
+
#define TEST_INTRA_X3( name, i8x8, ... ) \
if( pixel_asm.name && pixel_asm.name != pixel_ref.name ) \
{ \
- int res_c[3], res_asm[3]; \
+ ALIGNED_16( int res_c[3] ); \
+ ALIGNED_16( int res_asm[3] ); \
set_func_name( #name ); \
used_asm = 1; \
call_c( pixel_c.name, pbuf1+48, i8x8 ? edge : pbuf3+48, res_c ); \
fprintf( stderr, "ssd_nv12: %"PRIu64",%"PRIu64" != %"PRIu64",%"PRIu64"\n",
res_u_c, res_v_c, res_u_a, res_v_a );
}
- call_c( pixel_c.ssd_nv12_core, pbuf1, 368, pbuf2, 368, 360, 8, &res_u_c, &res_v_c );
- call_a( pixel_asm.ssd_nv12_core, pbuf1, 368, pbuf2, 368, 360, 8, &res_u_a, &res_v_a );
+ call_c( pixel_c.ssd_nv12_core, pbuf1, (intptr_t)368, pbuf2, (intptr_t)368, 360, 8, &res_u_c, &res_v_c );
+ call_a( pixel_asm.ssd_nv12_core, pbuf1, (intptr_t)368, pbuf2, (intptr_t)368, 360, 8, &res_u_a, &res_v_a );
}
report( "ssd_nv12 :" );
fprintf( stderr, "ssim: %.7f != %.7f [FAILED]\n", res_c, res_a );
}
set_func_name( "ssim_core" );
- call_c2( pixel_c.ssim_4x4x2_core, pbuf1+2, 32, pbuf2+2, 32, sums );
- call_a2( pixel_asm.ssim_4x4x2_core, pbuf1+2, 32, pbuf2+2, 32, sums );
+ call_c2( pixel_c.ssim_4x4x2_core, pbuf1+2, (intptr_t)32, pbuf2+2, (intptr_t)32, sums );
+ call_a2( pixel_asm.ssim_4x4x2_core, pbuf1+2, (intptr_t)32, pbuf2+2, (intptr_t)32, sums );
set_func_name( "ssim_end" );
call_c2( pixel_c.ssim_end4, sums, sums, 4 );
call_a2( pixel_asm.ssim_end4, sums, sums, 4 );
{
ALIGNED_16( uint16_t sums[72] );
ALIGNED_16( int dc[4] );
- ALIGNED_16( int16_t mvs_a[32] );
- ALIGNED_16( int16_t mvs_c[32] );
+ ALIGNED_16( int16_t mvs_a[48] );
+ ALIGNED_16( int16_t mvs_c[48] );
int mvn_a, mvn_c;
int thresh = rand() & 0x3fff;
set_func_name( "esa_ads" );
x264_dct_function_t dct_asm;
x264_quant_function_t qf;
int ret = 0, ok, used_asm, interlace = 0;
- ALIGNED_16( dctcoef dct1[16][16] );
- ALIGNED_16( dctcoef dct2[16][16] );
- ALIGNED_16( dctcoef dct4[16][16] );
- ALIGNED_16( dctcoef dct8[4][64] );
+ ALIGNED_ARRAY_N( dctcoef, dct1, [16],[16] );
+ ALIGNED_ARRAY_N( dctcoef, dct2, [16],[16] );
+ ALIGNED_ARRAY_N( dctcoef, dct4, [16],[16] );
+ ALIGNED_ARRAY_N( dctcoef, dct8, [4],[64] );
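+    /* ALIGNED_ARRAY_N aligns to the widest SIMD width in use (32 bytes once AVX2 is enabled) */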
ALIGNED_16( dctcoef dctdc[2][8] );
x264_t h_buf;
x264_t *h = &h_buf;
call_a( zigzag_asm[interlace].name, t2, dct, buf4 ); \
if( memcmp( t1, t2, size*sizeof(dctcoef) ) || memcmp( buf3, buf4, 10 ) ) \
{ \
- ok = 0; \
+            ok = 0; \
+            printf( "%d: %d %d %d %d\n%d %d %d %d\n\n", memcmp( t1, t2, size*sizeof(dctcoef) ), \
+                    buf3[0], buf3[1], buf3[8], buf3[9], buf4[0], buf4[1], buf4[8], buf4[9] ); \
+            break; \
} \
} \
}
x264_zigzag_init( cpu_new, &zigzag_asm[0], &zigzag_asm[1] );
ok = 1; used_asm = 0;
- TEST_INTERLEAVE( interleave_8x8_cavlc, level1, level2, dct1[0], 64 );
+ TEST_INTERLEAVE( interleave_8x8_cavlc, level1, level2, dct8[0], 64 );
report( "zigzag_interleave :" );
for( interlace = 0; interlace <= 1; interlace++ )
{
ok = 1; used_asm = 0;
- TEST_ZIGZAG_SCAN( scan_8x8, level1, level2, dct1[0], 8 );
+ TEST_ZIGZAG_SCAN( scan_8x8, level1, level2, dct8[0], 8 );
TEST_ZIGZAG_SCAN( scan_4x4, level1, level2, dct1[0], 4 );
TEST_ZIGZAG_SUB( sub_4x4, level1, level2, 16 );
TEST_ZIGZAG_SUBAC( sub_4x4ac, level1, level2 );
int ret = 0, ok, used_asm;
- x264_mc_init( 0, &mc_c );
- x264_mc_init( cpu_ref, &mc_ref );
- x264_mc_init( cpu_new, &mc_a );
+ x264_mc_init( 0, &mc_c, 0 );
+ x264_mc_init( cpu_ref, &mc_ref, 0 );
+ x264_mc_init( cpu_new, &mc_a, 0 );
x264_pixel_init( 0, &pixf );
#define MC_TEST_LUMA( w, h ) \
used_asm = 1; \
for( int i = 0; i < 1024; i++ ) \
pbuf3[i] = pbuf4[i] = 0xCD; \
- call_c( mc_c.mc_luma, dst1, 32, src2, 64, dx, dy, w, h, weight ); \
- call_a( mc_a.mc_luma, dst2, 32, src2, 64, dx, dy, w, h, weight ); \
+ call_c( mc_c.mc_luma, dst1, (intptr_t)32, src2, (intptr_t)64, dx, dy, w, h, weight ); \
+ call_a( mc_a.mc_luma, dst2, (intptr_t)32, src2, (intptr_t)64, dx, dy, w, h, weight ); \
if( memcmp( pbuf3, pbuf4, 1024 * sizeof(pixel) ) ) \
{ \
fprintf( stderr, "mc_luma[mv(%d,%d) %2dx%-2d] [FAILED]\n", dx, dy, w, h ); \
if( mc_a.get_ref != mc_ref.get_ref ) \
{ \
pixel *ref = dst2; \
- int ref_stride = 32; \
+ intptr_t ref_stride = 32; \
int w_checked = ( ( sizeof(pixel) == 2 && (w == 12 || w == 20)) ? w-2 : w ); \
const x264_weight_t *weight = x264_weight_none; \
set_func_name( "get_ref_%dx%d", w_checked, h ); \
used_asm = 1; \
for( int i = 0; i < 1024; i++ ) \
pbuf3[i] = pbuf4[i] = 0xCD; \
- call_c( mc_c.mc_luma, dst1, 32, src2, 64, dx, dy, w, h, weight ); \
- ref = (pixel*)call_a( mc_a.get_ref, ref, &ref_stride, src2, 64, dx, dy, w, h, weight ); \
+ call_c( mc_c.mc_luma, dst1, (intptr_t)32, src2, (intptr_t)64, dx, dy, w, h, weight ); \
+ ref = (pixel*)call_a( mc_a.get_ref, ref, &ref_stride, src2, (intptr_t)64, dx, dy, w, h, weight ); \
for( int i = 0; i < h; i++ ) \
if( memcmp( dst1+i*32, ref+i*ref_stride, w_checked * sizeof(pixel) ) ) \
{ \
used_asm = 1; \
for( int i = 0; i < 1024; i++ ) \
pbuf3[i] = pbuf4[i] = 0xCD; \
- call_c( mc_c.mc_chroma, dst1, dst1+8, 16, src, 64, dx, dy, w, h ); \
- call_a( mc_a.mc_chroma, dst2, dst2+8, 16, src, 64, dx, dy, w, h ); \
+ call_c( mc_c.mc_chroma, dst1, dst1+8, (intptr_t)16, src, (intptr_t)64, dx, dy, w, h ); \
+ call_a( mc_a.mc_chroma, dst2, dst2+8, (intptr_t)16, src, (intptr_t)64, dx, dy, w, h ); \
/* mc_chroma width=2 may write garbage to the right of dst. ignore that. */ \
for( int j = 0; j < h; j++ ) \
for( int i = w; i < 8; i++ ) \
{ \
dst2[i+j*16+8] = dst1[i+j*16+8]; \
- dst2[i+j*16] = dst1[i+j*16]; \
+ dst2[i+j*16 ] = dst1[i+j*16 ]; \
} \
if( memcmp( pbuf3, pbuf4, 1024 * sizeof(pixel) ) ) \
{ \
{ \
set_func_name( "%s_%s", #name, pixel_names[i] ); \
used_asm = 1; \
- call_c1( mc_c.name[i], pbuf3, 16, pbuf2+1, 16, pbuf1+18, 16, weight ); \
- call_a1( mc_a.name[i], pbuf4, 16, pbuf2+1, 16, pbuf1+18, 16, weight ); \
+ call_c1( mc_c.name[i], pbuf3, (intptr_t)16, pbuf2+1, (intptr_t)16, pbuf1+18, (intptr_t)16, weight ); \
+ call_a1( mc_a.name[i], pbuf4, (intptr_t)16, pbuf2+1, (intptr_t)16, pbuf1+18, (intptr_t)16, weight ); \
if( memcmp( pbuf3, pbuf4, 320 * sizeof(pixel) ) ) \
{ \
ok = 0; \
fprintf( stderr, #name "[%d]: [FAILED]\n", i ); \
} \
- call_c2( mc_c.name[i], pbuf3, 16, pbuf2+1, 16, pbuf1+18, 16, weight ); \
- call_a2( mc_a.name[i], pbuf4, 16, pbuf2+1, 16, pbuf1+18, 16, weight ); \
+ call_c2( mc_c.name[i], pbuf3, (intptr_t)16, pbuf2+1, (intptr_t)16, pbuf1+18, (intptr_t)16, weight ); \
+ call_a2( mc_a.name[i], pbuf4, (intptr_t)16, pbuf2+1, (intptr_t)16, pbuf1+18, (intptr_t)16, weight ); \
} \
} \
}
{ \
set_func_name( "%s_w%d", #name, j ); \
used_asm = 1; \
- call_c1( mc_c.weight[i], buffC, 32, pbuf2+align_off, 32, &weight, 16 ); \
+ call_c1( mc_c.weight[i], buffC, (intptr_t)32, pbuf2+align_off, (intptr_t)32, &weight, 16 ); \
mc_a.weight_cache(&ha, &weight); \
- call_a1( weight.weightfn[i], buffA, 32, pbuf2+align_off, 32, &weight, 16 ); \
+ call_a1( weight.weightfn[i], buffA, (intptr_t)32, pbuf2+align_off, (intptr_t)32, &weight, 16 ); \
for( int k = 0; k < 16; k++ ) \
if( memcmp( &buffC[k*32], &buffA[k*32], j * sizeof(pixel) ) ) \
{ \
fprintf( stderr, #name "[%d]: [FAILED] s:%d o:%d d%d\n", i, s, o, d ); \
break; \
} \
- call_c2( mc_c.weight[i], buffC, 32, pbuf2+align_off, 32, &weight, 16 ); \
- call_a2( weight.weightfn[i], buffA, 32, pbuf2+align_off, 32, &weight, 16 ); \
+ /* omit unlikely high scales for benchmarking */ \
+ if( (s << (8-d)) < 512 ) \
+ { \
+ call_c2( mc_c.weight[i], buffC, (intptr_t)32, pbuf2+align_off, (intptr_t)32, &weight, 16 ); \
+ call_a2( weight.weightfn[i], buffA, (intptr_t)32, pbuf2+align_off, (intptr_t)32, &weight, 16 ); \
+ } \
} \
}
used_asm = 1;
memset( pbuf3, 0, 64*height );
memset( pbuf4, 0, 64*height );
- call_c( mc_c.store_interleave_chroma, pbuf3, 64, pbuf1, pbuf1+16, height );
- call_a( mc_a.store_interleave_chroma, pbuf4, 64, pbuf1, pbuf1+16, height );
+ call_c( mc_c.store_interleave_chroma, pbuf3, (intptr_t)64, pbuf1, pbuf1+16, height );
+ call_a( mc_a.store_interleave_chroma, pbuf4, (intptr_t)64, pbuf1, pbuf1+16, height );
if( memcmp( pbuf3, pbuf4, 64*height ) )
{
ok = 0;
{
set_func_name( "load_deinterleave_chroma_fenc" );
used_asm = 1;
- call_c( mc_c.load_deinterleave_chroma_fenc, pbuf3, pbuf1, 64, height );
- call_a( mc_a.load_deinterleave_chroma_fenc, pbuf4, pbuf1, 64, height );
+ call_c( mc_c.load_deinterleave_chroma_fenc, pbuf3, pbuf1, (intptr_t)64, height );
+ call_a( mc_a.load_deinterleave_chroma_fenc, pbuf4, pbuf1, (intptr_t)64, height );
if( memcmp( pbuf3, pbuf4, FENC_STRIDE*height ) )
{
ok = 0;
{
set_func_name( "load_deinterleave_chroma_fdec" );
used_asm = 1;
- call_c( mc_c.load_deinterleave_chroma_fdec, pbuf3, pbuf1, 64, height );
- call_a( mc_a.load_deinterleave_chroma_fdec, pbuf4, pbuf1, 64, height );
+ call_c( mc_c.load_deinterleave_chroma_fdec, pbuf3, pbuf1, (intptr_t)64, height );
+ call_a( mc_a.load_deinterleave_chroma_fdec, pbuf4, pbuf1, (intptr_t)64, height );
if( memcmp( pbuf3, pbuf4, FDEC_STRIDE*height ) )
{
ok = 0;
{
int w = plane_specs[i].w;
int h = plane_specs[i].h;
- int src_stride = plane_specs[i].src_stride;
- int dst_stride = (w + 127) & ~63;
+ intptr_t src_stride = plane_specs[i].src_stride;
+ intptr_t dst_stride = (w + 127) & ~63;
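+        /* round the destination stride up to a multiple of 64, with at least 64 pixels of slack beyond w */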
assert( dst_stride * h <= 0x1000 );
pixel *src1 = pbuf1 + X264_MAX(0, -src_stride) * (h-1);
memset( pbuf3, 0, 0x1000*sizeof(pixel) );
if( memcmp( pbuf3+y*dst_stride, pbuf4+y*dst_stride, w*sizeof(pixel) ) )
{
ok = 0;
- fprintf( stderr, "plane_copy FAILED: w=%d h=%d stride=%d\n", w, h, src_stride );
+ fprintf( stderr, "plane_copy FAILED: w=%d h=%d stride=%d\n", w, h, (int)src_stride );
break;
}
}
{
int w = (plane_specs[i].w + 1) >> 1;
int h = plane_specs[i].h;
- int src_stride = (plane_specs[i].src_stride + 1) >> 1;
- int dst_stride = (2*w + 127) & ~63;
+ intptr_t src_stride = (plane_specs[i].src_stride + 1) >> 1;
+ intptr_t dst_stride = (2*w + 127) & ~63;
assert( dst_stride * h <= 0x1000 );
pixel *src1 = pbuf1 + X264_MAX(0, -src_stride) * (h-1);
memset( pbuf3, 0, 0x1000*sizeof(pixel) );
if( memcmp( pbuf3+y*dst_stride, pbuf4+y*dst_stride, 2*w*sizeof(pixel) ) )
{
ok = 0;
- fprintf( stderr, "plane_copy_interleave FAILED: w=%d h=%d stride=%d\n", w, h, src_stride );
+ fprintf( stderr, "plane_copy_interleave FAILED: w=%d h=%d stride=%d\n", w, h, (int)src_stride );
break;
}
}
{
int w = (plane_specs[i].w + 1) >> 1;
int h = plane_specs[i].h;
- int dst_stride = w;
- int src_stride = (2*w + 127) & ~63;
- int offv = (dst_stride*h + 31) & ~15;
+ intptr_t dst_stride = w;
+ intptr_t src_stride = (2*w + 127) & ~63;
+ intptr_t offv = (dst_stride*h + 31) & ~15;
memset( pbuf3, 0, 0x1000 );
memset( pbuf4, 0, 0x1000 );
call_c( mc_c.plane_copy_deinterleave, pbuf3, dst_stride, pbuf3+offv, dst_stride, pbuf1, src_stride, w, h );
memcmp( pbuf3+y*dst_stride+offv, pbuf4+y*dst_stride+offv, w ) )
{
ok = 0;
- fprintf( stderr, "plane_copy_deinterleave FAILED: w=%d h=%d stride=%d\n", w, h, src_stride );
+ fprintf( stderr, "plane_copy_deinterleave FAILED: w=%d h=%d stride=%d\n", w, h, (int)src_stride );
break;
}
}
ok = 1; used_asm = 1;
memset( pbuf3, 0, 4096 * sizeof(pixel) );
memset( pbuf4, 0, 4096 * sizeof(pixel) );
- call_c( mc_c.hpel_filter, dstc[0], dstc[1], dstc[2], srchpel, 64, 48, 10, tmp );
- call_a( mc_a.hpel_filter, dsta[0], dsta[1], dsta[2], srchpel, 64, 48, 10, tmp );
+ call_c( mc_c.hpel_filter, dstc[0], dstc[1], dstc[2], srchpel, (intptr_t)64, 48, 10, tmp );
+ call_a( mc_a.hpel_filter, dsta[0], dsta[1], dsta[2], srchpel, (intptr_t)64, 48, 10, tmp );
for( int i = 0; i < 3; i++ )
for( int j = 0; j < 10; j++ )
//FIXME ideally the first pixels would match too, but they aren't actually used
pixel *dsta[4] = { pbuf4, pbuf4+1024, pbuf4+2048, pbuf4+3072 };
set_func_name( "lowres_init" );
ok = 1; used_asm = 1;
- for( int w = 40; w <= 48; w += 8 )
+ for( int w = 96; w <= 96+24; w += 8 )
{
- int stride = (w+8)&~15;
- call_c( mc_c.frame_init_lowres_core, pbuf1, dstc[0], dstc[1], dstc[2], dstc[3], w*2, stride, w, 16 );
- call_a( mc_a.frame_init_lowres_core, pbuf1, dsta[0], dsta[1], dsta[2], dsta[3], w*2, stride, w, 16 );
- for( int i = 0; i < 16; i++ )
+ intptr_t stride = (w*2+31)&~31;
+ intptr_t stride_lowres = (w+31)&~31;
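+        /* the lowres planes are half the source width, so they use a separate, smaller stride */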
+ call_c( mc_c.frame_init_lowres_core, pbuf1, dstc[0], dstc[1], dstc[2], dstc[3], stride, stride_lowres, w, 8 );
+ call_a( mc_a.frame_init_lowres_core, pbuf1, dsta[0], dsta[1], dsta[2], dsta[3], stride, stride_lowres, w, 8 );
+ for( int i = 0; i < 8; i++ )
{
for( int j = 0; j < 4; j++ )
- if( memcmp( dstc[j]+i*stride, dsta[j]+i*stride, w * sizeof(pixel) ) )
+ if( memcmp( dstc[j]+i*stride_lowres, dsta[j]+i*stride_lowres, w * sizeof(pixel) ) )
{
ok = 0;
fprintf( stderr, "frame_init_lowres differs at plane %d line %d\n", j, i );
for( int k = 0; k < w; k++ )
- printf( "%d ", dstc[j][k+i*stride] );
+ printf( "%d ", dstc[j][k+i*stride_lowres] );
printf( "\n" );
for( int k = 0; k < w; k++ )
- printf( "%d ", dsta[j][k+i*stride] );
+ printf( "%d ", dsta[j][k+i*stride_lowres] );
printf( "\n" );
break;
}
#define INTEGRAL_INIT( name, size, ... )\
if( mc_a.name != mc_ref.name )\
{\
- int stride = 80;\
+ intptr_t stride = 96;\
set_func_name( #name );\
used_asm = 1;\
memcpy( buf3, buf1, size*2*stride );\
{
set_func_name( "memcpy_aligned" );
ok = 1; used_asm = 1;
- for( int size = 16; size < 256; size += 16 )
+ for( size_t size = 16; size < 256; size += 16 )
{
memset( buf4, 0xAA, size + 1 );
call_c( mc_c.memcpy_aligned, buf3, buf1, size );
if( memcmp( buf3, buf4, size ) || buf4[size] != 0xAA )
{
ok = 0;
- fprintf( stderr, "memcpy_aligned FAILED: size=%d\n", size );
+ fprintf( stderr, "memcpy_aligned FAILED: size=%d\n", (int)size );
break;
}
}
{
set_func_name( "memzero_aligned" );
ok = 1; used_asm = 1;
- for( int size = 128; size < 1024; size += 128 )
+ for( size_t size = 128; size < 1024; size += 128 )
{
memset( buf4, 0xAA, size + 1 );
call_c( mc_c.memzero_aligned, buf3, size );
if( memcmp( buf3, buf4, size ) || buf4[size] != 0xAA )
{
ok = 0;
- fprintf( stderr, "memzero_aligned FAILED: size=%d\n", size );
+ fprintf( stderr, "memzero_aligned FAILED: size=%d\n", (int)size );
break;
}
}
#define TEST_DEBLOCK( name, align, ... ) \
for( int i = 0; i < 36; i++ ) \
{ \
- int off = 8*32 + (i&15)*4*!align; /* benchmark various alignments of h filter */ \
+ intptr_t off = 8*32 + (i&15)*4*!align; /* benchmark various alignments of h filter */ \
for( int j = 0; j < 1024; j++ ) \
        /* two distributions of random numbers to exercise different failure modes */ \
pbuf3[j] = rand() & (i&1 ? 0xf : PIXEL_MAX ); \
{ \
set_func_name( #name ); \
used_asm = 1; \
- call_c1( db_c.name, pbuf3+off, 32, alphas[i], betas[i], ##__VA_ARGS__ ); \
- call_a1( db_a.name, pbuf4+off, 32, alphas[i], betas[i], ##__VA_ARGS__ ); \
+ call_c1( db_c.name, pbuf3+off, (intptr_t)32, alphas[i], betas[i], ##__VA_ARGS__ ); \
+ call_a1( db_a.name, pbuf4+off, (intptr_t)32, alphas[i], betas[i], ##__VA_ARGS__ ); \
if( memcmp( pbuf3, pbuf4, 1024 * sizeof(pixel) ) ) \
{ \
ok = 0; \
fprintf( stderr, #name "(a=%d, b=%d): [FAILED]\n", alphas[i], betas[i] ); \
break; \
} \
- call_c2( db_c.name, pbuf3+off, 32, alphas[i], betas[i], ##__VA_ARGS__ ); \
- call_a2( db_a.name, pbuf4+off, 32, alphas[i], betas[i], ##__VA_ARGS__ ); \
+ call_c2( db_c.name, pbuf3+off, (intptr_t)32, alphas[i], betas[i], ##__VA_ARGS__ ); \
+ call_a2( db_a.name, pbuf4+off, (intptr_t)32, alphas[i], betas[i], ##__VA_ARGS__ ); \
} \
}
ALIGNED_ARRAY_16( uint8_t, nnz, [X264_SCAN8_SIZE] );
ALIGNED_4( int8_t ref[2][X264_SCAN8_LUMA_SIZE] );
ALIGNED_ARRAY_16( int16_t, mv, [2],[X264_SCAN8_LUMA_SIZE][2] );
- ALIGNED_ARRAY_16( uint8_t, bs, [2],[2][8][4] );
- memset( bs, 99, sizeof(bs) );
+ ALIGNED_ARRAY_N( uint8_t, bs, [2],[2][8][4] );
+ memset( bs, 99, sizeof(uint8_t)*2*4*8*2 );
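+    /* spell out the byte count: ALIGNED_ARRAY_N may declare bs through a pointer, making sizeof(bs) unreliable */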
for( int j = 0; j < X264_SCAN8_SIZE; j++ )
nnz[j] = ((rand()&7) == 7) * rand() & 0xf;
for( int j = 0; j < 2; j++ )
set_func_name( "deblock_strength" );
call_c( db_c.deblock_strength, nnz, ref, mv, bs[0], 2<<(i&1), ((i>>1)&1) );
call_a( db_a.deblock_strength, nnz, ref, mv, bs[1], 2<<(i&1), ((i>>1)&1) );
- if( memcmp( bs[0], bs[1], sizeof(bs[0]) ) )
+ if( memcmp( bs[0], bs[1], sizeof(uint8_t)*2*4*8 ) )
{
ok = 0;
fprintf( stderr, "deblock_strength: [FAILED]\n" );
x264_quant_function_t qf_c;
x264_quant_function_t qf_ref;
x264_quant_function_t qf_a;
- ALIGNED_16( dctcoef dct1[64] );
- ALIGNED_16( dctcoef dct2[64] );
- ALIGNED_16( dctcoef dct3[8][16] );
- ALIGNED_16( dctcoef dct4[8][16] );
- ALIGNED_16( uint8_t cqm_buf[64] );
+ ALIGNED_ARRAY_N( dctcoef, dct1,[64] );
+ ALIGNED_ARRAY_N( dctcoef, dct2,[64] );
+ ALIGNED_ARRAY_N( dctcoef, dct3,[8],[16] );
+ ALIGNED_ARRAY_N( dctcoef, dct4,[8],[16] );
+ ALIGNED_ARRAY_N( uint8_t, cqm_buf,[64] );
int ret = 0, ok, used_asm;
int oks[3] = {1,1,1}, used_asms[3] = {0,0,0};
x264_t h_buf;
x264_quant_init( h, cpu_ref, &qf_ref );
x264_quant_init( h, cpu_new, &qf_a );
-#define INIT_QUANT8(j) \
+#define INIT_QUANT8(j,max) \
{ \
static const int scale1d[8] = {32,31,24,31,32,31,24,31}; \
- for( int i = 0; i < 64; i++ ) \
+ for( int i = 0; i < max; i++ ) \
{ \
- unsigned int scale = (255*scale1d[i>>3]*scale1d[i&7])/16; \
- dct1[i] = dct2[i] = j ? (rand()%(2*scale+1))-scale : 0; \
+ unsigned int scale = (255*scale1d[(i>>3)&7]*scale1d[i&7])/16; \
+ dct1[i] = dct2[i] = (j>>(i>>6))&1 ? (rand()%(2*scale+1))-scale : 0; \
} \
}
-#define INIT_QUANT4(j) \
+#define INIT_QUANT4(j,max) \
{ \
static const int scale1d[4] = {4,6,4,6}; \
- for( int i = 0; i < 16; i++ ) \
+ for( int i = 0; i < max; i++ ) \
{ \
- unsigned int scale = 255*scale1d[i>>2]*scale1d[i&3]; \
- dct1[i] = dct2[i] = j ? (rand()%(2*scale+1))-scale : 0; \
+ unsigned int scale = 255*scale1d[(i>>2)&3]*scale1d[i&3]; \
+ dct1[i] = dct2[i] = (j>>(i>>4))&1 ? (rand()%(2*scale+1))-scale : 0; \
} \
}
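+/* j is a bitmask of which sub-blocks get nonzero coefficients, so quant_4x4x4 can see every
+ * combination of empty and non-empty 4x4 blocks */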
} \
}
-#define TEST_QUANT( qname, block, w ) \
+#define TEST_QUANT( qname, block, type, w, maxj ) \
if( qf_a.qname != qf_ref.qname ) \
{ \
set_func_name( #qname ); \
used_asms[0] = 1; \
for( int qp = h->param.rc.i_qp_max; qp >= h->param.rc.i_qp_min; qp-- ) \
{ \
- for( int j = 0; j < 2; j++ ) \
+ for( int j = 0; j < maxj; j++ ) \
{ \
- INIT_QUANT##w(j) \
- int result_c = call_c1( qf_c.qname, dct1, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
- int result_a = call_a1( qf_a.qname, dct2, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
+ INIT_QUANT##type(j, w*w) \
+ int result_c = call_c1( qf_c.qname, (void*)dct1, h->quant##type##_mf[block][qp], h->quant##type##_bias[block][qp] ); \
+ int result_a = call_a1( qf_a.qname, (void*)dct2, h->quant##type##_mf[block][qp], h->quant##type##_bias[block][qp] ); \
if( memcmp( dct1, dct2, w*w*sizeof(dctcoef) ) || result_c != result_a ) \
{ \
oks[0] = 0; \
fprintf( stderr, #qname "(qp=%d, cqm=%d, block="#block"): [FAILED]\n", qp, i_cqm ); \
break; \
} \
- call_c2( qf_c.qname, dct1, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
- call_a2( qf_a.qname, dct2, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
+ call_c2( qf_c.qname, (void*)dct1, h->quant##type##_mf[block][qp], h->quant##type##_bias[block][qp] ); \
+ call_a2( qf_a.qname, (void*)dct2, h->quant##type##_mf[block][qp], h->quant##type##_bias[block][qp] ); \
} \
} \
}
- TEST_QUANT( quant_8x8, CQM_8IY, 8 );
- TEST_QUANT( quant_8x8, CQM_8PY, 8 );
- TEST_QUANT( quant_4x4, CQM_4IY, 4 );
- TEST_QUANT( quant_4x4, CQM_4PY, 4 );
+ TEST_QUANT( quant_8x8, CQM_8IY, 8, 8, 2 );
+ TEST_QUANT( quant_8x8, CQM_8PY, 8, 8, 2 );
+ TEST_QUANT( quant_4x4, CQM_4IY, 4, 4, 2 );
+ TEST_QUANT( quant_4x4, CQM_4PY, 4, 4, 2 );
+ TEST_QUANT( quant_4x4x4, CQM_4IY, 4, 8, 16 );
+ TEST_QUANT( quant_4x4x4, CQM_4PY, 4, 8, 16 );
TEST_QUANT_DC( quant_4x4_dc, **h->quant4_mf[CQM_4IY] );
TEST_QUANT_DC( quant_2x2_dc, **h->quant4_mf[CQM_4IC] );
used_asms[1] = 1; \
for( int qp = h->param.rc.i_qp_max; qp >= h->param.rc.i_qp_min; qp-- ) \
{ \
- INIT_QUANT##w(1) \
+ INIT_QUANT##w(1, w*w) \
qf_c.qname( dct1, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
memcpy( dct2, dct1, w*w*sizeof(dctcoef) ); \
call_c1( qf_c.dqname, dct1, h->dequant##w##_mf[block], qp ); \
memcpy( dct1, buf1, size*sizeof(dctcoef) );
memcpy( dct2, buf1, size*sizeof(dctcoef) );
memcpy( buf3+256, buf3, 256 );
- call_c1( qf_c.denoise_dct, dct1, (uint32_t*)buf3, (udctcoef*)buf2, size );
+ call_c1( qf_c.denoise_dct, dct1, (uint32_t*)buf3, (udctcoef*)buf2, size );
call_a1( qf_a.denoise_dct, dct2, (uint32_t*)(buf3+256), (udctcoef*)buf2, size );
if( memcmp( dct1, dct2, size*sizeof(dctcoef) ) || memcmp( buf3+4, buf3+256+4, (size-1)*sizeof(uint32_t) ) )
ok = 0;
- call_c2( qf_c.denoise_dct, dct1, (uint32_t*)buf3, (udctcoef*)buf2, size );
+ call_c2( qf_c.denoise_dct, dct1, (uint32_t*)buf3, (udctcoef*)buf2, size );
call_a2( qf_a.denoise_dct, dct2, (uint32_t*)(buf3+256), (udctcoef*)buf2, size );
}
}
int result_a = call_a( qf_a.lastname, dct1+ac, &runlevel_a ); \
if( result_c != result_a || runlevel_c.last != runlevel_a.last || \
runlevel_c.mask != runlevel_a.mask || \
- memcmp(runlevel_c.level, runlevel_a.level, sizeof(dctcoef)*result_c) || \
- memcmp(runlevel_c.run, runlevel_a.run, sizeof(uint8_t)*(result_c-1)) ) \
+ memcmp(runlevel_c.level, runlevel_a.level, sizeof(dctcoef)*result_c)) \
{ \
ok = 0; \
fprintf( stderr, #name ": [FAILED]\n" ); \
int ret = 0, ok = 1, used_asm = 0;
ALIGNED_ARRAY_32( pixel, edge,[36] );
ALIGNED_ARRAY_32( pixel, edge2,[36] );
- ALIGNED_16( pixel fdec[FDEC_STRIDE*20] );
+ ALIGNED_ARRAY_32( pixel, fdec,[FDEC_STRIDE*20] );
struct
{
x264_predict_t predict_16x16[4+3];
#define run_cabac_terminal_asm run_cabac_terminal_c
#endif
+extern const uint8_t x264_count_cat_m1[14];
+void x264_cabac_block_residual_c( x264_t *h, x264_cabac_t *cb, int ctx_block_cat, dctcoef *l );
+void x264_cabac_block_residual_8x8_rd_c( x264_t *h, x264_cabac_t *cb, int ctx_block_cat, dctcoef *l );
+void x264_cabac_block_residual_rd_c( x264_t *h, x264_cabac_t *cb, int ctx_block_cat, dctcoef *l );
+
static int check_cabac( int cpu_ref, int cpu_new )
{
- int ret = 0, ok, used_asm = 1;
+ int ret = 0, ok = 1, used_asm = 0;
x264_t h;
h.sps->i_chroma_format_idc = 3;
+
+ x264_bitstream_function_t bs_ref;
+ x264_bitstream_function_t bs_a;
+ x264_bitstream_init( cpu_ref, &bs_ref );
+ x264_bitstream_init( cpu_new, &bs_a );
+ x264_quant_init( &h, cpu_new, &h.quantf );
+ h.quantf.coeff_last[DCT_CHROMA_DC] = h.quantf.coeff_last4;
+
+#define CABAC_RESIDUAL(name, start, end, rd)\
+{\
+ if( bs_a.name##_internal && (bs_a.name##_internal != bs_ref.name##_internal || (cpu_new&X264_CPU_SSE2_IS_SLOW)) )\
+ {\
+ used_asm = 1;\
+ set_func_name( #name );\
+ for( int i = 0; i < 2; i++ )\
+ {\
+ for( intptr_t ctx_block_cat = start; ctx_block_cat <= end; ctx_block_cat++ )\
+ {\
+ for( int j = 0; j < 256; j++ )\
+ {\
+ ALIGNED_ARRAY_N( dctcoef, dct, [2],[64] );\
+ uint8_t bitstream[2][1<<16];\
+ static const uint8_t ctx_ac[14] = {0,1,0,0,1,0,0,1,0,0,0,1,0,0};\
+ int ac = ctx_ac[ctx_block_cat];\
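+                    /* AC-only block categories skip the DC coefficient, so their coefficients start at index 1 */\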
+ int nz = 0;\
+ while( !nz )\
+ {\
+ for( int k = 0; k <= x264_count_cat_m1[ctx_block_cat]; k++ )\
+ {\
+ /* Very rough distribution that covers possible inputs */\
+ int rnd = rand();\
+ int coef = !(rnd&3);\
+ coef += !(rnd& 15) * (rand()&0x0006);\
+ coef += !(rnd& 63) * (rand()&0x0008);\
+ coef += !(rnd& 255) * (rand()&0x00F0);\
+ coef += !(rnd&1023) * (rand()&0x7F00);\
+ nz |= dct[0][ac+k] = dct[1][ac+k] = coef * ((rand()&1) ? 1 : -1);\
+ }\
+ }\
+ h.mb.b_interlaced = i;\
+ x264_cabac_t cb[2];\
+ x264_cabac_context_init( &h, &cb[0], SLICE_TYPE_P, 26, 0 );\
+ x264_cabac_context_init( &h, &cb[1], SLICE_TYPE_P, 26, 0 );\
+ x264_cabac_encode_init( &cb[0], bitstream[0], bitstream[0]+0xfff0 );\
+ x264_cabac_encode_init( &cb[1], bitstream[1], bitstream[1]+0xfff0 );\
+ cb[0].f8_bits_encoded = 0;\
+ cb[1].f8_bits_encoded = 0;\
+ if( !rd ) memcpy( bitstream[1], bitstream[0], 0x400 );\
+ call_c1( x264_##name##_c, &h, &cb[0], ctx_block_cat, dct[0]+ac );\
+ call_a1( bs_a.name##_internal, dct[1]+ac, i, ctx_block_cat, &cb[1] );\
+ ok = cb[0].f8_bits_encoded == cb[1].f8_bits_encoded && !memcmp(cb[0].state, cb[1].state, 1024);\
+ if( !rd ) ok |= !memcmp( bitstream[1], bitstream[0], 0x400 ) && !memcmp( &cb[1], &cb[0], offsetof(x264_cabac_t, p_start) );\
+ if( !ok )\
+ {\
+ fprintf( stderr, #name " : [FAILED] ctx_block_cat %d", (int)ctx_block_cat );\
+ if( rd && cb[0].f8_bits_encoded != cb[1].f8_bits_encoded )\
+ fprintf( stderr, " (%d != %d)", cb[0].f8_bits_encoded, cb[1].f8_bits_encoded );\
+ fprintf( stderr, "\n");\
+ goto name##fail;\
+ }\
+ if( (j&15) == 0 )\
+ {\
+ call_c2( x264_##name##_c, &h, &cb[0], ctx_block_cat, dct[0]+ac );\
+ call_a2( bs_a.name##_internal, dct[1]+ac, i, ctx_block_cat, &cb[1] );\
+ }\
+ }\
+ }\
+ }\
+ }\
+}\
+name##fail:
+
+ CABAC_RESIDUAL( cabac_block_residual, 0, DCT_LUMA_8x8, 0 )
+ report( "cabac residual:" );
+
+ ok = 1; used_asm = 0;
+ CABAC_RESIDUAL( cabac_block_residual_rd, 0, DCT_LUMA_8x8-1, 1 )
+ CABAC_RESIDUAL( cabac_block_residual_8x8_rd, DCT_LUMA_8x8, DCT_LUMA_8x8, 1 )
+ report( "cabac residual rd:" );
+
if( cpu_ref || run_cabac_decision_c == run_cabac_decision_asm )
- return 0;
+ return ret;
+ ok = 1; used_asm = 0;
x264_cabac_init( &h );
set_func_name( "cabac_encode_decision" );
ret |= add_flags( &cpu0, &cpu1, X264_CPU_SLOW_CTZ, "MMX SlowCTZ" );
cpu1 &= ~X264_CPU_SLOW_CTZ;
}
+ if( x264_cpu_detect() & X264_CPU_SSE )
+ ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE, "SSE" );
if( x264_cpu_detect() & X264_CPU_SSE2 )
{
- ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE | X264_CPU_SSE2 | X264_CPU_SSE2_IS_SLOW, "SSE2Slow" );
+ ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE2 | X264_CPU_SSE2_IS_SLOW, "SSE2Slow" );
ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE2_IS_FAST, "SSE2Fast" );
ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "SSE2Fast Cache64" );
cpu1 &= ~X264_CPU_CACHELINE_64;
- ret |= add_flags( &cpu0, &cpu1, X264_CPU_SHUFFLE_IS_FAST, "SSE2 FastShuffle" );
- cpu1 &= ~X264_CPU_SHUFFLE_IS_FAST;
+ ret |= add_flags( &cpu0, &cpu1, X264_CPU_SLOW_SHUFFLE, "SSE2 SlowShuffle" );
+ cpu1 &= ~X264_CPU_SLOW_SHUFFLE;
ret |= add_flags( &cpu0, &cpu1, X264_CPU_SLOW_CTZ, "SSE2 SlowCTZ" );
cpu1 &= ~X264_CPU_SLOW_CTZ;
- ret |= add_flags( &cpu0, &cpu1, X264_CPU_SLOW_ATOM, "SSE2 SlowAtom" );
- cpu1 &= ~X264_CPU_SLOW_ATOM;
}
if( x264_cpu_detect() & X264_CPU_SSE_MISALIGN )
{
ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSSE3, "SSSE3" );
ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "SSSE3 Cache64" );
cpu1 &= ~X264_CPU_CACHELINE_64;
- ret |= add_flags( &cpu0, &cpu1, X264_CPU_SHUFFLE_IS_FAST, "SSSE3 FastShuffle" );
- cpu1 &= ~X264_CPU_SHUFFLE_IS_FAST;
+ ret |= add_flags( &cpu0, &cpu1, X264_CPU_SLOW_SHUFFLE, "SSSE3 SlowShuffle" );
+ cpu1 &= ~X264_CPU_SLOW_SHUFFLE;
ret |= add_flags( &cpu0, &cpu1, X264_CPU_SLOW_CTZ, "SSSE3 SlowCTZ" );
cpu1 &= ~X264_CPU_SLOW_CTZ;
ret |= add_flags( &cpu0, &cpu1, X264_CPU_SLOW_ATOM, "SSSE3 SlowAtom" );
+ ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "SSSE3 Cache64 SlowAtom" );
+ cpu1 &= ~X264_CPU_CACHELINE_64;
cpu1 &= ~X264_CPU_SLOW_ATOM;
}
if( x264_cpu_detect() & X264_CPU_SSE4 )
- ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE4 | X264_CPU_SHUFFLE_IS_FAST, "SSE4" );
+ ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE4, "SSE4" );
if( x264_cpu_detect() & X264_CPU_AVX )
ret |= add_flags( &cpu0, &cpu1, X264_CPU_AVX, "AVX" );
if( x264_cpu_detect() & X264_CPU_XOP )
ret |= add_flags( &cpu0, &cpu1, X264_CPU_XOP, "XOP" );
if( x264_cpu_detect() & X264_CPU_FMA4 )
+ {
ret |= add_flags( &cpu0, &cpu1, X264_CPU_FMA4, "FMA4" );
+ cpu1 &= ~X264_CPU_FMA4;
+ }
+ if( x264_cpu_detect() & X264_CPU_BMI1 )
+ {
+ ret |= add_flags( &cpu0, &cpu1, X264_CPU_BMI1, "BMI1" );
+ cpu1 &= ~X264_CPU_BMI1;
+ }
+ if( x264_cpu_detect() & X264_CPU_AVX2 )
+ {
+ ret |= add_flags( &cpu0, &cpu1, X264_CPU_AVX2, "AVX2" );
+ if( x264_cpu_detect() & X264_CPU_LZCNT )
+ {
+ ret |= add_flags( &cpu0, &cpu1, X264_CPU_LZCNT, "AVX2_LZCNT" );
+ cpu1 &= ~X264_CPU_LZCNT;
+ }
+ }
+ if( x264_cpu_detect() & X264_CPU_BMI2 )
+ {
+ ret |= add_flags( &cpu0, &cpu1, X264_CPU_BMI1|X264_CPU_BMI2, "BMI2" );
+ cpu1 &= ~(X264_CPU_BMI1|X264_CPU_BMI2);
+ }
+ if( x264_cpu_detect() & X264_CPU_FMA3 )
+ {
+ ret |= add_flags( &cpu0, &cpu1, X264_CPU_FMA3, "FMA3" );
+ cpu1 &= ~X264_CPU_FMA3;
+ }
#elif ARCH_PPC
if( x264_cpu_detect() & X264_CPU_ALTIVEC )
{
fprintf( stderr, "x264: using random seed %u\n", seed );
srand( seed );
- buf1 = x264_malloc( 0x1e00 + 0x2000*sizeof(pixel) + 16*BENCH_ALIGNS );
- pbuf1 = x264_malloc( 0x1e00*sizeof(pixel) + 16*BENCH_ALIGNS );
+ buf1 = x264_malloc( 0x1e00 + 0x2000*sizeof(pixel) + 32*BENCH_ALIGNS );
+ pbuf1 = x264_malloc( 0x1e00*sizeof(pixel) + 32*BENCH_ALIGNS );
if( !buf1 || !pbuf1 )
{
fprintf( stderr, "malloc failed, unable to initiate tests!\n" );
}
memset( buf1+0x1e00, 0, 0x2000*sizeof(pixel) );
- /* 16-byte alignment is guaranteed whenever it's useful, but some functions also vary in speed depending on %64 */
+ /* 32-byte alignment is guaranteed whenever it's useful, but some functions also vary in speed depending on %64 */
if( do_bench )
for( int i = 0; i < BENCH_ALIGNS && !ret; i++ )
{
INIT_POINTER_OFFSETS;
- ret |= x264_stack_pagealign( check_all_flags, i*16 );
- buf1 += 16;
- pbuf1 += 16;
+ ret |= x264_stack_pagealign( check_all_flags, i*32 );
+ buf1 += 32;
+ pbuf1 += 32;
quiet = 1;
fprintf( stderr, "%d/%d\r", i+1, BENCH_ALIGNS );
}
else
- ret = check_all_flags();
+ ret = x264_stack_pagealign( check_all_flags, 0 );
if( ret )
{