#include "common/common.h"
#include "common/cpu.h"

/* buf1, buf2: initialised to random data; the tests must not write into them */
uint8_t * buf1, * buf2;
/* buf3, buf4: used to store output */
uint8_t * buf3, * buf4;

int quiet = 0;
#define report( name ) { \
    if( used_asm && !quiet ) \
        fprintf( stderr, " - %-21s [%s]\n", name, ok ? "OK" : "FAILED" ); \
    if( !ok ) ret = -1; \
}
#define BENCH_RUNS 100  // tradeoff between accuracy and speed
#define BENCH_ALIGNS 16 // number of stack+heap data alignments (another accuracy vs speed tradeoff)
#define MAX_FUNCS 1000  // just has to be big enough to hold all the existing functions
#define MAX_CPUS 10     // number of different combinations of cpu flags
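
/* Benchmark bookkeeping: every benchmarked function gets one bench_func_t
 * slot, and each combination of cpu flags it was timed under gets one
 * bench_t slot within it. */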
typedef struct {
    void *pointer; // just for detecting duplicates
    uint32_t cpu;
    uint32_t cycles;
    uint32_t den;
} bench_t;

typedef struct {
    char *name;
    bench_t vers[MAX_CPUS];
} bench_func_t;
int do_bench = 0;
int bench_pattern_len = 0;
const char *bench_pattern = "";
char func_name[100];
static bench_func_t benchs[MAX_FUNCS];
static const char *pixel_names[10] = { "16x16", "16x8", "8x16", "8x8", "8x4", "4x8", "4x4", "4x2", "2x4", "2x2" };
static const char *intra_predict_16x16_names[7] = { "v", "h", "dc", "p", "dcl", "dct", "dc8" };
static const char *intra_predict_8x8c_names[7] = { "dc", "h", "v", "p", "dcl", "dct", "dc8" };
static const char *intra_predict_4x4_names[12] = { "v", "h", "dc", "ddl", "ddr", "vr", "hd", "vl", "hu", "dcl", "dct", "dc8" };
static const char **intra_predict_8x8_names = intra_predict_4x4_names;
#define set_func_name(...) snprintf( func_name, sizeof(func_name), __VA_ARGS__ )
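
/* Timing uses the low 32 bits of the x86 timestamp counter; individual runs
 * are short enough that wraparound doesn't matter. On other architectures
 * this is a stub, which is why --bench is x86-only (see main). */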
static inline uint32_t read_time(void)
{
#if defined(__GNUC__) && (defined(ARCH_X86) || defined(ARCH_X86_64))
    uint32_t a;
    asm volatile( "rdtsc" :"=a"(a) ::"edx" );
    return a;
#else
    return 0;
#endif
}
static bench_t* get_bench( const char *name, int cpu )
{
    int i, j;
    for( i=0; benchs[i].name && strcmp(name, benchs[i].name); i++ )
        assert( i < MAX_FUNCS );
    if( !benchs[i].name )
        benchs[i].name = strdup( name );
    if( !cpu )
        return &benchs[i].vers[0];
    for( j=1; benchs[i].vers[j].cpu && benchs[i].vers[j].cpu != cpu; j++ )
        assert( j < MAX_CPUS );
    benchs[i].vers[j].cpu = cpu;
    return &benchs[i].vers[j];
}
int cmp_nop( const void *a, const void *b )
{
    return *(uint16_t*)a - *(uint16_t*)b;
}
int cmp_bench( const void *a, const void *b )
{
    // asciibetical sort except preserving numbers
    const char *sa = ((bench_func_t*)a)->name;
    const char *sb = ((bench_func_t*)b)->name;
    for( ;; sa++, sb++ )
    {
        if( !*sa && !*sb ) return 0;
        if( isdigit(*sa) && isdigit(*sb) && isdigit(sa[1]) != isdigit(sb[1]) )
            return isdigit(sa[1]) - isdigit(sb[1]);
        if( *sa != *sb ) return *sa - *sb;
    }
}
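
/* Before printing results, measure the overhead of the timing itself: take
 * several thousand empty read_time() pairs, sort them, and average the middle
 * 90% so that samples which caught an interrupt are discarded. */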
static void print_bench(void)
{
    uint16_t nops[10000] = {0};
    int i, j, k, nfuncs, nop_time=0;

    for( i=0; i<10000; i++ )
    {
        int t = read_time();
        nops[i] = read_time() - t;
    }
    qsort( nops, 10000, sizeof(uint16_t), cmp_nop );
    for( i=500; i<9500; i++ ) // drop the top and bottom 5% as outliers
        nop_time += nops[i];
    nop_time /= 900;
    printf( "nop: %d\n", nop_time );
    for( i=0; i<MAX_FUNCS && benchs[i].name; i++ );
    nfuncs = i;
    qsort( benchs, nfuncs, sizeof(bench_func_t), cmp_bench );
    for( i=0; i<nfuncs; i++ )
        for( j=0; j<MAX_CPUS && (!j || benchs[i].vers[j].cpu); j++ )
        {
            bench_t *b = &benchs[i].vers[j];
            if( !b->den ) continue;
            for( k=0; k<j && benchs[i].vers[k].pointer != b->pointer; k++ );
            if( k<j ) continue; // same asm version was reused for this cpu; skip the duplicate
            printf( "%s_%s%s: %"PRId64"\n", benchs[i].name,
                    b->cpu&X264_CPU_PHADD_IS_FAST ? "phadd" :
                    b->cpu&X264_CPU_SSSE3 ? "ssse3" :
                    b->cpu&X264_CPU_SSE3 ? "sse3" :
                    /* print sse2slow only if there's also a sse2fast version of the same func */
                    b->cpu&X264_CPU_SSE2_IS_SLOW && j<MAX_CPUS-1 && b[1].cpu&X264_CPU_SSE2_IS_FAST && !(b[1].cpu&X264_CPU_SSE3) ? "sse2slow" :
                    b->cpu&X264_CPU_SSE2 ? "sse2" :
                    b->cpu&X264_CPU_MMX ? "mmx" : "c",
                    b->cpu&X264_CPU_CACHELINE_32 ? "_c32" :
                    b->cpu&X264_CPU_CACHELINE_64 ? "_c64" : "",
                    ((int64_t)10*b->cycles/b->den - nop_time)/4 );
        }
}
#if defined(ARCH_X86) || defined(ARCH_X86_64)
int x264_stack_pagealign( int (*func)(), int align );
#else
#define x264_stack_pagealign( func, align ) func()
#endif
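
/* call_c/call_a wrappers: the *1 variants run the function once for the
 * correctness check, the *2 variants run the benchmark loop, and the plain
 * variants do both. call_a1 additionally routes through x264_checkasm_call
 * on x86 to catch clobbered callee-saved registers. */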
#define call_c1(func,...) func(__VA_ARGS__)
#ifdef ARCH_X86
/* detect when callee-saved regs aren't saved.
 * needs an explicit asm check because it only sometimes crashes in normal use. */
long x264_checkasm_call( long (*func)(), int *ok, ... );
#define call_a1(func,...) x264_checkasm_call((long(*)())func, &ok, __VA_ARGS__)
#else
#define call_a1 call_c1
#endif
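
/* Time each function in batches of 4 back-to-back calls, and discard any
 * batch that took more than 4x the running average (i.e. one that was
 * interrupted); get_bench() accumulates the totals per function and per
 * cpu version. */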
#define call_bench(func,cpu,...)\
    if( do_bench && !strncmp(func_name, bench_pattern, bench_pattern_len) )\
    {\
        uint32_t tsum = 0;\
        int tcount = 0;\
        int ti;\
        call_a1(func, __VA_ARGS__);\
        for( ti=0; ti<(cpu?BENCH_RUNS:BENCH_RUNS/4); ti++ )\
        {\
            uint32_t t = read_time();\
            func(__VA_ARGS__);\
            func(__VA_ARGS__);\
            func(__VA_ARGS__);\
            func(__VA_ARGS__);\
            t = read_time() - t;\
            if( t*tcount <= tsum*4 && ti > 0 )\
            {\
                tsum += t;\
                tcount++;\
            }\
        }\
        bench_t *b = get_bench( func_name, cpu );\
        b->cycles += tsum;\
        b->den += tcount;\
        b->pointer = func;\
    }
/* for most functions, run benchmark and correctness test at the same time.
 * for those that modify their inputs, run the above macros separately */
#define call_a(func,...) ({ call_a2(func,__VA_ARGS__); call_a1(func,__VA_ARGS__); })
#define call_c(func,...) ({ call_c2(func,__VA_ARGS__); call_c1(func,__VA_ARGS__); })
#define call_a2(func,...) ({ call_bench(func,cpu_new,__VA_ARGS__); })
#define call_c2(func,...) ({ call_bench(func,0,__VA_ARGS__); })
static int check_pixel( int cpu_ref, int cpu_new )
{
    x264_pixel_function_t pixel_c;
    x264_pixel_function_t pixel_ref;
    x264_pixel_function_t pixel_asm;
    x264_predict_t predict_16x16[4+3];
    x264_predict_t predict_8x8c[4+3];
    x264_predict_t predict_4x4[9+3];
    x264_predict8x8_t predict_8x8[9+3];
    DECLARE_ALIGNED_16( uint8_t edge[33] );
    uint16_t cost_mv[32];
    int ret = 0, ok, used_asm;
    int i, j;

    x264_pixel_init( 0, &pixel_c );
    x264_pixel_init( cpu_ref, &pixel_ref );
    x264_pixel_init( cpu_new, &pixel_asm );
    x264_predict_16x16_init( 0, predict_16x16 );
    x264_predict_8x8c_init( 0, predict_8x8c );
    x264_predict_8x8_init( 0, predict_8x8 );
    x264_predict_4x4_init( 0, predict_4x4 );
    x264_predict_8x8_filter( buf2+40, edge, ALL_NEIGHBORS, ALL_NEIGHBORS );
#define TEST_PIXEL( name, align ) \
    for( i = 0, ok = 1, used_asm = 0; i < 7; i++ ) \
    { \
        int res_c, res_asm; \
        if( pixel_asm.name[i] != pixel_ref.name[i] ) \
        { \
            set_func_name( "%s_%s", #name, pixel_names[i] ); \
            for( j=0; j<64; j++ ) \
            { \
                used_asm = 1; \
                res_c   = call_c( pixel_c.name[i], buf1, 16, buf2+j*!align, 64 ); \
                res_asm = call_a( pixel_asm.name[i], buf1, 16, buf2+j*!align, 64 ); \
                if( res_c != res_asm ) \
                { \
                    ok = 0; \
                    fprintf( stderr, #name "[%d]: %d != %d [FAILED]\n", i, res_c, res_asm ); \
                } \
            } \
        } \
    } \
    report( "pixel " #name " :" );
    TEST_PIXEL( sad, 0 );
    TEST_PIXEL( ssd, 1 );
    TEST_PIXEL( satd, 0 );
    TEST_PIXEL( sa8d, 0 );
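
/* sad_x3/sad_x4 compute SADs against 3 or 4 candidate blocks in one call;
 * check them against the corresponding number of independent single-block
 * C sad calls. */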
#define TEST_PIXEL_X( N ) \
    for( i = 0, ok = 1, used_asm = 0; i < 7; i++ ) \
    { \
        int res_c[4]={0}, res_asm[4]={0}; \
        if( pixel_asm.sad_x##N[i] && pixel_asm.sad_x##N[i] != pixel_ref.sad_x##N[i] ) \
        { \
            set_func_name( "sad_x%d_%s", N, pixel_names[i] ); \
            for( j=0; j<64; j++ ) \
            { \
                uint8_t *pix2 = buf2+j; \
                used_asm = 1; \
                res_c[0] = pixel_c.sad[i]( buf1, 16, pix2, 64 ); \
                res_c[1] = pixel_c.sad[i]( buf1, 16, pix2+6, 64 ); \
                res_c[2] = pixel_c.sad[i]( buf1, 16, pix2+1, 64 ); \
                if( N == 4 ) \
                { \
                    res_c[3] = pixel_c.sad[i]( buf1, 16, pix2+10, 64 ); \
                    call_a( pixel_asm.sad_x4[i], buf1, pix2, pix2+6, pix2+1, pix2+10, 64, res_asm ); \
                } \
                else \
                    call_a( pixel_asm.sad_x3[i], buf1, pix2, pix2+6, pix2+1, 64, res_asm ); \
                if( memcmp(res_c, res_asm, sizeof(res_c)) ) \
                { \
                    ok = 0; \
                    fprintf( stderr, "sad_x"#N"[%d]: %d,%d,%d,%d != %d,%d,%d,%d [FAILED]\n", \
                             i, res_c[0], res_c[1], res_c[2], res_c[3], \
                             res_asm[0], res_asm[1], res_asm[2], res_asm[3] ); \
                } \
                if( N == 4 ) \
                    call_c2( pixel_c.sad_x4[i], buf1, pix2, pix2+6, pix2+1, pix2+10, 64, res_asm ); \
                else \
                    call_c2( pixel_c.sad_x3[i], buf1, pix2, pix2+6, pix2+1, 64, res_asm ); \
            } \
        } \
    } \
    report( "pixel sad_x"#N" :" );

    TEST_PIXEL_X(3);
    TEST_PIXEL_X(4);
#define TEST_INTRA_SATD( name, pred, satd, i8x8, ... ) \
    if( pixel_asm.name && pixel_asm.name != pixel_ref.name ) \
    { \
        int res_c[3], res_asm[3]; \
        set_func_name( #name );\
        used_asm = 1; \
        memcpy( buf3, buf2, 1024 ); \
        for( i=0; i<3; i++ ) \
        { \
            pred[i]( buf3+40, ##__VA_ARGS__ ); \
            res_c[i] = pixel_c.satd( buf1+40, 16, buf3+40, 32 ); \
        } \
        call_a( pixel_asm.name, buf1+40, i8x8 ? edge : buf3+40, res_asm ); \
        if( memcmp(res_c, res_asm, sizeof(res_c)) ) \
        { \
            ok = 0; \
            fprintf( stderr, #name": %d,%d,%d != %d,%d,%d [FAILED]\n", \
                     res_c[0], res_c[1], res_c[2], \
                     res_asm[0], res_asm[1], res_asm[2] ); \
        } \
    }
    ok = 1; used_asm = 0;
    TEST_INTRA_SATD( intra_satd_x3_16x16, predict_16x16, satd[PIXEL_16x16], 0 );
    TEST_INTRA_SATD( intra_satd_x3_8x8c, predict_8x8c, satd[PIXEL_8x8], 0 );
    TEST_INTRA_SATD( intra_satd_x3_4x4, predict_4x4, satd[PIXEL_4x4], 0 );
    TEST_INTRA_SATD( intra_sa8d_x3_8x8, predict_8x8, sa8d[PIXEL_8x8], 1, edge );
    report( "intra satd_x3 :" );
    if( pixel_asm.ssim_4x4x2_core != pixel_ref.ssim_4x4x2_core ||
        pixel_asm.ssim_end4 != pixel_ref.ssim_end4 )
    {
        float res_c, res_a;
        int sums[5][4] = {{0}};
        ok = 1; used_asm = 1;
        x264_emms();
        res_c = x264_pixel_ssim_wxh( &pixel_c,   buf1+2, 32, buf2+2, 32, 32, 28 );
        res_a = x264_pixel_ssim_wxh( &pixel_asm, buf1+2, 32, buf2+2, 32, 32, 28 );
        if( fabs(res_c - res_a) > 1e-6 )
        {
            ok = 0;
            fprintf( stderr, "ssim: %.7f != %.7f [FAILED]\n", res_c, res_a );
        }
        set_func_name( "ssim_core" );
        call_c2( pixel_c.ssim_4x4x2_core,   buf1+2, 32, buf2+2, 32, sums );
        call_a2( pixel_asm.ssim_4x4x2_core, buf1+2, 32, buf2+2, 32, sums );
        set_func_name( "ssim_end" );
        call_c2( pixel_c.ssim_end4,   sums, sums, 4 );
        call_a2( pixel_asm.ssim_end4, sums, sums, 4 );
        report( "ssim :" );
    }
    ok = 1; used_asm = 0;
    for( i=0; i<32; i++ )
        cost_mv[i] = i*10;
    for( i=0; i<100 && ok; i++ )
        if( pixel_asm.ads[i&3] != pixel_ref.ads[i&3] )
        {
            DECLARE_ALIGNED_16( uint16_t sums[72] );
            DECLARE_ALIGNED_16( int dc[4] );
            int16_t mvs_a[32], mvs_c[32];
            int mvn_a, mvn_c;
            int thresh = rand() & 0x3fff;
            set_func_name( "esa_ads" );
            for( j=0; j<72; j++ )
                sums[j] = rand() & 0x3fff;
            for( j=0; j<4; j++ )
                dc[j] = rand() & 0x3fff;
            used_asm = 1;
            mvn_c = call_c( pixel_c.ads[i&3], dc, sums, 32, cost_mv, mvs_c, 28, thresh );
            mvn_a = call_a( pixel_asm.ads[i&3], dc, sums, 32, cost_mv, mvs_a, 28, thresh );
            if( mvn_c != mvn_a || memcmp( mvs_c, mvs_a, mvn_c*sizeof(*mvs_c) ) )
            {
                ok = 0;
                printf("c%d: ", i&3);
                for(j=0; j<mvn_c; j++)
                    printf("%d ", mvs_c[j]);
                printf("\na%d: ", i&3);
                for(j=0; j<mvn_a; j++)
                    printf("%d ", mvs_a[j]);
                printf("\n\n");
            }
        }
    report( "esa ads:" );

    return ret;
}
static int check_dct( int cpu_ref, int cpu_new )
{
    x264_dct_function_t dct_c;
    x264_dct_function_t dct_ref;
    x264_dct_function_t dct_asm;
    x264_quant_function_t qf;
    int ret = 0, ok, used_asm, i, interlace;
    DECLARE_ALIGNED_16( int16_t dct1[16][4][4] );
    DECLARE_ALIGNED_16( int16_t dct2[16][4][4] );
    DECLARE_ALIGNED_16( int16_t dct4[16][4][4] );
    DECLARE_ALIGNED_16( int16_t dct8[4][8][8] );
    x264_t h_buf;
    x264_t *h = &h_buf;

    x264_dct_init( 0, &dct_c );
    x264_dct_init( cpu_ref, &dct_ref );
    x264_dct_init( cpu_new, &dct_asm );

    memset( h, 0, sizeof(*h) );
    h->pps = h->pps_array;
    x264_param_default( &h->param );
    h->param.analyse.i_luma_deadzone[0] = 0;
    h->param.analyse.i_luma_deadzone[1] = 0;
    h->param.analyse.b_transform_8x8 = 1;
    for( i=0; i<6; i++ )
        h->pps->scaling_list[i] = x264_cqm_flat16;
    x264_cqm_init( h );
    x264_quant_init( h, 0, &qf );
#define TEST_DCT( name, t1, t2, size ) \
    if( dct_asm.name != dct_ref.name ) \
    { \
        set_func_name( #name );\
        used_asm = 1; \
        call_c( dct_c.name, t1, buf1, buf2 ); \
        call_a( dct_asm.name, t2, buf1, buf2 ); \
        if( memcmp( t1, t2, size ) ) \
        { \
            ok = 0; \
            fprintf( stderr, #name " [FAILED]\n" ); \
        } \
    }
    ok = 1; used_asm = 0;
    TEST_DCT( sub4x4_dct, dct1[0], dct2[0], 16*2 );
    TEST_DCT( sub8x8_dct, dct1, dct2, 16*2*4 );
    TEST_DCT( sub16x16_dct, dct1, dct2, 16*2*16 );
    report( "sub_dct4 :" );

    ok = 1; used_asm = 0;
    TEST_DCT( sub8x8_dct8, (void*)dct1[0], (void*)dct2[0], 64*2 );
    TEST_DCT( sub16x16_dct8, (void*)dct1, (void*)dct2, 64*2*4 );
    report( "sub_dct8 :" );
    // fdct and idct are denormalized by different factors, so quant/dequant
    // is needed to force the coefs into the right range.
    dct_c.sub16x16_dct( dct4, buf1, buf2 );
    dct_c.sub16x16_dct8( dct8, buf1, buf2 );
    for( i=0; i<16; i++ )
    {
        qf.quant_4x4( dct4[i], h->quant4_mf[CQM_4IY][20], h->quant4_bias[CQM_4IY][20] );
        qf.dequant_4x4( dct4[i], h->dequant4_mf[CQM_4IY], 20 );
    }
    for( i=0; i<4; i++ )
    {
        qf.quant_8x8( dct8[i], h->quant8_mf[CQM_8IY][20], h->quant8_bias[CQM_8IY][20] );
        qf.dequant_8x8( dct8[i], h->dequant8_mf[CQM_8IY], 20 );
    }
#define TEST_IDCT( name, src ) \
    if( dct_asm.name != dct_ref.name ) \
    { \
        set_func_name( #name );\
        used_asm = 1; \
        memcpy( buf3, buf1, 32*32 ); \
        memcpy( buf4, buf1, 32*32 ); \
        memcpy( dct1, src, 512 ); \
        memcpy( dct2, src, 512 ); \
        call_c1( dct_c.name, buf3, (void*)dct1 ); \
        call_a1( dct_asm.name, buf4, (void*)dct2 ); \
        if( memcmp( buf3, buf4, 32*32 ) ) \
        { \
            ok = 0; \
            fprintf( stderr, #name " [FAILED]\n" ); \
        } \
        call_c2( dct_c.name, buf3, (void*)dct1 ); \
        call_a2( dct_asm.name, buf4, (void*)dct2 ); \
    }
    ok = 1; used_asm = 0;
    TEST_IDCT( add4x4_idct, dct4 );
    TEST_IDCT( add8x8_idct, dct4 );
    TEST_IDCT( add16x16_idct, dct4 );
    report( "add_idct4 :" );

    ok = 1; used_asm = 0;
    TEST_IDCT( add8x8_idct8, dct8 );
    TEST_IDCT( add16x16_idct8, dct8 );
    report( "add_idct8 :" );
    ok = 1; used_asm = 0;
    if( dct_asm.dct4x4dc != dct_ref.dct4x4dc )
    {
        DECLARE_ALIGNED_16( int16_t dct1[4][4] ) = {{-12, 42, 23, 67},{2, 90, 89, 56},{67, 43, -76, 91},{56, -78, -54, 1}};
        DECLARE_ALIGNED_16( int16_t dct2[4][4] ) = {{-12, 42, 23, 67},{2, 90, 89, 56},{67, 43, -76, 91},{56, -78, -54, 1}};
        set_func_name( "dct4x4dc" );
        used_asm = 1;
        call_c1( dct_c.dct4x4dc, dct1 );
        call_a1( dct_asm.dct4x4dc, dct2 );
        if( memcmp( dct1, dct2, 32 ) )
        {
            ok = 0;
            fprintf( stderr, " - dct4x4dc : [FAILED]\n" );
        }
        call_c2( dct_c.dct4x4dc, dct1 );
        call_a2( dct_asm.dct4x4dc, dct2 );
    }
    if( dct_asm.idct4x4dc != dct_ref.idct4x4dc )
    {
        DECLARE_ALIGNED_16( int16_t dct1[4][4] ) = {{-12, 42, 23, 67},{2, 90, 89, 56},{67, 43, -76, 91},{56, -78, -54, 1}};
        DECLARE_ALIGNED_16( int16_t dct2[4][4] ) = {{-12, 42, 23, 67},{2, 90, 89, 56},{67, 43, -76, 91},{56, -78, -54, 1}};
        set_func_name( "idct4x4dc" );
        used_asm = 1;
        call_c1( dct_c.idct4x4dc, dct1 );
        call_a1( dct_asm.idct4x4dc, dct2 );
        if( memcmp( dct1, dct2, 32 ) )
        {
            ok = 0;
            fprintf( stderr, " - idct4x4dc : [FAILED]\n" );
        }
        call_c2( dct_c.idct4x4dc, dct1 );
        call_a2( dct_asm.idct4x4dc, dct2 );
    }
    report( "(i)dct4x4dc :" );
    ok = 1; used_asm = 0;
    if( dct_asm.dct2x2dc != dct_ref.dct2x2dc )
    {
        DECLARE_ALIGNED_16( int16_t dct1[2][2] ) = {{-12, 42},{2, 90}};
        DECLARE_ALIGNED_16( int16_t dct2[2][2] ) = {{-12, 42},{2, 90}};
        set_func_name( "dct2x2dc" );
        used_asm = 1;
        call_c( dct_c.dct2x2dc, dct1 );
        call_a( dct_asm.dct2x2dc, dct2 );
        if( memcmp( dct1, dct2, 4*2 ) )
        {
            ok = 0;
            fprintf( stderr, " - dct2x2dc : [FAILED]\n" );
        }
    }
    if( dct_asm.idct2x2dc != dct_ref.idct2x2dc )
    {
        DECLARE_ALIGNED_16( int16_t dct1[2][2] ) = {{-12, 42},{2, 90}};
        DECLARE_ALIGNED_16( int16_t dct2[2][2] ) = {{-12, 42},{2, 90}};
        set_func_name( "idct2x2dc" );
        used_asm = 1;
        call_c( dct_c.idct2x2dc, dct1 );
        call_a( dct_asm.idct2x2dc, dct2 );
        if( memcmp( dct1, dct2, 4*2 ) )
        {
            ok = 0;
            fprintf( stderr, " - idct2x2dc : [FAILED]\n" );
        }
    }
    report( "(i)dct2x2dc :" );
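
    /* Zigzag scans are tested twice: once with the frame-order tables and
     * once with the field-order tables, selected by the interlace flag. */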
    x264_zigzag_function_t zigzag_c;
    x264_zigzag_function_t zigzag_ref;
    x264_zigzag_function_t zigzag_asm;

    DECLARE_ALIGNED_16( int16_t level1[64] );
    DECLARE_ALIGNED_16( int16_t level2[64] );
#define TEST_ZIGZAG_SCAN( name, t1, t2, dct, size ) \
    if( zigzag_asm.name != zigzag_ref.name ) \
    { \
        set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" );\
        used_asm = 1; \
        call_c( zigzag_c.name, t1, dct ); \
        call_a( zigzag_asm.name, t2, dct ); \
        if( memcmp( t1, t2, size*sizeof(int16_t) ) ) \
        { \
            ok = 0; \
            fprintf( stderr, #name " [FAILED]\n" ); \
        } \
    }
#define TEST_ZIGZAG_SUB( name, t1, t2, size ) \
    if( zigzag_asm.name != zigzag_ref.name ) \
    { \
        set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" );\
        used_asm = 1; \
        memcpy( buf3, buf1, 16*FDEC_STRIDE ); \
        memcpy( buf4, buf1, 16*FDEC_STRIDE ); \
        call_c1( zigzag_c.name, t1, buf2, buf3 ); \
        call_a1( zigzag_asm.name, t2, buf2, buf4 ); \
        if( memcmp( t1, t2, size*sizeof(int16_t) ) || memcmp( buf3, buf4, 16*FDEC_STRIDE ) ) \
        { \
            ok = 0; \
            fprintf( stderr, #name " [FAILED]\n" ); \
        } \
        call_c2( zigzag_c.name, t1, buf2, buf3 ); \
        call_a2( zigzag_asm.name, t2, buf2, buf4 ); \
    }
    interlace = 0;
    x264_zigzag_init( 0, &zigzag_c, 0 );
    x264_zigzag_init( cpu_ref, &zigzag_ref, 0 );
    x264_zigzag_init( cpu_new, &zigzag_asm, 0 );

    ok = 1; used_asm = 0;
    TEST_ZIGZAG_SCAN( scan_8x8, level1, level2, (void*)dct1, 64 );
    TEST_ZIGZAG_SCAN( scan_4x4, level1, level2, dct1[0], 16 );
    TEST_ZIGZAG_SUB( sub_4x4, level1, level2, 16 );
    report( "zigzag_frame :" );

    interlace = 1;
    x264_zigzag_init( 0, &zigzag_c, 1 );
    x264_zigzag_init( cpu_ref, &zigzag_ref, 1 );
    x264_zigzag_init( cpu_new, &zigzag_asm, 1 );

    ok = 1; used_asm = 0;
    TEST_ZIGZAG_SCAN( scan_8x8, level1, level2, (void*)dct1, 64 );
    TEST_ZIGZAG_SCAN( scan_4x4, level1, level2, dct1[0], 16 );
    TEST_ZIGZAG_SUB( sub_4x4, level1, level2, 16 );
    report( "zigzag_field :" );
#undef TEST_ZIGZAG_SCAN
#undef TEST_ZIGZAG_SUB

    return ret;
}
static int check_mc( int cpu_ref, int cpu_new )
{
    x264_mc_functions_t mc_c;
    x264_mc_functions_t mc_ref;
    x264_mc_functions_t mc_a;
    x264_pixel_function_t pixel;

    uint8_t *src     = &buf1[2*32+2];
    uint8_t *src2[4] = { &buf1[3*64+2], &buf1[5*64+2],
                         &buf1[7*64+2], &buf1[9*64+2] };
    uint8_t *dst1    = buf3;
    uint8_t *dst2    = buf4;

    int dx, dy, i, j, k, w;
    int ret = 0, ok, used_asm;

    x264_mc_init( 0, &mc_c );
    x264_mc_init( cpu_ref, &mc_ref );
    x264_mc_init( cpu_new, &mc_a );
    x264_pixel_init( 0, &pixel );
#define MC_TEST_LUMA( w, h ) \
    if( mc_a.mc_luma != mc_ref.mc_luma && !(w&(w-1)) && h<=16 ) \
    { \
        set_func_name( "mc_luma_%dx%d", w, h );\
        used_asm = 1; \
        memset(buf3, 0xCD, 1024); \
        memset(buf4, 0xCD, 1024); \
        call_c( mc_c.mc_luma, dst1, 32, src2, 64, dx, dy, w, h ); \
        call_a( mc_a.mc_luma, dst2, 32, src2, 64, dx, dy, w, h ); \
        if( memcmp( buf3, buf4, 1024 ) ) \
        { \
            fprintf( stderr, "mc_luma[mv(%d,%d) %2dx%-2d] [FAILED]\n", dx, dy, w, h ); \
            ok = 0; \
        } \
    } \
    if( mc_a.get_ref != mc_ref.get_ref ) \
    { \
        uint8_t *ref = dst2; \
        int ref_stride = 32; \
        set_func_name( "get_ref_%dx%d", w, h );\
        used_asm = 1; \
        memset(buf3, 0xCD, 1024); \
        memset(buf4, 0xCD, 1024); \
        call_c( mc_c.mc_luma, dst1, 32, src2, 64, dx, dy, w, h ); \
        ref = (uint8_t*) call_a( mc_a.get_ref, ref, &ref_stride, src2, 64, dx, dy, w, h ); \
        for( i=0; i<h; i++ ) \
            if( memcmp( dst1+i*32, ref+i*ref_stride, w ) ) \
            { \
                fprintf( stderr, "get_ref[mv(%d,%d) %2dx%-2d] [FAILED]\n", dx, dy, w, h ); \
                ok = 0; \
                break; \
            } \
    }
#define MC_TEST_CHROMA( w, h ) \
    if( mc_a.mc_chroma != mc_ref.mc_chroma ) \
    { \
        set_func_name( "mc_chroma_%dx%d", w, h );\
        used_asm = 1; \
        memset(buf3, 0xCD, 1024); \
        memset(buf4, 0xCD, 1024); \
        call_c( mc_c.mc_chroma, dst1, 16, src, 32, dx, dy, w, h ); \
        call_a( mc_a.mc_chroma, dst2, 16, src, 32, dx, dy, w, h ); \
        /* mc_chroma width=2 may write garbage to the right of dst. ignore that. */\
        for( j=0; j<h; j++ ) \
            for( i=w; i<4; i++ ) \
                dst2[i+j*16] = dst1[i+j*16]; \
        if( memcmp( buf3, buf4, 1024 ) ) \
        { \
            fprintf( stderr, "mc_chroma[mv(%d,%d) %2dx%-2d] [FAILED]\n", dx, dy, w, h ); \
            ok = 0; \
        } \
    }
    ok = 1; used_asm = 0;
    for( dy = -8; dy < 8; dy++ )
        for( dx = -128; dx < 128; dx++ )
        {
            if( rand()&15 ) continue; // running all of them is too slow
            MC_TEST_LUMA( 20, 18 );
            MC_TEST_LUMA( 16, 16 );
            MC_TEST_LUMA( 16, 8 );
            MC_TEST_LUMA( 12, 10 );
            MC_TEST_LUMA( 8, 16 );
            MC_TEST_LUMA( 8, 8 );
            MC_TEST_LUMA( 8, 4 );
            MC_TEST_LUMA( 4, 8 );
            MC_TEST_LUMA( 4, 4 );
        }
    report( "mc luma :" );

    ok = 1; used_asm = 0;
    for( dy = -1; dy < 9; dy++ )
        for( dx = -1; dx < 9; dx++ )
        {
            MC_TEST_CHROMA( 8, 8 );
            MC_TEST_CHROMA( 8, 4 );
            MC_TEST_CHROMA( 4, 8 );
            MC_TEST_CHROMA( 4, 4 );
            MC_TEST_CHROMA( 4, 2 );
            MC_TEST_CHROMA( 2, 4 );
            MC_TEST_CHROMA( 2, 2 );
        }
    report( "mc chroma :" );
#undef MC_TEST_LUMA
#undef MC_TEST_CHROMA
#define MC_TEST_AVG( name, ... ) \
    for( i = 0, ok = 1, used_asm = 0; i < 10; i++ ) \
    { \
        memcpy( buf3, buf1, 1024 ); \
        memcpy( buf4, buf1, 1024 ); \
        if( mc_a.name[i] != mc_ref.name[i] ) \
        { \
            set_func_name( "%s_%s", #name, pixel_names[i] );\
            used_asm = 1; \
            call_c1( mc_c.name[i], buf3, 32, buf2, 16, ##__VA_ARGS__ ); \
            call_a1( mc_a.name[i], buf4, 32, buf2, 16, ##__VA_ARGS__ ); \
            if( memcmp( buf3, buf4, 1024 ) ) \
            { \
                ok = 0; \
                fprintf( stderr, #name "[%d]: [FAILED]\n", i ); \
            } \
            call_c2( mc_c.name[i], buf3, 32, buf2, 16, ##__VA_ARGS__ ); \
            call_a2( mc_a.name[i], buf4, 32, buf2, 16, ##__VA_ARGS__ ); \
        } \
    }
    MC_TEST_AVG( avg );
    report( "mc avg :" );
    ok = 1; used_asm = 0;
    for( w = -64; w <= 128 && ok; w++ )
        MC_TEST_AVG( avg_weight, w );
    report( "mc wpredb :" );
    if( mc_a.hpel_filter != mc_ref.hpel_filter )
    {
        uint8_t *src = buf1+8+2*64;
        uint8_t *dstc[3] = { buf3+8, buf3+8+16*64, buf3+8+32*64 };
        uint8_t *dsta[3] = { buf4+8, buf4+8+16*64, buf4+8+32*64 };
        set_func_name( "hpel_filter" );
        ok = 1; used_asm = 1;
        memset( buf3, 0, 4096 );
        memset( buf4, 0, 4096 );
        call_c( mc_c.hpel_filter, dstc[0], dstc[1], dstc[2], src, 64, 48, 10 );
        call_a( mc_a.hpel_filter, dsta[0], dsta[1], dsta[2], src, 64, 48, 10 );
        for( i=0; i<3; i++ )
            for( j=0; j<10; j++ )
                //FIXME ideally the first pixels would match too, but they aren't actually used
                if( memcmp( dstc[i]+j*64+2, dsta[i]+j*64+2, 43 ) )
                {
                    ok = 0;
                    fprintf( stderr, "hpel filter differs at plane %c line %d\n", "hvc"[i], j );
                    for( k=0; k<48; k++ )
                        printf("%02x%s", dstc[i][j*64+k], (k+1)&3 ? "" : " ");
                    printf("\n");
                    for( k=0; k<48; k++ )
                        printf("%02x%s", dsta[i][j*64+k], (k+1)&3 ? "" : " ");
                    printf("\n");
                    break;
                }
        report( "hpel filter :" );
    }
    ok = 1; used_asm = 0;
    if( mc_a.frame_init_lowres_core != mc_ref.frame_init_lowres_core )
    {
        uint8_t *dstc[4] = { buf3, buf3+1024, buf3+2048, buf3+3072 };
        uint8_t *dsta[4] = { buf4, buf4+1024, buf4+2048, buf4+3072 };
        set_func_name( "lowres_init" );
        for( w=40; w<=48; w+=8 )
            if( mc_a.frame_init_lowres_core != mc_ref.frame_init_lowres_core )
            {
                int stride = (w+8)&~15;
                used_asm = 1;
                call_c( mc_c.frame_init_lowres_core, buf1, dstc[0], dstc[1], dstc[2], dstc[3], w*2, stride, w, 16 );
                call_a( mc_a.frame_init_lowres_core, buf1, dsta[0], dsta[1], dsta[2], dsta[3], w*2, stride, w, 16 );
                for( i=0; i<16; i++ )
                {
                    for( j=0; j<4; j++ )
                        if( memcmp( dstc[j]+i*stride, dsta[j]+i*stride, w ) )
                        {
                            ok = 0;
                            fprintf( stderr, "frame_init_lowres differs at plane %d line %d\n", j, i );
                            for( k=0; k<w; k++ )
                                printf( "%d ", dstc[j][k+i*stride] );
                            printf( "\n" );
                            for( k=0; k<w; k++ )
                                printf( "%d ", dsta[j][k+i*stride] );
                            printf( "\n" );
                            break;
                        }
                }
            }
        report( "lowres init :" );
    }

    return ret;
}
static int check_deblock( int cpu_ref, int cpu_new )
{
    x264_deblock_function_t db_c;
    x264_deblock_function_t db_ref;
    x264_deblock_function_t db_a;
    int ret = 0, ok = 1, used_asm = 0;
    int alphas[36], betas[36];
    int8_t tcs[36][4];
    int c = 255;
    int i, j;

    x264_deblock_init( 0, &db_c );
    x264_deblock_init( cpu_ref, &db_ref );
    x264_deblock_init( cpu_new, &db_a );

    /* not exactly the real values of a,b,tc but close enough */
    for( i = 35; i >= 0; i-- )
    {
        alphas[i] = c;
        betas[i] = (i+1)/2;
        tcs[i][0] = tcs[i][2] = (c+6)/10;
        tcs[i][1] = tcs[i][3] = (c+9)/20;
        c = c*9/10;
    }
#define TEST_DEBLOCK( name, align, ... ) \
    for( i = 0; i < 36; i++ ) \
    { \
        int off = 8*32 + (i&15)*4*!align; /* benchmark various alignments of h filter */\
        for( j = 0; j < 1024; j++ ) \
            /* two distributions of random to exercise different failure modes */\
            buf3[j] = rand() & (i&1 ? 0xf : 0xff ); \
        memcpy( buf4, buf3, 1024 ); \
        if( db_a.name != db_ref.name ) \
        { \
            set_func_name( #name );\
            used_asm = 1; \
            call_c1( db_c.name, buf3+off, 32, alphas[i], betas[i], ##__VA_ARGS__ ); \
            call_a1( db_a.name, buf4+off, 32, alphas[i], betas[i], ##__VA_ARGS__ ); \
            if( memcmp( buf3, buf4, 1024 ) ) \
            { \
                ok = 0; \
                fprintf( stderr, #name "(a=%d, b=%d): [FAILED]\n", alphas[i], betas[i] ); \
                break; \
            } \
            call_c2( db_c.name, buf3+off, 32, alphas[i], betas[i], ##__VA_ARGS__ ); \
            call_a2( db_a.name, buf4+off, 32, alphas[i], betas[i], ##__VA_ARGS__ ); \
        } \
    }
    TEST_DEBLOCK( deblock_h_luma, 0, tcs[i] );
    TEST_DEBLOCK( deblock_v_luma, 1, tcs[i] );
    TEST_DEBLOCK( deblock_h_chroma, 0, tcs[i] );
    TEST_DEBLOCK( deblock_v_chroma, 1, tcs[i] );
    TEST_DEBLOCK( deblock_h_luma_intra, 0 );
    TEST_DEBLOCK( deblock_v_luma_intra, 1 );
    TEST_DEBLOCK( deblock_h_chroma_intra, 0 );
    TEST_DEBLOCK( deblock_v_chroma_intra, 1 );

    report( "deblock :" );

    return ret;
}
static int check_quant( int cpu_ref, int cpu_new )
{
    x264_quant_function_t qf_c;
    x264_quant_function_t qf_ref;
    x264_quant_function_t qf_a;
    DECLARE_ALIGNED_16( int16_t dct1[64] );
    DECLARE_ALIGNED_16( int16_t dct2[64] );
    DECLARE_ALIGNED_16( uint8_t cqm_buf[64] );
    int ret = 0, ok, used_asm;
    int oks[2] = {1,1}, used_asms[2] = {0,0};
    int i, i_cqm, qp;
    x264_t h_buf;
    x264_t *h = &h_buf;
    memset( h, 0, sizeof(*h) );
    h->pps = h->pps_array;
    x264_param_default( &h->param );
    h->param.rc.i_qp_min = 26;
    h->param.analyse.b_transform_8x8 = 1;

    for( i_cqm = 0; i_cqm < 4; i_cqm++ )
    {
        if( i_cqm == 0 )
        {
            for( i = 0; i < 6; i++ )
                h->pps->scaling_list[i] = x264_cqm_flat16;
            h->param.i_cqm_preset = h->pps->i_cqm_preset = X264_CQM_FLAT;
        }
        else if( i_cqm == 1 )
        {
            for( i = 0; i < 6; i++ )
                h->pps->scaling_list[i] = x264_cqm_jvt[i];
            h->param.i_cqm_preset = h->pps->i_cqm_preset = X264_CQM_JVT;
        }
        else
        {
            if( i_cqm == 2 )
                for( i = 0; i < 64; i++ )
                    cqm_buf[i] = 10 + rand() % 246;
            else
                for( i = 0; i < 64; i++ )
                    cqm_buf[i] = 1;
            for( i = 0; i < 6; i++ )
                h->pps->scaling_list[i] = cqm_buf;
            h->param.i_cqm_preset = h->pps->i_cqm_preset = X264_CQM_CUSTOM;
        }

        x264_cqm_init( h );
        x264_quant_init( h, 0, &qf_c );
        x264_quant_init( h, cpu_ref, &qf_ref );
        x264_quant_init( h, cpu_new, &qf_a );
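
/* Fill dct1/dct2 with random coefficients bounded roughly by what a forward
 * transform of 8-bit residuals could produce: scale1d approximates the
 * per-row/column gains of the transform, so scale1d[y]*scale1d[x] bounds the
 * magnitude of coefficient (x,y). */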
#define INIT_QUANT8() \
{ \
    static const int scale1d[8] = {32,31,24,31,32,31,24,31}; \
    int x, y; \
    for( y = 0; y < 8; y++ ) \
        for( x = 0; x < 8; x++ ) \
        { \
            unsigned int scale = (255*scale1d[y]*scale1d[x])/16; \
            dct1[y*8+x] = dct2[y*8+x] = (rand()%(2*scale+1))-scale; \
        } \
}
#define INIT_QUANT4() \
{ \
    static const int scale1d[4] = {4,6,4,6}; \
    int x, y; \
    for( y = 0; y < 4; y++ ) \
        for( x = 0; x < 4; x++ ) \
        { \
            unsigned int scale = 255*scale1d[y]*scale1d[x]; \
            dct1[y*4+x] = dct2[y*4+x] = (rand()%(2*scale+1))-scale; \
        } \
}
#define TEST_QUANT_DC( name, cqm ) \
    if( qf_a.name != qf_ref.name ) \
    { \
        set_func_name( #name ); \
        used_asms[0] = 1; \
        for( qp = 51; qp > 0; qp-- ) \
        { \
            for( i = 0; i < 16; i++ ) \
                dct1[i] = dct2[i] = (rand() & 0x1fff) - 0xfff; \
            call_c1( qf_c.name, (void*)dct1, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
            call_a1( qf_a.name, (void*)dct2, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
            if( memcmp( dct1, dct2, 16*2 ) ) \
            { \
                oks[0] = 0; \
                fprintf( stderr, #name "(cqm=%d): [FAILED]\n", i_cqm ); \
                break; \
            } \
            call_c2( qf_c.name, (void*)dct1, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
            call_a2( qf_a.name, (void*)dct2, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
        } \
    }
#define TEST_QUANT( qname, block, w ) \
    if( qf_a.qname != qf_ref.qname ) \
    { \
        set_func_name( #qname ); \
        used_asms[0] = 1; \
        for( qp = 51; qp > 0; qp-- ) \
        { \
            INIT_QUANT##w() \
            call_c1( qf_c.qname, (void*)dct1, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
            call_a1( qf_a.qname, (void*)dct2, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
            if( memcmp( dct1, dct2, w*w*2 ) ) \
            { \
                oks[0] = 0; \
                fprintf( stderr, #qname "(qp=%d, cqm=%d, block="#block"): [FAILED]\n", qp, i_cqm ); \
                break; \
            } \
            call_c2( qf_c.qname, (void*)dct1, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
            call_a2( qf_a.qname, (void*)dct2, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
        } \
    }
        TEST_QUANT( quant_8x8, CQM_8IY, 8 );
        TEST_QUANT( quant_8x8, CQM_8PY, 8 );
        TEST_QUANT( quant_4x4, CQM_4IY, 4 );
        TEST_QUANT( quant_4x4, CQM_4PY, 4 );
        TEST_QUANT_DC( quant_4x4_dc, **h->quant4_mf[CQM_4IY] );
        TEST_QUANT_DC( quant_2x2_dc, **h->quant4_mf[CQM_4IC] );
#define TEST_DEQUANT( qname, dqname, block, w ) \
    if( qf_a.dqname != qf_ref.dqname ) \
    { \
        set_func_name( "%s_%s", #dqname, i_cqm?"cqm":"flat" ); \
        used_asms[1] = 1; \
        for( qp = 51; qp > 0; qp-- ) \
        { \
            INIT_QUANT##w() \
            call_c( qf_c.qname, (void*)dct1, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
            memcpy( dct2, dct1, w*w*2 ); \
            call_c1( qf_c.dqname, (void*)dct1, h->dequant##w##_mf[block], qp ); \
            call_a1( qf_a.dqname, (void*)dct2, h->dequant##w##_mf[block], qp ); \
            if( memcmp( dct1, dct2, w*w*2 ) ) \
            { \
                oks[1] = 0; \
                fprintf( stderr, #dqname "(qp=%d, cqm=%d, block="#block"): [FAILED]\n", qp, i_cqm ); \
                break; \
            } \
            call_c2( qf_c.dqname, (void*)dct1, h->dequant##w##_mf[block], qp ); \
            call_a2( qf_a.dqname, (void*)dct2, h->dequant##w##_mf[block], qp ); \
        } \
    }
        TEST_DEQUANT( quant_8x8, dequant_8x8, CQM_8IY, 8 );
        TEST_DEQUANT( quant_8x8, dequant_8x8, CQM_8PY, 8 );
        TEST_DEQUANT( quant_4x4, dequant_4x4, CQM_4IY, 4 );
        TEST_DEQUANT( quant_4x4, dequant_4x4, CQM_4PY, 4 );

        x264_cqm_delete( h );
    }
    ok = oks[0]; used_asm = used_asms[0];
    report( "quant :" );

    ok = oks[1]; used_asm = used_asms[1];
    report( "dequant :" );
    ok = 1; used_asm = 0;
    if( qf_a.denoise_dct_core != qf_ref.denoise_dct_core )
    {
        int size;
        for( size = 16; size <= 64; size += 48 )
        {
            set_func_name( "denoise_dct" );
            used_asm = 1;
            memcpy( dct1, buf1, size*2 );
            memcpy( dct2, buf1, size*2 );
            memcpy( buf3+256, buf3, 256 );
            call_c1( qf_c.denoise_dct_core, dct1, (uint32_t*)buf3, (uint16_t*)buf2, size );
            call_a1( qf_a.denoise_dct_core, dct2, (uint32_t*)(buf3+256), (uint16_t*)buf2, size );
            if( memcmp( dct1, dct2, size*2 ) || memcmp( buf3+4, buf3+256+4, (size-1)*sizeof(uint32_t) ) )
                ok = 0;
            call_c2( qf_c.denoise_dct_core, dct1, (uint32_t*)buf3, (uint16_t*)buf2, size );
            call_a2( qf_a.denoise_dct_core, dct2, (uint32_t*)(buf3+256), (uint16_t*)buf2, size );
        }
    }
    report( "denoise dct :" );

    return ret;
}
static int check_intra( int cpu_ref, int cpu_new )
{
    int ret = 0, ok = 1, used_asm = 0;
    int i, j, k;
    DECLARE_ALIGNED_16( uint8_t edge[33] );
    struct
    {
        x264_predict_t predict_16x16[4+3];
        x264_predict_t predict_8x8c[4+3];
        x264_predict8x8_t predict_8x8[9+3];
        x264_predict_t predict_4x4[9+3];
    } ip_c, ip_ref, ip_a;
    x264_predict_16x16_init( 0, ip_c.predict_16x16 );
    x264_predict_8x8c_init( 0, ip_c.predict_8x8c );
    x264_predict_8x8_init( 0, ip_c.predict_8x8 );
    x264_predict_4x4_init( 0, ip_c.predict_4x4 );

    x264_predict_16x16_init( cpu_ref, ip_ref.predict_16x16 );
    x264_predict_8x8c_init( cpu_ref, ip_ref.predict_8x8c );
    x264_predict_8x8_init( cpu_ref, ip_ref.predict_8x8 );
    x264_predict_4x4_init( cpu_ref, ip_ref.predict_4x4 );

    x264_predict_16x16_init( cpu_new, ip_a.predict_16x16 );
    x264_predict_8x8c_init( cpu_new, ip_a.predict_8x8c );
    x264_predict_8x8_init( cpu_new, ip_a.predict_8x8 );
    x264_predict_4x4_init( cpu_new, ip_a.predict_4x4 );

    x264_predict_8x8_filter( buf1+48, edge, ALL_NEIGHBORS, ALL_NEIGHBORS );
#define INTRA_TEST( name, dir, w, ... ) \
    if( ip_a.name[dir] != ip_ref.name[dir] )\
    { \
        set_func_name( "intra_%s_%s", #name, intra_##name##_names[dir] );\
        used_asm = 1; \
        memcpy( buf3, buf1, 32*20 );\
        memcpy( buf4, buf1, 32*20 );\
        call_c( ip_c.name[dir], buf3+48, ##__VA_ARGS__ );\
        call_a( ip_a.name[dir], buf4+48, ##__VA_ARGS__ );\
        if( memcmp( buf3, buf4, 32*20 ) )\
        {\
            fprintf( stderr, #name "[%d] : [FAILED]\n", dir );\
            ok = 0;\
            /* dump the edge pixels and both predictions for debugging */\
            for(k=-1; k<16; k++)\
                printf("%2x ", edge[16+k]);\
            printf("\n");\
            for(j=0; j<w; j++){\
                printf("%2x ", edge[14-j]);\
                for(k=0; k<w; k++)\
                    printf("%2x ", buf4[48+k+j*32]);\
                printf("\n");\
            }\
            printf("\n");\
            for(j=0; j<w; j++){\
                printf("   ");\
                for(k=0; k<w; k++)\
                    printf("%2x ", buf3[48+k+j*32]);\
                printf("\n");\
            }\
        }\
    }
    for( i = 0; i < 12; i++ )
        INTRA_TEST( predict_4x4, i, 4 );
    for( i = 0; i < 7; i++ )
        INTRA_TEST( predict_8x8c, i, 8 );
    for( i = 0; i < 7; i++ )
        INTRA_TEST( predict_16x16, i, 16 );
    for( i = 0; i < 12; i++ )
        INTRA_TEST( predict_8x8, i, 8, edge );

    report( "intra pred :" );

    return ret;
}
#define DECL_CABAC(cpu) \
static void run_cabac_##cpu( uint8_t *dst )\
{\
    int i;\
    x264_cabac_t cb;\
    x264_cabac_context_init( &cb, SLICE_TYPE_P, 26, 0 );\
    x264_cabac_encode_init( &cb, dst, dst+0xff0 );\
    for( i=0; i<0x1000; i++ )\
        x264_cabac_encode_decision_##cpu( &cb, buf1[i]>>1, buf1[i]&1 );\
}
DECL_CABAC(c)
#ifdef HAVE_MMX
DECL_CABAC(asm)
#else
#define run_cabac_asm run_cabac_c
#endif
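
/* CABAC is checked end-to-end: encode 0x1000 decisions driven by random
 * data through the C and asm encoders and compare the output bitstreams. */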
static int check_cabac( int cpu_ref, int cpu_new )
{
    int ret = 0, ok, used_asm = 1;
    if( cpu_ref || run_cabac_c == run_cabac_asm )
        return 0;
    set_func_name( "cabac_encode_decision" );
    memcpy( buf4, buf3, 0x1000 );
    call_c( run_cabac_c, buf3 );
    call_a( run_cabac_asm, buf4 );
    ok = !memcmp( buf3, buf4, 0x1000 );
    report( "cabac :" );
    return ret;
}
int check_all_funcs( int cpu_ref, int cpu_new )
{
    return check_pixel( cpu_ref, cpu_new )
         + check_dct( cpu_ref, cpu_new )
         + check_mc( cpu_ref, cpu_new )
         + check_intra( cpu_ref, cpu_new )
         + check_deblock( cpu_ref, cpu_new )
         + check_quant( cpu_ref, cpu_new )
         + check_cabac( cpu_ref, cpu_new );
}
int add_flags( int *cpu_ref, int *cpu_new, int flags, const char *name )
{
    *cpu_ref = *cpu_new;
    *cpu_new |= flags;
    if( *cpu_new & X264_CPU_SSE2_IS_FAST )
        *cpu_new &= ~X264_CPU_SSE2_IS_SLOW;
    if( !quiet )
        fprintf( stderr, "x264: %s\n", name );
    return check_all_funcs( *cpu_ref, *cpu_new );
}
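
/* Flags are enabled cumulatively: each add_flags() call tests the new flag
 * set against the previous one (cpu_ref), so only the functions that were
 * newly overridden get re-checked. */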
int check_all_flags( void )
{
    int ret = 0;
    int cpu0 = 0, cpu1 = 0;
#ifdef HAVE_MMX
    if( x264_cpu_detect() & X264_CPU_MMXEXT )
    {
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_MMX | X264_CPU_MMXEXT, "MMX" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "MMX Cache64" );
        cpu1 &= ~X264_CPU_CACHELINE_64;
#ifdef ARCH_X86
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_32, "MMX Cache32" );
        cpu1 &= ~X264_CPU_CACHELINE_32;
#endif
    }
    if( x264_cpu_detect() & X264_CPU_SSE2 )
    {
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE | X264_CPU_SSE2 | X264_CPU_SSE2_IS_SLOW, "SSE2Slow" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE2_IS_FAST, "SSE2Fast" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "SSE2Fast Cache64" );
    }
    if( x264_cpu_detect() & X264_CPU_SSE3 )
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE3 | X264_CPU_CACHELINE_64, "SSE3" );
    if( x264_cpu_detect() & X264_CPU_SSSE3 )
    {
        cpu1 &= ~X264_CPU_CACHELINE_64;
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSSE3, "SSSE3" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "SSSE3 Cache64" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_PHADD_IS_FAST, "PHADD" );
    }
#elif ARCH_PPC
    if( x264_cpu_detect() & X264_CPU_ALTIVEC )
    {
        fprintf( stderr, "x264: ALTIVEC against C\n" );
        ret = check_all_funcs( 0, X264_CPU_ALTIVEC );
    }
#endif
    return ret;
}
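
/* Usage: checkasm [seed]            - run correctness tests with a given seed
 *        checkasm --bench[=pattern] - additionally benchmark every function
 *                                     whose name starts with pattern */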
int main(int argc, char *argv[])
{
    int ret = 0;
    int i;

    if( argc > 1 && !strncmp( argv[1], "--bench", 7 ) )
    {
#if !defined(ARCH_X86) && !defined(ARCH_X86_64)
        fprintf( stderr, "no --bench for your cpu until you port rdtsc\n" );
        return 1;
#endif
        do_bench = 1;
        if( argv[1][7] == '=' )
        {
            bench_pattern = argv[1]+8;
            bench_pattern_len = strlen(bench_pattern);
        }
        argc--;
        argv++;
    }

    i = ( argc > 1 ) ? atoi(argv[1]) : x264_mdate();
    fprintf( stderr, "x264: using random seed %u\n", i );
    srand( i );

    buf1 = x264_malloc( 0x3e00 + 16*BENCH_ALIGNS );
    buf2 = buf1 + 0xf00;
    buf3 = buf2 + 0xf00;
    buf4 = buf3 + 0x1000;
    for( i=0; i<0x1e00; i++ )
        buf1[i] = rand() & 0xFF;
    memset( buf1+0x1e00, 0, 0x2000 );

    /* 16-byte alignment is guaranteed whenever it's useful, but some functions also vary in speed depending on %64 */
    if( do_bench )
        for( i=0; i<BENCH_ALIGNS && !ret; i++ )
        {
            buf2 = buf1 + 0xf00;
            buf3 = buf2 + 0xf00;
            buf4 = buf3 + 0x1000;
            ret |= x264_stack_pagealign( check_all_flags, i*16 );
            buf1 += 16;
            quiet = 1;
            fprintf( stderr, "%d/%d\r", i+1, BENCH_ALIGNS );
        }
    else
        ret = check_all_flags();

    if( ret )
    {
        fprintf( stderr, "x264: at least one test has failed. Go and fix that Right Now!\n" );
        return -1;
    }
    fprintf( stderr, "x264: All tests passed Yeah :)\n" );
    if( do_bench )
        print_bench();
    return 0;
}