/*****************************************************************************
 * checkasm.c: assembly check tool
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *****************************************************************************/
#include <ctype.h>
#include <math.h>

#include "common/common.h"
#include "common/cpu.h"

/* buf1, buf2: initialised to random data; the tested functions must not write into them */
uint8_t * buf1, * buf2;
/* buf3, buf4: used to store output */
uint8_t * buf3, * buf4;
int quiet = 0;

#define report( name ) { \
    if( used_asm && !quiet ) \
        fprintf( stderr, " - %-21s [%s]\n", name, ok ? "OK" : "FAILED" ); \
    if( !ok ) ret = -1; \
}
#define BENCH_RUNS 100  // tradeoff between accuracy and speed
#define BENCH_ALIGNS 16 // number of stack+heap data alignments (another accuracy vs speed tradeoff)
#define MAX_FUNCS 1000  // just has to be big enough to hold all the existing functions
#define MAX_CPUS 10     // number of different combinations of cpu flags
typedef struct {
    void *pointer; // just for detecting duplicates
    uint32_t cpu;
    uint32_t cycles;
    uint32_t den;
} bench_t;

typedef struct {
    char *name;
    bench_t vers[MAX_CPUS];
} bench_func_t;

int do_bench = 0;
int bench_pattern_len = 0;
const char *bench_pattern = "";
char func_name[100];
static bench_func_t benchs[MAX_FUNCS];
static const char *pixel_names[10] = { "16x16", "16x8", "8x16", "8x8", "8x4", "4x8", "4x4", "4x2", "2x4", "2x2" };
static const char *intra_predict_16x16_names[7] = { "v", "h", "dc", "p", "dcl", "dct", "dc8" };
static const char *intra_predict_8x8c_names[7] = { "dc", "h", "v", "p", "dcl", "dct", "dc8" };
static const char *intra_predict_4x4_names[12] = { "v", "h", "dc", "ddl", "ddr", "vr", "hd", "vl", "hu", "dcl", "dct", "dc8" };
static const char **intra_predict_8x8_names = intra_predict_4x4_names;

#define set_func_name(...) snprintf( func_name, sizeof(func_name), __VA_ARGS__ )
static inline uint32_t read_time(void)
{
    uint32_t a = 0;
#if defined(__GNUC__) && (defined(ARCH_X86) || defined(ARCH_X86_64))
    asm volatile( "rdtsc" :"=a"(a) ::"edx" );
#endif
    return a;
}
static bench_t* get_bench( const char *name, int cpu )
{
    int i, j;
    for( i=0; benchs[i].name && strcmp(name, benchs[i].name); i++ )
        assert( i < MAX_FUNCS );
    if( !benchs[i].name )
        benchs[i].name = strdup( name );
    if( !cpu )
        return &benchs[i].vers[0];
    for( j=1; benchs[i].vers[j].cpu && benchs[i].vers[j].cpu != cpu; j++ )
        assert( j < MAX_CPUS );
    benchs[i].vers[j].cpu = cpu;
    return &benchs[i].vers[j];
}
static int cmp_nop( const void *a, const void *b )
{
    return *(uint16_t*)a - *(uint16_t*)b;
}
static int cmp_bench( const void *a, const void *b )
{
    // asciibetical sort except preserving numbers
    const char *sa = ((bench_func_t*)a)->name;
    const char *sb = ((bench_func_t*)b)->name;
    for( ;; sa++, sb++ )
    {
        if( !*sa && !*sb ) return 0;
        if( isdigit(*sa) && isdigit(*sb) && isdigit(sa[1]) != isdigit(sb[1]) )
            return isdigit(sa[1]) - isdigit(sb[1]);
        if( *sa != *sb ) return *sa - *sb;
    }
}
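/* e.g. "sad_8x4" sorts before "sad_8x16": at the first differing digit only
 * one of the two numbers has a following digit, so the number with fewer
 * digits wins, whereas plain strcmp would put "8x16" first. */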
static void print_bench(void)
{
    uint16_t nops[10000] = {0};
    int i, j, k, nfuncs, nop_time=0;

    for( i=0; i<10000; i++ )
    {
        uint32_t t = read_time();
        nops[i] = read_time() - t;
    }
    qsort( nops, 10000, sizeof(uint16_t), cmp_nop );
    for( i=500; i<9500; i++ )
        nop_time += nops[i];
    nop_time /= 900;
    printf( "nop: %d\n", nop_time );
    for( i=0; i<MAX_FUNCS && benchs[i].name; i++ );
    nfuncs = i;
    qsort( benchs, nfuncs, sizeof(bench_func_t), cmp_bench );
    for( i=0; i<nfuncs; i++ )
        for( j=0; j<MAX_CPUS && (!j || benchs[i].vers[j].cpu); j++ )
        {
            bench_t *b = &benchs[i].vers[j];
            if( !b->den ) continue;
            for( k=0; k<j && benchs[i].vers[k].pointer != b->pointer; k++ );
            if( k<j ) continue;
            printf( "%s_%s%s: %"PRId64"\n", benchs[i].name,
                    b->cpu&X264_CPU_SSE4 ? "sse4" :
                    b->cpu&X264_CPU_PHADD_IS_FAST ? "phadd" :
                    b->cpu&X264_CPU_SSSE3 ? "ssse3" :
                    b->cpu&X264_CPU_SSE3 ? "sse3" :
                    /* print sse2slow only if there's also a sse2fast version of the same func */
                    b->cpu&X264_CPU_SSE2_IS_SLOW && j<MAX_CPUS-1 && b[1].cpu&X264_CPU_SSE2_IS_FAST && !(b[1].cpu&X264_CPU_SSE3) ? "sse2slow" :
                    b->cpu&X264_CPU_SSE2 ? "sse2" :
                    b->cpu&X264_CPU_MMX ? "mmx" : "c",
                    b->cpu&X264_CPU_CACHELINE_32 ? "_c32" :
                    b->cpu&X264_CPU_CACHELINE_64 ? "_c64" :
                    b->cpu&X264_CPU_SSE_MISALIGN ? "_misalign" : "",
                    ((int64_t)10*b->cycles/b->den - nop_time)/4 );
        }
}
#if defined(ARCH_X86) || defined(ARCH_X86_64)
int x264_stack_pagealign( int (*func)(), int align );
#else
#define x264_stack_pagealign( func, align ) func()
#endif

#define call_c1(func,...) func(__VA_ARGS__)

#ifdef ARCH_X86
/* detect when callee-saved regs aren't saved.
 * needs an explicit asm check because it only sometimes crashes in normal use. */
long x264_checkasm_call( long (*func)(), int *ok, ... );
#define call_a1(func,...) x264_checkasm_call((long(*)())func, &ok, __VA_ARGS__)
#else
#define call_a1 call_c1
#endif
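/* A sketch of the idea behind x264_checkasm_call (not the actual asm): the
 * wrapper fills the callee-saved registers with a canary value, forwards the
 * call, then verifies the canary survived. fill_regs/regs_intact are
 * hypothetical helpers, for illustration only.
 *
 *     long checkasm_call_sketch( long (*func)(), int *ok, ... )
 *     {
 *         fill_regs( CANARY );           // clobber callee-saved regs
 *         long ret = func( ... );        // forward the call
 *         if( !regs_intact( CANARY ) )   // func failed to restore them
 *             *ok = 0;
 *         return ret;
 *     }
 */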
#define call_bench(func,cpu,...)\
    if( do_bench && !strncmp(func_name, bench_pattern, bench_pattern_len) )\
    {\
        uint32_t tsum = 0;\
        int tcount = 0;\
        int ti;\
        call_a1(func, __VA_ARGS__);\
        for( ti=0; ti<(cpu?BENCH_RUNS:BENCH_RUNS/4); ti++ )\
        {\
            uint32_t t = read_time();\
            func(__VA_ARGS__);\
            func(__VA_ARGS__);\
            func(__VA_ARGS__);\
            func(__VA_ARGS__);\
            t = read_time() - t;\
            if( t*tcount <= tsum*4 && ti > 0 )\
            {\
                tsum += t;\
                tcount++;\
            }\
        }\
        bench_t *b = get_bench( func_name, cpu );\
        b->cycles += tsum;\
        b->den += tcount;\
        b->pointer = func;\
    }

/* for most functions, run benchmark and correctness test at the same time.
 * for those that modify their inputs, run the above macros separately */
#define call_a(func,...) ({ call_a2(func,__VA_ARGS__); call_a1(func,__VA_ARGS__); })
#define call_c(func,...) ({ call_c2(func,__VA_ARGS__); call_c1(func,__VA_ARGS__); })
#define call_a2(func,...) ({ call_bench(func,cpu_new,__VA_ARGS__); })
#define call_c2(func,...) ({ call_bench(func,0,__VA_ARGS__); })
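/* Each timed iteration above calls func 4 times, and nop_time carries a
 * factor of 10, which is why print_bench() computes
 * ((int64_t)10*cycles/den - nop_time)/4. Samples more than 4x the running
 * average (e.g. ones hit by an interrupt) are discarded. */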
static int check_pixel( int cpu_ref, int cpu_new )
{
    x264_pixel_function_t pixel_c;
    x264_pixel_function_t pixel_ref;
    x264_pixel_function_t pixel_asm;
    x264_predict_t predict_16x16[4+3];
    x264_predict_t predict_8x8c[4+3];
    x264_predict_t predict_4x4[9+3];
    x264_predict8x8_t predict_8x8[9+3];
    DECLARE_ALIGNED_16( uint8_t edge[33] );
    uint16_t cost_mv[32];
    int ret = 0, ok, used_asm;
    int i, j;

    x264_pixel_init( 0, &pixel_c );
    x264_pixel_init( cpu_ref, &pixel_ref );
    x264_pixel_init( cpu_new, &pixel_asm );
    x264_predict_16x16_init( 0, predict_16x16 );
    x264_predict_8x8c_init( 0, predict_8x8c );
    x264_predict_8x8_init( 0, predict_8x8 );
    x264_predict_4x4_init( 0, predict_4x4 );
    x264_predict_8x8_filter( buf2+40, edge, ALL_NEIGHBORS, ALL_NEIGHBORS );

    for( i=0; i<256; i++ )
    {
        int z = rand(); // only the low bit is used below
        buf3[i] = ~(buf4[i] = -(z&1));
    }
    // random pattern made of maxed pixel differences, in case an intermediate value overflows
    for( ; i<0x1000; i++ )
        buf3[i] = ~(buf4[i] = -(buf1[i&~0x88]&1));
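    /* Since buf4[i] is either 0x00 or 0xFF and buf3[i] is its complement,
     * every pixel pair differs by exactly 255, the worst case for
     * accumulating absolute differences. */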
#define TEST_PIXEL( name, align ) \
    for( i = 0, ok = 1, used_asm = 0; i < 7; i++ ) \
    { \
        int res_c, res_asm; \
        if( pixel_asm.name[i] != pixel_ref.name[i] ) \
        { \
            set_func_name( "%s_%s", #name, pixel_names[i] ); \
            used_asm = 1; \
            for( j=0; j<64; j++ ) \
            { \
                res_c   = call_c( pixel_c.name[i], buf1, 16, buf2+j*!align, 64 ); \
                res_asm = call_a( pixel_asm.name[i], buf1, 16, buf2+j*!align, 64 ); \
                if( res_c != res_asm ) \
                { \
                    ok = 0; \
                    fprintf( stderr, #name "[%d]: %d != %d [FAILED]\n", i, res_c, res_asm ); \
                    break; \
                } \
            } \
            for( j=0; j<0x1000 && ok; j+=256 ) \
            { \
                res_c   = pixel_c .name[i]( buf3+j, 16, buf4+j, 16 ); \
                res_asm = pixel_asm.name[i]( buf3+j, 16, buf4+j, 16 ); \
                if( res_c != res_asm ) \
                { \
                    ok = 0; \
                    fprintf( stderr, #name "[%d]: overflow %d != %d\n", i, res_c, res_asm ); \
                } \
            } \
        } \
    } \
    report( "pixel " #name " :" );
    TEST_PIXEL( sad, 0 );
    TEST_PIXEL( sad_aligned, 1 );
    TEST_PIXEL( ssd, 1 );
    TEST_PIXEL( satd, 0 );
    TEST_PIXEL( sa8d, 0 );
#define TEST_PIXEL_X( N ) \
    for( i = 0, ok = 1, used_asm = 0; i < 7; i++ ) \
    { \
        int res_c[4]={0}, res_asm[4]={0}; \
        if( pixel_asm.sad_x##N[i] && pixel_asm.sad_x##N[i] != pixel_ref.sad_x##N[i] ) \
        { \
            set_func_name( "sad_x%d_%s", N, pixel_names[i] ); \
            used_asm = 1; \
            for( j=0; j<64; j++ ) \
            { \
                uint8_t *pix2 = buf2+j; \
                res_c[0] = pixel_c.sad[i]( buf1, 16, pix2, 64 ); \
                res_c[1] = pixel_c.sad[i]( buf1, 16, pix2+6, 64 ); \
                res_c[2] = pixel_c.sad[i]( buf1, 16, pix2+1, 64 ); \
                if( N == 4 ) \
                { \
                    res_c[3] = pixel_c.sad[i]( buf1, 16, pix2+10, 64 ); \
                    call_a( pixel_asm.sad_x4[i], buf1, pix2, pix2+6, pix2+1, pix2+10, 64, res_asm ); \
                } \
                else \
                    call_a( pixel_asm.sad_x3[i], buf1, pix2, pix2+6, pix2+1, 64, res_asm ); \
                if( memcmp(res_c, res_asm, sizeof(res_c)) ) \
                { \
                    ok = 0; \
                    fprintf( stderr, "sad_x"#N"[%d]: %d,%d,%d,%d != %d,%d,%d,%d [FAILED]\n", \
                             i, res_c[0], res_c[1], res_c[2], res_c[3], \
                             res_asm[0], res_asm[1], res_asm[2], res_asm[3] ); \
                } \
                if( N == 4 ) \
                    call_c2( pixel_c.sad_x4[i], buf1, pix2, pix2+6, pix2+1, pix2+10, 64, res_asm ); \
                else \
                    call_c2( pixel_c.sad_x3[i], buf1, pix2, pix2+6, pix2+1, 64, res_asm ); \
            } \
        } \
    } \
    report( "pixel sad_x"#N" :" );

    TEST_PIXEL_X(3);
    TEST_PIXEL_X(4);
#define TEST_PIXEL_VAR( i ) \
    if( pixel_asm.var[i] != pixel_ref.var[i] ) \
    { \
        uint32_t res_c, res_asm; \
        uint32_t sad_c, sad_asm; \
        set_func_name( "%s_%s", "var", pixel_names[i] ); \
        used_asm = 1; \
        res_c   = call_c( pixel_c.var[i], buf1, 16, &sad_c ); \
        res_asm = call_a( pixel_asm.var[i], buf1, 16, &sad_asm ); \
        if( (res_c != res_asm) || (sad_c != sad_asm) ) \
        { \
            ok = 0; \
            fprintf( stderr, "var[%d]: %d,%d != %d,%d [FAILED]\n", i, res_c, sad_c, res_asm, sad_asm ); \
        } \
    }

    ok = 1; used_asm = 0;
    TEST_PIXEL_VAR( PIXEL_16x16 );
    TEST_PIXEL_VAR( PIXEL_8x8 );
    report( "pixel var :" );
    for( i=0, ok=1, used_asm=0; i<4; i++ )
        if( pixel_asm.hadamard_ac[i] != pixel_ref.hadamard_ac[i] )
        {
            set_func_name( "hadamard_ac_%s", pixel_names[i] );
            used_asm = 1;
            for( j=0; j<32; j++ )
            {
                uint8_t *pix = (j&16 ? buf1 : buf3) + (j&15)*256;
                uint64_t rc = pixel_c.hadamard_ac[i]( pix, 16 );
                uint64_t ra = pixel_asm.hadamard_ac[i]( pix, 16 );
                if( rc != ra )
                {
                    ok = 0;
                    fprintf( stderr, "hadamard_ac[%d]: %d,%d != %d,%d\n", i, (int)rc, (int)(rc>>32), (int)ra, (int)(ra>>32) );
                    break;
                }
            }
            call_c2( pixel_c.hadamard_ac[i], buf1, 16 );
            call_a2( pixel_asm.hadamard_ac[i], buf1, 16 );
        }
    report( "pixel hadamard_ac :" );
#define TEST_INTRA_MBCMP( name, pred, satd, i8x8, ... ) \
    if( pixel_asm.name && pixel_asm.name != pixel_ref.name ) \
    { \
        int res_c[3], res_asm[3]; \
        set_func_name( #name ); \
        used_asm = 1; \
        memcpy( buf3, buf2, 1024 ); \
        for( i=0; i<3; i++ ) \
        { \
            pred[i]( buf3+48, ##__VA_ARGS__ ); \
            res_c[i] = pixel_c.satd( buf1+48, 16, buf3+48, 32 ); \
        } \
        call_a( pixel_asm.name, buf1+48, i8x8 ? edge : buf3+48, res_asm ); \
        if( memcmp(res_c, res_asm, sizeof(res_c)) ) \
        { \
            ok = 0; \
            fprintf( stderr, #name": %d,%d,%d != %d,%d,%d [FAILED]\n", \
                     res_c[0], res_c[1], res_c[2], \
                     res_asm[0], res_asm[1], res_asm[2] ); \
        } \
    }

    ok = 1; used_asm = 0;
    TEST_INTRA_MBCMP( intra_satd_x3_16x16, predict_16x16, satd[PIXEL_16x16], 0 );
    TEST_INTRA_MBCMP( intra_satd_x3_8x8c , predict_8x8c , satd[PIXEL_8x8]  , 0 );
    TEST_INTRA_MBCMP( intra_satd_x3_4x4  , predict_4x4  , satd[PIXEL_4x4]  , 0 );
    TEST_INTRA_MBCMP( intra_sa8d_x3_8x8  , predict_8x8  , sa8d[PIXEL_8x8]  , 1, edge );
    report( "intra satd_x3 :" );
    TEST_INTRA_MBCMP( intra_sad_x3_16x16 , predict_16x16, sad [PIXEL_16x16], 0 );
    report( "intra sad_x3 :" );
    if( pixel_asm.ssim_4x4x2_core != pixel_ref.ssim_4x4x2_core ||
        pixel_asm.ssim_end4 != pixel_ref.ssim_end4 )
    {
        float res_c, res_a;
        int sums[5][4] = {{0}};
        ok = 1; used_asm = 1;
        x264_emms();
        res_c = x264_pixel_ssim_wxh( &pixel_c,   buf1+2, 32, buf2+2, 32, 32, 28 );
        res_a = x264_pixel_ssim_wxh( &pixel_asm, buf1+2, 32, buf2+2, 32, 32, 28 );
        if( fabs(res_c - res_a) > 1e-6 )
        {
            ok = 0;
            fprintf( stderr, "ssim: %.7f != %.7f [FAILED]\n", res_c, res_a );
        }
        set_func_name( "ssim_core" );
        call_c2( pixel_c.ssim_4x4x2_core,   buf1+2, 32, buf2+2, 32, sums );
        call_a2( pixel_asm.ssim_4x4x2_core, buf1+2, 32, buf2+2, 32, sums );
        set_func_name( "ssim_end" );
        call_c2( pixel_c.ssim_end4,   sums, sums, 4 );
        call_a2( pixel_asm.ssim_end4, sums, sums, 4 );
        report( "ssim :" );
    }
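    /* ssim is computed in floating point, so the C and asm results are
     * compared against a small (1e-6) tolerance rather than for exact
     * equality. */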
    ok = 1; used_asm = 0;
    for( i=0; i<32; i++ )
        cost_mv[i] = i*10;
    for( i=0; i<100 && ok; i++ )
        if( pixel_asm.ads[i&3] != pixel_ref.ads[i&3] )
        {
            DECLARE_ALIGNED_16( uint16_t sums[72] );
            DECLARE_ALIGNED_16( int dc[4] );
            int16_t mvs_a[32], mvs_c[32];
            int mvn_a, mvn_c;
            int thresh = rand() & 0x3fff;
            set_func_name( "esa_ads" );
            used_asm = 1;
            for( j=0; j<72; j++ )
                sums[j] = rand() & 0x3fff;
            for( j=0; j<4; j++ )
                dc[j] = rand() & 0x3fff;
            mvn_c = call_c( pixel_c.ads[i&3],   dc, sums, 32, cost_mv, mvs_c, 28, thresh );
            mvn_a = call_a( pixel_asm.ads[i&3], dc, sums, 32, cost_mv, mvs_a, 28, thresh );
            if( mvn_c != mvn_a || memcmp( mvs_c, mvs_a, mvn_c*sizeof(*mvs_c) ) )
            {
                ok = 0;
                printf( "c%d: ", i&3 );
                for( j=0; j<mvn_c; j++ )
                    printf( "%d ", mvs_c[j] );
                printf( "\na%d: ", i&3 );
                for( j=0; j<mvn_a; j++ )
                    printf( "%d ", mvs_a[j] );
                printf( "\n\n" );
            }
        }
    report( "esa ads:" );

    return ret;
}
static int check_dct( int cpu_ref, int cpu_new )
{
    x264_dct_function_t dct_c;
    x264_dct_function_t dct_ref;
    x264_dct_function_t dct_asm;
    x264_quant_function_t qf;
    int ret = 0, ok, used_asm, i, j, interlace;
    DECLARE_ALIGNED_16( int16_t dct1[16][4][4] );
    DECLARE_ALIGNED_16( int16_t dct2[16][4][4] );
    DECLARE_ALIGNED_16( int16_t dct4[16][4][4] );
    DECLARE_ALIGNED_16( int16_t dct8[4][8][8] );
    x264_t h_buf;
    x264_t *h = &h_buf;

    x264_dct_init( 0, &dct_c );
    x264_dct_init( cpu_ref, &dct_ref );
    x264_dct_init( cpu_new, &dct_asm );

    memset( h, 0, sizeof(*h) );
    h->pps = h->pps_array;
    x264_param_default( &h->param );
    h->param.analyse.i_luma_deadzone[0] = 0;
    h->param.analyse.i_luma_deadzone[1] = 0;
    h->param.analyse.b_transform_8x8 = 1;
    for( i=0; i<6; i++ )
        h->pps->scaling_list[i] = x264_cqm_flat16;
    x264_cqm_init( h );
    x264_quant_init( h, 0, &qf );
#define TEST_DCT( name, t1, t2, size ) \
    if( dct_asm.name != dct_ref.name ) \
    { \
        set_func_name( #name ); \
        used_asm = 1; \
        call_c( dct_c.name, t1, buf1, buf2 ); \
        call_a( dct_asm.name, t2, buf1, buf2 ); \
        if( memcmp( t1, t2, size ) ) \
        { \
            ok = 0; \
            fprintf( stderr, #name " [FAILED]\n" ); \
        } \
    }

    ok = 1; used_asm = 0;
    TEST_DCT( sub4x4_dct, dct1[0], dct2[0], 16*2 );
    TEST_DCT( sub8x8_dct, dct1, dct2, 16*2*4 );
    TEST_DCT( sub16x16_dct, dct1, dct2, 16*2*16 );
    report( "sub_dct4 :" );

    ok = 1; used_asm = 0;
    TEST_DCT( sub8x8_dct8, (void*)dct1[0], (void*)dct2[0], 64*2 );
    TEST_DCT( sub16x16_dct8, (void*)dct1, (void*)dct2, 64*2*4 );
    report( "sub_dct8 :" );
    // fdct and idct are denormalized by different factors, so quant/dequant
    // is needed to force the coefs into the right range.
    dct_c.sub16x16_dct( dct4, buf1, buf2 );
    dct_c.sub16x16_dct8( dct8, buf1, buf2 );
    for( i=0; i<16; i++ )
    {
        qf.quant_4x4( dct4[i], h->quant4_mf[CQM_4IY][20], h->quant4_bias[CQM_4IY][20] );
        qf.dequant_4x4( dct4[i], h->dequant4_mf[CQM_4IY], 20 );
    }
    for( i=0; i<4; i++ )
    {
        qf.quant_8x8( dct8[i], h->quant8_mf[CQM_8IY][20], h->quant8_bias[CQM_8IY][20] );
        qf.dequant_8x8( dct8[i], h->dequant8_mf[CQM_8IY], 20 );
    }
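    /* Example: one quant+dequant round trip at a fixed QP (20) leaves
     * dct4/dct8 scaled the way the idcts expect, so the add_idct tests below
     * reconstruct plausible residuals instead of overflowing. */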
#define TEST_IDCT( name, src ) \
    if( dct_asm.name != dct_ref.name ) \
    { \
        set_func_name( #name ); \
        used_asm = 1; \
        memcpy( buf3, buf1, 32*32 ); \
        memcpy( buf4, buf1, 32*32 ); \
        memcpy( dct1, src, 512 ); \
        memcpy( dct2, src, 512 ); \
        call_c1( dct_c.name, buf3, (void*)dct1 ); \
        call_a1( dct_asm.name, buf4, (void*)dct2 ); \
        if( memcmp( buf3, buf4, 32*32 ) ) \
        { \
            ok = 0; \
            fprintf( stderr, #name " [FAILED]\n" ); \
        } \
        call_c2( dct_c.name, buf3, (void*)dct1 ); \
        call_a2( dct_asm.name, buf4, (void*)dct2 ); \
    }

    ok = 1; used_asm = 0;
    TEST_IDCT( add4x4_idct, dct4 );
    TEST_IDCT( add8x8_idct, dct4 );
    TEST_IDCT( add16x16_idct, dct4 );
    report( "add_idct4 :" );

    ok = 1; used_asm = 0;
    TEST_IDCT( add8x8_idct8, dct8 );
    TEST_IDCT( add16x16_idct8, dct8 );
    report( "add_idct8 :" );
#define TEST_DCTDC( name )\
    ok = 1; used_asm = 0;\
    if( dct_asm.name != dct_ref.name )\
    {\
        set_func_name( #name );\
        used_asm = 1;\
        uint16_t *p = (uint16_t*)buf1;\
        for( i=0; i<16 && ok; i++ )\
        {\
            for( j=0; j<16; j++ )\
                dct1[0][0][j] = !i ? (j^j>>1^j>>2^j>>3)&1 ? 4080 : -4080 /* max dc */\
                              : i<8 ? (*p++)&1 ? 4080 : -4080 /* max elements */\
                              : ((*p++)&0x1fff)-0x1000; /* general case */\
            memcpy( dct2, dct1, 32 );\
            call_c1( dct_c.name, dct1[0] );\
            call_a1( dct_asm.name, dct2[0] );\
            if( memcmp( dct1, dct2, 32 ) )\
                ok = 0;\
        }\
        call_c2( dct_c.name, dct1[0] );\
        call_a2( dct_asm.name, dct2[0] );\
    }\
    report( #name " :" );

    TEST_DCTDC( dct4x4dc );
    TEST_DCTDC( idct4x4dc );
#undef TEST_DCTDC
    x264_zigzag_function_t zigzag_c;
    x264_zigzag_function_t zigzag_ref;
    x264_zigzag_function_t zigzag_asm;

    DECLARE_ALIGNED_16( int16_t level1[64] );
    DECLARE_ALIGNED_16( int16_t level2[64] );

#define TEST_ZIGZAG_SCAN( name, t1, t2, dct, size ) \
    if( zigzag_asm.name != zigzag_ref.name ) \
    { \
        set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" ); \
        used_asm = 1; \
        memcpy( dct, buf1, size*sizeof(int16_t) ); \
        call_c( zigzag_c.name, t1, dct ); \
        call_a( zigzag_asm.name, t2, dct ); \
        if( memcmp( t1, t2, size*sizeof(int16_t) ) ) \
        { \
            ok = 0; \
            fprintf( stderr, #name " [FAILED]\n" ); \
        } \
    }

#define TEST_ZIGZAG_SUB( name, t1, t2, size ) \
    if( zigzag_asm.name != zigzag_ref.name ) \
    { \
        set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" ); \
        used_asm = 1; \
        memcpy( buf3, buf1, 16*FDEC_STRIDE ); \
        memcpy( buf4, buf1, 16*FDEC_STRIDE ); \
        call_c1( zigzag_c.name, t1, buf2, buf3 ); \
        call_a1( zigzag_asm.name, t2, buf2, buf4 ); \
        if( memcmp( t1, t2, size*sizeof(int16_t) ) || memcmp( buf3, buf4, 16*FDEC_STRIDE ) ) \
        { \
            ok = 0; \
            fprintf( stderr, #name " [FAILED]\n" ); \
        } \
        call_c2( zigzag_c.name, t1, buf2, buf3 ); \
        call_a2( zigzag_asm.name, t2, buf2, buf4 ); \
    }

    interlace = 0;
    x264_zigzag_init( 0, &zigzag_c, 0 );
    x264_zigzag_init( cpu_ref, &zigzag_ref, 0 );
    x264_zigzag_init( cpu_new, &zigzag_asm, 0 );

    ok = 1; used_asm = 0;
    TEST_ZIGZAG_SCAN( scan_8x8, level1, level2, (void*)dct1, 64 );
    TEST_ZIGZAG_SCAN( scan_4x4, level1, level2, dct1[0], 16 );
    TEST_ZIGZAG_SCAN( interleave_8x8_cavlc, level1, level2, (void*)dct1, 64 );
    TEST_ZIGZAG_SUB( sub_4x4, level1, level2, 16 );
    report( "zigzag_frame :" );

    interlace = 1;
    x264_zigzag_init( 0, &zigzag_c, 1 );
    x264_zigzag_init( cpu_ref, &zigzag_ref, 1 );
    x264_zigzag_init( cpu_new, &zigzag_asm, 1 );

    ok = 1; used_asm = 0;
    TEST_ZIGZAG_SCAN( scan_8x8, level1, level2, (void*)dct1, 64 );
    TEST_ZIGZAG_SCAN( scan_4x4, level1, level2, dct1[0], 16 );
    TEST_ZIGZAG_SUB( sub_4x4, level1, level2, 16 );
    report( "zigzag_field :" );
#undef TEST_ZIGZAG_SCAN
#undef TEST_ZIGZAG_SUB

    x264_cqm_delete( h );
    return ret;
}
static int check_mc( int cpu_ref, int cpu_new )
{
    x264_mc_functions_t mc_c;
    x264_mc_functions_t mc_ref;
    x264_mc_functions_t mc_a;
    x264_pixel_function_t pixel;

    uint8_t *src     = &buf1[2*32+2];
    uint8_t *src2[4] = { &buf1[3*64+2], &buf1[5*64+2],
                         &buf1[7*64+2], &buf1[9*64+2] };
    uint8_t *dst1    = buf3;
    uint8_t *dst2    = buf4;

    int dx, dy, i, j, k, w;
    int ret = 0, ok, used_asm;

    x264_mc_init( 0, &mc_c );
    x264_mc_init( cpu_ref, &mc_ref );
    x264_mc_init( cpu_new, &mc_a );
    x264_pixel_init( 0, &pixel );
#define MC_TEST_LUMA( w, h ) \
    if( mc_a.mc_luma != mc_ref.mc_luma && !(w&(w-1)) && h<=16 ) \
    { \
        set_func_name( "mc_luma_%dx%d", w, h ); \
        used_asm = 1; \
        memset( buf3, 0xCD, 1024 ); \
        memset( buf4, 0xCD, 1024 ); \
        call_c( mc_c.mc_luma, dst1, 32, src2, 64, dx, dy, w, h ); \
        call_a( mc_a.mc_luma, dst2, 32, src2, 64, dx, dy, w, h ); \
        if( memcmp( buf3, buf4, 1024 ) ) \
        { \
            fprintf( stderr, "mc_luma[mv(%d,%d) %2dx%-2d]     [FAILED]\n", dx, dy, w, h ); \
            ok = 0; \
        } \
    } \
    if( mc_a.get_ref != mc_ref.get_ref ) \
    { \
        uint8_t *ref = dst2; \
        int ref_stride = 32; \
        set_func_name( "get_ref_%dx%d", w, h ); \
        used_asm = 1; \
        memset( buf3, 0xCD, 1024 ); \
        memset( buf4, 0xCD, 1024 ); \
        call_c( mc_c.mc_luma, dst1, 32, src2, 64, dx, dy, w, h ); \
        ref = (uint8_t*)call_a( mc_a.get_ref, ref, &ref_stride, src2, 64, dx, dy, w, h ); \
        for( i=0; i<h; i++ ) \
            if( memcmp( dst1+i*32, ref+i*ref_stride, w ) ) \
            { \
                fprintf( stderr, "get_ref[mv(%d,%d) %2dx%-2d]     [FAILED]\n", dx, dy, w, h ); \
                ok = 0; \
                break; \
            } \
    }

#define MC_TEST_CHROMA( w, h ) \
    if( mc_a.mc_chroma != mc_ref.mc_chroma ) \
    { \
        set_func_name( "mc_chroma_%dx%d", w, h ); \
        used_asm = 1; \
        memset( buf3, 0xCD, 1024 ); \
        memset( buf4, 0xCD, 1024 ); \
        call_c( mc_c.mc_chroma, dst1, 16, src, 32, dx, dy, w, h ); \
        call_a( mc_a.mc_chroma, dst2, 16, src, 32, dx, dy, w, h ); \
        /* mc_chroma width=2 may write garbage to the right of dst. ignore that. */ \
        for( j=0; j<h; j++ ) \
            for( i=w; i<4; i++ ) \
                dst2[i+j*16] = dst1[i+j*16]; \
        if( memcmp( buf3, buf4, 1024 ) ) \
        { \
            fprintf( stderr, "mc_chroma[mv(%d,%d) %2dx%-2d]     [FAILED]\n", dx, dy, w, h ); \
            ok = 0; \
        } \
    }
    ok = 1; used_asm = 0;
    for( dy = -8; dy < 8; dy++ )
        for( dx = -128; dx < 128; dx++ )
        {
            if( rand()&15 ) continue; // running all of them is too slow
            MC_TEST_LUMA( 20, 18 );
            MC_TEST_LUMA( 16, 16 );
            MC_TEST_LUMA( 16, 8 );
            MC_TEST_LUMA( 12, 10 );
            MC_TEST_LUMA( 8, 16 );
            MC_TEST_LUMA( 8, 8 );
            MC_TEST_LUMA( 8, 4 );
            MC_TEST_LUMA( 4, 8 );
            MC_TEST_LUMA( 4, 4 );
        }
    report( "mc luma :" );

    ok = 1; used_asm = 0;
    for( dy = -1; dy < 9; dy++ )
        for( dx = -1; dx < 9; dx++ )
        {
            MC_TEST_CHROMA( 8, 8 );
            MC_TEST_CHROMA( 8, 4 );
            MC_TEST_CHROMA( 4, 8 );
            MC_TEST_CHROMA( 4, 4 );
            MC_TEST_CHROMA( 4, 2 );
            MC_TEST_CHROMA( 2, 4 );
            MC_TEST_CHROMA( 2, 2 );
        }
    report( "mc chroma :" );
#undef MC_TEST_LUMA
#undef MC_TEST_CHROMA
#define MC_TEST_AVG( name, weight ) \
    for( i = 0, ok = 1, used_asm = 0; i < 10; i++ ) \
    { \
        memcpy( buf3, buf1+320, 320 ); \
        memcpy( buf4, buf1+320, 320 ); \
        if( mc_a.name[i] != mc_ref.name[i] ) \
        { \
            set_func_name( "%s_%s", #name, pixel_names[i] ); \
            used_asm = 1; \
            call_c1( mc_c.name[i], buf3, 16, buf2+1, 16, buf1+18, 16, weight ); \
            call_a1( mc_a.name[i], buf4, 16, buf2+1, 16, buf1+18, 16, weight ); \
            if( memcmp( buf3, buf4, 320 ) ) \
            { \
                ok = 0; \
                fprintf( stderr, #name "[%d]: [FAILED]\n", i ); \
            } \
            call_c2( mc_c.name[i], buf3, 16, buf2+1, 16, buf1+18, 16, weight ); \
            call_a2( mc_a.name[i], buf4, 16, buf2+1, 16, buf1+18, 16, weight ); \
        } \
    }

    ok = 1; used_asm = 0;
    for( w = -63; w <= 127 && ok; w++ )
        MC_TEST_AVG( avg, w );
    report( "mc wpredb :" );
    if( mc_a.hpel_filter != mc_ref.hpel_filter )
    {
        uint8_t *src     = buf1+8+2*64;
        uint8_t *dstc[3] = { buf3+8, buf3+8+16*64, buf3+8+32*64 };
        uint8_t *dsta[3] = { buf4+8, buf4+8+16*64, buf4+8+32*64 };
        set_func_name( "hpel_filter" );
        ok = 1; used_asm = 1;
        memset( buf3, 0, 4096 );
        memset( buf4, 0, 4096 );
        call_c( mc_c.hpel_filter, dstc[0], dstc[1], dstc[2], src, 64, 48, 10 );
        call_a( mc_a.hpel_filter, dsta[0], dsta[1], dsta[2], src, 64, 48, 10 );
        for( i=0; i<3; i++ )
            for( j=0; j<10; j++ )
                //FIXME ideally the first pixels would match too, but they aren't actually used
                if( memcmp( dstc[i]+j*64+2, dsta[i]+j*64+2, 43 ) )
                {
                    ok = 0;
                    fprintf( stderr, "hpel filter differs at plane %c line %d\n", "hvc"[i], j );
                    for( k=0; k<48; k++ )
                        printf( "%02x%s", dstc[i][j*64+k], (k+1)&3 ? "" : " " );
                    printf( "\n" );
                    for( k=0; k<48; k++ )
                        printf( "%02x%s", dsta[i][j*64+k], (k+1)&3 ? "" : " " );
                    printf( "\n" );
                    break;
                }
        report( "hpel filter :" );
    }
    if( mc_a.frame_init_lowres_core != mc_ref.frame_init_lowres_core )
    {
        uint8_t *dstc[4] = { buf3, buf3+1024, buf3+2048, buf3+3072 };
        uint8_t *dsta[4] = { buf4, buf4+1024, buf4+2048, buf4+3072 };
        set_func_name( "lowres_init" );
        ok = 1; used_asm = 1;
        for( w=40; w<=48; w+=8 )
        {
            int stride = (w+8)&~15;
            call_c( mc_c.frame_init_lowres_core, buf1, dstc[0], dstc[1], dstc[2], dstc[3], w*2, stride, w, 16 );
            call_a( mc_a.frame_init_lowres_core, buf1, dsta[0], dsta[1], dsta[2], dsta[3], w*2, stride, w, 16 );
            for( i=0; i<16; i++ )
                for( j=0; j<4; j++ )
                    if( memcmp( dstc[j]+i*stride, dsta[j]+i*stride, w ) )
                    {
                        ok = 0;
                        fprintf( stderr, "frame_init_lowres differs at plane %d line %d\n", j, i );
                        for( k=0; k<w; k++ )
                            printf( "%d ", dstc[j][k+i*stride] );
                        printf( "\n" );
                        for( k=0; k<w; k++ )
                            printf( "%d ", dsta[j][k+i*stride] );
                        printf( "\n" );
                        break;
                    }
        }
        report( "lowres init :" );
    }

    return ret;
}
static int check_deblock( int cpu_ref, int cpu_new )
{
    x264_deblock_function_t db_c;
    x264_deblock_function_t db_ref;
    x264_deblock_function_t db_a;
    int ret = 0, ok = 1, used_asm = 0;
    int alphas[36], betas[36];
    int8_t tcs[36][4];
    int a, c, i, j;

    x264_deblock_init( 0, &db_c );
    x264_deblock_init( cpu_ref, &db_ref );
    x264_deblock_init( cpu_new, &db_a );

    /* not exactly the real values of a,b,tc but close enough */
    a = 255; c = 250;
    for( i = 35; i >= 0; i-- )
    {
        alphas[i] = a;
        betas[i] = (i+1)/2;
        tcs[i][0] = tcs[i][2] = (c+6)/10;
        tcs[i][1] = tcs[i][3] = (c+9)/20;
        a = a*9/10;
        c = c*9/10;
    }
#define TEST_DEBLOCK( name, align, ... ) \
    for( i = 0; i < 36; i++ ) \
    { \
        int off = 8*32 + (i&15)*4*!align; /* benchmark various alignments of h filter */ \
        for( j = 0; j < 1024; j++ ) \
            /* two distributions of random to exercise different failure modes */ \
            buf3[j] = rand() & (i&1 ? 0xf : 0xff); \
        memcpy( buf4, buf3, 1024 ); \
        if( db_a.name != db_ref.name ) \
        { \
            set_func_name( #name ); \
            used_asm = 1; \
            call_c1( db_c.name, buf3+off, 32, alphas[i], betas[i], ##__VA_ARGS__ ); \
            call_a1( db_a.name, buf4+off, 32, alphas[i], betas[i], ##__VA_ARGS__ ); \
            if( memcmp( buf3, buf4, 1024 ) ) \
            { \
                ok = 0; \
                fprintf( stderr, #name "(a=%d, b=%d): [FAILED]\n", alphas[i], betas[i] ); \
                break; \
            } \
            call_c2( db_c.name, buf3+off, 32, alphas[i], betas[i], ##__VA_ARGS__ ); \
            call_a2( db_a.name, buf4+off, 32, alphas[i], betas[i], ##__VA_ARGS__ ); \
        } \
    }
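/* i&1 alternates the sample range: 4-bit values give nearly-flat blocks that
 * mostly take the filtering path, while full 8-bit values produce large
 * gradients that mostly exercise the alpha/beta/tc rejection and clipping
 * paths. */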
    TEST_DEBLOCK( deblock_h_luma, 0, tcs[i] );
    TEST_DEBLOCK( deblock_v_luma, 1, tcs[i] );
    TEST_DEBLOCK( deblock_h_chroma, 0, tcs[i] );
    TEST_DEBLOCK( deblock_v_chroma, 1, tcs[i] );
    TEST_DEBLOCK( deblock_h_luma_intra, 0 );
    TEST_DEBLOCK( deblock_v_luma_intra, 1 );
    TEST_DEBLOCK( deblock_h_chroma_intra, 0 );
    TEST_DEBLOCK( deblock_v_chroma_intra, 1 );

    report( "deblock :" );

    return ret;
}
static int check_quant( int cpu_ref, int cpu_new )
{
    x264_quant_function_t qf_c;
    x264_quant_function_t qf_ref;
    x264_quant_function_t qf_a;
    DECLARE_ALIGNED_16( int16_t dct1[64] );
    DECLARE_ALIGNED_16( int16_t dct2[64] );
    DECLARE_ALIGNED_16( uint8_t cqm_buf[64] );
    int ret = 0, ok, used_asm;
    int oks[2] = {1,1}, used_asms[2] = {0,0};
    int i, i_cqm, qp;
    x264_t h_buf;
    x264_t *h = &h_buf;

    memset( h, 0, sizeof(*h) );
    h->pps = h->pps_array;
    x264_param_default( &h->param );
    h->param.rc.i_qp_min = 26;
    h->param.analyse.b_transform_8x8 = 1;

    for( i_cqm = 0; i_cqm < 4; i_cqm++ )
    {
        if( i_cqm == 0 )
        {
            for( i = 0; i < 6; i++ )
                h->pps->scaling_list[i] = x264_cqm_flat16;
            h->param.i_cqm_preset = h->pps->i_cqm_preset = X264_CQM_FLAT;
        }
        else if( i_cqm == 1 )
        {
            for( i = 0; i < 6; i++ )
                h->pps->scaling_list[i] = x264_cqm_jvt[i];
            h->param.i_cqm_preset = h->pps->i_cqm_preset = X264_CQM_JVT;
        }
        else
        {
            if( i_cqm == 2 )
                for( i = 0; i < 64; i++ )
                    cqm_buf[i] = 10 + rand() % 246;
            else
                for( i = 0; i < 64; i++ )
                    cqm_buf[i] = 1;
            for( i = 0; i < 6; i++ )
                h->pps->scaling_list[i] = cqm_buf;
            h->param.i_cqm_preset = h->pps->i_cqm_preset = X264_CQM_CUSTOM;
        }

        x264_cqm_init( h );
        x264_quant_init( h, 0, &qf_c );
        x264_quant_init( h, cpu_ref, &qf_ref );
        x264_quant_init( h, cpu_new, &qf_a );
#define INIT_QUANT8() \
{ \
    static const int scale1d[8] = {32,31,24,31,32,31,24,31}; \
    int x, y; \
    for( y = 0; y < 8; y++ ) \
        for( x = 0; x < 8; x++ ) \
        { \
            unsigned int scale = (255*scale1d[y]*scale1d[x])/16; \
            dct1[y*8+x] = dct2[y*8+x] = (rand()%(2*scale+1))-scale; \
        } \
}

#define INIT_QUANT4() \
{ \
    static const int scale1d[4] = {4,6,4,6}; \
    int x, y; \
    for( y = 0; y < 4; y++ ) \
        for( x = 0; x < 4; x++ ) \
        { \
            unsigned int scale = 255*scale1d[y]*scale1d[x]; \
            dct1[y*4+x] = dct2[y*4+x] = (rand()%(2*scale+1))-scale; \
        } \
}
#define TEST_QUANT_DC( name, cqm ) \
        if( qf_a.name != qf_ref.name ) \
        { \
            set_func_name( #name ); \
            used_asms[0] = 1; \
            for( qp = 51; qp > 0; qp-- ) \
            { \
                for( i = 0; i < 16; i++ ) \
                    dct1[i] = dct2[i] = (rand() & 0x1fff) - 0xfff; \
                call_c1( qf_c.name, (void*)dct1, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
                call_a1( qf_a.name, (void*)dct2, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
                if( memcmp( dct1, dct2, 16*2 ) ) \
                { \
                    oks[0] = 0; \
                    fprintf( stderr, #name "(cqm=%d): [FAILED]\n", i_cqm ); \
                    break; \
                } \
                call_c2( qf_c.name, (void*)dct1, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
                call_a2( qf_a.name, (void*)dct2, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
            } \
        }

#define TEST_QUANT( qname, block, w ) \
        if( qf_a.qname != qf_ref.qname ) \
        { \
            set_func_name( #qname ); \
            used_asms[0] = 1; \
            for( qp = 51; qp > 0; qp-- ) \
            { \
                INIT_QUANT##w() \
                call_c1( qf_c.qname, (void*)dct1, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
                call_a1( qf_a.qname, (void*)dct2, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
                if( memcmp( dct1, dct2, w*w*2 ) ) \
                { \
                    oks[0] = 0; \
                    fprintf( stderr, #qname "(qp=%d, cqm=%d, block="#block"): [FAILED]\n", qp, i_cqm ); \
                    break; \
                } \
                call_c2( qf_c.qname, (void*)dct1, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
                call_a2( qf_a.qname, (void*)dct2, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
            } \
        }

        TEST_QUANT( quant_8x8, CQM_8IY, 8 );
        TEST_QUANT( quant_8x8, CQM_8PY, 8 );
        TEST_QUANT( quant_4x4, CQM_4IY, 4 );
        TEST_QUANT( quant_4x4, CQM_4PY, 4 );
        TEST_QUANT_DC( quant_4x4_dc, **h->quant4_mf[CQM_4IY] );
        TEST_QUANT_DC( quant_2x2_dc, **h->quant4_mf[CQM_4IC] );
#define TEST_DEQUANT( qname, dqname, block, w ) \
        if( qf_a.dqname != qf_ref.dqname ) \
        { \
            set_func_name( "%s_%s", #dqname, i_cqm?"cqm":"flat" ); \
            used_asms[1] = 1; \
            for( qp = 51; qp > 0; qp-- ) \
            { \
                INIT_QUANT##w() \
                call_c( qf_c.qname, (void*)dct1, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
                memcpy( dct2, dct1, w*w*2 ); \
                call_c1( qf_c.dqname, (void*)dct1, h->dequant##w##_mf[block], qp ); \
                call_a1( qf_a.dqname, (void*)dct2, h->dequant##w##_mf[block], qp ); \
                if( memcmp( dct1, dct2, w*w*2 ) ) \
                { \
                    oks[1] = 0; \
                    fprintf( stderr, #dqname "(qp=%d, cqm=%d, block="#block"): [FAILED]\n", qp, i_cqm ); \
                    break; \
                } \
                call_c2( qf_c.dqname, (void*)dct1, h->dequant##w##_mf[block], qp ); \
                call_a2( qf_a.dqname, (void*)dct2, h->dequant##w##_mf[block], qp ); \
            } \
        }

        TEST_DEQUANT( quant_8x8, dequant_8x8, CQM_8IY, 8 );
        TEST_DEQUANT( quant_8x8, dequant_8x8, CQM_8PY, 8 );
        TEST_DEQUANT( quant_4x4, dequant_4x4, CQM_4IY, 4 );
        TEST_DEQUANT( quant_4x4, dequant_4x4, CQM_4PY, 4 );

        x264_cqm_delete( h );
    }

    ok = oks[0]; used_asm = used_asms[0];
    report( "quant :" );

    ok = oks[1]; used_asm = used_asms[1];
    report( "dequant :" );
    if( qf_a.denoise_dct != qf_ref.denoise_dct )
    {
        int size;
        ok = 1; used_asm = 1;
        for( size = 16; size <= 64; size += 48 )
        {
            set_func_name( "denoise_dct" );
            memcpy( dct1, buf1, size*2 );
            memcpy( dct2, buf1, size*2 );
            memcpy( buf3+256, buf3, 256 );
            call_c1( qf_c.denoise_dct, dct1, (uint32_t*)buf3,       (uint16_t*)buf2, size );
            call_a1( qf_a.denoise_dct, dct2, (uint32_t*)(buf3+256), (uint16_t*)buf2, size );
            if( memcmp( dct1, dct2, size*2 ) || memcmp( buf3+4, buf3+256+4, (size-1)*sizeof(uint32_t) ) )
                ok = 0;
            call_c2( qf_c.denoise_dct, dct1, (uint32_t*)buf3,       (uint16_t*)buf2, size );
            call_a2( qf_a.denoise_dct, dct2, (uint32_t*)(buf3+256), (uint16_t*)buf2, size );
        }
        report( "denoise dct :" );
    }
#define TEST_DECIMATE( qname, decname, block, w, ac, thresh ) \
    if( qf_a.decname != qf_ref.decname ) \
    { \
        set_func_name( #decname ); \
        used_asm = 1; \
        for( i = 0; i < 100; i++ ) \
        { \
            int result_c, result_a, idx; \
            for( idx = 0; idx < w*w; idx++ ) \
                dct1[idx] = !(rand()&3) + (!(rand()&15))*(rand()&3); \
            if( ac ) \
                dct1[0] = 0; \
            memcpy( dct2, dct1, w*w*2 ); \
            result_c = call_c1( qf_c.decname, (void*)dct2 ); \
            result_a = call_a1( qf_a.decname, (void*)dct2 ); \
            if( X264_MIN(result_c,thresh) != X264_MIN(result_a,thresh) ) \
            { \
                ok = 0; \
                fprintf( stderr, #decname ": [FAILED]\n" ); \
                break; \
            } \
            call_c2( qf_c.decname, (void*)dct2 ); \
            call_a2( qf_a.decname, (void*)dct2 ); \
        } \
    }
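/* The coefficients generated above are mostly 0/1 with an occasional larger
 * value, roughly the distribution decimate sees after quantization. Scores
 * are only compared after clamping to the threshold, since beyond it the
 * caller's decimate/don't-decimate decision no longer changes. */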
    ok = 1; used_asm = 0;
    TEST_DECIMATE( quant_8x8, decimate_score64, CQM_8IY, 8, 0, 6 );
    TEST_DECIMATE( quant_4x4, decimate_score16, CQM_4IY, 4, 0, 6 );
    TEST_DECIMATE( quant_4x4, decimate_score15, CQM_4IY, 4, 1, 7 );
    report( "decimate_score :" );

    return ret;
}
static int check_intra( int cpu_ref, int cpu_new )
{
    int ret = 0, ok = 1, used_asm = 0;
    int i, j, k;
    DECLARE_ALIGNED_16( uint8_t edge[33] );
    struct
    {
        x264_predict_t      predict_16x16[4+3];
        x264_predict_t      predict_8x8c[4+3];
        x264_predict8x8_t   predict_8x8[9+3];
        x264_predict_t      predict_4x4[9+3];
    } ip_c, ip_ref, ip_a;

    x264_predict_16x16_init( 0, ip_c.predict_16x16 );
    x264_predict_8x8c_init( 0, ip_c.predict_8x8c );
    x264_predict_8x8_init( 0, ip_c.predict_8x8 );
    x264_predict_4x4_init( 0, ip_c.predict_4x4 );

    x264_predict_16x16_init( cpu_ref, ip_ref.predict_16x16 );
    x264_predict_8x8c_init( cpu_ref, ip_ref.predict_8x8c );
    x264_predict_8x8_init( cpu_ref, ip_ref.predict_8x8 );
    x264_predict_4x4_init( cpu_ref, ip_ref.predict_4x4 );

    x264_predict_16x16_init( cpu_new, ip_a.predict_16x16 );
    x264_predict_8x8c_init( cpu_new, ip_a.predict_8x8c );
    x264_predict_8x8_init( cpu_new, ip_a.predict_8x8 );
    x264_predict_4x4_init( cpu_new, ip_a.predict_4x4 );

    x264_predict_8x8_filter( buf1+48, edge, ALL_NEIGHBORS, ALL_NEIGHBORS );

#define INTRA_TEST( name, dir, w, ... ) \
    if( ip_a.name[dir] != ip_ref.name[dir] ) \
    { \
        set_func_name( "intra_%s_%s", #name, intra_##name##_names[dir] ); \
        used_asm = 1; \
        memcpy( buf3, buf1, 32*20 ); \
        memcpy( buf4, buf1, 32*20 ); \
        call_c( ip_c.name[dir], buf3+48, ##__VA_ARGS__ ); \
        call_a( ip_a.name[dir], buf4+48, ##__VA_ARGS__ ); \
        if( memcmp( buf3, buf4, 32*20 ) ) \
        { \
            fprintf( stderr, #name "[%d] : [FAILED]\n", dir ); \
            ok = 0; \
            for( k=-1; k<16; k++ ) \
                printf( "%2x ", edge[16+k] ); \
            printf( "\n" ); \
            for( j=0; j<w; j++ ) \
            { \
                printf( "%2x ", edge[14-j] ); \
                for( k=0; k<w; k++ ) \
                    printf( "%2x ", buf4[48+k+j*32] ); \
                printf( "\n" ); \
            } \
            printf( "\n" ); \
            for( j=0; j<w; j++ ) \
            { \
                printf( "   " ); \
                for( k=0; k<w; k++ ) \
                    printf( "%2x ", buf3[48+k+j*32] ); \
                printf( "\n" ); \
            } \
        } \
    }

    for( i = 0; i < 12; i++ )
        INTRA_TEST( predict_4x4, i, 4 );
    for( i = 0; i < 7; i++ )
        INTRA_TEST( predict_8x8c, i, 8 );
    for( i = 0; i < 7; i++ )
        INTRA_TEST( predict_16x16, i, 16 );
    for( i = 0; i < 12; i++ )
        INTRA_TEST( predict_8x8, i, 8, edge );

    report( "intra pred :" );
    return ret;
}
#define DECL_CABAC(cpu) \
static void run_cabac_##cpu( uint8_t *dst )\
{\
    int i;\
    x264_cabac_t cb;\
    x264_cabac_context_init( &cb, SLICE_TYPE_P, 26, 0 );\
    x264_cabac_encode_init( &cb, dst, dst+0xff0 );\
    for( i=0; i<0x1000; i++ )\
        x264_cabac_encode_decision_##cpu( &cb, buf1[i]>>1, buf1[i]&1 );\
}
DECL_CABAC(c)
#ifdef HAVE_MMX
DECL_CABAC(asm)
#else
#define run_cabac_asm run_cabac_c
#endif

static int check_cabac( int cpu_ref, int cpu_new )
{
    int ret = 0, ok, used_asm = 1;
    if( cpu_ref || run_cabac_c == run_cabac_asm )
        return 0;
    set_func_name( "cabac_encode_decision" );
    memcpy( buf4, buf3, 0x1000 );
    call_c( run_cabac_c, buf3 );
    call_a( run_cabac_asm, buf4 );
    ok = !memcmp( buf3, buf4, 0x1000 );
    report( "cabac :" );
    return ret;
}
static int check_all_funcs( int cpu_ref, int cpu_new )
{
    return check_pixel( cpu_ref, cpu_new )
         + check_dct( cpu_ref, cpu_new )
         + check_mc( cpu_ref, cpu_new )
         + check_intra( cpu_ref, cpu_new )
         + check_deblock( cpu_ref, cpu_new )
         + check_quant( cpu_ref, cpu_new )
         + check_cabac( cpu_ref, cpu_new );
}
static int add_flags( int *cpu_ref, int *cpu_new, int flags, const char *name )
{
    *cpu_ref = *cpu_new;
    *cpu_new |= flags;
    if( *cpu_new & X264_CPU_SSE2_IS_FAST )
        *cpu_new &= ~X264_CPU_SSE2_IS_SLOW;
    if( !quiet )
        fprintf( stderr, "x264: %s\n", name );
    return check_all_funcs( *cpu_ref, *cpu_new );
}
static int check_all_flags( void )
{
    int ret = 0;
    int cpu0 = 0, cpu1 = 0;
#ifdef HAVE_MMX
    if( x264_cpu_detect() & X264_CPU_MMXEXT )
    {
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_MMX | X264_CPU_MMXEXT, "MMX" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "MMX Cache64" );
        cpu1 &= ~X264_CPU_CACHELINE_64;
#ifdef ARCH_X86
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_32, "MMX Cache32" );
        cpu1 &= ~X264_CPU_CACHELINE_32;
#endif
    }
    if( x264_cpu_detect() & X264_CPU_SSE2 )
    {
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE | X264_CPU_SSE2 | X264_CPU_SSE2_IS_SLOW, "SSE2Slow" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE2_IS_FAST, "SSE2Fast" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "SSE2Fast Cache64" );
    }
    if( x264_cpu_detect() & X264_CPU_SSE_MISALIGN )
    {
        cpu1 &= ~X264_CPU_CACHELINE_64;
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE_MISALIGN, "SSE_Misalign" );
        cpu1 &= ~X264_CPU_SSE_MISALIGN;
    }
    if( x264_cpu_detect() & X264_CPU_SSE3 )
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE3 | X264_CPU_CACHELINE_64, "SSE3" );
    if( x264_cpu_detect() & X264_CPU_SSSE3 )
    {
        cpu1 &= ~X264_CPU_CACHELINE_64;
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSSE3, "SSSE3" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "SSSE3 Cache64" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_PHADD_IS_FAST, "PHADD" );
    }
    if( x264_cpu_detect() & X264_CPU_SSE4 )
    {
        cpu1 &= ~X264_CPU_CACHELINE_64;
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE4, "SSE4" );
    }
#elif ARCH_PPC
    if( x264_cpu_detect() & X264_CPU_ALTIVEC )
    {
        fprintf( stderr, "x264: ALTIVEC against C\n" );
        ret = check_all_funcs( 0, X264_CPU_ALTIVEC );
    }
#endif
    return ret;
}
int main(int argc, char *argv[])
{
    int ret = 0;
    int i;

    if( argc > 1 && !strncmp( argv[1], "--bench", 7 ) )
    {
#if !defined(ARCH_X86) && !defined(ARCH_X86_64)
        fprintf( stderr, "no --bench for your cpu until you port rdtsc\n" );
        return 1;
#endif
        do_bench = 1;
        if( argv[1][7] == '=' )
        {
            bench_pattern = argv[1]+8;
            bench_pattern_len = strlen(bench_pattern);
        }
        argc--;
        argv++;
    }

    i = ( argc > 1 ) ? atoi(argv[1]) : x264_mdate();
    fprintf( stderr, "x264: using random seed %u\n", i );
    srand( i );

    buf1 = x264_malloc( 0x3e00 + 16*BENCH_ALIGNS );
    buf2 = buf1 + 0xf00;
    buf3 = buf2 + 0xf00;
    buf4 = buf3 + 0x1000;
    for( i=0; i<0x1e00; i++ )
        buf1[i] = rand() & 0xFF;
    memset( buf1+0x1e00, 0, 0x2000 );
    /* 16-byte alignment is guaranteed whenever it's useful, but some functions also vary in speed depending on %64 */
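    /* Each benchmark pass below shifts buf1 by 16 bytes and reruns all the
     * tests, so a function whose speed depends on address%64 gets measured
     * at every distinct alignment and print_bench() reports the pooled
     * average. */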
    if( do_bench )
        for( i=0; i<BENCH_ALIGNS && !ret; i++ )
        {
            buf2 = buf1 + 0xf00;
            buf3 = buf2 + 0xf00;
            buf4 = buf3 + 0x1000;
            ret |= x264_stack_pagealign( check_all_flags, i*16 );
            buf1 += 16;
            quiet = 1;
            fprintf( stderr, "%d/%d\r", i+1, BENCH_ALIGNS );
        }
    else
        ret = check_all_flags();

    if( ret )
    {
        fprintf( stderr, "x264: at least one test has failed. Go and fix that Right Now!\n" );
        return -1;
    }
    fprintf( stderr, "x264: All tests passed Yeah :)\n" );
    if( do_bench )
        print_bench();
    return 0;
}