1 /*****************************************************************************
2 * checkasm.c: assembly check tool
3 *****************************************************************************
4 * Copyright (C) 2003-2008 x264 project
6 * Authors: Loren Merritt <lorenm@u.washington.edu>
7 * Laurent Aimar <fenrir@via.ecp.fr>
8 * Fiona Glaser <fiona@x264.com>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
23 *****************************************************************************/
30 #include "common/common.h"
31 #include "common/cpu.h"
33 // GCC doesn't align stack variables on ARM, so use .bss
36 #define ALIGNED_16( var ) DECLARE_ALIGNED( static var, 16 )
39 /* buf1, buf2: initialised to random data; the tests shouldn't write into them */
40 uint8_t * buf1, * buf2;
41 /* buf3, buf4: used to store output */
42 uint8_t * buf3, * buf4;
46 #define report( name ) { \
47 if( used_asm && !quiet ) \
48 fprintf( stderr, " - %-21s [%s]\n", name, ok ? "OK" : "FAILED" ); \
52 #define BENCH_RUNS 100 // tradeoff between accuracy and speed
53 #define BENCH_ALIGNS 16 // number of stack+heap data alignments (another accuracy vs speed tradeoff)
54 #define MAX_FUNCS 1000 // just has to be big enough to hold all the existing functions
55 #define MAX_CPUS 10 // number of different combinations of cpu flags
58 void *pointer; // just for detecting duplicates
66 bench_t vers[MAX_CPUS];
70 int bench_pattern_len = 0;
71 const char *bench_pattern = "";
73 static bench_func_t benchs[MAX_FUNCS];
75 static const char *pixel_names[10] = { "16x16", "16x8", "8x16", "8x8", "8x4", "4x8", "4x4", "4x2", "2x4", "2x2" };
76 static const char *intra_predict_16x16_names[7] = { "v", "h", "dc", "p", "dcl", "dct", "dc8" };
77 static const char *intra_predict_8x8c_names[7] = { "dc", "h", "v", "p", "dcl", "dct", "dc8" };
78 static const char *intra_predict_4x4_names[12] = { "v", "h", "dc", "ddl", "ddr", "vr", "hd", "vl", "hu", "dcl", "dct", "dc8" };
79 static const char **intra_predict_8x8_names = intra_predict_4x4_names;
81 #define set_func_name(...) snprintf( func_name, sizeof(func_name), __VA_ARGS__ )
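/* read_time(): raw cycle/timebase counter used only for relative benchmarking
 * (rdtsc on x86, mftb on PPC, the ARMv7 PMU cycle counter via mrc below),
 * so the absolute units don't matter. */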
83 static inline uint32_t read_time(void)
86 #if defined(__GNUC__) && (defined(ARCH_X86) || defined(ARCH_X86_64))
87 asm volatile( "rdtsc" :"=a"(a) ::"edx" );
88 #elif defined(ARCH_PPC)
89 asm volatile( "mftb %0" : "=r" (a) );
90 #elif defined(ARCH_ARM) // ARMv7 only
91 asm volatile( "mrc p15, 0, %0, c9, c13, 0" : "=r"(a) );
96 static bench_t* get_bench( const char *name, int cpu )
99 for( i=0; benchs[i].name && strcmp(name, benchs[i].name); i++ )
100 assert( i < MAX_FUNCS );
101 if( !benchs[i].name )
102 benchs[i].name = strdup( name );
104 return &benchs[i].vers[0];
105 for( j=1; benchs[i].vers[j].cpu && benchs[i].vers[j].cpu != cpu; j++ )
106 assert( j < MAX_CPUS );
107 benchs[i].vers[j].cpu = cpu;
108 return &benchs[i].vers[j];
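/* Benchmark calibration: print_bench() below times 10000 empty read_time()
 * pairs, sorts them, and keeps only the middle 90% to estimate the per-sample
 * timing overhead ("nop time") that is subtracted from every result. */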
111 static int cmp_nop( const void *a, const void *b )
113 return *(uint16_t*)a - *(uint16_t*)b;
116 static int cmp_bench( const void *a, const void *b )
118     // asciibetical sort, except that numbers compare by magnitude (e.g. "8x8" sorts before "16x16")
119 const char *sa = ((bench_func_t*)a)->name;
120 const char *sb = ((bench_func_t*)b)->name;
123 if( !*sa && !*sb ) return 0;
124 if( isdigit(*sa) && isdigit(*sb) && isdigit(sa[1]) != isdigit(sb[1]) )
125 return isdigit(sa[1]) - isdigit(sb[1]);
126 if( *sa != *sb ) return *sa - *sb;
130 static void print_bench(void)
132 uint16_t nops[10000] = {0};
133 int i, j, k, nfuncs, nop_time=0;
135 for( i=0; i<10000; i++ )
138 nops[i] = read_time() - t;
140 qsort( nops, 10000, sizeof(uint16_t), cmp_nop );
141 for( i=500; i<9500; i++ )
144 printf( "nop: %d\n", nop_time );
146 for( i=0; i<MAX_FUNCS && benchs[i].name; i++ );
148 qsort( benchs, nfuncs, sizeof(bench_func_t), cmp_bench );
149 for( i=0; i<nfuncs; i++ )
150 for( j=0; j<MAX_CPUS && (!j || benchs[i].vers[j].cpu); j++ )
152 bench_t *b = &benchs[i].vers[j];
153 if( !b->den ) continue;
154 for( k=0; k<j && benchs[i].vers[k].pointer != b->pointer; k++ );
156 printf( "%s_%s%s: %"PRId64"\n", benchs[i].name,
157 b->cpu&X264_CPU_SSE4 ? "sse4" :
158 b->cpu&X264_CPU_SHUFFLE_IS_FAST ? "fastshuffle" :
159 b->cpu&X264_CPU_SSSE3 ? "ssse3" :
160 b->cpu&X264_CPU_SSE3 ? "sse3" :
161                     /* print sse2slow only if there's also an sse2fast version of the same func */
162 b->cpu&X264_CPU_SSE2_IS_SLOW && j<MAX_CPUS && b[1].cpu&X264_CPU_SSE2_IS_FAST && !(b[1].cpu&X264_CPU_SSE3) ? "sse2slow" :
163 b->cpu&X264_CPU_SSE2 ? "sse2" :
164 b->cpu&X264_CPU_MMX ? "mmx" :
165 b->cpu&X264_CPU_ALTIVEC ? "altivec" :
166 b->cpu&X264_CPU_NEON ? "neon" :
167 b->cpu&X264_CPU_ARMV6 ? "armv6" : "c",
168 b->cpu&X264_CPU_CACHELINE_32 ? "_c32" :
169 b->cpu&X264_CPU_CACHELINE_64 ? "_c64" :
170 b->cpu&X264_CPU_SSE_MISALIGN ? "_misalign" :
171 b->cpu&X264_CPU_LZCNT ? "_lzcnt" :
172 b->cpu&X264_CPU_FAST_NEON_MRC ? "_fast_mrc" : "",
173 ((int64_t)10*b->cycles/b->den - nop_time)/4 );
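    /* b->cycles/b->den is the average time of one timed sample; the x10 matches
     * the scale of nop_time, and the final /4 presumably converts a sample of
     * four back-to-back calls (see call_bench) into a per-call figure. */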
177 #if defined(ARCH_X86) || defined(ARCH_X86_64)
178 int x264_stack_pagealign( int (*func)(), int align );
180 #define x264_stack_pagealign( func, align ) func()
183 #define call_c1(func,...) func(__VA_ARGS__)
185 #if defined(ARCH_X86) || defined(_WIN64)
186 /* detect when callee-saved regs aren't saved.
187 * needs an explicit asm check because it only sometimes crashes in normal use. */
188 intptr_t x264_checkasm_call( intptr_t (*func)(), int *ok, ... );
189 #define call_a1(func,...) x264_checkasm_call((intptr_t(*)())func, &ok, __VA_ARGS__)
191 #define call_a1 call_c1
194 #define call_bench(func,cpu,...)\
195 if( do_bench && !strncmp(func_name, bench_pattern, bench_pattern_len) )\
200 call_a1(func, __VA_ARGS__);\
201 for( ti=0; ti<(cpu?BENCH_RUNS:BENCH_RUNS/4); ti++ )\
203 uint32_t t = read_time();\
208 t = read_time() - t;\
209 if( t*tcount <= tsum*4 && ti > 0 )\
215 bench_t *b = get_bench( func_name, cpu );\
221 /* for most functions, run benchmark and correctness test at the same time.
222 * for those that modify their inputs, run the above macros separately */
223 #define call_a(func,...) ({ call_a2(func,__VA_ARGS__); call_a1(func,__VA_ARGS__); })
224 #define call_c(func,...) ({ call_c2(func,__VA_ARGS__); call_c1(func,__VA_ARGS__); })
225 #define call_a2(func,...) ({ call_bench(func,cpu_new,__VA_ARGS__); })
226 #define call_c2(func,...) ({ call_bench(func,0,__VA_ARGS__); })
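/* Convention used throughout the tests: call_c* runs the plain C reference,
 * call_a* runs the asm candidate (through x264_checkasm_call, which verifies
 * that callee-saved registers survive). The *1 variants run once for the
 * correctness check; the *2 variants only feed the benchmark loop. */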
229 static int check_pixel( int cpu_ref, int cpu_new )
231 x264_pixel_function_t pixel_c;
232 x264_pixel_function_t pixel_ref;
233 x264_pixel_function_t pixel_asm;
234 x264_predict_t predict_16x16[4+3];
235 x264_predict_t predict_8x8c[4+3];
236 x264_predict_t predict_4x4[9+3];
237 x264_predict8x8_t predict_8x8[9+3];
238 x264_predict_8x8_filter_t predict_8x8_filter;
239 ALIGNED_16( uint8_t edge[33] );
240 uint16_t cost_mv[32];
241 int ret = 0, ok, used_asm;
244 x264_pixel_init( 0, &pixel_c );
245 x264_pixel_init( cpu_ref, &pixel_ref );
246 x264_pixel_init( cpu_new, &pixel_asm );
247 x264_predict_16x16_init( 0, predict_16x16 );
248 x264_predict_8x8c_init( 0, predict_8x8c );
249 x264_predict_8x8_init( 0, predict_8x8, &predict_8x8_filter );
250 x264_predict_4x4_init( 0, predict_4x4 );
251 predict_8x8_filter( buf2+40, edge, ALL_NEIGHBORS, ALL_NEIGHBORS );
254 for( i=0; i<256; i++ )
259 buf3[i] = ~(buf4[i] = -(z&1));
261 // random pattern made of maxed pixel differences, in case an intermediate value overflows
262 for( ; i<0x1000; i++ )
263 buf3[i] = ~(buf4[i] = -(buf1[i&~0x88]&1));
265 #define TEST_PIXEL( name, align ) \
266 for( i = 0, ok = 1, used_asm = 0; i < 7; i++ ) \
268 int res_c, res_asm; \
269 if( pixel_asm.name[i] != pixel_ref.name[i] ) \
271 set_func_name( "%s_%s", #name, pixel_names[i] ); \
273 for( j=0; j<64; j++ ) \
275 res_c = call_c( pixel_c.name[i], buf1, 16, buf2+j*!align, 64 ); \
276 res_asm = call_a( pixel_asm.name[i], buf1, 16, buf2+j*!align, 64 ); \
277 if( res_c != res_asm ) \
280 fprintf( stderr, #name "[%d]: %d != %d [FAILED]\n", i, res_c, res_asm ); \
284 for( j=0; j<0x1000 && ok; j+=256 ) \
286 res_c = pixel_c .name[i]( buf3+j, 16, buf4+j, 16 ); \
287 res_asm = pixel_asm.name[i]( buf3+j, 16, buf4+j, 16 ); \
288 if( res_c != res_asm ) \
291 fprintf( stderr, #name "[%d]: overflow %d != %d\n", i, res_c, res_asm ); \
296 report( "pixel " #name " :" );
298 TEST_PIXEL( sad, 0 );
299 TEST_PIXEL( sad_aligned, 1 );
300 TEST_PIXEL( ssd, 1 );
301 TEST_PIXEL( satd, 0 );
302 TEST_PIXEL( sa8d, 1 );
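    /* The second TEST_PIXEL argument marks functions whose pix2 argument must
     * be 16-byte aligned (sad_aligned, ssd, sa8d): for those the j offset in
     * the macro above is suppressed by the *!align factor. */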
304 #define TEST_PIXEL_X( N ) \
305 for( i = 0, ok = 1, used_asm = 0; i < 7; i++ ) \
307 int res_c[4]={0}, res_asm[4]={0}; \
308 if( pixel_asm.sad_x##N[i] && pixel_asm.sad_x##N[i] != pixel_ref.sad_x##N[i] ) \
310 set_func_name( "sad_x%d_%s", N, pixel_names[i] ); \
312 for( j=0; j<64; j++) \
314 uint8_t *pix2 = buf2+j; \
315 res_c[0] = pixel_c.sad[i]( buf1, 16, pix2, 64 ); \
316 res_c[1] = pixel_c.sad[i]( buf1, 16, pix2+6, 64 ); \
317 res_c[2] = pixel_c.sad[i]( buf1, 16, pix2+1, 64 ); \
320 res_c[3] = pixel_c.sad[i]( buf1, 16, pix2+10, 64 ); \
321 call_a( pixel_asm.sad_x4[i], buf1, pix2, pix2+6, pix2+1, pix2+10, 64, res_asm ); \
324 call_a( pixel_asm.sad_x3[i], buf1, pix2, pix2+6, pix2+1, 64, res_asm ); \
325 if( memcmp(res_c, res_asm, sizeof(res_c)) ) \
328 fprintf( stderr, "sad_x"#N"[%d]: %d,%d,%d,%d != %d,%d,%d,%d [FAILED]\n", \
329 i, res_c[0], res_c[1], res_c[2], res_c[3], \
330 res_asm[0], res_asm[1], res_asm[2], res_asm[3] ); \
333 call_c2( pixel_c.sad_x4[i], buf1, pix2, pix2+6, pix2+1, pix2+10, 64, res_asm ); \
335 call_c2( pixel_c.sad_x3[i], buf1, pix2, pix2+6, pix2+1, 64, res_asm ); \
339 report( "pixel sad_x"#N" :" );
344 #define TEST_PIXEL_VAR( i ) \
345 if( pixel_asm.var[i] != pixel_ref.var[i] ) \
347 set_func_name( "%s_%s", "var", pixel_names[i] ); \
349 /* abi-check wrapper can't return uint64_t, so separate it from return value check */\
350 call_c1( pixel_c.var[i], buf1, 16 ); \
351 call_a1( pixel_asm.var[i], buf1, 16 ); \
352 uint64_t res_c = pixel_c.var[i]( buf1, 16 ); \
353 uint64_t res_asm = pixel_asm.var[i]( buf1, 16 ); \
354 if( res_c != res_asm ) \
357 fprintf( stderr, "var[%d]: %d %d != %d %d [FAILED]\n", i, (int)res_c, (int)(res_c>>32), (int)res_asm, (int)(res_asm>>32) ); \
359 call_c2( pixel_c.var[i], buf1, 16 ); \
360 call_a2( pixel_asm.var[i], buf1, 16 ); \
363 ok = 1; used_asm = 0;
364 TEST_PIXEL_VAR( PIXEL_16x16 );
365 TEST_PIXEL_VAR( PIXEL_8x8 );
366 report( "pixel var :" );
368 ok = 1; used_asm = 0;
369 if( pixel_asm.var2_8x8 != pixel_ref.var2_8x8 )
371 int res_c, res_asm, ssd_c, ssd_asm;
372 set_func_name( "var2_8x8" );
374 res_c = call_c( pixel_c.var2_8x8, buf1, 16, buf2, 16, &ssd_c );
375 res_asm = call_a( pixel_asm.var2_8x8, buf1, 16, buf2, 16, &ssd_asm );
376 if( res_c != res_asm || ssd_c != ssd_asm )
379 fprintf( stderr, "var[%d]: %d != %d or %d != %d [FAILED]\n", i, res_c, res_asm, ssd_c, ssd_asm );
383 report( "pixel var2 :" );
385 for( i=0, ok=1, used_asm=0; i<4; i++ )
386 if( pixel_asm.hadamard_ac[i] != pixel_ref.hadamard_ac[i] )
388 set_func_name( "hadamard_ac_%s", pixel_names[i] );
390 for( j=0; j<32; j++ )
392 uint8_t *pix = (j&16 ? buf1 : buf3) + (j&15)*256;
393 call_c1( pixel_c.hadamard_ac[i], buf1, 16 );
394 call_a1( pixel_asm.hadamard_ac[i], buf1, 16 );
395 uint64_t rc = pixel_c.hadamard_ac[i]( pix, 16 );
396 uint64_t ra = pixel_asm.hadamard_ac[i]( pix, 16 );
400 fprintf( stderr, "hadamard_ac[%d]: %d,%d != %d,%d\n", i, (int)rc, (int)(rc>>32), (int)ra, (int)(ra>>32) );
404 call_c2( pixel_c.hadamard_ac[i], buf1, 16 );
405 call_a2( pixel_asm.hadamard_ac[i], buf1, 16 );
407 report( "pixel hadamard_ac :" );
409 #define TEST_INTRA_MBCMP( name, pred, satd, i8x8, ... ) \
410 if( pixel_asm.name && pixel_asm.name != pixel_ref.name ) \
412 int res_c[3], res_asm[3]; \
413 set_func_name( #name );\
415 memcpy( buf3, buf2, 1024 ); \
416 for( i=0; i<3; i++ ) \
418 pred[i]( buf3+48, ##__VA_ARGS__ ); \
419 res_c[i] = pixel_c.satd( buf1+48, 16, buf3+48, 32 ); \
421 call_a( pixel_asm.name, buf1+48, i8x8 ? edge : buf3+48, res_asm ); \
422 if( memcmp(res_c, res_asm, sizeof(res_c)) ) \
425 fprintf( stderr, #name": %d,%d,%d != %d,%d,%d [FAILED]\n", \
426 res_c[0], res_c[1], res_c[2], \
427 res_asm[0], res_asm[1], res_asm[2] ); \
431 ok = 1; used_asm = 0;
432 TEST_INTRA_MBCMP( intra_satd_x3_16x16, predict_16x16, satd[PIXEL_16x16], 0 );
433 TEST_INTRA_MBCMP( intra_satd_x3_8x8c , predict_8x8c , satd[PIXEL_8x8] , 0 );
434 TEST_INTRA_MBCMP( intra_satd_x3_4x4 , predict_4x4 , satd[PIXEL_4x4] , 0 );
435 TEST_INTRA_MBCMP( intra_sa8d_x3_8x8 , predict_8x8 , sa8d[PIXEL_8x8] , 1, edge );
436 report( "intra satd_x3 :" );
437 TEST_INTRA_MBCMP( intra_sad_x3_16x16 , predict_16x16, sad [PIXEL_16x16], 0 );
438 TEST_INTRA_MBCMP( intra_sad_x3_8x8c , predict_8x8c , sad [PIXEL_8x8] , 0 );
439 TEST_INTRA_MBCMP( intra_sad_x3_8x8 , predict_8x8 , sad [PIXEL_8x8] , 1, edge );
440 TEST_INTRA_MBCMP( intra_sad_x3_4x4 , predict_4x4 , sad [PIXEL_4x4] , 0 );
441 report( "intra sad_x3 :" );
443 if( pixel_asm.ssim_4x4x2_core != pixel_ref.ssim_4x4x2_core ||
444 pixel_asm.ssim_end4 != pixel_ref.ssim_end4 )
447 ALIGNED_16( int sums[5][4] ) = {{0}};
450 res_c = x264_pixel_ssim_wxh( &pixel_c, buf1+2, 32, buf2+2, 32, 32, 28, buf3 );
451 res_a = x264_pixel_ssim_wxh( &pixel_asm, buf1+2, 32, buf2+2, 32, 32, 28, buf3 );
452 if( fabs(res_c - res_a) > 1e-6 )
455 fprintf( stderr, "ssim: %.7f != %.7f [FAILED]\n", res_c, res_a );
457 set_func_name( "ssim_core" );
458 call_c2( pixel_c.ssim_4x4x2_core, buf1+2, 32, buf2+2, 32, sums );
459 call_a2( pixel_asm.ssim_4x4x2_core, buf1+2, 32, buf2+2, 32, sums );
460 set_func_name( "ssim_end" );
461 call_c2( pixel_c.ssim_end4, sums, sums, 4 );
462 call_a2( pixel_asm.ssim_end4, sums, sums, 4 );
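        /* SSIM correctness is checked through the x264_pixel_ssim_wxh() wrapper
         * with a 1e-6 tolerance; the 4x4x2_core and end4 kernels themselves are
         * only benchmarked here. */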
466 ok = 1; used_asm = 0;
467 for( i=0; i<32; i++ )
469 for( i=0; i<100 && ok; i++ )
470 if( pixel_asm.ads[i&3] != pixel_ref.ads[i&3] )
472 ALIGNED_16( uint16_t sums[72] );
473 ALIGNED_16( int dc[4] );
474 int16_t mvs_a[32], mvs_c[32];
476 int thresh = rand() & 0x3fff;
477 set_func_name( "esa_ads" );
478 for( j=0; j<72; j++ )
479 sums[j] = rand() & 0x3fff;
481 dc[j] = rand() & 0x3fff;
483 mvn_c = call_c( pixel_c.ads[i&3], dc, sums, 32, cost_mv, mvs_c, 28, thresh );
484 mvn_a = call_a( pixel_asm.ads[i&3], dc, sums, 32, cost_mv, mvs_a, 28, thresh );
485 if( mvn_c != mvn_a || memcmp( mvs_c, mvs_a, mvn_c*sizeof(*mvs_c) ) )
488 printf("c%d: ", i&3);
489 for(j=0; j<mvn_c; j++)
490 printf("%d ", mvs_c[j]);
491 printf("\na%d: ", i&3);
492 for(j=0; j<mvn_a; j++)
493 printf("%d ", mvs_a[j]);
497 report( "esa ads:" );
502 static int check_dct( int cpu_ref, int cpu_new )
504 x264_dct_function_t dct_c;
505 x264_dct_function_t dct_ref;
506 x264_dct_function_t dct_asm;
507 x264_quant_function_t qf;
508 int ret = 0, ok, used_asm, i, j, interlace;
509 ALIGNED_16( int16_t dct1[16][16] );
510 ALIGNED_16( int16_t dct2[16][16] );
511 ALIGNED_16( int16_t dct4[16][16] );
512 ALIGNED_16( int16_t dct8[4][64] );
513 ALIGNED_8( int16_t dctdc[2][4] );
517 x264_dct_init( 0, &dct_c );
518 x264_dct_init( cpu_ref, &dct_ref);
519 x264_dct_init( cpu_new, &dct_asm );
521 memset( h, 0, sizeof(*h) );
522 h->pps = h->pps_array;
523 x264_param_default( &h->param );
524 h->chroma_qp_table = i_chroma_qp_table + 12;
525 h->param.analyse.i_luma_deadzone[0] = 0;
526 h->param.analyse.i_luma_deadzone[1] = 0;
527 h->param.analyse.b_transform_8x8 = 1;
529 h->pps->scaling_list[i] = x264_cqm_flat16;
531 x264_quant_init( h, 0, &qf );
533 #define TEST_DCT( name, t1, t2, size ) \
534 if( dct_asm.name != dct_ref.name ) \
536 set_func_name( #name );\
538 call_c( dct_c.name, t1, buf1, buf2 ); \
539 call_a( dct_asm.name, t2, buf1, buf2 ); \
540 if( memcmp( t1, t2, size ) ) \
543 fprintf( stderr, #name " [FAILED]\n" ); \
546 ok = 1; used_asm = 0;
547 TEST_DCT( sub4x4_dct, dct1[0], dct2[0], 16*2 );
548 TEST_DCT( sub8x8_dct, dct1, dct2, 16*2*4 );
549 TEST_DCT( sub8x8_dct_dc, dctdc[0], dctdc[1], 4*2 );
550 TEST_DCT( sub16x16_dct, dct1, dct2, 16*2*16 );
551 report( "sub_dct4 :" );
553 ok = 1; used_asm = 0;
554 TEST_DCT( sub8x8_dct8, (void*)dct1[0], (void*)dct2[0], 64*2 );
555 TEST_DCT( sub16x16_dct8, (void*)dct1, (void*)dct2, 64*2*4 );
556 report( "sub_dct8 :" );
559 // fdct and idct are denormalized by different factors, so quant/dequant
560 // is needed to force the coefs into the right range.
561 dct_c.sub16x16_dct( dct4, buf1, buf2 );
562 dct_c.sub16x16_dct8( dct8, buf1, buf2 );
563 for( i=0; i<16; i++ )
565 qf.quant_4x4( dct4[i], h->quant4_mf[CQM_4IY][20], h->quant4_bias[CQM_4IY][20] );
566 qf.dequant_4x4( dct4[i], h->dequant4_mf[CQM_4IY], 20 );
570 qf.quant_8x8( dct8[i], h->quant8_mf[CQM_8IY][20], h->quant8_bias[CQM_8IY][20] );
571 qf.dequant_8x8( dct8[i], h->dequant8_mf[CQM_8IY], 20 );
574 #define TEST_IDCT( name, src ) \
575 if( dct_asm.name != dct_ref.name ) \
577 set_func_name( #name );\
579 memcpy( buf3, buf1, 32*32 ); \
580 memcpy( buf4, buf1, 32*32 ); \
581 memcpy( dct1, src, 512 ); \
582 memcpy( dct2, src, 512 ); \
583 call_c1( dct_c.name, buf3, (void*)dct1 ); \
584 call_a1( dct_asm.name, buf4, (void*)dct2 ); \
585 if( memcmp( buf3, buf4, 32*32 ) ) \
588 fprintf( stderr, #name " [FAILED]\n" ); \
590 call_c2( dct_c.name, buf3, (void*)dct1 ); \
591 call_a2( dct_asm.name, buf4, (void*)dct2 ); \
593 ok = 1; used_asm = 0;
594 TEST_IDCT( add4x4_idct, dct4 );
595 TEST_IDCT( add8x8_idct, dct4 );
596 TEST_IDCT( add8x8_idct_dc, dct4 );
597 TEST_IDCT( add16x16_idct, dct4 );
598 TEST_IDCT( add16x16_idct_dc, dct4 );
599 report( "add_idct4 :" );
601 ok = 1; used_asm = 0;
602 TEST_IDCT( add8x8_idct8, dct8 );
603 TEST_IDCT( add16x16_idct8, dct8 );
604 report( "add_idct8 :" );
607 #define TEST_DCTDC( name )\
608 ok = 1; used_asm = 0;\
609 if( dct_asm.name != dct_ref.name )\
611 set_func_name( #name );\
613 uint16_t *p = (uint16_t*)buf1;\
614 for( i=0; i<16 && ok; i++ )\
616 for( j=0; j<16; j++ )\
617 dct1[0][j] = !i ? (j^j>>1^j>>2^j>>3)&1 ? 4080 : -4080 /* max dc */\
618 : i<8 ? (*p++)&1 ? 4080 : -4080 /* max elements */\
619 : ((*p++)&0x1fff)-0x1000; /* general case */\
620 memcpy( dct2, dct1, 32 );\
621 call_c1( dct_c.name, dct1[0] );\
622 call_a1( dct_asm.name, dct2[0] );\
623 if( memcmp( dct1, dct2, 32 ) )\
626 call_c2( dct_c.name, dct1[0] );\
627 call_a2( dct_asm.name, dct2[0] );\
629 report( #name " :" );
631 TEST_DCTDC( dct4x4dc );
632 TEST_DCTDC( idct4x4dc );
635 x264_zigzag_function_t zigzag_c;
636 x264_zigzag_function_t zigzag_ref;
637 x264_zigzag_function_t zigzag_asm;
639 ALIGNED_16( int16_t level1[64] );
640 ALIGNED_16( int16_t level2[64] );
642 #define TEST_ZIGZAG_SCAN( name, t1, t2, dct, size ) \
643 if( zigzag_asm.name != zigzag_ref.name ) \
645 set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" );\
647 memcpy(dct, buf1, size*sizeof(int16_t));\
648 call_c( zigzag_c.name, t1, dct ); \
649 call_a( zigzag_asm.name, t2, dct ); \
650 if( memcmp( t1, t2, size*sizeof(int16_t) ) ) \
653 fprintf( stderr, #name " [FAILED]\n" ); \
657 #define TEST_ZIGZAG_SUB( name, t1, t2, size ) \
658 if( zigzag_asm.name != zigzag_ref.name ) \
661 set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" );\
663 memcpy( buf3, buf1, 16*FDEC_STRIDE ); \
664 memcpy( buf4, buf1, 16*FDEC_STRIDE ); \
665 nz_c = call_c1( zigzag_c.name, t1, buf2, buf3 ); \
666 nz_a = call_a1( zigzag_asm.name, t2, buf2, buf4 ); \
667 if( memcmp( t1, t2, size*sizeof(int16_t) )|| memcmp( buf3, buf4, 16*FDEC_STRIDE ) || nz_c != nz_a ) \
670 fprintf( stderr, #name " [FAILED]\n" ); \
672 call_c2( zigzag_c.name, t1, buf2, buf3 ); \
673 call_a2( zigzag_asm.name, t2, buf2, buf4 ); \
676 #define TEST_ZIGZAG_SUBAC( name, t1, t2 ) \
677 if( zigzag_asm.name != zigzag_ref.name ) \
680 int16_t dc_a, dc_c; \
681 set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" );\
683 for( i = 0; i < 2; i++ ) \
685 memcpy( buf3, buf2, 16*FDEC_STRIDE ); \
686 memcpy( buf4, buf2, 16*FDEC_STRIDE ); \
687 for( j = 0; j < 4; j++ ) \
689 memcpy( buf3 + j*FDEC_STRIDE, (i?buf1:buf2) + j*FENC_STRIDE, 4 ); \
690 memcpy( buf4 + j*FDEC_STRIDE, (i?buf1:buf2) + j*FENC_STRIDE, 4 ); \
692 nz_c = call_c1( zigzag_c.name, t1, buf2, buf3, &dc_c ); \
693 nz_a = call_a1( zigzag_asm.name, t2, buf2, buf4, &dc_a ); \
694 if( memcmp( t1+1, t2+1, 15*sizeof(int16_t) ) || memcmp( buf3, buf4, 16*FDEC_STRIDE ) || nz_c != nz_a || dc_c != dc_a ) \
697 fprintf( stderr, #name " [FAILED]\n" ); \
701 call_c2( zigzag_c.name, t1, buf2, buf3, &dc_c ); \
702 call_a2( zigzag_asm.name, t2, buf2, buf4, &dc_a ); \
705 #define TEST_INTERLEAVE( name, t1, t2, dct, size ) \
706 if( zigzag_asm.name != zigzag_ref.name ) \
708 for( j=0; j<100; j++ ) \
710 set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" );\
712 memcpy(dct, buf1, size*sizeof(int16_t));\
713 for( i=0; i<size; i++ ) \
714 dct[i] = rand()&0x1F ? 0 : dct[i]; \
715 memcpy(buf3, buf4, 10*sizeof(uint8_t)); \
716 call_c( zigzag_c.name, t1, dct, buf3 ); \
717 call_a( zigzag_asm.name, t2, dct, buf4 ); \
718 if( memcmp( t1, t2, size*sizeof(int16_t) ) || memcmp( buf3, buf4, 10*sizeof(uint8_t) ) ) \
726 x264_zigzag_init( 0, &zigzag_c, 0 );
727 x264_zigzag_init( cpu_ref, &zigzag_ref, 0 );
728 x264_zigzag_init( cpu_new, &zigzag_asm, 0 );
730 ok = 1; used_asm = 0;
731 TEST_ZIGZAG_SCAN( scan_8x8, level1, level2, (void*)dct1, 64 );
732 TEST_ZIGZAG_SCAN( scan_4x4, level1, level2, dct1[0], 16 );
733 TEST_ZIGZAG_SUB( sub_4x4, level1, level2, 16 );
734 TEST_ZIGZAG_SUBAC( sub_4x4ac, level1, level2 );
735 report( "zigzag_frame :" );
738 x264_zigzag_init( 0, &zigzag_c, 1 );
739 x264_zigzag_init( cpu_ref, &zigzag_ref, 1 );
740 x264_zigzag_init( cpu_new, &zigzag_asm, 1 );
742 ok = 1; used_asm = 0;
743 TEST_ZIGZAG_SCAN( scan_8x8, level1, level2, (void*)dct1, 64 );
744 TEST_ZIGZAG_SCAN( scan_4x4, level1, level2, dct1[0], 16 );
745 TEST_ZIGZAG_SUB( sub_4x4, level1, level2, 16 );
746 TEST_ZIGZAG_SUBAC( sub_4x4ac, level1, level2 );
747 report( "zigzag_field :" );
749 ok = 1; used_asm = 0;
750 TEST_INTERLEAVE( interleave_8x8_cavlc, level1, level2, dct1[0], 64 );
751 report( "zigzag_interleave :" );
752 #undef TEST_ZIGZAG_SCAN
753 #undef TEST_ZIGZAG_SUB
758 static int check_mc( int cpu_ref, int cpu_new )
760 x264_mc_functions_t mc_c;
761 x264_mc_functions_t mc_ref;
762 x264_mc_functions_t mc_a;
763 x264_pixel_function_t pixel;
765 uint8_t *src = &buf1[2*64+2];
766 uint8_t *src2[4] = { &buf1[3*64+2], &buf1[5*64+2],
767 &buf1[7*64+2], &buf1[9*64+2] };
768 uint8_t *dst1 = buf3;
769 uint8_t *dst2 = buf4;
771 int dx, dy, i, j, k, w;
772 int ret = 0, ok, used_asm;
774 x264_mc_init( 0, &mc_c );
775 x264_mc_init( cpu_ref, &mc_ref );
776 x264_mc_init( cpu_new, &mc_a );
777 x264_pixel_init( 0, &pixel );
779 #define MC_TEST_LUMA( w, h ) \
780 if( mc_a.mc_luma != mc_ref.mc_luma && !(w&(w-1)) && h<=16 ) \
782 const x264_weight_t *weight = weight_none; \
783 set_func_name( "mc_luma_%dx%d", w, h );\
785 memset(buf3, 0xCD, 1024); \
786 memset(buf4, 0xCD, 1024); \
787 call_c( mc_c.mc_luma, dst1, 32, src2, 64, dx, dy, w, h, weight ); \
788 call_a( mc_a.mc_luma, dst2, 32, src2, 64, dx, dy, w, h, weight ); \
789 if( memcmp( buf3, buf4, 1024 ) ) \
791 fprintf( stderr, "mc_luma[mv(%d,%d) %2dx%-2d] [FAILED]\n", dx, dy, w, h ); \
795 if( mc_a.get_ref != mc_ref.get_ref ) \
797 uint8_t *ref = dst2; \
798 int ref_stride = 32; \
799 const x264_weight_t *weight = weight_none; \
800 set_func_name( "get_ref_%dx%d", w, h );\
802 memset(buf3, 0xCD, 1024); \
803 memset(buf4, 0xCD, 1024); \
804 call_c( mc_c.mc_luma, dst1, 32, src2, 64, dx, dy, w, h, weight ); \
805 ref = (uint8_t*) call_a( mc_a.get_ref, ref, &ref_stride, src2, 64, dx, dy, w, h, weight ); \
806 for( i=0; i<h; i++ ) \
807 if( memcmp( dst1+i*32, ref+i*ref_stride, w ) ) \
809 fprintf( stderr, "get_ref[mv(%d,%d) %2dx%-2d] [FAILED]\n", dx, dy, w, h ); \
815 #define MC_TEST_CHROMA( w, h ) \
816 if( mc_a.mc_chroma != mc_ref.mc_chroma ) \
818 set_func_name( "mc_chroma_%dx%d", w, h );\
820 memset(buf3, 0xCD, 1024); \
821 memset(buf4, 0xCD, 1024); \
822 call_c( mc_c.mc_chroma, dst1, 16, src, 64, dx, dy, w, h ); \
823 call_a( mc_a.mc_chroma, dst2, 16, src, 64, dx, dy, w, h ); \
824 /* mc_chroma width=2 may write garbage to the right of dst. ignore that. */\
825 for( j=0; j<h; j++ ) \
826 for( i=w; i<4; i++ ) \
827 dst2[i+j*16] = dst1[i+j*16]; \
828 if( memcmp( buf3, buf4, 1024 ) ) \
830 fprintf( stderr, "mc_chroma[mv(%d,%d) %2dx%-2d] [FAILED]\n", dx, dy, w, h ); \
834 ok = 1; used_asm = 0;
835 for( dy = -8; dy < 8; dy++ )
836 for( dx = -128; dx < 128; dx++ )
838 if( rand()&15 ) continue; // running all of them is too slow
839 MC_TEST_LUMA( 20, 18 );
840 MC_TEST_LUMA( 16, 16 );
841 MC_TEST_LUMA( 16, 8 );
842 MC_TEST_LUMA( 12, 10 );
843 MC_TEST_LUMA( 8, 16 );
844 MC_TEST_LUMA( 8, 8 );
845 MC_TEST_LUMA( 8, 4 );
846 MC_TEST_LUMA( 4, 8 );
847 MC_TEST_LUMA( 4, 4 );
849 report( "mc luma :" );
851 ok = 1; used_asm = 0;
852 for( dy = -1; dy < 9; dy++ )
853 for( dx = -128; dx < 128; dx++ )
855 if( rand()&15 ) continue;
856 MC_TEST_CHROMA( 8, 8 );
857 MC_TEST_CHROMA( 8, 4 );
858 MC_TEST_CHROMA( 4, 8 );
859 MC_TEST_CHROMA( 4, 4 );
860 MC_TEST_CHROMA( 4, 2 );
861 MC_TEST_CHROMA( 2, 4 );
862 MC_TEST_CHROMA( 2, 2 );
864 report( "mc chroma :" );
866 #undef MC_TEST_CHROMA
868 #define MC_TEST_AVG( name, weight ) \
869 for( i = 0, ok = 1, used_asm = 0; i < 10; i++ ) \
871 memcpy( buf3, buf1+320, 320 ); \
872 memcpy( buf4, buf1+320, 320 ); \
873 if( mc_a.name[i] != mc_ref.name[i] ) \
875 set_func_name( "%s_%s", #name, pixel_names[i] );\
877 call_c1( mc_c.name[i], buf3, 16, buf2+1, 16, buf1+18, 16, weight ); \
878 call_a1( mc_a.name[i], buf4, 16, buf2+1, 16, buf1+18, 16, weight ); \
879 if( memcmp( buf3, buf4, 320 ) ) \
882 fprintf( stderr, #name "[%d]: [FAILED]\n", i ); \
884 call_c2( mc_c.name[i], buf3, 16, buf2+1, 16, buf1+18, 16, weight ); \
885 call_a2( mc_a.name[i], buf4, 16, buf2+1, 16, buf1+18, 16, weight ); \
888 ok = 1; used_asm = 0;
889 for( w = -63; w <= 127 && ok; w++ )
890 MC_TEST_AVG( avg, w );
891 report( "mc wpredb :" );
893 #define MC_TEST_WEIGHT( name, weight, aligned ) \
894 int align_off = (aligned ? 0 : rand()%16); \
895 for( i = 1, ok = 1, used_asm = 0; i <= 5; i++ ) \
897 ALIGNED_16( uint8_t buffC[640] ); \
898 ALIGNED_16( uint8_t buffA[640] ); \
899 j = X264_MAX( i*4, 2 ); \
900 memset( buffC, 0, 640 ); \
901 memset( buffA, 0, 640 ); \
904 /* w12 is the same as w16 in some cases */ \
905 if( i == 3 && mc_a.name[i] == mc_a.name[i+1] ) \
907 if( mc_a.name[i] != mc_ref.name[i] ) \
910 set_func_name( "%s_w%d", #name, j ); \
912 call_c1( mc_c.weight[i], buffC, 32, buf2+align_off, 32, &weight, 16 ); \
913 mc_a.weight_cache(&ha, &weight); \
914 call_a1( weight.weightfn[i], buffA, 32, buf2+align_off, 32, &weight, 16 ); \
915 for( k = 0; k < 16; k++ ) \
916 if( memcmp( &buffC[k*32], &buffA[k*32], j ) ) \
919 fprintf( stderr, #name "[%d]: [FAILED] s:%d o:%d d%d\n", i, s, o, d ); \
922 call_c2( mc_c.weight[i], buffC, 32, buf2+align_off, 32, &weight, 16 ); \
923 call_a2( weight.weightfn[i], buffA, 32, buf2+align_off, 32, &weight, 16 ); \
927 ok = 1; used_asm = 0;
931 for( s = 0; s <= 127 && ok; s++ )
933 for( o = -128; o <= 127 && ok; o++ )
935 if( rand() & 2047 ) continue;
936 for( d = 0; d <= 7 && ok; d++ )
940 x264_weight_t weight = { .i_scale = s, .i_denom = d, .i_offset = o };
941 MC_TEST_WEIGHT( weight, weight, (align_cnt++ % 4) );
946 report( "mc weight :" );
948 ok = 1; used_asm = 0;
950 for( o = 0; o <= 127 && ok; o++ )
952 if( rand() & 15 ) continue;
953 x264_weight_t weight = { .i_scale = 1, .i_denom = 0, .i_offset = o };
954 MC_TEST_WEIGHT( offsetadd, weight, (align_cnt++ % 4) );
956 report( "mc offsetadd :" );
957 ok = 1; used_asm = 0;
958 for( o = -128; o < 0 && ok; o++ )
960 if( rand() & 15 ) continue;
961 x264_weight_t weight = { .i_scale = 1, .i_denom = 0, .i_offset = o };
962 MC_TEST_WEIGHT( offsetsub, weight, (align_cnt++ % 4) );
964 report( "mc offsetsub :" );
966 if( mc_a.hpel_filter != mc_ref.hpel_filter )
968 uint8_t *src = buf1+8+2*64;
969 uint8_t *dstc[3] = { buf3+8, buf3+8+16*64, buf3+8+32*64 };
970 uint8_t *dsta[3] = { buf4+8, buf4+8+16*64, buf4+8+32*64 };
971 void *tmp = buf3+49*64;
972 set_func_name( "hpel_filter" );
973 ok = 1; used_asm = 1;
974 memset( buf3, 0, 4096 );
975 memset( buf4, 0, 4096 );
976 call_c( mc_c.hpel_filter, dstc[0], dstc[1], dstc[2], src, 64, 48, 10, tmp );
977 call_a( mc_a.hpel_filter, dsta[0], dsta[1], dsta[2], src, 64, 48, 10, tmp );
979 for( j=0; j<10; j++ )
980 //FIXME ideally the first pixels would match too, but they aren't actually used
981 if( memcmp( dstc[i]+j*64+2, dsta[i]+j*64+2, 43 ) )
984 fprintf( stderr, "hpel filter differs at plane %c line %d\n", "hvc"[i], j );
985 for( k=0; k<48; k++ )
986 printf("%02x%s", dstc[i][j*64+k], (k+1)&3 ? "" : " ");
988 for( k=0; k<48; k++ )
989 printf("%02x%s", dsta[i][j*64+k], (k+1)&3 ? "" : " ");
993 report( "hpel filter :" );
996 if( mc_a.frame_init_lowres_core != mc_ref.frame_init_lowres_core )
998 uint8_t *dstc[4] = { buf3, buf3+1024, buf3+2048, buf3+3072 };
999 uint8_t *dsta[4] = { buf4, buf4+1024, buf4+2048, buf4+3072 };
1000 set_func_name( "lowres_init" );
1001 ok = 1; used_asm = 1;
1002 for( w=40; w<=48; w+=8 )
1004 int stride = (w+8)&~15;
1005 call_c( mc_c.frame_init_lowres_core, buf1, dstc[0], dstc[1], dstc[2], dstc[3], w*2, stride, w, 16 );
1006 call_a( mc_a.frame_init_lowres_core, buf1, dsta[0], dsta[1], dsta[2], dsta[3], w*2, stride, w, 16 );
1007 for( i=0; i<16; i++)
1010 if( memcmp( dstc[j]+i*stride, dsta[j]+i*stride, w ) )
1013 fprintf( stderr, "frame_init_lowres differs at plane %d line %d\n", j, i );
1014 for( k=0; k<w; k++ )
1015 printf( "%d ", dstc[j][k+i*stride] );
1017 for( k=0; k<w; k++ )
1018 printf( "%d ", dsta[j][k+i*stride] );
1024 report( "lowres init :" );
1027 #define INTEGRAL_INIT( name, size, ... )\
1028 if( mc_a.name != mc_ref.name )\
1031 set_func_name( #name );\
1033 memcpy( buf3, buf1, size*2*stride );\
1034 memcpy( buf4, buf1, size*2*stride );\
1035 uint16_t *sum = (uint16_t*)buf3;\
1036 call_c1( mc_c.name, __VA_ARGS__ );\
1037 sum = (uint16_t*)buf4;\
1038 call_a1( mc_a.name, __VA_ARGS__ );\
1039 if( memcmp( buf3, buf4, (stride-8)*2 )\
1040 || (size>9 && memcmp( buf3+18*stride, buf4+18*stride, (stride-8)*2 )))\
1042 call_c2( mc_c.name, __VA_ARGS__ );\
1043 call_a2( mc_a.name, __VA_ARGS__ );\
1045 ok = 1; used_asm = 0;
1046 INTEGRAL_INIT( integral_init4h, 2, sum+stride, buf2, stride );
1047 INTEGRAL_INIT( integral_init8h, 2, sum+stride, buf2, stride );
1048 INTEGRAL_INIT( integral_init4v, 14, sum, sum+9*stride, stride );
1049 INTEGRAL_INIT( integral_init8v, 9, sum, stride );
1050 report( "integral init :" );
1052 if( mc_a.mbtree_propagate_cost != mc_ref.mbtree_propagate_cost )
1054 ok = 1; used_asm = 1;
1055 set_func_name( "mbtree_propagate" );
1056 int *dsta = (int*)buf3;
1057 int *dstc = dsta+400;
1058 uint16_t *prop = (uint16_t*)buf1;
1059 uint16_t *intra = (uint16_t*)buf4;
1060 uint16_t *inter = intra+400;
1061 uint16_t *qscale = inter+400;
1062 uint16_t *rand = (uint16_t*)buf2;
1064 for( i=0; i<400; i++ )
1066 intra[i] = *rand++ & 0x7fff;
1067 intra[i] += !intra[i];
1068 inter[i] = *rand++ & 0x7fff;
1069 qscale[i] = *rand++ & 0x7fff;
1071 call_c( mc_c.mbtree_propagate_cost, dstc, prop, intra, inter, qscale, 400 );
1072 call_a( mc_a.mbtree_propagate_cost, dsta, prop, intra, inter, qscale, 400 );
1073 // I don't care about exact rounding, this is just how close the floating-point implementation happens to be
1075 for( i=0; i<400; i++ )
1076 ok &= abs(dstc[i]-dsta[i]) <= (abs(dstc[i])>512) || fabs((double)dstc[i]/dsta[i]-1) < 1e-6;
1077 report( "mbtree propagate :" );
1083 static int check_deblock( int cpu_ref, int cpu_new )
1085 x264_deblock_function_t db_c;
1086 x264_deblock_function_t db_ref;
1087 x264_deblock_function_t db_a;
1088 int ret = 0, ok = 1, used_asm = 0;
1089 int alphas[36], betas[36];
1093 x264_deblock_init( 0, &db_c );
1094 x264_deblock_init( cpu_ref, &db_ref );
1095 x264_deblock_init( cpu_new, &db_a );
1097 /* not exactly the real values of a,b,tc but close enough */
1099 for( i = 35; i >= 0; i-- )
1103 tcs[i][0] = tcs[i][2] = (c+6)/10;
1104 tcs[i][1] = tcs[i][3] = (c+9)/20;
1109 #define TEST_DEBLOCK( name, align, ... ) \
1110 for( i = 0; i < 36; i++ ) \
1112 int off = 8*32 + (i&15)*4*!align; /* benchmark various alignments of h filter */\
1113 for( j = 0; j < 1024; j++ ) \
1114             /* two distributions of random values to exercise different failure modes */\
1115 buf3[j] = rand() & (i&1 ? 0xf : 0xff ); \
1116 memcpy( buf4, buf3, 1024 ); \
1117 if( db_a.name != db_ref.name ) \
1119 set_func_name( #name );\
1121 call_c1( db_c.name, buf3+off, 32, alphas[i], betas[i], ##__VA_ARGS__ ); \
1122 call_a1( db_a.name, buf4+off, 32, alphas[i], betas[i], ##__VA_ARGS__ ); \
1123 if( memcmp( buf3, buf4, 1024 ) ) \
1126 fprintf( stderr, #name "(a=%d, b=%d): [FAILED]\n", alphas[i], betas[i] ); \
1129 call_c2( db_c.name, buf3+off, 32, alphas[i], betas[i], ##__VA_ARGS__ ); \
1130 call_a2( db_a.name, buf4+off, 32, alphas[i], betas[i], ##__VA_ARGS__ ); \
1134 TEST_DEBLOCK( deblock_h_luma, 0, tcs[i] );
1135 TEST_DEBLOCK( deblock_v_luma, 1, tcs[i] );
1136 TEST_DEBLOCK( deblock_h_chroma, 0, tcs[i] );
1137 TEST_DEBLOCK( deblock_v_chroma, 1, tcs[i] );
1138 TEST_DEBLOCK( deblock_h_luma_intra, 0 );
1139 TEST_DEBLOCK( deblock_v_luma_intra, 1 );
1140 TEST_DEBLOCK( deblock_h_chroma_intra, 0 );
1141 TEST_DEBLOCK( deblock_v_chroma_intra, 1 );
1143 report( "deblock :" );
1148 static int check_quant( int cpu_ref, int cpu_new )
1150 x264_quant_function_t qf_c;
1151 x264_quant_function_t qf_ref;
1152 x264_quant_function_t qf_a;
1153 ALIGNED_16( int16_t dct1[64] );
1154 ALIGNED_16( int16_t dct2[64] );
1155 ALIGNED_16( uint8_t cqm_buf[64] );
1156 int ret = 0, ok, used_asm;
1157 int oks[2] = {1,1}, used_asms[2] = {0,0};
1158 int i, j, i_cqm, qp;
1161 memset( h, 0, sizeof(*h) );
1162 h->pps = h->pps_array;
1163 x264_param_default( &h->param );
1164 h->chroma_qp_table = i_chroma_qp_table + 12;
1165 h->param.rc.i_qp_min = 26;
1166 h->param.analyse.b_transform_8x8 = 1;
1168 for( i_cqm = 0; i_cqm < 4; i_cqm++ )
1172 for( i = 0; i < 6; i++ )
1173 h->pps->scaling_list[i] = x264_cqm_flat16;
1174 h->param.i_cqm_preset = h->pps->i_cqm_preset = X264_CQM_FLAT;
1176 else if( i_cqm == 1 )
1178 for( i = 0; i < 6; i++ )
1179 h->pps->scaling_list[i] = x264_cqm_jvt[i];
1180 h->param.i_cqm_preset = h->pps->i_cqm_preset = X264_CQM_JVT;
1185 for( i = 0; i < 64; i++ )
1186 cqm_buf[i] = 10 + rand() % 246;
1188 for( i = 0; i < 64; i++ )
1190 for( i = 0; i < 6; i++ )
1191 h->pps->scaling_list[i] = cqm_buf;
1192 h->param.i_cqm_preset = h->pps->i_cqm_preset = X264_CQM_CUSTOM;
1196 x264_quant_init( h, 0, &qf_c );
1197 x264_quant_init( h, cpu_ref, &qf_ref );
1198 x264_quant_init( h, cpu_new, &qf_a );
1200 #define INIT_QUANT8() \
1202 static const int scale1d[8] = {32,31,24,31,32,31,24,31}; \
1203 for( i = 0; i < 64; i++ ) \
1205 unsigned int scale = (255*scale1d[i>>3]*scale1d[i&7])/16; \
1206 dct1[i] = dct2[i] = j ? (rand()%(2*scale+1))-scale : 0; \
1210 #define INIT_QUANT4() \
1212 static const int scale1d[4] = {4,6,4,6}; \
1213 for( i = 0; i < 16; i++ ) \
1215 unsigned int scale = 255*scale1d[i>>2]*scale1d[i&3]; \
1216 dct1[i] = dct2[i] = j ? (rand()%(2*scale+1))-scale : 0; \
1220 #define TEST_QUANT_DC( name, cqm ) \
1221 if( qf_a.name != qf_ref.name ) \
1223 set_func_name( #name ); \
1225 for( qp = 51; qp > 0; qp-- ) \
1227 for( j = 0; j < 2; j++ ) \
1229 int result_c, result_a; \
1230 for( i = 0; i < 16; i++ ) \
1231 dct1[i] = dct2[i] = j ? (rand() & 0x1fff) - 0xfff : 0; \
1232 result_c = call_c1( qf_c.name, dct1, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
1233 result_a = call_a1( qf_a.name, dct2, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
1234 if( memcmp( dct1, dct2, 16*2 ) || result_c != result_a ) \
1237 fprintf( stderr, #name "(cqm=%d): [FAILED]\n", i_cqm ); \
1240 call_c2( qf_c.name, dct1, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
1241 call_a2( qf_a.name, dct2, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
1246 #define TEST_QUANT( qname, block, w ) \
1247 if( qf_a.qname != qf_ref.qname ) \
1249 set_func_name( #qname ); \
1251 for( qp = 51; qp > 0; qp-- ) \
1253 for( j = 0; j < 2; j++ ) \
1255 int result_c, result_a; \
1257 result_c = call_c1( qf_c.qname, dct1, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
1258 result_a = call_a1( qf_a.qname, dct2, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
1259 if( memcmp( dct1, dct2, w*w*2 ) || result_c != result_a ) \
1262 fprintf( stderr, #qname "(qp=%d, cqm=%d, block="#block"): [FAILED]\n", qp, i_cqm ); \
1265 call_c2( qf_c.qname, dct1, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
1266 call_a2( qf_a.qname, dct2, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
1271 TEST_QUANT( quant_8x8, CQM_8IY, 8 );
1272 TEST_QUANT( quant_8x8, CQM_8PY, 8 );
1273 TEST_QUANT( quant_4x4, CQM_4IY, 4 );
1274 TEST_QUANT( quant_4x4, CQM_4PY, 4 );
1275 TEST_QUANT_DC( quant_4x4_dc, **h->quant4_mf[CQM_4IY] );
1276 TEST_QUANT_DC( quant_2x2_dc, **h->quant4_mf[CQM_4IC] );
1278 #define TEST_DEQUANT( qname, dqname, block, w ) \
1279 if( qf_a.dqname != qf_ref.dqname ) \
1281 set_func_name( "%s_%s", #dqname, i_cqm?"cqm":"flat" ); \
1284 for( qp = 51; qp > 0; qp-- ) \
1287 call_c1( qf_c.qname, dct1, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
1288 memcpy( dct2, dct1, w*w*2 ); \
1289 call_c1( qf_c.dqname, dct1, h->dequant##w##_mf[block], qp ); \
1290 call_a1( qf_a.dqname, dct2, h->dequant##w##_mf[block], qp ); \
1291 if( memcmp( dct1, dct2, w*w*2 ) ) \
1294 fprintf( stderr, #dqname "(qp=%d, cqm=%d, block="#block"): [FAILED]\n", qp, i_cqm ); \
1297 call_c2( qf_c.dqname, dct1, h->dequant##w##_mf[block], qp ); \
1298 call_a2( qf_a.dqname, dct2, h->dequant##w##_mf[block], qp ); \
1302 TEST_DEQUANT( quant_8x8, dequant_8x8, CQM_8IY, 8 );
1303 TEST_DEQUANT( quant_8x8, dequant_8x8, CQM_8PY, 8 );
1304 TEST_DEQUANT( quant_4x4, dequant_4x4, CQM_4IY, 4 );
1305 TEST_DEQUANT( quant_4x4, dequant_4x4, CQM_4PY, 4 );
1307 #define TEST_DEQUANT_DC( qname, dqname, block, w ) \
1308 if( qf_a.dqname != qf_ref.dqname ) \
1310 set_func_name( "%s_%s", #dqname, i_cqm?"cqm":"flat" ); \
1312 for( qp = 51; qp > 0; qp-- ) \
1314 for( i = 0; i < 16; i++ ) \
1316 call_c1( qf_c.qname, dct1, h->quant##w##_mf[block][qp][0]>>1, h->quant##w##_bias[block][qp][0]>>1 ); \
1317 memcpy( dct2, dct1, w*w*2 ); \
1318 call_c1( qf_c.dqname, dct1, h->dequant##w##_mf[block], qp ); \
1319 call_a1( qf_a.dqname, dct2, h->dequant##w##_mf[block], qp ); \
1320 if( memcmp( dct1, dct2, w*w*2 ) ) \
1323 fprintf( stderr, #dqname "(qp=%d, cqm=%d, block="#block"): [FAILED]\n", qp, i_cqm ); \
1325 call_c2( qf_c.dqname, dct1, h->dequant##w##_mf[block], qp ); \
1326 call_a2( qf_a.dqname, dct2, h->dequant##w##_mf[block], qp ); \
1330 TEST_DEQUANT_DC( quant_4x4_dc, dequant_4x4_dc, CQM_4IY, 4 );
1332 x264_cqm_delete( h );
1335 ok = oks[0]; used_asm = used_asms[0];
1336 report( "quant :" );
1338 ok = oks[1]; used_asm = used_asms[1];
1339 report( "dequant :" );
1341 ok = 1; used_asm = 0;
1342 if( qf_a.denoise_dct != qf_ref.denoise_dct )
1346 for( size = 16; size <= 64; size += 48 )
1348 set_func_name( "denoise_dct" );
1349 memcpy(dct1, buf1, size*2);
1350 memcpy(dct2, buf1, size*2);
1351 memcpy(buf3+256, buf3, 256);
1352 call_c1( qf_c.denoise_dct, dct1, (uint32_t*)buf3, (uint16_t*)buf2, size );
1353 call_a1( qf_a.denoise_dct, dct2, (uint32_t*)(buf3+256), (uint16_t*)buf2, size );
1354 if( memcmp( dct1, dct2, size*2 ) || memcmp( buf3+4, buf3+256+4, (size-1)*sizeof(uint32_t) ) )
1356 call_c2( qf_c.denoise_dct, dct1, (uint32_t*)buf3, (uint16_t*)buf2, size );
1357 call_a2( qf_a.denoise_dct, dct2, (uint32_t*)(buf3+256), (uint16_t*)buf2, size );
1360 report( "denoise dct :" );
1362 #define TEST_DECIMATE( decname, w, ac, thresh ) \
1363 if( qf_a.decname != qf_ref.decname ) \
1365 set_func_name( #decname ); \
1367 for( i = 0; i < 100; i++ ) \
1369 int result_c, result_a, idx; \
1370 for( idx = 0; idx < w*w; idx++ ) \
1371 dct1[idx] = !(rand()&3) + (!(rand()&15))*(rand()&3); \
1374 result_c = call_c( qf_c.decname, dct1 ); \
1375 result_a = call_a( qf_a.decname, dct1 ); \
1376 if( X264_MIN(result_c,thresh) != X264_MIN(result_a,thresh) ) \
1379 fprintf( stderr, #decname ": [FAILED]\n" ); \
1385 ok = 1; used_asm = 0;
1386 TEST_DECIMATE( decimate_score64, 8, 0, 6 );
1387 TEST_DECIMATE( decimate_score16, 4, 0, 6 );
1388 TEST_DECIMATE( decimate_score15, 4, 1, 7 );
1389 report( "decimate_score :" );
1391 #define TEST_LAST( last, lastname, w, ac ) \
1392 if( qf_a.last != qf_ref.last ) \
1394 set_func_name( #lastname ); \
1396 for( i = 0; i < 100; i++ ) \
1398 int result_c, result_a, idx, nnz=0; \
1399 int max = rand() & (w*w-1); \
1400 memset( dct1, 0, w*w*2 ); \
1401 for( idx = ac; idx < max; idx++ ) \
1402 nnz |= dct1[idx] = !(rand()&3) + (!(rand()&15))*rand(); \
1405 result_c = call_c( qf_c.last, dct1+ac ); \
1406 result_a = call_a( qf_a.last, dct1+ac ); \
1407 if( result_c != result_a ) \
1410 fprintf( stderr, #lastname ": [FAILED]\n" ); \
1416 ok = 1; used_asm = 0;
1417 TEST_LAST( coeff_last[DCT_CHROMA_DC], coeff_last4, 2, 0 );
1418 TEST_LAST( coeff_last[ DCT_LUMA_AC], coeff_last15, 4, 1 );
1419 TEST_LAST( coeff_last[ DCT_LUMA_4x4], coeff_last16, 4, 0 );
1420 TEST_LAST( coeff_last[ DCT_LUMA_8x8], coeff_last64, 8, 0 );
1421 report( "coeff_last :" );
1423 #define TEST_LEVELRUN( lastname, name, w, ac ) \
1424 if( qf_a.lastname != qf_ref.lastname ) \
1426 set_func_name( #name ); \
1428 for( i = 0; i < 100; i++ ) \
1430 x264_run_level_t runlevel_c, runlevel_a; \
1431 int result_c, result_a, idx, nnz=0; \
1432 int max = rand() & (w*w-1); \
1433 memset( dct1, 0, w*w*2 ); \
1434 memcpy( &runlevel_a, buf1+i, sizeof(x264_run_level_t) ); \
1435 memcpy( &runlevel_c, buf1+i, sizeof(x264_run_level_t) ); \
1436 for( idx = ac; idx < max; idx++ ) \
1437 nnz |= dct1[idx] = !(rand()&3) + (!(rand()&15))*rand(); \
1440 result_c = call_c( qf_c.lastname, dct1+ac, &runlevel_c ); \
1441 result_a = call_a( qf_a.lastname, dct1+ac, &runlevel_a ); \
1442 if( result_c != result_a || runlevel_c.last != runlevel_a.last || \
1443 memcmp(runlevel_c.level, runlevel_a.level, sizeof(int16_t)*result_c) || \
1444 memcmp(runlevel_c.run, runlevel_a.run, sizeof(uint8_t)*(result_c-1)) ) \
1447 fprintf( stderr, #name ": [FAILED]\n" ); \
1453 ok = 1; used_asm = 0;
1454 TEST_LEVELRUN( coeff_level_run[DCT_CHROMA_DC], coeff_level_run4, 2, 0 );
1455 TEST_LEVELRUN( coeff_level_run[ DCT_LUMA_AC], coeff_level_run15, 4, 1 );
1456 TEST_LEVELRUN( coeff_level_run[ DCT_LUMA_4x4], coeff_level_run16, 4, 0 );
1457 report( "coeff_level_run :" );
1462 static int check_intra( int cpu_ref, int cpu_new )
1464 int ret = 0, ok = 1, used_asm = 0;
1466 ALIGNED_16( uint8_t edge[33] );
1467 ALIGNED_16( uint8_t edge2[33] );
1470 x264_predict_t predict_16x16[4+3];
1471 x264_predict_t predict_8x8c[4+3];
1472 x264_predict8x8_t predict_8x8[9+3];
1473 x264_predict_t predict_4x4[9+3];
1474 x264_predict_8x8_filter_t predict_8x8_filter;
1475 } ip_c, ip_ref, ip_a;
1477 x264_predict_16x16_init( 0, ip_c.predict_16x16 );
1478 x264_predict_8x8c_init( 0, ip_c.predict_8x8c );
1479 x264_predict_8x8_init( 0, ip_c.predict_8x8, &ip_c.predict_8x8_filter );
1480 x264_predict_4x4_init( 0, ip_c.predict_4x4 );
1482 x264_predict_16x16_init( cpu_ref, ip_ref.predict_16x16 );
1483 x264_predict_8x8c_init( cpu_ref, ip_ref.predict_8x8c );
1484 x264_predict_8x8_init( cpu_ref, ip_ref.predict_8x8, &ip_ref.predict_8x8_filter );
1485 x264_predict_4x4_init( cpu_ref, ip_ref.predict_4x4 );
1487 x264_predict_16x16_init( cpu_new, ip_a.predict_16x16 );
1488 x264_predict_8x8c_init( cpu_new, ip_a.predict_8x8c );
1489 x264_predict_8x8_init( cpu_new, ip_a.predict_8x8, &ip_a.predict_8x8_filter );
1490 x264_predict_4x4_init( cpu_new, ip_a.predict_4x4 );
1492 ip_c.predict_8x8_filter( buf1+48, edge, ALL_NEIGHBORS, ALL_NEIGHBORS );
1494 #define INTRA_TEST( name, dir, w, ... ) \
1495 if( ip_a.name[dir] != ip_ref.name[dir] )\
1497 set_func_name( "intra_%s_%s", #name, intra_##name##_names[dir] );\
1499 memcpy( buf3, buf1, 32*20 );\
1500 memcpy( buf4, buf1, 32*20 );\
1501 call_c( ip_c.name[dir], buf3+48, ##__VA_ARGS__ );\
1502 call_a( ip_a.name[dir], buf4+48, ##__VA_ARGS__ );\
1503 if( memcmp( buf3, buf4, 32*20 ) )\
1505 fprintf( stderr, #name "[%d] : [FAILED]\n", dir );\
1508 for(k=-1; k<16; k++)\
1509 printf("%2x ", edge[16+k]);\
1511 for(j=0; j<w; j++){\
1512 printf("%2x ", edge[14-j]);\
1514 printf("%2x ", buf4[48+k+j*32]);\
1518 for(j=0; j<w; j++){\
1521 printf("%2x ", buf3[48+k+j*32]);\
1527 for( i = 0; i < 12; i++ )
1528 INTRA_TEST( predict_4x4, i, 4 );
1529 for( i = 0; i < 7; i++ )
1530 INTRA_TEST( predict_8x8c, i, 8 );
1531 for( i = 0; i < 7; i++ )
1532 INTRA_TEST( predict_16x16, i, 16 );
1533 for( i = 0; i < 12; i++ )
1534 INTRA_TEST( predict_8x8, i, 8, edge );
1536 set_func_name("intra_predict_8x8_filter");
1537 if( ip_a.predict_8x8_filter != ip_ref.predict_8x8_filter )
1540 for( i = 0; i < 32; i++ )
1542 memcpy( edge2, edge, 33 );
1543 call_c(ip_c.predict_8x8_filter, buf1+48, edge, (i&24)>>1, i&7);
1544 call_a(ip_a.predict_8x8_filter, buf1+48, edge2, (i&24)>>1, i&7);
1545 if( memcmp( edge, edge2, 33 ) )
1547 fprintf( stderr, "predict_8x8_filter : [FAILED] %d %d\n", (i&24)>>1, i&7);
1553 report( "intra pred :" );
1557 #define DECL_CABAC(cpu) \
1558 static void run_cabac_##cpu( uint8_t *dst )\
1562 x264_cabac_context_init( &cb, SLICE_TYPE_P, 26, 0 );\
1563 x264_cabac_encode_init( &cb, dst, dst+0xff0 );\
1564 for( i=0; i<0x1000; i++ )\
1565 x264_cabac_encode_decision_##cpu( &cb, buf1[i]>>1, buf1[i]&1 );\
1571 #define run_cabac_asm run_cabac_c
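/* With no asm cabac implementation for this arch, run_cabac_asm aliases the C
 * version; check_cabac() below notices the identical function pointers and
 * skips the test. */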
1574 static int check_cabac( int cpu_ref, int cpu_new )
1576 int ret = 0, ok, used_asm = 1;
1577 if( cpu_ref || run_cabac_c == run_cabac_asm)
1579 set_func_name( "cabac_encode_decision" );
1580 memcpy( buf4, buf3, 0x1000 );
1581 call_c( run_cabac_c, buf3 );
1582 call_a( run_cabac_asm, buf4 );
1583 ok = !memcmp( buf3, buf4, 0x1000 );
1584 report( "cabac :" );
1588 static int check_all_funcs( int cpu_ref, int cpu_new )
1590 return check_pixel( cpu_ref, cpu_new )
1591 + check_dct( cpu_ref, cpu_new )
1592 + check_mc( cpu_ref, cpu_new )
1593 + check_intra( cpu_ref, cpu_new )
1594 + check_deblock( cpu_ref, cpu_new )
1595 + check_quant( cpu_ref, cpu_new )
1596 + check_cabac( cpu_ref, cpu_new );
1599 static int add_flags( int *cpu_ref, int *cpu_new, int flags, const char *name )
1601 *cpu_ref = *cpu_new;
1603 if( *cpu_new & X264_CPU_SSE2_IS_FAST )
1604 *cpu_new &= ~X264_CPU_SSE2_IS_SLOW;
1606 fprintf( stderr, "x264: %s\n", name );
1607 return check_all_funcs( *cpu_ref, *cpu_new );
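/* add_flags() accumulates the new capability bits into cpu_new while keeping
 * the previous set as cpu_ref, so each pass only re-validates the functions
 * that the newly added flags actually changed. */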
1610 static int check_all_flags( void )
1613 int cpu0 = 0, cpu1 = 0;
1615 if( x264_cpu_detect() & X264_CPU_MMXEXT )
1617 ret |= add_flags( &cpu0, &cpu1, X264_CPU_MMX | X264_CPU_MMXEXT, "MMX" );
1618 ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "MMX Cache64" );
1619 cpu1 &= ~X264_CPU_CACHELINE_64;
1621 ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_32, "MMX Cache32" );
1622 cpu1 &= ~X264_CPU_CACHELINE_32;
1624 if( x264_cpu_detect() & X264_CPU_LZCNT )
1626 ret |= add_flags( &cpu0, &cpu1, X264_CPU_LZCNT, "MMX_LZCNT" );
1627 cpu1 &= ~X264_CPU_LZCNT;
1630 if( x264_cpu_detect() & X264_CPU_SSE2 )
1632 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE | X264_CPU_SSE2 | X264_CPU_SSE2_IS_SLOW, "SSE2Slow" );
1633 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE2_IS_FAST, "SSE2Fast" );
1634 ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "SSE2Fast Cache64" );
1635 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SHUFFLE_IS_FAST, "SSE2 FastShuffle" );
1636 cpu1 &= ~X264_CPU_SHUFFLE_IS_FAST;
1638 if( x264_cpu_detect() & X264_CPU_SSE_MISALIGN )
1640 cpu1 &= ~X264_CPU_CACHELINE_64;
1641 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE_MISALIGN, "SSE_Misalign" );
1642 cpu1 &= ~X264_CPU_SSE_MISALIGN;
1644 if( x264_cpu_detect() & X264_CPU_LZCNT )
1646 cpu1 &= ~X264_CPU_CACHELINE_64;
1647 ret |= add_flags( &cpu0, &cpu1, X264_CPU_LZCNT, "SSE_LZCNT" );
1648 cpu1 &= ~X264_CPU_LZCNT;
1650 if( x264_cpu_detect() & X264_CPU_SSE3 )
1651 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE3 | X264_CPU_CACHELINE_64, "SSE3" );
1652 if( x264_cpu_detect() & X264_CPU_SSSE3 )
1654 cpu1 &= ~X264_CPU_CACHELINE_64;
1655 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSSE3, "SSSE3" );
1656 ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "SSSE3 Cache64" );
1657 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SHUFFLE_IS_FAST, "SSSE3 FastShuffle" );
1658 cpu1 &= ~X264_CPU_SHUFFLE_IS_FAST;
1660 if( x264_cpu_detect() & X264_CPU_SSE4 )
1662 cpu1 &= ~X264_CPU_CACHELINE_64;
1663 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE4, "SSE4" );
1666 if( x264_cpu_detect() & X264_CPU_ALTIVEC )
1668 fprintf( stderr, "x264: ALTIVEC against C\n" );
1669 ret = check_all_funcs( 0, X264_CPU_ALTIVEC );
1672 if( x264_cpu_detect() & X264_CPU_ARMV6 )
1673 ret |= add_flags( &cpu0, &cpu1, X264_CPU_ARMV6, "ARMv6" );
1674 if( x264_cpu_detect() & X264_CPU_NEON )
1675 ret |= add_flags( &cpu0, &cpu1, X264_CPU_NEON, "NEON" );
1676 if( x264_cpu_detect() & X264_CPU_FAST_NEON_MRC )
1677 ret |= add_flags( &cpu0, &cpu1, X264_CPU_FAST_NEON_MRC, "Fast NEON MRC" );
1682 int main(int argc, char *argv[])
1687 if( argc > 1 && !strncmp( argv[1], "--bench", 7 ) )
1689 #if !defined(ARCH_X86) && !defined(ARCH_X86_64) && !defined(ARCH_PPC) && !defined(ARCH_ARM)
1690 fprintf( stderr, "no --bench for your cpu until you port rdtsc\n" );
1694 if( argv[1][7] == '=' )
1696 bench_pattern = argv[1]+8;
1697 bench_pattern_len = strlen(bench_pattern);
1703 i = ( argc > 1 ) ? atoi(argv[1]) : x264_mdate();
1704 fprintf( stderr, "x264: using random seed %u\n", i );
1707 buf1 = x264_malloc( 0x3e00 + 16*BENCH_ALIGNS );
1710 fprintf( stderr, "malloc failed, unable to initiate tests!\n" );
1713 buf2 = buf1 + 0xf00;
1714 buf3 = buf2 + 0xf00;
1715 buf4 = buf3 + 0x1000;
1716 for( i=0; i<0x1e00; i++ )
1717 buf1[i] = rand() & 0xFF;
1718 memset( buf1+0x1e00, 0, 0x2000 );
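    /* Buffer layout: buf1 and buf2 (the first 0x1e00 bytes) hold random input
     * data, while buf3 and buf4 (the remaining 0x2000 bytes) start zeroed and
     * serve as output scratch for the C and asm versions respectively. */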
1720 /* 16-byte alignment is guaranteed whenever it's useful, but some functions also vary in speed depending on %64 */
1722 for( i=0; i<BENCH_ALIGNS && !ret; i++ )
1724 buf2 = buf1 + 0xf00;
1725 buf3 = buf2 + 0xf00;
1726 buf4 = buf3 + 0x1000;
1727 ret |= x264_stack_pagealign( check_all_flags, i*16 );
1730 fprintf( stderr, "%d/%d\r", i+1, BENCH_ALIGNS );
1733 ret = check_all_flags();
1737 fprintf( stderr, "x264: at least one test has failed. Go and fix that Right Now!\n" );
1740 fprintf( stderr, "x264: All tests passed Yeah :)\n" );