1 /*****************************************************************************
2 * cpu.c: h264 encoder library
3 *****************************************************************************
4 * Copyright (C) 2003-2008 x264 project
6 * Authors: Loren Merritt <lorenm@u.washington.edu>
7 * Laurent Aimar <fenrir@via.ecp.fr>
8 * Fiona Glaser <fiona@x264.com>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
23 *****************************************************************************/
25 #define _GNU_SOURCE // for sched_getaffinity
29 #if HAVE_PTHREAD && SYS_LINUX
33 #include <kernel/OS.h>
35 #if SYS_MACOSX || SYS_FREEBSD
36 #include <sys/types.h>
37 #include <sys/sysctl.h>
40 #include <sys/param.h>
41 #include <sys/sysctl.h>
42 #include <machine/cpu.h>
/* Human-readable names for CPU capability sets, each paired with the full
 * X264_CPU_* flag mask it implies.  Baseline features are folded into the
 * higher SIMD levels (e.g. "SSE2" includes MMX/MMXEXT/SSE) so one name
 * always selects a complete, usable feature set.
 * NOTE(review): the array's terminating entry and closing brace are in
 * lines not visible in this chunk. */
45 const x264_cpu_name_t x264_cpu_names[] = {
46 {"Altivec", X264_CPU_ALTIVEC},
47 // {"MMX", X264_CPU_MMX}, // we don't support asm on mmx1 cpus anymore
48 {"MMX2", X264_CPU_MMX|X264_CPU_MMXEXT},
49 {"MMXEXT", X264_CPU_MMX|X264_CPU_MMXEXT},
50 // {"SSE", X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE}, // there are no sse1 functions in x264
51 {"SSE2Slow",X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE|X264_CPU_SSE2|X264_CPU_SSE2_IS_SLOW},
52 {"SSE2", X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE|X264_CPU_SSE2},
53 {"SSE2Fast",X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE|X264_CPU_SSE2|X264_CPU_SSE2_IS_FAST},
54 {"SSE3", X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE|X264_CPU_SSE2|X264_CPU_SSE3},
55 {"SSSE3", X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE|X264_CPU_SSE2|X264_CPU_SSE3|X264_CPU_SSSE3},
56 {"FastShuffle", X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE|X264_CPU_SSE2|X264_CPU_SHUFFLE_IS_FAST},
57 {"SSE4.1", X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE|X264_CPU_SSE2|X264_CPU_SSE3|X264_CPU_SSSE3|X264_CPU_SSE4},
58 {"SSE4.2", X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE|X264_CPU_SSE2|X264_CPU_SSE3|X264_CPU_SSSE3|X264_CPU_SSE4|X264_CPU_SSE42},
59 {"Cache32", X264_CPU_CACHELINE_32},
60 {"Cache64", X264_CPU_CACHELINE_64},
61 {"SSEMisalign", X264_CPU_SSE_MISALIGN},
62 {"LZCNT", X264_CPU_LZCNT},
63 {"Slow_mod4_stack", X264_CPU_STACK_MOD4},
64 {"ARMv6", X264_CPU_ARMV6},
65 {"NEON", X264_CPU_NEON},
66 {"Fast_NEON_MRC", X264_CPU_FAST_NEON_MRC},
67 {"SlowCTZ", X264_CPU_SLOW_CTZ},
68 {"SlowAtom", X264_CPU_SLOW_ATOM},
/* Runtime SIMD probing for PPC/Linux and ARM-without-NEON builds: execute a
 * SIMD instruction and catch the SIGILL that results when the CPU lacks the
 * unit, long-jumping back to the detection routine. */
72 #if (ARCH_PPC && SYS_LINUX) || (ARCH_ARM && !HAVE_NEON)
/* Jump target armed by sigsetjmp() in the prober before the trial
 * instruction is executed. */
75 static sigjmp_buf jmpbuf;
/* presumably checked by the handler so a stray SIGILL outside a probe is
 * not hijacked — the check is in lines not visible here; TODO confirm. */
76 static volatile sig_atomic_t canjump = 0;
/* SIGILL handler: restore the default disposition (so a second fault is
 * fatal rather than looping) and jump back to the sigsetjmp() site. */
78 static void sigill_handler( int sig )
82 signal( sig, SIG_DFL );
87 siglongjmp( jmpbuf, 1 );
/* Asm helpers: test whether the CPUID instruction is usable, and issue a
 * CPUID with the given leaf, returning eax/ebx/ecx/edx by pointer. */
92 int x264_cpu_cpuid_test( void );
93 uint32_t x264_cpu_cpuid( uint32_t op, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx );
/* x86/x86-64 runtime capability detection: queries CPUID and returns a
 * bitmask of X264_CPU_* flags, including vendor-specific speed heuristics
 * and cacheline-size detection.
 * NOTE(review): many original lines (the `cpu`/`cache` declarations, the
 * feature-bit `if` tests guarding the ORs, braces and returns) are elided
 * from this chunk — comments below describe only what is visible. */
95 uint32_t x264_cpu_detect( void )
98 uint32_t eax, ebx, ecx, edx;
/* Receives the 12-byte CPUID leaf-0 vendor string; ebx/edx/ecx are stored
 * into vendor[0]/[1]/[2] below, and the zero-init of vendor[3] guarantees
 * NUL termination for the strcmp()s further down. */
99 uint32_t vendor[4] = {0};
100 int max_extended_cap;
/* No usable CPUID (e.g. flag cannot be toggled) => no detectable features;
 * the early return is in an elided line. */
104 if( !x264_cpu_cpuid_test() )
/* Leaf 0: vendor id string, stored in register order ebx, edx, ecx. */
108 x264_cpu_cpuid( 0, &eax, vendor+0, vendor+2, vendor+1 );
/* Leaf 1: standard feature flags — edx carries MMX/SSE/SSE2, ecx carries
 * SSE3/SSSE3/SSE4.x.  The guarding bit tests for the ORs below are in
 * elided lines. */
112 x264_cpu_cpuid( 1, &eax, &ebx, &ecx, &edx );
118 cpu |= X264_CPU_MMXEXT|X264_CPU_SSE;
120 cpu |= X264_CPU_SSE2;
122 cpu |= X264_CPU_SSE3;
124 cpu |= X264_CPU_SSSE3;
126 cpu |= X264_CPU_SSE4;
128 cpu |= X264_CPU_SSE42;
/* Heuristic: any SSSE3-capable core runs SSE2 at full speed. */
130 if( cpu & X264_CPU_SSSE3 )
131 cpu |= X264_CPU_SSE2_IS_FAST;
/* Heuristic: SSE4.1-capable cores also have a fast shuffle unit. */
132 if( cpu & X264_CPU_SSE4 )
133 cpu |= X264_CPU_SHUFFLE_IS_FAST;
/* Highest supported extended CPUID leaf, reported in eax. */
135 x264_cpu_cpuid( 0x80000000, &eax, &ebx, &ecx, &edx );
136 max_extended_cap = eax;
/* AMD-specific tuning, keyed off extended leaf 0x80000001 feature bits. */
138 if( !strcmp((char*)vendor, "AuthenticAMD") && max_extended_cap >= 0x80000001 )
/* Assume slow CTZ on AMD by default; cleared again for SSE4a parts below. */
140 cpu |= X264_CPU_SLOW_CTZ;
141 x264_cpu_cpuid( 0x80000001, &eax, &ebx, &ecx, &edx );
/* presumably guarded by an elided test of AMD's extended-MMX bit — TODO
 * confirm against the full source. */
143 cpu |= X264_CPU_MMXEXT;
144 if( cpu & X264_CPU_SSE2 )
146 if( ecx&0x00000040 ) /* SSE4a */
/* SSE4a-era cores (K10+): full-width SSE2, fast shuffle, LZCNT, fast CTZ. */
148 cpu |= X264_CPU_SSE2_IS_FAST;
149 cpu |= X264_CPU_LZCNT;
150 cpu |= X264_CPU_SHUFFLE_IS_FAST;
151 cpu &= ~X264_CPU_SLOW_CTZ;
/* Older AMD SSE2 parts (elided else-branch) execute SSE2 half-width. */
154 cpu |= X264_CPU_SSE2_IS_SLOW;
156 if( ecx&0x00000080 ) /* Misalign SSE */
158 cpu |= X264_CPU_SSE_MISALIGN;
/* Asm helper that enables the hardware's misaligned-SSE mode. */
159 x264_cpu_mask_misalign_sse();
/* Intel-specific blacklists keyed on display family/model. */
164 if( !strcmp((char*)vendor, "GenuineIntel") )
166 x264_cpu_cpuid( 1, &eax, &ebx, &ecx, &edx );
/* Family/model decoding including the extended fields of leaf-1 eax. */
167 int family = ((eax>>8)&0xf) + ((eax>>20)&0xff);
168 int model = ((eax>>4)&0xf) + ((eax>>12)&0xf0);
169 /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and 6/14 (core1 "yonah")
170 * theoretically support sse2, but it's significantly slower than mmx for
171 * almost all of x264's functions, so let's just pretend they don't. */
172 if( family == 6 && (model == 9 || model == 13 || model == 14) )
174 cpu &= ~(X264_CPU_SSE2|X264_CPU_SSE3);
/* Those models predate SSSE3/SSE4, so nothing higher should be flagged. */
175 assert(!(cpu&(X264_CPU_SSSE3|X264_CPU_SSE4)));
177 /* Detect Atom CPU */
178 if( family == 6 && model == 28 )
180 cpu |= X264_CPU_SLOW_ATOM;
181 cpu |= X264_CPU_SLOW_CTZ;
/* Cacheline detection is only relevant for pre-SSE4.2 Intel/Cyrix parts;
 * everything newer is known to use 64-byte lines. */
185 if( (!strcmp((char*)vendor, "GenuineIntel") || !strcmp((char*)vendor, "CyrixInstead")) && !(cpu&X264_CPU_SSE42))
187 /* cacheline size is specified in 3 places, any of which may be missing */
188 x264_cpu_cpuid( 1, &eax, &ebx, &ecx, &edx );
/* CLFLUSH line size: leaf-1 ebx[15:8] in 8-byte units, i.e. ((ebx>>8)&0xff)*8. */
189 cache = (ebx&0xff00)>>5; // cflush size
190 if( !cache && max_extended_cap >= 0x80000006 )
192 x264_cpu_cpuid( 0x80000006, &eax, &ebx, &ecx, &edx );
/* Extended leaf 0x80000006: L2 cacheline size in ecx[7:0]. */
193 cache = ecx&0xff; // cacheline size
/* Last resort: walk the CPUID leaf-2 cache/TLB descriptor bytes and match
 * them against known 32-byte- and 64-byte-line descriptor ids. */
197 // Cache and TLB Information
198 static const char cache32_ids[] = { 0x0a, 0x0c, 0x41, 0x42, 0x43, 0x44, 0x45, 0x82, 0x83, 0x84, 0x85, 0 };
199 static const char cache64_ids[] = { 0x22, 0x23, 0x25, 0x29, 0x2c, 0x46, 0x47, 0x49, 0x60, 0x66, 0x67,
200 0x68, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7c, 0x7f, 0x86, 0x87, 0 };
204 x264_cpu_cpuid( 2, buf+0, buf+1, buf+2, buf+3 );
/* Scan each returned register's descriptor bytes; the byte-extraction and
 * validity checks are in elided lines. */
207 for( int j = 0; j < 4; j++ )
211 if( strchr( cache32_ids, buf[j]&0xff ) )
213 if( strchr( cache64_ids, buf[j]&0xff ) )
/* Leaf 2 may require multiple iterations (count in the elided `max`). */
217 } while( ++i < max );
221 cpu |= X264_CPU_CACHELINE_32;
222 else if( cache == 64 )
223 cpu |= X264_CPU_CACHELINE_64;
225 x264_log( NULL, X264_LOG_WARNING, "unable to determine cacheline size\n" );
228 #if BROKEN_STACK_ALIGNMENT
/* ABI only guarantees 4-byte stack alignment; asm must realign itself. */
229 cpu |= X264_CPU_STACK_MOD4;
/* PowerPC AltiVec detection on OS X / OpenBSD via sysctl(3).
 * NOTE(review): the #ifdef selecting between the two `selectors`
 * definitions, the `cpu`/`has_altivec` declarations, braces and the return
 * are in lines not visible in this chunk. */
237 #if SYS_MACOSX || SYS_OPENBSD
238 #include <sys/sysctl.h>
239 uint32_t x264_cpu_detect( void )
/* OS X form: machdep.altivec */
244 int selectors[2] = { CTL_MACHDEP, CPU_ALTIVEC };
/* OpenBSD form: hw.vectorunit */
246 int selectors[2] = { CTL_HW, HW_VECTORUNIT };
249 size_t length = sizeof( has_altivec );
250 int error = sysctl( selectors, 2, &has_altivec, &length, NULL, 0 );
/* Only trust has_altivec when the sysctl call itself succeeded. */
252 if( error == 0 && has_altivec != 0 )
253 cpu |= X264_CPU_ALTIVEC;
/* PowerPC AltiVec detection on Linux: arm a SIGILL trap, then execute an
 * AltiVec-only instruction; if it faults, the handler long-jumps back here
 * and AltiVec is reported absent.
 * NOTE(review): braces, the canjump handshake and the no-AltiVec return
 * are in lines not visible in this chunk. */
260 uint32_t x264_cpu_detect( void )
262 static void (*oldsig)( int );
264 oldsig = signal( SIGILL, sigill_handler );
/* Non-zero return => we arrived via siglongjmp from sigill_handler: the
 * probe instruction faulted, so restore the old handler and bail out. */
265 if( sigsetjmp( jmpbuf, 1 ) )
267 signal( SIGILL, oldsig );
/* SPR 256 is VRSAVE — a cheap AltiVec-era instruction to probe with. */
272 asm volatile( "mtspr 256, %0\n\t"
278 signal( SIGILL, oldsig );
/* Probe executed without faulting => AltiVec present. */
280 return X264_CPU_ALTIVEC;
/* Asm helpers: execute a NEON instruction (faults with SIGILL if absent),
 * and test for fast user-mode MRC access to the cycle counter. */
286 void x264_cpu_neon_test();
287 int x264_cpu_fast_neon_mrc_test();
/* ARM capability detection: ARMv6, NEON (via SIGILL probe), and the
 * fast-MRC heuristic used to distinguish Cortex-A9-class cores.
 * NOTE(review): the conditional-compilation guards, `flags` declaration,
 * braces and returns are in lines not visible in this chunk. */
289 uint32_t x264_cpu_detect( void )
293 flags |= X264_CPU_ARMV6;
295 // don't do this hack if compiled with -mfpu=neon
/* Probe NEON the same way AltiVec is probed above: trap SIGILL around the
 * NEON test instruction in the asm helper. */
297 static void (* oldsig)( int );
298 oldsig = signal( SIGILL, sigill_handler );
/* Non-zero => long-jumped from the handler: NEON instruction faulted. */
299 if( sigsetjmp( jmpbuf, 1 ) )
301 signal( SIGILL, oldsig );
306 x264_cpu_neon_test();
308 signal( SIGILL, oldsig );
311 flags |= X264_CPU_NEON;
313 // fast neon -> arm (Cortex-A9) detection relies on user access to the
314 // cycle counter; this assumes ARMv7 performance counters.
315 // NEON requires at least ARMv7, ARMv8 may require changes here, but
316 // hopefully this hacky detection method will have been replaced by then.
317 // Note that there is potential for a race condition if another program or
318 // x264 instance disables or reinits the counters while x264 is using them,
319 // which may result in incorrect detection and the counters stuck enabled.
320 flags |= x264_cpu_fast_neon_mrc_test() ? X264_CPU_FAST_NEON_MRC : 0;
321 // TODO: write dual issue test? currently it's A8 (dual issue) vs. A9 (fast mrc)
/* Fallback for architectures with no runtime detection — presumably just
 * returns 0; the body is in lines not visible in this chunk. */
328 uint32_t x264_cpu_detect( void )
/* Number of logical CPUs usable by this process, per platform.
 * NOTE(review): this chunk cuts off mid-function and several #if branches,
 * declarations and returns are elided — comments cover only what shows. */
335 int x264_cpu_num_processors( void )
340 #elif defined(_WIN32)
/* pthreads-win32 extension. */
341 return pthread_num_processors_np();
/* Linux: count set bits in the scheduler affinity mask. */
347 memset( &p_aff, 0, sizeof(p_aff) );
348 sched_getaffinity( 0, sizeof(p_aff), &p_aff );
/* NOTE(review): the bound is sizeof(p_aff) *bytes* but `bit` indexes bits,
 * so only the first sizeof(p_aff) bits of the mask are examined — an
 * undercount on systems with more CPUs than that; confirm intent. */
349 for( np = 0, bit = 0; bit < sizeof(p_aff); bit++ )
350 np += (((uint8_t *)&p_aff)[bit / 8] >> (bit % 8)) & 1;
/* BeOS/Haiku: ask the kernel directly. */
355 get_system_info( &info );
356 return info.cpu_count;
358 #elif SYS_MACOSX || SYS_FREEBSD || SYS_OPENBSD
/* BSD/OS X: hw.ncpu via sysctl, with sysctlbyname as a fallback path. */
360 size_t length = sizeof( numberOfCPUs );
362 int mib[2] = { CTL_HW, HW_NCPU };
363 if( sysctl(mib, 2, &numberOfCPUs, &length, NULL, 0) )
365 if( sysctlbyname("hw.ncpu", &numberOfCPUs, &length, NULL, 0) )