Bump dates to 2016
diff --git a/common/cpu.c b/common/cpu.c
index db2d4578d66e96217ddcae0e9eca5d2b6e1773a3..441b8ce017433ebff6d381d61a1bf7701a63134d 100644
--- a/common/cpu.c
+++ b/common/cpu.c
@@ -1,7 +1,7 @@
 /*****************************************************************************
- * cpu.c: h264 encoder library
+ * cpu.c: cpu detection
  *****************************************************************************
- * Copyright (C) 2003-2008 x264 project
+ * Copyright (C) 2003-2016 x264 project
  *
  * Authors: Loren Merritt <lorenm@u.washington.edu>
  *          Laurent Aimar <fenrir@via.ecp.fr>
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at licensing@x264.com.
  *****************************************************************************/
 
 #define _GNU_SOURCE // for sched_getaffinity
 #include "common.h"
 #include "cpu.h"
 
-#if defined(HAVE_PTHREAD) && defined(SYS_LINUX)
+#if HAVE_POSIXTHREAD && SYS_LINUX
 #include <sched.h>
 #endif
-#ifdef SYS_BEOS
+#if SYS_BEOS
 #include <kernel/OS.h>
 #endif
-#if defined(SYS_MACOSX) || defined(SYS_FREEBSD)
+#if SYS_MACOSX || SYS_FREEBSD
 #include <sys/types.h>
 #include <sys/sysctl.h>
 #endif
-#ifdef SYS_OPENBSD
+#if SYS_OPENBSD
 #include <sys/param.h>
 #include <sys/sysctl.h>
 #include <machine/cpu.h>
 #endif
 
-const x264_cpu_name_t x264_cpu_names[] = {
-    {"Altivec", X264_CPU_ALTIVEC},
-//  {"MMX",     X264_CPU_MMX}, // we don't support asm on mmx1 cpus anymore
-    {"MMX2",    X264_CPU_MMX|X264_CPU_MMXEXT},
-    {"MMXEXT",  X264_CPU_MMX|X264_CPU_MMXEXT},
-//  {"SSE",     X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE}, // there are no sse1 functions in x264
-    {"SSE2Slow",X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE|X264_CPU_SSE2|X264_CPU_SSE2_IS_SLOW},
-    {"SSE2",    X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE|X264_CPU_SSE2},
-    {"SSE2Fast",X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE|X264_CPU_SSE2|X264_CPU_SSE2_IS_FAST},
-    {"SSE3",    X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE|X264_CPU_SSE2|X264_CPU_SSE3},
-    {"SSSE3",   X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE|X264_CPU_SSE2|X264_CPU_SSE3|X264_CPU_SSSE3},
-    {"FastShuffle",   X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE|X264_CPU_SSE2|X264_CPU_SHUFFLE_IS_FAST},
-    {"SSE4.1",  X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE|X264_CPU_SSE2|X264_CPU_SSE3|X264_CPU_SSSE3|X264_CPU_SSE4},
-    {"SSE4.2",  X264_CPU_MMX|X264_CPU_MMXEXT|X264_CPU_SSE|X264_CPU_SSE2|X264_CPU_SSE3|X264_CPU_SSSE3|X264_CPU_SSE4|X264_CPU_SSE42},
-    {"Cache32", X264_CPU_CACHELINE_32},
-    {"Cache64", X264_CPU_CACHELINE_64},
-    {"SSEMisalign", X264_CPU_SSE_MISALIGN},
-    {"LZCNT", X264_CPU_LZCNT},
-    {"Slow_mod4_stack", X264_CPU_STACK_MOD4},
-    {"ARMv6", X264_CPU_ARMV6},
-    {"NEON",  X264_CPU_NEON},
-    {"Fast_NEON_MRC",  X264_CPU_FAST_NEON_MRC},
+const x264_cpu_name_t x264_cpu_names[] =
+{
+#if HAVE_MMX
+//  {"MMX",         X264_CPU_MMX},  // we don't support asm on mmx1 cpus anymore
+//  {"CMOV",        X264_CPU_CMOV}, // we require this unconditionally, so don't print it
+#define MMX2 X264_CPU_MMX|X264_CPU_MMX2|X264_CPU_CMOV
+    {"MMX2",        MMX2},
+    {"MMXEXT",      MMX2},
+    {"SSE",         MMX2|X264_CPU_SSE},
+#define SSE2 MMX2|X264_CPU_SSE|X264_CPU_SSE2
+    {"SSE2Slow",    SSE2|X264_CPU_SSE2_IS_SLOW},
+    {"SSE2",        SSE2},
+    {"SSE2Fast",    SSE2|X264_CPU_SSE2_IS_FAST},
+    {"SSE3",        SSE2|X264_CPU_SSE3},
+    {"SSSE3",       SSE2|X264_CPU_SSE3|X264_CPU_SSSE3},
+    {"SSE4.1",      SSE2|X264_CPU_SSE3|X264_CPU_SSSE3|X264_CPU_SSE4},
+    {"SSE4",        SSE2|X264_CPU_SSE3|X264_CPU_SSSE3|X264_CPU_SSE4},
+    {"SSE4.2",      SSE2|X264_CPU_SSE3|X264_CPU_SSSE3|X264_CPU_SSE4|X264_CPU_SSE42},
+#define AVX SSE2|X264_CPU_SSE3|X264_CPU_SSSE3|X264_CPU_SSE4|X264_CPU_SSE42|X264_CPU_AVX
+    {"AVX",         AVX},
+    {"XOP",         AVX|X264_CPU_XOP},
+    {"FMA4",        AVX|X264_CPU_FMA4},
+    {"FMA3",        AVX|X264_CPU_FMA3},
+    {"AVX2",        AVX|X264_CPU_FMA3|X264_CPU_AVX2},
+#undef AVX
+#undef SSE2
+#undef MMX2
+    {"Cache32",         X264_CPU_CACHELINE_32},
+    {"Cache64",         X264_CPU_CACHELINE_64},
+    {"LZCNT",           X264_CPU_LZCNT},
+    {"BMI1",            X264_CPU_BMI1},
+    {"BMI2",            X264_CPU_BMI1|X264_CPU_BMI2},
+    {"SlowCTZ",         X264_CPU_SLOW_CTZ},
+    {"SlowAtom",        X264_CPU_SLOW_ATOM},
+    {"SlowPshufb",      X264_CPU_SLOW_PSHUFB},
+    {"SlowPalignr",     X264_CPU_SLOW_PALIGNR},
+    {"SlowShuffle",     X264_CPU_SLOW_SHUFFLE},
+    {"UnalignedStack",  X264_CPU_STACK_MOD4},
+#elif ARCH_PPC
+    {"Altivec",         X264_CPU_ALTIVEC},
+#elif ARCH_ARM
+    {"ARMv6",           X264_CPU_ARMV6},
+    {"NEON",            X264_CPU_NEON},
+    {"FastNeonMRC",     X264_CPU_FAST_NEON_MRC},
+#elif ARCH_AARCH64
+    {"ARMv8",           X264_CPU_ARMV8},
+    {"NEON",            X264_CPU_NEON},
+#elif ARCH_MIPS
+    {"MSA",             X264_CPU_MSA},
+#endif
     {"", 0},
 };
 
-#if (defined(ARCH_PPC) && defined(SYS_LINUX)) || (defined(ARCH_ARM) && !defined(HAVE_NEON))
+#if (ARCH_PPC && SYS_LINUX) || (ARCH_ARM && !HAVE_NEON)
 #include <signal.h>
 #include <setjmp.h>
 static sigjmp_buf jmpbuf;
@@ -86,34 +117,40 @@ static void sigill_handler( int sig )
 }
 #endif
 
-#ifdef HAVE_MMX
-extern int  x264_cpu_cpuid_test( void );
-extern uint32_t  x264_cpu_cpuid( uint32_t op, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx );
+#if HAVE_MMX
+int x264_cpu_cpuid_test( void );
+void x264_cpu_cpuid( uint32_t op, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx );
+void x264_cpu_xgetbv( uint32_t op, uint32_t *eax, uint32_t *edx );
 
 uint32_t x264_cpu_detect( void )
 {
     uint32_t cpu = 0;
     uint32_t eax, ebx, ecx, edx;
     uint32_t vendor[4] = {0};
-    int max_extended_cap;
+    uint32_t max_extended_cap, max_basic_cap;
     int cache;
 
-#ifndef ARCH_X86_64
+#if !ARCH_X86_64
     if( !x264_cpu_cpuid_test() )
         return 0;
 #endif
 
     x264_cpu_cpuid( 0, &eax, vendor+0, vendor+2, vendor+1 );
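+    /* Leaf 0: EAX = highest basic leaf; the vendor string is EBX,EDX,ECX (hence the reordered output pointers) */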
-    if( eax == 0 )
+    max_basic_cap = eax;
+    if( max_basic_cap == 0 )
         return 0;
 
     x264_cpu_cpuid( 1, &eax, &ebx, &ecx, &edx );
     if( edx&0x00800000 )
         cpu |= X264_CPU_MMX;
     else
-        return 0;
+        return cpu;
     if( edx&0x02000000 )
-        cpu |= X264_CPU_MMXEXT|X264_CPU_SSE;
+        cpu |= X264_CPU_MMX2|X264_CPU_SSE;
+    if( edx&0x00008000 )
+        cpu |= X264_CPU_CMOV;
+    else
+        return cpu;
     if( edx&0x04000000 )
         cpu |= X264_CPU_SSE2;
     if( ecx&0x00000001 )
@@ -124,53 +161,108 @@ uint32_t x264_cpu_detect( void )
         cpu |= X264_CPU_SSE4;
     if( ecx&0x00100000 )
         cpu |= X264_CPU_SSE42;
+    /* Check OSXSAVE and AVX bits */
+    if( (ecx&0x18000000) == 0x18000000 )
+    {
+        /* Check for OS support */
+        x264_cpu_xgetbv( 0, &eax, &edx );
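+        /* xgetbv(0) reads XCR0: bits 1 and 2 set means the OS saves XMM and YMM state on context switch */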
+        if( (eax&0x6) == 0x6 )
+        {
+            cpu |= X264_CPU_AVX;
+            if( ecx&0x00001000 )
+                cpu |= X264_CPU_FMA3;
+        }
+    }
+
+    if( max_basic_cap >= 7 )
+    {
+        x264_cpu_cpuid( 7, &eax, &ebx, &ecx, &edx );
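+        /* Leaf 7 EBX: bit 3 = BMI1, bit 5 = AVX2, bit 8 = BMI2 */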
+        /* AVX2 requires OS support, but BMI1/2 don't. */
+        if( (cpu&X264_CPU_AVX) && (ebx&0x00000020) )
+            cpu |= X264_CPU_AVX2;
+        if( ebx&0x00000008 )
+        {
+            cpu |= X264_CPU_BMI1;
+            if( ebx&0x00000100 )
+                cpu |= X264_CPU_BMI2;
+        }
+    }
 
     if( cpu & X264_CPU_SSSE3 )
         cpu |= X264_CPU_SSE2_IS_FAST;
-    if( cpu & X264_CPU_SSE4 )
-        cpu |= X264_CPU_SHUFFLE_IS_FAST;
 
     x264_cpu_cpuid( 0x80000000, &eax, &ebx, &ecx, &edx );
     max_extended_cap = eax;
 
-    if( !strcmp((char*)vendor, "AuthenticAMD") && max_extended_cap >= 0x80000001 )
+    if( max_extended_cap >= 0x80000001 )
     {
         x264_cpu_cpuid( 0x80000001, &eax, &ebx, &ecx, &edx );
-        if( edx&0x00400000 )
-            cpu |= X264_CPU_MMXEXT;
-        if( cpu & X264_CPU_SSE2 )
+
+        if( ecx&0x00000020 )
+            cpu |= X264_CPU_LZCNT;             /* Supported by Intel chips starting with Haswell */
+        if( ecx&0x00000040 ) /* SSE4a, AMD only */
         {
-            if( ecx&0x00000040 ) /* SSE4a */
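+            /* family = base family (EAX bits 11:8) + extended family (EAX bits 27:20) */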
+            int family = ((eax>>8)&0xf) + ((eax>>20)&0xff);
+            cpu |= X264_CPU_SSE2_IS_FAST;      /* Phenom and later CPUs have fast SSE units */
+            if( family == 0x14 )
             {
-                cpu |= X264_CPU_SSE2_IS_FAST;
-                cpu |= X264_CPU_LZCNT;
-                cpu |= X264_CPU_SHUFFLE_IS_FAST;
+                cpu &= ~X264_CPU_SSE2_IS_FAST; /* SSSE3 doesn't imply fast SSE anymore... */
+                cpu |= X264_CPU_SSE2_IS_SLOW;  /* Bobcat has 64-bit SIMD units */
+                cpu |= X264_CPU_SLOW_PALIGNR;  /* palignr is insanely slow on Bobcat */
             }
-            else
-                cpu |= X264_CPU_SSE2_IS_SLOW;
-
-            if( ecx&0x00000080 ) /* Misalign SSE */
+            if( family == 0x16 )
             {
-                cpu |= X264_CPU_SSE_MISALIGN;
-                x264_cpu_mask_misalign_sse();
+                cpu |= X264_CPU_SLOW_PSHUFB;   /* Jaguar's pshufb isn't that slow, but it's slow enough
+                                                * compared to alternate instruction sequences that this
+                                                * is equal or faster on almost all such functions. */
             }
         }
+
+        if( cpu & X264_CPU_AVX )
+        {
+            if( ecx&0x00000800 ) /* XOP */
+                cpu |= X264_CPU_XOP;
+            if( ecx&0x00010000 ) /* FMA4 */
+                cpu |= X264_CPU_FMA4;
+        }
+
+        if( !strcmp((char*)vendor, "AuthenticAMD") )
+        {
+            if( edx&0x00400000 )
+                cpu |= X264_CPU_MMX2;
+            if( !(cpu&X264_CPU_LZCNT) )
+                cpu |= X264_CPU_SLOW_CTZ;
+            if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_FAST) )
+                cpu |= X264_CPU_SSE2_IS_SLOW; /* AMD CPUs come in two types: terrible at SSE and great at it */
+        }
     }
 
     if( !strcmp((char*)vendor, "GenuineIntel") )
     {
-        int family, model, stepping;
         x264_cpu_cpuid( 1, &eax, &ebx, &ecx, &edx );
-        family = ((eax>>8)&0xf) + ((eax>>20)&0xff);
-        model  = ((eax>>4)&0xf) + ((eax>>12)&0xf0);
-        stepping = eax&0xf;
-        /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and 6/14 (core1 "yonah")
-         * theoretically support sse2, but it's significantly slower than mmx for
-         * almost all of x264's functions, so let's just pretend they don't. */
-        if( family == 6 && (model == 9 || model == 13 || model == 14) )
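+        /* family/model combine the base and extended fields of leaf 1 EAX (model = base + (extended << 4)) */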
+        int family = ((eax>>8)&0xf) + ((eax>>20)&0xff);
+        int model  = ((eax>>4)&0xf) + ((eax>>12)&0xf0);
+        if( family == 6 )
         {
-            cpu &= ~(X264_CPU_SSE2|X264_CPU_SSE3);
-            assert(!(cpu&(X264_CPU_SSSE3|X264_CPU_SSE4)));
+            /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and 6/14 (core1 "yonah")
+             * theoretically support sse2, but it's significantly slower than mmx for
+             * almost all of x264's functions, so let's just pretend they don't. */
+            if( model == 9 || model == 13 || model == 14 )
+            {
+                cpu &= ~(X264_CPU_SSE2|X264_CPU_SSE3);
+                assert(!(cpu&(X264_CPU_SSSE3|X264_CPU_SSE4)));
+            }
+            /* Detect Atom CPU */
+            else if( model == 28 )
+            {
+                cpu |= X264_CPU_SLOW_ATOM;
+                cpu |= X264_CPU_SLOW_CTZ;
+                cpu |= X264_CPU_SLOW_PSHUFB;
+            }
+            /* Conroe has a slow shuffle unit. Check the model number to make sure not
+             * to include crippled low-end Penryns and Nehalems that don't have SSE4. */
+            else if( (cpu&X264_CPU_SSSE3) && !(cpu&X264_CPU_SSE4) && model < 23 )
+                cpu |= X264_CPU_SLOW_SHUFFLE;
         }
     }
 
@@ -184,7 +276,7 @@ uint32_t x264_cpu_detect( void )
             x264_cpu_cpuid( 0x80000006, &eax, &ebx, &ecx, &edx );
             cache = ecx&0xff; // cacheline size
         }
-        if( !cache )
+        if( !cache && max_basic_cap >= 2 )
         {
             // Cache and TLB Information
             static const char cache32_ids[] = { 0x0a, 0x0c, 0x41, 0x42, 0x43, 0x44, 0x45, 0x82, 0x83, 0x84, 0x85, 0 };
@@ -217,29 +309,33 @@ uint32_t x264_cpu_detect( void )
             x264_log( NULL, X264_LOG_WARNING, "unable to determine cacheline size\n" );
     }
 
-#ifdef BROKEN_STACK_ALIGNMENT
+#if STACK_ALIGNMENT < 16
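+    /* The compiler only guarantees stack alignment below 16 bytes, so mark the stack as potentially unaligned */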
     cpu |= X264_CPU_STACK_MOD4;
 #endif
 
     return cpu;
 }
 
-#elif defined( ARCH_PPC )
+#elif ARCH_PPC
 
-#if defined(SYS_MACOSX) || defined(SYS_OPENBSD)
+#if SYS_MACOSX || SYS_OPENBSD || SYS_FREEBSD
 #include <sys/sysctl.h>
 uint32_t x264_cpu_detect( void )
 {
     /* Thank you VLC */
     uint32_t cpu = 0;
-#ifdef SYS_OPENBSD
+#if SYS_OPENBSD
     int      selectors[2] = { CTL_MACHDEP, CPU_ALTIVEC };
-#else
+#elif SYS_MACOSX
     int      selectors[2] = { CTL_HW, HW_VECTORUNIT };
 #endif
     int      has_altivec = 0;
     size_t   length = sizeof( has_altivec );
+#if SYS_MACOSX || SYS_OPENBSD
     int      error = sysctl( selectors, 2, &has_altivec, &length, NULL, 0 );
+#else
+    int      error = sysctlbyname( "hw.altivec", &has_altivec, &length, NULL, 0 );
+#endif
 
     if( error == 0 && has_altivec != 0 )
         cpu |= X264_CPU_ALTIVEC;
@@ -247,10 +343,13 @@ uint32_t x264_cpu_detect( void )
     return cpu;
 }
 
-#elif defined( SYS_LINUX )
+#elif SYS_LINUX
 
 uint32_t x264_cpu_detect( void )
 {
+#ifdef __NO_FPRS__
+    return 0;
+#else
     static void (*oldsig)( int );
 
     oldsig = signal( SIGILL, sigill_handler );
@@ -270,22 +369,23 @@ uint32_t x264_cpu_detect( void )
     signal( SIGILL, oldsig );
 
     return X264_CPU_ALTIVEC;
+#endif
 }
 #endif
 
-#elif defined( ARCH_ARM )
+#elif ARCH_ARM
 
-void x264_cpu_neon_test();
-int x264_cpu_fast_neon_mrc_test();
+void x264_cpu_neon_test( void );
+int x264_cpu_fast_neon_mrc_test( void );
 
 uint32_t x264_cpu_detect( void )
 {
     int flags = 0;
-#ifdef HAVE_ARMV6
+#if HAVE_ARMV6
     flags |= X264_CPU_ARMV6;
 
     // don't do this hack if compiled with -mfpu=neon
-#ifndef HAVE_NEON
+#if !HAVE_NEON
     static void (* oldsig)( int );
     oldsig = signal( SIGILL, sigill_handler );
     if( sigsetjmp( jmpbuf, 1 ) )
@@ -309,64 +409,90 @@ uint32_t x264_cpu_detect( void )
     // Note that there is potential for a race condition if another program or
     // x264 instance disables or reinits the counters while x264 is using them,
     // which may result in incorrect detection and the counters stuck enabled.
+    // Right now Apple does not seem to support performance counters for this test
+#ifndef __MACH__
     flags |= x264_cpu_fast_neon_mrc_test() ? X264_CPU_FAST_NEON_MRC : 0;
+#endif
     // TODO: write dual issue test? currently it's A8 (dual issue) vs. A9 (fast mrc)
 #endif
     return flags;
 }
 
-#else
+#elif ARCH_AARCH64
 
 uint32_t x264_cpu_detect( void )
 {
-    return 0;
+    return X264_CPU_ARMV8 | X264_CPU_NEON;
 }
 
+#elif ARCH_MIPS
+
+uint32_t x264_cpu_detect( void )
+{
+    uint32_t flags = 0;
+#if HAVE_MSA
+    flags |= X264_CPU_MSA;
 #endif
+    return flags;
+}
+
+#else
 
-#ifndef HAVE_MMX
-void x264_emms( void )
+uint32_t x264_cpu_detect( void )
 {
+    return 0;
 }
-#endif
 
+#endif
 
 int x264_cpu_num_processors( void )
 {
-#if !defined(HAVE_PTHREAD)
+#if !HAVE_THREAD
     return 1;
 
-#elif defined(_WIN32)
-    return pthread_num_processors_np();
+#elif SYS_WINDOWS
+    return x264_pthread_num_processors_np();
+
+#elif SYS_CYGWIN || SYS_SunOS
+    return sysconf( _SC_NPROCESSORS_ONLN );
 
-#elif defined(SYS_LINUX)
-    unsigned int bit;
-    int np;
+#elif SYS_LINUX
+#ifdef __ANDROID__
+    // Android NDK does not expose sched_getaffinity
+    return sysconf( _SC_NPROCESSORS_CONF );
+#else
     cpu_set_t p_aff;
     memset( &p_aff, 0, sizeof(p_aff) );
-    sched_getaffinity( 0, sizeof(p_aff), &p_aff );
-    for( np = 0, bit = 0; bit < sizeof(p_aff); bit++ )
+    if( sched_getaffinity( 0, sizeof(p_aff), &p_aff ) )
+        return 1;
+#if HAVE_CPU_COUNT
+    return CPU_COUNT(&p_aff);
+#else
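+    /* No CPU_COUNT() available: count the set bits of the affinity mask by hand */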
+    int np = 0;
+    for( unsigned int bit = 0; bit < 8 * sizeof(p_aff); bit++ )
         np += (((uint8_t *)&p_aff)[bit / 8] >> (bit % 8)) & 1;
     return np;
+#endif
+#endif
 
-#elif defined(SYS_BEOS)
+#elif SYS_BEOS
     system_info info;
     get_system_info( &info );
     return info.cpu_count;
 
-#elif defined(SYS_MACOSX) || defined(SYS_FREEBSD) || defined(SYS_OPENBSD)
-    int numberOfCPUs;
-    size_t length = sizeof( numberOfCPUs );
-#ifdef SYS_OPENBSD
+#elif SYS_MACOSX || SYS_FREEBSD || SYS_OPENBSD
+    int ncpu;
+    size_t length = sizeof( ncpu );
+#if SYS_OPENBSD
     int mib[2] = { CTL_HW, HW_NCPU };
-    if( sysctl(mib, 2, &numberOfCPUs, &length, NULL, 0) )
+    if( sysctl(mib, 2, &ncpu, &length, NULL, 0) )
 #else
-    if( sysctlbyname("hw.ncpu", &numberOfCPUs, &length, NULL, 0) )
+    if( sysctlbyname("hw.ncpu", &ncpu, &length, NULL, 0) )
 #endif
     {
-        numberOfCPUs = 1;
+        ncpu = 1;
     }
-    return numberOfCPUs;
+    return ncpu;
 
 #else
     return 1;