X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=common%2Fcpu.h;h=138141946fc4ff6929a5aa7eed29bb95643478be;hb=7650a1367003e24f4f1b831682c012b5ba3e6c69;hp=6901e1e18c99f8be9e6177b2b147c040a649e65a;hpb=6940dcaef140d8a0c43c9a62db158e9d71a8fdeb;p=x264

diff --git a/common/cpu.h b/common/cpu.h
index 6901e1e1..13814194 100644
--- a/common/cpu.h
+++ b/common/cpu.h
@@ -1,7 +1,9 @@
 /*****************************************************************************
- * cpu.h: h264 encoder library
+ * cpu.h: cpu detection
  *****************************************************************************
- * Copyright (C) 2004-2008 Loren Merritt
+ * Copyright (C) 2004-2016 x264 project
+ *
+ * Authors: Loren Merritt
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -16,6 +18,9 @@
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at licensing@x264.com.
  *****************************************************************************/
 
 #ifndef X264_CPU_H
@@ -23,27 +28,45 @@
 
 uint32_t x264_cpu_detect( void );
 int x264_cpu_num_processors( void );
-void x264_emms( void );
-void x264_cpu_mask_misalign_sse( void );
+void x264_cpu_emms( void );
+void x264_cpu_sfence( void );
+#if HAVE_MMX
+/* There is no way to forbid the compiler from using float instructions
+ * before the emms so miscompilation could theoretically occur in the
+ * unlikely event that the compiler reorders emms and float instructions. */
+#if HAVE_X86_INLINE_ASM
+/* Clobbering memory makes the compiler less likely to reorder code. */
+#define x264_emms() asm volatile( "emms":::"memory","st","st(1)","st(2)", \
+                                  "st(3)","st(4)","st(5)","st(6)","st(7)" )
+#else
+#define x264_emms() x264_cpu_emms()
+#endif
+#else
+#define x264_emms()
+#endif
+#define x264_sfence x264_cpu_sfence
 
-/* kluge:
+/* kludge:
  * gcc can't give variables any greater alignment than the stack frame has.
- * We need 16 byte alignment for SSE2, so here we make sure that the stack is
- * aligned to 16 bytes.
+ * We need 32 byte alignment for AVX2, so here we make sure that the stack is
+ * aligned to 32 bytes.
  * gcc 4.2 introduced __attribute__((force_align_arg_pointer)) to fix this
  * problem, but I don't want to require such a new version.
- * This applies only to x86_32, since other architectures that need alignment
- * either have ABIs that ensure aligned stack, or don't support it at all. */
-#if defined(ARCH_X86) && defined(HAVE_MMX)
-int x264_stack_align( void (*func)(), ... );
+ * aligning to 32 bytes only works if the compiler supports keeping that
+ * alignment between functions (osdep.h handles manual alignment of arrays
+ * if it doesn't).
+ */
+#if (ARCH_X86 || STACK_ALIGNMENT > 16) && HAVE_MMX
+intptr_t x264_stack_align( void (*func)(), ... );
 #define x264_stack_align(func,...) x264_stack_align((void (*)())func, __VA_ARGS__)
 #else
 #define x264_stack_align(func,...) func(__VA_ARGS__)
 #endif
 
-typedef struct {
+typedef struct
+{
     const char name[16];
-    int flags;
+    uint32_t flags;
 } x264_cpu_name_t;
 
 extern const x264_cpu_name_t x264_cpu_names[];
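
A minimal usage sketch for the interface patched above. It assumes x264's internal include layout ("common/common.h") and that the x264_cpu_names[] table ends with a zero-flags sentinel entry; both are assumptions, neither is shown in this patch.

/* Sketch only: assumes common/common.h pulls in cpu.h plus the config macros,
 * and that x264_cpu_names[] is terminated by an entry whose flags are 0. */
#include <stdio.h>
#include "common/common.h"

static void print_cpu_caps( void )
{
    uint32_t cpu = x264_cpu_detect();   /* bitmask of detected capability flags */
    printf( "processors: %d\ncapabilities:", x264_cpu_num_processors() );
    for( int i = 0; x264_cpu_names[i].flags; i++ )
        if( (cpu & x264_cpu_names[i].flags) == x264_cpu_names[i].flags )
            printf( " %s", x264_cpu_names[i].name );
    printf( "\n" );
    /* After any code path that may have executed MMX instructions, empty the
     * x87/MMX state before touching floats; a stray call here is harmless
     * (it expands to nothing on non-MMX builds). */
    x264_emms();
}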