fast_64bit
fast_clz
fast_cmov
-- local_aligned_8
-- local_aligned_16
-- local_aligned_32
++ local_aligned
simd_align_16
simd_align_32
"
cpuflags="-march=$cpu"
- enable local_aligned_8 local_aligned_16 local_aligned_32
+ if [ "$cpu" != "generic" ]; then
+ disable mips32r2
+ disable mips32r5
+ disable mips64r2
+ disable mips32r6
+ disable mips64r6
+ disable loongson2
+ disable loongson3
+
+ case $cpu in
+ 24kc|24kf*|24kec|34kc|1004kc|24kef*|34kf*|1004kf*|74kc|74kf)
+ enable mips32r2
+ disable msa
+ ;;
+ p5600|i6400|p6600)
+ disable mipsdsp
+ disable mipsdspr2
+ ;;
+ loongson*)
+ enable loongson2
+ enable loongson3
++ enable local_aligned
+ enable simd_align_16
+ enable fast_64bit
+ enable fast_clz
+ enable fast_cmov
+ enable fast_unaligned
+ disable aligned_stack
+ disable mipsfpu
+ disable mipsdsp
+ disable mipsdspr2
+ case $cpu in
+ loongson3*)
+ cpuflags="-march=loongson3a -mhard-float -fno-expensive-optimizations"
+ ;;
+ loongson2e)
+ cpuflags="-march=loongson2e -mhard-float -fno-expensive-optimizations"
+ ;;
+ loongson2f)
+ cpuflags="-march=loongson2f -mhard-float -fno-expensive-optimizations"
+ ;;
+ esac
+ ;;
+ *)
+ # Unknown CPU. Disable everything.
+ warn "unknown CPU. Disabling all MIPS optimizations."
+ disable mipsfpu
+ disable mipsdsp
+ disable mipsdspr2
+ disable msa
+ disable mmi
+ ;;
+ esac
+
+ case $cpu in
+ 24kc)
+ disable mipsfpu
+ disable mipsdsp
+ disable mipsdspr2
+ ;;
+ 24kf*)
+ disable mipsdsp
+ disable mipsdspr2
+ ;;
+ 24kec|34kc|1004kc)
+ disable mipsfpu
+ disable mipsdspr2
+ ;;
+ 24kef*|34kf*|1004kf*)
+ disable mipsdspr2
+ ;;
+ 74kc)
+ disable mipsfpu
+ ;;
+ p5600)
+ enable mips32r5
+ check_cflags "-mtune=p5600" && check_cflags "-msched-weight -mload-store-pairs -funroll-loops"
+ ;;
+ i6400)
+ enable mips64r6
+ check_cflags "-mtune=i6400 -mabi=64" && check_cflags "-msched-weight -mload-store-pairs -funroll-loops" && check_ldflags "-mabi=64"
+ ;;
+ p6600)
+ enable mips64r6
+ check_cflags "-mtune=p6600 -mabi=64" && check_cflags "-msched-weight -mload-store-pairs -funroll-loops" && check_ldflags "-mabi=64"
+ ;;
+ esac
+ else
+ # We do not disable anything. It is up to the user to disable the unwanted features.
+ warn 'generic cpu selected'
+ fi
+
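For context, each setting that configure enables or disables in this hunk surfaces as a HAVE_* macro in the generated config.h. A hypothetical excerpt for a loongson3 build after this change (illustrative only; the exact contents depend on the full probe run):

    /* hypothetical config.h excerpt for --cpu=loongson3a */
    #define HAVE_LOCAL_ALIGNED  1  /* single setting, replacing HAVE_LOCAL_ALIGNED_{8,16,32} */
    #define HAVE_SIMD_ALIGN_16  1
    #define HAVE_FAST_64BIT     1
    #define HAVE_FAST_CLZ       1
    #define HAVE_FAST_CMOV      1
    #define HAVE_FAST_UNALIGNED 1
    #define HAVE_ALIGNED_STACK  0
    #define HAVE_MIPSFPU        0
    #define HAVE_MIPSDSP        0
    #define HAVE_MIPSDSPR2      0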
elif enabled ppc; then
disable ldbrx
elif enabled ppc; then
-- enable local_aligned_8 local_aligned_16 local_aligned_32
++ enable local_aligned
check_inline_asm dcbzl '"dcbzl 0, %0" :: "r"(0)'
check_inline_asm ibm_asm '"add 0, 0, 0"'
check_builtin rdtsc intrin.h "__rdtsc()"
check_builtin mm_empty mmintrin.h "_mm_empty()"
-- enable local_aligned_8 local_aligned_16 local_aligned_32
++ enable local_aligned
# check whether EBP is available on x86
# As 'i' is stored on the stack, this program will crash
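The check_builtin lines in the x86 hunk above each compile a small probe. A sketch of what configure roughly generates for the rdtsc check, assuming its usual pattern of wrapping the given expression in main() together with the listed header:

    /* approximate probe for: check_builtin rdtsc intrin.h "__rdtsc()" */
    #include <intrin.h>

    int main(void)
    {
        __rdtsc();   /* builtin under test */
        return 0;
    }

If this compiles and links, configure enables the rdtsc setting (HAVE_RDTSC in config.h); otherwise it stays disabled.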
#include <assert.h>
#include "config.h"
#include "attributes.h"
+#include "timer.h"
+#include "cpu.h"
#include "dict.h"
#include "macros.h"
+ #include "mem.h"
#include "pixfmt.h"
+#include "version.h"
#if ARCH_X86
# include "x86/emms.h"
DECLARE_ALIGNED(a, t, la_##v) s o; \
t (*v) o = la_##v
- #define LOCAL_ALIGNED(a, t, v, ...) E1(LOCAL_ALIGNED_A(a, t, v, __VA_ARGS__,,))
+ #define LOCAL_ALIGNED(a, t, v, ...) LOCAL_ALIGNED_##a(t, v, __VA_ARGS__)
--#if HAVE_LOCAL_ALIGNED_8
++#if HAVE_LOCAL_ALIGNED
++# define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_D(4, t, v, __VA_ARGS__,,))
++#else
++# define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_A(4, t, v, __VA_ARGS__,,))
++#endif
++
++#if HAVE_LOCAL_ALIGNED
# define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,))
#else
- # define LOCAL_ALIGNED_8(t, v, ...) LOCAL_ALIGNED(8, t, v, __VA_ARGS__)
+ # define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_A(8, t, v, __VA_ARGS__,,))
#endif
--#if HAVE_LOCAL_ALIGNED_16
++#if HAVE_LOCAL_ALIGNED
# define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,))
#else
- # define LOCAL_ALIGNED_16(t, v, ...) LOCAL_ALIGNED(16, t, v, __VA_ARGS__)
+ # define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_A(16, t, v, __VA_ARGS__,,))
#endif
--#if HAVE_LOCAL_ALIGNED_32
++#if HAVE_LOCAL_ALIGNED
# define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_D(32, t, v, __VA_ARGS__,,))
#else
- # define LOCAL_ALIGNED_32(t, v, ...) LOCAL_ALIGNED(32, t, v, __VA_ARGS__)
+ # define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_A(32, t, v, __VA_ARGS__,,))
#endif
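With the three HAVE_LOCAL_ALIGNED_{8,16,32} conditionals collapsed into one, every width now keys off the single HAVE_LOCAL_ALIGNED: LOCAL_ALIGNED_D (whose body is visible in the context lines above) declares the array itself aligned via DECLARE_ALIGNED, while the LOCAL_ALIGNED_A fallback aligns a pointer into an over-sized plain buffer. A hypothetical caller, to illustrate the expansion (idct_test is not a real FFmpeg function):

    #include <stdint.h>
    #include "libavutil/internal.h"

    void idct_test(void)
    {
        /* With HAVE_LOCAL_ALIGNED this expands to roughly:
         *     DECLARE_ALIGNED(16, uint8_t, la_buf)[2][16];
         *     uint8_t (*buf)[16] = la_buf;
         * i.e. a 16-byte-aligned stack array accessed through buf. */
        LOCAL_ALIGNED_16(uint8_t, buf, [2], [16]);

        /* The generic form dispatches by token pasting:
         * LOCAL_ALIGNED(16, ...) -> LOCAL_ALIGNED_16(...). */
        LOCAL_ALIGNED(16, uint8_t, tmp, [16]);

        tmp[0]    = 1;
        buf[0][0] = tmp[0];
    }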
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)\