#ifndef _TOOLS_LINUX_COMPILER_H_
#define _TOOLS_LINUX_COMPILER_H_

/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
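
/*
 * Illustrative sketch, not part of the original header: barrier() only
 * constrains the compiler, not the CPU. In the hypothetical busy-wait
 * below it forces 'flag' to be re-read from memory on every iteration
 * instead of being cached in a register:
 *
 *	extern int flag;		// hypothetical shared variable
 *
 *	static void wait_for_flag(void)
 *	{
 *		while (!flag)
 *			barrier();	// compiler must reload 'flag'
 *	}
 */
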
#ifndef __always_inline
# define __always_inline inline __attribute__((always_inline))
#endif

#ifndef __attribute_const__
#define __attribute_const__ __attribute__((__const__))
#endif

#ifdef __ANDROID__
/*
 * FIXME: Big hammer to get rid of tons of:
 *   "warning: always_inline function might not be inlinable"
 *
 *   At least on android-ndk-r12/platforms/android-24/arch-arm
 */
#undef __always_inline
#define __always_inline inline
#endif

#ifndef noinline
#define noinline __attribute__((__noinline__))
#endif
#define noinline_for_stack noinline

#define __pure __attribute__((pure))
#define __aligned(x) __attribute__((aligned(x)))
#define __printf(a, b) __attribute__((format(printf, a, b)))
#define __used __attribute__((__used__))
#define __maybe_unused __attribute__((unused))
#define __always_unused __attribute__((unused))
#define __packed __attribute__((__packed__))
#define __flatten __attribute__((flatten))

#define __chk_user_ptr(x) (void)0
#define __chk_io_ptr(x) (void)0
#define __builtin_warning(x, y...) (1)
#define __must_hold(x)
#define __acquire(x) (void)0
#define __release(x) (void)0
#define __cond_lock(x,c) (c)

#define __weak __attribute__((weak))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define unreachable() __builtin_unreachable()
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#define fallthrough __attribute__((__fallthrough__))
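
/*
 * Illustrative sketch, not part of the original header: likely()/unlikely()
 * are branch-prediction hints, and fallthrough documents an intentional
 * drop-through in a switch. The function and helpers below are hypothetical.
 *
 *	int handle(int c)
 *	{
 *		if (unlikely(c < 0))	// error path expected to be rare
 *			return -1;
 *
 *		switch (c) {
 *		case 0:
 *			init_state();	// hypothetical helper
 *			fallthrough;	// deliberately also run_state()
 *		case 1:
 *			run_state();	// hypothetical helper
 *			break;
 *		}
 *		return 0;
 *	}
 */
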
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
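
/*
 * Illustrative note, not part of the original header: the two-level
 * __PASTE() expands its arguments before token pasting, so __LINE__ is
 * substituted first. On line 42, a hypothetical __UNIQUE_ID(foo) would
 * expand to __UNIQUE_ID_foo42, which is useful for generating distinct
 * identifiers inside macros, e.g.:
 *
 *	#define DEFINE_SCRATCH() int __UNIQUE_ID(scratch)	// hypothetical
 */
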
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

#define __initcall(x) /* unimplemented */
#define __exitcall(x) /* unimplemented */

#include <linux/types.h>

/*
 * The following functions are taken from kernel sources and
 * break aliasing rules in their original form.
 *
 * While the kernel is compiled with -fno-strict-aliasing,
 * perf uses -Wstrict-aliasing=3, which makes the build fail.
 *
 * Using an extra __may_alias__ type allows the aliasing in this case.
 */

typedef __u8 __attribute__((__may_alias__)) __u8_alias_t;
typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8_alias_t *) res = *(volatile __u8_alias_t *) p; break;
	case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
	case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
	case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8_alias_t *) p = *(__u8_alias_t *) res; break;
	case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
	case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
	case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see above), but only when the
 * compiler is aware of some particular ordering. One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define READ_ONCE(x) \
	({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })

#define WRITE_ONCE(x, val) \
	({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
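
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * single-writer/single-reader flag. READ_ONCE()/WRITE_ONCE() stop the
 * compiler from tearing, fusing or hoisting these accesses, but they add
 * no CPU memory barriers; pair them with explicit barriers when ordering
 * against other memory accesses is required.
 *
 *	static int done;			// hypothetical shared flag
 *
 *	static void writer(void)
 *	{
 *		WRITE_ONCE(done, 1);
 *	}
 *
 *	static void reader(void)
 *	{
 *		while (!READ_ONCE(done))	// re-read on every pass
 *			;
 *	}
 */
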
#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	typeof(*(p)) *___typecheck_p __maybe_unused; \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})
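
/*
 * Illustrative sketch, not part of the original header: typical use is
 * reading a pointer that another thread publishes with a matching write
 * barrier; the dependency barrier above orders the pointer load before
 * the dereferences that follow. The struct and variable are hypothetical.
 *
 *	struct item { int val; };
 *	struct item *published;		// written elsewhere with a write barrier
 *
 *	static int read_val(void)
 *	{
 *		struct item *p = lockless_dereference(published);
 *
 *		return p ? p->val : 0;
 *	}
 */
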
/* Cache maintenance hooks: no-op stubs in this environment. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)

#define CONFIG_X86_64 1

#endif /* _TOOLS_LINUX_COMPILER_H_ */