// Copyright (c) 2011 Google Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//    * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//    * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//    * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Do not include this header file directly. Use base/cef_atomicops.h
// instead.
#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_GCC_H_
#define CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_GCC_H_
// This struct is not part of the public API of this module; clients may not
// use it.
// Features of this x86 CPU. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
};
extern struct AtomicOps_x86CPUFeatureStruct
    AtomicOps_Internalx86CPUFeatures;
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

namespace base {
namespace subtle {

// 32-bit low-level operations on any platform.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  __asm__ __volatile__("lock; cmpxchgl %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  return temp + increment;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}
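// Illustrative sketch, not part of this header: because the increment
// variants return the *new* value, a hypothetical reference count can detect
// the final release directly. Barrier_AtomicIncrement() supplies the barrier
// needed before tearing the object down.
//
//   if (Barrier_AtomicIncrement(&obj->ref_count, -1) == 0) {
//     delete obj;  // Last reference; prior writes to *obj are visible here.
//   }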
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
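// Illustrative sketch, not part of this header: a minimal (hypothetical)
// spinlock built on these primitives. The acquire CAS orders the lock
// acquisition before the critical section; Release_Store() (below) orders
// the critical section before the unlock.
//
//   void SpinLock(volatile Atomic32* lock) {
//     while (Acquire_CompareAndSwap(lock, 0, 1) != 0) {
//       // Spin until this thread observes the word change from 0 to 1.
//     }
//   }
//
//   void SpinUnlock(volatile Atomic32* lock) {
//     Release_Store(lock, 0);
//   }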
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}
inline void MemoryBarrier() {
  __asm__ __volatile__("mfence" : : : "memory");
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ATOMICOPS_COMPILER_BARRIER();
  *ptr = value;  // An x86 store acts as a release barrier.
  // See comments in Atomic64 version of Release_Store(), below.
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier.
  // See comments in Atomic64 version of Release_Store(), below.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
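// Illustrative sketch, not part of this header: the publish/consume pattern
// the Release_Store()/Acquire_Load() pair is designed for. The names below
// are hypothetical.
//
//   Producer thread:
//     g_payload = ComputePayload();  // plain stores
//     Release_Store(&g_ready, 1);    // all prior stores become visible first
//
//   Consumer thread:
//     if (Acquire_Load(&g_ready) == 1) {
//       UsePayload(g_payload);       // guaranteed to see the full payload
//     }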
#if defined(__x86_64__)

// 64-bit low-level operations on 64-bit platform.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  __asm__ __volatile__("lock; cmpxchgq %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  return temp + increment;
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ATOMICOPS_COMPILER_BARRIER();

  *ptr = value;  // An x86 store acts as a release barrier
                 // for current AMD/Intel chips as of Jan 2008.
                 // See also Acquire_Load(), below.

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
  //
  // x86 stores/loads fail to act as barriers for a few instructions (clflush
  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
  // not generated by the compiler, and are rare. Users of these instructions
  // need to know about cache behaviour in any case since all of these involve
  // either flushing cache lines or non-temporal cache hints.
}
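// Illustrative sketch, not part of this header: code that does use one of
// the non-temporal instructions above (e.g. via SSE2 intrinsics from
// <emmintrin.h>) must supply its own fence before relying on the release
// semantics of a plain store:
//
//   _mm_stream_si32((int*)&g_payload, 42);  // movnti: weakly ordered store
//   _mm_sfence();                           // force the streamed store out
//   Release_Store(&g_ready, 1);             // now safe to publish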
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;  // An x86 load acts as an acquire barrier,
                          // for current AMD/Intel chips as of Jan 2008.
                          // See also Release_Store(), above.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
#endif  // defined(__x86_64__)
}  // namespace base::subtle
}  // namespace base
#undef ATOMICOPS_COMPILER_BARRIER
#endif  // CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_GCC_H_