/*
    Copyright 2005-2011 Intel Corporation. All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    As a special exception, you may use this file as part of a free software
    library without restriction. Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License. This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/
#ifndef __TBB_atomic_H
#define __TBB_atomic_H

#include "tbb_stddef.h"
#if _MSC_VER
#define __TBB_LONG_LONG __int64
#else
#define __TBB_LONG_LONG long long
#endif /* _MSC_VER */
#include "tbb_machine.h"
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings
    #pragma warning (push)
    #pragma warning (disable: 4244 4267)
#endif

namespace tbb {
//! Specifies memory fencing.
enum memory_semantics {
    //! For internal use only.
    __TBB_full_fence,
    //! Acquire fence
    acquire,
    //! Release fence
    release
};
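// Usage sketch (illustrative comment only; tbb::atomic is defined later in this header).
// A memory_semantics value is passed as a template argument to select the fence for a
// single operation; the non-template overloads default to __TBB_full_fence.
//
//     void publish( tbb::atomic<int>& refcount ) {
//         refcount.fetch_and_add<tbb::release>(1);                  // release fencing
//         int old = refcount.compare_and_swap<tbb::acquire>(0, 1);  // acquire fencing
//         (void)old;
//     }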
//! @cond INTERNAL
namespace internal {

#if __GNUC__ || __SUNPRO_CC || __IBMCPP__
#define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f __attribute__ ((aligned(a)));
#elif defined(__INTEL_COMPILER)||_MSC_VER >= 1300
#define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f;
#else
#error Do not know syntax for forcing alignment.
#endif /* __GNUC__ */
template<size_t S>
struct atomic_rep;           // Primary template declared, but never defined.
template<>
struct atomic_rep<1> {       // Specialization
    typedef int8_t word;
    int8_t value;
};
template<>
struct atomic_rep<2> {       // Specialization
    typedef int16_t word;
    __TBB_DECL_ATOMIC_FIELD(int16_t,value,2)
};
template<>
struct atomic_rep<4> {       // Specialization
#if _MSC_VER && __TBB_WORDSIZE==4
    // Work-around that avoids spurious /Wp64 warnings
    typedef intptr_t word;
#else
    typedef int32_t word;
#endif
    __TBB_DECL_ATOMIC_FIELD(int32_t,value,4)
};
#if __TBB_64BIT_ATOMICS
template<>
struct atomic_rep<8> {       // Specialization
    typedef int64_t word;
    __TBB_DECL_ATOMIC_FIELD(int64_t,value,8)
};
#endif
template<size_t Size, memory_semantics M>
struct atomic_traits;        // Primary template declared, but not defined.
#define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M)                                                  \
    template<> struct atomic_traits<S,M> {                                                        \
        typedef atomic_rep<S>::word word;                                                         \
        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \
            return __TBB_CompareAndSwap##S##M(location,new_value,comparand);                      \
        }                                                                                         \
        inline static word fetch_and_add( volatile void* location, word addend ) {               \
            return __TBB_FetchAndAdd##S##M(location,addend);                                      \
        }                                                                                         \
        inline static word fetch_and_store( volatile void* location, word value ) {              \
            return __TBB_FetchAndStore##S##M(location,value);                                     \
        }                                                                                         \
    };
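// For reference, __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire) expands to roughly the
// following (sketch, assuming tbb_machine.h supplies __TBB_CompareAndSwap4acquire and friends):
//
//     template<> struct atomic_traits<4,acquire> {
//         typedef atomic_rep<4>::word word;
//         inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {
//             return __TBB_CompareAndSwap4acquire(location,new_value,comparand);
//         }
//         // fetch_and_add and fetch_and_store forward to __TBB_FetchAndAdd4acquire
//         // and __TBB_FetchAndStore4acquire in the same way.
//     };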
#define __TBB_DECL_ATOMIC_PRIMITIVES(S)                                                           \
    template<memory_semantics M>                                                                  \
    struct atomic_traits<S,M> {                                                                   \
        typedef atomic_rep<S>::word word;                                                         \
        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \
            return __TBB_CompareAndSwap##S(location,new_value,comparand);                         \
        }                                                                                         \
        inline static word fetch_and_add( volatile void* location, word addend ) {               \
            return __TBB_FetchAndAdd##S(location,addend);                                         \
        }                                                                                         \
        inline static word fetch_and_store( volatile void* location, word value ) {              \
            return __TBB_FetchAndStore##S(location,value);                                        \
        }                                                                                         \
    };
#if __TBB_DECL_FENCED_ATOMICS
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release)
#if __TBB_64BIT_ATOMICS
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release)
#endif
#else /* !__TBB_DECL_FENCED_ATOMICS */
__TBB_DECL_ATOMIC_PRIMITIVES(1)
__TBB_DECL_ATOMIC_PRIMITIVES(2)
__TBB_DECL_ATOMIC_PRIMITIVES(4)
#if __TBB_64BIT_ATOMICS
__TBB_DECL_ATOMIC_PRIMITIVES(8)
#endif
#endif /* __TBB_DECL_FENCED_ATOMICS */
//! Additive inverse of 1 for type T.
/** Various compilers issue various warnings if -1 is used with various integer types.
    The baroque expression below avoids all the warnings (we hope). */
#define __TBB_MINUS_ONE(T) (T(T(0)-T(1)))
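// For example, __TBB_MINUS_ONE(size_t) expands to (size_t(size_t(0)-size_t(1))), i.e. the
// all-ones value of size_t, without the signed/unsigned conversion warnings that a bare -1
// can provoke on some compilers.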
//! Base class that provides basic functionality for atomic<T> without fetch_and_add.
/** Works for any type T that has the same size as an integral type, has a trivial constructor/destructor,
    and can be copied/compared by memcpy/memcmp. */
template<typename T>
struct atomic_impl {
protected:
    atomic_rep<sizeof(T)> rep;
private:
    //! Union type used to convert type T to underlying integral type.
    union converter {
        T value;
        typename atomic_rep<sizeof(T)>::word bits;
    };
public:
    typedef T value_type;
    template<memory_semantics M>
    value_type fetch_and_store( value_type value ) {
        converter u, w;
        u.value = value;
        w.bits = internal::atomic_traits<sizeof(value_type),M>::fetch_and_store(&rep.value,u.bits);
        return w.value;
    }

    value_type fetch_and_store( value_type value ) {
        return fetch_and_store<__TBB_full_fence>(value);
    }

    template<memory_semantics M>
    value_type compare_and_swap( value_type value, value_type comparand ) {
        converter u, v, w;
        u.value = value;
        v.value = comparand;
        w.bits = internal::atomic_traits<sizeof(value_type),M>::compare_and_swap(&rep.value,u.bits,v.bits);
        return w.value;
    }

    value_type compare_and_swap( value_type value, value_type comparand ) {
        return compare_and_swap<__TBB_full_fence>(value,comparand);
    }

    operator value_type() const volatile {   // volatile qualifier here for backwards compatibility
        converter w;
        w.bits = __TBB_load_with_acquire( rep.value );
        return w.value;
    }

protected:
    value_type store_with_release( value_type rhs ) {
        converter u;
        u.value = rhs;
        __TBB_store_with_release(rep.value,u.bits);
        return rhs;
    }
};
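// Usage sketch (illustrative comment only): through tbb::atomic<T>, which derives from this
// class, compare_and_swap returns the value observed before the operation, so success is
// detected by comparing the result against the comparand.
//
//     bool try_claim( tbb::atomic<int>& flag ) {
//         // Attempt to change flag from 0 to 1; returns true only for the winning thread.
//         return flag.compare_and_swap(1, 0) == 0;
//     }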
//! Base class that provides basic functionality for atomic<T> with fetch_and_add.
/** I is the underlying type.
    D is the difference type.
    StepType should be char if I is an integral type, and T if I is a T*. */
template<typename I, typename D, typename StepType>
struct atomic_impl_with_arithmetic: atomic_impl<I> {
public:
    typedef I value_type;
    template<memory_semantics M>
    value_type fetch_and_add( D addend ) {
        return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_add( &this->rep.value, addend*sizeof(StepType) ));
    }

    value_type fetch_and_add( D addend ) {
        return fetch_and_add<__TBB_full_fence>(addend);
    }

    template<memory_semantics M>
    value_type fetch_and_increment() {
        return fetch_and_add<M>(1);
    }

    value_type fetch_and_increment() {
        return fetch_and_add(1);
    }

    template<memory_semantics M>
    value_type fetch_and_decrement() {
        return fetch_and_add<M>(__TBB_MINUS_ONE(D));
    }

    value_type fetch_and_decrement() {
        return fetch_and_add(__TBB_MINUS_ONE(D));
    }
    value_type operator+=( D addend ) {
        return fetch_and_add(addend)+addend;
    }

    value_type operator-=( D addend ) {
        // Additive inverse of addend computed using binary minus,
        // instead of unary minus, for the sake of avoiding compiler warnings.
        return operator+=(D(0)-addend);
    }

    value_type operator++() {
        return fetch_and_add(1)+1;
    }

    value_type operator--() {
        return fetch_and_add(__TBB_MINUS_ONE(D))-1;
    }

    value_type operator++(int) {
        return fetch_and_add(1);
    }

    value_type operator--(int) {
        return fetch_and_add(__TBB_MINUS_ONE(D));
    }
};
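// Usage sketch (illustrative comment only): the fetch_and_* operations and overloaded
// operators become available on the integral and pointer specializations of tbb::atomic
// declared below. All of them return values rather than references, e.g.:
//
//     int demo( tbb::atomic<int>& counter ) {
//         counter = 0;                        // store with release semantics
//         counter += 10;                      // returns the new value, 10
//         int old = counter.fetch_and_add(5); // returns the old value, 10
//         ++counter;                          // returns the new value, 16
//         return old;
//     }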
} // namespace internal
//! @endcond

//! Primary template for atomic.
/** See the Reference for details.
    @ingroup synchronization */
template<typename T>
struct atomic: internal::atomic_impl<T> {
    T operator=( T rhs ) {
        // "this" is required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;}
};
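// Usage sketch (illustrative comment only): the primary template covers types that merely
// need atomic load, store, fetch_and_store and compare_and_swap, e.g. enums or other PODs
// whose size matches one of the atomic_rep specializations; arithmetic is provided only by
// the specializations declared below.
//
//     enum state_t { idle, busy };
//     tbb::atomic<state_t> state;   // zero-initialized at namespace scope, i.e. idle
//     bool acquire_state() { return state.compare_and_swap(busy, idle) == idle; }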
#define __TBB_DECL_ATOMIC(T)                                                                      \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {               \
        T operator=( T rhs ) {return store_with_release(rhs);}                                    \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}     \
    };
#if __TBB_64BIT_ATOMICS
// otherwise size is verified by test_atomic
__TBB_DECL_ATOMIC(__TBB_LONG_LONG)
__TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG)
#endif
__TBB_DECL_ATOMIC(long)
__TBB_DECL_ATOMIC(unsigned long)
#if defined(_MSC_VER) && __TBB_WORDSIZE==4
/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option.
   It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T)
   with an operator=(U) that explicitly converts the U to a T. Types T and U should be
   type synonyms on the platform. Type U should be the wider variant of T from the
   perspective of /Wp64. */
#define __TBB_DECL_ATOMIC_ALT(T,U)                                                                \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {               \
        T operator=( U rhs ) {return store_with_release(T(rhs));}                                 \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}     \
    };
__TBB_DECL_ATOMIC_ALT(unsigned,size_t)
__TBB_DECL_ATOMIC_ALT(int,ptrdiff_t)
#else
__TBB_DECL_ATOMIC(unsigned)
__TBB_DECL_ATOMIC(int)
#endif /* defined(_MSC_VER) && __TBB_WORDSIZE==4 */
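// Practical effect of __TBB_DECL_ATOMIC_ALT (sketch): on 32-bit MSVC builds with /Wp64,
// assigning a size_t to atomic<unsigned> goes through operator=(size_t) with an explicit
// narrowing cast, so code such as
//
//     tbb::atomic<unsigned> count;
//     size_t n = 42;
//     count = n;      // no spurious /Wp64 truncation warning
//
// compiles quietly even though unsigned and size_t have the same width on that platform.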
__TBB_DECL_ATOMIC(unsigned short)
__TBB_DECL_ATOMIC(short)
__TBB_DECL_ATOMIC(char)
__TBB_DECL_ATOMIC(signed char)
__TBB_DECL_ATOMIC(unsigned char)
#if !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED)
__TBB_DECL_ATOMIC(wchar_t)
#endif /* !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED) */
//! Specialization for atomic<T*> with arithmetic and operator->.
template<typename T> struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> {
    T* operator=( T* rhs ) {
        // "this" is required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<T*>& operator=( const atomic<T*>& rhs ) {
        this->store_with_release(rhs); return *this;
    }
    T* operator->() const {
        return (*this);
    }
};
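// Usage sketch (illustrative comment only): pointer arithmetic advances in units of T
// because StepType is T, and operator-> allows member access through the atomic pointer.
// A retry loop on compare_and_swap gives a simple lock-free push:
//
//     struct node { tbb::atomic<node*> next; int payload; };
//
//     void push( tbb::atomic<node*>& head, node* n ) {
//         node* old;
//         do {
//             old = head;            // load with acquire
//             n->next = old;
//         } while( head.compare_and_swap(n, old) != old );   // retry until head was still old
//     }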
//! Specialization for atomic<void*>, for the sake of not allowing arithmetic or operator->.
template<> struct atomic<void*>: internal::atomic_impl<void*> {
    void* operator=( void* rhs ) {
        // "this" is required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<void*>& operator=( const atomic<void*>& rhs ) {
        this->store_with_release(rhs); return *this;
    }
};

} // namespace tbb
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    #pragma warning (pop)
#endif // warnings 4244, 4267 are back

#endif /* __TBB_atomic_H */