2 Copyright 2005-2010 Intel Corporation. All Rights Reserved.
4 This file is part of Threading Building Blocks.
6 Threading Building Blocks is free software; you can redistribute it
7 and/or modify it under the terms of the GNU General Public License
8 version 2 as published by the Free Software Foundation.
10 Threading Building Blocks is distributed in the hope that it will be
11 useful, but WITHOUT ANY WARRANTY; without even the implied warranty
12 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with Threading Building Blocks; if not, write to the Free Software
17 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 As a special exception, you may use this file as part of a free software
20 library without restriction. Specifically, if other files instantiate
21 templates or use macros or inline functions from this file, or you compile
22 this file and link it with other files to produce an executable, this
23 file does not by itself cause the resulting executable to be covered by
24 the GNU General Public License. This exception does not however
25 invalidate any other reasons why the executable file might be covered by
26 the GNU General Public License.
29 #ifndef __TBB_atomic_H
30 #define __TBB_atomic_H
33 #include "tbb_stddef.h"
36 #define __TBB_LONG_LONG __int64
38 #define __TBB_LONG_LONG long long
41 #include "tbb_machine.h"
43 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
44 // Workaround for overzealous compiler warnings
45 #pragma warning (push)
46 #pragma warning (disable: 4244 4267)
51 //! Specifies memory fencing.
52 enum memory_semantics {
53 //! For internal use only.
// __TBB_DECL_ATOMIC_FIELD(t,f,a): declares a field f of type t forced to
// a-byte alignment, using whichever alignment syntax this compiler supports.
64 #if __GNUC__ || __SUNPRO_CC
65 #define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f __attribute__ ((aligned(a)));
66 #elif defined(__INTEL_COMPILER)||_MSC_VER >= 1300
67 #define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f;
69 #error Do not know syntax for forcing alignment.
// atomic_rep<Size> supplies the naturally aligned storage ("value") and the
// word type used by atomic_traits for an atomic object of the given size.
73 struct atomic_rep; // Primary template declared, but never defined.
76 struct atomic_rep<1> { // Specialization
// 2-byte case: int16_t storage aligned to 2 bytes.
81 struct atomic_rep<2> { // Specialization
83 __TBB_DECL_ATOMIC_FIELD(int16_t,value,2)
// 4-byte case: int32_t storage aligned to 4 bytes.
86 struct atomic_rep<4> { // Specialization
87 #if _MSC_VER && __TBB_WORDSIZE==4
88 // Work-around that avoids spurious /Wp64 warnings
89 typedef intptr_t word;
93 __TBB_DECL_ATOMIC_FIELD(int32_t,value,4)
// 8-byte case: int64_t storage aligned to 8 bytes.
96 struct atomic_rep<8> { // Specialization
98 __TBB_DECL_ATOMIC_FIELD(int64_t,value,8)
101 template<size_t Size, memory_semantics M>
102 struct atomic_traits; // Primary template declared, but not defined.
// The two macros below generate atomic_traits specializations that forward
// compare_and_swap / fetch_and_add / fetch_and_store to the machine-level
// primitives: the FENCED form binds a specific memory semantic M to the
// __TBB_...S##M primitive, while the plain form serves every M through the
// unsuffixed __TBB_...S primitive.
104 #define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M) \
105 template<> struct atomic_traits<S,M> { \
106 typedef atomic_rep<S>::word word; \
107 inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\
108 return __TBB_CompareAndSwap##S##M(location,new_value,comparand); \
110 inline static word fetch_and_add( volatile void* location, word addend ) { \
111 return __TBB_FetchAndAdd##S##M(location,addend); \
113 inline static word fetch_and_store( volatile void* location, word value ) {\
114 return __TBB_FetchAndStore##S##M(location,value); \
118 #define __TBB_DECL_ATOMIC_PRIMITIVES(S) \
119 template<memory_semantics M> \
120 struct atomic_traits<S,M> { \
121 typedef atomic_rep<S>::word word; \
122 inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\
123 return __TBB_CompareAndSwap##S(location,new_value,comparand); \
125 inline static word fetch_and_add( volatile void* location, word addend ) { \
126 return __TBB_FetchAndAdd##S(location,addend); \
128 inline static word fetch_and_store( volatile void* location, word value ) {\
129 return __TBB_FetchAndStore##S(location,value); \
133 #if __TBB_DECL_FENCED_ATOMICS
134 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,__TBB_full_fence)
135 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,__TBB_full_fence)
136 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,__TBB_full_fence)
137 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,__TBB_full_fence)
138 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire)
139 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire)
140 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire)
141 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire)
142 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release)
143 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release)
144 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release)
145 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release)
// Generic instantiations: one per operand size, covering all memory semantics.
147 __TBB_DECL_ATOMIC_PRIMITIVES(1)
148 __TBB_DECL_ATOMIC_PRIMITIVES(2)
149 __TBB_DECL_ATOMIC_PRIMITIVES(4)
150 __TBB_DECL_ATOMIC_PRIMITIVES(8)
153 //! Additive inverse of 1 for type T.
154 /** Various compilers issue various warnings if -1 is used with various integer types.
155 The baroque expression below avoids all the warnings (we hope). */
// Used by fetch_and_decrement / operator-- below.
156 #define __TBB_MINUS_ONE(T) (T(T(0)-T(1)))
158 //! Base class that provides basic functionality for atomic<T> without fetch_and_add.
159 /** Works for any type T that has the same size as an integral type, has a trivial constructor/destructor,
160 and can be copied/compared by memcpy/memcmp. */
// Aligned storage for the value (see atomic_rep above).
164 atomic_rep<sizeof(T)> rep;
166 //! Union type used to convert type T to underlying integral type.
169 typename atomic_rep<sizeof(T)>::word bits;
172 typedef T value_type;
//! Atomically replaces the stored value with `value` under memory semantics M,
//! returning the value that was replaced.
174 template<memory_semantics M>
175 value_type fetch_and_store( value_type value ) {
178 w.bits = internal::atomic_traits<sizeof(value_type),M>::fetch_and_store(&rep.value,u.bits);
//! fetch_and_store with the default (full-fence) memory semantics.
182 value_type fetch_and_store( value_type value ) {
183 return fetch_and_store<__TBB_full_fence>(value);
//! Atomically: if the stored value equals comparand, replace it with `value`.
//! Returns the value observed before the operation. Memory semantics M.
186 template<memory_semantics M>
187 value_type compare_and_swap( value_type value, value_type comparand ) {
191 w.bits = internal::atomic_traits<sizeof(value_type),M>::compare_and_swap(&rep.value,u.bits,v.bits);
//! compare_and_swap with the default (full-fence) memory semantics.
195 value_type compare_and_swap( value_type value, value_type comparand ) {
196 return compare_and_swap<__TBB_full_fence>(value,comparand);
//! Implicit load: reads the value via __TBB_load_with_acquire (acquire semantics).
199 operator value_type() const volatile { // volatile qualifier here for backwards compatibility
201 w.bits = __TBB_load_with_acquire( rep.value );
//! Stores rhs via __TBB_store_with_release (release semantics).
206 value_type store_with_release( value_type rhs ) {
209 __TBB_store_with_release(rep.value,u.bits);
214 //! Base class that provides basic functionality for atomic<T> with fetch_and_add.
215 /** I is the underlying type.
216 D is the difference type.
217 StepType should be char if I is an integral type, and T if I is a T*. */
218 template<typename I, typename D, typename StepType>
219 struct atomic_impl_with_arithmetic: atomic_impl<I> {
221 typedef I value_type;
//! Atomically adds addend under memory semantics M; returns the value held
//! before the addition. The addend is scaled by sizeof(StepType), so the
//! pointer specializations advance by whole elements (sizeof(char)==1 leaves
//! integral types unscaled).
223 template<memory_semantics M>
224 value_type fetch_and_add( D addend ) {
225 return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_add( &this->rep.value, addend*sizeof(StepType) ));
//! fetch_and_add with the default (full-fence) memory semantics.
228 value_type fetch_and_add( D addend ) {
229 return fetch_and_add<__TBB_full_fence>(addend);
//! Atomically increments by one; returns the pre-increment value.
232 template<memory_semantics M>
233 value_type fetch_and_increment() {
234 return fetch_and_add<M>(1);
//! fetch_and_increment with the default (full-fence) memory semantics.
237 value_type fetch_and_increment() {
238 return fetch_and_add(1);
//! Atomically decrements by one; returns the pre-decrement value.
241 template<memory_semantics M>
242 value_type fetch_and_decrement() {
243 return fetch_and_add<M>(__TBB_MINUS_ONE(D));
//! fetch_and_decrement with the default (full-fence) memory semantics.
246 value_type fetch_and_decrement() {
247 return fetch_and_add(__TBB_MINUS_ONE(D));
//! Atomic +=; returns the resulting (new) value.
251 value_type operator+=( D addend ) {
252 return fetch_and_add(addend)+addend;
//! Atomic -=; returns the resulting (new) value.
255 value_type operator-=( D addend ) {
256 // Additive inverse of addend computed using binary minus,
257 // instead of unary minus, for sake of avoiding compiler warnings.
258 return operator+=(D(0)-addend);
//! Atomic pre-increment; returns the incremented value.
261 value_type operator++() {
262 return fetch_and_add(1)+1;
//! Atomic pre-decrement; returns the decremented value.
265 value_type operator--() {
266 return fetch_and_add(__TBB_MINUS_ONE(D))-1;
//! Atomic post-increment; returns the original value.
269 value_type operator++(int) {
270 return fetch_and_add(1);
//! Atomic post-decrement; returns the original value.
273 value_type operator--(int) {
274 return fetch_and_add(__TBB_MINUS_ONE(D));
278 #if __TBB_WORDSIZE == 4
279 // Platforms with 32-bit hardware require special effort for 64-bit loads and stores.
280 #if defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400
// 64-bit load on a 32-bit platform goes through the __TBB_Load8 machine primitive.
283 inline atomic_impl<__TBB_LONG_LONG>::operator atomic_impl<__TBB_LONG_LONG>::value_type() const volatile {
284 return __TBB_Load8(&rep.value);
// Same, for the unsigned 64-bit specialization.
288 inline atomic_impl<unsigned __TBB_LONG_LONG>::operator atomic_impl<unsigned __TBB_LONG_LONG>::value_type() const volatile {
289 return __TBB_Load8(&rep.value);
// 64-bit store on a 32-bit platform goes through the __TBB_Store8 machine primitive.
293 inline atomic_impl<__TBB_LONG_LONG>::value_type atomic_impl<__TBB_LONG_LONG>::store_with_release( value_type rhs ) {
294 __TBB_Store8(&rep.value,rhs);
// Same, for the unsigned 64-bit specialization.
299 inline atomic_impl<unsigned __TBB_LONG_LONG>::value_type atomic_impl<unsigned __TBB_LONG_LONG>::store_with_release( value_type rhs ) {
300 __TBB_Store8(&rep.value,rhs);
304 #endif /* defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400 */
305 #endif /* __TBB_WORDSIZE==4 */
310 //! Primary template for atomic.
311 /** See the Reference for details.
312 @ingroup synchronization */
314 struct atomic: internal::atomic_impl<T> {
//! Assignment: stores rhs via store_with_release.
315 T operator=( T rhs ) {
316 // "this" required here in strict ISO C++ because store_with_release is a dependent name
317 return this->store_with_release(rhs);
//! Copy-assignment from another atomic, also via store_with_release.
319 atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;}
// __TBB_DECL_ATOMIC(T): full specialization of atomic<T> for an integral type T,
// layered on atomic_impl_with_arithmetic so it also gets fetch_and_add and the
// arithmetic operators (StepType char gives an addend scale factor of 1).
322 #define __TBB_DECL_ATOMIC(T) \
323 template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \
324 T operator=( T rhs ) {return store_with_release(rhs);} \
325 atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
// Instantiate atomic<T> for the integral types.
328 #if defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400
329 __TBB_DECL_ATOMIC(__TBB_LONG_LONG)
330 __TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG)
332 // Some old versions of MSVC cannot correctly compile templates with "long long".
333 #endif /* defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400 */
335 __TBB_DECL_ATOMIC(long)
336 __TBB_DECL_ATOMIC(unsigned long)
338 #if defined(_MSC_VER) && __TBB_WORDSIZE==4
339 /* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option.
340 It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T)
341 with an operator=(U) that explicitly converts the U to a T. Types T and U should be
342 type synonyms on the platform. Type U should be the wider variant of T from the
343 perspective of /Wp64. */
344 #define __TBB_DECL_ATOMIC_ALT(T,U) \
345 template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \
346 T operator=( U rhs ) {return store_with_release(T(rhs));} \
347 atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
349 __TBB_DECL_ATOMIC_ALT(unsigned,size_t)
350 __TBB_DECL_ATOMIC_ALT(int,ptrdiff_t)
352 __TBB_DECL_ATOMIC(unsigned)
353 __TBB_DECL_ATOMIC(int)
354 #endif /* defined(_MSC_VER) && __TBB_WORDSIZE==4 */
356 __TBB_DECL_ATOMIC(unsigned short)
357 __TBB_DECL_ATOMIC(short)
358 __TBB_DECL_ATOMIC(char)
359 __TBB_DECL_ATOMIC(signed char)
360 __TBB_DECL_ATOMIC(unsigned char)
362 #if !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED)
363 __TBB_DECL_ATOMIC(wchar_t)
364 #endif /* !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED) */
366 //! Specialization for atomic<T*> with arithmetic and operator->.
// StepType is T, so fetch_and_add and the arithmetic operators advance the
// pointer by whole T elements; the difference type is ptrdiff_t.
367 template<typename T> struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> {
368 T* operator=( T* rhs ) {
369 // "this" required here in strict ISO C++ because store_with_release is a dependent name
370 return this->store_with_release(rhs);
372 atomic<T*>& operator=( const atomic<T*>& rhs ) {
373 this->store_with_release(rhs); return *this;
//! Permits ptr->member syntax on the atomically held pointer.
375 T* operator->() const {
380 //! Specialization for atomic<void*>, for sake of not allowing arithmetic or operator->.
381 template<> struct atomic<void*>: internal::atomic_impl<void*> {
382 void* operator=( void* rhs ) {
383 // "this" required here in strict ISO C++ because store_with_release is a dependent name
384 return this->store_with_release(rhs);
386 atomic<void*>& operator=( const atomic<void*>& rhs ) {
387 this->store_with_release(rhs); return *this;
393 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
394 #pragma warning (pop)
395 #endif // warnings 4244, 4267 are back
397 #endif /* __TBB_atomic_H */