/*
    Copyright 2005-2011 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    As a special exception, you may use this file as part of a free software
    library without restriction.  Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License.  This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/
29 #ifndef __TBB_atomic_H
30 #define __TBB_atomic_H
33 #include "tbb_stddef.h"
36 #define __TBB_LONG_LONG __int64
38 #define __TBB_LONG_LONG long long
41 #include "tbb_machine.h"
43 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
44 // Workaround for overzealous compiler warnings
45 #pragma warning (push)
46 #pragma warning (disable: 4244 4267)
//! Specifies memory fencing.
// NOTE(review): enumerator list reconstructed from the four semantics names
// actually instantiated below (full_fence/acquire/release/relaxed) — confirm
// order and values against upstream before relying on numeric values.
enum memory_semantics {
    //! Sequentially consistent fence.
    full_fence,
    //! Acquire fence.
    acquire,
    //! Release fence.
    release,
    //! No ordering.
    relaxed
};
66 #if __TBB_ATTRIBUTE_ALIGNED_PRESENT
67 #define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f __attribute__ ((aligned(a)));
68 #elif __TBB_DECLSPEC_ALIGN_PRESENT
69 #define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f;
71 #error Do not know syntax for forcing alignment.
75 struct atomic_rep; // Primary template declared, but never defined.
78 struct atomic_rep<1> { // Specialization
83 struct atomic_rep<2> { // Specialization
85 __TBB_DECL_ATOMIC_FIELD(int16_t,value,2)
88 struct atomic_rep<4> { // Specialization
89 #if _MSC_VER && __TBB_WORDSIZE==4
90 // Work-around that avoids spurious /Wp64 warnings
91 typedef intptr_t word;
95 __TBB_DECL_ATOMIC_FIELD(int32_t,value,4)
97 #if __TBB_64BIT_ATOMICS
99 struct atomic_rep<8> { // Specialization
100 typedef int64_t word;
101 __TBB_DECL_ATOMIC_FIELD(int64_t,value,8)
105 template<size_t Size, memory_semantics M>
106 struct atomic_traits; // Primary template declared, but not defined.
108 #define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M) \
109 template<> struct atomic_traits<S,M> { \
110 typedef atomic_rep<S>::word word; \
111 inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \
112 return __TBB_machine_cmpswp##S##M(location,new_value,comparand); \
114 inline static word fetch_and_add( volatile void* location, word addend ) { \
115 return __TBB_machine_fetchadd##S##M(location,addend); \
117 inline static word fetch_and_store( volatile void* location, word value ) { \
118 return __TBB_machine_fetchstore##S##M(location,value); \
//! Declares a partially specialized atomic_traits<S,M> for platforms without
//! per-semantics machine primitives: every memory_semantics M forwards to the
//! single unsuffixed primitive (e.g. __TBB_machine_cmpswp4).
#define __TBB_DECL_ATOMIC_PRIMITIVES(S)                                                                  \
    template<memory_semantics M>                                                                         \
    struct atomic_traits<S,M> {                                                                          \
        typedef atomic_rep<S>::word word;                                                                \
        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \
            return __TBB_machine_cmpswp##S(location,new_value,comparand);                                 \
        }                                                                                                \
        inline static word fetch_and_add( volatile void* location, word addend ) {                        \
            return __TBB_machine_fetchadd##S(location,addend);                                            \
        }                                                                                                \
        inline static word fetch_and_store( volatile void* location, word value ) {                       \
            return __TBB_machine_fetchstore##S(location,value);                                           \
        }                                                                                                \
    };
137 template<memory_semantics M>
138 struct atomic_load_store_traits; // Primary template declaration
140 #define __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(M) \
141 template<> struct atomic_load_store_traits<M> { \
142 template <typename T> \
143 inline static T load( const volatile T& location ) { \
144 return __TBB_load_##M( location ); \
146 template <typename T> \
147 inline static void store( volatile T& location, T value ) { \
148 __TBB_store_##M( location, value ); \
152 #if __TBB_USE_FENCED_ATOMICS
153 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,full_fence)
154 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,full_fence)
155 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,full_fence)
156 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire)
157 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire)
158 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire)
159 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release)
160 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release)
161 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release)
162 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,relaxed)
163 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,relaxed)
164 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,relaxed)
165 #if __TBB_64BIT_ATOMICS
166 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,full_fence)
167 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire)
168 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release)
169 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,relaxed)
171 #else /* !__TBB_USE_FENCED_ATOMICS */
172 __TBB_DECL_ATOMIC_PRIMITIVES(1)
173 __TBB_DECL_ATOMIC_PRIMITIVES(2)
174 __TBB_DECL_ATOMIC_PRIMITIVES(4)
175 #if __TBB_64BIT_ATOMICS
176 __TBB_DECL_ATOMIC_PRIMITIVES(8)
178 #endif /* !__TBB_USE_FENCED_ATOMICS */
180 __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(full_fence);
181 __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(acquire);
182 __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(release);
183 __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(relaxed);
185 //! Additive inverse of 1 for type T.
186 /** Various compilers issue various warnings if -1 is used with various integer types.
187 The baroque expression below avoids all the warnings (we hope). */
188 #define __TBB_MINUS_ONE(T) (T(T(0)-T(1)))
190 //! Base class that provides basic functionality for atomic<T> without fetch_and_add.
191 /** Works for any type T that has the same size as an integral type, has a trivial constructor/destructor,
192 and can be copied/compared by memcpy/memcmp. */
196 atomic_rep<sizeof(T)> rep;
198 //! Union type used to convert type T to underlying integral type.
201 typename atomic_rep<sizeof(T)>::word bits;
204 typedef T value_type;
206 template<memory_semantics M>
207 value_type fetch_and_store( value_type value ) {
210 w.bits = internal::atomic_traits<sizeof(value_type),M>::fetch_and_store(&rep.value,u.bits);
214 value_type fetch_and_store( value_type value ) {
215 return fetch_and_store<full_fence>(value);
218 template<memory_semantics M>
219 value_type compare_and_swap( value_type value, value_type comparand ) {
223 w.bits = internal::atomic_traits<sizeof(value_type),M>::compare_and_swap(&rep.value,u.bits,v.bits);
227 value_type compare_and_swap( value_type value, value_type comparand ) {
228 return compare_and_swap<full_fence>(value,comparand);
231 operator value_type() const volatile { // volatile qualifier here for backwards compatibility
233 w.bits = __TBB_load_with_acquire( rep.value );
237 template<memory_semantics M>
238 value_type load () const {
240 u.bits = internal::atomic_load_store_traits<M>::load( rep.value );
244 value_type load () const {
245 return load<acquire>();
248 template<memory_semantics M>
249 void store ( value_type value ) {
252 internal::atomic_load_store_traits<M>::store( rep.value, u.bits );
255 void store ( value_type value ) {
256 store<release>( value );
260 value_type store_with_release( value_type rhs ) {
263 __TBB_store_with_release(rep.value,u.bits);
268 //! Base class that provides basic functionality for atomic<T> with fetch_and_add.
269 /** I is the underlying type.
270 D is the difference type.
271 StepType should be char if I is an integral type, and T if I is a T*. */
272 template<typename I, typename D, typename StepType>
273 struct atomic_impl_with_arithmetic: atomic_impl<I> {
275 typedef I value_type;
277 template<memory_semantics M>
278 value_type fetch_and_add( D addend ) {
279 return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_add( &this->rep.value, addend*sizeof(StepType) ));
282 value_type fetch_and_add( D addend ) {
283 return fetch_and_add<full_fence>(addend);
286 template<memory_semantics M>
287 value_type fetch_and_increment() {
288 return fetch_and_add<M>(1);
291 value_type fetch_and_increment() {
292 return fetch_and_add(1);
295 template<memory_semantics M>
296 value_type fetch_and_decrement() {
297 return fetch_and_add<M>(__TBB_MINUS_ONE(D));
300 value_type fetch_and_decrement() {
301 return fetch_and_add(__TBB_MINUS_ONE(D));
305 value_type operator+=( D addend ) {
306 return fetch_and_add(addend)+addend;
309 value_type operator-=( D addend ) {
310 // Additive inverse of addend computed using binary minus,
311 // instead of unary minus, for sake of avoiding compiler warnings.
312 return operator+=(D(0)-addend);
315 value_type operator++() {
316 return fetch_and_add(1)+1;
319 value_type operator--() {
320 return fetch_and_add(__TBB_MINUS_ONE(D))-1;
323 value_type operator++(int) {
324 return fetch_and_add(1);
327 value_type operator--(int) {
328 return fetch_and_add(__TBB_MINUS_ONE(D));
335 //! Primary template for atomic.
336 /** See the Reference for details.
337 @ingroup synchronization */
339 struct atomic: internal::atomic_impl<T> {
340 T operator=( T rhs ) {
341 // "this" required here in strict ISO C++ because store_with_release is a dependent name
342 return this->store_with_release(rhs);
344 atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;}
347 #define __TBB_DECL_ATOMIC(T) \
348 template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \
349 T operator=( T rhs ) {return store_with_release(rhs);} \
350 atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
353 #if __TBB_64BIT_ATOMICS
354 __TBB_DECL_ATOMIC(__TBB_LONG_LONG)
355 __TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG)
357 // test_atomic will verify that sizeof(long long)==8
359 __TBB_DECL_ATOMIC(long)
360 __TBB_DECL_ATOMIC(unsigned long)
362 #if defined(_MSC_VER) && __TBB_WORDSIZE==4
363 /* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option.
364 It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T)
365 with an operator=(U) that explicitly converts the U to a T. Types T and U should be
366 type synonyms on the platform. Type U should be the wider variant of T from the
367 perspective of /Wp64. */
368 #define __TBB_DECL_ATOMIC_ALT(T,U) \
369 template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \
370 T operator=( U rhs ) {return store_with_release(T(rhs));} \
371 atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
373 __TBB_DECL_ATOMIC_ALT(unsigned,size_t)
374 __TBB_DECL_ATOMIC_ALT(int,ptrdiff_t)
376 __TBB_DECL_ATOMIC(unsigned)
377 __TBB_DECL_ATOMIC(int)
378 #endif /* defined(_MSC_VER) && __TBB_WORDSIZE==4 */
380 __TBB_DECL_ATOMIC(unsigned short)
381 __TBB_DECL_ATOMIC(short)
382 __TBB_DECL_ATOMIC(char)
383 __TBB_DECL_ATOMIC(signed char)
384 __TBB_DECL_ATOMIC(unsigned char)
386 #if !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED)
387 __TBB_DECL_ATOMIC(wchar_t)
388 #endif /* _MSC_VER||!defined(_NATIVE_WCHAR_T_DEFINED) */
390 //! Specialization for atomic<T*> with arithmetic and operator->.
391 template<typename T> struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> {
392 T* operator=( T* rhs ) {
393 // "this" required here in strict ISO C++ because store_with_release is a dependent name
394 return this->store_with_release(rhs);
396 atomic<T*>& operator=( const atomic<T*>& rhs ) {
397 this->store_with_release(rhs); return *this;
399 T* operator->() const {
404 //! Specialization for atomic<void*>, for sake of not allowing arithmetic or operator->.
405 template<> struct atomic<void*>: internal::atomic_impl<void*> {
406 void* operator=( void* rhs ) {
407 // "this" required here in strict ISO C++ because store_with_release is a dependent name
408 return this->store_with_release(rhs);
410 atomic<void*>& operator=( const atomic<void*>& rhs ) {
411 this->store_with_release(rhs); return *this;
415 // Helpers to workaround ugly syntax of calling template member function of a
416 // template class with template argument dependent on template parameters.
418 template <memory_semantics M, typename T>
419 T load ( const atomic<T>& a ) { return a.template load<M>(); }
421 template <memory_semantics M, typename T>
422 void store ( atomic<T>& a, T value ) { return a.template store<M>(value); }
426 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
427 #pragma warning (pop)
428 #endif // warnings 4244, 4267 are back
430 #endif /* __TBB_atomic_H */