// Source: casparcg repository — tbb/include/tbb/atomic.h (TBB 2.0.0.2 update, Intel TBB 2005-2011)
1 /*
2     Copyright 2005-2011 Intel Corporation.  All Rights Reserved.
3
4     This file is part of Threading Building Blocks.
5
6     Threading Building Blocks is free software; you can redistribute it
7     and/or modify it under the terms of the GNU General Public License
8     version 2 as published by the Free Software Foundation.
9
10     Threading Building Blocks is distributed in the hope that it will be
11     useful, but WITHOUT ANY WARRANTY; without even the implied warranty
12     of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13     GNU General Public License for more details.
14
15     You should have received a copy of the GNU General Public License
16     along with Threading Building Blocks; if not, write to the Free Software
17     Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
18
19     As a special exception, you may use this file as part of a free software
20     library without restriction.  Specifically, if other files instantiate
21     templates or use macros or inline functions from this file, or you compile
22     this file and link it with other files to produce an executable, this
23     file does not by itself cause the resulting executable to be covered by
24     the GNU General Public License.  This exception does not however
25     invalidate any other reasons why the executable file might be covered by
26     the GNU General Public License.
27 */
28
29 #ifndef __TBB_atomic_H
30 #define __TBB_atomic_H
31
32 #include <cstddef>
33 #include "tbb_stddef.h"
34
// Portable spelling of the 64-bit integer type: MSVC predates 'long long'
// support and uses the __int64 keyword instead.
#if _MSC_VER 
#define __TBB_LONG_LONG __int64
#else
#define __TBB_LONG_LONG long long
#endif /* _MSC_VER */
40
41 #include "tbb_machine.h"
42
43 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
44     // Workaround for overzealous compiler warnings 
45     #pragma warning (push)
46     #pragma warning (disable: 4244 4267)
47 #endif
48
49 namespace tbb {
50
//! Specifies memory fencing.
/** Passed as a template argument to the fenced atomic operations to select
    the ordering guarantee.  Enumerator order is significant: the values are
    pasted into primitive names by the __TBB_DECL_FENCED_ATOMIC_PRIMITIVES
    machinery below. */
enum memory_semantics {
    //! For internal use only.  Full fence: orders both prior and subsequent accesses.
    __TBB_full_fence,
    //! Acquire fence: no subsequent access may be hoisted above the operation.
    acquire,
    //! Release fence: no prior access may be sunk below the operation.
    release
};
60
//! @cond INTERNAL
namespace internal {

// __TBB_DECL_ATOMIC_FIELD(t,f,a): declare a field 'f' of type 't' forced to
// alignment 'a'.  Natural alignment is required because misaligned operands
// are not atomic on most targets; the attribute spelling is compiler-specific.
#if __GNUC__ || __SUNPRO_CC || __IBMCPP__
#define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f  __attribute__ ((aligned(a)));
#elif defined(__INTEL_COMPILER)||_MSC_VER >= 1300
#define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f;
#else 
#error Do not know syntax for forcing alignment.
#endif /* __GNUC__ || __SUNPRO_CC || __IBMCPP__ */
71
//! Storage for an atomic operand of S bytes.
/** Supplies suitably aligned storage ('value') together with the integral
    'word' type that the machine-level primitives operate on.  Only the sizes
    the machine layer supports are specialized; any other size is a compile
    error via the undefined primary template. */
template<size_t S>
struct atomic_rep;           // Primary template declared, but never defined.

template<>
struct atomic_rep<1> {       // Specialization
    typedef int8_t word;
    int8_t value;            // single byte: no explicit alignment needed
};
template<>
struct atomic_rep<2> {       // Specialization
    typedef int16_t word;
    __TBB_DECL_ATOMIC_FIELD(int16_t,value,2)
};
template<>
struct atomic_rep<4> {       // Specialization
#if _MSC_VER && __TBB_WORDSIZE==4
    // Work-around that avoids spurious /Wp64 warnings
    typedef intptr_t word;
#else
    typedef int32_t word;
#endif
    __TBB_DECL_ATOMIC_FIELD(int32_t,value,4)
};
#if __TBB_64BIT_ATOMICS
template<>
struct atomic_rep<8> {       // Specialization
    typedef int64_t word;
    __TBB_DECL_ATOMIC_FIELD(int64_t,value,8)
};
#endif
102
//! Maps (operand size, fence semantics) to the machine-level primitives.
/** Each specialization forwards compare_and_swap / fetch_and_add /
    fetch_and_store to the corresponding __TBB_* primitive supplied by
    tbb_machine.h, selected by token-pasting the size S (and, for fenced
    ports, the memory_semantics M) into the primitive's name. */
template<size_t Size, memory_semantics M>
struct atomic_traits;        // Primary template declared, but not defined.

// Full specialization for one (size, fence) pair — used when the port
// provides per-fence primitives such as __TBB_CompareAndSwap4acquire.
#define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M)                         \
    template<> struct atomic_traits<S,M> {                               \
        typedef atomic_rep<S>::word word;                               \
        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\
            return __TBB_CompareAndSwap##S##M(location,new_value,comparand);    \
        }                                                                       \
        inline static word fetch_and_add( volatile void* location, word addend ) { \
            return __TBB_FetchAndAdd##S##M(location,addend);                    \
        }                                                                       \
        inline static word fetch_and_store( volatile void* location, word value ) {\
            return __TBB_FetchAndStore##S##M(location,value);                   \
        }                                                                       \
    };

// Partial specialization over M for one size — used when the port has only
// full-fence primitives; every memory_semantics maps to the same operation.
#define __TBB_DECL_ATOMIC_PRIMITIVES(S)                                  \
    template<memory_semantics M>                                         \
    struct atomic_traits<S,M> {                                          \
        typedef atomic_rep<S>::word word;                               \
        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\
            return __TBB_CompareAndSwap##S(location,new_value,comparand);       \
        }                                                                       \
        inline static word fetch_and_add( volatile void* location, word addend ) { \
            return __TBB_FetchAndAdd##S(location,addend);                       \
        }                                                                       \
        inline static word fetch_and_store( volatile void* location, word value ) {\
            return __TBB_FetchAndStore##S(location,value);                      \
        }                                                                       \
    };
134
// Instantiate atomic_traits for every supported (size, fence) combination.
// Ports with per-fence primitives get explicit specializations; others get
// the partial specialization that falls back to full-fence operations.
// 8-byte variants are guarded, since not all targets have 64-bit atomics.
#if __TBB_DECL_FENCED_ATOMICS
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release)
#if __TBB_64BIT_ATOMICS
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release)
#endif
#else
__TBB_DECL_ATOMIC_PRIMITIVES(1)
__TBB_DECL_ATOMIC_PRIMITIVES(2)
__TBB_DECL_ATOMIC_PRIMITIVES(4)
#if __TBB_64BIT_ATOMICS
__TBB_DECL_ATOMIC_PRIMITIVES(8)
#endif
#endif
158
//! Additive inverse of 1 for type T.
/** Various compilers issue various warnings if -1 is used with various integer types.
    The baroque expression below avoids all the warnings (we hope); for unsigned T it
    yields T's maximum value, which is the correct two's-complement "minus one". */
#define __TBB_MINUS_ONE(T) (T(T(0)-T(1)))
163
164 //! Base class that provides basic functionality for atomic<T> without fetch_and_add.
165 /** Works for any type T that has the same size as an integral type, has a trivial constructor/destructor, 
166     and can be copied/compared by memcpy/memcmp. */
167 template<typename T>
168 struct atomic_impl {
169 protected:
170     atomic_rep<sizeof(T)> rep;
171 private:
172     //! Union type used to convert type T to underlying integral type.
173     union converter {
174         T value;
175         typename atomic_rep<sizeof(T)>::word bits;
176     };
177 public:
178     typedef T value_type;
179
180     template<memory_semantics M>
181     value_type fetch_and_store( value_type value ) {
182         converter u, w;
183         u.value = value;
184         w.bits = internal::atomic_traits<sizeof(value_type),M>::fetch_and_store(&rep.value,u.bits);
185         return w.value;
186     }
187
188     value_type fetch_and_store( value_type value ) {
189         return fetch_and_store<__TBB_full_fence>(value);
190     }
191
192     template<memory_semantics M>
193     value_type compare_and_swap( value_type value, value_type comparand ) {
194         converter u, v, w;
195         u.value = value;
196         v.value = comparand;
197         w.bits = internal::atomic_traits<sizeof(value_type),M>::compare_and_swap(&rep.value,u.bits,v.bits);
198         return w.value;
199     }
200
201     value_type compare_and_swap( value_type value, value_type comparand ) {
202         return compare_and_swap<__TBB_full_fence>(value,comparand);
203     }
204
205     operator value_type() const volatile {                // volatile qualifier here for backwards compatibility 
206         converter w;
207         w.bits = __TBB_load_with_acquire( rep.value );
208         return w.value;
209     }
210
211 protected:
212     value_type store_with_release( value_type rhs ) {
213         converter u;
214         u.value = rhs;
215         __TBB_store_with_release(rep.value,u.bits);
216         return rhs;
217     }
218 };
219
220 //! Base class that provides basic functionality for atomic<T> with fetch_and_add.
221 /** I is the underlying type.
222     D is the difference type.
223     StepType should be char if I is an integral type, and T if I is a T*. */
224 template<typename I, typename D, typename StepType>
225 struct atomic_impl_with_arithmetic: atomic_impl<I> {
226 public:
227     typedef I value_type;
228
229     template<memory_semantics M>
230     value_type fetch_and_add( D addend ) {
231         return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_add( &this->rep.value, addend*sizeof(StepType) ));
232     }
233
234     value_type fetch_and_add( D addend ) {
235         return fetch_and_add<__TBB_full_fence>(addend);
236     }
237
238     template<memory_semantics M>
239     value_type fetch_and_increment() {
240         return fetch_and_add<M>(1);
241     }
242
243     value_type fetch_and_increment() {
244         return fetch_and_add(1);
245     }
246
247     template<memory_semantics M>
248     value_type fetch_and_decrement() {
249         return fetch_and_add<M>(__TBB_MINUS_ONE(D));
250     }
251
252     value_type fetch_and_decrement() {
253         return fetch_and_add(__TBB_MINUS_ONE(D));
254     }
255
256 public:
257     value_type operator+=( D addend ) {
258         return fetch_and_add(addend)+addend;
259     }
260
261     value_type operator-=( D addend ) {
262         // Additive inverse of addend computed using binary minus,
263         // instead of unary minus, for sake of avoiding compiler warnings.
264         return operator+=(D(0)-addend);    
265     }
266
267     value_type operator++() {
268         return fetch_and_add(1)+1;
269     }
270
271     value_type operator--() {
272         return fetch_and_add(__TBB_MINUS_ONE(D))-1;
273     }
274
275     value_type operator++(int) {
276         return fetch_and_add(1);
277     }
278
279     value_type operator--(int) {
280         return fetch_and_add(__TBB_MINUS_ONE(D));
281     }
282 };
283
284 } /* Internal */
285 //! @endcond
286
287 //! Primary template for atomic.
288 /** See the Reference for details.
289     @ingroup synchronization */
290 template<typename T>
291 struct atomic: internal::atomic_impl<T> {
292     T operator=( T rhs ) {
293         // "this" required here in strict ISO C++ because store_with_release is a dependent name
294         return this->store_with_release(rhs);
295     }
296     atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;}
297 };
298
// Generates the atomic<T> specialization for an integral type T: arithmetic
// support with step type 'char' (so += adds plain integer amounts), plus
// value- and copy-assignment with release-store semantics.
#define __TBB_DECL_ATOMIC(T) \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
        T operator=( T rhs ) {return store_with_release(rhs);}  \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \
    };
304
// Instantiate atomic<T> for all the built-in integral types the port supports.
#if __TBB_64BIT_ATOMICS
// otherwise size is verified by test_atomic
__TBB_DECL_ATOMIC(__TBB_LONG_LONG)
__TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG)
#endif
__TBB_DECL_ATOMIC(long)
__TBB_DECL_ATOMIC(unsigned long)

#if defined(_MSC_VER) && __TBB_WORDSIZE==4
/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option. 
   It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T) 
   with an operator=(U) that explicitly converts the U to a T.  Types T and U should be
   type synonyms on the platform.  Type U should be the wider variant of T from the
   perspective of /Wp64. */
#define __TBB_DECL_ATOMIC_ALT(T,U) \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
        T operator=( U rhs ) {return store_with_release(T(rhs));}  \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \
    };
__TBB_DECL_ATOMIC_ALT(unsigned,size_t)
__TBB_DECL_ATOMIC_ALT(int,ptrdiff_t)
#else
__TBB_DECL_ATOMIC(unsigned)
__TBB_DECL_ATOMIC(int)
#endif /* defined(_MSC_VER) && __TBB_WORDSIZE==4 */

__TBB_DECL_ATOMIC(unsigned short)
__TBB_DECL_ATOMIC(short)
__TBB_DECL_ATOMIC(char)
__TBB_DECL_ATOMIC(signed char)
__TBB_DECL_ATOMIC(unsigned char)

// Old MSVC without /Zc:wchar_t treats wchar_t as a typedef for unsigned short,
// which already has a specialization above; only declare it when it is a
// distinct native type.
#if !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED) 
__TBB_DECL_ATOMIC(wchar_t)
#endif /* !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED) */
340
341 //! Specialization for atomic<T*> with arithmetic and operator->.
342 template<typename T> struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> {
343     T* operator=( T* rhs ) {
344         // "this" required here in strict ISO C++ because store_with_release is a dependent name
345         return this->store_with_release(rhs);
346     }
347     atomic<T*>& operator=( const atomic<T*>& rhs ) {
348         this->store_with_release(rhs); return *this;
349     }
350     T* operator->() const {
351         return (*this);
352     }
353 };
354
355 //! Specialization for atomic<void*>, for sake of not allowing arithmetic or operator->.
356 template<> struct atomic<void*>: internal::atomic_impl<void*> {
357     void* operator=( void* rhs ) {
358         // "this" required here in strict ISO C++ because store_with_release is a dependent name
359         return this->store_with_release(rhs);
360     }
361     atomic<void*>& operator=( const atomic<void*>& rhs ) {
362         this->store_with_release(rhs); return *this;
363     }
364 };
365
366 } // namespace tbb
367
368 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
369     #pragma warning (pop)
370 #endif // warnings 4244, 4267 are back
371
372 #endif /* __TBB_atomic_H */