/*
    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

    As a special exception, you may use this file as part of a free software
    library without restriction.  Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License.  This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/

#ifndef __TBB_atomic_H
#define __TBB_atomic_H

#include <cstddef>
#include "tbb_stddef.h"

#if _MSC_VER
#define __TBB_LONG_LONG __int64
#else
#define __TBB_LONG_LONG long long
#endif /* _MSC_VER */

#include "tbb_machine.h"

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings
    #pragma warning (push)
    #pragma warning (disable: 4244 4267)
#endif

namespace tbb {

//! Specifies memory fencing.
enum memory_semantics {
    //! For internal use only.
    __TBB_full_fence,
    //! Acquire fence
    acquire,
    //! Release fence
    release
};
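
// Illustrative example (not part of the original header): the enumerators
// above select a fence when passed as the template argument of an atomic
// operation declared later in this file, e.g.
//
//     tbb::atomic<int> refcount;
//     refcount = 1;
//     int old = refcount.fetch_and_add<tbb::release>(-1); // release-fenced decrement
//
// Omitting the template argument defaults to __TBB_full_fence.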

//! @cond INTERNAL
namespace internal {

#if __GNUC__ || __SUNPRO_CC
#define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f  __attribute__ ((aligned(a)));
#elif defined(__INTEL_COMPILER)||_MSC_VER >= 1300
#define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f;
#else
#error Do not know syntax for forcing alignment.
#endif /* __GNUC__ || __SUNPRO_CC */
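
// For illustration (not part of the original header): with GCC, the invocation
//     __TBB_DECL_ATOMIC_FIELD(int32_t,value,4)
// expands to
//     int32_t value  __attribute__ ((aligned(4)));
// forcing the naturally aligned storage that the atomic machine primitives require.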

template<size_t S>
struct atomic_rep;           // Primary template declared, but never defined.

template<>
struct atomic_rep<1> {       // Specialization
    typedef int8_t word;
    int8_t value;
};
template<>
struct atomic_rep<2> {       // Specialization
    typedef int16_t word;
    __TBB_DECL_ATOMIC_FIELD(int16_t,value,2)
};
template<>
struct atomic_rep<4> {       // Specialization
#if _MSC_VER && __TBB_WORDSIZE==4
    // Work-around that avoids spurious /Wp64 warnings
    typedef intptr_t word;
#else
    typedef int32_t word;
#endif
    __TBB_DECL_ATOMIC_FIELD(int32_t,value,4)
};
template<>
struct atomic_rep<8> {       // Specialization
    typedef int64_t word;
    __TBB_DECL_ATOMIC_FIELD(int64_t,value,8)
};

template<size_t Size, memory_semantics M>
struct atomic_traits;        // Primary template declared, but not defined.

#define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M)                         \
    template<> struct atomic_traits<S,M> {                               \
        typedef atomic_rep<S>::word word;                               \
        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\
            return __TBB_CompareAndSwap##S##M(location,new_value,comparand);    \
        }                                                                       \
        inline static word fetch_and_add( volatile void* location, word addend ) { \
            return __TBB_FetchAndAdd##S##M(location,addend);                    \
        }                                                                       \
        inline static word fetch_and_store( volatile void* location, word value ) {\
            return __TBB_FetchAndStore##S##M(location,value);                   \
        }                                                                       \
    };

#define __TBB_DECL_ATOMIC_PRIMITIVES(S)                                  \
    template<memory_semantics M>                                         \
    struct atomic_traits<S,M> {                                          \
        typedef atomic_rep<S>::word word;                               \
        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\
            return __TBB_CompareAndSwap##S(location,new_value,comparand);       \
        }                                                                       \
        inline static word fetch_and_add( volatile void* location, word addend ) { \
            return __TBB_FetchAndAdd##S(location,addend);                       \
        }                                                                       \
        inline static word fetch_and_store( volatile void* location, word value ) {\
            return __TBB_FetchAndStore##S(location,value);                      \
        }                                                                       \
    };

#if __TBB_DECL_FENCED_ATOMICS
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release)
#else
__TBB_DECL_ATOMIC_PRIMITIVES(1)
__TBB_DECL_ATOMIC_PRIMITIVES(2)
__TBB_DECL_ATOMIC_PRIMITIVES(4)
__TBB_DECL_ATOMIC_PRIMITIVES(8)
#endif
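
// For illustration (not part of the original header): when fenced variants are
// available, __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire) above defines
// atomic_traits<4,acquire>, whose compare_and_swap forwards to the machine
// primitive __TBB_CompareAndSwap4acquire from tbb_machine.h; the token-pasted
// name encodes both the operand size and the fence.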

//! Additive inverse of 1 for type T.
/** Various compilers issue various warnings if -1 is used with various integer types.
    The baroque expression below avoids all the warnings (we hope). */
#define __TBB_MINUS_ONE(T) (T(T(0)-T(1)))
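
// Worked example (illustrative, not part of the original header): for an
// unsigned type, __TBB_MINUS_ONE(size_t) expands to
//     (size_t(size_t(0)-size_t(1)))
// i.e. the all-ones value that behaves as -1 under modular arithmetic, while
// for signed types it is simply -1, computed without a unary minus.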

//! Base class that provides basic functionality for atomic<T> without fetch_and_add.
/** Works for any type T that has the same size as an integral type, has a trivial constructor/destructor,
    and can be copied/compared by memcpy/memcmp. */
template<typename T>
struct atomic_impl {
protected:
    atomic_rep<sizeof(T)> rep;
private:
    //! Union type used to convert type T to underlying integral type.
    union converter {
        T value;
        typename atomic_rep<sizeof(T)>::word bits;
    };
public:
    typedef T value_type;

    template<memory_semantics M>
    value_type fetch_and_store( value_type value ) {
        converter u, w;
        u.value = value;
        w.bits = internal::atomic_traits<sizeof(value_type),M>::fetch_and_store(&rep.value,u.bits);
        return w.value;
    }

    value_type fetch_and_store( value_type value ) {
        return fetch_and_store<__TBB_full_fence>(value);
    }

    template<memory_semantics M>
    value_type compare_and_swap( value_type value, value_type comparand ) {
        converter u, v, w;
        u.value = value;
        v.value = comparand;
        w.bits = internal::atomic_traits<sizeof(value_type),M>::compare_and_swap(&rep.value,u.bits,v.bits);
        return w.value;
    }

    value_type compare_and_swap( value_type value, value_type comparand ) {
        return compare_and_swap<__TBB_full_fence>(value,comparand);
    }

    operator value_type() const volatile {                // volatile qualifier here for backwards compatibility
        converter w;
        w.bits = __TBB_load_with_acquire( rep.value );
        return w.value;
    }

protected:
    value_type store_with_release( value_type rhs ) {
        converter u;
        u.value = rhs;
        __TBB_store_with_release(rep.value,u.bits);
        return rhs;
    }
};
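
// Illustrative usage (not part of the original header): compare_and_swap
// returns the value observed at the location, and the swap took place iff
// that value equals the comparand. A typical retry loop over the public
// tbb::atomic<T> interface declared below:
//
//     tbb::atomic<int> x;
//     x = 0;
//     int snapshot;
//     do {
//         snapshot = x;   // load with acquire via operator value_type
//     } while( x.compare_and_swap(2*snapshot+1,snapshot)!=snapshot );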

//! Base class that provides basic functionality for atomic<T> with fetch_and_add.
/** I is the underlying type.
    D is the difference type.
    StepType should be char if I is an integral type, and T if I is a T*. */
template<typename I, typename D, typename StepType>
struct atomic_impl_with_arithmetic: atomic_impl<I> {
public:
    typedef I value_type;

    template<memory_semantics M>
    value_type fetch_and_add( D addend ) {
        return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_add( &this->rep.value, addend*sizeof(StepType) ));
    }

    value_type fetch_and_add( D addend ) {
        return fetch_and_add<__TBB_full_fence>(addend);
    }

    template<memory_semantics M>
    value_type fetch_and_increment() {
        return fetch_and_add<M>(1);
    }

    value_type fetch_and_increment() {
        return fetch_and_add(1);
    }

    template<memory_semantics M>
    value_type fetch_and_decrement() {
        return fetch_and_add<M>(__TBB_MINUS_ONE(D));
    }

    value_type fetch_and_decrement() {
        return fetch_and_add(__TBB_MINUS_ONE(D));
    }

public:
    value_type operator+=( D addend ) {
        return fetch_and_add(addend)+addend;
    }

    value_type operator-=( D addend ) {
        // Additive inverse of addend computed using binary minus,
        // instead of unary minus, for sake of avoiding compiler warnings.
        return operator+=(D(0)-addend);
    }

    value_type operator++() {
        return fetch_and_add(1)+1;
    }

    value_type operator--() {
        return fetch_and_add(__TBB_MINUS_ONE(D))-1;
    }

    value_type operator++(int) {
        return fetch_and_add(1);
    }

    value_type operator--(int) {
        return fetch_and_add(__TBB_MINUS_ONE(D));
    }
};
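
// Illustrative usage (not part of the original header): fetch_and_add and the
// postfix operators return the value held *before* the update, while the
// compound assignments and prefix operators return the value *after* it:
//
//     tbb::atomic<int> n;
//     n = 10;
//     int a = n.fetch_and_add(5);  // a==10, n==15
//     int b = (n += 5);            // b==20, n==20
//     int c = n++;                 // c==20, n==21
//     int d = ++n;                 // d==22, n==22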

#if __TBB_WORDSIZE == 4
// Platforms with 32-bit hardware require special effort for 64-bit loads and stores.
#if defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400

template<>
inline atomic_impl<__TBB_LONG_LONG>::operator atomic_impl<__TBB_LONG_LONG>::value_type() const volatile {
    return __TBB_Load8(&rep.value);
}

template<>
inline atomic_impl<unsigned __TBB_LONG_LONG>::operator atomic_impl<unsigned __TBB_LONG_LONG>::value_type() const volatile {
    return __TBB_Load8(&rep.value);
}

template<>
inline atomic_impl<__TBB_LONG_LONG>::value_type atomic_impl<__TBB_LONG_LONG>::store_with_release( value_type rhs ) {
    __TBB_Store8(&rep.value,rhs);
    return rhs;
}

template<>
inline atomic_impl<unsigned __TBB_LONG_LONG>::value_type atomic_impl<unsigned __TBB_LONG_LONG>::store_with_release( value_type rhs ) {
    __TBB_Store8(&rep.value,rhs);
    return rhs;
}

#endif /* defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400 */
#endif /* __TBB_WORDSIZE==4 */

} /* internal */
//! @endcond

//! Primary template for atomic.
/** See the Reference for details.
    @ingroup synchronization */
template<typename T>
struct atomic: internal::atomic_impl<T> {
    T operator=( T rhs ) {
        // "this" required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;}
};
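
// Illustrative usage (not part of the original header): the primary template
// serves types that meet the atomic_impl size and triviality requirements but
// have no arithmetic, such as enumerations; assignment stores with release
// semantics and reading loads with acquire semantics:
//
//     enum color_t { red, green, blue };   // hypothetical enumeration
//     tbb::atomic<color_t> color;
//     color = red;                         // store_with_release
//     color_t c = color;                   // __TBB_load_with_acquire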

#define __TBB_DECL_ATOMIC(T) \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
        T operator=( T rhs ) {return store_with_release(rhs);}  \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \
    };

#if defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400
__TBB_DECL_ATOMIC(__TBB_LONG_LONG)
__TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG)
#else
// Some old versions of MSVC cannot correctly compile templates with "long long".
#endif /* defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400 */

__TBB_DECL_ATOMIC(long)
__TBB_DECL_ATOMIC(unsigned long)

#if defined(_MSC_VER) && __TBB_WORDSIZE==4
/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option.
   It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T)
   with an operator=(U) that explicitly converts the U to a T.  Types T and U should be
   type synonyms on the platform.  Type U should be the wider variant of T from the
   perspective of /Wp64. */
#define __TBB_DECL_ATOMIC_ALT(T,U) \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
        T operator=( U rhs ) {return store_with_release(T(rhs));}  \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \
    };
__TBB_DECL_ATOMIC_ALT(unsigned,size_t)
__TBB_DECL_ATOMIC_ALT(int,ptrdiff_t)
#else
__TBB_DECL_ATOMIC(unsigned)
__TBB_DECL_ATOMIC(int)
#endif /* defined(_MSC_VER) && __TBB_WORDSIZE==4 */

__TBB_DECL_ATOMIC(unsigned short)
__TBB_DECL_ATOMIC(short)
__TBB_DECL_ATOMIC(char)
__TBB_DECL_ATOMIC(signed char)
__TBB_DECL_ATOMIC(unsigned char)

#if !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED)
__TBB_DECL_ATOMIC(wchar_t)
#endif /* !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED) */

//! Specialization for atomic<T*> with arithmetic and operator->.
template<typename T> struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> {
    T* operator=( T* rhs ) {
        // "this" required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<T*>& operator=( const atomic<T*>& rhs ) {
        this->store_with_release(rhs); return *this;
    }
    T* operator->() const {
        return (*this);
    }
};
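
// Illustrative usage (not part of the original header): because StepType is T,
// fetch_and_add on atomic<T*> advances in units of sizeof(T), matching
// ordinary pointer arithmetic:
//
//     int buffer[8];                       // hypothetical storage
//     tbb::atomic<int*> cursor;
//     cursor = buffer;
//     int* p = cursor.fetch_and_add(2);    // p==buffer; cursor==buffer+2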

//! Specialization for atomic<void*>, for sake of not allowing arithmetic or operator->.
template<> struct atomic<void*>: internal::atomic_impl<void*> {
    void* operator=( void* rhs ) {
        // "this" required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<void*>& operator=( const atomic<void*>& rhs ) {
        this->store_with_release(rhs); return *this;
    }
};
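
// Illustrative usage (not part of the original header): atomic<void*> keeps
// loads, stores, fetch_and_store, and compare_and_swap, but deliberately
// omits arithmetic and operator->:
//
//     static int sentinel;                 // hypothetical object
//     tbb::atomic<void*> slot;
//     slot = NULL;
//     void* prev = slot.compare_and_swap(&sentinel,NULL); // claim slot if empty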

} // namespace tbb

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    #pragma warning (pop)
#endif // warnings 4244, 4267 are back

#endif /* __TBB_atomic_H */