/*
    Copyright 2005-2011 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

    As a special exception, you may use this file as part of a free software
    library without restriction.  Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License.  This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/

#ifndef __TBB_atomic_H
#define __TBB_atomic_H

#include <cstddef>
#include "tbb_stddef.h"

#if _MSC_VER
#define __TBB_LONG_LONG __int64
#else
#define __TBB_LONG_LONG long long
#endif /* _MSC_VER */

#include "tbb_machine.h"

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings
    #pragma warning (push)
    #pragma warning (disable: 4244 4267)
#endif

namespace tbb {

//! Specifies memory fencing.
enum memory_semantics {
    //! Sequentially consistent fence.
    full_fence,
    //! Acquire fence.
    acquire,
    //! Release fence.
    release,
    //! No ordering.
    relaxed
};
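
// Usage note (summarizing the defaults defined later in this file): a
// memory_semantics value is passed as a template argument to the fenced
// operations, and operations that omit it pick a default.
//     tbb::atomic<int> x;
//     x.fetch_and_add<tbb::release>(1);  // explicit fence semantics
//     x.fetch_and_add(1);                // defaults to full_fence
// Read-modify-write operations default to full_fence, loads to acquire,
// and stores to release.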

//! @cond INTERNAL
namespace internal {

#if __TBB_ATTRIBUTE_ALIGNED_PRESENT
    #define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f  __attribute__ ((aligned(a)));
#elif __TBB_DECLSPEC_ALIGN_PRESENT
    #define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f;
#else
    #error Do not know syntax for forcing alignment.
#endif

template<size_t S>
struct atomic_rep;           // Primary template declared, but never defined.

template<>
struct atomic_rep<1> {       // Specialization
    typedef int8_t word;
    int8_t value;
};
template<>
struct atomic_rep<2> {       // Specialization
    typedef int16_t word;
    __TBB_DECL_ATOMIC_FIELD(int16_t,value,2)
};
template<>
struct atomic_rep<4> {       // Specialization
#if _MSC_VER && __TBB_WORDSIZE==4
    // Work-around that avoids spurious /Wp64 warnings
    typedef intptr_t word;
#else
    typedef int32_t word;
#endif
    __TBB_DECL_ATOMIC_FIELD(int32_t,value,4)
};
#if __TBB_64BIT_ATOMICS
template<>
struct atomic_rep<8> {       // Specialization
    typedef int64_t word;
    __TBB_DECL_ATOMIC_FIELD(int64_t,value,8)
};
#endif

template<size_t Size, memory_semantics M>
struct atomic_traits;        // Primary template declared, but not defined.

#define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M)                                                         \
    template<> struct atomic_traits<S,M> {                                                               \
        typedef atomic_rep<S>::word word;                                                                \
        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \
            return __TBB_machine_cmpswp##S##M(location,new_value,comparand);                             \
        }                                                                                                \
        inline static word fetch_and_add( volatile void* location, word addend ) {                       \
            return __TBB_machine_fetchadd##S##M(location,addend);                                        \
        }                                                                                                \
        inline static word fetch_and_store( volatile void* location, word value ) {                      \
            return __TBB_machine_fetchstore##S##M(location,value);                                       \
        }                                                                                                \
    };
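
// For example, __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire) specializes
// atomic_traits<4,acquire> and routes its operations to the machine-level
// primitives __TBB_machine_cmpswp4acquire, __TBB_machine_fetchadd4acquire,
// and __TBB_machine_fetchstore4acquire supplied by tbb_machine.h.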

#define __TBB_DECL_ATOMIC_PRIMITIVES(S)                                                                  \
    template<memory_semantics M>                                                                         \
    struct atomic_traits<S,M> {                                                                          \
        typedef atomic_rep<S>::word word;                                                                \
        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \
            return __TBB_machine_cmpswp##S(location,new_value,comparand);                                \
        }                                                                                                \
        inline static word fetch_and_add( volatile void* location, word addend ) {                       \
            return __TBB_machine_fetchadd##S(location,addend);                                           \
        }                                                                                                \
        inline static word fetch_and_store( volatile void* location, word value ) {                      \
            return __TBB_machine_fetchstore##S(location,value);                                          \
        }                                                                                                \
    };

template<memory_semantics M>
struct atomic_load_store_traits;    // Primary template declaration

#define __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(M)                      \
    template<> struct atomic_load_store_traits<M> {                     \
        template <typename T>                                           \
        inline static T load( const volatile T& location ) {            \
            return __TBB_load_##M( location );                          \
        }                                                               \
        template <typename T>                                           \
        inline static void store( volatile T& location, T value ) {     \
            __TBB_store_##M( location, value );                         \
        }                                                               \
    }

#if __TBB_USE_FENCED_ATOMICS
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,relaxed)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,relaxed)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,relaxed)
#if __TBB_64BIT_ATOMICS
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,relaxed)
#endif
#else /* !__TBB_USE_FENCED_ATOMICS */
__TBB_DECL_ATOMIC_PRIMITIVES(1)
__TBB_DECL_ATOMIC_PRIMITIVES(2)
__TBB_DECL_ATOMIC_PRIMITIVES(4)
#if __TBB_64BIT_ATOMICS
__TBB_DECL_ATOMIC_PRIMITIVES(8)
#endif
#endif /* !__TBB_USE_FENCED_ATOMICS */

__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(full_fence);
__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(acquire);
__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(release);
__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(relaxed);

//! Additive inverse of 1 for type T.
/** Various compilers issue various warnings if -1 is used with various integer types.
    The baroque expression below avoids all the warnings (we hope). */
#define __TBB_MINUS_ONE(T) (T(T(0)-T(1)))
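
// For example, __TBB_MINUS_ONE(unsigned) expands to
// (unsigned(unsigned(0)-unsigned(1))), which wraps around to the all-ones
// bit pattern without triggering the "unary minus applied to an unsigned
// type" family of warnings.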

//! Base class that provides basic functionality for atomic<T> without fetch_and_add.
/** Works for any type T that has the same size as an integral type, has a trivial constructor/destructor,
    and can be copied/compared by memcpy/memcmp. */
template<typename T>
struct atomic_impl {
protected:
    atomic_rep<sizeof(T)> rep;
private:
    //! Union type used to convert type T to underlying integral type.
    union converter {
        T value;
        typename atomic_rep<sizeof(T)>::word bits;
    };
public:
    typedef T value_type;

    template<memory_semantics M>
    value_type fetch_and_store( value_type value ) {
        converter u, w;
        u.value = value;
        w.bits = internal::atomic_traits<sizeof(value_type),M>::fetch_and_store(&rep.value,u.bits);
        return w.value;
    }

    value_type fetch_and_store( value_type value ) {
        return fetch_and_store<full_fence>(value);
    }

    template<memory_semantics M>
    value_type compare_and_swap( value_type value, value_type comparand ) {
        converter u, v, w;
        u.value = value;
        v.value = comparand;
        w.bits = internal::atomic_traits<sizeof(value_type),M>::compare_and_swap(&rep.value,u.bits,v.bits);
        return w.value;
    }

    value_type compare_and_swap( value_type value, value_type comparand ) {
        return compare_and_swap<full_fence>(value,comparand);
    }

    operator value_type() const volatile {                // volatile qualifier here for backwards compatibility
        converter w;
        w.bits = __TBB_load_with_acquire( rep.value );
        return w.value;
    }

    template<memory_semantics M>
    value_type load () const {
        converter u;
        u.bits = internal::atomic_load_store_traits<M>::load( rep.value );
        return u.value;
    }

    value_type load () const {
        return load<acquire>();
    }

    template<memory_semantics M>
    void store ( value_type value ) {
        converter u;
        u.value = value;
        internal::atomic_load_store_traits<M>::store( rep.value, u.bits );
    }

    void store ( value_type value ) {
        store<release>( value );
    }

protected:
    value_type store_with_release( value_type rhs ) {
        converter u;
        u.value = rhs;
        __TBB_store_with_release(rep.value,u.bits);
        return rhs;
    }
};
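
// Usage sketch for the operations above. Note that compare_and_swap returns
// the value observed at the location, not a success flag:
//     tbb::atomic<int> flag;
//     flag = 0;
//     int seen = flag.compare_and_swap( 1, 0 );  // set to 1 iff currently 0
//     bool won = ( seen == 0 );                  // old value tells us who won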

//! Base class that provides basic functionality for atomic<T> with fetch_and_add.
/** I is the underlying type.
    D is the difference type.
    StepType should be char if I is an integral type, and T if I is a T*. */
template<typename I, typename D, typename StepType>
struct atomic_impl_with_arithmetic: atomic_impl<I> {
public:
    typedef I value_type;

    template<memory_semantics M>
    value_type fetch_and_add( D addend ) {
        return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_add( &this->rep.value, addend*sizeof(StepType) ));
    }

    value_type fetch_and_add( D addend ) {
        return fetch_and_add<full_fence>(addend);
    }

    template<memory_semantics M>
    value_type fetch_and_increment() {
        return fetch_and_add<M>(1);
    }

    value_type fetch_and_increment() {
        return fetch_and_add(1);
    }

    template<memory_semantics M>
    value_type fetch_and_decrement() {
        return fetch_and_add<M>(__TBB_MINUS_ONE(D));
    }

    value_type fetch_and_decrement() {
        return fetch_and_add(__TBB_MINUS_ONE(D));
    }

public:
    value_type operator+=( D addend ) {
        return fetch_and_add(addend)+addend;
    }

    value_type operator-=( D addend ) {
        // Additive inverse of addend computed using binary minus,
        // instead of unary minus, for the sake of avoiding compiler warnings.
        return operator+=(D(0)-addend);
    }

    value_type operator++() {
        return fetch_and_add(1)+1;
    }

    value_type operator--() {
        return fetch_and_add(__TBB_MINUS_ONE(D))-1;
    }

    value_type operator++(int) {
        return fetch_and_add(1);
    }

    value_type operator--(int) {
        return fetch_and_add(__TBB_MINUS_ONE(D));
    }
};
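
// Return-value convention, following directly from the definitions above:
// the fetch_and_* forms return the old value, while the pre-increment and
// pre-decrement operators return the new one.
//     tbb::atomic<int> n;
//     n = 5;
//     int old_n = n.fetch_and_increment();  // old_n == 5, n == 6
//     int new_n = ++n;                      // new_n == 7, n == 7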

} // namespace internal
//! @endcond

//! Primary template for atomic.
/** See the Reference for details.
    @ingroup synchronization */
template<typename T>
struct atomic: internal::atomic_impl<T> {
    T operator=( T rhs ) {
        // "this" required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;}
};
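
// A minimal sketch of the primary template's behavior, assuming a POD type
// whose size matches an integral type, as atomic_impl requires:
//     struct pixel { unsigned char r, g, b, a; };  // 4 bytes, like int32_t
//     tbb::atomic<pixel> p;
//     pixel q = { 0, 0, 0, 255 };
//     p = q;               // release store via store_with_release
//     pixel snapshot = p;  // acquire load via operator value_type()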

#define __TBB_DECL_ATOMIC(T) \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
        T operator=( T rhs ) {return store_with_release(rhs);}  \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \
    };

#if __TBB_64BIT_ATOMICS
__TBB_DECL_ATOMIC(__TBB_LONG_LONG)
__TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG)
#else
// test_atomic will verify that sizeof(long long)==8
#endif
__TBB_DECL_ATOMIC(long)
__TBB_DECL_ATOMIC(unsigned long)

#if defined(_MSC_VER) && __TBB_WORDSIZE==4
/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option.
   It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T)
   with an operator=(U) that explicitly converts the U to a T.  Types T and U should be
   type synonyms on the platform.  Type U should be the wider variant of T from the
   perspective of /Wp64. */
#define __TBB_DECL_ATOMIC_ALT(T,U) \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
        T operator=( U rhs ) {return store_with_release(T(rhs));}  \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \
    };
__TBB_DECL_ATOMIC_ALT(unsigned,size_t)
__TBB_DECL_ATOMIC_ALT(int,ptrdiff_t)
#else
__TBB_DECL_ATOMIC(unsigned)
__TBB_DECL_ATOMIC(int)
#endif /* defined(_MSC_VER) && __TBB_WORDSIZE==4 */

__TBB_DECL_ATOMIC(unsigned short)
__TBB_DECL_ATOMIC(short)
__TBB_DECL_ATOMIC(char)
__TBB_DECL_ATOMIC(signed char)
__TBB_DECL_ATOMIC(unsigned char)

#if !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED)
__TBB_DECL_ATOMIC(wchar_t)
#endif /* !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED) */

//! Specialization for atomic<T*> with arithmetic and operator->.
template<typename T> struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> {
    T* operator=( T* rhs ) {
        // "this" required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<T*>& operator=( const atomic<T*>& rhs ) {
        this->store_with_release(rhs); return *this;
    }
    T* operator->() const {
        return (*this);
    }
};
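
// Pointer arithmetic sketch: StepType is T here, so fetch_and_add scales its
// addend by sizeof(T), giving ordinary pointer-stride behavior.
//     static int table[8];
//     tbb::atomic<int*> cursor;
//     cursor = table;
//     int* claimed = cursor.fetch_and_add(2);  // claimed==table, cursor==table+2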

//! Specialization for atomic<void*>, for the sake of not allowing arithmetic or operator->.
template<> struct atomic<void*>: internal::atomic_impl<void*> {
    void* operator=( void* rhs ) {
        // "this" required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<void*>& operator=( const atomic<void*>& rhs ) {
        this->store_with_release(rhs); return *this;
    }
};

// Helpers that work around the clumsy syntax for calling a template member
// function of a class template when the template argument depends on
// template parameters.

template <memory_semantics M, typename T>
T load ( const atomic<T>& a ) { return a.template load<M>(); }

template <memory_semantics M, typename T>
void store ( atomic<T>& a, T value ) { a.template store<M>(value); }
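
// Usage sketch for the helpers above; without them, callers in dependent
// contexts would have to spell out the "template" disambiguator themselves.
//     tbb::atomic<int> a;
//     tbb::store<tbb::relaxed>( a, 42 );
//     int v = tbb::load<tbb::acquire>( a );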
423
424 } // namespace tbb
425
426 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
427     #pragma warning (pop)
428 #endif // warnings 4244, 4267 are back
429
430 #endif /* __TBB_atomic_H */