/*
    Copyright 2005-2011 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    As a special exception, you may use this file as part of a free software
    library without restriction.  Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License.  This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/

#ifndef __TBB_machine_H
#define __TBB_machine_H
/** This header provides a basic platform abstraction layer by hooking up appropriate
    architecture/OS/compiler specific headers from the /include/tbb/machine directory.
    If a plug-in header does not implement all the required APIs, it must request
    generic implementations of the missing ones by setting one or more of the
    following macros:

    __TBB_USE_GENERIC_PART_WORD_CAS
    __TBB_USE_GENERIC_PART_WORD_FETCH_ADD
    __TBB_USE_GENERIC_PART_WORD_FETCH_STORE
    __TBB_USE_GENERIC_FETCH_ADD
    __TBB_USE_GENERIC_FETCH_STORE
    __TBB_USE_GENERIC_DWORD_FETCH_ADD
    __TBB_USE_GENERIC_DWORD_FETCH_STORE
    __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE
    __TBB_USE_GENERIC_FULL_FENCED_LOAD_STORE
    __TBB_USE_GENERIC_RELAXED_LOAD_STORE
    __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE

    In this case tbb_machine.h will add the missing functionality based on a minimal set
    of APIs that all plug-in headers are required to implement, as described below.

    Note that these generic implementations may be sub-optimal for a particular
    architecture, and thus should be relied upon only after careful evaluation
    or as the last resort.

    Additionally __TBB_64BIT_ATOMICS can be set to 0 on a 32-bit architecture to
    indicate that the port is not going to support double word atomics. It may also
    be set to 1 explicitly, though normally this is not necessary as tbb_machine.h
    will set it automatically.
    Prerequisites for each architecture port
    ----------------------------------------
    The following functions have no generic implementation. Therefore they must be
    implemented in each machine architecture specific header either as a conventional
    function or as a function-like macro.

    __TBB_Yield()
        Signals OS that the current thread is willing to relinquish the remainder
        of its time quantum.

    __TBB_full_memory_fence()
        Must prevent all memory operations from being reordered across it (both
        by hardware and compiler). All such fences must be totally ordered (or
        sequentially consistent).

    __TBB_machine_cmpswp4( volatile void *ptr, int32_t value, int32_t comparand )
        Must be provided if __TBB_USE_FENCED_ATOMICS is not set.

    __TBB_machine_cmpswp8( volatile void *ptr, int64_t value, int64_t comparand )
        Must be provided for 64-bit architectures if __TBB_USE_FENCED_ATOMICS is not set,
        and for 32-bit architectures if __TBB_64BIT_ATOMICS is set.

    __TBB_machine_<op><S><fence>(...), where
        <op> = {cmpswp, fetchadd, fetchstore}
        <S> = {1, 2, 4, 8}
        <fence> = {full_fence, acquire, release, relaxed}
        Must be provided if __TBB_USE_FENCED_ATOMICS is set.

    __TBB_control_consistency_helper()
        Bridges the memory-semantics gap between architectures providing only
        implicit C++0x "consume" semantics (like Power Architecture) and those
        also implicitly obeying control dependencies (like Itanium).
        It must be used only in conditional code where the condition is itself
        data-dependent, and will then make subsequent code behave as if the
        original data dependency were acquired.
        It needs only an empty definition where its semantics are already implied
        by the architecture, either specifically (Itanium) or because generally
        stronger C++0x "acquire" semantics are enforced (like x86).

    __TBB_acquire_consistency_helper(), __TBB_release_consistency_helper()
        Must be provided if __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE is set.
        Enforce acquire and release semantics in generic implementations of fenced
        store and load operations. Depending on the particular architecture/compiler
        combination they may be a hardware fence, a compiler fence, both or nothing.
 **/
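
/* Illustrative sketch (not an actual TBB port header): a hypothetical minimal
   plug-in header for a 32-bit target could provide just the mandatory
   primitives and request generic implementations for everything else. The GCC
   builtins and sched_yield() below are assumptions chosen for brevity; a real
   port would use the most efficient instructions its target offers.

       #define __TBB_WORDSIZE 4
       #define __TBB_BIG_ENDIAN 0

       #define __TBB_full_memory_fence()          __sync_synchronize()
       #define __TBB_control_consistency_helper() __TBB_full_memory_fence()
       #define __TBB_acquire_consistency_helper() __TBB_full_memory_fence()
       #define __TBB_release_consistency_helper() __TBB_full_memory_fence()
       #define __TBB_Yield()                      sched_yield()

       // The one mandatory atomic primitive; everything else is synthesized.
       inline int32_t __TBB_machine_cmpswp4( volatile void *ptr, int32_t value, int32_t comparand ) {
           return __sync_val_compare_and_swap( (volatile int32_t*)ptr, comparand, value );
       }

       #define __TBB_USE_GENERIC_PART_WORD_CAS          1
       #define __TBB_USE_GENERIC_FETCH_ADD              1
       #define __TBB_USE_GENERIC_FETCH_STORE            1
       #define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1
       #define __TBB_USE_GENERIC_RELAXED_LOAD_STORE     1
       #define __TBB_64BIT_ATOMICS                      0
*/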
#include "tbb_stddef.h"

namespace tbb {
namespace internal {

////////////////////////////////////////////////////////////////////////////////
// Overridable helpers declarations
//
// A machine/*.h file may choose to define these templates, otherwise it must
// request the default implementations by setting the appropriate __TBB_USE_GENERIC_XXX macro(s).
//
template <typename T, std::size_t S>
struct machine_load_store;

template <typename T, std::size_t S>
struct machine_load_store_relaxed;

template <typename T, std::size_t S>
struct machine_load_store_seq_cst;

// End of overridable helpers declarations
////////////////////////////////////////////////////////////////////////////////
template<size_t S> struct atomic_selector;

template<> struct atomic_selector<1> {
    typedef int8_t word;
    inline static word fetch_store ( volatile void* location, word value );
};

template<> struct atomic_selector<2> {
    typedef int16_t word;
    inline static word fetch_store ( volatile void* location, word value );
};

template<> struct atomic_selector<4> {
#if _MSC_VER && !_WIN64
    // Work-around that avoids spurious /Wp64 warnings
    typedef intptr_t word;
#else
    typedef int32_t word;
#endif
    inline static word fetch_store ( volatile void* location, word value );
};

template<> struct atomic_selector<8> {
    typedef int64_t word;
    inline static word fetch_store ( volatile void* location, word value );
};

}} // namespaces internal, tbb
#if _WIN32||_WIN64

#ifdef _MANAGED
#pragma managed(push, off)
#endif

    #if __MINGW64__ || __MINGW32__
        extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void );
        #define __TBB_Yield()  SwitchToThread()
        #if (TBB_USE_GCC_BUILTINS && __TBB_GCC_BUILTIN_ATOMICS_PRESENT)
            #include "machine/gcc_generic.h"
        #elif __MINGW64__
            #include "machine/linux_intel64.h"
        #elif __MINGW32__
            #include "machine/linux_ia32.h"
        #endif
    #elif defined(_M_IX86)
        #include "machine/windows_ia32.h"
    #elif defined(_M_X64)
        #include "machine/windows_intel64.h"
    #elif _XBOX
        #include "machine/xbox360_ppc.h"
    #endif

#ifdef _MANAGED
#pragma managed(pop)
#endif

#elif __linux__ || __FreeBSD__ || __NetBSD__

    #if (TBB_USE_GCC_BUILTINS && __TBB_GCC_BUILTIN_ATOMICS_PRESENT)
        #include "machine/gcc_generic.h"
    #elif __i386__
        #include "machine/linux_ia32.h"
    #elif __x86_64__
        #include "machine/linux_intel64.h"
    #elif __ia64__
        #include "machine/linux_ia64.h"
    #elif __powerpc__
        #include "machine/mac_ppc.h"
    #elif __TBB_GCC_BUILTIN_ATOMICS_PRESENT
        #include "machine/gcc_generic.h"
    #endif
    #include "machine/linux_common.h"

#elif __APPLE__

    #if __i386__
        #include "machine/linux_ia32.h"
    #elif __x86_64__
        #include "machine/linux_intel64.h"
    #elif __POWERPC__
        #include "machine/mac_ppc.h"
    #endif
    #include "machine/macos_common.h"

#elif _AIX

    #include "machine/ibm_aix51.h"

#elif __sun || __SUNPRO_CC

    #define __asm__ asm
    #define __volatile__ volatile

    #if __i386 || __i386__
        #include "machine/linux_ia32.h"
    #elif __x86_64__
        #include "machine/linux_intel64.h"
    #elif __sparc
        #include "machine/sunos_sparc.h"
    #endif
    #include <sched.h>

    #define __TBB_Yield() sched_yield()

#endif /* OS selection */
#ifndef __TBB_64BIT_ATOMICS
#define __TBB_64BIT_ATOMICS 1
#endif

// Special atomic functions
#if __TBB_USE_FENCED_ATOMICS
    #define __TBB_machine_cmpswp1   __TBB_machine_cmpswp1full_fence
    #define __TBB_machine_cmpswp2   __TBB_machine_cmpswp2full_fence
    #define __TBB_machine_cmpswp4   __TBB_machine_cmpswp4full_fence
    #define __TBB_machine_cmpswp8   __TBB_machine_cmpswp8full_fence

    #if __TBB_WORDSIZE==8
        #define __TBB_machine_fetchadd8            __TBB_machine_fetchadd8full_fence
        #define __TBB_machine_fetchstore8          __TBB_machine_fetchstore8full_fence
        #define __TBB_FetchAndAddWrelease(P,V)     __TBB_machine_fetchadd8release(P,V)
        #define __TBB_FetchAndIncrementWacquire(P) __TBB_machine_fetchadd8acquire(P,1)
        #define __TBB_FetchAndDecrementWrelease(P) __TBB_machine_fetchadd8release(P,(-1))
    #else
        #error Define macros for 4-byte word, similarly to the above __TBB_WORDSIZE==8 branch.
    #endif /* __TBB_WORDSIZE==8 */
#else /* !__TBB_USE_FENCED_ATOMICS */
    #define __TBB_FetchAndAddWrelease(P,V)     __TBB_FetchAndAddW(P,V)
    #define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1)
    #define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,(-1))
#endif /* !__TBB_USE_FENCED_ATOMICS */
#if __TBB_WORDSIZE==4
    #define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp4(P,V,C)
    #define __TBB_FetchAndAddW(P,V)      __TBB_machine_fetchadd4(P,V)
    #define __TBB_FetchAndStoreW(P,V)    __TBB_machine_fetchstore4(P,V)
#elif __TBB_WORDSIZE==8
    #if __TBB_USE_GENERIC_DWORD_LOAD_STORE || __TBB_USE_GENERIC_DWORD_FETCH_ADD || __TBB_USE_GENERIC_DWORD_FETCH_STORE
        #error These macros should only be used on 32-bit platforms.
    #endif

    #define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp8(P,V,C)
    #define __TBB_FetchAndAddW(P,V)      __TBB_machine_fetchadd8(P,V)
    #define __TBB_FetchAndStoreW(P,V)    __TBB_machine_fetchstore8(P,V)
#else /* __TBB_WORDSIZE != 8 */
    #error Unsupported machine word size.
#endif /* __TBB_WORDSIZE */
#ifndef __TBB_Pause
inline void __TBB_Pause(int32_t) {
    __TBB_Yield();
}
#endif

namespace tbb {

//! Sequentially consistent full memory fence.
inline void atomic_fence () { __TBB_full_memory_fence(); }

namespace internal {

//! Class that implements exponential backoff.
/** See implementation of spin_wait_while_eq for an example. */
class atomic_backoff : no_copy {
    //! Time delay, in units of "pause" instructions.
    /** Should be equal to approximately the number of "pause" instructions
        that take the same time as a context switch. */
    static const int32_t LOOPS_BEFORE_YIELD = 16;
    int32_t count;
public:
    atomic_backoff() : count(1) {}

    //! Pause for a while.
    void pause() {
        if( count<=LOOPS_BEFORE_YIELD ) {
            __TBB_Pause(count);
            // Pause twice as long the next time.
            count*=2;
        } else {
            // Pause is so long that we might as well yield CPU to scheduler.
            __TBB_Yield();
        }
    }

    //! Pause while below the yield threshold and return true;
    //! once the threshold is exceeded, return false immediately without pausing.
    bool bounded_pause() {
        if( count<=LOOPS_BEFORE_YIELD ) {
            __TBB_Pause(count);
            // Pause twice as long the next time.
            count*=2;
            return true;
        } else {
            return false;
        }
    }

    void reset() {
        count = 1;
    }
};
//! Spin WHILE the value of the variable is equal to a given value
/** T and U should be comparable types. */
template<typename T, typename U>
void spin_wait_while_eq( const volatile T& location, U value ) {
    atomic_backoff backoff;
    while( location==value ) backoff.pause();
}

//! Spin UNTIL the value of the variable is equal to a given value
/** T and U should be comparable types. */
template<typename T, typename U>
void spin_wait_until_eq( const volatile T& location, const U value ) {
    atomic_backoff backoff;
    while( location!=value ) backoff.pause();
}
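
/* Usage sketch (illustrative, not part of the library): the canonical retry
   pattern built on atomic_backoff, essentially the same loop the generic
   fetch-and-add below uses. `counter` is a hypothetical word-sized variable.

       static volatile intptr_t counter;

       void increment() {
           tbb::internal::atomic_backoff backoff;
           for(;;) {
               intptr_t snapshot = counter;
               if( __TBB_CompareAndSwapW( (volatile void*)&counter, snapshot+1, snapshot )==snapshot )
                   break;            // CAS succeeded
               backoff.pause();      // lost a race: wait a little longer each time
           }
       }

   spin_wait_while_eq and spin_wait_until_eq package the same backoff loop for
   the common case of waiting for a memory location to change. */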
// T should be unsigned, otherwise sign propagation will break correctness of bit manipulations.
// S should be either 1 or 2, for the mask calculation to work correctly.
// Together, these rules limit applicability of Masked CAS to unsigned char and unsigned short.
template<size_t S, typename T>
inline T __TBB_MaskedCompareAndSwap (volatile T *ptr, T value, T comparand ) {
    volatile uint32_t * base = (uint32_t*)( (uintptr_t)ptr & ~(uintptr_t)0x3 );
#if __TBB_BIG_ENDIAN
    const uint8_t bitoffset = uint8_t( 8*( 4-S - (uintptr_t(ptr) & 0x3) ) );
#else
    const uint8_t bitoffset = uint8_t( 8*((uintptr_t)ptr & 0x3) );
#endif
    const uint32_t mask = ( (1<<(S*8)) - 1 )<<bitoffset;
    atomic_backoff b;
    uint32_t result;
    for(;;) {
        result = *base; // reload the base value which might change during the pause
        uint32_t old_value = ( result & ~mask ) | ( comparand << bitoffset );
        uint32_t new_value = ( result & ~mask ) | ( value << bitoffset );
        // __TBB_machine_cmpswp4 presumed to have full fence.
        // Cast shuts up /Wp64 warning
        result = (uint32_t)__TBB_machine_cmpswp4( base, new_value, old_value );
        if(  result==old_value               // CAS succeeded
          || ((result^old_value)&mask)!=0 )  // CAS failed and the bits of interest have changed
            break;
        else                                 // CAS failed but the bits of interest left unchanged
            b.pause();
    }
    return T((result & mask) >> bitoffset);
}
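
/* Worked example (little-endian, illustrative): an 8-bit CAS on the byte at
   address base+2 is mapped onto a 32-bit CAS on the enclosing aligned word:
       S = 1,  (uintptr_t)ptr & 0x3 == 2
       bitoffset = 8*2              == 16
       mask      = ((1<<8)-1) << 16 == 0x00FF0000
   old_value and new_value keep the other three bytes exactly as re-read from
   *base, so the word-wide CAS succeeds only if the target byte still holds the
   comparand and its neighbours have not changed; if only the neighbours
   changed, the loop simply retries. */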
template<size_t S, typename T>
inline T __TBB_CompareAndSwapGeneric (volatile void *ptr, T value, T comparand );

template<>
inline uint8_t __TBB_CompareAndSwapGeneric <1,uint8_t> (volatile void *ptr, uint8_t value, uint8_t comparand ) {
#if __TBB_USE_GENERIC_PART_WORD_CAS
    return __TBB_MaskedCompareAndSwap<1,uint8_t>((volatile uint8_t *)ptr,value,comparand);
#else
    return __TBB_machine_cmpswp1(ptr,value,comparand);
#endif
}

template<>
inline uint16_t __TBB_CompareAndSwapGeneric <2,uint16_t> (volatile void *ptr, uint16_t value, uint16_t comparand ) {
#if __TBB_USE_GENERIC_PART_WORD_CAS
    return __TBB_MaskedCompareAndSwap<2,uint16_t>((volatile uint16_t *)ptr,value,comparand);
#else
    return __TBB_machine_cmpswp2(ptr,value,comparand);
#endif
}

template<>
inline uint32_t __TBB_CompareAndSwapGeneric <4,uint32_t> (volatile void *ptr, uint32_t value, uint32_t comparand ) {
    // Cast shuts up /Wp64 warning
    return (uint32_t)__TBB_machine_cmpswp4(ptr,value,comparand);
}

#if __TBB_64BIT_ATOMICS
template<>
inline uint64_t __TBB_CompareAndSwapGeneric <8,uint64_t> (volatile void *ptr, uint64_t value, uint64_t comparand ) {
    return __TBB_machine_cmpswp8(ptr,value,comparand);
}
#endif
template<size_t S, typename T>
inline T __TBB_FetchAndAddGeneric (volatile void *ptr, T addend) {
    atomic_backoff b;
    T result;
    for(;;) {
        result = *reinterpret_cast<volatile T *>(ptr);
        // __TBB_CompareAndSwapGeneric presumed to have full fence.
        if( __TBB_CompareAndSwapGeneric<S,T> ( ptr, result+addend, result )==result )
            break;
        b.pause();
    }
    return result;
}

template<size_t S, typename T>
inline T __TBB_FetchAndStoreGeneric (volatile void *ptr, T value) {
    atomic_backoff b;
    T result;
    for(;;) {
        result = *reinterpret_cast<volatile T *>(ptr);
        // __TBB_CompareAndSwapGeneric presumed to have full fence.
        if( __TBB_CompareAndSwapGeneric<S,T> ( ptr, value, result )==result )
            break;
        b.pause();
    }
    return result;
}
#if __TBB_USE_GENERIC_PART_WORD_CAS
#define __TBB_machine_cmpswp1 tbb::internal::__TBB_CompareAndSwapGeneric<1,uint8_t>
#define __TBB_machine_cmpswp2 tbb::internal::__TBB_CompareAndSwapGeneric<2,uint16_t>
#endif

#if __TBB_USE_GENERIC_FETCH_ADD || __TBB_USE_GENERIC_PART_WORD_FETCH_ADD
#define __TBB_machine_fetchadd1 tbb::internal::__TBB_FetchAndAddGeneric<1,uint8_t>
#define __TBB_machine_fetchadd2 tbb::internal::__TBB_FetchAndAddGeneric<2,uint16_t>
#endif

#if __TBB_USE_GENERIC_FETCH_ADD
#define __TBB_machine_fetchadd4 tbb::internal::__TBB_FetchAndAddGeneric<4,uint32_t>
#endif

#if __TBB_USE_GENERIC_FETCH_ADD || __TBB_USE_GENERIC_DWORD_FETCH_ADD
#define __TBB_machine_fetchadd8 tbb::internal::__TBB_FetchAndAddGeneric<8,uint64_t>
#endif

#if __TBB_USE_GENERIC_FETCH_STORE || __TBB_USE_GENERIC_PART_WORD_FETCH_STORE
#define __TBB_machine_fetchstore1 tbb::internal::__TBB_FetchAndStoreGeneric<1,uint8_t>
#define __TBB_machine_fetchstore2 tbb::internal::__TBB_FetchAndStoreGeneric<2,uint16_t>
#endif

#if __TBB_USE_GENERIC_FETCH_STORE
#define __TBB_machine_fetchstore4 tbb::internal::__TBB_FetchAndStoreGeneric<4,uint32_t>
#endif

#if __TBB_USE_GENERIC_FETCH_STORE || __TBB_USE_GENERIC_DWORD_FETCH_STORE
#define __TBB_machine_fetchstore8 tbb::internal::__TBB_FetchAndStoreGeneric<8,uint64_t>
#endif
#if __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE
#define __TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(S)                                             \
    atomic_selector<S>::word atomic_selector<S>::fetch_store ( volatile void* location, word value ) {  \
        return __TBB_machine_fetchstore##S( location, value );                                          \
    }

__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(1)
__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(2)
__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(4)
__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(8)

#undef __TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE
#endif /* __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */
#if __TBB_USE_GENERIC_DWORD_LOAD_STORE
inline void __TBB_machine_store8 (volatile void *ptr, int64_t value) {
    for(;;) {
        int64_t result = *(int64_t *)ptr;
        if( __TBB_machine_cmpswp8(ptr,value,result)==result ) break;
    }
}

inline int64_t __TBB_machine_load8 (const volatile void *ptr) {
    // Comparand and new value may be anything, they only must be equal, and
    // the value should have a low probability to be actually found in 'location'.
    const int64_t anyvalue = 2305843009213693951;
    return __TBB_machine_cmpswp8(const_cast<volatile void *>(ptr),anyvalue,anyvalue);
}
#endif /* __TBB_USE_GENERIC_DWORD_LOAD_STORE */
#if __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE
/** Fenced operations use the volatile qualifier to prevent the compiler from optimizing
    them out, and on architectures with weak memory ordering to induce the compiler
    to generate code with appropriate acquire/release semantics.
    On architectures like IA32 and Intel64 (and likely Sparc TSO) volatile has
    no effect on code generation, and the consistency helpers serve as a compiler fence (the
    latter being true for IA64/gcc as well to fix a bug in some gcc versions). **/
template <typename T, size_t S>
struct machine_load_store {
    static T load_with_acquire ( const volatile T& location ) {
        T to_return = location;
        __TBB_acquire_consistency_helper();
        return to_return;
    }
    static void store_with_release ( volatile T &location, T value ) {
        __TBB_release_consistency_helper();
        location = value;
    }
};

#if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS
template <typename T>
struct machine_load_store<T,8> {
    static T load_with_acquire ( const volatile T& location ) {
        return (T)__TBB_machine_load8( (const volatile void*)&location );
    }
    static void store_with_release ( volatile T& location, T value ) {
        __TBB_machine_store8( (volatile void*)&location, (int64_t)value );
    }
};
#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */
#endif /* __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE */
template <typename T, size_t S>
struct machine_load_store_seq_cst {
    static T load ( const volatile T& location ) {
        __TBB_full_memory_fence();
        return machine_load_store<T,S>::load_with_acquire( location );
    }
#if __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE
    static void store ( volatile T &location, T value ) {
        atomic_selector<S>::fetch_store( (volatile void*)&location, (typename atomic_selector<S>::word)value );
    }
#else /* !__TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */
    static void store ( volatile T &location, T value ) {
        machine_load_store<T,S>::store_with_release( location, value );
        __TBB_full_memory_fence();
    }
#endif /* !__TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */
};
#if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS
/** The implementation does not use functions __TBB_machine_load8/store8 as they
    are not required to be sequentially consistent. **/
template <typename T>
struct machine_load_store_seq_cst<T,8> {
    static T load ( const volatile T& location ) {
        // Comparand and new value may be anything, they only must be equal, and
        // the value should have a low probability to be actually found in 'location'.
        const int64_t anyvalue = 2305843009213693951ll;
        return __TBB_machine_cmpswp8( (volatile void*)const_cast<volatile T*>(&location), anyvalue, anyvalue );
    }
    static void store ( volatile T &location, T value ) {
        int64_t result = (volatile int64_t&)location;
        while ( __TBB_machine_cmpswp8((volatile void*)&location, (int64_t)value, result) != result )
            result = (volatile int64_t&)location;
    }
};
#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */
#if __TBB_USE_GENERIC_RELAXED_LOAD_STORE
// Relaxed operations add the volatile qualifier to prevent the compiler from optimizing them out.
/** Volatile should not incur any additional cost on IA32, Intel64, and Sparc TSO
    architectures. However on architectures with weak memory ordering the compiler may
    generate code with acquire/release semantics for operations on volatile data. **/
template <typename T, size_t S>
struct machine_load_store_relaxed {
    static inline T load ( const volatile T& location ) {
        return location;
    }
    static inline void store ( volatile T& location, T value ) {
        location = value;
    }
};

#if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS
template <typename T>
struct machine_load_store_relaxed<T,8> {
    static inline T load ( const volatile T& location ) {
        return (T)__TBB_machine_load8( (const volatile void*)&location );
    }
    static inline void store ( volatile T& location, T value ) {
        __TBB_machine_store8( (volatile void*)&location, (int64_t)value );
    }
};
#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */
#endif /* __TBB_USE_GENERIC_RELAXED_LOAD_STORE */
template<typename T>
inline T __TBB_load_with_acquire(const volatile T &location) {
    return machine_load_store<T,sizeof(T)>::load_with_acquire( location );
}
template<typename T, typename V>
inline void __TBB_store_with_release(volatile T& location, V value) {
    machine_load_store<T,sizeof(T)>::store_with_release( location, T(value) );
}
//! Overload that exists solely to avoid /Wp64 warnings.
inline void __TBB_store_with_release(volatile size_t& location, size_t value) {
    machine_load_store<size_t,sizeof(size_t)>::store_with_release( location, value );
}

template<typename T>
inline T __TBB_load_full_fence(const volatile T &location) {
    return machine_load_store_seq_cst<T,sizeof(T)>::load( location );
}
template<typename T, typename V>
inline void __TBB_store_full_fence(volatile T& location, V value) {
    machine_load_store_seq_cst<T,sizeof(T)>::store( location, T(value) );
}
//! Overload that exists solely to avoid /Wp64 warnings.
inline void __TBB_store_full_fence(volatile size_t& location, size_t value) {
    machine_load_store_seq_cst<size_t,sizeof(size_t)>::store( location, value );
}

template<typename T>
inline T __TBB_load_relaxed (const volatile T& location) {
    return machine_load_store_relaxed<T,sizeof(T)>::load( const_cast<T&>(location) );
}
template<typename T, typename V>
inline void __TBB_store_relaxed ( volatile T& location, V value ) {
    machine_load_store_relaxed<T,sizeof(T)>::store( const_cast<T&>(location), T(value) );
}
//! Overload that exists solely to avoid /Wp64 warnings.
inline void __TBB_store_relaxed ( volatile size_t& location, size_t value ) {
    machine_load_store_relaxed<size_t,sizeof(size_t)>::store( const_cast<size_t&>(location), value );
}
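
/* Usage sketch (illustrative): the release/acquire publication pattern these
   wrappers are intended for. `payload` and `ready` are hypothetical variables.

       static int payload;
       static volatile uintptr_t ready;    // 0 until the payload is published

       void publish() {
           payload = 42;
           tbb::internal::__TBB_store_with_release( ready, uintptr_t(1) );  // prior writes become visible first
       }

       bool try_consume( int& out ) {
           if( tbb::internal::__TBB_load_with_acquire( ready ) ) {          // later reads cannot move above this load
               out = payload;
               return true;
           }
           return false;
       }
*/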
// Macro __TBB_TypeWithAlignmentAtLeastAsStrict(T) should be a type with alignment at least as
// strict as type T.  The type should have a trivial default constructor and destructor, so that
// arrays of that type can be declared without initializers.
// It is correct (but perhaps a waste of space) if __TBB_TypeWithAlignmentAtLeastAsStrict(T) expands
// to a type bigger than T.
// The default definition here works on machines where integers are naturally aligned and the
// strictest alignment is 64.
#ifndef __TBB_TypeWithAlignmentAtLeastAsStrict

#if __TBB_ATTRIBUTE_ALIGNED_PRESENT

#define __TBB_DefineTypeWithAlignment(PowerOf2)       \
struct __TBB_machine_type_with_alignment_##PowerOf2 { \
    uint32_t member[PowerOf2/sizeof(uint32_t)];       \
} __attribute__((aligned(PowerOf2)));
#define __TBB_alignof(T) __alignof__(T)

#elif __TBB_DECLSPEC_ALIGN_PRESENT

#define __TBB_DefineTypeWithAlignment(PowerOf2)       \
__declspec(align(PowerOf2))                           \
struct __TBB_machine_type_with_alignment_##PowerOf2 { \
    uint32_t member[PowerOf2/sizeof(uint32_t)];       \
};
#define __TBB_alignof(T) __alignof(T)

#else /* A compiler with unknown syntax for data alignment */
#error Must define __TBB_TypeWithAlignmentAtLeastAsStrict(T)
#endif
/* Now declare types aligned to useful powers of two */
// TODO: Is __TBB_DefineTypeWithAlignment(8) needed on 32 bit platforms?
__TBB_DefineTypeWithAlignment(16)
__TBB_DefineTypeWithAlignment(32)
__TBB_DefineTypeWithAlignment(64)

typedef __TBB_machine_type_with_alignment_64 __TBB_machine_type_with_strictest_alignment;

// Primary template is a declaration of incomplete type so that it fails with unknown alignments
template<size_t N> struct type_with_alignment;

// Specializations for allowed alignments
template<> struct type_with_alignment<1> { char member; };
template<> struct type_with_alignment<2> { uint16_t member; };
template<> struct type_with_alignment<4> { uint32_t member; };
template<> struct type_with_alignment<8> { uint64_t member; };
template<> struct type_with_alignment<16> {__TBB_machine_type_with_alignment_16 member; };
template<> struct type_with_alignment<32> {__TBB_machine_type_with_alignment_32 member; };
template<> struct type_with_alignment<64> {__TBB_machine_type_with_alignment_64 member; };
#if __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN
//! Work around for bug in GNU 3.2 and MSVC compilers.
/** Bug is that compiler sometimes returns 0 for __alignof(T) when T has not yet been instantiated.
    The work-around forces instantiation by forcing computation of sizeof(T) before __alignof(T). */
template<size_t Size, typename T>
struct work_around_alignment_bug {
    static const size_t alignment = __TBB_alignof(T);
};
#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment<tbb::internal::work_around_alignment_bug<sizeof(T),T>::alignment>
#else
#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment<__TBB_alignof(T)>
#endif /* __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN */

#endif /* __TBB_TypeWithAlignmentAtLeastAsStrict */
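
/* Usage sketch (illustrative): reserving raw storage that is aligned at least
   as strictly as a given type, without constructing an object in it; `my_type`
   is hypothetical. tbb::aligned_space is built on the same idea.

       typedef __TBB_TypeWithAlignmentAtLeastAsStrict(my_type) element_t;
       static element_t storage[ (sizeof(my_type)+sizeof(element_t)-1) / sizeof(element_t) ];
       my_type* obj = new( (void*)storage ) my_type();   // placement new into aligned storage
*/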
// Template class here is to avoid instantiation of the static data for modules that don't use it
template<typename T>
struct reverse {
    static const T byte_table[256];
};
// An efficient implementation of the reverse function utilizes a 2^8 lookup table holding the bit-reversed
// values of [0..2^8 - 1]. Those values can also be computed on the fly at a slightly higher cost.
template<typename T>
const T reverse<T>::byte_table[256] = {
    0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,
    0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,
    0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
    0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,
    0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,
    0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
    0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,
    0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,
    0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
    0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,
    0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,
    0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
    0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
    0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,
    0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
    0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF
};

} // namespace internal
} // namespace tbb
// Preserving access to legacy APIs
using tbb::internal::__TBB_load_with_acquire;
using tbb::internal::__TBB_store_with_release;

// Mapping historically used names to the ones expected by atomic_load_store_traits
#define __TBB_load_acquire  __TBB_load_with_acquire
#define __TBB_store_release __TBB_store_with_release
#ifndef __TBB_Log2
inline intptr_t __TBB_Log2( uintptr_t x ) {
    if( x==0 ) return -1;
    intptr_t result = 0;
    uintptr_t tmp;
#if __TBB_WORDSIZE>=8
    if( (tmp = x>>32) ) { x=tmp; result += 32; }
#endif
    if( (tmp = x>>16) ) { x=tmp; result += 16; }
    if( (tmp = x>>8) )  { x=tmp; result += 8; }
    if( (tmp = x>>4) )  { x=tmp; result += 4; }
    if( (tmp = x>>2) )  { x=tmp; result += 2; }
    return (x&2)? result+1: result;
}
#endif
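
/* Worked example: __TBB_Log2(40), 40 == 101000 in binary.
       40>>16 == 0 and 40>>8 == 0      -> no change
       40>>4  == 2 (binary 10)         -> x = 2, result = 4
       2>>2   == 0                     -> no change
       x & 2  != 0                     -> return result+1 == 5, i.e. floor(log2(40))
   The shift chain is a branchy binary search for the highest set bit; ports
   typically override __TBB_Log2 with a single BSR/CLZ-style instruction. */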
#ifndef __TBB_AtomicOR
inline void __TBB_AtomicOR( volatile void *operand, uintptr_t addend ) {
    tbb::internal::atomic_backoff b;
    for(;;) {
        uintptr_t tmp = *(volatile uintptr_t *)operand;
        uintptr_t result = __TBB_CompareAndSwapW(operand, tmp|addend, tmp);
        if( result==tmp ) break;
        b.pause();
    }
}
#endif

#ifndef __TBB_AtomicAND
inline void __TBB_AtomicAND( volatile void *operand, uintptr_t addend ) {
    tbb::internal::atomic_backoff b;
    for(;;) {
        uintptr_t tmp = *(volatile uintptr_t *)operand;
        uintptr_t result = __TBB_CompareAndSwapW(operand, tmp&addend, tmp);
        if( result==tmp ) break;
        b.pause();
    }
}
#endif
typedef unsigned char __TBB_Flag;

typedef __TBB_atomic __TBB_Flag __TBB_atomic_flag;

#ifndef __TBB_TryLockByte
inline bool __TBB_TryLockByte( __TBB_atomic_flag &flag ) {
    return __TBB_machine_cmpswp1(&flag,1,0)==0;
}
#endif
#ifndef __TBB_LockByte
inline __TBB_Flag __TBB_LockByte( __TBB_atomic_flag& flag ) {
    if ( !__TBB_TryLockByte(flag) ) {
        tbb::internal::atomic_backoff b;
        do {
            b.pause();
        } while ( !__TBB_TryLockByte(flag) );
    }
    return 0;
}
#endif

#define __TBB_UnlockByte __TBB_store_with_release
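
/* Usage sketch (illustrative): a minimal test-and-set spin lock built from the
   byte-lock primitives above; `lock_flag` is a hypothetical, zero-initialized
   (i.e. unlocked) flag.

       static __TBB_atomic_flag lock_flag;

       void critical_section() {
           __TBB_LockByte( lock_flag );      // spins with exponential backoff until acquired
           // ... protected work ...
           __TBB_UnlockByte( lock_flag, 0 ); // store-with-release of 0 releases the lock
       }

   __TBB_TryLockByte can be used instead when the caller prefers to fail fast
   rather than wait. */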
#ifndef __TBB_ReverseByte
inline unsigned char __TBB_ReverseByte(unsigned char src) {
    return tbb::internal::reverse<unsigned char>::byte_table[src];
}
#endif
template<typename T>
T __TBB_ReverseBits(T src)
{
    T dst;
    unsigned char *original = (unsigned char *) &src;
    unsigned char *reversed = (unsigned char *) &dst;

    for( int i = sizeof(T)-1; i >= 0; i-- )
        reversed[i] = __TBB_ReverseByte( original[sizeof(T)-i-1] );

    return dst;
}
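
/* Worked example: with T == uint16_t, __TBB_ReverseBits(0x12A0) yields 0x0548:
   0x12A0 is 0001 0010 1010 0000 in binary; reading the bits back to front gives
   0000 0101 0100 1000, i.e. 0x0548. Each byte is bit-reversed via byte_table and
   the byte order is swapped, so the result is independent of endianness. */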
#endif /* __TBB_machine_H */