/*
    Copyright 2005-2011 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

    As a special exception, you may use this file as part of a free software
    library without restriction.  Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License.  This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/

#if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_ia32_H)
#error Do not include this file directly; include tbb_machine.h instead
#endif

#define __TBB_machine_linux_ia32_H

#include <stdint.h>
#include <unistd.h>

#define __TBB_WORDSIZE 4
#define __TBB_BIG_ENDIAN 0

#define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory")
#define __TBB_control_consistency_helper() __TBB_compiler_fence()
#define __TBB_acquire_consistency_helper() __TBB_compiler_fence()
#define __TBB_release_consistency_helper() __TBB_compiler_fence()
#define __TBB_full_memory_fence()          __asm__ __volatile__("mfence": : :"memory")

#if __TBB_ICC_ASM_VOLATILE_BROKEN
#define __TBB_VOLATILE
#else
#define __TBB_VOLATILE volatile
#endif

#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,X,R)                                        \
static inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand )  \
{                                                                                    \
    T result;                                                                        \
                                                                                     \
    __asm__ __volatile__("lock\ncmpxchg" X " %2,%1"                                  \
                          : "=a"(result), "=m"(*(__TBB_VOLATILE T*)ptr)              \
                          : "q"(value), "0"(comparand), "m"(*(__TBB_VOLATILE T*)ptr) \
                          : "memory");                                               \
    return result;                                                                   \
}                                                                                    \
                                                                                     \
static inline T __TBB_machine_fetchadd##S(volatile void *ptr, T addend)              \
{                                                                                    \
    T result;                                                                        \
    __asm__ __volatile__("lock\nxadd" X " %0,%1"                                     \
                          : R (result), "=m"(*(__TBB_VOLATILE T*)ptr)                \
                          : "0"(addend), "m"(*(__TBB_VOLATILE T*)ptr)                \
                          : "memory");                                               \
    return result;                                                                   \
}                                                                                    \
                                                                                     \
static inline T __TBB_machine_fetchstore##S(volatile void *ptr, T value)             \
{                                                                                    \
    T result;                                                                        \
    __asm__ __volatile__("lock\nxchg" X " %0,%1"                                     \
                          : R (result), "=m"(*(__TBB_VOLATILE T*)ptr)                \
                          : "0"(value), "m"(*(__TBB_VOLATILE T*)ptr)                 \
                          : "memory");                                               \
    return result;                                                                   \
}

__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t,"","=q")
__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t,"","=r")
__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t,"l","=r")
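
// Usage sketch (illustrative only; these helpers are internal TBB plumbing and
// are normally reached through tbb::atomic<T> rather than called directly).
// The generated functions follow the usual conventions: cmpswp returns the
// value observed before the (possibly failed) exchange, while fetchadd and
// fetchstore return the value seen before the add/store.
//
//     int32_t counter = 0;
//     // store 5 only if counter still equals 0; returns the previous value
//     int32_t old  = __TBB_machine_cmpswp4(&counter, /*value*/5, /*comparand*/0);
//     // atomically add 1; returns the value before the addition
//     int32_t prev = __TBB_machine_fetchadd4(&counter, 1);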

#if __INTEL_COMPILER
#pragma warning( push )
// reference to EBX in a function requiring stack alignment
#pragma warning( disable: 998 )
#endif
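
// A note on the 8-byte CAS below: cmpxchg8b compares EDX:EAX against the
// 8-byte memory operand and, on a match, stores ECX:EBX into it.  The "A"
// constraint binds the 64-bit comparand/result to EDX:EAX, while the new
// value is split through the union: i32[0] (the low half, since IA-32 is
// little-endian) goes to EBX and i32[1] (the high half) to ECX.  Under -fPIC,
// EBX holds the GOT pointer, so the PIC branch saves and restores it by hand
// around the instruction.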

static inline int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand )
{
    int64_t result;
    union {
        int64_t i64;
        int32_t i32[2];
    };
    i64 = value;
#if __PIC__
    // Compiling position-independent code: EBX must be preserved by hand on IA32.
    int32_t tmp;
    __asm__ __volatile__ (
            "movl  %%ebx,%2\n\t"
            "movl  %5,%%ebx\n\t"
#if __GNUC__==3
            "lock\n\t cmpxchg8b %1\n\t"
#else
            "lock\n\t cmpxchg8b (%3)\n\t"
#endif
            "movl  %2,%%ebx"
             : "=A"(result)
             , "=m"(*(__TBB_VOLATILE int64_t *)ptr)
             , "=m"(tmp)
#if __GNUC__==3
             : "m"(*(__TBB_VOLATILE int64_t *)ptr)
#else
             : "SD"(ptr)
#endif
             , "0"(comparand)
             , "m"(i32[0]), "c"(i32[1])
             : "memory"
#if __INTEL_COMPILER
             ,"ebx"
#endif
    );
#else /* !__PIC__ */
    __asm__ __volatile__ (
            "lock\n\t cmpxchg8b %1\n\t"
             : "=A"(result), "=m"(*(__TBB_VOLATILE int64_t *)ptr)
             : "m"(*(__TBB_VOLATILE int64_t *)ptr)
             , "0"(comparand)
             , "b"(i32[0]), "c"(i32[1])
             : "memory"
    );
#endif /* __PIC__ */
    return result;
}

#if __INTEL_COMPILER
#pragma warning( pop )
#endif // __INTEL_COMPILER: warning 998 re-enabled

static inline int32_t __TBB_machine_lg( uint32_t x ) {
    int32_t j;
    __asm__ ("bsr %1,%0" : "=r"(j) : "r"(x));
    return j;
}
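
// bsr yields the index of the highest set bit, i.e. floor(log2(x)); the result
// is undefined for x==0, so callers must pass a non-zero argument.  For example:
//     __TBB_machine_lg(1) == 0,  __TBB_machine_lg(8) == 3,  __TBB_machine_lg(1000) == 9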

static inline void __TBB_machine_or( volatile void *ptr, uint32_t addend ) {
    __asm__ __volatile__("lock\norl %1,%0"
                          : "=m"(*(__TBB_VOLATILE uint32_t *)ptr)
                          : "r"(addend), "m"(*(__TBB_VOLATILE uint32_t *)ptr)
                          : "memory");
}

static inline void __TBB_machine_and( volatile void *ptr, uint32_t addend ) {
    __asm__ __volatile__("lock\nandl %1,%0"
                          : "=m"(*(__TBB_VOLATILE uint32_t *)ptr)
                          : "r"(addend), "m"(*(__TBB_VOLATILE uint32_t *)ptr)
                          : "memory");
}
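
// Sketch of typical use (illustrative): each call compiles to a single
// lock-prefixed read-modify-write, suitable for manipulating shared flag words:
//     uint32_t flags = 0;
//     __TBB_machine_or(&flags, 0x4u);    // atomically set bit 2
//     __TBB_machine_and(&flags, ~0x4u);  // atomically clear bit 2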

static inline void __TBB_machine_pause( int32_t delay ) {
    for (int32_t i = 0; i < delay; i++) {
       __asm__ __volatile__("pause");
    }
}
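
// Sketch of the intended use (illustrative; flag_is_set() is a hypothetical
// predicate): pause relaxes the pipeline inside spin-wait loops, saving power
// and yielding execution resources to a sibling hyper-thread:
//     int32_t backoff = 1;
//     while( !flag_is_set() ) {
//         __TBB_machine_pause(backoff);
//         if( backoff < 16 ) backoff *= 2;   // exponential backoff
//     }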

static inline int64_t __TBB_machine_load8 (const volatile void *ptr) {
    int64_t result;
    if( ((uint32_t)ptr&7u)==0 ) {
        // Aligned load
        __asm__ __volatile__ ( "fildq %1\n\t"
                               "fistpq %0" :  "=m"(result) : "m"(*(const __TBB_VOLATILE uint64_t*)ptr) : "memory" );
    } else {
        // Unaligned load
        result = __TBB_machine_cmpswp8(const_cast<void*>(ptr),0,0);
    }
    return result;
}

//! Handles misaligned 8-byte store
/** Defined in tbb_misc.cpp */
extern "C" void __TBB_machine_store8_slow( volatile void *ptr, int64_t value );
extern "C" void __TBB_machine_store8_slow_perf_warning( volatile void *ptr );

static inline void __TBB_machine_store8(volatile void *ptr, int64_t value) {
    if( ((uint32_t)ptr&7u)==0 ) {
        // Aligned store
        __asm__ __volatile__ ( "fildq %1\n\t"
                               "fistpq %0" :  "=m"(*(__TBB_VOLATILE int64_t*)ptr) : "m"(value) : "memory" );
    } else {
        // Unaligned store
#if TBB_USE_PERFORMANCE_WARNINGS
        __TBB_machine_store8_slow_perf_warning(ptr);
#endif /* TBB_USE_PERFORMANCE_WARNINGS */
        __TBB_machine_store8_slow(ptr,value);
    }
}
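
// Why fildq/fistpq above: the x87 load/store moves all 8 bytes in a single
// memory access, which IA-32 guarantees to be atomic for naturally aligned
// 8-byte operands, whereas plain 32-bit integer code would need two accesses.
// Misaligned addresses take the slow path instead: cmpxchg8b for loads and
// the out-of-line __TBB_machine_store8_slow for stores.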

// Machine specific atomic operations
#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V)
#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V)

// Definition of other functions
#define __TBB_Pause(V) __TBB_machine_pause(V)
#define __TBB_Log2(V)  __TBB_machine_lg(V)

#define __TBB_USE_GENERIC_DWORD_FETCH_ADD           1
#define __TBB_USE_GENERIC_DWORD_FETCH_STORE         1
#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE   1
#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE    1
#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE        1

// API to retrieve/update FPU control setting
#define __TBB_CPU_CTL_ENV_PRESENT 1

struct __TBB_cpu_ctl_env_t {
    int     mxcsr;
    short   x87cw;
};

inline void __TBB_get_cpu_ctl_env ( __TBB_cpu_ctl_env_t* ctl ) {
    __asm__ __volatile__ (
            "stmxcsr %0\n\t"
            "fstcw   %1"
            : "=m"(ctl->mxcsr), "=m"(ctl->x87cw)
    );
}
inline void __TBB_set_cpu_ctl_env ( const __TBB_cpu_ctl_env_t* ctl ) {
    __asm__ __volatile__ (
            "ldmxcsr %0\n\t"
            "fldcw   %1"
            : : "m"(ctl->mxcsr), "m"(ctl->x87cw)
    );
}
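
// Usage sketch (illustrative): capture the FPU/SSE control state, run code
// that may change rounding modes or exception masks, then restore it:
//     __TBB_cpu_ctl_env_t saved;
//     __TBB_get_cpu_ctl_env(&saved);
//     // ... code that may alter MXCSR or the x87 control word ...
//     __TBB_set_cpu_ctl_env(&saved);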
236 }