// Cache line alignment specification
//
// Aligning hot, shared data structures to a 64-byte cache line avoids
// false sharing between threads and split-line accesses.
#if defined(_MSC_VER) || defined(__INTEL_COMPILER)
#define CACHE_LINE_ALIGNMENT __declspec(align(64))
#else
// GCC/Clang attribute syntax.
// NOTE(review): the Intel compiler on Linux/macOS also accepts this GCC
// attribute form — confirm __INTEL_COMPILER is only expected to take the
// __declspec branch on Windows builds.
#define CACHE_LINE_ALIGNMENT __attribute__ ((aligned(64)))
#endif
+
// Define a __cpuid() function for gcc compilers, for Intel and MSVC
// is already available as an intrinsic.
#if defined(_MSC_VER)
#include <intrin.h>
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
// Fills CPUInfo[0..3] with the EAX/EBX/ECX/EDX values returned by the
// CPUID instruction for leaf 'InfoType' (sub-leaf 0), mirroring the
// MSVC intrinsic of the same name.
inline void __cpuid(int CPUInfo[4], int InfoType)
{
#if defined(__i386__) && defined(__PIC__)
  // In 32-bit PIC code EBX is reserved for the GOT pointer, so it must
  // not appear as an asm output ("=b" would fail to compile or clobber
  // the GOT register). Save and restore EBX around CPUID instead.
  int ebx_val;
  __asm__("xchgl %%ebx, %1\n\t"
          "cpuid\n\t"
          "xchgl %%ebx, %1"
          : "=a" (CPUInfo[0]), "=&r" (ebx_val), "=c" (CPUInfo[2]), "=d" (CPUInfo[3])
          : "0" (InfoType), "2" (0));
  CPUInfo[1] = ebx_val;
#else
  __asm__("cpuid"
          : "=a" (CPUInfo[0]), "=b" (CPUInfo[1]), "=c" (CPUInfo[2]), "=d" (CPUInfo[3])
          : "0" (InfoType), "2" (0));
#endif
}
#else
// Fallback for platforms without CPUID: report all-zero feature words.
inline void __cpuid(int CPUInfo[4], int)
{
  CPUInfo[0] = CPUInfo[1] = CPUInfo[2] = CPUInfo[3] = 0;
}
#endif
+
+
// Templatized arithmetic operations on enum types, so the same inline
// boilerplate need not be repeated for every enum we define. Each
// operator converts through int and casts the result back to T.

template<typename T>
inline T operator+ (const T lhs, const T rhs) {
  return T(int(lhs) + int(rhs));
}

template<typename T>
inline T operator- (const T lhs, const T rhs) {
  return T(int(lhs) - int(rhs));
}

template<typename T>
inline T operator* (int scale, const T value) {
  return T(scale * int(value));
}

template<typename T>
inline T operator/ (const T value, int divisor) {
  return T(int(value) / divisor);
}

template<typename T>
inline T operator- (const T value) {
  return T(-int(value));
}

template<typename T>
inline void operator++ (T& value, int) {
  value = T(int(value) + 1);
}

template<typename T>
inline void operator-- (T& value, int) {
  value = T(int(value) - 1);
}

template<typename T>
inline void operator+= (T& lhs, const T rhs) {
  lhs = lhs + rhs;
}

template<typename T>
inline void operator*= (T& value, int scale) {
  value = T(int(value) * scale);
}

template<typename T>
inline void operator/= (T& value, int divisor) {
  value = T(int(value) / divisor);
}
+