+
+/// Wrappers for systems where the C++17 implementation doesn't guarantee the
+/// availability of aligned_alloc(). Memory allocated with std_aligned_alloc()
+/// must be freed with std_aligned_free().
+///
+
+void* std_aligned_alloc(size_t alignment, size_t size) {
+
+#if defined(__APPLE__)
+  return aligned_alloc(alignment, size);
+#elif defined(_WIN32)
+  return _mm_malloc(size, alignment);
+#else
+  return std::aligned_alloc(alignment, size);
+#endif
+}
+
+void std_aligned_free(void* ptr) {
+
+#if defined(__APPLE__)
+  free(ptr);
+#elif defined(_WIN32)
+  _mm_free(ptr);
+#else
+  free(ptr);
+#endif
+}
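+
+// Illustrative usage (not part of the API): the alloc/free pair must match,
+// because memory from _mm_malloc() cannot be released with plain free(). Also
+// note that std::aligned_alloc() expects a size that is an integral multiple
+// of the alignment, so portable callers should round the size up first:
+//
+//   size_t bytes = (wanted + 63) & ~size_t(63); // round up to a 64-byte multiple
+//   void* buf = std_aligned_alloc(64, bytes);
+//   if (buf)
+//   {
+//       ... use buf ...
+//       std_aligned_free(buf);
+//   }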
+
+/// aligned_ttmem_alloc() will return suitably aligned memory, and if possible use large pages.
+/// The returned pointer is the aligned one, while the mem argument is the one that needs
+/// to be passed to aligned_ttmem_free(). With C++17 some of this functionality could be simplified.
+
+#if defined(__linux__) && !defined(__ANDROID__)
+
+void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {
+
+  constexpr size_t alignment = 2 * 1024 * 1024;                        // assumed 2MB page size
+  size_t size = ((allocSize + alignment - 1) / alignment) * alignment; // round up to a multiple of alignment
+  if (posix_memalign(&mem, alignment, size))
+      mem = nullptr;
+  else
+      madvise(mem, size, MADV_HUGEPAGE); // advise the kernel to back this range with huge pages
+  return mem;
+}
+
+#elif defined(_WIN64)
+
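+// Note: allocating with MEM_LARGE_PAGES requires SeLockMemoryPrivilege, which
+// is granted only to accounts holding the "Lock pages in memory" user right
+// (e.g. assigned through the Local Security Policy). On a default installation
+// the privilege is absent, this function returns nullptr, and the caller falls
+// back to regular pages.
+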
+static void* aligned_ttmem_alloc_large_pages(size_t allocSize) {
+
+  HANDLE hProcessToken { };
+  LUID luid { };
+  void* mem = nullptr;
+
+  const size_t largePageSize = GetLargePageMinimum();
+  if (!largePageSize)
+      return nullptr;
+
+  // We need SeLockMemoryPrivilege, so try to enable it for the process
+  if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hProcessToken))
+      return nullptr;
+
+  if (LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &luid))
+  {
+      TOKEN_PRIVILEGES tp { };
+      TOKEN_PRIVILEGES prevTp { };
+      DWORD prevTpLen = 0;
+
+      tp.PrivilegeCount = 1;
+      tp.Privileges[0].Luid = luid;
+      tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
+
+      // Try to enable SeLockMemoryPrivilege. Note that even if AdjustTokenPrivileges() succeeds,
+      // we still need to query GetLastError() to ensure that the privileges were actually obtained.
+      if (AdjustTokenPrivileges(
+              hProcessToken, FALSE, &tp, sizeof(TOKEN_PRIVILEGES), &prevTp, &prevTpLen) &&
+          GetLastError() == ERROR_SUCCESS)
+      {
+          // Round up size to full pages and allocate
+          allocSize = (allocSize + largePageSize - 1) & ~size_t(largePageSize - 1);
+          mem = VirtualAlloc(
+              NULL, allocSize, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE);
+
+          // Privilege no longer needed, restore previous state
+          AdjustTokenPrivileges(hProcessToken, FALSE, &prevTp, 0, NULL, NULL);
+      }
+  }
+
+  CloseHandle(hProcessToken);
+
+  return mem;
+}
+
+void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {
+
+  static bool firstCall = true;
+
+  // Try to allocate large pages
+  mem = aligned_ttmem_alloc_large_pages(allocSize);
+
+  // Suppress info strings on the first call. The first call occurs before 'uci'
+  // is received and in that case this output confuses some GUIs.
+  if (!firstCall)
+  {
+      if (mem)
+          sync_cout << "info string Hash table allocation: Windows large pages used." << sync_endl;
+      else
+          sync_cout << "info string Hash table allocation: Windows large pages not used." << sync_endl;
+  }
+  firstCall = false;
+
+  // Fall back to regular, page-aligned allocation if necessary
+  if (!mem)
+      mem = VirtualAlloc(NULL, allocSize, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+
+  return mem;
+}
+
+#else
+
+void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {
+
+  constexpr size_t alignment = 64;         // assumed cache line size
+  size_t size = allocSize + alignment - 1; // allocate some extra space
+  mem = malloc(size);
+
+  // Round the raw pointer up to the next alignment boundary. Since 'mem' keeps
+  // the original malloc() pointer, free(mem) in aligned_ttmem_free() stays valid.
+  void* ret = reinterpret_cast<void*>((uintptr_t(mem) + alignment - 1) & ~uintptr_t(alignment - 1));
+  return ret;
+}
+
+#endif
+
+
+/// aligned_ttmem_free() will free the previously allocated ttmem
+
+#if defined(_WIN64)
+
+void aligned_ttmem_free(void* mem) {
+
+  if (mem && !VirtualFree(mem, 0, MEM_RELEASE))
+  {
+      DWORD err = GetLastError();
+      std::cerr << "Failed to free transposition table. Error code: 0x"
+                << std::hex << err << std::dec << std::endl;
+      exit(EXIT_FAILURE);
+  }
+}
+
+#else
+
+void aligned_ttmem_free(void* mem) {
+  free(mem);
+}
+
+#endif
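+
+// Illustrative lifecycle (not part of this patch): the aligned pointer is used
+// for table accesses, while 'mem' is what must be handed back to
+// aligned_ttmem_free():
+//
+//   void* mem;
+//   void* table = aligned_ttmem_alloc(32 * 1024 * 1024, mem); // e.g. a 32MB table
+//   if (table)
+//   {
+//       ... read and write the table through 'table' ...
+//   }
+//   aligned_ttmem_free(mem);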
+
+
+namespace WinProcGroup {
+
+#ifndef _WIN32
+
+void bindThisThread(size_t) {}
+
+#else
+
+/// best_group() retrieves logical processor information using Windows-specific
+/// API and returns the best group id for the thread with index idx. Original
+/// code from Texel by Peter Österlund.
+
+int best_group(size_t idx) {
+
+  int threads = 0;
+  int nodes = 0;
+  int cores = 0;
+  DWORD returnLength = 0;
+  DWORD byteOffset = 0;
+
+  // Early exit if the needed API is not available at runtime
+  HMODULE k32 = GetModuleHandle("Kernel32.dll");
+  auto fun1 = (fun1_t)(void(*)())GetProcAddress(k32, "GetLogicalProcessorInformationEx");
+  if (!fun1)
+      return -1;
+
+  // First call to get returnLength. We expect it to fail due to the null buffer
+  if (fun1(RelationAll, nullptr, &returnLength))
+      return -1;
+
+  // Once we know returnLength, allocate the buffer
+  SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *buffer, *ptr;
+  ptr = buffer = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX*)malloc(returnLength);
+  if (!buffer)
+      return -1;
+
+  // Second call, now we expect to succeed
+  if (!fun1(RelationAll, buffer, &returnLength))
+  {
+      free(buffer);
+      return -1;
+  }
+
+  // Walk the variable-sized records, counting NUMA nodes, physical cores and
+  // logical threads (an SMT core is assumed to contribute two threads)
+  while (byteOffset < returnLength)
+  {
+      if (ptr->Relationship == RelationNumaNode)
+          nodes++;
+
+      else if (ptr->Relationship == RelationProcessorCore)
+      {
+          cores++;
+          threads += (ptr->Processor.Flags == LTP_PC_SMT) ? 2 : 1;
+      }
+
+      assert(ptr->Size);
+      byteOffset += ptr->Size;
+      ptr = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX*)(((char*)ptr) + ptr->Size);
+  }
+
+  free(buffer);
+
+  std::vector<int> groups;
+
+  // Run as many threads as possible on the same node until the core limit is
+  // reached, then move on to fill the next node.
+  for (int n = 0; n < nodes; n++)
+      for (int i = 0; i < cores / nodes; i++)
+          groups.push_back(n);
+
+  // In case a core has more than one logical processor (we assume 2) and we
+  // still have threads to allocate, spread them evenly across the available
+  // nodes.
+  for (int t = 0; t < threads - cores; t++)
+      groups.push_back(t % nodes);
+
+  // If we still have more threads than the total number of logical processors,
+  // return -1 and let the OS decide what to do.
+  return idx < groups.size() ? groups[idx] : -1;
+}
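+
+// Worked example (hypothetical machine): with 2 NUMA nodes, 16 physical cores
+// and SMT enabled (32 logical threads), the loops above build
+//
+//   groups = { 0,0,0,0,0,0,0,0, 1,1,1,1,1,1,1,1, 0,1,0,1, ..., 0,1 }
+//
+// so the first 8 threads fill node 0, the next 8 fill node 1, and the 16
+// remaining SMT threads alternate between the two nodes. Any idx >= 32
+// returns -1 and the OS schedules the thread freely.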
+
+
+/// bindThisThread() sets the group affinity of the current thread
+
+void bindThisThread(size_t idx) {
+
+  // Use only local variables to be thread-safe
+  int group = best_group(idx);
+
+  if (group == -1)
+      return;
+
+  // Early exit if the needed APIs are not available at runtime
+  HMODULE k32 = GetModuleHandle("Kernel32.dll");
+  auto fun2 = (fun2_t)(void(*)())GetProcAddress(k32, "GetNumaNodeProcessorMaskEx");
+  auto fun3 = (fun3_t)(void(*)())GetProcAddress(k32, "SetThreadGroupAffinity");
+
+  if (!fun2 || !fun3)
+      return;
+
+  GROUP_AFFINITY affinity;
+  if (fun2(group, &affinity))
+      fun3(GetCurrentThread(), &affinity, nullptr);
+}
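+
+// Illustrative call site (hypothetical): a thread pool would typically invoke
+// this from each worker's entry function, passing the worker's index, so that
+// threads spread across processor groups before doing any work:
+//
+//   // inside the worker thread, where 'idx' is this thread's index
+//   WinProcGroup::bindThisThread(idx);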
+
+#endif
+
+} // namespace WinProcGroup