X-Git-Url: https://git.sesse.net/?p=stockfish;a=blobdiff_plain;f=src%2Fmisc.cpp;h=5061ae13a1742bb4f4b10fd8c306164ccb63787d;hp=0bae9f1e0e9b6cf0a24364c46a1985172f481dc6;hb=cb0504028e8830dbc71be53cbd701d78c3d562a1;hpb=39437f4e55aaa26ef9f0d5a1c762e560e9ffde32

diff --git a/src/misc.cpp b/src/misc.cpp
index 0bae9f1e..5061ae13 100644
--- a/src/misc.cpp
+++ b/src/misc.cpp
@@ -1,8 +1,6 @@
 /*
   Stockfish, a UCI chess playing engine derived from Glaurung 2.1
-  Copyright (C) 2004-2008 Tord Romstad (Glaurung author)
-  Copyright (C) 2008-2015 Marco Costalba, Joona Kiiski, Tord Romstad
-  Copyright (C) 2015-2020 Marco Costalba, Joona Kiiski, Gary Linscott, Tord Romstad
+  Copyright (C) 2004-2020 The Stockfish developers (see AUTHORS file)
 
   Stockfish is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
@@ -46,8 +44,9 @@ typedef bool(*fun3_t)(HANDLE, CONST GROUP_AFFINITY*, PGROUP_AFFINITY);
 #include 
 #include 
 #include 
+#include 
 
-#ifdef __linux__
+#if defined(__linux__) && !defined(__ANDROID__)
 #include 
 #include 
 #endif
@@ -147,10 +146,8 @@ const string engine_info(bool to_uci) {
       ss << setw(2) << day << setw(2) << (1 + months.find(month) / 4) << year.substr(2);
   }
 
-  ss << (Is64Bit ? " 64" : "")
-     << (HasPext ? " BMI2" : (HasPopCnt ? " POPCNT" : ""))
-     << (to_uci ? "\nid author ": " by ")
-     << "T. Romstad, M. Costalba, J. Kiiski, G. Linscott";
+  ss << (to_uci ? "\nid author ": " by ")
+     << "the Stockfish developers (see AUTHORS file)";
 
   return ss.str();
 }
@@ -215,7 +212,27 @@ const std::string compiler_info() {
   compiler += " on unknown system";
 #endif
 
-  compiler += "\n __VERSION__ macro expands to: ";
+  compiler += "\nCompilation settings include: ";
+  compiler += (Is64Bit ? " 64bit" : " 32bit");
+  #if defined(USE_AVX512)
+    compiler += " AVX512";
+  #endif
+  #if defined(USE_AVX2)
+    compiler += " AVX2";
+  #endif
+  #if defined(USE_SSE41)
+    compiler += " SSE41";
+  #endif
+  #if defined(USE_SSSE3)
+    compiler += " SSSE3";
+  #endif
+  compiler += (HasPext ? " BMI2" : "");
+  compiler += (HasPopCnt ? " POPCNT" : "");
+  #if !defined(NDEBUG)
+    compiler += " DEBUG";
+  #endif
+
+  compiler += "\n__VERSION__ macro expands to: ";
 #ifdef __VERSION__
   compiler += __VERSION__;
 #else
@@ -293,35 +310,156 @@ void prefetch(void* addr) {
 
 #endif
 
 
+/// Wrappers for systems where the c++17 implementation doesn't guarantee the availability of aligned_alloc.
+/// Memory allocated with std_aligned_alloc must be freed with std_aligned_free.
+///
+
+void* std_aligned_alloc(size_t alignment, size_t size) {
+#if (defined(__APPLE__) && defined(_LIBCPP_HAS_C11_FEATURES)) || defined(__ANDROID__) || defined(__OpenBSD__) || (defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC) && !defined(_WIN32))
+  return aligned_alloc(alignment, size);
+#elif (defined(_WIN32) || (defined(__APPLE__) && !defined(_LIBCPP_HAS_C11_FEATURES)))
+  return _mm_malloc(size, alignment);
+#else
+  return std::aligned_alloc(alignment, size);
+#endif
+}
 
-/// aligned_ttmem_alloc will return suitably aligned memory, and if possible use large pages.
-/// The returned pointer is the aligned one, while the mem argument is the one that needs to be passed to free.
-/// With c++17 some of this functionality can be simplified.
-#ifdef __linux__
 
+void std_aligned_free(void* ptr) {
+#if (defined(__APPLE__) && defined(_LIBCPP_HAS_C11_FEATURES)) || defined(__ANDROID__) || defined(__OpenBSD__) || (defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC) && !defined(_WIN32))
+  free(ptr);
+#elif (defined(_WIN32) || (defined(__APPLE__) && !defined(_LIBCPP_HAS_C11_FEATURES)))
+  _mm_free(ptr);
+#else
+  free(ptr);
+#endif
+}
 
-void* aligned_ttmem_alloc(size_t allocSize, void** mem) {
+/// aligned_ttmem_alloc() will return suitably aligned memory, and if possible use large pages.
+/// The returned pointer is the aligned one, while the mem argument is the one that needs
+/// to be passed to free. With c++17 some of this functionality could be simplified.
+
+#if defined(__linux__) && !defined(__ANDROID__)
+
+void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {
 
   constexpr size_t alignment = 2 * 1024 * 1024; // assumed 2MB page sizes
   size_t size = ((allocSize + alignment - 1) / alignment) * alignment; // multiple of alignment
-  *mem = aligned_alloc(alignment, size);
-  madvise(*mem, allocSize, MADV_HUGEPAGE);
-  return *mem;
+  if (posix_memalign(&mem, alignment, size))
+      mem = nullptr;
+  madvise(mem, allocSize, MADV_HUGEPAGE);
+  return mem;
+}
+
+#elif defined(_WIN64)
+
+static void* aligned_ttmem_alloc_large_pages(size_t allocSize) {
+
+  HANDLE hProcessToken { };
+  LUID luid { };
+  void* mem = nullptr;
+
+  const size_t largePageSize = GetLargePageMinimum();
+  if (!largePageSize)
+      return nullptr;
+
+  // We need SeLockMemoryPrivilege, so try to enable it for the process
+  if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hProcessToken))
+      return nullptr;
+
+  if (LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &luid))
+  {
+      TOKEN_PRIVILEGES tp { };
+      TOKEN_PRIVILEGES prevTp { };
+      DWORD prevTpLen = 0;
+
+      tp.PrivilegeCount = 1;
+      tp.Privileges[0].Luid = luid;
+      tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
+
+      // Try to enable SeLockMemoryPrivilege. Note that even if AdjustTokenPrivileges() succeeds,
+      // we still need to query GetLastError() to ensure that the privileges were actually obtained.
+      if (AdjustTokenPrivileges(
+              hProcessToken, FALSE, &tp, sizeof(TOKEN_PRIVILEGES), &prevTp, &prevTpLen) &&
+          GetLastError() == ERROR_SUCCESS)
+      {
+          // Round up size to full pages and allocate
+          allocSize = (allocSize + largePageSize - 1) & ~size_t(largePageSize - 1);
+          mem = VirtualAlloc(
+              NULL, allocSize, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE);
+
+          // Privilege no longer needed, restore previous state
+          AdjustTokenPrivileges(hProcessToken, FALSE, &prevTp, 0, NULL, NULL);
+      }
+  }
+
+  CloseHandle(hProcessToken);
+
+  return mem;
+}
+
+void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {
+
+  static bool firstCall = true;
+
+  // Try to allocate large pages
+  mem = aligned_ttmem_alloc_large_pages(allocSize);
+
+  // Suppress info strings on the first call. The first call occurs before 'uci'
+  // is received and in that case this output confuses some GUIs.
+  if (!firstCall)
+  {
+      if (mem)
+          sync_cout << "info string Hash table allocation: Windows large pages used." << sync_endl;
+      else
+          sync_cout << "info string Hash table allocation: Windows large pages not used."
+                    << sync_endl;
+  }
+  firstCall = false;
+
+  // Fall back to regular, page aligned, allocation if necessary
+  if (!mem)
+      mem = VirtualAlloc(NULL, allocSize, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+
+  return mem;
 }
 
 #else
 
-void* aligned_ttmem_alloc(size_t allocSize, void** mem) {
+void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {
 
   constexpr size_t alignment = 64; // assumed cache line size
   size_t size = allocSize + alignment - 1; // allocate some extra space
-  *mem = malloc(size);
-  void* ret = reinterpret_cast<void*>((uintptr_t(*mem) + alignment - 1) & ~uintptr_t(alignment - 1));
+  mem = malloc(size);
+  void* ret = reinterpret_cast<void*>((uintptr_t(mem) + alignment - 1) & ~uintptr_t(alignment - 1));
   return ret;
 }
 
 #endif
 
 
+/// aligned_ttmem_free() will free the previously allocated ttmem
+
+#if defined(_WIN64)
+
+void aligned_ttmem_free(void* mem) {
+
+  if (mem && !VirtualFree(mem, 0, MEM_RELEASE))
+  {
+      DWORD err = GetLastError();
+      std::cerr << "Failed to free transposition table. Error code: 0x" <<
+                   std::hex << err << std::dec << std::endl;
+      exit(EXIT_FAILURE);
+  }
+}
+
+#else
+
+void aligned_ttmem_free(void *mem) {
+  free(mem);
+}
+
+#endif
+
+
 namespace WinProcGroup {
 
 #ifndef _WIN32
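// A minimal usage sketch of the std_aligned_alloc()/std_aligned_free() pair added
// in the patch above. It assumes linking against this misc.cpp; the 64-byte
// alignment and 1 MiB size below are illustrative values only, not taken from the patch.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Declarations of the wrappers introduced by the patch (defined in misc.cpp).
void* std_aligned_alloc(size_t alignment, size_t size);
void  std_aligned_free(void* ptr);

int main() {
  constexpr size_t alignment = 64;          // illustrative: one cache line
  constexpr size_t size      = 1024 * 1024; // illustrative: 1 MiB, a multiple of alignment,
                                            // since aligned_alloc() requires size % alignment == 0

  void* buf = std_aligned_alloc(alignment, size);
  assert(buf && reinterpret_cast<uintptr_t>(buf) % alignment == 0);

  // ... fill and use buf ...

  std_aligned_free(buf); // must be paired with std_aligned_alloc()
}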
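// The patch rounds allocation sizes up in two equivalent ways: divide-and-multiply
// in the Linux path and a mask in the Windows path (valid because the large-page
// size is a power of two), while the generic fallback over-allocates and bumps the
// returned pointer to the next alignment boundary. A self-contained sketch of that
// arithmetic, with illustrative numbers:

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const size_t allocSize = 3000000;         // illustrative request, about 3 MB
  const size_t page      = 2 * 1024 * 1024; // assumed 2MB page size, as in the patch

  // Linux path: round up with divide-and-multiply (works for any page size)
  size_t sizeA = ((allocSize + page - 1) / page) * page;

  // Windows path: round up with a mask (requires a power-of-two page size)
  size_t sizeB = (allocSize + page - 1) & ~size_t(page - 1);

  std::printf("%zu %zu\n", sizeA, sizeB);   // both print 4194304

  // Fallback path: bump a pointer up to the next 64-byte boundary inside an
  // over-allocated buffer, mirroring the reinterpret_cast line in the patch
  const size_t alignment = 64;
  static char raw[256];
  void* ret = reinterpret_cast<void*>((uintptr_t(raw) + alignment - 1) & ~uintptr_t(alignment - 1));
  std::printf("%p -> %p\n", static_cast<void*>(raw), ret);
}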