2 Stockfish, a UCI chess playing engine derived from Glaurung 2.1
3 Copyright (C) 2004-2023 The Stockfish developers (see AUTHORS file)
5 Stockfish is free software: you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation, either version 3 of the License, or
8 (at your option) any later version.
10 Stockfish is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #if _WIN32_WINNT < 0x0601
22 #define _WIN32_WINNT 0x0601 // Force to include needed API prototypes
30 // The needed Windows API for processor groups could be missed from old Windows
31 // versions, so instead of calling them directly (forcing the linker to resolve
32 // the calls at compile time), try to load them at runtime. To do this we need
33 // first to define the corresponding function pointers.
// Each alias below mirrors the signature of one Windows API entry point. The
// real addresses are resolved later with GetProcAddress() (see best_node(),
// bindThisThread() and aligned_large_pages_alloc_windows() in this file).
35 using fun1_t = bool(*)(LOGICAL_PROCESSOR_RELATIONSHIP,
36 PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX, PDWORD); // GetLogicalProcessorInformationEx
37 using fun2_t = bool(*)(USHORT, PGROUP_AFFINITY); // GetNumaNodeProcessorMaskEx
38 using fun3_t = bool(*)(HANDLE, CONST GROUP_AFFINITY*, PGROUP_AFFINITY); // SetThreadGroupAffinity
39 using fun4_t = bool(*)(USHORT, PGROUP_AFFINITY, USHORT, PUSHORT); // GetNumaNodeProcessorMask2
40 using fun5_t = WORD(*)(); // GetMaximumProcessorGroupCount
41 using fun6_t = bool(*)(HANDLE, DWORD, PHANDLE); // OpenProcessToken
42 using fun7_t = bool(*)(LPCSTR, LPCSTR, PLUID); // LookupPrivilegeValueA
43 using fun8_t = bool(*)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD); // AdjustTokenPrivileges
53 #include <string_view>
56 #if defined(__linux__) && !defined(__ANDROID__)
// POSIXALIGNEDALLOC selects the posix_memalign() code path in
// std_aligned_alloc()/std_aligned_free() on platforms whose C++17 runtime does
// not reliably provide std::aligned_alloc() (see the allocator wrappers below).
61 #if defined(__APPLE__) || defined(__ANDROID__) || defined(__OpenBSD__) || (defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC) && !defined(_WIN32)) || defined(__e2k__)
62 #define POSIXALIGNEDALLOC
75 /// Version number or dev.
// When equal to "dev", engine_info() appends the build date (and, where
// available, git information) to the reported version string.
76 constexpr string_view version = "dev";
78 /// Our fancy logging facility. The trick here is to replace cin.rdbuf() and
79 /// cout.rdbuf() with two Tie objects that tie cin and cout to a file stream. We
80 /// can toggle the logging of std::cout and std::cin at runtime whilst preserving
81 /// usual I/O functionality, all without changing a single line of code!
82 /// Idea from http://groups.google.com/group/comp.lang.c++/msg/1d941c0f26ea0d81
84 struct Tie: public streambuf { // MSVC requires split streambuf for cin and cout
// buf is the original stream buffer, logBuf the log file's buffer. Both are
// borrowed, not owned, by this Tie.
86 Tie(streambuf* b, streambuf* l) : buf(b), logBuf(l) {}
// Flush the log first, then the real buffer; the comma operator discards the
// log's sync result and reports only the real buffer's.
88 int sync() override { return logBuf->pubsync(), buf->pubsync(); }
// Output path: forward c to the real buffer and mirror it into the log with
// the "<< " direction prefix.
89 int overflow(int c) override { return log(buf->sputc((char)c), "<< "); }
90 int underflow() override { return buf->sgetc(); }
// Input path: consume from the real buffer and mirror it with a ">> " prefix.
91 int uflow() override { return log(buf->sbumpc(), ">> "); }
93 streambuf *buf, *logBuf;
// Copy character c into the log. 'last' remembers the previously logged
// character across calls so the direction prefix can be emitted at line
// starts. NOTE(review): the start-of-line test itself is not visible in this
// view of the file — confirm against the full source.
95 int log(int c, const char* prefix) {
97 static int last = '\n'; // Single log file
100 logBuf->sputn(prefix, 3); // every prefix is exactly 3 chars ("<< ", ">> ")
102 return last = logBuf->sputc((char)c);
// Logger owns the log file plus the two Tie objects that splice it into
// std::cin and std::cout.
108 Logger() : in(cin.rdbuf(), file.rdbuf()), out(cout.rdbuf(), file.rdbuf()) {}
// Destruction stops logging: an empty filename closes the file and restores
// the original stream buffers.
109 ~Logger() { start(""); }
// Start or stop logging: a non-empty fname opens that file and routes the
// standard streams through the Tie objects; an empty fname restores them.
115 static void start(const std::string& fname) {
119 if (l.file.is_open())
// Restore std::cout's original buffer (kept inside the Tie object).
121 cout.rdbuf(l.out.buf);
128 l.file.open(fname, ifstream::out);
130 if (!l.file.is_open())
132 cerr << "Unable to open debug log file " << fname << endl;
145 /// engine_info() returns the full name of the current Stockfish version.
146 /// For local dev compiles we try to append the commit sha and commit date
147 /// from git; if that fails, only the local compilation date is set and "nogit" is specified:
148 /// Stockfish dev-YYYYMMDD-SHA
150 /// Stockfish dev-YYYYMMDD-nogit
152 /// For releases (non dev builds) we only include the version number:
153 /// Stockfish version
155 string engine_info(bool to_uci) {
157 ss << "Stockfish " << version << setfill('0');
159 if constexpr (version == "dev")
163 ss << stringify(GIT_DATE);
// Fall back to the compiler-provided build date. Each month name occupies
// exactly 4 characters of 'months' ("Jan ", "Feb ", ...), so find()/4 yields
// the zero-based month index.
165 constexpr string_view months("Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec");
166 string month, day, year;
167 stringstream date(__DATE__); // From compiler, format is "Sep 21 2008"
169 date >> month >> day >> year;
170 ss << year << setw(2) << setfill('0') << (1 + months.find(month) / 4) << setw(2) << setfill('0') << day;
// "\nid author " is the UCI protocol form; plain " by " is used for banners.
176 ss << (to_uci ? "\nid author ": " by ")
177 << "the Stockfish developers (see AUTHORS file)";
183 /// compiler_info() returns a string trying to describe the compiler we use
185 std::string compiler_info() {
187 #define make_version_string(major, minor, patch) stringify(major) "." stringify(minor) "." stringify(patch)
189 /// Predefined macros hell:
191 /// __GNUC__ Compiler is gcc, Clang or Intel on Linux
192 /// __INTEL_COMPILER Compiler is Intel
193 /// _MSC_VER Compiler is MSVC or Intel on Windows
194 /// _WIN32 Building on Windows (any)
195 /// _WIN64 Building on Windows 64 bit
197 std::string compiler = "\nCompiled by ";
// Compiler identification. NOTE(review): the #if/#elif guards for several of
// these branches fall outside this view of the file.
200 compiler += "clang++ ";
201 compiler += make_version_string(__clang_major__, __clang_minor__, __clang_patchlevel__);
202 #elif __INTEL_COMPILER
203 compiler += "Intel compiler ";
204 compiler += "(version ";
205 compiler += stringify(__INTEL_COMPILER) " update " stringify(__INTEL_COMPILER_UPDATE);
209 compiler += "(version ";
210 compiler += stringify(_MSC_FULL_VER) "." stringify(_MSC_BUILD);
212 #elif defined(__e2k__) && defined(__LCC__)
// Helper macro: append "." plus a two-digit sub-version for MCST LCC.
213 #define dot_ver2(n) \
214 compiler += (char)'.'; \
215 compiler += (char)('0' + (n) / 10); \
216 compiler += (char)('0' + (n) % 10);
218 compiler += "MCST LCC ";
219 compiler += "(version ";
220 compiler += std::to_string(__LCC__ / 100);
221 dot_ver2(__LCC__ % 100)
222 dot_ver2(__LCC_MINOR__)
225 compiler += "g++ (GNUC) ";
226 compiler += make_version_string(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__);
228 compiler += "Unknown compiler ";
229 compiler += "(unknown version)";
// Target OS / environment (order matters: Cygwin/MinGW also define _WIN32).
232 #if defined(__APPLE__)
233 compiler += " on Apple";
234 #elif defined(__CYGWIN__)
235 compiler += " on Cygwin";
236 #elif defined(__MINGW64__)
237 compiler += " on MinGW64";
238 #elif defined(__MINGW32__)
239 compiler += " on MinGW32";
240 #elif defined(__ANDROID__)
241 compiler += " on Android";
242 #elif defined(__linux__)
243 compiler += " on Linux";
244 #elif defined(_WIN64)
245 compiler += " on Microsoft Windows 64-bit";
246 #elif defined(_WIN32)
247 compiler += " on Microsoft Windows 32-bit";
249 compiler += " on unknown system";
// Instruction-set / build-flag summary, driven by the USE_* build macros.
252 compiler += "\nCompilation settings include: ";
253 compiler += (Is64Bit ? " 64bit" : " 32bit");
254 #if defined(USE_VNNI)
257 #if defined(USE_AVX512)
258 compiler += " AVX512";
260 compiler += (HasPext ? " BMI2" : "");
261 #if defined(USE_AVX2)
264 #if defined(USE_SSE41)
265 compiler += " SSE41";
267 #if defined(USE_SSSE3)
268 compiler += " SSSE3";
270 #if defined(USE_SSE2)
273 compiler += (HasPopCnt ? " POPCNT" : "");
277 #if defined(USE_NEON)
282 compiler += " DEBUG";
// Finally report what the compiler itself says about its version, if the
// __VERSION__ macro is defined.
285 compiler += "\n__VERSION__ macro expands to: ";
287 compiler += __VERSION__;
289 compiler += "(undefined macro)";
297 /// Debug functions used mainly to collect run-time statistics
298 constexpr int MaxDebugSlots = 32;
// Fixed-size array of atomic counters. By convention slot [0] holds the
// sample count and the remaining slots hold accumulated sums (see the dbg_*
// helpers below).
304 std::atomic<int64_t> data[N] = { 0 };
306 constexpr inline std::atomic<int64_t>& operator[](int index) { return data[index]; }
// Per-statistic accumulators, indexed by the caller-supplied slot number.
309 DebugInfo<2> hit[MaxDebugSlots];
310 DebugInfo<2> mean[MaxDebugSlots];
311 DebugInfo<3> stdev[MaxDebugSlots];
312 DebugInfo<6> correl[MaxDebugSlots];
// Count how often 'cond' is true for the given slot: [0] totals calls, [1]
// totals hits (the increment statements are not visible in this view).
316 void dbg_hit_on(bool cond, int slot) {
// Accumulate samples for a running mean: slot [1] sums the values.
323 void dbg_mean_of(int64_t value, int slot) {
326 mean[slot][1] += value;
// Accumulate sum ([1]) and sum of squares ([2]) for a standard deviation.
329 void dbg_stdev_of(int64_t value, int slot) {
332 stdev[slot][1] += value;
333 stdev[slot][2] += value * value;
// Accumulate the moments needed for a Pearson correlation coefficient:
// the sum of each variable ([1],[3]), of their squares ([2],[4]) and of
// their product ([5]).
336 void dbg_correl_of(int64_t value1, int64_t value2, int slot) {
339 correl[slot][1] += value1;
340 correl[slot][2] += value1 * value1;
341 correl[slot][3] += value2;
342 correl[slot][4] += value2 * value2;
343 correl[slot][5] += value1 * value2;
// (dbg_print body) E(x) converts an accumulated sum into a sample mean over
// the current slot's count n; sqr squares a double.
349 auto E = [&n](int64_t x) { return double(x) / n; };
350 auto sqr = [](double x) { return x * x; };
// Each statistic is printed only for slots that recorded samples (n != 0).
352 for (int i = 0; i < MaxDebugSlots; ++i)
354 std::cerr << "Hit #" << i
355 << ": Total " << n << " Hits " << hit[i][1]
356 << " Hit Rate (%) " << 100.0 * E(hit[i][1])
359 for (int i = 0; i < MaxDebugSlots; ++i)
360 if ((n = mean[i][0]))
362 std::cerr << "Mean #" << i
363 << ": Total " << n << " Mean " << E(mean[i][1])
367 for (int i = 0; i < MaxDebugSlots; ++i)
368 if ((n = stdev[i][0]))
// Population standard deviation: sqrt(E[x^2] - E[x]^2).
370 double r = sqrtl(E(stdev[i][2]) - sqr(E(stdev[i][1])));
371 std::cerr << "Stdev #" << i
372 << ": Total " << n << " Stdev " << r
376 for (int i = 0; i < MaxDebugSlots; ++i)
377 if ((n = correl[i][0]))
// Pearson correlation: cov(x,y) / (stdev(x) * stdev(y)).
379 double r = (E(correl[i][5]) - E(correl[i][1]) * E(correl[i][3]))
380 / ( sqrtl(E(correl[i][2]) - sqr(E(correl[i][1])))
381 * sqrtl(E(correl[i][4]) - sqr(E(correl[i][3]))));
382 std::cerr << "Correl. #" << i
383 << ": Total " << n << " Coefficient " << r
389 /// Used to serialize access to std::cout to avoid multiple threads writing at
// the same time. NOTE(review): the locking logic of this operator is not
// visible in this view of the file.
392 std::ostream& operator<<(std::ostream& os, SyncCout sc) {
406 /// Trampoline helper to avoid moving Logger to misc.h
407 void start_logger(const std::string& fname) { Logger::start(fname); }
/// prefetch() preloads the given address in L1/L2 cache. This is a non-blocking
/// function that doesn't stall the CPU waiting for data to be loaded from memory,
/// which can be quite slow.
///
/// No-op stub used when hardware prefetching is disabled at build time.
void prefetch(void*) { /* intentionally empty */ }
// Real implementation: issue a hardware prefetch hint via compiler intrinsics.
419 void prefetch(void* addr) {
421 # if defined(__INTEL_COMPILER)
422 // This hack prevents prefetches from being optimized away by
423 // Intel compiler. Both MSVC and gcc seem not be affected by this.
427 # if defined(__INTEL_COMPILER) || defined(_MSC_VER)
428 _mm_prefetch((char*)addr, _MM_HINT_T0); // fetch into all cache levels
430 __builtin_prefetch(addr);
437 /// std_aligned_alloc() is our wrapper for systems where the c++17 implementation
438 /// does not guarantee the availability of aligned_alloc(). Memory allocated with
439 /// std_aligned_alloc() must be freed with std_aligned_free().
441 void* std_aligned_alloc(size_t alignment, size_t size) {
443 #if defined(POSIXALIGNEDALLOC)
// posix_memalign() returns 0 on success and fills 'mem'; any nonzero error
// code is mapped to nullptr.
445 return posix_memalign(&mem, alignment, size) ? nullptr : mem;
446 #elif defined(_WIN32) && !defined(_M_ARM) && !defined(_M_ARM64)
447 return _mm_malloc(size, alignment);
448 #elif defined(_WIN32)
449 return _aligned_malloc(size, alignment);
451 return std::aligned_alloc(alignment, size);
// Counterpart of std_aligned_alloc(): each branch must release memory with
// the deallocator matching the allocator chosen above.
455 void std_aligned_free(void* ptr) {
457 #if defined(POSIXALIGNEDALLOC)
459 #elif defined(_WIN32) && !defined(_M_ARM) && !defined(_M_ARM64)
461 #elif defined(_WIN32)
468 /// aligned_large_pages_alloc() will return suitably aligned memory, if possible using large pages.
// Windows-only helper: tries to acquire SeLockMemoryPrivilege and allocate the
// block with MEM_LARGE_PAGES. Any failure leaves the result null so the caller
// can fall back to a regular allocation.
472 static void* aligned_large_pages_alloc_windows([[maybe_unused]] size_t allocSize) {
478 HANDLE hProcessToken { };
482 const size_t largePageSize = GetLargePageMinimum();
486 // Dynamically link OpenProcessToken, LookupPrivilegeValue and AdjustTokenPrivileges
488 HMODULE hAdvapi32 = GetModuleHandle(TEXT("advapi32.dll"));
491 hAdvapi32 = LoadLibrary(TEXT("advapi32.dll"));
493 auto fun6 = (fun6_t)(void(*)())GetProcAddress(hAdvapi32, "OpenProcessToken");
496 auto fun7 = (fun7_t)(void(*)())GetProcAddress(hAdvapi32, "LookupPrivilegeValueA");
499 auto fun8 = (fun8_t)(void(*)())GetProcAddress(hAdvapi32, "AdjustTokenPrivileges");
503 // We need SeLockMemoryPrivilege, so try to enable it for the process
504 if (!fun6( // OpenProcessToken()
505 GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hProcessToken))
508 if (fun7( // LookupPrivilegeValue(nullptr, SE_LOCK_MEMORY_NAME, &luid)
509 nullptr, "SeLockMemoryPrivilege", &luid))
511 TOKEN_PRIVILEGES tp { };
512 TOKEN_PRIVILEGES prevTp { };
515 tp.PrivilegeCount = 1;
516 tp.Privileges[0].Luid = luid;
517 tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
519 // Try to enable SeLockMemoryPrivilege. Note that even if AdjustTokenPrivileges() succeeds,
520 // we still need to query GetLastError() to ensure that the privileges were actually obtained.
521 if (fun8( // AdjustTokenPrivileges()
522 hProcessToken, FALSE, &tp, sizeof(TOKEN_PRIVILEGES), &prevTp, &prevTpLen) &&
523 GetLastError() == ERROR_SUCCESS)
525 // Round up size to full pages and allocate
// NOTE(review): the bitmask rounding assumes largePageSize is a power of
// two, which GetLargePageMinimum() is documented to return.
526 allocSize = (allocSize + largePageSize - 1) & ~size_t(largePageSize - 1);
528 nullptr, allocSize, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE);
530 // Privilege no longer needed, restore previous state
531 fun8( // AdjustTokenPrivileges ()
532 hProcessToken, FALSE, &prevTp, 0, nullptr, nullptr);
536 CloseHandle(hProcessToken);
// Public Windows entry point: prefer a large-page allocation, otherwise fall
// back to a normal page-aligned VirtualAlloc().
543 void* aligned_large_pages_alloc(size_t allocSize) {
545 // Try to allocate large pages
546 void* mem = aligned_large_pages_alloc_windows(allocSize);
548 // Fall back to regular, page aligned, allocation if necessary
550 mem = VirtualAlloc(nullptr, allocSize, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
// POSIX variant: over-align the allocation to the (assumed) huge/small page
// size and, where supported, ask the kernel to back it with huge pages.
557 void* aligned_large_pages_alloc(size_t allocSize) {
559 #if defined(__linux__)
560 constexpr size_t alignment = 2 * 1024 * 1024; // assumed 2MB page size
562 constexpr size_t alignment = 4096; // assumed small page size
565 // round up to multiples of alignment
566 size_t size = ((allocSize + alignment - 1) / alignment) * alignment;
567 void *mem = std_aligned_alloc(alignment, size);
568 #if defined(MADV_HUGEPAGE)
// Best-effort hint; a failure of madvise() is deliberately ignored.
569 madvise(mem, size, MADV_HUGEPAGE);
577 /// aligned_large_pages_free() will free the previously allocated ttmem
581 void aligned_large_pages_free(void* mem) {
// VirtualFree with MEM_RELEASE frees the whole reservation; a failure here
// indicates a serious bug, so the error code is reported on stderr.
583 if (mem && !VirtualFree(mem, 0, MEM_RELEASE))
585 DWORD err = GetLastError();
586 std::cerr << "Failed to free large page memory. Error code: 0x"
588 << std::dec << std::endl;
// POSIX variant: the memory came from std_aligned_alloc(), so release it with
// the matching deallocator.
595 void aligned_large_pages_free(void *mem) {
596 std_aligned_free(mem);
602 namespace WinProcGroup {
// No-op stub used when processor-group binding is unavailable on this build.
606 void bindThisThread(size_t) {}
610 /// best_node() retrieves logical processor information using Windows specific
611 /// API and returns the best node id for the thread with index idx. Original
612 /// code from Texel by Peter Österlund.
614 static int best_node(size_t idx) {
619 DWORD returnLength = 0;
620 DWORD byteOffset = 0;
622 // Early exit if the needed API is not available at runtime
623 HMODULE k32 = GetModuleHandle(TEXT("Kernel32.dll"));
624 auto fun1 = (fun1_t)(void(*)())GetProcAddress(k32, "GetLogicalProcessorInformationEx");
628 // First call to GetLogicalProcessorInformationEx() to get returnLength.
629 // We expect the call to fail due to null buffer.
630 if (fun1(RelationAll, nullptr, &returnLength))
633 // Once we know returnLength, allocate the buffer
634 SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *buffer, *ptr;
635 ptr = buffer = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX*)malloc(returnLength);
637 // Second call to GetLogicalProcessorInformationEx(), now we expect to succeed
638 if (!fun1(RelationAll, buffer, &returnLength))
// Walk the variable-sized records, counting NUMA nodes, physical cores and
// logical threads (an SMT core is assumed to carry exactly 2 threads).
644 while (byteOffset < returnLength)
646 if (ptr->Relationship == RelationNumaNode)
649 else if (ptr->Relationship == RelationProcessorCore)
652 threads += (ptr->Processor.Flags == LTP_PC_SMT) ? 2 : 1;
// Records are variable-sized, so advance by each record's own Size field.
656 byteOffset += ptr->Size;
657 ptr = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX*)(((char*)ptr) + ptr->Size);
662 std::vector<int> groups;
664 // Run as many threads as possible on the same node until core limit is
665 // reached, then move on filling the next node.
666 for (int n = 0; n < nodes; n++)
667 for (int i = 0; i < cores / nodes; i++)
670 // In case a core has more than one logical processor (we assume 2) and we
671 // have still threads to allocate, then spread them evenly across available
// nodes.
673 for (int t = 0; t < threads - cores; t++)
674 groups.push_back(t % nodes);
676 // If we still have more threads than the total number of logical processors
677 // then return -1 and let the OS to decide what to do.
678 return idx < groups.size() ? groups[idx] : -1;
682 /// bindThisThread() set the group affinity of the current thread
684 void bindThisThread(size_t idx) {
686 // Use only local variables to be thread-safe
687 int node = best_node(idx);
692 // Early exit if the needed API are not available at runtime
693 HMODULE k32 = GetModuleHandle(TEXT("Kernel32.dll"));
694 auto fun2 = (fun2_t)(void(*)())GetProcAddress(k32, "GetNumaNodeProcessorMaskEx");
695 auto fun3 = (fun3_t)(void(*)())GetProcAddress(k32, "SetThreadGroupAffinity");
696 auto fun4 = (fun4_t)(void(*)())GetProcAddress(k32, "GetNumaNodeProcessorMask2");
697 auto fun5 = (fun5_t)(void(*)())GetProcAddress(k32, "GetMaximumProcessorGroupCount");
// Legacy path: the node maps to a single processor group.
704 GROUP_AFFINITY affinity;
705 if (fun2(node, &affinity)) // GetNumaNodeProcessorMaskEx
706 fun3(GetCurrentThread(), &affinity, nullptr); // SetThreadGroupAffinity
710 // If a numa node has more than one processor group, we assume they are
711 // sized equal and we spread threads evenly across the groups.
712 USHORT elements, returnedElements;
713 elements = fun5(); // GetMaximumProcessorGroupCount
// NOTE(review): the malloc() result is used without a null check and its
// free() is not visible in this view — confirm the buffer is released on all
// paths in the full source.
714 GROUP_AFFINITY *affinity = (GROUP_AFFINITY*)malloc(elements * sizeof(GROUP_AFFINITY));
715 if (fun4(node, affinity, elements, &returnedElements)) // GetNumaNodeProcessorMask2
716 fun3(GetCurrentThread(), &affinity[idx % returnedElements], nullptr); // SetThreadGroupAffinity
723 } // namespace WinProcGroup
// GETCWD abstracts over the MSVC (_getcwd) vs POSIX (getcwd) spelling of the
// current-working-directory call used by CommandLine::init().
727 #define GETCWD _getcwd
730 #define GETCWD getcwd
733 namespace CommandLine {
735 string argv0; // path+name of the executable binary, as given by argv[0]
736 string binaryDirectory; // path of the executable directory
737 string workingDirectory; // path of the working directory
// Populate the three globals above from argv[0] and the current directory.
739 void init([[maybe_unused]] int argc, char* argv[]) {
740 string pathSeparator;
742 // extract the path+name of the executable binary
746 pathSeparator = "\\";
748 // Under windows argv[0] may not have the extension. Also _get_pgmptr() had
749 // issues in some windows 10 versions, so check returned values carefully.
750 char* pgmptr = nullptr;
751 if (!_get_pgmptr(&pgmptr) && pgmptr != nullptr && *pgmptr)
758 // extract the working directory
759 workingDirectory = "";
// NOTE(review): GETCWD may return nullptr on failure; the guard before the
// assignment below is not visible in this view — confirm it exists.
761 char* cwd = GETCWD(buff, 40000);
763 workingDirectory = cwd;
765 // extract the binary directory path from argv0
766 binaryDirectory = argv0;
767 size_t pos = binaryDirectory.find_last_of("\\/");
768 if (pos == std::string::npos)
769 binaryDirectory = "." + pathSeparator;
// Keep everything up to and including the last path separator.
771 binaryDirectory.resize(pos + 1);
773 // pattern replacement: "./" at the start of path is replaced by the working directory
774 if (binaryDirectory.find("." + pathSeparator) == 0)
775 binaryDirectory.replace(0, 1, workingDirectory);
779 } // namespace CommandLine
781 } // namespace Stockfish