2 Stockfish, a UCI chess playing engine derived from Glaurung 2.1
3 Copyright (C) 2004-2020 The Stockfish developers (see AUTHORS file)
5 Stockfish is free software: you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation, either version 3 of the License, or
8 (at your option) any later version.
10 Stockfish is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #if _WIN32_WINNT < 0x0601
22 #define _WIN32_WINNT 0x0601 // Force to include needed API prototypes
// The needed Windows API for processor groups may be missing on old Windows
// versions, so instead of calling it directly (which would force the linker to
// resolve the calls at link time), try to load it at runtime. To do this we
// need first to define the corresponding function pointer types.
// fun1_t -> GetLogicalProcessorInformationEx (see GetProcAddress call below)
typedef bool(*fun1_t)(LOGICAL_PROCESSOR_RELATIONSHIP,
                      PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX, PDWORD);
// fun2_t -> GetNumaNodeProcessorMaskEx
typedef bool(*fun2_t)(USHORT, PGROUP_AFFINITY);
// fun3_t -> SetThreadGroupAffinity
typedef bool(*fun3_t)(HANDLE, CONST GROUP_AFFINITY*, PGROUP_AFFINITY);
49 #if defined(__linux__) && !defined(__ANDROID__)
/// Version number. If Version is left empty, then the compile date, in the
/// format DD-MM-YY, is shown in engine_info() instead.
const string Version = "";
/// Our fancy logging facility. The trick here is to replace cin.rdbuf() and
/// cout.rdbuf() with two Tie objects that tie cin and cout to a file stream. We
/// can toggle the logging of std::cout and std::cin at runtime whilst preserving
/// usual I/O functionality, all without changing a single line of code!
/// Idea from http://groups.google.com/group/comp.lang.c++/msg/1d941c0f26ea0d81

struct Tie: public streambuf { // MSVC requires split streambuf for cin and cout

  // Forward all traffic through 'buf' while mirroring it into 'logBuf'
  Tie(streambuf* b, streambuf* l) : buf(b), logBuf(l) {}

  int sync() override { return logBuf->pubsync(), buf->pubsync(); }
  int overflow(int c) override { return log(buf->sputc((char)c), "<< "); } // outgoing byte
  int underflow() override { return buf->sgetc(); }
  int uflow() override { return log(buf->sbumpc(), ">> "); }               // incoming byte

  streambuf *buf, *logBuf; // real stream buffer and log-file buffer (non-owning)

  // Mirror character 'c' into the log; 'prefix' marks the direction ("<< " out,
  // ">> " in). 'last' remembers the previous character — presumably an elided
  // guard uses it to emit the prefix only at line starts (TODO confirm; the
  // conditional is not visible in this view).
  int log(int c, const char* prefix) {

    static int last = '\n'; // Single log file

    logBuf->sputn(prefix, 3);

    return last = logBuf->sputc((char)c);
  // Tie cin/cout to 'file' through the two Tie streambufs
  Logger() : in(cin.rdbuf(), file.rdbuf()), out(cout.rdbuf(), file.rdbuf()) {}
  ~Logger() { start(""); } // an empty name stops logging (see start() below)

  // Start or stop logging: a non-empty 'fname' opens the log file and redirects
  // I/O through the Tie objects; an empty 'fname' restores the original buffers.
  // NOTE(review): 'l' is presumably a file-local Logger singleton whose
  // declaration is elided from this view — confirm before relying on it.
  static void start(const std::string& fname) {

    if (!fname.empty() && !l.file.is_open())

        l.file.open(fname, ifstream::out);

        if (!l.file.is_open())

            cerr << "Unable to open debug log file " << fname << endl;

    else if (fname.empty() && l.file.is_open())

        cout.rdbuf(l.out.buf);
/// engine_info() returns the full name of the current Stockfish version. This
/// will be either "Stockfish <Tag> DD-MM-YY" (where DD-MM-YY is the date when
/// the program was compiled) or "Stockfish <Version>", depending on whether
/// Version is empty.

const string engine_info(bool to_uci) {

  const string months("Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec");
  string month, day, year;
  stringstream ss, date(__DATE__); // From compiler, format is "Sep 21 2008"

  ss << "Stockfish " << Version << setfill('0');

  date >> month >> day >> year;
  // Month index: each month token occupies 4 chars in 'months' ("Jan "), so
  // find()/4 gives the 0-based month and +1 makes it 1-based.
  ss << setw(2) << day << setw(2) << (1 + months.find(month) / 4) << year.substr(2);

  // UCI mode emits the "id author" header, plain mode uses " by "
  ss << (to_uci ? "\nid author ": " by ")
     << "the Stockfish developers (see AUTHORS file)";
/// compiler_info() returns a string trying to describe the compiler we use

const std::string compiler_info() {

  // Helper macros to expand predefined version macros into string literals
  #define stringify2(x) #x
  #define stringify(x) stringify2(x)
  #define make_version_string(major, minor, patch) stringify(major) "." stringify(minor) "." stringify(patch)

/// Predefined macros hell:
///
/// __GNUC__           Compiler is gcc, Clang or Intel on Linux
/// __INTEL_COMPILER   Compiler is Intel
/// _MSC_VER           Compiler is MSVC or Intel on Windows
/// _WIN32             Building on Windows (any)
/// _WIN64             Building on Windows 64 bit

  std::string compiler = "\nCompiled by ";

  // Compiler identification (the #ifdef chain selecting these branches is
  // partially elided from this view)
  compiler += "clang++ ";
  compiler += make_version_string(__clang_major__, __clang_minor__, __clang_patchlevel__);
#elif __INTEL_COMPILER
  compiler += "Intel compiler ";
  compiler += "(version ";
  compiler += stringify(__INTEL_COMPILER) " update " stringify(__INTEL_COMPILER_UPDATE);
  compiler += "(version ";
  compiler += stringify(_MSC_FULL_VER) "." stringify(_MSC_BUILD);
  compiler += "g++ (GNUC) ";
  compiler += make_version_string(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__);
  compiler += "Unknown compiler ";
  compiler += "(unknown version)";

  // Target operating system
#if defined(__APPLE__)
  compiler += " on Apple";
#elif defined(__CYGWIN__)
  compiler += " on Cygwin";
#elif defined(__MINGW64__)
  compiler += " on MinGW64";
#elif defined(__MINGW32__)
  compiler += " on MinGW32";
#elif defined(__ANDROID__)
  compiler += " on Android";
#elif defined(__linux__)
  compiler += " on Linux";
#elif defined(_WIN64)
  compiler += " on Microsoft Windows 64-bit";
#elif defined(_WIN32)
  compiler += " on Microsoft Windows 32-bit";
  compiler += " on unknown system";

  // SIMD / bit-manipulation features this binary was built with
  compiler += "\nCompilation settings include: ";
  compiler += (Is64Bit ? " 64bit" : " 32bit");
#if defined(USE_AVX512)
  compiler += " AVX512";
#if defined(USE_AVX2)
#if defined(USE_SSE41)
  compiler += " SSE41";
#if defined(USE_SSSE3)
  compiler += " SSSE3";
  compiler += (HasPext ? " BMI2" : "");
  compiler += (HasPopCnt ? " POPCNT" : "");

  compiler += " DEBUG";

  // __VERSION__ is gcc/clang specific; the guard and fallback branch are elided
  compiler += "\n__VERSION__ macro expands to: ";
  compiler += __VERSION__;
  compiler += "(undefined macro)";
/// Debug functions used mainly to collect run-time statistics.
/// hits[0] counts trials and hits[1] the successes among them; means[0]
/// counts samples and means[1] their running sum. Atomics keep the
/// counters safe when updated from several search threads.
static std::atomic<int64_t> hits[2], means[2];

// Record one trial, and one success when 'b' holds
void dbg_hit_on(bool b) {
    hits[0].fetch_add(1);
    if (b)
        hits[1].fetch_add(1);
}

// Conditional variant: the trial is counted only when 'c' holds
void dbg_hit_on(bool c, bool b) {
    if (c)
        dbg_hit_on(b);
}

// Accumulate one sample 'v' into the running mean counters
void dbg_mean_of(int v) {
    means[0].fetch_add(1);
    means[1].fetch_add(v);
}
  // Report hit-rate statistics gathered via dbg_hit_on() (the enclosing
  // function header and the guards against empty counters are elided here)
  cerr << "Total " << hits[0] << " Hits " << hits[1]
       << " hit rate (%) " << 100 * hits[1] / hits[0] << endl;

  // Report the mean of the samples gathered via dbg_mean_of()
  cerr << "Total " << means[0] << " Mean "
       << (double)means[1] / means[0] << endl;
/// Used to serialize access to std::cout to avoid multiple threads writing at
/// the same time (implementation elided from this view)
std::ostream& operator<<(std::ostream& os, SyncCout sc) {
286 /// Trampoline helper to avoid moving Logger to misc.h
287 void start_logger(const std::string& fname) { Logger::start(fname); }
/// prefetch() preloads the given address in L1/L2 cache. This is a non-blocking
/// function that doesn't stall the CPU waiting for data to be loaded from memory,
/// which can be quite slow.

// No-op variant used when hardware prefetching is disabled at build time
void prefetch(void*) {}
void prefetch(void* addr) {

#  if defined(__INTEL_COMPILER)
   // This hack prevents prefetches from being optimized away by
   // Intel compiler. Both MSVC and gcc seem not be affected by this.
#  if defined(__INTEL_COMPILER) || defined(_MSC_VER)
  _mm_prefetch((char*)addr, _MM_HINT_T0); // SSE intrinsic; T0 = prefetch into all cache levels
  __builtin_prefetch(addr);               // gcc/clang builtin on the other branch
/// Wrappers for systems where the c++17 implementation doesn't guarantee the availability of aligned_alloc.
/// Memory allocated with std_aligned_alloc must be freed with std_aligned_free.

void* std_aligned_alloc(size_t alignment, size_t size) {
#if (defined(__APPLE__) && defined(_LIBCPP_HAS_C11_FEATURES)) || defined(__ANDROID__) || defined(__OpenBSD__) || (defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC) && !defined(_WIN32))
  // C11 aligned_alloc exists even though std::aligned_alloc may not
  return aligned_alloc(alignment, size);
#elif (defined(_WIN32) || (defined(__APPLE__) && !defined(_LIBCPP_HAS_C11_FEATURES)))
  // No aligned_alloc at all: fall back to the _mm_malloc intrinsic
  return _mm_malloc(size, alignment);
  // Standard C++17 path (its #else/#endif guards are elided from this view)
  return std::aligned_alloc(alignment, size);
void std_aligned_free(void* ptr) {
  // Each branch must mirror the allocator chosen in std_aligned_alloc():
  // aligned_alloc/std::aligned_alloc pair with free(), _mm_malloc with
  // _mm_free(). The calls themselves are elided from this view.
#if (defined(__APPLE__) && defined(_LIBCPP_HAS_C11_FEATURES)) || defined(__ANDROID__) || defined(__OpenBSD__) || (defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC) && !defined(_WIN32))
#elif (defined(_WIN32) || (defined(__APPLE__) && !defined(_LIBCPP_HAS_C11_FEATURES)))
/// aligned_ttmem_alloc() will return suitably aligned memory, and if possible use large pages.
/// The returned pointer is the aligned one, while the mem argument is the one that needs
/// to be passed to free. With c++17 some of this functionality could be simplified.

#if defined(__linux__) && !defined(__ANDROID__)

void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {

  constexpr size_t alignment = 2 * 1024 * 1024; // assumed 2MB page sizes
  size_t size = ((allocSize + alignment - 1) / alignment) * alignment; // multiple of alignment
  // posix_memalign returns non-zero on failure (its handling is elided here);
  // on success, advise the kernel to back the region with transparent huge pages
  if (posix_memalign(&mem, alignment, size))
  madvise(mem, allocSize, MADV_HUGEPAGE);
#elif defined(_WIN64)

// Try to allocate the hash table with Windows large pages. This requires the
// SeLockMemoryPrivilege, which is enabled only for the duration of the call.
static void* aligned_ttmem_alloc_large_pages(size_t allocSize) {

  HANDLE hProcessToken { };

  const size_t largePageSize = GetLargePageMinimum(); // 0 => large pages unsupported

  // We need SeLockMemoryPrivilege, so try to enable it for the process
  if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hProcessToken))

  if (LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &luid))

      TOKEN_PRIVILEGES tp { };
      TOKEN_PRIVILEGES prevTp { };

      tp.PrivilegeCount = 1;
      tp.Privileges[0].Luid = luid;
      tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

      // Try to enable SeLockMemoryPrivilege. Note that even if AdjustTokenPrivileges() succeeds,
      // we still need to query GetLastError() to ensure that the privileges were actually obtained.
      if (AdjustTokenPrivileges(
              hProcessToken, FALSE, &tp, sizeof(TOKEN_PRIVILEGES), &prevTp, &prevTpLen) &&
          GetLastError() == ERROR_SUCCESS)

          // Round up size to full pages and allocate
          allocSize = (allocSize + largePageSize - 1) & ~size_t(largePageSize - 1);
          // (continuation of a VirtualAlloc call whose opening line is elided)
          NULL, allocSize, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE);

      // Privilege no longer needed, restore previous state
      AdjustTokenPrivileges(hProcessToken, FALSE, &prevTp, 0, NULL, NULL);

  CloseHandle(hProcessToken);
void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {

  static bool firstCall = true; // set before 'uci' is received, see note below

  // Try to allocate large pages
  mem = aligned_ttmem_alloc_large_pages(allocSize);

  // Suppress info strings on the first call. The first call occurs before 'uci'
  // is received and in that case this output confuses some GUIs.
      sync_cout << "info string Hash table allocation: Windows large pages used." << sync_endl;
      sync_cout << "info string Hash table allocation: Windows large pages not used." << sync_endl;

  // Fall back to regular, page aligned, allocation if necessary
  mem = VirtualAlloc(NULL, allocSize, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
// Generic fallback: over-allocate by the alignment and round the pointer up
void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {

  constexpr size_t alignment = 64; // assumed cache line size
  size_t size = allocSize + alignment - 1; // allocate some extra space
  // 'mem' keeps the raw pointer that must later go to the free routine;
  // 'ret' is 'mem' rounded up to the next 'alignment' boundary
  void* ret = reinterpret_cast<void*>((uintptr_t(mem) + alignment - 1) & ~uintptr_t(alignment - 1));
/// aligned_ttmem_free() will free the previously allocated ttmem

void aligned_ttmem_free(void* mem) {

  // VirtualFree with size 0 and MEM_RELEASE frees the entire region that
  // VirtualAlloc reserved; failure is reported to stderr with the error code
  if (mem && !VirtualFree(mem, 0, MEM_RELEASE))

      DWORD err = GetLastError();
      std::cerr << "Failed to free transposition table. Error code: 0x" <<
          std::hex << err << std::dec << std::endl;
459 void aligned_ttmem_free(void *mem) {
namespace WinProcGroup {

// No-op stub used when Windows processor-group binding is not available
void bindThisThread(size_t) {}
/// best_group() retrieves logical processor information using Windows specific
/// API and returns the best group id for the thread with index idx. Original
/// code from Texel by Peter Österlund.

int best_group(size_t idx) {

  DWORD returnLength = 0;
  DWORD byteOffset = 0;

  // Early exit if the needed API is not available at runtime
  HMODULE k32 = GetModuleHandle("Kernel32.dll");
  auto fun1 = (fun1_t)(void(*)())GetProcAddress(k32, "GetLogicalProcessorInformationEx");

  // First call to get returnLength. We expect it to fail due to null buffer
  if (fun1(RelationAll, nullptr, &returnLength))

  // Once we know returnLength, allocate the buffer
  SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *buffer, *ptr;
  ptr = buffer = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX*)malloc(returnLength);

  // Second call, now we expect to succeed
  if (!fun1(RelationAll, buffer, &returnLength))

  // Walk the variable-sized records, tallying NUMA nodes, cores and threads
  // (the counter declarations are elided from this view)
  while (byteOffset < returnLength)

      if (ptr->Relationship == RelationNumaNode)

      else if (ptr->Relationship == RelationProcessorCore)

          threads += (ptr->Processor.Flags == LTP_PC_SMT) ? 2 : 1; // SMT core => 2 logical CPUs

      // Records are variable-sized: advance by each record's own Size field
      byteOffset += ptr->Size;
      ptr = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX*)(((char*)ptr) + ptr->Size);

  // groups[i] is the node assigned to the thread with index i
  std::vector<int> groups;

  // Run as many threads as possible on the same node until core limit is
  // reached, then move on filling the next node.
  for (int n = 0; n < nodes; n++)
      for (int i = 0; i < cores / nodes; i++)

  // In case a core has more than one logical processor (we assume 2) and we
  // have still threads to allocate, then spread them evenly across available
  // nodes.
  for (int t = 0; t < threads - cores; t++)
      groups.push_back(t % nodes);

  // If we still have more threads than the total number of logical processors
  // then return -1 and let the OS to decide what to do.
  return idx < groups.size() ? groups[idx] : -1;
/// bindThisThread() set the group affinity of the current thread

void bindThisThread(size_t idx) {

  // Use only local variables to be thread-safe
  int group = best_group(idx);

  // Early exit if the needed API are not available at runtime
  HMODULE k32 = GetModuleHandle("Kernel32.dll");
  auto fun2 = (fun2_t)(void(*)())GetProcAddress(k32, "GetNumaNodeProcessorMaskEx");
  auto fun3 = (fun3_t)(void(*)())GetProcAddress(k32, "SetThreadGroupAffinity");

  // Translate the group id into an affinity mask, then bind this thread to it
  GROUP_AFFINITY affinity;
  if (fun2(group, &affinity))
      fun3(GetCurrentThread(), &affinity, nullptr);

} // namespace WinProcGroup