std::string engine_info(bool to_uci = false);
std::string compiler_info();
-void prefetch(void* addr);
-void start_logger(const std::string& fname);
-void* std_aligned_alloc(size_t alignment, size_t size);
-void std_aligned_free(void* ptr);
-void* aligned_large_pages_alloc(
- size_t size); // memory aligned by page size, min alignment: 4096 bytes
-void aligned_large_pages_free(void* mem); // nop if mem == nullptr
+
+// Preloads the given address in L1/L2 cache. This is a non-blocking
+// function that doesn't stall the CPU waiting for data to be loaded from memory,
+// which can be quite slow.
+void prefetch(void* addr);
+
+void start_logger(const std::string& fname);
+void* std_aligned_alloc(size_t alignment, size_t size);
+void std_aligned_free(void* ptr);
+// memory aligned by page size, min alignment: 4096 bytes
+void* aligned_large_pages_alloc(size_t size);
+// nop if mem == nullptr
+void aligned_large_pages_free(void* mem);
void dbg_hit_on(bool cond, int slot = 0);
void dbg_mean_of(int64_t value, int slot = 0);
#define sync_endl std::endl << IO_UNLOCK
-// align_ptr_up() : get the first aligned element of an array.
+// Get the first aligned element of an array.
// ptr must point to an array of size at least `sizeof(T) * N + alignment` bytes,
// where N is the number of elements in the array.
template<uintptr_t Alignment, typename T>
}
-// IsLittleEndian : true if and only if the binary is compiled on a little-endian machine
+// True if and only if the binary is compiled on a little-endian machine
static inline const union {
uint32_t i;
char c[4];
// cores. To overcome this, some special platform-specific API should be
// called to set group affinity for each thread. Original code from Texel by
// Peter Österlund.
-
namespace WinProcGroup {
void bindThisThread(size_t idx);
}