From 166bf90e4172b77aa0c420ed271fa55e92301a70 Mon Sep 17 00:00:00 2001
From: "Steinar H. Gunderson"
Date: Wed, 29 Aug 2018 02:00:14 +0200
Subject: [PATCH] Shrink the hash table of tablebases back to 4096 entries

There is no need to make this as large as 65536 just for the sake of
the single 7-man tablebase that happens to have the key 0xf9247fff.
Idea for the fix by Ronald de Man, who suggested simply to allow more
buckets past the end.

We also implement Robin Hood hashing for the hash table, which takes
the worst-case search for full 7-man tablebases down from 68 to 11
probes (it also takes the average probe length from 2.06 to 2.05). For
a table with 8K entries, the corresponding numbers would be worst-case
from 9 to 4, with average from 1.30 to 1.29.

https://github.com/official-stockfish/Stockfish/pull/1747

No functional change
---
 Readme.md              |  2 +-
 src/syzygy/tbprobe.cpp | 25 +++++++++++++++++++------
 2 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/Readme.md b/Readme.md
index 46f26090..5819d99d 100644
--- a/Readme.md
+++ b/Readme.md
@@ -59,7 +59,7 @@ The "SyzygyProbeLimit" option should normally be left at its default value.
 
 **What to expect**
 If the engine is searching a position that is not in the tablebases (e.g.
-a position with 7 pieces), it will access the tablebases during the search.
+a position with 8 pieces), it will access the tablebases during the search.
 If the engine reports a very large score (typically 123.xx), this means
 that it has found a winning line into a tablebase position.
 
diff --git a/src/syzygy/tbprobe.cpp b/src/syzygy/tbprobe.cpp
index dc7c523a..235fe195 100644
--- a/src/syzygy/tbprobe.cpp
+++ b/src/syzygy/tbprobe.cpp
@@ -384,22 +384,35 @@ class TBTables {
 
     typedef std::tuple<Key, TBTable<WDL>*, TBTable<DTZ>*> Entry;
 
-    static const int Size = 1 << 16; // 64K table, indexed by key's 16 lsb
+    static constexpr int Size = 1 << 12; // 4K table, indexed by key's 12 lsb
+    static constexpr int Overflow = 1;  // Number of elements allowed to map to the last bucket
 
-    Entry hashTable[Size];
+    Entry hashTable[Size + Overflow];
 
     std::deque<TBTable<WDL>> wdlTable;
     std::deque<TBTable<DTZ>> dtzTable;
 
     void insert(Key key, TBTable<WDL>* wdl, TBTable<DTZ>* dtz) {
-        Entry* entry = &hashTable[(uint32_t)key & (Size - 1)];
+        uint32_t homeBucket = (uint32_t)key & (Size - 1);
+        Entry entry = std::make_tuple(key, wdl, dtz);
 
         // Ensure last element is empty to avoid overflow when looking up
-        for ( ; entry - hashTable < Size - 1; ++entry)
-            if (std::get<KEY>(*entry) == key || !std::get<WDL>(*entry)) {
-                *entry = std::make_tuple(key, wdl, dtz);
+        for (uint32_t bucket = homeBucket; bucket < Size + Overflow - 1; ++bucket) {
+            Key otherKey = std::get<KEY>(hashTable[bucket]);
+            if (otherKey == key || !std::get<WDL>(hashTable[bucket])) {
+                hashTable[bucket] = entry;
                 return;
             }
+
+            // Robin Hood hashing: If we've probed for longer than this element,
+            // insert here and search for a new spot for the other element instead.
+            uint32_t otherHomeBucket = (uint32_t)otherKey & (Size - 1);
+            if (otherHomeBucket > homeBucket) {
+                swap(entry, hashTable[bucket]);
+                key = otherKey;
+                homeBucket = otherHomeBucket;
+            }
+        }
         std::cerr << "TB hash table size too low!" << std::endl;
         exit(1);
     }
-- 
2.39.2
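
Note (not part of the patch): for readers who want to play with the idea outside the engine, below is a minimal standalone sketch of the same Robin Hood insertion rule the diff applies. Probing is linear and never wraps, and an incoming entry steals a bucket whose occupant has probed less (its home bucket lies further to the right), re-inserting the displaced occupant further along. This is not Stockfish code: the class RobinHoodTable, the Entry layout and the toy keys in main() are made up for illustration; only Size, Overflow and the loop bound mirror the patch above. It should build with any C++11 compiler, e.g. g++ -std=c++11.

// robin_hood_sketch.cpp -- illustrative only, not part of the patch.
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <utility>

class RobinHoodTable {

    struct Entry { uint64_t key; int value; bool used; };

    static constexpr int Size = 1 << 12;  // 4K buckets, indexed by the key's 12 lsb
    static constexpr int Overflow = 1;    // extra bucket(s) past the end, as in the patch

    Entry table[Size + Overflow] = {};    // zero-initialized: every bucket starts unused

public:
    void insert(uint64_t key, int value) {
        Entry entry{key, value, true};
        uint32_t homeBucket = (uint32_t)entry.key & (Size - 1);

        // The very last bucket is never written, so lookups always hit an empty slot.
        for (uint32_t bucket = homeBucket; bucket < Size + Overflow - 1; ++bucket) {
            if (!table[bucket].used || table[bucket].key == entry.key) {
                table[bucket] = entry;
                return;
            }

            // Robin Hood rule: if the occupant's home bucket lies further right,
            // it has probed less than we have, so take its slot and carry the
            // displaced entry forward instead.
            uint32_t otherHomeBucket = (uint32_t)table[bucket].key & (Size - 1);
            if (otherHomeBucket > homeBucket) {
                std::swap(entry, table[bucket]);
                homeBucket = otherHomeBucket;
            }
        }
        std::cerr << "table too small\n";
        std::exit(1);
    }

    // Probe from the home bucket until the key or an empty bucket is found.
    const int* find(uint64_t key) const {
        for (uint32_t bucket = (uint32_t)key & (Size - 1); ; ++bucket) {
            if (table[bucket].used && table[bucket].key == key)
                return &table[bucket].value;
            if (!table[bucket].used)
                return nullptr;
        }
    }
};

int main() {
    RobinHoodTable t;
    t.insert(0x10abc, 1);   // home bucket 0xabc
    t.insert(0x20abd, 2);   // home bucket 0xabd
    t.insert(0x30abc, 3);   // home bucket 0xabc: displaces the entry sitting at 0xabd
    std::cout << *t.find(0x10abc) << ' ' << *t.find(0x20abd) << ' '
              << *t.find(0x30abc) << '\n';   // prints: 1 2 3
}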