/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2020 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
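
// Reconstructed preamble: the cluster code is only compiled for MPI builds.
// The exact include set and the namespace nesting are assumptions based on the
// symbols used below (MPI calls, Threads, TT, Time, sync_cout, std utilities);
// KeyedTTEntry, MoveInfo, TTCacheSize and is_root() are expected from cluster.h.
#ifdef USE_MPI

#include <array>
#include <atomic>
#include <cassert>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <map>
#include <mutex>
#include <string>
#include <thread>
#include <vector>

#include <mpi.h>

#include "cluster.h"
#include "misc.h"
#include "thread.h"
#include "timeman.h"
#include "tt.h"

namespace Cluster {
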
// Total number of ranks and rank within the communicator
static int world_rank = MPI_PROC_NULL;
static int world_size = 0;

// Ranks exchange basic info through signals, using a dedicated communicator
static MPI_Comm signalsComm = MPI_COMM_NULL;
static MPI_Request reqSignals = MPI_REQUEST_NULL;
static uint64_t signalsCallCounter = 0;

// Signals are the number of nodes searched, stop, table base hits, and transposition table saves
enum Signals : int { SIG_NODES = 0, SIG_STOP = 1, SIG_TB = 2, SIG_TTS = 3, SIG_NB = 4 };
static uint64_t signalsSend[SIG_NB] = {};
static uint64_t signalsRecv[SIG_NB] = {};
static uint64_t nodesSearchedOthers = 0;
static uint64_t tbHitsOthers = 0;
static uint64_t TTsavesOthers = 0;
static uint64_t stopSignalsPosted = 0;

// The UCI threads of the ranks exchange input using a dedicated communicator
static MPI_Comm InputComm = MPI_COMM_NULL;

// Exchanging the bestMove requires a dedicated communicator and an MPI data type for MoveInfo
static MPI_Comm MoveComm = MPI_COMM_NULL;
static MPI_Datatype MIDatatype = MPI_DATATYPE_NULL;

// TT entries are communicated with a dedicated communicator.
// The receive buffer is used to gather information from all ranks.
// The TTCacheCounter tracks the number of local elements that are ready to be sent.
static MPI_Comm TTComm = MPI_COMM_NULL;
static std::array<std::vector<KeyedTTEntry>, 2> TTSendRecvBuffs;
static std::array<MPI_Request, 2> reqsTTSendRecv = {MPI_REQUEST_NULL, MPI_REQUEST_NULL};
static uint64_t sendRecvPosted = 0;
static std::atomic<uint64_t> TTCacheCounter = {};

/// Initialize MPI and associated data types. Note that the MPI library must be configured
/// to support MPI_THREAD_MULTIPLE, since multiple threads access MPI simultaneously.
void init() {

  int thread_support;
  MPI_Init_thread(nullptr, nullptr, MPI_THREAD_MULTIPLE, &thread_support);
  if (thread_support < MPI_THREAD_MULTIPLE)
  {
      std::cerr << "Stockfish requires support for MPI_THREAD_MULTIPLE."
                << std::endl;
      std::exit(EXIT_FAILURE);
  }

  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  const std::array<MPI_Aint, 5> MIdisps = {offsetof(MoveInfo, move),
                                           offsetof(MoveInfo, ponder),
                                           offsetof(MoveInfo, depth),
                                           offsetof(MoveInfo, score),
                                           offsetof(MoveInfo, rank)};
  MPI_Type_create_hindexed_block(5, 1, MIdisps.data(), MPI_INT, &MIDatatype);
  MPI_Type_commit(&MIDatatype);

  MPI_Comm_dup(MPI_COMM_WORLD, &InputComm);
  MPI_Comm_dup(MPI_COMM_WORLD, &TTComm);
  MPI_Comm_dup(MPI_COMM_WORLD, &MoveComm);
  MPI_Comm_dup(MPI_COMM_WORLD, &signalsComm);
}
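
// Note: the four duplicated communicators let the input relay, the TT exchange,
// the bestMove gathering, and the signal loop progress concurrently without
// their messages interfering, even though they all span the same set of ranks.
// The hindexed_block datatype above assumes all five MoveInfo members are ints
// at the given offsets; an illustrative compile-time check (not in the original
// source) could be:
//
//     static_assert(sizeof(MoveInfo) >= 5 * sizeof(int),
//                   "MIDatatype maps five MPI_INTs into MoveInfo");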

/// Finalize MPI and free the associated data types.
void finalize() {

  MPI_Type_free(&MIDatatype);

  MPI_Comm_free(&InputComm);
  MPI_Comm_free(&TTComm);
  MPI_Comm_free(&MoveComm);
  MPI_Comm_free(&signalsComm);

  MPI_Finalize();
}

/// Return the total number of ranks
int size() {
  return world_size;
}

/// Return the rank (index) of the process
int rank() {
  return world_rank;
}

/// The receive buffer depends on the number of MPI ranks and threads, resize as needed
void ttSendRecvBuff_resize(size_t nThreads) {

  for (int i : {0, 1})
  {
      TTSendRecvBuffs[i].resize(TTCacheSize * world_size * nThreads);
      std::fill(TTSendRecvBuffs[i].begin(), TTSendRecvBuffs[i].end(), KeyedTTEntry());
  }
}
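
// Sizing note: each buffer holds TTCacheSize entries per thread per rank, so
// the segment owned by a single rank (TTCacheSize * nThreads entries) matches
// recvBuffPerRankSize used in save() below when nThreads == Threads.size().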

/// As input is only received by the root (rank 0) of the cluster, this input must be relayed
/// to the UCI threads of all ranks, in order to set up the position, etc. We do this with a
/// dedicated getline implementation, where the root broadcasts the received information to
/// all other ranks.
bool getline(std::istream& input, std::string& str) {

  int size;
  int state;
  std::vector<char> vec;

  if (is_root())
  {
      state = static_cast<bool>(std::getline(input, str));
      vec.assign(str.begin(), str.end());
      size = vec.size();
  }

  // Some MPI implementations use busy-wait polling, while we need yielding as otherwise
  // the UCI thread on the non-root ranks would be consuming resources.
  static MPI_Request reqInput = MPI_REQUEST_NULL;
  MPI_Ibcast(&size, 1, MPI_INT, 0, InputComm, &reqInput);
  if (is_root())
      MPI_Wait(&reqInput, MPI_STATUS_IGNORE);
  else
  {
      while (true)
      {
          int flag;
          MPI_Test(&reqInput, &flag, MPI_STATUS_IGNORE);
          if (flag)
              break;
          else
              std::this_thread::sleep_for(std::chrono::milliseconds(10));
      }
  }

  // Broadcast the received string
  if (!is_root())
      vec.resize(size);
  MPI_Bcast(vec.data(), size, MPI_CHAR, 0, InputComm);
  if (!is_root())
      str.assign(vec.begin(), vec.end());
  MPI_Bcast(&state, 1, MPI_INT, 0, InputComm);

  return state;
}
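
// Example of the relay: if the root reads "position startpos moves e2e4", it
// first broadcasts the string length, then the characters, then the stream
// state, so every rank's UCI thread returns the identical line from getline().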

/// Sending part of the signal communication loop
void signals_send() {

  signalsSend[SIG_NODES] = Threads.nodes_searched();
  signalsSend[SIG_TB] = Threads.tb_hits();
  signalsSend[SIG_TTS] = Threads.TT_saves();
  signalsSend[SIG_STOP] = Threads.stop;
  MPI_Iallreduce(signalsSend, signalsRecv, SIG_NB, MPI_UINT64_T,
                 MPI_SUM, signalsComm, &reqSignals);
  ++signalsCallCounter;
}

/// Processing part of the signal communication loop.
/// For some counters (e.g. nodes) we only keep the sum over the other ranks,
/// so the local counters can be added at any time for more fine-grained reporting.
/// This is useful to indicate progress during early iterations, and to have
/// node counts that exactly match the non-MPI code in the single-rank case.
/// This call also propagates the stop signal between ranks.
void signals_process() {

  nodesSearchedOthers = signalsRecv[SIG_NODES] - signalsSend[SIG_NODES];
  tbHitsOthers = signalsRecv[SIG_TB] - signalsSend[SIG_TB];
  TTsavesOthers = signalsRecv[SIG_TTS] - signalsSend[SIG_TTS];
  stopSignalsPosted = signalsRecv[SIG_STOP];
  if (signalsRecv[SIG_STOP] > 0)
      Threads.stop = true;
}
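
// Worked example of the subtract-own-contribution scheme: with three ranks
// that have searched 100, 250 and 400 nodes, the Iallreduce sum delivers 750
// to every rank; rank 1 then stores nodesSearchedOthers = 750 - 250 = 500 and
// can add its own live counter whenever nodes_searched() is queried.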

/// Post a new round of the ring send/recv of TT entries: receive a buffer from
/// the previous rank while sending our prepared buffer to the next rank.
void sendrecv_post() {

  ++sendRecvPosted;
  MPI_Irecv(TTSendRecvBuffs[sendRecvPosted % 2].data(),
            TTSendRecvBuffs[sendRecvPosted % 2].size() * sizeof(KeyedTTEntry), MPI_BYTE,
            (rank() + size() - 1) % size(), 42, TTComm, &reqsTTSendRecv[0]);
  MPI_Isend(TTSendRecvBuffs[(sendRecvPosted + 1) % 2].data(),
            TTSendRecvBuffs[(sendRecvPosted + 1) % 2].size() * sizeof(KeyedTTEntry), MPI_BYTE,
            (rank() + 1) % size(), 42, TTComm, &reqsTTSendRecv[1]);
}
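
// With four ranks this forms a ring: rank 2 receives from rank 1 and sends to
// rank 3, while rank 3 wraps around and sends to rank 0. The two buffers
// alternate (sendRecvPosted % 2), so entries received from the previous rank
// in one round are forwarded to the next rank in the following round,
// propagating each rank's entries around the ring.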

/// During search, most message passing is asynchronous, but at the end of
/// search it makes sense to bring all ranks to a common, finalized state.
void signals_sync() {

  while (stopSignalsPosted < uint64_t(size()))
      signals_poll();

  // Finalize outstanding messages of the signal loops.
  // We might have issued one call less than needed on some ranks.
  uint64_t globalCounter;
  MPI_Allreduce(&signalsCallCounter, &globalCounter, 1, MPI_UINT64_T, MPI_MAX, MoveComm);
  if (signalsCallCounter < globalCounter)
  {
      MPI_Wait(&reqSignals, MPI_STATUS_IGNORE);
      signals_send();
  }
  assert(signalsCallCounter == globalCounter);
  MPI_Wait(&reqSignals, MPI_STATUS_IGNORE);
  signals_process();

  // Finalize outstanding messages in the sendRecv loop
  MPI_Allreduce(&sendRecvPosted, &globalCounter, 1, MPI_UINT64_T, MPI_MAX, MoveComm);
  while (sendRecvPosted < globalCounter)
  {
      MPI_Waitall(reqsTTSendRecv.size(), reqsTTSendRecv.data(), MPI_STATUSES_IGNORE);
      sendrecv_post();
  }
  assert(sendRecvPosted == globalCounter);
  MPI_Waitall(reqsTTSendRecv.size(), reqsTTSendRecv.data(), MPI_STATUSES_IGNORE);
}
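
// Example: with two ranks where rank 0 has posted 10 signal rounds and rank 1
// only 9, the MPI_MAX allreduce yields 10, so rank 1 completes its pending
// round and posts one more; only then do the collective calls match up on all
// ranks and the final waits can complete.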

/// Initialize the signal counters to zero.
void signals_init() {

  stopSignalsPosted = tbHitsOthers = TTsavesOthers = nodesSearchedOthers = 0;

  signalsSend[SIG_NODES] = signalsRecv[SIG_NODES] = 0;
  signalsSend[SIG_TB] = signalsRecv[SIG_TB] = 0;
  signalsSend[SIG_TTS] = signalsRecv[SIG_TTS] = 0;
  signalsSend[SIG_STOP] = signalsRecv[SIG_STOP] = 0;
}

/// Poll the signal loop, and start the next round as needed.
void signals_poll() {

  int flag;
  MPI_Test(&reqSignals, &flag, MPI_STATUS_IGNORE);
  if (flag)
  {
      signals_process();
      signals_send();
  }
}

/// Provide basic info related to the cluster performance: in particular, the number
/// of signals sent and signals per second (sps), the number of send/receive rounds
/// (sendRecvs) and entries exchanged per second (srpps), as well as the number of
/// TT saves and TT saves per second (TTSavesps). If srpps approximately equals
/// TTSavesps, the send/receive loop has enough bandwidth to keep up with the saves.
void cluster_info(Depth depth) {

  TimePoint elapsed = Time.elapsed() + 1;
  uint64_t TTSaves = TT_saves();

  sync_cout << "info depth " << depth << " cluster "
            << " signals " << signalsCallCounter << " sps " << signalsCallCounter * 1000 / elapsed
            << " sendRecvs " << sendRecvPosted << " srpps " << TTSendRecvBuffs[0].size() * sendRecvPosted * 1000 / elapsed
            << " TTSaves " << TTSaves << " TTSavesps " << TTSaves * 1000 / elapsed
            << sync_endl;
}

/// When a TT entry is saved, additional steps are taken if the entry is of sufficient depth.
/// If sufficient entries have been collected, a communication is initiated.
/// If a communication has been completed, the received results are saved to the TT.
void save(Thread* thread, TTEntry* tte,
          Key k, Value v, bool PvHit, Bound b, Depth d, Move m, Value ev) {

  // Standard save to the TT
  tte->save(k, v, PvHit, b, d, m, ev);

  // If the entry is of sufficient depth to be worth communicating, take action.
  if (d > 3) // Minimum-depth cutoff; the exact threshold is elided in this fragment (assumption)
  {
      // Count the TT saves for informational purposes: this should be relatively
      // similar to the number of entries we can send/recv.
      thread->TTsaves.fetch_add(1, std::memory_order_relaxed);

      // Add to the thread's send buffer; the locking here avoids races when the
      // master thread prepares the send buffer.
      {
          std::lock_guard<std::mutex> lk(thread->ttCache.mutex);
          thread->ttCache.buffer.replace(KeyedTTEntry(k, *tte));
          ++TTCacheCounter;
      }

      size_t recvBuffPerRankSize = Threads.size() * TTCacheSize;

      // Communicate on the main search thread, as soon as the threads combined
      // have collected sufficient data to fill the send buffers.
      if (thread == Threads.main() && TTCacheCounter > recvBuffPerRankSize)
      {
          // Test communication status
          int flag;
          MPI_Testall(reqsTTSendRecv.size(), reqsTTSendRecv.data(), &flag, MPI_STATUSES_IGNORE);

          // Current communication is complete
          if (flag)
          {
              // Save all received entries to the TT, and store our TT caches,
              // ready for the next round of communication.
              for (size_t irank = 0; irank < size_t(size()); ++irank)
              {
                  if (irank == size_t(rank())) // This is our part, fill the part of the buffer for sending
                  {
                      // Copy from the thread caches to the right spot in the buffer
                      size_t i = irank * recvBuffPerRankSize;
                      for (auto&& th : Threads)
                      {
                          std::lock_guard<std::mutex> lk(th->ttCache.mutex);

                          for (auto&& e : th->ttCache.buffer)
                              TTSendRecvBuffs[sendRecvPosted % 2][i++] = e;

                          // Reset the thread's send buffer
                          th->ttCache.buffer = {};
                      }

                      TTCacheCounter = 0;
                  }
                  else // Process data received from the corresponding rank.
                      for (size_t i = irank * recvBuffPerRankSize; i < (irank + 1) * recvBuffPerRankSize; ++i)
                      {
                          auto&& e = TTSendRecvBuffs[sendRecvPosted % 2][i];
                          bool found;
                          TTEntry* replace_tte;
                          replace_tte = TT.probe(e.first, found);
                          replace_tte->save(e.first, e.second.value(), e.second.is_pv(), e.second.bound(), e.second.depth(),
                                            e.second.move(), e.second.eval());
                      }
              }

              // Start the next communication
              sendrecv_post();

              // Force a check of time on the next occasion; the above actions might have taken some time.
              static_cast<MainThread*>(thread)->callsCnt = 0;
          }
      }
  }
}

/// Pick the bestMove across ranks, and send the associated info and PV to the root of the cluster.
/// Note that this bestMove and PV must be output by the root, to guarantee proper ordering of output.
/// TODO: update to the scheme in master. Can this use aggregation of votes?
void pick_moves(MoveInfo& mi, std::string& PVLine) {

  MoveInfo* pMoveInfo = NULL;
  if (is_root())
      pMoveInfo = (MoveInfo*)malloc(sizeof(MoveInfo) * size());

  MPI_Gather(&mi, 1, MIDatatype, pMoveInfo, 1, MIDatatype, 0, MoveComm);

  if (is_root())
  {
      // Register all proposed moves and find the minimum score.
      std::map<int, int> votes;
      int minScore = pMoveInfo[0].score;
      for (int i = 0; i < size(); ++i)
      {
          minScore = std::min(minScore, pMoveInfo[i].score);
          votes[pMoveInfo[i].move] = 0;
      }

      // Weight each rank's vote by its score margin over the minimum, plus its depth.
      for (int i = 0; i < size(); ++i)
          votes[pMoveInfo[i].move] += pMoveInfo[i].score - minScore + pMoveInfo[i].depth;

      // Pick the move with the highest aggregate vote.
      int bestVote = votes[pMoveInfo[0].move];
      for (int i = 0; i < size(); ++i)
          if (votes[pMoveInfo[i].move] > bestVote)
          {
              bestVote = votes[pMoveInfo[i].move];
              mi = pMoveInfo[i];
          }

      free(pMoveInfo);
  }
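
  // Worked example of the vote weighting (hypothetical numbers): with two ranks
  // proposing {move e2e4, score 100, depth 20} and {move d2d4, score 80, depth 22},
  // minScore is 80, so e2e4 receives (100 - 80) + 20 = 40 votes while d2d4
  // receives (80 - 80) + 22 = 22, and e2e4 is chosen.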

  // Send around the final result
  MPI_Bcast(&mi, 1, MIDatatype, 0, MoveComm);

  // Send the PV line to the root as needed
  if (mi.rank != 0 && mi.rank == rank()) {
      int size;
      std::vector<char> vec;
      vec.assign(PVLine.begin(), PVLine.end());
      size = vec.size();
      MPI_Send(&size, 1, MPI_INT, 0, 42, MoveComm);
      MPI_Send(vec.data(), size, MPI_CHAR, 0, 42, MoveComm);
  }
  if (mi.rank != 0 && is_root()) {
      int size;
      std::vector<char> vec;
      MPI_Recv(&size, 1, MPI_INT, mi.rank, 42, MoveComm, MPI_STATUS_IGNORE);
      vec.resize(size);
      MPI_Recv(vec.data(), size, MPI_CHAR, mi.rank, 42, MoveComm, MPI_STATUS_IGNORE);
      PVLine.assign(vec.begin(), vec.end());
  }
}
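
// After pick_moves() returns, every rank holds the winning MoveInfo (via the
// broadcast above), and the root additionally holds the corresponding PV line,
// so only the root prints bestmove/info, keeping the UCI output ordered.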

/// Return nodes searched (lazily updated cluster-wide in the signal loop)
uint64_t nodes_searched() {

  return nodesSearchedOthers + Threads.nodes_searched();
}

/// Return table base hits (lazily updated cluster-wide in the signal loop)
uint64_t tb_hits() {

  return tbHitsOthers + Threads.tb_hits();
}

/// Return the number of saves to the TT buffers (lazily updated cluster-wide in the signal loop)
uint64_t TT_saves() {

  return TTsavesOthers + Threads.TT_saves();
}

} // namespace Cluster

#else

#include "cluster.h"
#include "thread.h"

// Non-MPI fallback: these must mirror the Cluster:: API above so callers link
// against the same symbols in both builds.
namespace Cluster {

uint64_t nodes_searched() {

  return Threads.nodes_searched();
}

uint64_t tb_hits() {

  return Threads.tb_hits();
}

uint64_t TT_saves() {

  return Threads.TT_saves();
}

} // namespace Cluster

#endif // USE_MPI