2 Stockfish, a UCI chess playing engine derived from Glaurung 2.1
3 Copyright (C) 2004-2008 Tord Romstad (Glaurung author)
4 Copyright (C) 2008-2015 Marco Costalba, Joona Kiiski, Tord Romstad
5 Copyright (C) 2015-2018 Marco Costalba, Joona Kiiski, Gary Linscott, Tord Romstad
7 Stockfish is free software: you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation, either version 3 of the License, or
10 (at your option) any later version.
12 Stockfish is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
// Cluster-global MPI state.  NOTE(review): leading numerals on each line are
// original-line-number artifacts of the extraction, preserved verbatim.
38 static int world_rank = MPI_PROC_NULL;  // our rank in MPI_COMM_WORLD (set at init)
39 static int world_size = 0;              // number of ranks in the cluster (set at init)
40 static bool stop_signal = false;        // true once a cluster-wide stop has been handled
41 static MPI_Request reqStop = MPI_REQUEST_NULL;  // pending Ibarrier used as the stop signal
// Dedicated communicators, each duplicated from MPI_COMM_WORLD at init, so
// the independent message streams cannot interfere with one another.
43 static MPI_Comm InputComm = MPI_COMM_NULL;  // stdin line distribution
44 static MPI_Comm TTComm = MPI_COMM_NULL;     // transposition-table entry exchange
45 static MPI_Comm MoveComm = MPI_COMM_NULL;   // best-move reduction
46 static MPI_Comm StopComm = MPI_COMM_NULL;   // stop signalling
// Derived MPI datatype describing TTEntry, and the receive buffer for the
// TT all-gather (resized to TTSendBufferSize * world_size at init).
48 static MPI_Datatype TTEntryDatatype = MPI_DATATYPE_NULL;
49 static std::vector<TTEntry> TTBuff;
// User-defined reduction operator (BestMove) and MPI datatype for MoveInfo.
51 static MPI_Op BestMoveOp = MPI_OP_NULL;
52 static MPI_Datatype MIDatatype = MPI_DATATYPE_NULL;
// Custom MPI reduction combining per-rank MoveInfo results element-wise:
// an entry from 'in' wins over the one in 'inout' when it has both greater
// or equal depth and greater or equal score.  Aborts the job if invoked
// with an unexpected datatype (sanity check on the registered op).
// NOTE(review): this chunk is truncated — the branch body of the comparison
// and the function's closing brace are not visible here.
54 static void BestMove(void* in, void* inout, int* len, MPI_Datatype* datatype) {
55 if (*datatype != MIDatatype)
56 MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
57 MoveInfo* l = static_cast<MoveInfo*>(in);
58 MoveInfo* r = static_cast<MoveInfo*>(inout);
59 for (int i=0; i < *len; ++i)
61 if (l[i].depth >= r[i].depth && l[i].score >= r[i].score)
// Interior of the cluster initialisation function (its signature lies
// outside this chunk).  Builds the MPI datatypes for TTEntry and MoveInfo,
// registers the BestMove reduction, and duplicates MPI_COMM_WORLD into one
// communicator per traffic class.
68 constexpr std::array<int, 6> TTblocklens = {1, 1, 1, 1, 1, 1};
// Byte offsets of the six TTEntry fields making up the struct datatype.
69 const std::array<MPI_Aint, 6> TTdisps = {offsetof(TTEntry, key16),
70 offsetof(TTEntry, move16),
71 offsetof(TTEntry, value16),
72 offsetof(TTEntry, eval16),
73 offsetof(TTEntry, genBound8),
74 offsetof(TTEntry, depth8)};
// NOTE(review): the remaining five element types of this initializer are
// missing from the chunk (extraction gap between original lines 75 and 81).
75 const std::array<MPI_Datatype, 6> TTtypes = {MPI_UINT16_T,
// MoveInfo is exchanged as three MPI_INTs at these displacements.
81 const std::array<MPI_Aint, 3> MIdisps = {offsetof(MoveInfo, depth),
82 offsetof(MoveInfo, score),
83 offsetof(MoveInfo, rank)};
// Full MPI_THREAD_MULTIPLE support is required (per the error message
// below); refuse to run with anything weaker.
85 MPI_Init_thread(nullptr, nullptr, MPI_THREAD_MULTIPLE, &thread_support);
86 if (thread_support < MPI_THREAD_MULTIPLE)
88 std::cerr << "Stockfish requires support for MPI_THREAD_MULTIPLE."
90 std::exit(EXIT_FAILURE);
93 MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
94 MPI_Comm_size(MPI_COMM_WORLD, &world_size);
// One TTSendBufferSize-sized slot per rank on the all-gather receive side.
96 TTBuff.resize(TTSendBufferSize * world_size);
// NOTE(review): the trailing arguments of this call (count/output handle on
// original line 99) are missing from the chunk.
98 MPI_Type_create_struct(6, TTblocklens.data(), TTdisps.data(), TTtypes.data(),
100 MPI_Type_commit(&TTEntryDatatype);
102 MPI_Type_create_hindexed_block(3, 1, MIdisps.data(), MPI_INT, &MIDatatype);
103 MPI_Type_commit(&MIDatatype);
// BestMove is registered as non-commutative (second argument false).
104 MPI_Op_create(BestMove, false, &BestMoveOp);
// Separate duplicated communicators keep the message streams independent.
106 MPI_Comm_dup(MPI_COMM_WORLD, &InputComm);
107 MPI_Comm_dup(MPI_COMM_WORLD, &TTComm);
108 MPI_Comm_dup(MPI_COMM_WORLD, &MoveComm);
109 MPI_Comm_dup(MPI_COMM_WORLD, &StopComm);
// Cluster-aware getline: one rank reads a line from the stream, then the
// length, raw characters and stream state are broadcast (root 0) over
// InputComm so every rank returns the same line and the same bool.
// NOTE(review): several original lines are missing from this chunk —
// presumably only rank 0 executes the std::getline below, but that guard
// is outside the visible lines; confirm against the full file.
116 bool getline(std::istream& input, std::string& str) {
118 std::vector<char> vec;
123 state = static_cast<bool>(std::getline(input, str));
124 vec.assign(str.begin(), str.end());
128 // Some MPI implementations use busy-wait polling, while we need yielding
// Non-blocking barrier plus a test/sleep loop: lets waiting ranks yield the
// CPU (10 ms sleeps) instead of spinning inside a blocking MPI_Wait.
129 static MPI_Request reqInput = MPI_REQUEST_NULL;
130 MPI_Ibarrier(InputComm, &reqInput);
132 MPI_Wait(&reqInput, MPI_STATUS_IGNORE);
136 MPI_Test(&reqInput, &flag, MPI_STATUS_IGNORE);
140 std::this_thread::sleep_for(std::chrono::milliseconds(10));
// Broadcast length first, then the characters, then the stream state.
144 MPI_Bcast(&size, 1, MPI_UNSIGNED_LONG, 0, InputComm);
147 MPI_Bcast(vec.data(), size, MPI_CHAR, 0, InputComm);
149 str.assign(vec.begin(), vec.end());
150 MPI_Bcast(&state, 1, MPI_CXX_BOOL, 0, InputComm);
// Stop-signal machinery (interiors of functions whose signatures fall
// outside this chunk).  A non-blocking barrier on StopComm doubles as the
// cluster-wide stop notification: every rank posts the Ibarrier up front,
// and its completion (observed via MPI_Test) means some rank entered it to
// request a stop.  NOTE(review): control flow here is incomplete — hedged.
157 // Start listening to stop signal
159 MPI_Ibarrier(StopComm, &reqStop);
// Local search decided to stop: join the barrier and wait for all ranks,
// completing the stop handshake cluster-wide.
164 if (!stop_signal && Threads.stop) {
165 // Signal the cluster about stopping
167 MPI_Ibarrier(StopComm, &reqStop);
168 MPI_Wait(&reqStop, MPI_STATUS_IGNORE);
173 // Check if we've received any stop signal
174 MPI_Test(&reqStop, &flagStop, MPI_STATUS_IGNORE);
// Stores a TT entry locally and queues it for cluster distribution via the
// calling thread's per-thread send buffer.  When called from the main
// search thread, additionally drives the non-blocking all-gather that
// exchanges buffered entries between ranks: once the previous exchange
// completes, received entries are merged into the local TT, a fresh send
// buffer is collected from all threads, and the next exchange is started.
// NOTE(review): this chunk is missing several original lines (declarations
// of 'flag'/'found', closing braces, the tail of the Iallgather call), so
// the function is shown incomplete here.
188 void save(Thread* thread, TTEntry* tte,
189 Key k, Value v, Bound b, Depth d, Move m, Value ev, uint8_t g) {
190 tte->save(k, v, b, d, m, ev, g);
191 // Try to add to thread's send buffer
193 std::lock_guard<Mutex> lk(thread->ttBuffer.mutex);
194 thread->ttBuffer.buffer.replace(*tte);
197 // Communicate on main search thread
198 if (thread == Threads.main()) {
// Persist across calls: one in-flight request and one staging buffer.
199 static MPI_Request req = MPI_REQUEST_NULL;
200 static TTSendBuffer<TTSendBufferSize> send_buff = {};
203 TTEntry* replace_tte;
205 // Test communication status
206 MPI_Test(&req, &flag, MPI_STATUS_IGNORE);
208 // Current communication is complete
210 // Save all received entries
211 for (auto&& e : TTBuff) {
212 replace_tte = TT.probe(e.key(), found);
213 replace_tte->save(e.key(), e.value(), e.bound(), e.depth(),
214 e.move(), e.eval(), e.gen());
220 // Build up new send buffer: best 16 found across all threads
221 for (auto&& th : Threads) {
222 std::lock_guard<Mutex> lk(th->ttBuffer.mutex);
223 for (auto&& e : th->ttBuffer.buffer)
224 send_buff.replace(e);
225 // Reset thread's send buffer
226 th->ttBuffer.buffer = {};
229 // Start next communication
// Non-blocking all-gather: each rank contributes its send buffer and
// receives TTSendBufferSize entries from every rank into TTBuff.
230 MPI_Iallgather(send_buff.data(), send_buff.size(), TTEntryDatatype,
231 TTBuff.data(), TTSendBufferSize, TTEntryDatatype,
// Reduces each rank's candidate MoveInfo in place across MoveComm using the
// custom BestMove operator; after the allreduce every rank holds the same
// cluster-wide best move.  NOTE(review): the function's closing brace lies
// outside this chunk.
237 void reduce_moves(MoveInfo& mi) {
238 MPI_Allreduce(MPI_IN_PLACE, &mi, 1, MIDatatype, BestMoveOp, MoveComm);