git.sesse.net Git - stockfish/commitdiff
[cluster] Add depth condition to cluster TT saves.
authorJoost VandeVondele <Joost.VandeVondele@gmail.com>
Wed, 5 Dec 2018 06:26:08 +0000 (07:26 +0100)
committerStéphane Nicolet <stephanenicoletsuriphone@gmail.com>
Sat, 29 Dec 2018 14:34:56 +0000 (15:34 +0100)
Since the logic for saving moves in the send buffer and the associated rehashing is expensive, only do it for TT stores of sufficient depth.

quite some gain in local testing with 4 ranks against the previous version.
Elo difference: 288.84 +/- 21.98

This starts to make the branch useful, but for on-node runs a difference remains compared to the standard threading.

src/cluster.cpp

index a0a2be71317dbafca760364730ecd655d647f5ec..5dbfcaf322417cff3e7f7fe1f5372535d78de45c 100644 (file)
@@ -175,51 +175,55 @@ int rank() {
 
 void save(Thread* thread, TTEntry* tte,
           Key k, Value v, Bound b, Depth d, Move m, Value ev) {
+
   tte->save(k, v, b, d, m, ev);
 
-  // Try to add to thread's send buffer
+  if (d > 5 * ONE_PLY)
   {
-      std::lock_guard<Mutex> lk(thread->ttBuffer.mutex);
-      thread->ttBuffer.buffer.replace(KeyedTTEntry(k,*tte));
-  }
-
-  // Communicate on main search thread
-  if (thread == Threads.main()) {
-      static MPI_Request req = MPI_REQUEST_NULL;
-      static TTSendBuffer<TTSendBufferSize> send_buff = {};
-      int flag;
-      bool found;
-      TTEntry* replace_tte;
-
-      // Test communication status
-      MPI_Test(&req, &flag, MPI_STATUS_IGNORE);
-
-      // Current communication is complete
-      if (flag) {
-          // Save all recieved entries
-          for (auto&& e : TTBuff) {
-              replace_tte = TT.probe(e.first, found);
-              replace_tte->save(e.first, e.second.value(), e.second.bound(), e.second.depth(),
-                                e.second.move(), e.second.eval());
-          }
-
-          // Reset send buffer
-          send_buff = {};
-
-          // Build up new send buffer: best 16 found across all threads
-          for (auto&& th : Threads) {
-              std::lock_guard<Mutex> lk(th->ttBuffer.mutex);
-              for (auto&& e : th->ttBuffer.buffer)
-                  send_buff.replace(e);
-              // Reset thread's send buffer
-              th->ttBuffer.buffer = {};
-          }
-
-          // Start next communication
-          MPI_Iallgather(send_buff.data(), send_buff.size(), TTEntryDatatype,
-                         TTBuff.data(), TTSendBufferSize, TTEntryDatatype,
-                         TTComm, &req);
-      }
+     // Try to add to thread's send buffer
+     {
+         std::lock_guard<Mutex> lk(thread->ttBuffer.mutex);
+         thread->ttBuffer.buffer.replace(KeyedTTEntry(k,*tte));
+     }
+
+     // Communicate on main search thread
+     if (thread == Threads.main()) {
+         static MPI_Request req = MPI_REQUEST_NULL;
+         static TTSendBuffer<TTSendBufferSize> send_buff = {};
+         int flag;
+         bool found;
+         TTEntry* replace_tte;
+
+         // Test communication status
+         MPI_Test(&req, &flag, MPI_STATUS_IGNORE);
+
+         // Current communication is complete
+         if (flag) {
+             // Save all recieved entries
+             for (auto&& e : TTBuff) {
+                 replace_tte = TT.probe(e.first, found);
+                 replace_tte->save(e.first, e.second.value(), e.second.bound(), e.second.depth(),
+                                   e.second.move(), e.second.eval());
+             }
+
+             // Reset send buffer
+             send_buff = {};
+
+             // Build up new send buffer: best 16 found across all threads
+             for (auto&& th : Threads) {
+                 std::lock_guard<Mutex> lk(th->ttBuffer.mutex);
+                 for (auto&& e : th->ttBuffer.buffer)
+                     send_buff.replace(e);
+                 // Reset thread's send buffer
+                 th->ttBuffer.buffer = {};
+             }
+
+             // Start next communication
+             MPI_Iallgather(send_buff.data(), send_buff.size(), TTEntryDatatype,
+                            TTBuff.data(), TTSendBufferSize, TTEntryDatatype,
+                            TTComm, &req);
+         }
+     }
   }
 }