Fix endless reparenting loop
authorMarco Costalba <mcostalba@gmail.com>
Tue, 17 Apr 2012 07:11:34 +0000 (09:11 +0200)
committerMarco Costalba <mcostalba@gmail.com>
Tue, 17 Apr 2012 17:51:49 +0000 (18:51 +0100)
The check for detecting when a split point has all the
slaves still running is done with:

   slavesMask == allSlavesMask

When a thread reparents, slavesMask is increased, then, if
the same thread finishes, because there are no more moves,
slavesMask returns to its original value and the above condition
becomes true again. As a result the just-finished thread immediately
reparents again with the same split point, then starts and
then immediately exits in a tight loop that ends only when a
second slave finishes, so that slavesMask decrements and the
condition becomes false. This gives a spurious and anomalously
high number of faked reparents.

With this patch, that rewrites the logic to avoid this pitfall,
the reparenting success rate drops to a more realistic 5-10%
for 4 threads case.

As a side effect note that now there is no more the limit of
maxThreadsPerSplitPoint when reparenting.

No functional change.

Signed-off-by: Marco Costalba <mcostalba@gmail.com>
src/search.cpp
src/thread.cpp
src/thread.h

index 61ac9a6..21d5d78 100644 (file)
@@ -1844,6 +1844,7 @@ void Thread::idle_loop(SplitPoint* sp_master) {
           assert(is_searching);
 
           is_searching = false;
+          sp->allSlavesRunning = false;
           sp->slavesMask &= ~(1ULL << idx);
           sp->nodes += pos.nodes_searched();
 
@@ -1860,20 +1861,19 @@ void Thread::idle_loop(SplitPoint* sp_master) {
           // unsafe because if we are exiting there is a chance are already freed.
           lock_release(sp->lock);
 
-          // Try to reparent to another split point
+          // Try to reparent to the first split point, with still all slaves
+          // running, where we are available as a possible slave.
           for (int i = 0; i < Threads.size(); i++)
           {
               Thread* th = &Threads[i];
               int spCnt = th->splitPointsCnt;
               SplitPoint* latest = &th->splitPoints[spCnt ? spCnt - 1 : 0];
 
-              // Find the first split point with still all slaves running
-              // where we are available as a possible slave.
               if (    this->is_available_to(th)
                   &&  spCnt > 0
                   && !th->cutoff_occurred()
-                  &&  latest->slavesMask == latest->allSlavesMask
-                  &&  more_than_one(latest->allSlavesMask))
+                  &&  latest->allSlavesRunning
+                  &&  more_than_one(latest->slavesMask))
               {
                   lock_grab(latest->lock);
                   lock_grab(Threads.splitLock);
@@ -1883,10 +1883,10 @@ void Thread::idle_loop(SplitPoint* sp_master) {
                   if (    this->is_available_to(th)
                       &&  spCnt == th->splitPointsCnt
                       && !th->cutoff_occurred()
-                      &&  latest->slavesMask == latest->allSlavesMask
-                      &&  more_than_one(latest->allSlavesMask))
+                      &&  latest->allSlavesRunning
+                      &&  more_than_one(latest->slavesMask))
                   {
-                      latest->slavesMask |= 1ULL << idx; // allSlavesMask is not updated
+                      latest->slavesMask |= 1ULL << idx;
                       curSplitPoint = latest;
                       is_searching = true;
                   }
index a644cae..631834d 100644 (file)
@@ -319,7 +319,7 @@ Value ThreadsManager::split(Position& pos, Stack* ss, Value alpha, Value beta,
   sp->master = master;
   sp->cutoff = false;
   sp->slavesMask = 1ULL << master->idx;
-  sp->allSlavesMask = 1ULL << master->idx;
+  sp->allSlavesRunning = true;
   sp->depth = depth;
   sp->bestMove = *bestMove;
   sp->threatMove = threatMove;
@@ -348,7 +348,6 @@ Value ThreadsManager::split(Position& pos, Stack* ss, Value alpha, Value beta,
       if (threads[i]->is_available_to(master))
       {
           sp->slavesMask |= 1ULL << i;
-          sp->allSlavesMask |= 1ULL << i;
           threads[i]->curSplitPoint = sp;
           threads[i]->is_searching = true; // Slave leaves idle_loop()
 
@@ -356,10 +355,7 @@ Value ThreadsManager::split(Position& pos, Stack* ss, Value alpha, Value beta,
               threads[i]->wake_up();
 
           if (++slavesCnt + 1 >= maxThreadsPerSplitPoint) // Master is always included
-          {
-              sp->allSlavesMask = 0; // Disable reparenting to this split point
               break;
-          }
       }
 
   master->splitPointsCnt++;
index d60dd5f..9f0bb24 100644 (file)
@@ -51,13 +51,13 @@ struct SplitPoint {
   // Shared data
   Lock lock;
   volatile uint64_t slavesMask;
-  volatile uint64_t allSlavesMask;
   volatile int64_t nodes;
   volatile Value alpha;
   volatile Value bestValue;
   volatile Move bestMove;
   volatile int moveCount;
   volatile bool cutoff;
+  volatile bool allSlavesRunning;
 };