Further refine SMP code
diff --git a/src/search.cpp b/src/search.cpp
index 76d0350c9c7284cf2e4ec400166174f63891c7f3..6e0552be64566baac7f6e8d908520447b2ff5f38 100644
--- a/src/search.cpp
+++ b/src/search.cpp
@@ -1043,8 +1043,8 @@ moves_loop: // When in check and at SpNode search starts from here
           &&  depth >= Threads.minimumSplitDepth
           &&  (   !thisThread->activeSplitPoint
                || !thisThread->activeSplitPoint->allSlavesSearching
-               || (   int(Threads.size()) > MAX_SLAVES_PER_SPLITPOINT
-                   && thisThread->activeSplitPoint->slavesCount == MAX_SLAVES_PER_SPLITPOINT))
+               || (   Threads.size() > MAX_SLAVES_PER_SPLITPOINT
+                   && thisThread->activeSplitPoint->slavesMask.count() == MAX_SLAVES_PER_SPLITPOINT))
           &&  thisThread->splitPointsSize < MAX_SPLITPOINTS_PER_THREAD)
       {
           assert(bestValue > -VALUE_INFINITE && bestValue < beta);
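The recurring change in this and the later hunks is that the separate slavesCount field is dropped and the slave count is derived directly from the slaves bitmask (a std::bitset, as the .count(), .none() and .set() calls in this diff suggest), so the counter can no longer drift out of sync with the mask. A minimal sketch of the idea; the helper name and bitset size are illustrative, not Stockfish identifiers:

    #include <bitset>
    #include <cstddef>

    // Illustrative only: with a per-split-point bitmask of attached slave
    // threads, the popcount of the mask already is the slave count, so a
    // separately maintained counter is redundant.
    inline std::size_t slave_count(const std::bitset<64>& slavesMask) {
        return slavesMask.count();
    }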
@@ -1524,9 +1524,9 @@ void Thread::idle_loop() {
 
   // Pointer 'this_sp' is not null only if we are called from split(), and not
   // at the thread creation. This means we are the split point's master.
-  SplitPoint* this_sp = splitPointsSize ? activeSplitPoint : NULL;
+  SplitPoint* this_sp = activeSplitPoint;
 
-  assert(!this_sp || (this_sp->masterThread == this && searching));
+  assert(!this_sp || (this_sp->master == this && searching));
 
   while (!exit)
   {
@@ -1536,6 +1536,7 @@ void Thread::idle_loop() {
           Threads.mutex.lock();
 
           assert(activeSplitPoint);
+
           SplitPoint* sp = activeSplitPoint;
 
           Threads.mutex.unlock();
@@ -1574,11 +1575,11 @@ void Thread::idle_loop() {
 
           // Wake up the master thread so to allow it to return from the idle
           // loop in case we are the last slave of the split point.
-          if (    this != sp->masterThread
-              &&  sp->slavesMask.none())
+          if (this != sp->master && sp->slavesMask.none())
           {
-              assert(!sp->masterThread->searching);
-              sp->masterThread->notify_one();
+              assert(!sp->master->searching);
+
+              sp->master->notify_one();
           }
 
           // After releasing the lock we can't access any SplitPoint related data
@@ -1589,33 +1590,33 @@ void Thread::idle_loop() {
           // Try to late join to another split point if none of its slaves has
           // already finished.
           SplitPoint* bestSp = NULL;
-          Thread* bestThread = NULL;
           int bestScore = INT_MAX;
 
           for (size_t i = 0; i < Threads.size(); ++i)
           {
-              const int size = Threads[i]->splitPointsSize; // Local copy
+              const size_t size = Threads[i]->splitPointsSize; // Local copy
               sp = size ? &Threads[i]->splitPoints[size - 1] : NULL;
 
               if (   sp
                   && sp->allSlavesSearching
-                  && sp->slavesCount < MAX_SLAVES_PER_SPLITPOINT
+                  && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
                   && available_to(Threads[i]))
               {
                   assert(this != Threads[i]);
                   assert(!(this_sp && this_sp->slavesMask.none()));
+                  assert(Threads.size() > 2);
 
-                  // Compute the recursive split points chain size
+                  // Prefer to join to SP with few parents to reduce the probability
+                  // that a cut-off occurs above us, and hence we waste our work.
                   int level = -1;
                   for (SplitPoint* spp = Threads[i]->activeSplitPoint; spp; spp = spp->parentSplitPoint)
                       level++;
 
-                  int score = level * 256 * 256 + sp->slavesCount * 256 - sp->depth * 1;
+                  int score = level * 256 * 256 + (int)sp->slavesMask.count() * 256 - sp->depth * 1;
 
                   if (score < bestScore)
                   {
                       bestSp = sp;
-                      bestThread = Threads[i];
                       bestScore = score;
                   }
               }
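The score computed above lets the split-point chain length dominate, then the number of slaves already attached, with the split point's depth as a tie-breaker in favour of deeper subtrees. A small worked example with made-up values: a split point with no parent chain, two slaves and depth 14 scores 2*256 - 14 = 498, while one nested two levels deep with one slave and depth 20 scores 2*65536 + 256 - 20 = 131308, so the shallower chain is joined first. A sketch of the ranking as a stand-alone function; the name and parameters are illustrative, not Stockfish identifiers:

    // Illustrative only: lower score is preferred. 'level' is the length of the
    // recursive split-point chain, 'slaveCount' the slaves already attached,
    // and 'spDepth' the remaining search depth at the split point.
    inline int late_join_score(int level, int slaveCount, int spDepth) {
        return level * 256 * 256 + slaveCount * 256 - spDepth;
    }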
@@ -1630,11 +1631,10 @@ void Thread::idle_loop() {
               sp->mutex.lock();
 
               if (   sp->allSlavesSearching
-                  && sp->slavesCount < MAX_SLAVES_PER_SPLITPOINT
-                  && available_to(bestThread))
+                  && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
+                  && available_to(sp->master))
               {
                   sp->slavesMask.set(idx);
-                  sp->slavesCount++;
                   activeSplitPoint = sp;
                   searching = true;
               }
@@ -1644,7 +1644,7 @@ void Thread::idle_loop() {
           }
       }
 
-      // Grab the lock to avoid races with Thread::notify_one()
+      // Avoid races with notify_one() fired from last slave of the split point
       mutex.lock();
 
       // If we are master and all slaves have finished then exit idle_loop
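As the reworded comment says, the lock is taken so that the master's check for remaining slaves and its subsequent wait cannot race with the notify_one() fired by the last slave. A generic sketch of that pattern, using the standard library rather than the surrounding Stockfish code, with all names illustrative:

    #include <condition_variable>
    #include <mutex>

    std::mutex mtx;
    std::condition_variable cv;
    bool allSlavesFinished = false;  // set by the last slave while holding 'mtx', then notified

    void master_wait() {
        // Illustrative only: the check and the wait happen under the same mutex
        // the notifier holds, so the wake-up cannot be lost between them.
        std::unique_lock<std::mutex> lk(mtx);
        cv.wait(lk, [] { return allSlavesFinished; });
    }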
@@ -1706,7 +1706,7 @@ void check_time() {
       // Loop across all split points and sum accumulated SplitPoint nodes plus
       // all the currently active positions nodes.
       for (size_t i = 0; i < Threads.size(); ++i)
-          for (int j = 0; j < Threads[i]->splitPointsSize; ++j)
+          for (size_t j = 0; j < Threads[i]->splitPointsSize; ++j)
           {
               SplitPoint& sp = Threads[i]->splitPoints[j];