Use spinlock instead of mutex for Threads and SplitPoint
src/search.cpp
index bc41978f2041cc1d4ac6f46aa40f6763eaea707c..5dacc877498796a6a3e09144547e95ecf04993be 100644
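The hunks below swap every std::mutex lock()/unlock() pair on the split-point hot path for a spinlock's acquire()/release(). The diff shows only the call sites, not the Spinlock class itself; as a rough sketch, assuming a test-and-test-and-set design (the memory orders and the yield are illustrative, not taken from this commit), the lock being relied on looks like this:

    #include <atomic>
    #include <thread>

    // Sketch of a spinlock with the acquire()/release() interface used below.
    class Spinlock {

        std::atomic_int lock;

    public:
        Spinlock() { lock = 1; } // 1 == unlocked

        void acquire() {
            // fetch_sub() takes the lock only if it was 1 (unlocked); losers
            // spin on a cheap relaxed load, yielding to sibling hyperthreads.
            while (lock.fetch_sub(1, std::memory_order_acquire) != 1)
                while (lock.load(std::memory_order_relaxed) <= 0)
                    std::this_thread::yield();
        }

        void release() { lock.store(1, std::memory_order_release); }
    };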
@@ -765,7 +765,7 @@ moves_loop: // When in check and at SpNode search starts from here
               continue;
 
           moveCount = ++splitPoint->moveCount;
-          splitPoint->mutex.unlock();
+          splitPoint->spinlock.release();
       }
       else
           ++moveCount;
@@ -834,7 +834,7 @@ moves_loop: // When in check and at SpNode search starts from here
               && moveCount >= FutilityMoveCounts[improving][depth])
           {
               if (SpNode)
-                  splitPoint->mutex.lock();
+                  splitPoint->spinlock.acquire();
 
               continue;
           }
@@ -853,7 +853,7 @@ moves_loop: // When in check and at SpNode search starts from here
 
                   if (SpNode)
                   {
-                      splitPoint->mutex.lock();
+                      splitPoint->spinlock.acquire();
                       if (bestValue > splitPoint->bestValue)
                           splitPoint->bestValue = bestValue;
                   }
@@ -865,7 +865,7 @@ moves_loop: // When in check and at SpNode search starts from here
           if (predictedDepth < 4 * ONE_PLY && pos.see_sign(move) < VALUE_ZERO)
           {
               if (SpNode)
-                  splitPoint->mutex.lock();
+                  splitPoint->spinlock.acquire();
 
               continue;
           }
@@ -965,7 +965,7 @@ moves_loop: // When in check and at SpNode search starts from here
       // Step 18. Check for new best move
       if (SpNode)
       {
-          splitPoint->mutex.lock();
+          splitPoint->spinlock.acquire();
           bestValue = splitPoint->bestValue;
           alpha = splitPoint->alpha;
       }
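All five hunks above follow one locking discipline: at a split point (SpNode) the moves loop runs with the split point's lock held at its top. The first hunk releases it once moveCount has been bumped, the pruning continues re-acquire it before jumping back to the loop head, and Step 18 re-acquires it to re-read bestValue and alpha before publishing a new best move. The conversion is mechanical: only the lock type changes, not the discipline.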
@@ -1517,21 +1517,22 @@ void Thread::idle_loop() {
 
   // Pointer 'this_sp' is not null only if we are called from split(), and
   // not at thread creation. This means we are the split point's master.
-  SplitPoint* this_sp = splitPointsSize ? activeSplitPoint : nullptr;
+  SplitPoint* this_sp = activeSplitPoint;
 
-  assert(!this_sp || (this_sp->masterThread == this && searching));
+  assert(!this_sp || (this_sp->master == this && searching));
 
   while (!exit)
   {
       // If this thread has been assigned work, launch a search
       while (searching)
       {
-          Threads.mutex.lock();
+          Threads.spinlock.acquire();
 
           assert(activeSplitPoint);
+
           SplitPoint* sp = activeSplitPoint;
 
-          Threads.mutex.unlock();
+          Threads.spinlock.release();
 
           Stack stack[MAX_PLY+4], *ss = stack+2; // To allow referencing (ss-2) and (ss+2)
           Position pos(*sp->pos, this);
@@ -1539,7 +1540,7 @@ void Thread::idle_loop() {
           std::memcpy(ss-2, sp->ss-2, 5 * sizeof(Stack));
           ss->splitPoint = sp;
 
-          sp->mutex.lock();
+          sp->spinlock.acquire();
 
           assert(activePosition == nullptr);
 
@@ -1567,23 +1568,22 @@ void Thread::idle_loop() {
 
           // Wake up the master thread to allow it to return from the idle
           // loop in case we are the last slave of the split point.
-          if (    this != sp->masterThread
-              &&  sp->slavesMask.none())
+          if (this != sp->master && sp->slavesMask.none())
           {
-              assert(!sp->masterThread->searching);
-              sp->masterThread->notify_one();
+              assert(!sp->master->searching);
+
+              sp->master->notify_one();
           }
 
           // After releasing the lock we can't access any SplitPoint-related
           // data safely, because it could have been released under our feet
           // by the sp master.
-          sp->mutex.unlock();
+          sp->spinlock.release();
 
           // Try to late-join another split point if none of its slaves has
           // already finished.
           SplitPoint* bestSp = NULL;
-          Thread* bestThread = NULL;
-          int bestScore = INT_MAX;
+          int minLevel = INT_MAX;
 
           for (Thread* th : Threads)
           {
@@ -1593,7 +1593,7 @@ void Thread::idle_loop() {
               if (   sp
                   && sp->allSlavesSearching
                   && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
-                  && available_to(th))
+                  && available_to(sp->master))
               {
                   assert(this != th);
                   assert(!(this_sp && this_sp->slavesMask.none()));
@@ -1601,17 +1601,14 @@ void Thread::idle_loop() {
 
                   // Prefer to join an SP with few parents, to reduce the odds
                   // that a cut-off above us wastes our work.
-                  int level = -1;
-                  for (SplitPoint* spp = th->activeSplitPoint; spp; spp = spp->parentSplitPoint)
+                  int level = 0;
+                  for (SplitPoint* p = th->activeSplitPoint; p; p = p->parentSplitPoint)
                       level++;
 
-                  int score = level * 256 * 256 + (int)sp->slavesMask.count() * 256 - sp->depth * 1;
-
-                  if (score < bestScore)
+                  if (level < minLevel)
                   {
                       bestSp = sp;
-                      bestThread = th;
-                      bestScore = score;
+                      minLevel = level;
                   }
               }
           }
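The late-join change above also simplifies the selection heuristic. The removed code ranked candidates by a composite score (chain level weighted by 256 * 256, slave count by 256, minus depth); the replacement keeps only the dominant term and joins the split point whose owner has the shortest chain of enclosing split points. A shorter chain means fewer ancestors at which a cut-off could occur and throw away the slave's work, which is exactly what the comment asks for; the slave-count and depth tie-breakers are dropped.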
@@ -1621,24 +1618,24 @@ void Thread::idle_loop() {
               sp = bestSp;
 
               // Recheck the conditions under lock protection
-              Threads.mutex.lock();
-              sp->mutex.lock();
+              Threads.spinlock.acquire();
+              sp->spinlock.acquire();
 
               if (   sp->allSlavesSearching
                   && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
-                  && available_to(bestThread))
+                  && available_to(sp->master))
               {
                   sp->slavesMask.set(idx);
                   activeSplitPoint = sp;
                   searching = true;
               }
 
-              sp->mutex.unlock();
-              Threads.mutex.unlock();
+              sp->spinlock.release();
+              Threads.spinlock.release();
           }
       }
 
-      // Grab the lock to avoid races with Thread::notify_one()
+      // Avoid races with notify_one() fired from the last slave of the split point
       std::unique_lock<std::mutex> lk(mutex);
 
       // If we are master and all slaves have finished then exit idle_loop
@@ -1690,7 +1687,7 @@ void check_time() {
 
   else if (Limits.nodes)
   {
-      Threads.mutex.lock();
+      Threads.spinlock.acquire();
 
       int64_t nodes = RootPos.nodes_searched();
 
@@ -1701,7 +1698,7 @@ void check_time() {
           {
               SplitPoint& sp = th->splitPoints[i];
 
-              sp.mutex.lock();
+              sp.spinlock.acquire();
 
               nodes += sp.nodes;
 
@@ -1709,10 +1706,10 @@ void check_time() {
                   if (sp.slavesMask.test(idx) && Threads[idx]->activePosition)
                       nodes += Threads[idx]->activePosition->nodes_searched();
 
-              sp.mutex.unlock();
+              sp.spinlock.release();
           }
 
-      Threads.mutex.unlock();
+      Threads.spinlock.release();
 
       if (nodes >= Limits.nodes)
           Signals.stop = true;
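The trade-off running through the whole patch: a spinlock busy-waits in user space instead of sleeping in the kernel, which wins when critical sections are a handful of loads and stores (bumping moveCount, publishing bestValue, summing node counters in check_time) and waiters rarely queue. The cost is burned CPU while waiting, which is why the sketch above yields while spinning. Note that the condition-variable handshake at the bottom of idle_loop() still takes the plain std::mutex: std::condition_variable::wait() requires a std::unique_lock<std::mutex>, so only the short-lived data locks were converted.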