git.sesse.net Git - stockfish/commitdiff
Retire spinlocks
authorMarco Costalba <mcostalba@gmail.com>
Tue, 10 Mar 2015 20:50:45 +0000 (21:50 +0100)
committerMarco Costalba <mcostalba@gmail.com>
Wed, 11 Mar 2015 20:20:47 +0000 (21:20 +0100)
Use Mutex instead.

This is in preparation for merging with master branch,
where we still don't have spinlocks.

Eventually spinlocks will be re-added in some future
patch, once C++11 has been merged.

No functional change.

src/search.cpp
src/thread.cpp
src/thread.h

index 3b35e3a061842f667b386d853e7e659529cf3238..14e95a84e219f26645e030df02384fcc08ca81c6 100644 (file)
@@ -765,7 +765,7 @@ moves_loop: // When in check and at SpNode search starts from here
               continue;
 
           moveCount = ++splitPoint->moveCount;
-          splitPoint->spinlock.release();
+          splitPoint->mutex.unlock();
       }
       else
           ++moveCount;
@@ -834,7 +834,7 @@ moves_loop: // When in check and at SpNode search starts from here
               && moveCount >= FutilityMoveCounts[improving][depth])
           {
               if (SpNode)
-                  splitPoint->spinlock.acquire();
+                  splitPoint->mutex.lock();
 
               continue;
           }
@@ -853,7 +853,7 @@ moves_loop: // When in check and at SpNode search starts from here
 
                   if (SpNode)
                   {
-                      splitPoint->spinlock.acquire();
+                      splitPoint->mutex.lock();
                       if (bestValue > splitPoint->bestValue)
                           splitPoint->bestValue = bestValue;
                   }
@@ -865,7 +865,7 @@ moves_loop: // When in check and at SpNode search starts from here
           if (predictedDepth < 4 * ONE_PLY && pos.see_sign(move) < VALUE_ZERO)
           {
               if (SpNode)
-                  splitPoint->spinlock.acquire();
+                  splitPoint->mutex.lock();
 
               continue;
           }
@@ -965,7 +965,7 @@ moves_loop: // When in check and at SpNode search starts from here
       // Step 18. Check for new best move
       if (SpNode)
       {
-          splitPoint->spinlock.acquire();
+          splitPoint->mutex.lock();
           bestValue = splitPoint->bestValue;
           alpha = splitPoint->alpha;
       }
@@ -1526,13 +1526,13 @@ void Thread::idle_loop() {
       // If this thread has been assigned work, launch a search
       while (searching)
       {
-          Threads.spinlock.acquire();
+          Threads.mutex.lock();
 
           assert(activeSplitPoint);
 
           SplitPoint* sp = activeSplitPoint;
 
-          Threads.spinlock.release();
+          Threads.mutex.unlock();
 
           Stack stack[MAX_PLY+4], *ss = stack+2; // To allow referencing (ss-2) and (ss+2)
           Position pos(*sp->pos, this);
@@ -1540,7 +1540,7 @@ void Thread::idle_loop() {
           std::memcpy(ss-2, sp->ss-2, 5 * sizeof(Stack));
           ss->splitPoint = sp;
 
-          sp->spinlock.acquire();
+          sp->mutex.lock();
 
           assert(activePosition == nullptr);
 
@@ -1578,7 +1578,7 @@ void Thread::idle_loop() {
           // After releasing the lock we can't access any SplitPoint related data
           // in a safe way because it could have been released under our feet by
           // the sp master.
-          sp->spinlock.release();
+          sp->mutex.unlock();
 
           // Try to late join to another split point if none of its slaves has
           // already finished.
@@ -1618,8 +1618,8 @@ void Thread::idle_loop() {
               sp = bestSp;
 
               // Recheck the conditions under lock protection
-              Threads.spinlock.acquire();
-              sp->spinlock.acquire();
+              Threads.mutex.lock();
+              sp->mutex.lock();
 
               if (   sp->allSlavesSearching
                   && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
@@ -1630,8 +1630,8 @@ void Thread::idle_loop() {
                   searching = true;
               }
 
-              sp->spinlock.release();
-              Threads.spinlock.release();
+              sp->mutex.unlock();
+              Threads.mutex.unlock();
           }
       }
 
@@ -1687,7 +1687,7 @@ void check_time() {
 
   else if (Limits.nodes)
   {
-      Threads.spinlock.acquire();
+      Threads.mutex.lock();
 
       int64_t nodes = RootPos.nodes_searched();
 
@@ -1698,7 +1698,7 @@ void check_time() {
           {
               SplitPoint& sp = th->splitPoints[i];
 
-              sp.spinlock.acquire();
+              sp.mutex.lock();
 
               nodes += sp.nodes;
 
@@ -1706,10 +1706,10 @@ void check_time() {
                   if (sp.slavesMask.test(idx) && Threads[idx]->activePosition)
                       nodes += Threads[idx]->activePosition->nodes_searched();
 
-              sp.spinlock.release();
+              sp.mutex.unlock();
           }
 
-      Threads.spinlock.release();
+      Threads.mutex.unlock();
 
       if (nodes >= Limits.nodes)
           Signals.stop = true;
index 6ddbf3f2e85f82ee085cf82db5f46cf76657b78d..3f901445ce1429f3fd6a00920459a66b78417ce0 100644 (file)
@@ -164,8 +164,8 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
   // Try to allocate available threads and ask them to start searching setting
   // 'searching' flag. This must be done under lock protection to avoid concurrent
   // allocation of the same slave by another master.
-  Threads.spinlock.acquire();
-  sp.spinlock.acquire();
+  Threads.mutex.lock();
+  sp.mutex.lock();
 
   sp.allSlavesSearching = true; // Must be set under lock protection
   ++splitPointsSize;
@@ -187,8 +187,8 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
   // it will instantly launch a search, because its 'searching' flag is set.
   // The thread will return from the idle loop when all slaves have finished
   // their work at this split point.
-  sp.spinlock.release();
-  Threads.spinlock.release();
+  sp.mutex.unlock();
+  Threads.mutex.unlock();
 
   Thread::idle_loop(); // Force a call to base class idle_loop()
 
@@ -201,8 +201,8 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
   // We have returned from the idle loop, which means that all threads are
   // finished. Note that setting 'searching' and decreasing splitPointsSize must
   // be done under lock protection to avoid a race with Thread::available_to().
-  Threads.spinlock.acquire();
-  sp.spinlock.acquire();
+  Threads.mutex.lock();
+  sp.mutex.lock();
 
   searching = true;
   --splitPointsSize;
@@ -212,8 +212,8 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
   *bestMove = sp.bestMove;
   *bestValue = sp.bestValue;
 
-  sp.spinlock.release();
-  Threads.spinlock.release();
+  sp.mutex.unlock();
+  Threads.mutex.unlock();
 }
 
 
index 0469dc1d70fa180cfe17ca6a38e1c7a733b1ba28..231443dbba0d9f9c521b085ec2837c20587b2973 100644 (file)
@@ -41,18 +41,6 @@ const size_t MAX_SPLITPOINTS_PER_THREAD = 8;
 const size_t MAX_SLAVES_PER_SPLITPOINT = 4;
 
 
-/// Spinlock class wraps low level atomic operations to provide a spin lock
-
-class Spinlock {
-
-  Mutex m; // WARNING: Diasabled spinlocks to test on fishtest
-
-public:
-  void acquire() { m.lock(); }
-  void release() { m.unlock(); }
-};
-
-
 /// SplitPoint struct stores information shared by the threads searching in
 /// parallel below the same split point. It is populated at splitting time.
 
@@ -72,7 +60,7 @@ struct SplitPoint {
   SplitPoint* parentSplitPoint;
 
   // Shared variable data
-  Spinlock spinlock;
+  Mutex mutex;
   std::bitset<MAX_THREADS> slavesMask;
   volatile bool allSlavesSearching;
   volatile uint64_t nodes;
@@ -163,7 +151,7 @@ struct ThreadPool : public std::vector<Thread*> {
   void start_thinking(const Position&, const Search::LimitsType&, Search::StateStackPtr&);
 
   Depth minimumSplitDepth;
-  Spinlock spinlock;
+  Mutex mutex;
   ConditionVariable sleepCondition;
   TimerThread* timer;
 };