From 9a542d96981e6cb45b6b01f17258a078cf27da36 Mon Sep 17 00:00:00 2001 From: Marco Costalba Date: Sat, 6 Aug 2011 19:11:35 +0100 Subject: [PATCH 1/1] Initialize a new split point out of lock Allocate and initialize a new split point out of lock because modified data is local to master thread only. Also better document why we need a lock at the end of split(). No functional change with faked split. Signed-off-by: Marco Costalba --- src/search.cpp | 2 -- src/thread.cpp | 62 +++++++++++++++++++++++++++----------------------- src/thread.h | 2 +- 3 files changed, 34 insertions(+), 32 deletions(-) diff --git a/src/search.cpp b/src/search.cpp index 5a72d6b0..d06ca8f3 100644 --- a/src/search.cpp +++ b/src/search.cpp @@ -2241,8 +2241,6 @@ void ThreadsManager::idle_loop(int threadID, SplitPoint* sp) { // In helpful master concept a master can help only a sub-tree, and // because here is all finished is not possible master is booked. assert(threads[threadID].state == Thread::AVAILABLE); - - threads[threadID].state = Thread::SEARCHING; return; } } diff --git a/src/thread.cpp b/src/thread.cpp index 4ef14578..5d2b39f5 100644 --- a/src/thread.cpp +++ b/src/thread.cpp @@ -132,7 +132,7 @@ void ThreadsManager::init() { // Allocate pawn and material hash tables for main thread init_hash_tables(); - lock_init(&mpLock); + lock_init(&threadsLock); // Initialize thread and split point locks for (int i = 0; i < MAX_THREADS; i++) @@ -193,7 +193,7 @@ void ThreadsManager::exit() { lock_destroy(&(threads[i].splitPoints[j].lock)); } - lock_destroy(&mpLock); + lock_destroy(&threadsLock); } @@ -253,19 +253,12 @@ void ThreadsManager::split(Position& pos, SearchStack* ss, Value* alpha, const V int i, master = pos.thread(); Thread& masterThread = threads[master]; - lock_grab(&mpLock); - - // If no other thread is available to help us, or if we have too many - // active split points, don't split. 
- if ( !available_slave_exists(master) - || masterThread.activeSplitPoints >= MAX_ACTIVE_SPLIT_POINTS) - { - lock_release(&mpLock); + // If we already have too many active split points, don't split + if (masterThread.activeSplitPoints >= MAX_ACTIVE_SPLIT_POINTS) return; - } // Pick the next available split point object from the split point stack - SplitPoint& splitPoint = masterThread.splitPoints[masterThread.activeSplitPoints++]; + SplitPoint& splitPoint = masterThread.splitPoints[masterThread.activeSplitPoints]; // Initialize the split point object splitPoint.parent = masterThread.splitPoint; @@ -285,27 +278,33 @@ void ThreadsManager::split(Position& pos, SearchStack* ss, Value* alpha, const V for (i = 0; i < activeThreads; i++) splitPoint.is_slave[i] = false; - masterThread.splitPoint = &splitPoint; - // If we are here it means we are not available - assert(masterThread.state != Thread::AVAILABLE); + assert(masterThread.state == Thread::SEARCHING); + + int booked = 0; - int workersCnt = 1; // At least the master is included + // Try to allocate available threads setting state to Thread::BOOKED, this + // must be done under lock protection to avoid concurrent allocation of + // the same slave by another master. 
+ lock_grab(&threadsLock); - // Allocate available threads setting state to THREAD_BOOKED - for (i = 0; !Fake && i < activeThreads && workersCnt < maxThreadsPerSplitPoint; i++) + for (i = 0; !Fake && i < activeThreads && booked < maxThreadsPerSplitPoint; i++) if (i != master && threads[i].is_available_to(master)) { threads[i].state = Thread::BOOKED; threads[i].splitPoint = &splitPoint; splitPoint.is_slave[i] = true; - workersCnt++; + booked++; } - assert(Fake || workersCnt > 1); + lock_release(&threadsLock); + + // We failed to allocate even one slave, return + if (!Fake && !booked) + return; - // We can release the lock because slave threads are already booked and master is not available - lock_release(&mpLock); + masterThread.activeSplitPoints++; + masterThread.splitPoint = &splitPoint; // Tell the threads that they have work to do. This will make them leave // their idle loop. @@ -314,7 +313,8 @@ void ThreadsManager::split(Position& pos, SearchStack* ss, Value* alpha, const V { assert(i == master || threads[i].state == Thread::BOOKED); - threads[i].state = Thread::WORKISWAITING; // This makes the slave to exit from idle_loop() + // This makes the slave to exit from idle_loop() + threads[i].state = Thread::WORKISWAITING; if (useSleepingThreads && i != master) threads[i].wake_up(); @@ -328,16 +328,20 @@ void ThreadsManager::split(Position& pos, SearchStack* ss, Value* alpha, const V idle_loop(master, &splitPoint); // We have returned from the idle loop, which means that all threads are - // finished. Update alpha and bestValue, and return. - lock_grab(&mpLock); + // finished. Update alpha and bestValue, and return. Note that changing + // state and decreasing activeSplitPoints is done under lock protection + // to avoid a race with Thread::is_available_to(). 
+ lock_grab(&threadsLock); - *alpha = splitPoint.alpha; - *bestValue = splitPoint.bestValue; + masterThread.state = Thread::SEARCHING; masterThread.activeSplitPoints--; masterThread.splitPoint = splitPoint.parent; - pos.set_nodes_searched(pos.nodes_searched() + splitPoint.nodes); - lock_release(&mpLock); + lock_release(&threadsLock); + + *alpha = splitPoint.alpha; + *bestValue = splitPoint.bestValue; + pos.set_nodes_searched(pos.nodes_searched() + splitPoint.nodes); } // Explicit template instantiations diff --git a/src/thread.h b/src/thread.h index 038d8e39..1e92b776 100644 --- a/src/thread.h +++ b/src/thread.h @@ -119,7 +119,7 @@ public: Depth depth, Move threatMove, int moveCount, MovePicker* mp, int nodeType); private: Thread threads[MAX_THREADS]; - Lock mpLock; + Lock threadsLock; Depth minimumSplitDepth; int maxThreadsPerSplitPoint; int activeThreads; -- 2.39.2