// Allocate pawn and material hash tables for main thread
init_hash_tables();
- lock_init(&mpLock);
+ lock_init(&threadsLock);
// Initialize thread and split point locks
for (int i = 0; i < MAX_THREADS; i++)
lock_destroy(&(threads[i].splitPoints[j].lock));
}
- lock_destroy(&mpLock);
+ lock_destroy(&threadsLock);
}
// call search(). When all threads have returned from search() then split() returns.
template <bool Fake>
-void ThreadsManager::split(Position& pos, SearchStack* ss, Value* alpha, const Value beta,
- Value* bestValue, Depth depth, Move threatMove,
- int moveCount, MovePicker* mp, int nodeType) {
+Value ThreadsManager::split(Position& pos, SearchStack* ss, Value alpha, Value beta,
+ Value bestValue, Depth depth, Move threatMove,
+ int moveCount, MovePicker* mp, int nodeType) {
assert(pos.is_ok());
- assert(*bestValue >= -VALUE_INFINITE);
- assert(*bestValue <= *alpha);
- assert(*alpha < beta);
+ assert(bestValue >= -VALUE_INFINITE);
+ assert(bestValue <= alpha);
+ assert(alpha < beta);
assert(beta <= VALUE_INFINITE);
assert(depth > DEPTH_ZERO);
assert(pos.thread() >= 0 && pos.thread() < activeThreads);
int i, master = pos.thread();
Thread& masterThread = threads[master];
- lock_grab(&mpLock);
-
- // If no other thread is available to help us, or if we have too many
- // active split points, don't split.
- if ( !available_slave_exists(master)
- || masterThread.activeSplitPoints >= MAX_ACTIVE_SPLIT_POINTS)
- {
- lock_release(&mpLock);
- return;
- }
+ // If we already have too many active split points, don't split
+ if (masterThread.activeSplitPoints >= MAX_ACTIVE_SPLIT_POINTS)
+ return bestValue;
// Pick the next available split point object from the split point stack
- SplitPoint& splitPoint = masterThread.splitPoints[masterThread.activeSplitPoints++];
+ SplitPoint& splitPoint = masterThread.splitPoints[masterThread.activeSplitPoints];
// Initialize the split point object
splitPoint.parent = masterThread.splitPoint;
splitPoint.is_betaCutoff = false;
splitPoint.depth = depth;
splitPoint.threatMove = threatMove;
- splitPoint.alpha = *alpha;
+ splitPoint.alpha = alpha;
splitPoint.beta = beta;
splitPoint.nodeType = nodeType;
- splitPoint.bestValue = *bestValue;
+ splitPoint.bestValue = bestValue;
splitPoint.mp = mp;
splitPoint.moveCount = moveCount;
splitPoint.pos = &pos;
for (i = 0; i < activeThreads; i++)
splitPoint.is_slave[i] = false;
- masterThread.splitPoint = &splitPoint;
-
// If we are here it means we are not available
- assert(masterThread.state != Thread::AVAILABLE);
+ assert(masterThread.state == Thread::SEARCHING);
+
+ int booked = 0;
- int workersCnt = 1; // At least the master is included
+ // Try to allocate available threads setting state to Thread::BOOKED, this
+ // must be done under lock protection to avoid concurrent allocation of
+ // the same slave by another master.
+ lock_grab(&threadsLock);
- // Allocate available threads setting state to THREAD_BOOKED
- for (i = 0; !Fake && i < activeThreads && workersCnt < maxThreadsPerSplitPoint; i++)
+ for (i = 0; !Fake && i < activeThreads && booked < maxThreadsPerSplitPoint; i++)
if (i != master && threads[i].is_available_to(master))
{
threads[i].state = Thread::BOOKED;
threads[i].splitPoint = &splitPoint;
splitPoint.is_slave[i] = true;
- workersCnt++;
+ booked++;
}
- assert(Fake || workersCnt > 1);
+ lock_release(&threadsLock);
- // We can release the lock because slave threads are already booked and master is not available
- lock_release(&mpLock);
+ // We failed to allocate even one slave, return
+ if (!Fake && !booked)
+ return bestValue;
- // Tell the threads that they have work to do. This will make them leave
+ masterThread.activeSplitPoints++;
+ masterThread.splitPoint = &splitPoint;
+
+ // Tell the threads that they have some work to do. This will make them leave
// their idle loop.
for (i = 0; i < activeThreads; i++)
if (i == master || splitPoint.is_slave[i])
{
assert(i == master || threads[i].state == Thread::BOOKED);
- threads[i].state = Thread::WORKISWAITING; // This makes the slave to exit from idle_loop()
+ // This makes the slave to exit from idle_loop()
+ threads[i].state = Thread::WORKISWAITING;
if (useSleepingThreads && i != master)
threads[i].wake_up();
idle_loop(master, &splitPoint);
// We have returned from the idle loop, which means that all threads are
- // finished. Update alpha and bestValue, and return.
- lock_grab(&mpLock);
+ // finished. Note that changing state and decreasing activeSplitPoints is done
+ // under lock protection to avoid a race with Thread::is_available_to().
+ lock_grab(&threadsLock);
- *alpha = splitPoint.alpha;
- *bestValue = splitPoint.bestValue;
+ masterThread.state = Thread::SEARCHING;
masterThread.activeSplitPoints--;
masterThread.splitPoint = splitPoint.parent;
- pos.set_nodes_searched(pos.nodes_searched() + splitPoint.nodes);
- lock_release(&mpLock);
+ lock_release(&threadsLock);
+
+ pos.set_nodes_searched(pos.nodes_searched() + splitPoint.nodes);
+ return splitPoint.bestValue;
}
// Explicit template instantiations
-template void ThreadsManager::split<false>(Position&, SearchStack*, Value*, const Value, Value*, Depth, Move, int, MovePicker*, int);
-template void ThreadsManager::split<true>(Position&, SearchStack*, Value*, const Value, Value*, Depth, Move, int, MovePicker*, int);
+template Value ThreadsManager::split<false>(Position&, SearchStack*, Value, Value, Value, Depth, Move, int, MovePicker*, int);
+template Value ThreadsManager::split<true>(Position&, SearchStack*, Value, Value, Value, Depth, Move, int, MovePicker*, int);