read_evaluation_uci_options(pos.side_to_move());
Threads.read_uci_options();
- // If needed allocate pawn and material hash tables and adjust TT size
+ // Allocate pawn and material hash tables if the number of active threads
+ // has increased, and set a new TT size if it has changed.
Threads.init_hash_tables();
TT.set_size(Options["Hash"].value<int>());
while (true)
{
- // Slave threads can exit as soon as AllThreadsShouldExit raises,
+ // Slave threads can exit as soon as the allThreadsShouldExit flag is raised,
// master should exit as last one.
if (allThreadsShouldExit)
{
return;
}
- // If we are not thinking, wait for a condition to be signaled
+ // If we are not searching, wait for a condition to be signaled
// instead of wasting CPU time polling for work.
while ( threadID >= activeThreads
|| threads[threadID].state == Thread::INITIALIZING
// Grab the lock to avoid races with Thread::wake_up()
lock_grab(&threads[threadID].sleepLock);
- // If we are master and all slaves have finished do not go to sleep
+ // If we are master and all slaves have finished don't go to sleep
for (i = 0; sp && i < activeThreads && !sp->is_slave[i]; i++) {}
allFinished = (i == activeThreads);
break;
}
- // Do sleep here after retesting sleep conditions
+ // Do sleep after retesting sleep conditions under lock protection, in
+ // particular we need to avoid a deadlock in case a master thread has,
+ // in the meantime, allocated us and sent the wake_up() call before we
+ // had the chance to grab the lock.
if (threadID >= activeThreads || threads[threadID].state == Thread::AVAILABLE)
cond_wait(&threads[threadID].sleepCond, &threads[threadID].sleepLock);
threads[threadID].state = Thread::SEARCHING;
// Copy split point position and search stack and call search()
- // with SplitPoint template parameter set to true.
SearchStack ss[PLY_MAX_PLUS_2];
SplitPoint* tsp = threads[threadID].splitPoint;
Position pos(*tsp->pos, threadID);
if (allFinished)
{
- // Because sp->slaves[] is reset under lock protection,
+ // Because sp->is_slave[] is reset under lock protection,
// be sure sp->lock has been released before to return.
lock_grab(&(sp->lock));
lock_release(&(sp->lock));
-
- // In helpful master concept a master can help only a sub-tree, and
- // because here is all finished is not possible master is booked.
- assert(threads[threadID].state == Thread::AVAILABLE);
return;
}
}
// Allocate pawn and material hash tables for main thread
init_hash_tables();
+ // Initialize threads lock, used when allocating slaves during splitting
lock_init(&threadsLock);
- // Initialize thread and split point locks
+ // Initialize sleep and split point locks
for (int i = 0; i < MAX_THREADS; i++)
{
lock_init(&threads[i].sleepLock);
}
-// exit() is called to cleanly exit the threads when the program finishes
+// exit() is called to cleanly terminate the threads when the program finishes
void ThreadsManager::exit() {
for (int i = 0; i < MAX_THREADS; i++)
{
- // Wake up all the threads and waits for termination
+ // Wake up all the threads and wait for termination
if (i != 0)
{
threads[i].wake_up();
while (threads[i].state != Thread::TERMINATED) {}
}
- // Now we can safely destroy the locks and wait conditions
+ // Now we can safely destroy locks and wait conditions
lock_destroy(&threads[i].sleepLock);
cond_destroy(&threads[i].sleepCond);
return bestValue;
// Pick the next available split point object from the split point stack
- SplitPoint& splitPoint = masterThread.splitPoints[masterThread.activeSplitPoints];
+ SplitPoint* sp = masterThread.splitPoints + masterThread.activeSplitPoints;
// Initialize the split point object
- splitPoint.parent = masterThread.splitPoint;
- splitPoint.master = master;
- splitPoint.is_betaCutoff = false;
- splitPoint.depth = depth;
- splitPoint.threatMove = threatMove;
- splitPoint.alpha = alpha;
- splitPoint.beta = beta;
- splitPoint.nodeType = nodeType;
- splitPoint.bestValue = bestValue;
- splitPoint.mp = mp;
- splitPoint.moveCount = moveCount;
- splitPoint.pos = &pos;
- splitPoint.nodes = 0;
- splitPoint.ss = ss;
+ sp->parent = masterThread.splitPoint;
+ sp->master = master;
+ sp->is_betaCutoff = false;
+ sp->depth = depth;
+ sp->threatMove = threatMove;
+ sp->alpha = alpha;
+ sp->beta = beta;
+ sp->nodeType = nodeType;
+ sp->bestValue = bestValue;
+ sp->mp = mp;
+ sp->moveCount = moveCount;
+ sp->pos = &pos;
+ sp->nodes = 0;
+ sp->ss = ss;
for (i = 0; i < activeThreads; i++)
- splitPoint.is_slave[i] = false;
+ sp->is_slave[i] = false;
// If we are here it means we are not available
assert(masterThread.state == Thread::SEARCHING);
if (i != master && threads[i].is_available_to(master))
{
workersCnt++;
- splitPoint.is_slave[i] = true;
- threads[i].splitPoint = &splitPoint;
+ sp->is_slave[i] = true;
+ threads[i].splitPoint = sp;
// This makes the slave to exit from idle_loop()
threads[i].state = Thread::WORKISWAITING;
if (!Fake && workersCnt == 1)
return bestValue;
- masterThread.splitPoint = &splitPoint;
+ masterThread.splitPoint = sp;
masterThread.activeSplitPoints++;
masterThread.state = Thread::WORKISWAITING;
// Thread::WORKISWAITING. We send the split point as a second parameter to
// the idle loop, which means that the main thread will return from the idle
// loop when all threads have finished their work at this split point.
- idle_loop(master, &splitPoint);
+ idle_loop(master, sp);
+
+ // In the helpful master concept a master can help only a sub-tree of its
+ // own split point, and because everything there is now finished, the master
+ // cannot have been booked as a slave of another split point.
+ assert(masterThread.state == Thread::AVAILABLE);
// We have returned from the idle loop, which means that all threads are
// finished. Note that changing state and decreasing activeSplitPoints is done
masterThread.state = Thread::SEARCHING;
masterThread.activeSplitPoints--;
- masterThread.splitPoint = splitPoint.parent;
lock_release(&threadsLock);
- pos.set_nodes_searched(pos.nodes_searched() + splitPoint.nodes);
- return splitPoint.bestValue;
+ masterThread.splitPoint = sp->parent;
+ pos.set_nodes_searched(pos.nodes_searched() + sp->nodes);
+
+ return sp->bestValue;
}
// Explicit template instantiations