// Thread::idle_loop():
// If this thread has been assigned work, launch a search
while (searching)
{
- Threads.mutex.lock();
+ mutex.lock();
assert(activeSplitPoint);
-
SplitPoint* sp = activeSplitPoint;
- Threads.mutex.unlock();
+ mutex.unlock();
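+ // Note: the thread's own mutex is enough here. Other threads write
+ // 'activeSplitPoint' only while holding this mutex (see the booking
+ // code below), so the global Threads.mutex is no longer required.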
Stack stack[MAX_PLY+4], *ss = stack+2; // To allow referencing (ss-2) and (ss+2)
Position pos(*sp->pos, this);
[...]
// After finishing its search the thread scans for a split point worth
// late-joining; bestSp is the candidate it found:
if (bestSp)
{
sp = bestSp;
// Recheck the conditions under lock protection
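+ // Lock order is the split point's mutex first, then the thread's own
+ // mutex, the same order Thread::split() uses (sp.mutex, then
+ // slave->mutex), so the two booking paths cannot deadlock.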
- Threads.mutex.lock();
sp->mutex.lock();
if ( sp->allSlavesSearching
- && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
- && can_join(sp))
+ && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT)
{
- sp->slavesMask.set(idx);
- activeSplitPoint = sp;
- searching = true;
+ mutex.lock();
+
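+ // The thread's own mutex serializes this late join against a master
+ // concurrently booking the same thread in Thread::split(); the join
+ // happens only if can_join() still holds once the mutex is acquired.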
+ if (can_join(sp))
+ {
+ sp->slavesMask.set(idx);
+ activeSplitPoint = sp;
+ searching = true;
+ }
+
+ mutex.unlock();
}
sp->mutex.unlock();
- Threads.mutex.unlock();
}
}
[...]
// check_time():
else if (Limits.nodes)
{
- Threads.mutex.lock();
-
int64_t nodes = RootPos.nodes_searched();
// Loop across all split points and sum accumulated SplitPoint nodes plus
// all the currently active positions' nodes.
+ // FIXME: Racy...
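+ // (splitPointsSize and slave->activePosition are read without their
+ // owners' locks; the total is only used to test Limits.nodes.)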
for (Thread* th : Threads)
    for (size_t i = 0; i < th->splitPointsSize; ++i)
    {
        SplitPoint& sp = th->splitPoints[i];
        sp.mutex.lock();
        nodes += sp.nodes;
        for (Thread* slave : Threads)
            if (sp.slavesMask.test(slave->idx) && slave->activePosition)
                nodes += slave->activePosition->nodes_searched();
        sp.mutex.unlock();
    }
- Threads.mutex.unlock();
-
if (nodes >= Limits.nodes)
Signals.stop = true;
}
[...]
// Thread::split():
// Pick and init the next available split point
SplitPoint& sp = splitPoints[splitPointsSize];
+ sp.mutex.lock(); // No contention here until splitPointsSize is incremented below
+
sp.master = this;
sp.parentSplitPoint = activeSplitPoint;
sp.slavesMask = 0, sp.slavesMask.set(idx);
sp.nodes = 0;
sp.cutoff = false;
sp.ss = ss;
-
- // Try to allocate available threads and ask them to start searching setting
- // 'searching' flag. This must be done under lock protection to avoid concurrent
- // allocation of the same slave by another master.
- Threads.mutex.lock();
- sp.mutex.lock();
-
sp.allSlavesSearching = true; // Must be set under lock protection
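+ // (Idle threads test this flag when scanning for a late join and
+ // recheck it under the split point mutex, as in the idle_loop() hunk
+ // above.)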
+
++splitPointsSize;
activeSplitPoint = &sp;
activePosition = nullptr;
+ // Try to allocate available threads
Thread* slave;
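+ // available_slave() is queried without the slave's mutex, so its
+ // answer can be stale by the time the slave is locked; the slave is
+ // booked only if can_join() still holds under slave->mutex.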
while ( sp.slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
- && (slave = Threads.available_slave(activeSplitPoint)) != nullptr)
+ && (slave = Threads.available_slave(&sp)) != nullptr)
{
- sp.slavesMask.set(slave->idx);
- slave->activeSplitPoint = activeSplitPoint;
- slave->searching = true; // Slave leaves idle_loop()
- slave->notify_one(); // Could be sleeping
+ slave->mutex.lock();
+
+ if (slave->can_join(activeSplitPoint))
+ {
+ activeSplitPoint->slavesMask.set(slave->idx);
+ slave->activeSplitPoint = activeSplitPoint;
+ slave->searching = true; // Slave leaves idle_loop()
+ slave->sleepCondition.notify_one(); // Could be sleeping
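+ // Notifying while the slave's mutex is held pairs with the slave's
+ // sleep loop, which waits on sleepCondition under the same mutex,
+ // so the wakeup cannot be lost.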
+ }
+
+ slave->mutex.unlock();
}
// Everything is set up. The master thread enters the idle loop, from which
// it will instantly launch a search, because its 'searching' flag is set.
// The thread will return from the idle loop when all slaves have finished
// their work at this split point.
sp.mutex.unlock();
- Threads.mutex.unlock();
Thread::idle_loop(); // Force a call to base class idle_loop()
assert(!searching);
assert(!activePosition);
+ searching = true;
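+ // No lock is needed for 'searching' here: sp is still on this
+ // thread's split point stack with an empty slavesMask, so can_join()
+ // already rejects this thread for any would-be master.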
+
// We have returned from the idle loop, which means that all threads are
- // finished. Note that setting 'searching' and decreasing splitPointsSize must
- // be done under lock protection to avoid a race with Thread::available_to().
- Threads.mutex.lock();
+ // finished. Note that decreasing splitPointsSize must be done under lock
+ // protection to avoid a race with Thread::can_join().
- sp.mutex.lock();
+ mutex.lock();
- searching = true;
--splitPointsSize;
activeSplitPoint = sp.parentSplitPoint;
activePosition = &pos;
*bestValue = sp.bestValue;
- sp.mutex.unlock();
+ mutex.unlock();
- Threads.mutex.unlock();
}