// Reset the threads, still sleeping: they will be woken up at split time
for (size_t i = 0; i < Threads.size(); i++)
- Threads[i].maxPly = 0;
+ Threads[i]->maxPly = 0;
Threads.sleepWhileIdle = Options["Use Sleeping Threads"];
// Set best timer interval to avoid lagging under time pressure. Timer is
// used to check for remaining available thinking time.
- Threads.timer_thread()->msec =
+ Threads.timer->msec =
Limits.use_time_management() ? std::min(100, std::max(TimeMgr.available_time() / 16, TimerResolution)) :
Limits.nodes ? 2 * TimerResolution
: 100;
- Threads.timer_thread()->notify_one(); // Wake up the recurring timer
+ Threads.timer->notify_one(); // Wake up the recurring timer
id_loop(RootPos); // Let's start searching !
- Threads.timer_thread()->msec = 0; // Stop the timer
+ Threads.timer->msec = 0; // Stop the timer
Threads.sleepWhileIdle = true; // Send idle threads to sleep
if (Options["Use Search Log"])
{
assert(bestValue < beta);
- bestValue = Threads.split<FakeSplit>(pos, ss, alpha, beta, bestValue, &bestMove,
- depth, threatMove, moveCount, mp, NT);
+ bestValue = thisThread->split<FakeSplit>(pos, ss, alpha, beta, bestValue, &bestMove,
+ depth, threatMove, moveCount, mp, NT);
if (bestValue >= beta)
break;
}
int selDepth = 0;
for (size_t i = 0; i < Threads.size(); i++)
- if (Threads[i].maxPly > selDepth)
- selDepth = Threads[i].maxPly;
+ if (Threads[i]->maxPly > selDepth)
+ selDepth = Threads[i]->maxPly;
for (size_t i = 0; i < uciPVSize; i++)
{
// at the thread creation. So it means we are the split point's master.
const SplitPoint* this_sp = splitPointsSize ? activeSplitPoint : NULL;
- assert(!this_sp || (this_sp->master == this && searching));
+ assert(!this_sp || (this_sp->masterThread == this && searching));
// If this thread is the master of a split point and all slaves have finished
// their work at this split point, return from the idle loop.
// Wake up master thread so to allow it to return from the idle loop
// in case we are the last slave of the split point.
if ( Threads.sleepWhileIdle
- && this != sp->master
+ && this != sp->masterThread
&& !sp->slavesMask)
{
- assert(!sp->master->searching);
- sp->master->notify_one();
+ assert(!sp->masterThread->searching);
+ sp->masterThread->notify_one();
}
// After releasing the lock we cannot access anymore any SplitPoint
// Loop across all split points and sum accumulated SplitPoint nodes plus
// all the currently active slaves positions.
for (size_t i = 0; i < Threads.size(); i++)
- for (int j = 0; j < Threads[i].splitPointsSize; j++)
+ for (int j = 0; j < Threads[i]->splitPointsSize; j++)
{
- SplitPoint& sp = Threads[i].splitPoints[j];
+ SplitPoint& sp = Threads[i]->splitPoints[j];
sp.mutex.lock();