The check for detecting when a split point has all the
slaves still running is done with:
slavesMask == allSlavesMask
When a thread reparents, slavesMask is increased, then, if
the same thread finishes, because there are no more moves,
slavesMask returns to its original value and the above condition
becomes true again. As a result, the just-finished thread immediately
reparents again with the same split point, then starts and
then immediately exits in a tight loop that ends only when a
second slave finishes, so that slavesMask decrements and the
condition becomes false. This yields a spurious and anomalously
high number of fake reparents.
With this patch, which rewrites the logic to avoid this pitfall,
the reparenting success rate drops to a more realistic 5-10%
in the 4-threads case.
As a side effect, note that the maxThreadsPerSplitPoint limit
no longer applies when reparenting.
No functional change.
Signed-off-by: Marco Costalba <mcostalba@gmail.com>
assert(is_searching);
is_searching = false;
assert(is_searching);
is_searching = false;
+ sp->allSlavesRunning = false;
sp->slavesMask &= ~(1ULL << idx);
sp->nodes += pos.nodes_searched();
sp->slavesMask &= ~(1ULL << idx);
sp->nodes += pos.nodes_searched();
// unsafe because if we are exiting they might already have been freed.
lock_release(sp->lock);
// unsafe because if we are exiting they might already have been freed.
lock_release(sp->lock);
- // Try to reparent to another split point
+ // Try to reparent to the first split point, with still all slaves
+ // running, where we are available as a possible slave.
for (int i = 0; i < Threads.size(); i++)
{
Thread* th = &Threads[i];
int spCnt = th->splitPointsCnt;
SplitPoint* latest = &th->splitPoints[spCnt ? spCnt - 1 : 0];
for (int i = 0; i < Threads.size(); i++)
{
Thread* th = &Threads[i];
int spCnt = th->splitPointsCnt;
SplitPoint* latest = &th->splitPoints[spCnt ? spCnt - 1 : 0];
- // Find the first split point with still all slaves running
- // where we are available as a possible slave.
if ( this->is_available_to(th)
&& spCnt > 0
&& !th->cutoff_occurred()
if ( this->is_available_to(th)
&& spCnt > 0
&& !th->cutoff_occurred()
- && latest->slavesMask == latest->allSlavesMask
- && more_than_one(latest->allSlavesMask))
+ && latest->allSlavesRunning
+ && more_than_one(latest->slavesMask))
{
lock_grab(latest->lock);
lock_grab(Threads.splitLock);
{
lock_grab(latest->lock);
lock_grab(Threads.splitLock);
if ( this->is_available_to(th)
&& spCnt == th->splitPointsCnt
&& !th->cutoff_occurred()
if ( this->is_available_to(th)
&& spCnt == th->splitPointsCnt
&& !th->cutoff_occurred()
- && latest->slavesMask == latest->allSlavesMask
- && more_than_one(latest->allSlavesMask))
+ && latest->allSlavesRunning
+ && more_than_one(latest->slavesMask))
- latest->slavesMask |= 1ULL << idx; // allSlavesMask is not updated
+ latest->slavesMask |= 1ULL << idx;
curSplitPoint = latest;
is_searching = true;
}
curSplitPoint = latest;
is_searching = true;
}
sp->master = master;
sp->cutoff = false;
sp->slavesMask = 1ULL << master->idx;
sp->master = master;
sp->cutoff = false;
sp->slavesMask = 1ULL << master->idx;
- sp->allSlavesMask = 1ULL << master->idx;
+ sp->allSlavesRunning = true;
sp->depth = depth;
sp->bestMove = *bestMove;
sp->threatMove = threatMove;
sp->depth = depth;
sp->bestMove = *bestMove;
sp->threatMove = threatMove;
if (threads[i]->is_available_to(master))
{
sp->slavesMask |= 1ULL << i;
if (threads[i]->is_available_to(master))
{
sp->slavesMask |= 1ULL << i;
- sp->allSlavesMask |= 1ULL << i;
threads[i]->curSplitPoint = sp;
threads[i]->is_searching = true; // Slave leaves idle_loop()
threads[i]->curSplitPoint = sp;
threads[i]->is_searching = true; // Slave leaves idle_loop()
threads[i]->wake_up();
if (++slavesCnt + 1 >= maxThreadsPerSplitPoint) // Master is always included
threads[i]->wake_up();
if (++slavesCnt + 1 >= maxThreadsPerSplitPoint) // Master is always included
- {
- sp->allSlavesMask = 0; // Disable reparenting to this split point
}
master->splitPointsCnt++;
}
master->splitPointsCnt++;
// Shared data
Lock lock;
volatile uint64_t slavesMask;
// Shared data
Lock lock;
volatile uint64_t slavesMask;
- volatile uint64_t allSlavesMask;
volatile int64_t nodes;
volatile Value alpha;
volatile Value bestValue;
volatile Move bestMove;
volatile int moveCount;
volatile bool cutoff;
volatile int64_t nodes;
volatile Value alpha;
volatile Value bestValue;
volatile Move bestMove;
volatile int moveCount;
volatile bool cutoff;
+ volatile bool allSlavesRunning;