cnt = 1, nodes++;
else
{
- pos.do_move(m, st, ci, pos.gives_check(m, ci));
+ pos.do_move(m, st, pos.gives_check(m, ci));
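// When only one ply remains, the subtree size is simply the number of
// legal replies, so count them directly instead of recursing.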
cnt = leaf ? MoveList<LEGAL>(pos).size() : perft<false>(pos, depth - ONE_PLY);
nodes += cnt;
pos.undo_move(m);
if (pos.legal(move, ci.pinned))
{
ss->currentMove = move;
- pos.do_move(move, st, ci, pos.gives_check(move, ci));
+ pos.do_move(move, st, pos.gives_check(move, ci));
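// Null-window search around rbeta at the reduced depth: we only need
// to know whether value >= rbeta, not an exact score.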
value = -search<NonPV, false>(pos, ss+1, -rbeta, -rbeta+1, rdepth, !cutNode);
pos.undo_move(move);
if (value >= rbeta)
return value;
quietsSearched[quietCount++] = move;
// Step 14. Make the move
- pos.do_move(move, st, ci, givesCheck);
+ pos.do_move(move, st, givesCheck);
// Step 15. Reduced depth search (LMR). If the move fails high, it will be
// re-searched at full depth.
&& Threads.size() >= 2
&& depth >= Threads.minimumSplitDepth
&& ( !thisThread->activeSplitPoint
- || !thisThread->activeSplitPoint->allSlavesSearching)
+ || !thisThread->activeSplitPoint->allSlavesSearching
+ || ( Threads.size() > MAX_SLAVES_PER_SPLITPOINT
+ && thisThread->activeSplitPoint->slavesMask.count() == MAX_SLAVES_PER_SPLITPOINT))
&& thisThread->splitPointsSize < MAX_SPLITPOINTS_PER_THREAD)
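+ // Splitting is also allowed when the active split point is already
+ // saturated with MAX_SLAVES_PER_SPLITPOINT slaves, provided the pool
+ // has more threads than that limit.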
{
assert(bestValue > -VALUE_INFINITE && bestValue < beta);
ss->currentMove = move;
// Make and search the move
- pos.do_move(move, st, ci, givesCheck);
+ pos.do_move(move, st, givesCheck);
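// givesCheck selects between two compile-time instantiations of qsearch,
// so the child search pays no runtime cost for the InCheck distinction.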
value = givesCheck ? -qsearch<NT, true>(pos, ss+1, -beta, -alpha, depth - ONE_PLY)
: -qsearch<NT, false>(pos, ss+1, -beta, -alpha, depth - ONE_PLY);
pos.undo_move(move);
if (!ttHit || tte->move() != m) // Don't overwrite correct entries
tte->save(pos.key(), VALUE_NONE, BOUND_NONE, DEPTH_NONE, m, VALUE_NONE, TT.generation());
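// Step into the child position so that the next iteration of the PV
// walk probes (and, if needed, refreshes) its TT entry.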
- pos.do_move(m, *st++);
+ pos.do_move(m, *st++, pos.gives_check(m, CheckInfo(pos)));
}
for (size_t i = pv.size(); i > 0; )
pos.undo_move(pv[--i]);
assert(pv.size() == 1);
- pos.do_move(pv[0], st);
+ pos.do_move(pv[0], st, pos.gives_check(pv[0], CheckInfo(pos)));
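// Probe the TT for the position after our best move: a stored move there
// is the expected reply and can be used as the ponder move.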
TTEntry* tte = TT.probe(pos.key(), ttHit);
pos.undo_move(pv[0]);
// Pointer 'this_sp' is non-null only if we are called from split(), and
// not at thread creation. This means we are the split point's master.
- SplitPoint* this_sp = splitPointsSize ? activeSplitPoint : nullptr;
+ SplitPoint* this_sp = activeSplitPoint;
- assert(!this_sp || (this_sp->masterThread == this && searching));
+ assert(!this_sp || (this_sp->master == this && searching));
while (!exit)
{
Threads.mutex.lock();
assert(activeSplitPoint);
+
SplitPoint* sp = activeSplitPoint;
Threads.mutex.unlock();
// Wake up the master thread so as to allow it to return from the idle
// loop if we are the last slave of the split point.
- if ( this != sp->masterThread
- && sp->slavesMask.none())
+ if (this != sp->master && sp->slavesMask.none())
{
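// We are the last slave to leave the split point, so its master is
// waiting in idle_loop for exactly this notification.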
- assert(!sp->masterThread->searching);
- sp->masterThread->notify_one();
+ assert(!sp->master->searching);
+
+ sp->master->notify_one();
}
// After releasing the lock we can't access any SplitPoint-related data
// Try to late-join another split point if none of its slaves has
// already finished.
- if (Threads.size() > 2)
- for (size_t i = 0; i < Threads.size(); ++i)
+ SplitPoint* bestSp = nullptr;
+ int bestScore = INT_MAX;
+
+ for (Thread* th : Threads)
+ {
+ const size_t size = th->splitPointsSize; // Local copy
+ sp = size ? &th->splitPoints[size - 1] : nullptr;
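+ // Both reads above are done without a lock; copying 'size' first keeps
+ // the emptiness test and the index 'size - 1' mutually consistent.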
+
+ if ( sp
+ && sp->allSlavesSearching
+ && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
+ && available_to(th))
{
- const int size = Threads[i]->splitPointsSize; // Local copy
- sp = size ? &Threads[i]->splitPoints[size - 1] : nullptr;
+ assert(this != th);
+ assert(!(this_sp && this_sp->slavesMask.none()));
+ assert(Threads.size() > 2);
+
+ // Prefer to join a split point with few parents, to reduce the
+ // probability that a cut-off occurs above us and our work is wasted.
+ int level = -1;
+ for (SplitPoint* spp = th->activeSplitPoint; spp; spp = spp->parentSplitPoint)
+ level++;
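+ // 'level' now equals the number of split points stacked above the
+ // candidate on that thread: each one is a node where a cut-off could
+ // make work done at sp useless.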
- if ( sp
- && sp->allSlavesSearching
- && available_to(Threads[i]))
+ int score = level * 256 * 256 + (int)sp->slavesMask.count() * 256 - sp->depth * 1;
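+ // Lower score wins: 'level' is the dominant term (x65536), then the
+ // number of slaves already attached (x256); remaining depth acts as a
+ // tie-break that prefers split points with larger subtrees.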
+
+ if (score < bestScore)
{
- // Recheck the conditions under lock protection
- Threads.mutex.lock();
- sp->mutex.lock();
-
- if ( sp->allSlavesSearching
- && available_to(Threads[i]))
- {
- sp->slavesMask.set(idx);
- activeSplitPoint = sp;
- searching = true;
- }
-
- sp->mutex.unlock();
- Threads.mutex.unlock();
-
- break; // Just a single attempt
+ bestSp = sp;
+ bestScore = score;
}
}
+ }
+
+ if (bestSp)
+ {
+ sp = bestSp;
+
+ // Recheck the conditions under lock protection
+ Threads.mutex.lock();
+ sp->mutex.lock();
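+ // The lock-free scan above may be stale by now: slaves may have joined
+ // or the split point may have finished, so re-test before attaching.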
+
+ if ( sp->allSlavesSearching
+ && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
+ && available_to(sp->master))
+ {
+ sp->slavesMask.set(idx);
+ activeSplitPoint = sp;
+ searching = true;
+ }
+
+ sp->mutex.unlock();
+ Threads.mutex.unlock();
+ }
}
- // Grab the lock to avoid races with Thread::notify_one()
+ // Avoid races with notify_one() fired from the last slave of the split point
std::unique_lock<std::mutex> lk(mutex);
// If we are the master and all slaves have finished, then exit idle_loop
// Loop across all split points and sum accumulated SplitPoint nodes plus
// all the currently active positions nodes.
for (Thread* th : Threads)
- for (int i = 0; i < th->splitPointsSize; ++i)
+ for (size_t i = 0; i < th->splitPointsSize; ++i)
{
SplitPoint& sp = th->splitPoints[i];