const bool RootNode = (NT == Root || NT == SplitPointRoot);
assert(alpha >= -VALUE_INFINITE && alpha < beta && beta <= VALUE_INFINITE);
- assert(PvNode == (alpha != beta - 1));
+ assert((alpha == beta - 1) || PvNode);
assert(depth > DEPTH_ZERO);
assert(pos.thread() >= 0 && pos.thread() < Threads.size());
Key posKey;
Move ttMove, move, excludedMove, threatMove;
Depth ext, newDepth;
- ValueType vt;
+ Bound bt;
Value bestValue, value, oldAlpha;
Value refinedValue, nullValue, futilityBase, futilityValue;
bool isPvMove, inCheck, singularExtensionNode, givesCheck;
thread.maxPly = ss->ply;
// Step 1. Initialize node
- if (!SpNode)
- {
- ss->currentMove = ss->bestMove = threatMove = (ss+1)->excludedMove = MOVE_NONE;
- (ss+1)->skipNullMove = false; (ss+1)->reduction = DEPTH_ZERO;
- (ss+2)->killers[0] = (ss+2)->killers[1] = MOVE_NONE;
- }
- else
+ if (SpNode)
{
- sp = ss->sp;
tte = NULL;
ttMove = excludedMove = MOVE_NONE;
+ sp = ss->sp;
threatMove = sp->threatMove;
+ bestValue = sp->bestValue;
+ moveCount = sp->moveCount; // Lock must be held here
+
+ assert(bestValue > -VALUE_INFINITE && moveCount > 0);
+
goto split_point_start;
}
+ else
+ {
+ ss->currentMove = ss->bestMove = threatMove = (ss+1)->excludedMove = MOVE_NONE;
+ (ss+1)->skipNullMove = false; (ss+1)->reduction = DEPTH_ZERO;
+ (ss+2)->killers[0] = (ss+2)->killers[1] = MOVE_NONE;
+
+ }
// Step 2. Check for aborted search and immediate draw
// Enforce node limit here. FIXME: This only works with 1 search thread.
// a fail high/low. Biggest advantage at probing at PV nodes is to have a
// smooth experience in analysis mode. We don't probe at Root nodes otherwise
// we should also update RootMoveList to avoid bogus output.
- if (!RootNode && tte && (PvNode ? tte->depth() >= depth && tte->type() == VALUE_TYPE_EXACT
+ if (!RootNode && tte && (PvNode ? tte->depth() >= depth && tte->type() == BOUND_EXACT
: can_return_tt(tte, depth, beta, ss->ply)))
{
TT.refresh(tte);
else
{
refinedValue = ss->eval = evaluate(pos, ss->evalMargin);
- TT.store(posKey, VALUE_NONE, VALUE_TYPE_NONE, DEPTH_NONE, MOVE_NONE, ss->eval, ss->evalMargin);
+ TT.store(posKey, VALUE_NONE, BOUND_NONE, DEPTH_NONE, MOVE_NONE, ss->eval, ss->evalMargin);
}
// Update gain for the parent non-capture move given the static position
&& depth >= SingularExtensionDepth[PvNode]
&& ttMove != MOVE_NONE
&& !excludedMove // Recursive singular search is not allowed
- && (tte->type() & VALUE_TYPE_LOWER)
+ && (tte->type() & BOUND_LOWER)
&& tte->depth() >= depth - 3 * ONE_PLY;
- if (SpNode)
- {
- lock_grab(&(sp->lock));
- bestValue = sp->bestValue;
- moveCount = sp->moveCount;
-
- assert(bestValue > -VALUE_INFINITE && moveCount > 0);
- }
// Step 11. Loop through moves
// Loop through all pseudo-legal moves until no moves remain or a beta cutoff occurs
if (SpNode)
{
moveCount = ++sp->moveCount;
- lock_release(&(sp->lock));
+ lock_release(sp->lock);
}
else
moveCount++;
&& (!threatMove || !connected_threat(pos, move, threatMove)))
{
if (SpNode)
- lock_grab(&(sp->lock));
+ lock_grab(sp->lock);
continue;
}
if (futilityValue < beta)
{
if (SpNode)
- lock_grab(&(sp->lock));
+ lock_grab(sp->lock);
continue;
}
&& pos.see_sign(move) < 0)
{
if (SpNode)
- lock_grab(&(sp->lock));
+ lock_grab(sp->lock);
continue;
}
// Step 15. Reduced depth search (LMR). If the move fails high will be
// re-searched at full depth.
- if ( depth > 4 * ONE_PLY
+ if ( depth > 3 * ONE_PLY
&& !isPvMove
&& !captureOrPromotion
&& !dangerous
// Step 18. Check for new best move
if (SpNode)
{
- lock_grab(&(sp->lock));
+ lock_grab(sp->lock);
bestValue = sp->bestValue;
alpha = sp->alpha;
}
if (!SpNode && !Signals.stop && !thread.cutoff_occurred())
{
move = bestValue <= oldAlpha ? MOVE_NONE : ss->bestMove;
- vt = bestValue <= oldAlpha ? VALUE_TYPE_UPPER
- : bestValue >= beta ? VALUE_TYPE_LOWER : VALUE_TYPE_EXACT;
+ bt = bestValue <= oldAlpha ? BOUND_UPPER
+ : bestValue >= beta ? BOUND_LOWER : BOUND_EXACT;
- TT.store(posKey, value_to_tt(bestValue, ss->ply), vt, depth, move, ss->eval, ss->evalMargin);
+ TT.store(posKey, value_to_tt(bestValue, ss->ply), bt, depth, move, ss->eval, ss->evalMargin);
// Update killers and history for non capture cut-off moves
if ( bestValue >= beta
}
}
- if (SpNode)
- {
- // Here we have the lock still grabbed
- sp->is_slave[pos.thread()] = false;
- sp->nodes += pos.nodes_searched();
- lock_release(&(sp->lock));
- }
-
assert(bestValue > -VALUE_INFINITE && bestValue < VALUE_INFINITE);
return bestValue;
assert(NT == PV || NT == NonPV);
assert(alpha >= -VALUE_INFINITE && alpha < beta && beta <= VALUE_INFINITE);
- assert(PvNode == (alpha != beta - 1));
+ assert((alpha == beta - 1) || PvNode);
assert(depth <= DEPTH_ZERO);
assert(pos.thread() >= 0 && pos.thread() < Threads.size());
bool inCheck, enoughMaterial, givesCheck, evasionPrunable;
const TTEntry* tte;
Depth ttDepth;
- ValueType vt;
+ Bound bt;
Value oldAlpha = alpha;
ss->bestMove = ss->currentMove = MOVE_NONE;
if (bestValue >= beta)
{
if (!tte)
- TT.store(pos.key(), value_to_tt(bestValue, ss->ply), VALUE_TYPE_LOWER, DEPTH_NONE, MOVE_NONE, ss->eval, evalMargin);
+ TT.store(pos.key(), value_to_tt(bestValue, ss->ply), BOUND_LOWER, DEPTH_NONE, MOVE_NONE, ss->eval, evalMargin);
return bestValue;
}
// Update transposition table
move = bestValue <= oldAlpha ? MOVE_NONE : ss->bestMove;
- vt = bestValue <= oldAlpha ? VALUE_TYPE_UPPER
- : bestValue >= beta ? VALUE_TYPE_LOWER : VALUE_TYPE_EXACT;
+ bt = bestValue <= oldAlpha ? BOUND_UPPER
+ : bestValue >= beta ? BOUND_LOWER : BOUND_EXACT;
- TT.store(pos.key(), value_to_tt(bestValue, ss->ply), vt, ttDepth, move, ss->eval, evalMargin);
+ TT.store(pos.key(), value_to_tt(bestValue, ss->ply), bt, ttDepth, move, ss->eval, evalMargin);
assert(bestValue > -VALUE_INFINITE && bestValue < VALUE_INFINITE);
&& bit_is_set(squares_between(t1, ksq), f2))
{
Bitboard occ = pos.occupied_squares();
- clear_bit(&occ, f2);
+ xor_bit(&occ, f2);
if (bit_is_set(pos.attacks_from(p1, t1, occ), ksq))
return true;
}
|| v >= std::max(VALUE_MATE_IN_MAX_PLY, beta)
|| v < std::min(VALUE_MATED_IN_MAX_PLY, beta))
- && ( ((tte->type() & VALUE_TYPE_LOWER) && v >= beta)
- || ((tte->type() & VALUE_TYPE_UPPER) && v < beta));
+ && ( ((tte->type() & BOUND_LOWER) && v >= beta)
+ || ((tte->type() & BOUND_UPPER) && v < beta));
}
Value v = value_from_tt(tte->value(), ply);
- if ( ((tte->type() & VALUE_TYPE_LOWER) && v >= defaultEval)
- || ((tte->type() & VALUE_TYPE_UPPER) && v < defaultEval))
+ if ( ((tte->type() & BOUND_LOWER) && v >= defaultEval)
+ || ((tte->type() & BOUND_UPPER) && v < defaultEval))
return v;
return defaultEval;
/// RootMove::extract_pv_from_tt() builds a PV by adding moves from the TT table.
-/// We consider also failing high nodes and not only VALUE_TYPE_EXACT nodes so
-/// to allow to always have a ponder move even when we fail high at root, and
-/// a long PV to print that is important for position analysis.
+/// We consider also failing high nodes and not only BOUND_EXACT nodes so to
+/// allow to always have a ponder move even when we fail high at root, and a
+/// long PV to print that is important for position analysis.
void RootMove::extract_pv_from_tt(Position& pos) {
if (!tte || tte->move() != pv[ply])
{
v = (pos.in_check() ? VALUE_NONE : evaluate(pos, m));
- TT.store(k, VALUE_NONE, VALUE_TYPE_NONE, DEPTH_NONE, pv[ply], v, m);
+ TT.store(k, VALUE_NONE, BOUND_NONE, DEPTH_NONE, pv[ply], v, m);
}
pos.do_move(pv[ply], *st++);
/// Thread::idle_loop() is where the thread is parked when it has no work to do.
-/// The parameter 'sp', if non-NULL, is a pointer to an active SplitPoint object
-/// for which the thread is the master.
+/// The parameter 'sp_master', if non-NULL, is a pointer to an active SplitPoint
+/// object for which the thread is the master.
-void Thread::idle_loop(SplitPoint* sp) {
+void Thread::idle_loop(SplitPoint* sp_master) {
- while (true)
+ // If this thread is the master of a split point and all slaves have
+ // finished their work at this split point, return from the idle loop.
+ while (!sp_master || sp_master->slavesMask)
{
// If we are not searching, wait for a condition to be signaled
// instead of wasting CPU time polling for work.
while ( do_sleep
- || do_terminate
- || (Threads.use_sleeping_threads() && !is_searching))
+ || do_exit
+ || (!is_searching && Threads.use_sleeping_threads()))
{
- assert((!sp && threadID) || Threads.use_sleeping_threads());
-
- if (do_terminate)
+ if (do_exit)
{
- assert(!sp);
+ assert(!sp_master);
return;
}
// Grab the lock to avoid races with Thread::wake_up()
- lock_grab(&sleepLock);
+ lock_grab(sleepLock);
// If we are master and all slaves have finished don't go to sleep
- if (sp && Threads.split_point_finished(sp))
+ if (sp_master && !sp_master->slavesMask)
{
- lock_release(&sleepLock);
+ lock_release(sleepLock);
break;
}
// in the meanwhile, allocated us and sent the wake_up() call before we
// had the chance to grab the lock.
if (do_sleep || !is_searching)
- cond_wait(&sleepCond, &sleepLock);
+ cond_wait(sleepCond, sleepLock);
- lock_release(&sleepLock);
+ lock_release(sleepLock);
}
// If this thread has been assigned work, launch a search
if (is_searching)
{
- assert(!do_terminate);
+ assert(!do_sleep && !do_exit);
// Copy split point position and search stack and call search()
Stack ss[MAX_PLY_PLUS_2];
- SplitPoint* tsp = splitPoint;
- Position pos(*tsp->pos, threadID);
-
- memcpy(ss, tsp->ss - 1, 4 * sizeof(Stack));
- (ss+1)->sp = tsp;
-
- if (tsp->nodeType == Root)
- search<SplitPointRoot>(pos, ss+1, tsp->alpha, tsp->beta, tsp->depth);
- else if (tsp->nodeType == PV)
- search<SplitPointPV>(pos, ss+1, tsp->alpha, tsp->beta, tsp->depth);
- else if (tsp->nodeType == NonPV)
- search<SplitPointNonPV>(pos, ss+1, tsp->alpha, tsp->beta, tsp->depth);
+ SplitPoint* sp = splitPoint;
+ Position pos(*sp->pos, threadID);
+
+ memcpy(ss, sp->ss - 1, 4 * sizeof(Stack));
+ (ss+1)->sp = sp;
+
+ lock_grab(sp->lock);
+
+ if (sp->nodeType == Root)
+ search<SplitPointRoot>(pos, ss+1, sp->alpha, sp->beta, sp->depth);
+ else if (sp->nodeType == PV)
+ search<SplitPointPV>(pos, ss+1, sp->alpha, sp->beta, sp->depth);
+ else if (sp->nodeType == NonPV)
+ search<SplitPointNonPV>(pos, ss+1, sp->alpha, sp->beta, sp->depth);
else
assert(false);
assert(is_searching);
+ // We return from search with lock held
+ sp->slavesMask &= ~(1ULL << threadID);
+ sp->nodes += pos.nodes_searched();
+ lock_release(sp->lock);
+
is_searching = false;
// Wake up master thread so to allow it to return from the idle loop in
// case we are the last slave of the split point.
if ( Threads.use_sleeping_threads()
- && threadID != tsp->master
- && !Threads[tsp->master].is_searching)
- Threads[tsp->master].wake_up();
- }
-
- // If this thread is the master of a split point and all slaves have
- // finished their work at this split point, return from the idle loop.
- if (sp && Threads.split_point_finished(sp))
- {
- // Because sp->is_slave[] is reset under lock protection,
- // be sure sp->lock has been released before to return.
- lock_grab(&(sp->lock));
- lock_release(&(sp->lock));
- return;
+ && threadID != sp->master
+ && !Threads[sp->master].is_searching)
+ Threads[sp->master].wake_up();
}
}
}