// Futility margin, scaled by depth. noTtCutNode widens the per-depth factor
// slightly less; 'improving' reduces the effective depth by one.
Value futility_margin(Depth d, bool noTtCutNode, bool improving) {
    return Value((116 - 44 * noTtCutNode) * (d - improving));
}
// Reductions lookup table initialized at startup
Depth reduction(bool i, Depth d, int mn, Value delta, Value rootDelta) {
int reductionScale = Reductions[d] * Reductions[mn];
- return (reductionScale + 1560 - int(delta) * 945 / int(rootDelta)) / 1024
- + (!i && reductionScale > 791);
+ return (reductionScale + 1346 - int(delta) * 896 / int(rootDelta)) / 1024
+ + (!i && reductionScale > 880);
}
constexpr int futility_move_count(bool improving, Depth depth) {
}
// History and stats update bonus, based on depth
-int stat_bonus(Depth d) { return std::min(334 * d - 531, 1538); }
+int stat_bonus(Depth d) { return std::min(268 * d - 352, 1153); }
+
+// History and stats update malus, based on depth
+int stat_malus(Depth d) { return std::min(400 * d - 354, 1201); }
// Add a small random component to draw evaluations to avoid 3-fold blindness
Value value_draw(const Thread* thisThread) {
int captureCount,
Depth depth);
// Utility to verify move generation. All the leaf nodes up
// to the given depth are generated and counted, and the sum is returned.
template<bool Root>
uint64_t perft(Position& pos, Depth depth) {
} // namespace
// Called at startup to initialize various lookup tables
void Search::init() {
for (int i = 1; i < MAX_MOVES; ++i)
}
// Resets search state to its initial value
void Search::clear() {
Threads.main()->wait_for_search_finished();
}
// Called when the program receives the UCI 'go'
// command. It searches from the root position and outputs the "bestmove".
void MainThread::search() {
if (Limits.perft)
}
// Main iterative deepening loop. It calls search()
// repeatedly with increasing depth until the allocated thinking time has been
// consumed, the user stops the search, or the maximum search depth is reached.
void Thread::search() {
// Allocate stack with extra size to allow access from (ss - 7) to (ss + 2):
// (ss - 7) is needed for update_continuation_histories(ss - 1) which accesses (ss - 6),
// (ss + 2) is needed for initialization of cutOffCnt and killers.
Stack stack[MAX_PLY + 10], *ss = stack + 7;
Move pv[MAX_PLY + 1];
Value alpha, beta, delta;
selDepth = 0;
// Reset aspiration window starting size
- Value prev = rootMoves[pvIdx].averageScore;
- delta = Value(10) + int(prev) * prev / 17470;
- alpha = std::max(prev - delta, -VALUE_INFINITE);
- beta = std::min(prev + delta, VALUE_INFINITE);
-
- // Adjust optimism based on root move's previousScore (~4 Elo)
- int opt = 113 * prev / (std::abs(prev) + 109);
- optimism[us] = Value(opt);
+ Value avg = rootMoves[pvIdx].averageScore;
+ delta = Value(9) + int(avg) * avg / 14847;
+ alpha = std::max(avg - delta, -VALUE_INFINITE);
+ beta = std::min(avg + delta, VALUE_INFINITE);
+
+ // Adjust optimism based on root move's averageScore (~4 Elo)
+ optimism[us] = 121 * avg / (std::abs(avg) + 109);
optimism[~us] = -optimism[us];
// Start with a small aspiration window and, in the case of a fail
int failedHighCnt = 0;
while (true)
{
// Adjust the effective depth searched, but ensure at least one effective increment
// for every four searchAgain steps (see issue #2717).
Depth adjustedDepth =
std::max(1, rootDepth - failedHighCnt - 3 * (searchAgainCounter + 1) / 4);
bestValue = Stockfish::search<Root>(rootPos, ss, alpha, beta, adjustedDepth, false);
// Do we have time for the next iteration? Can we stop searching now?
if (Limits.use_time_management() && !Threads.stop && !mainThread->stopOnPonderhit)
{
- double fallingEval = (69 + 13 * (mainThread->bestPreviousAverageScore - bestValue)
+ double fallingEval = (66 + 14 * (mainThread->bestPreviousAverageScore - bestValue)
+ 6 * (mainThread->iterValue[iterIdx] - bestValue))
- / 619.6;
- fallingEval = std::clamp(fallingEval, 0.5, 1.5);
+ / 616.6;
+ fallingEval = std::clamp(fallingEval, 0.51, 1.51);
// If the bestMove is stable over several iterations, reduce time accordingly
- timeReduction = lastBestMoveDepth + 8 < completedDepth ? 1.57 : 0.65;
- double reduction = (1.4 + mainThread->previousTimeReduction) / (2.08 * timeReduction);
- double bestMoveInstability = 1 + 1.8 * totBestMoveChanges / Threads.size();
+ timeReduction = lastBestMoveDepth + 8 < completedDepth ? 1.56 : 0.69;
+ double reduction = (1.4 + mainThread->previousTimeReduction) / (2.17 * timeReduction);
+ double bestMoveInstability = 1 + 1.79 * totBestMoveChanges / Threads.size();
double totalTime = Time.optimum() * fallingEval * reduction * bestMoveInstability;
namespace {
// Main search function for both PV and non-PV nodes
template<NodeType nodeType>
Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, bool cutNode) {
: value_draw(pos.this_thread());
// Step 3. Mate distance pruning. Even if we mate at the next move our score
// would be at best mate_in(ss->ply + 1), but if alpha is already bigger because
// a shorter mate was found upward in the tree then there is no need to search
// because we will never beat the current alpha. Same logic but with reversed
// signs apply also in the opposite condition of being mated instead of giving
if (!ttCapture)
update_quiet_stats(pos, ss, ttMove, stat_bonus(depth));
// Extra penalty for early quiet moves of
// the previous ply (~0 Elo on STC, ~2 Elo on LTC).
if (prevSq != SQ_NONE && (ss - 1)->moveCount <= 2 && !priorCapture)
update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq,
- -stat_bonus(depth + 1));
+ -stat_malus(depth + 1));
}
// Penalty for a quiet ttMove that fails low (~1 Elo)
else if (!ttCapture)
{
- int penalty = -stat_bonus(depth);
+ int penalty = -stat_malus(depth);
thisThread->mainHistory[us][from_to(ttMove)] << penalty;
update_continuation_histories(ss, pos.moved_piece(ttMove), to_sq(ttMove), penalty);
}
int drawScore = TB::UseRule50 ? 1 : 0;
- // use the range VALUE_MATE_IN_MAX_PLY to VALUE_TB_WIN_IN_MAX_PLY to score
- value = wdl < -drawScore ? VALUE_MATED_IN_MAX_PLY + ss->ply + 1
- : wdl > drawScore ? VALUE_MATE_IN_MAX_PLY - ss->ply - 1
+ Value tbValue = VALUE_TB - ss->ply;
+
+ // use the range VALUE_TB to VALUE_TB_WIN_IN_MAX_PLY to score
+ value = wdl < -drawScore ? -tbValue
+ : wdl > drawScore ? tbValue
: VALUE_DRAW + 2 * wdl * drawScore;
Bound b = wdl < -drawScore ? BOUND_UPPER
}
else if (excludedMove)
{
// Providing the hint that this node's accumulator will be used often
// brings significant Elo gain (~13 Elo).
Eval::NNUE::hint_common_parent_position(pos);
eval = ss->staticEval;
}
// Use static evaluation difference to improve quiet move ordering (~4 Elo)
if (is_ok((ss - 1)->currentMove) && !(ss - 1)->inCheck && !priorCapture)
{
- int bonus = std::clamp(-18 * int((ss - 1)->staticEval + ss->staticEval), -1812, 1812);
+ int bonus = std::clamp(-13 * int((ss - 1)->staticEval + ss->staticEval), -1555, 1452);
thisThread->mainHistory[~us][from_to((ss - 1)->currentMove)] << bonus;
+ if (type_of(pos.piece_on(prevSq)) != PAWN && type_of((ss - 1)->currentMove) != PROMOTION)
+ thisThread->pawnHistory[pawn_structure(pos)][pos.piece_on(prevSq)][prevSq] << bonus / 4;
}
// Set up the improving flag, which is true if current static evaluation is
// If eval is really low check with qsearch if it can exceed alpha, if it can't,
// return a fail low.
// Adjust razor margin according to cutoffCnt. (~1 Elo)
- if (eval < alpha - 492 - (257 - 200 * ((ss + 1)->cutoffCnt > 3)) * depth * depth)
+ if (eval < alpha - 472 - (284 - 165 * ((ss + 1)->cutoffCnt > 3)) * depth * depth)
{
value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
if (value < alpha)
// The depth condition is important for mate finding.
if (!ss->ttPv && depth < 9
&& eval - futility_margin(depth, cutNode && !ss->ttHit, improving)
- - (ss - 1)->statScore / 321
+ - (ss - 1)->statScore / 337
>= beta
- && eval >= beta && eval < 29462 // smaller than TB wins
- && !(!ttCapture && ttMove))
- return eval;
+ && eval >= beta && eval < 29008 // smaller than TB wins
+ && (!ttMove || ttCapture))
+ return (eval + beta) / 2;
// Step 9. Null move search with verification search (~35 Elo)
- if (!PvNode && (ss - 1)->currentMove != MOVE_NULL && (ss - 1)->statScore < 17257 && eval >= beta
- && eval >= ss->staticEval && ss->staticEval >= beta - 24 * depth + 281 && !excludedMove
+ if (!PvNode && (ss - 1)->currentMove != MOVE_NULL && (ss - 1)->statScore < 17496 && eval >= beta
+ && eval >= ss->staticEval && ss->staticEval >= beta - 23 * depth + 304 && !excludedMove
&& pos.non_pawn_material(us) && ss->ply >= thisThread->nmpMinPly
&& beta > VALUE_TB_LOSS_IN_MAX_PLY)
{
assert(eval - beta >= 0);
// Null move dynamic reduction based on depth and eval
- Depth R = std::min(int(eval - beta) / 152, 6) + depth / 3 + 4;
+ Depth R = std::min(int(eval - beta) / 144, 6) + depth / 3 + 4;
ss->currentMove = MOVE_NULL;
ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
// Do not return unproven mate or TB scores
if (nullValue >= beta && nullValue < VALUE_TB_WIN_IN_MAX_PLY)
{
- if (thisThread->nmpMinPly || depth < 14)
+ if (thisThread->nmpMinPly || depth < 15)
return nullValue;
assert(!thisThread->nmpMinPly); // Recursive verification is not allowed
}
}
// Step 10. Internal iterative reductions (~9 Elo)
// For PV nodes without a ttMove, we decrease depth by 2,
// or by 4 if the current position is present in the TT and
// the stored depth is greater than or equal to the current depth.
// Use qsearch if depth <= 0.
if (PvNode && !ttMove)
depth -= 2 + 2 * (ss->ttHit && tte->depth() >= depth);
if (depth <= 0)
return qsearch<PV>(pos, ss, alpha, beta);
+ // For cutNodes without a ttMove, we decrease depth by 2 if depth is high enough.
if (cutNode && depth >= 8 && !ttMove)
depth -= 2;
- probCutBeta = beta + 168 - 70 * improving;
+ probCutBeta = beta + 163 - 67 * improving;
// Step 11. ProbCut (~10 Elo)
// If we have a good enough capture (or queen promotion) and a reduced search returns a value
{
assert(pos.capture_stage(move));
+ // Prefetch the TT entry for the resulting position
+ prefetch(TT.first_entry(pos.key_after(move)));
+
ss->currentMove = move;
ss->continuationHistory =
&thisThread
moves_loop: // When in check, search starts here
// Step 12. A small Probcut idea, when we are in check (~4 Elo)
- probCutBeta = beta + 416;
+ probCutBeta = beta + 425;
if (ss->inCheck && !PvNode && ttCapture && (tte->bound() & BOUND_LOWER)
&& tte->depth() >= depth - 4 && ttValue >= probCutBeta
&& abs(ttValue) < VALUE_TB_WIN_IN_MAX_PLY && abs(beta) < VALUE_TB_WIN_IN_MAX_PLY)
prevSq != SQ_NONE ? thisThread->counterMoves[pos.piece_on(prevSq)][prevSq] : MOVE_NONE;
MovePicker mp(pos, ttMove, depth, &thisThread->mainHistory, &captureHistory, contHist,
- countermove, ss->killers);
+ &thisThread->pawnHistory, countermove, ss->killers);
value = bestValue;
moveCountPruning = singularQuietLMR = false;
if (capture || givesCheck)
{
// Futility pruning for captures (~2 Elo)
- if (!givesCheck && lmrDepth < 7 && !ss->inCheck
- && ss->staticEval + 188 + 206 * lmrDepth + PieceValue[pos.piece_on(to_sq(move))]
- + captureHistory[movedPiece][to_sq(move)]
- [type_of(pos.piece_on(to_sq(move)))]
- / 7
- < alpha)
- continue;
+ if (!givesCheck && lmrDepth < 7 && !ss->inCheck)
+ {
+ Piece capturedPiece = pos.piece_on(to_sq(move));
+ int futilityEval =
+ ss->staticEval + 238 + 305 * lmrDepth + PieceValue[capturedPiece]
+ + captureHistory[movedPiece][to_sq(move)][type_of(capturedPiece)] / 7;
+ if (futilityEval < alpha)
+ continue;
+ }
// SEE based pruning for captures and checks (~11 Elo)
- if (!pos.see_ge(move, Value(-185) * depth))
+ if (!pos.see_ge(move, Value(-187) * depth))
continue;
}
else
{
int history = (*contHist[0])[movedPiece][to_sq(move)]
+ (*contHist[1])[movedPiece][to_sq(move)]
- + (*contHist[3])[movedPiece][to_sq(move)];
+ + (*contHist[3])[movedPiece][to_sq(move)]
+ + thisThread->pawnHistory[pawn_structure(pos)][movedPiece][to_sq(move)];
// Continuation history based pruning (~2 Elo)
- if (lmrDepth < 6 && history < -3232 * depth)
+ if (lmrDepth < 6 && history < -3752 * depth)
continue;
history += 2 * thisThread->mainHistory[us][from_to(move)];
- lmrDepth += history / 5793;
- lmrDepth = std::max(lmrDepth, -2);
+ lmrDepth += history / 7838;
+ lmrDepth = std::max(lmrDepth, -1);
// Futility pruning: parent node (~13 Elo)
- if (!ss->inCheck && lmrDepth < 13 && ss->staticEval + 115 + 122 * lmrDepth <= alpha)
+ if (!ss->inCheck && lmrDepth < 14
+ && ss->staticEval + (bestValue < ss->staticEval - 57 ? 124 : 71)
+ + 118 * lmrDepth
+ <= alpha)
continue;
lmrDepth = std::max(lmrDepth, 0);
// Prune moves with negative SEE (~4 Elo)
- if (!pos.see_ge(move, Value(-27 * lmrDepth * lmrDepth)))
+ if (!pos.see_ge(move, Value(-26 * lmrDepth * lmrDepth)))
continue;
}
}
// Singular extension search (~94 Elo). If all moves but one fail low on a
// search of (alpha-s, beta-s), and just one fails high on (alpha, beta),
// then that move is singular and should be extended. To verify this we do
- // a reduced search on all the other moves but the ttMove and if the result
- // is lower than ttValue minus a margin, then we will extend the ttMove. Note
- // that depth margin and singularBeta margin are known for having non-linear
+ // a reduced search on the position excluding the ttMove and if the result
+ // is lower than ttValue minus a margin, then we will extend the ttMove.
+
+ // Note: the depth margin and singularBeta margin are known for having non-linear
// scaling. Their values are optimized to time controls of 180+1.8 and longer
- // so changing them requires tests at this type of time controls.
- if (!rootNode
- && depth >= 4 - (thisThread->completedDepth > 24) + 2 * (PvNode && tte->is_pv())
- && move == ttMove && !excludedMove // Avoid recursive singular search
+ // so changing them requires tests at these types of time controls.
+ // Recursive singular search is avoided.
+ if (!rootNode && move == ttMove && !excludedMove
+ && depth >= 4 - (thisThread->completedDepth > 27) + 2 * (PvNode && tte->is_pv())
&& abs(ttValue) < VALUE_TB_WIN_IN_MAX_PLY && (tte->bound() & BOUND_LOWER)
&& tte->depth() >= depth - 3)
{
- Value singularBeta = ttValue - (64 + 57 * (ss->ttPv && !PvNode)) * depth / 64;
- Depth singularDepth = (depth - 1) / 2;
+ Value singularBeta = ttValue - (66 + 58 * (ss->ttPv && !PvNode)) * depth / 64;
+ Depth singularDepth = newDepth / 2;
ss->excludedMove = move;
value =
singularQuietLMR = !ttCapture;
// Avoid search explosion by limiting the number of double extensions
- if (!PvNode && value < singularBeta - 18 && ss->doubleExtensions <= 11)
+ if (!PvNode && value < singularBeta - 17 && ss->doubleExtensions <= 11)
{
extension = 2;
depth += depth < 15;
}
// Multi-cut pruning
// Our ttMove is assumed to fail high based on the bound of the TT entry,
// and if after excluding the ttMove with a reduced search we fail high over the original beta,
// we assume this expected cut-node is not singular (multiple moves fail high),
// and we can prune the whole subtree by returning a softbound.
else if (singularBeta >= beta)
return singularBeta;
// Negative extensions
// If other moves failed high over (ttValue - margin) without the ttMove on a reduced search,
// but we cannot do multi-cut because (ttValue - margin) is lower than the original beta,
// we do not know if the ttMove is singular or can do a multi-cut,
// so we reduce the ttMove in favor of other moves based on some conditions:

// If the ttMove is assumed to fail high over current beta (~7 Elo)
else if (ttValue >= beta)
extension = -2 - !PvNode;
// If we are on a cutNode but the ttMove is not assumed to fail high over current beta (~1 Elo)
else if (cutNode)
extension = depth < 19 ? -2 : -1;
// If the ttMove is assumed to fail low over the value of the reduced search (~1 Elo)
else if (ttValue <= value)
extension = -1;
}
// Check extensions (~1 Elo)
- else if (givesCheck && depth > 9)
+ else if (givesCheck && depth > 10)
extension = 1;
// Quiet ttMove extensions (~1 Elo)
else if (PvNode && move == ttMove && move == ss->killers[0]
- && (*contHist[0])[movedPiece][to_sq(move)] >= 4194)
+ && (*contHist[0])[movedPiece][to_sq(move)] >= 4325)
+ extension = 1;
+
+ // Recapture extensions (~1 Elo)
+ else if (PvNode && move == ttMove && to_sq(move) == prevSq
+ && captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))]
+ > 4146)
extension = 1;
}
if (PvNode)
r--;
// Decrease reduction if a quiet ttMove has been singularly extended (~1 Elo)
if (singularQuietLMR)
r--;
if ((ss + 1)->cutoffCnt > 3)
r++;
- // Decrease reduction for first generated move (ttMove)
+ // Set reduction to 0 for first picked move (ttMove) (~2 Elo)
+ // Nullifies all previous reduction adjustments to ttMove and leaves only history to do them
else if (move == ttMove)
- r--;
+ r = 0;
ss->statScore = 2 * thisThread->mainHistory[us][from_to(move)]
+ (*contHist[0])[movedPiece][to_sq(move)]
+ (*contHist[1])[movedPiece][to_sq(move)]
- + (*contHist[3])[movedPiece][to_sq(move)] - 3848;
+ + (*contHist[3])[movedPiece][to_sq(move)] - 3817;
// Decrease/increase reduction for moves with a good/bad history (~25 Elo)
- r -= ss->statScore / (10216 + 3855 * (depth > 5 && depth < 23));
+ r -= ss->statScore / 14767;
// Step 17. Late moves reduction / extension (LMR, ~117 Elo)
// We use various heuristics for the sons of a node after the first son has
// been searched. In general, we would like to reduce them, but there are many
// cases where we extend a son if it has good chances to be "interesting".
- if (depth >= 2 && moveCount > 1 + (PvNode && ss->ply <= 1)
+ if (depth >= 2 && moveCount > 1 + rootNode
&& (!ss->ttPv || !capture || (cutNode && (ss - 1)->moveCount > 1)))
{
// In general we want to cap the LMR depth search at newDepth, but when
// reduction is negative, we allow this move a limited search extension
// beyond the first move depth. This may lead to hidden double extensions.
- Depth d = std::clamp(newDepth - r, 1, newDepth + 1);
+ // To prevent problems when the max value is less than the min value,
+ // std::clamp has been replaced by a more robust implementation.
+ Depth d = std::max(1, std::min(newDepth - r, newDepth + 1));
value = -search<NonPV>(pos, ss + 1, -(alpha + 1), -alpha, d, true);
{
// Adjust full-depth search based on LMR results - if the result
// was good enough search deeper, if it was bad enough search shallower.
- const bool doDeeperSearch = value > (bestValue + 51 + 10 * (newDepth - d));
- const bool doEvenDeeperSearch = value > alpha + 700 && ss->doubleExtensions <= 6;
- const bool doShallowerSearch = value < bestValue + newDepth;
-
- ss->doubleExtensions = ss->doubleExtensions + doEvenDeeperSearch;
+ const bool doDeeperSearch = value > (bestValue + 53 + 2 * newDepth); // (~1 Elo)
+ const bool doShallowerSearch = value < bestValue + newDepth; // (~2 Elo)
- newDepth += doDeeperSearch - doShallowerSearch + doEvenDeeperSearch;
+ newDepth += doDeeperSearch - doShallowerSearch;
if (newDepth > d)
value = -search<NonPV>(pos, ss + 1, -(alpha + 1), -alpha, newDepth, !cutNode);
- int bonus = value <= alpha ? -stat_bonus(newDepth)
+ int bonus = value <= alpha ? -stat_malus(newDepth)
: value >= beta ? stat_bonus(newDepth)
: 0;
// Step 18. Full-depth search when LMR is skipped
else if (!PvNode || moveCount > 1)
{
- // Increase reduction for cut nodes and not ttMove (~1 Elo)
- if (!ttMove && cutNode)
+ // Increase reduction if ttMove is not present (~1 Elo)
+ if (!ttMove)
r += 2;
// Note that if expected reduction is high, we reduce search depth by 1 here
else
{
// Reduce other moves if we have found at least one score improvement (~2 Elo)
- if (depth > 2 && depth < 12 && beta < 13828 && value > -11369)
+ if (depth > 2 && depth < 12 && beta < 13782 && value > -11541)
depth -= 2;
assert(depth > 0);
// Bonus for prior countermove that caused the fail low
else if (!priorCapture && prevSq != SQ_NONE)
{
- int bonus = (depth > 6) + (PvNode || cutNode) + (bestValue < alpha - 653)
- + ((ss - 1)->moveCount > 11);
+ int bonus = (depth > 6) + (PvNode || cutNode) + (bestValue < alpha - 656)
+ + ((ss - 1)->moveCount > 10);
update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq,
stat_bonus(depth) * bonus);
thisThread->mainHistory[~us][from_to((ss - 1)->currentMove)]
}
// Quiescence search function, which is called by the main search
// function with zero depth, or recursively with further decreasing depth per call.
// (~155 Elo)
template<NodeType nodeType>
ss->inCheck = pos.checkers();
moveCount = 0;
+ // Used to send selDepth info to GUI (selDepth counts from 1, ply from 0)
+ if (PvNode && thisThread->selDepth < ss->ply + 1)
+ thisThread->selDepth = ss->ply + 1;
+
// Step 2. Check for an immediate draw or maximum ply reached
if (pos.is_draw(ss->ply) || ss->ply >= MAX_PLY)
return (ss->ply >= MAX_PLY && !ss->inCheck) ? evaluate(pos) : VALUE_DRAW;
assert(0 <= ss->ply && ss->ply < MAX_PLY);
// Decide the replacement and cutoff priority of the qsearch TT entries
ttDepth = ss->inCheck || depth >= DEPTH_QS_CHECKS ? DEPTH_QS_CHECKS : DEPTH_QS_NO_CHECKS;
// Step 3. Transposition table lookup
bestValue = ttValue;
}
else
// In case of null move search, use previous static eval with a different sign
ss->staticEval = bestValue =
(ss - 1)->currentMove != MOVE_NULL ? evaluate(pos) : -(ss - 1)->staticEval;
// Stand pat. Return immediately if bestValue is at least beta at non-Pv nodes.
// At PvNodes set bestValue between alpha and beta instead
if (bestValue >= beta)
{
- if (!ss->ttHit)
- tte->save(posKey, value_to_tt(bestValue, ss->ply), false, BOUND_LOWER, DEPTH_NONE,
- MOVE_NONE, ss->staticEval);
+ if (!PvNode || abs(bestValue) >= VALUE_TB_WIN_IN_MAX_PLY)
+ {
+ if (!ss->ttHit)
+ tte->save(posKey, value_to_tt(bestValue, ss->ply), false, BOUND_LOWER,
+ DEPTH_NONE, MOVE_NONE, ss->staticEval);
- return bestValue;
+ return bestValue;
+ }
+ bestValue = std::min((alpha + beta) / 2, beta - 1);
}
if (bestValue > alpha)
alpha = bestValue;
- futilityBase = ss->staticEval + 200;
+ futilityBase = ss->staticEval + 182;
}
const PieceToHistory* contHist[] = {(ss - 1)->continuationHistory,
// will be generated.
Square prevSq = is_ok((ss - 1)->currentMove) ? to_sq((ss - 1)->currentMove) : SQ_NONE;
MovePicker mp(pos, ttMove, depth, &thisThread->mainHistory, &thisThread->captureHistory,
- contHist, prevSq);
+ contHist, &thisThread->pawnHistory);
int quietCheckEvasions = 0;
continue;
// Do not search moves with bad enough SEE values (~5 Elo)
- if (!pos.see_ge(move, Value(-90)))
+ if (!pos.see_ge(move, Value(-77)))
continue;
}
}
// Adjusts a mate or TB score from "plies to mate from the root"
// to "plies to mate from the current position". Standard scores are unchanged.
// The function is called before storing a value in the transposition table.
Value value_to_tt(Value v, int ply) {
assert(v != VALUE_NONE);
}
-// value_from_tt() is the inverse of value_to_tt(): it adjusts a mate or TB score
+// Inverse of value_to_tt(): it adjusts a mate or TB score
// from the transposition table (which refers to the plies to mate/be mated from
// current position) to "plies to mate/be mated (TB win/loss) from the root".
-// However, to avoid potentially false mate scores related to the 50 moves rule
-// and the graph history interaction problem, we return an optimal TB score instead.
+// However, to avoid potentially false mate or TB scores related to the 50 moves rule
+// and the graph history interaction, we return highest non-TB score instead.
Value value_from_tt(Value v, int ply, int r50c) {
if (v == VALUE_NONE)
return VALUE_NONE;
- if (v >= VALUE_TB_WIN_IN_MAX_PLY) // TB win or better
+ // handle TB win or better
+ if (v >= VALUE_TB_WIN_IN_MAX_PLY)
{
- if (v >= VALUE_MATE_IN_MAX_PLY && VALUE_MATE - v > 99 - r50c)
- return VALUE_MATE_IN_MAX_PLY - 1; // do not return a potentially false mate score
+ // Downgrade a potentially false mate score
+ if (v >= VALUE_MATE_IN_MAX_PLY && VALUE_MATE - v > 100 - r50c)
+ return VALUE_TB_WIN_IN_MAX_PLY - 1;
+
+ // Downgrade a potentially false TB score.
+ if (VALUE_TB - v > 100 - r50c)
+ return VALUE_TB_WIN_IN_MAX_PLY - 1;
return v - ply;
}
- if (v <= VALUE_TB_LOSS_IN_MAX_PLY) // TB loss or worse
+ // handle TB loss or worse
+ if (v <= VALUE_TB_LOSS_IN_MAX_PLY)
{
- if (v <= VALUE_MATED_IN_MAX_PLY && VALUE_MATE + v > 99 - r50c)
- return VALUE_MATED_IN_MAX_PLY + 1; // do not return a potentially false mate score
+ // Downgrade a potentially false mate score.
+ if (v <= VALUE_MATED_IN_MAX_PLY && VALUE_MATE + v > 100 - r50c)
+ return VALUE_TB_LOSS_IN_MAX_PLY + 1;
+
+ // Downgrade a potentially false TB score.
+ if (VALUE_TB + v > 100 - r50c)
+ return VALUE_TB_LOSS_IN_MAX_PLY + 1;
return v + ply;
}
}
// Adds current move and appends child pv[]
void update_pv(Move* pv, Move move, const Move* childPv) {
for (*pv++ = move; childPv && *childPv != MOVE_NONE;)
}
// Updates stats at the end of search() when a bestMove is found
void update_all_stats(const Position& pos,
Stack* ss,
Move bestMove,
PieceType captured;
int quietMoveBonus = stat_bonus(depth + 1);
+ int quietMoveMalus = stat_malus(depth);
if (!pos.capture_stage(bestMove))
{
- int bestMoveBonus = bestValue > beta + 168 ? quietMoveBonus // larger bonus
+ int bestMoveBonus = bestValue > beta + 173 ? quietMoveBonus // larger bonus
: stat_bonus(depth); // smaller bonus
// Increase stats for the best move in case it was a quiet move
update_quiet_stats(pos, ss, bestMove, bestMoveBonus);
+ thisThread->pawnHistory[pawn_structure(pos)][moved_piece][to_sq(bestMove)]
+ << quietMoveBonus;
// Decrease stats for all non-best quiet moves
for (int i = 0; i < quietCount; ++i)
{
- thisThread->mainHistory[us][from_to(quietsSearched[i])] << -bestMoveBonus;
+ thisThread->pawnHistory[pawn_structure(pos)][pos.moved_piece(quietsSearched[i])]
+ [to_sq(quietsSearched[i])]
+ << -quietMoveMalus;
+ thisThread->mainHistory[us][from_to(quietsSearched[i])] << -quietMoveMalus;
update_continuation_histories(ss, pos.moved_piece(quietsSearched[i]),
- to_sq(quietsSearched[i]), -bestMoveBonus);
+ to_sq(quietsSearched[i]), -quietMoveMalus);
}
}
else
&& ((ss - 1)->moveCount == 1 + (ss - 1)->ttHit
|| ((ss - 1)->currentMove == (ss - 1)->killers[0]))
&& !pos.captured_piece())
- update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq, -quietMoveBonus);
+ update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq, -quietMoveMalus);
// Decrease stats for all non-best capture moves
for (int i = 0; i < captureCount; ++i)
{
moved_piece = pos.moved_piece(capturesSearched[i]);
captured = type_of(pos.piece_on(to_sq(capturesSearched[i])));
- captureHistory[moved_piece][to_sq(capturesSearched[i])][captured] << -quietMoveBonus;
+ captureHistory[moved_piece][to_sq(capturesSearched[i])][captured] << -quietMoveMalus;
}
}
// Updates histories of the move pairs formed
// by moves at ply -1, -2, -3, -4, and -6 with current move.
void update_continuation_histories(Stack* ss, Piece pc, Square to, int bonus) {
for (int i : {1, 2, 3, 4, 6})
}
// Updates move sorting heuristics
void update_quiet_stats(const Position& pos, Stack* ss, Move move, int bonus) {
// Update killers
// When playing with strength handicap, choose the best move among a set of RootMoves
// using a statistical rule dependent on 'level'. Idea by Heinz van Saanen.
Move Skill::pick_best(size_t multiPV) {
const RootMoves& rootMoves = Threads.main()->rootMoves;
} // namespace
// Used to print debug info and, more importantly,
// to detect when we are out of available time and thus stop the search.
void MainThread::check_time() {
if (--callsCnt > 0)
}
// Formats PV information according to the UCI protocol. UCI requires
// that all (if any) unsearched PV lines are sent using a previous search score.
string UCI::pv(const Position& pos, Depth depth) {
std::stringstream ss;
if (v == -VALUE_INFINITE)
v = VALUE_ZERO;
- bool tb = TB::RootInTB && abs(v) < VALUE_MATE_IN_MAX_PLY;
+ bool tb = TB::RootInTB && abs(v) <= VALUE_TB;
v = tb ? rootMoves[i].tbScore : v;
if (ss.rdbuf()->in_avail()) // Not at first line
}
// Called in case we have no ponder move before exiting the search,
// for instance, in case we stop the search during a fail high at root.
// We try hard to have a ponder move to return to the GUI,
// otherwise in case of 'ponder on' we have nothing to think about.
bool RootMove::extract_ponder_from_tt(Position& pos) {
StateInfo st;