/*
Stockfish, a UCI chess playing engine derived from Glaurung 2.1
Copyright (C) 2004-2023 The Stockfish developers (see AUTHORS file)
Stockfish is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
// Futility margin used by Step 8 (futility pruning: child node).
// Scales linearly with depth; the `improving` flag effectively shaves
// one ply off the margin, since an improving side needs less safety slack.
// NOTE(review): constant 154 resolved from the '+' side of leftover diff residue.
Value futility_margin(Depth d, bool improving) {
  return Value(154 * (d - improving));
}
// Late-move reduction amount, in plies.
// Combines the precomputed logarithmic table (indexed by depth and move
// number) with a correction based on the aspiration-window ratio
// delta/rootDelta; non-improving nodes with a large base reduction get
// one extra ply. Constants resolved from the '+' side of diff residue.
Depth reduction(bool i, Depth d, int mn, Value delta, Value rootDelta) {
  int r = Reductions[d] * Reductions[mn];
  return (r + 1449 - int(delta) * 1032 / int(rootDelta)) / 1024 + (!i && r > 941);
}
constexpr int futility_move_count(bool improving, Depth depth) {
// History and stats update bonus, based on depth.
// Linear in depth and capped at 1855 so very deep searches do not
// overwhelm the history tables. Formula resolved from the '+' side of
// leftover diff residue (replaces the earlier quadratic form).
int stat_bonus(Depth d) {
  return std::min(340 * d - 470, 1855);
}
// Add a small random component to draw evaluations to avoid 3-fold blindness
struct Skill {
Skill(int skill_level, int uci_elo) {
if (uci_elo)
- level = std::clamp(std::pow((uci_elo - 1346.6) / 143.4, 1 / 0.806), 0.0, 20.0);
+ {
+ double e = double(uci_elo - 1320) / (3190 - 1320);
+ level = std::clamp((((37.2473 * e - 40.8525) * e + 22.2943) * e - 0.311438), 0.0, 19.0);
+ }
else
level = double(skill_level);
}
// Search::init() is called at startup to initialize the LMR reduction
// lookup table. The per-index value grows logarithmically with the move
// index; the thread-count term slightly increases reductions for larger
// thread pools. Constant 19.47 resolved from the '+' side of diff residue.
void Search::init() {
  for (int i = 1; i < MAX_MOVES; ++i)
      Reductions[i] = int((19.47 + std::log(Threads.size()) / 2) * std::log(i));
}
bestPreviousScore = bestThread->rootMoves[0].score;
bestPreviousAverageScore = bestThread->rootMoves[0].averageScore;
- for (Thread* th : Threads)
- th->previousDepth = bestThread->completedDepth;
-
// Send again PV info if we have a new best thread
if (bestThread != this)
sync_cout << UCI::pv(bestThread->rootPos, bestThread->completedDepth) << sync_endl;
int iterIdx = 0;
std::memset(ss-7, 0, 10 * sizeof(Stack));
- for (int i = 7; i > 0; i--)
+ for (int i = 7; i > 0; --i)
+ {
(ss-i)->continuationHistory = &this->continuationHistory[0][0][NO_PIECE][0]; // Use as a sentinel
+ (ss-i)->staticEval = VALUE_NONE;
+ }
for (int i = 0; i <= MAX_PLY + 2; ++i)
(ss+i)->ply = i;
multiPV = std::min(multiPV, rootMoves.size());
- complexityAverage.set(155, 1);
+ complexityAverage.set(153, 1);
optimism[us] = optimism[~us] = VALUE_ZERO;
pvLast = 0;
if (!Threads.increaseDepth)
- searchAgainCounter++;
+ searchAgainCounter++;
// MultiPV loop. We perform a full root search for each PV line
for (pvIdx = 0; pvIdx < multiPV && !Threads.stop; ++pvIdx)
if (rootDepth >= 4)
{
Value prev = rootMoves[pvIdx].averageScore;
- delta = Value(10) + int(prev) * prev / 15620;
+ delta = Value(10) + int(prev) * prev / 16502;
alpha = std::max(prev - delta,-VALUE_INFINITE);
beta = std::min(prev + delta, VALUE_INFINITE);
// Adjust optimism based on root move's previousScore
- int opt = 118 * prev / (std::abs(prev) + 169);
+ int opt = 120 * prev / (std::abs(prev) + 161);
optimism[ us] = Value(opt);
optimism[~us] = -optimism[us];
}
if (!Threads.stop)
completedDepth = rootDepth;
- if (rootMoves[0].pv[0] != lastBestMove) {
- lastBestMove = rootMoves[0].pv[0];
- lastBestMoveDepth = rootDepth;
+ if (rootMoves[0].pv[0] != lastBestMove)
+ {
+ lastBestMove = rootMoves[0].pv[0];
+ lastBestMoveDepth = rootDepth;
}
// Have we found a "mate in x"?
&& !Threads.stop
&& !mainThread->stopOnPonderhit)
{
- double fallingEval = (71 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
- + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 656.7;
+ double fallingEval = (69 + 13 * (mainThread->bestPreviousAverageScore - bestValue)
+ + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 619.6;
fallingEval = std::clamp(fallingEval, 0.5, 1.5);
// If the bestMove is stable over several iterations, reduce time accordingly
- timeReduction = lastBestMoveDepth + 9 < completedDepth ? 1.37 : 0.65;
- double reduction = (1.4 + mainThread->previousTimeReduction) / (2.15 * timeReduction);
- double bestMoveInstability = 1 + 1.7 * totBestMoveChanges / Threads.size();
+ timeReduction = lastBestMoveDepth + 8 < completedDepth ? 1.57 : 0.65;
+ double reduction = (1.4 + mainThread->previousTimeReduction) / (2.08 * timeReduction);
+ double bestMoveInstability = 1 + 1.8 * totBestMoveChanges / Threads.size();
int complexity = mainThread->complexityAverage.value();
- double complexPosition = std::min(1.0 + (complexity - 261) / 1738.7, 1.5);
+ double complexPosition = std::min(1.03 + (complexity - 241) / 1552.0, 1.45);
double totalTime = Time.optimum() * fallingEval * reduction * bestMoveInstability * complexPosition;
else
Threads.stop = true;
}
- else if ( Threads.increaseDepth
- && !mainThread->ponder
- && Time.elapsed() > totalTime * 0.53)
- Threads.increaseDepth = false;
+ else if ( !mainThread->ponder
+ && Time.elapsed() > totalTime * 0.50)
+ Threads.increaseDepth = false;
else
- Threads.increaseDepth = true;
+ Threads.increaseDepth = true;
}
mainThread->iterValue[iterIdx] = bestValue;
constexpr bool PvNode = nodeType != NonPV;
constexpr bool rootNode = nodeType == Root;
- const Depth maxNextDepth = rootNode ? depth : depth + 1;
// Check if we have an upcoming move which draws by repetition, or
// if the opponent had an alternative move earlier to this position.
if (!rootNode)
(ss+2)->statScore = 0;
- // Step 4. Transposition table lookup. We don't want the score of a partial
- // search to overwrite a previous full search TT value, so we use a different
- // position key in case of an excluded move.
+ // Step 4. Transposition table lookup.
excludedMove = ss->excludedMove;
- posKey = excludedMove == MOVE_NONE ? pos.key() : pos.key() ^ make_key(excludedMove);
+ posKey = pos.key();
tte = TT.probe(posKey, ss->ttHit);
ttValue = ss->ttHit ? value_from_tt(tte->value(), ss->ply, pos.rule50_count()) : VALUE_NONE;
ttMove = rootNode ? thisThread->rootMoves[thisThread->pvIdx].pv[0]
: ss->ttHit ? tte->move() : MOVE_NONE;
- ttCapture = ttMove && pos.capture(ttMove);
+ ttCapture = ttMove && pos.capture_stage(ttMove);
+
+ // At this point, if excluded, skip straight to step 6, static eval. However,
+ // to save indentation, we list the condition in all code between here and there.
if (!excludedMove)
ss->ttPv = PvNode || (ss->ttHit && tte->is_pv());
// At non-PV nodes we check for an early TT cutoff
if ( !PvNode
&& ss->ttHit
+ && !excludedMove
&& tte->depth() > depth - (tte->bound() == BOUND_EXACT)
&& ttValue != VALUE_NONE // Possible in case of TT access race
&& (tte->bound() & (ttValue >= beta ? BOUND_LOWER : BOUND_UPPER)))
{
- // If ttMove is quiet, update move sorting heuristics on TT hit (~1 Elo)
+ // If ttMove is quiet, update move sorting heuristics on TT hit (~2 Elo)
if (ttMove)
{
if (ttValue >= beta)
{
- // Bonus for a quiet ttMove that fails high (~3 Elo)
+ // Bonus for a quiet ttMove that fails high (~2 Elo)
if (!ttCapture)
update_quiet_stats(pos, ss, ttMove, stat_bonus(depth));
- // Extra penalty for early quiet moves of the previous ply (~0 Elo)
+ // Extra penalty for early quiet moves of the previous ply (~0 Elo on STC, ~2 Elo on LTC)
if ((ss-1)->moveCount <= 2 && !priorCapture)
update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, -stat_bonus(depth + 1));
}
}
// Step 5. Tablebases probe
- if (!rootNode && TB::Cardinality)
+ if (!rootNode && !excludedMove && TB::Cardinality)
{
int piecesCount = pos.count<ALL_PIECES>();
complexity = 0;
goto moves_loop;
}
+ else if (excludedMove)
+ {
+ // Providing the hint that this node's accumulator will be used often brings significant Elo gain (13 elo)
+ Eval::NNUE::hint_common_parent_position(pos);
+ eval = ss->staticEval;
+ complexity = abs(ss->staticEval - pos.psq_eg_stm());
+ }
else if (ss->ttHit)
{
// Never assume anything about values stored in TT
if (eval == VALUE_NONE)
ss->staticEval = eval = evaluate(pos, &complexity);
else // Fall back to (semi)classical complexity for TT hits, the NNUE complexity is lost
+ {
complexity = abs(ss->staticEval - pos.psq_eg_stm());
+ if (PvNode)
+ Eval::NNUE::hint_common_parent_position(pos);
+ }
- // ttValue can be used as a better position evaluation (~4 Elo)
+ // ttValue can be used as a better position evaluation (~7 Elo)
if ( ttValue != VALUE_NONE
&& (tte->bound() & (ttValue > eval ? BOUND_LOWER : BOUND_UPPER)))
eval = ttValue;
else
{
ss->staticEval = eval = evaluate(pos, &complexity);
-
// Save static evaluation into transposition table
- if (!excludedMove)
- tte->save(posKey, VALUE_NONE, ss->ttPv, BOUND_NONE, DEPTH_NONE, MOVE_NONE, eval);
+ tte->save(posKey, VALUE_NONE, ss->ttPv, BOUND_NONE, DEPTH_NONE, MOVE_NONE, eval);
}
thisThread->complexityAverage.update(complexity);
- // Use static evaluation difference to improve quiet move ordering (~3 Elo)
+ // Use static evaluation difference to improve quiet move ordering (~4 Elo)
if (is_ok((ss-1)->currentMove) && !(ss-1)->inCheck && !priorCapture)
{
- int bonus = std::clamp(-19 * int((ss-1)->staticEval + ss->staticEval), -1914, 1914);
+ int bonus = std::clamp(-19 * int((ss-1)->staticEval + ss->staticEval), -1920, 1920);
thisThread->mainHistory[~us][from_to((ss-1)->currentMove)] << bonus;
}
// margin and the improving flag are used in various pruning heuristics.
improvement = (ss-2)->staticEval != VALUE_NONE ? ss->staticEval - (ss-2)->staticEval
: (ss-4)->staticEval != VALUE_NONE ? ss->staticEval - (ss-4)->staticEval
- : 168;
+ : 156;
improving = improvement > 0;
- // Step 7. Razoring.
+ // Step 7. Razoring (~1 Elo).
// If eval is really low check with qsearch if it can exceed alpha, if it can't,
// return a fail low.
- if (eval < alpha - 369 - 254 * depth * depth)
+ if (eval < alpha - 426 - 252 * depth * depth)
{
value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
if (value < alpha)
return value;
}
- // Step 8. Futility pruning: child node (~25 Elo).
+ // Step 8. Futility pruning: child node (~40 Elo).
// The depth condition is important for mate finding.
if ( !ss->ttPv
- && depth < 8
- && eval - futility_margin(depth, improving) - (ss-1)->statScore / 303 >= beta
+ && depth < 9
+ && eval - futility_margin(depth, improving) - (ss-1)->statScore / 280 >= beta
&& eval >= beta
- && eval < 28031) // larger than VALUE_KNOWN_WIN, but smaller than TB wins
+ && eval < 25128) // larger than VALUE_KNOWN_WIN, but smaller than TB wins
return eval;
- // Step 9. Null move search with verification search (~22 Elo)
+ // Step 9. Null move search with verification search (~35 Elo)
if ( !PvNode
&& (ss-1)->currentMove != MOVE_NULL
- && (ss-1)->statScore < 17139
+ && (ss-1)->statScore < 18755
&& eval >= beta
&& eval >= ss->staticEval
- && ss->staticEval >= beta - 20 * depth - improvement / 13 + 233 + complexity / 25
+ && ss->staticEval >= beta - 19 * depth - improvement / 13 + 253 + complexity / 25
&& !excludedMove
&& pos.non_pawn_material(us)
&& (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
assert(eval - beta >= 0);
// Null move dynamic reduction based on depth, eval and complexity of position
- Depth R = std::min(int(eval - beta) / 168, 7) + depth / 3 + 4 - (complexity > 861);
+ Depth R = std::min(int(eval - beta) / 168, 6) + depth / 3 + 4 - (complexity > 825);
ss->currentMove = MOVE_NULL;
ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
}
}
- probCutBeta = beta + 191 - 54 * improving;
+ probCutBeta = beta + 186 - 54 * improving;
- // Step 10. ProbCut (~4 Elo)
- // If we have a good enough capture and a reduced search returns a value
+ // Step 10. ProbCut (~10 Elo)
+ // If we have a good enough capture (or queen promotion) and a reduced search returns a value
// much above beta, we can (almost) safely prune the previous move.
if ( !PvNode
&& depth > 4
while ((move = mp.next_move()) != MOVE_NONE)
if (move != excludedMove && pos.legal(move))
{
- assert(pos.capture(move) || promotion_type(move) == QUEEN);
+ assert(pos.capture_stage(move));
ss->currentMove = move;
ss->continuationHistory = &thisThread->continuationHistory[ss->inCheck]
return value;
}
}
+
+ Eval::NNUE::hint_common_parent_position(pos);
}
// Step 11. If the position is not in TT, decrease depth by 3.
- // Use qsearch if depth is equal or below zero (~4 Elo)
+ // Use qsearch if depth is equal or below zero (~9 Elo)
if ( PvNode
&& !ttMove)
depth -= 3;
return qsearch<PV>(pos, ss, alpha, beta);
if ( cutNode
- && depth >= 9
+ && depth >= 7
&& !ttMove)
depth -= 2;
moves_loop: // When in check, search starts here
- // Step 12. A small Probcut idea, when we are in check (~0 Elo)
- probCutBeta = beta + 417;
+ // Step 12. A small Probcut idea, when we are in check (~4 Elo)
+ probCutBeta = beta + 391;
if ( ss->inCheck
&& !PvNode
&& depth >= 2
&& tte->depth() >= depth - 3
&& ttValue >= probCutBeta
&& abs(ttValue) <= VALUE_KNOWN_WIN
- && abs(beta) <= VALUE_KNOWN_WIN
- )
+ && abs(beta) <= VALUE_KNOWN_WIN)
return probCutBeta;
-
const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,
nullptr , (ss-4)->continuationHistory,
nullptr , (ss-6)->continuationHistory };
(ss+1)->pv = nullptr;
extension = 0;
- capture = pos.capture(move);
+ capture = pos.capture_stage(move);
movedPiece = pos.moved_piece(move);
givesCheck = pos.gives_check(move);
Value delta = beta - alpha;
- // Step 14. Pruning at shallow depth (~98 Elo). Depth conditions are important for mate finding.
+ Depth r = reduction(improving, depth, moveCount, delta, thisThread->rootDelta);
+
+ // Step 14. Pruning at shallow depth (~120 Elo). Depth conditions are important for mate finding.
if ( !rootNode
&& pos.non_pawn_material(us)
&& bestValue > VALUE_TB_LOSS_IN_MAX_PLY)
{
- // Skip quiet moves if movecount exceeds our FutilityMoveCount threshold (~7 Elo)
+ // Skip quiet moves if movecount exceeds our FutilityMoveCount threshold (~8 Elo)
moveCountPruning = moveCount >= futility_move_count(improving, depth);
// Reduced depth of the next LMR search
- int lmrDepth = std::max(newDepth - reduction(improving, depth, moveCount, delta, thisThread->rootDelta), 0);
+ int lmrDepth = std::max(newDepth - r, 0);
if ( capture
|| givesCheck)
{
- // Futility pruning for captures (~0 Elo)
+ // Futility pruning for captures (~2 Elo)
if ( !givesCheck
&& !PvNode
- && lmrDepth < 7
+ && lmrDepth < 6
&& !ss->inCheck
- && ss->staticEval + 180 + 201 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
- + captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 6 < alpha)
+ && ss->staticEval + 182 + 230 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
+ + captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 7 < alpha)
continue;
- // SEE based pruning (~9 Elo)
- if (!pos.see_ge(move, Value(-222) * depth))
+ // SEE based pruning (~11 Elo)
+ if (!pos.see_ge(move, Value(-206) * depth))
continue;
}
else
// Continuation history based pruning (~2 Elo)
if ( lmrDepth < 5
- && history < -3875 * (depth - 1))
+ && history < -4405 * (depth - 1))
continue;
history += 2 * thisThread->mainHistory[us][from_to(move)];
- // Futility pruning: parent node (~9 Elo)
+ lmrDepth += history / 7278;
+ lmrDepth = std::max(lmrDepth, -2);
+
+ // Futility pruning: parent node (~13 Elo)
if ( !ss->inCheck
&& lmrDepth < 13
- && ss->staticEval + 106 + 145 * lmrDepth + history / 52 <= alpha)
+ && ss->staticEval + 103 + 138 * lmrDepth <= alpha)
continue;
- // Prune moves with negative SEE (~3 Elo)
+ lmrDepth = std::max(lmrDepth, 0);
+
+ // Prune moves with negative SEE (~4 Elo)
if (!pos.see_ge(move, Value(-24 * lmrDepth * lmrDepth - 15 * lmrDepth)))
continue;
}
}
- // Step 15. Extensions (~66 Elo)
+ // Step 15. Extensions (~100 Elo)
// We take care to not overdo to avoid search getting stuck.
if (ss->ply < thisThread->rootDepth * 2)
{
- // Singular extension search (~58 Elo). If all moves but one fail low on a
+ // Singular extension search (~94 Elo). If all moves but one fail low on a
// search of (alpha-s, beta-s), and just one fails high on (alpha, beta),
// then that move is singular and should be extended. To verify this we do
// a reduced search on all the other moves but the ttMove and if the
// result is lower than ttValue minus a margin, then we will extend the ttMove.
if ( !rootNode
- && depth >= 4 - (thisThread->previousDepth > 24) + 2 * (PvNode && tte->is_pv())
+ && depth >= 4 - (thisThread->completedDepth > 21) + 2 * (PvNode && tte->is_pv())
&& move == ttMove
&& !excludedMove // Avoid recursive singular search
/* && ttValue != VALUE_NONE Already implicit in the next condition */
&& (tte->bound() & BOUND_LOWER)
&& tte->depth() >= depth - 3)
{
- Value singularBeta = ttValue - (3 + (ss->ttPv && !PvNode)) * depth;
+ Value singularBeta = ttValue - (3 + 2 * (ss->ttPv && !PvNode)) * depth / 2;
Depth singularDepth = (depth - 1) / 2;
ss->excludedMove = move;
+ // the search with excludedMove will update ss->staticEval
value = search<NonPV>(pos, ss, singularBeta - 1, singularBeta, singularDepth, cutNode);
ss->excludedMove = MOVE_NONE;
// Avoid search explosion by limiting the number of double extensions
if ( !PvNode
&& value < singularBeta - 25
- && ss->doubleExtensions <= 9)
+ && ss->doubleExtensions <= 10)
{
extension = 2;
- depth += depth < 12;
+ depth += depth < 13;
}
}
// If the eval of ttMove is greater than beta, we reduce it (negative extension)
else if (ttValue >= beta)
- extension = -2;
+ extension = -2 - !PvNode;
- // If the eval of ttMove is less than alpha and value, we reduce it (negative extension)
- else if (ttValue <= alpha && ttValue <= value)
+ // If the eval of ttMove is less than value, we reduce it (negative extension)
+ else if (ttValue <= value)
+ extension = -1;
+
+ // If the eval of ttMove is less than alpha, we reduce it (negative extension)
+ else if (ttValue <= alpha)
extension = -1;
}
// Check extensions (~1 Elo)
else if ( givesCheck
- && depth > 9
- && abs(ss->staticEval) > 82)
+ && depth > 10
+ && abs(ss->staticEval) > 88)
extension = 1;
- // Quiet ttMove extensions (~0 Elo)
+ // Quiet ttMove extensions (~1 Elo)
else if ( PvNode
&& move == ttMove
&& move == ss->killers[0]
- && (*contHist[0])[movedPiece][to_sq(move)] >= 5177)
+ && (*contHist[0])[movedPiece][to_sq(move)] >= 5705)
extension = 1;
}
// Step 16. Make the move
pos.do_move(move, st, givesCheck);
- // Step 17. Late moves reduction / extension (LMR, ~98 Elo)
- // We use various heuristics for the sons of a node after the first son has
- // been searched. In general we would like to reduce them, but there are many
- // cases where we extend a son if it has good chances to be "interesting".
- if ( depth >= 2
- && moveCount > 1 + (PvNode && ss->ply <= 1)
- && ( !ss->ttPv
- || !capture
- || (cutNode && (ss-1)->moveCount > 1)))
- {
- Depth r = reduction(improving, depth, moveCount, delta, thisThread->rootDelta);
+ // Decrease reduction if position is or has been on the PV
+ // and node is not likely to fail low. (~3 Elo)
+ if ( ss->ttPv
+ && !likelyFailLow)
+ r -= 2;
- // Decrease reduction if position is or has been on the PV
- // and node is not likely to fail low. (~3 Elo)
- if ( ss->ttPv
- && !likelyFailLow)
- r -= 2;
+ // Decrease reduction if opponent's move count is high (~1 Elo)
+ if ((ss-1)->moveCount > 7)
+ r--;
- // Decrease reduction if opponent's move count is high (~1 Elo)
- if ((ss-1)->moveCount > 7)
- r--;
+ // Increase reduction for cut nodes (~3 Elo)
+ if (cutNode)
+ r += 2;
- // Increase reduction for cut nodes (~3 Elo)
- if (cutNode)
- r += 2;
+ // Increase reduction if ttMove is a capture (~3 Elo)
+ if (ttCapture)
+ r++;
- // Increase reduction if ttMove is a capture (~3 Elo)
- if (ttCapture)
- r++;
+ // Decrease reduction for PvNodes based on depth
+ if (PvNode)
+ r -= 1 + 12 / (3 + depth);
- // Decrease reduction for PvNodes based on depth
- if (PvNode)
- r -= 1 + 11 / (3 + depth);
+ // Decrease reduction if ttMove has been singularly extended (~1 Elo)
+ if (singularQuietLMR)
+ r--;
- // Decrease reduction if ttMove has been singularly extended (~1 Elo)
- if (singularQuietLMR)
- r--;
+ // Decrease reduction if we move a threatened piece (~1 Elo)
+ if ( depth > 9
+ && (mp.threatenedPieces & from_sq(move)))
+ r--;
- // Decrease reduction if we move a threatened piece (~1 Elo)
- if ( depth > 9
- && (mp.threatenedPieces & from_sq(move)))
- r--;
+ // Increase reduction if next ply has a lot of fail high
+ if ((ss+1)->cutoffCnt > 3)
+ r++;
- // Increase reduction if next ply has a lot of fail high
- if ((ss+1)->cutoffCnt > 3)
- r++;
+ // Decrease reduction if move is a killer and we have a good history
+ if (move == ss->killers[0]
+ && (*contHist[0])[movedPiece][to_sq(move)] >= 3722)
+ r--;
- ss->statScore = 2 * thisThread->mainHistory[us][from_to(move)]
- + (*contHist[0])[movedPiece][to_sq(move)]
- + (*contHist[1])[movedPiece][to_sq(move)]
- + (*contHist[3])[movedPiece][to_sq(move)]
- - 4433;
+ ss->statScore = 2 * thisThread->mainHistory[us][from_to(move)]
+ + (*contHist[0])[movedPiece][to_sq(move)]
+ + (*contHist[1])[movedPiece][to_sq(move)]
+ + (*contHist[3])[movedPiece][to_sq(move)]
+ - 4182;
- // Decrease/increase reduction for moves with a good/bad history (~30 Elo)
- r -= ss->statScore / (13000 + 4152 * (depth > 7 && depth < 19));
+ // Decrease/increase reduction for moves with a good/bad history (~30 Elo)
+ r -= ss->statScore / (11791 + 3992 * (depth > 6 && depth < 19));
+ // Step 17. Late moves reduction / extension (LMR, ~117 Elo)
+ // We use various heuristics for the sons of a node after the first son has
+ // been searched. In general we would like to reduce them, but there are many
+ // cases where we extend a son if it has good chances to be "interesting".
+ if ( depth >= 2
+ && moveCount > 1 + (PvNode && ss->ply <= 1)
+ && ( !ss->ttPv
+ || !capture
+ || (cutNode && (ss-1)->moveCount > 1)))
+ {
// In general we want to cap the LMR depth search at newDepth, but when
// reduction is negative, we allow this move a limited search extension
// beyond the first move depth. This may lead to hidden double extensions.
{
// Adjust full depth search based on LMR results - if result
// was good enough search deeper, if it was bad enough search shallower
- const bool doDeeperSearch = value > (alpha + 64 + 11 * (newDepth - d));
- const bool doEvenDeeperSearch = value > alpha + 582;
+ const bool doDeeperSearch = value > (alpha + 58 + 12 * (newDepth - d));
+ const bool doEvenDeeperSearch = value > alpha + 588 && ss->doubleExtensions <= 5;
const bool doShallowerSearch = value < bestValue + newDepth;
+ ss->doubleExtensions = ss->doubleExtensions + doEvenDeeperSearch;
+
newDepth += doDeeperSearch - doShallowerSearch + doEvenDeeperSearch;
if (newDepth > d)
int bonus = value > alpha ? stat_bonus(newDepth)
: -stat_bonus(newDepth);
- if (capture)
- bonus /= 6;
-
update_continuation_histories(ss, movedPiece, to_sq(move), bonus);
}
}
- // Step 18. Full depth search when LMR is skipped
+ // Step 18. Full depth search when LMR is skipped. If expected reduction is high, reduce its depth by 1.
else if (!PvNode || moveCount > 1)
{
- value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth, !cutNode);
+ // Increase reduction for cut nodes and not ttMove (~1 Elo)
+ if (!ttMove && cutNode)
+ r += 2;
+
+ value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth - (r > 4), !cutNode);
}
// For PV nodes only, do a full PV search on the first move or after a fail
(ss+1)->pv = pv;
(ss+1)->pv[0] = MOVE_NONE;
- value = -search<PV>(pos, ss+1, -beta, -alpha,
- std::min(maxNextDepth, newDepth), false);
+ value = -search<PV>(pos, ss+1, -beta, -alpha, newDepth, false);
}
// Step 19. Undo move
rm.selDepth = thisThread->selDepth;
rm.scoreLowerbound = rm.scoreUpperbound = false;
- if (value >= beta) {
- rm.scoreLowerbound = true;
- rm.uciScore = beta;
+ if (value >= beta)
+ {
+ rm.scoreLowerbound = true;
+ rm.uciScore = beta;
}
- else if (value <= alpha) {
- rm.scoreUpperbound = true;
- rm.uciScore = alpha;
+ else if (value <= alpha)
+ {
+ rm.scoreUpperbound = true;
+ rm.uciScore = alpha;
}
+
rm.pv.resize(1);
assert((ss+1)->pv);
// Reduce other moves if we have found at least one score improvement
if ( depth > 1
&& depth < 6
- && beta < VALUE_KNOWN_WIN
- && alpha > -VALUE_KNOWN_WIN)
+ && beta < 10534
+ && alpha > -10534)
depth -= 1;
assert(depth > 0);
quietsSearched, quietCount, capturesSearched, captureCount, depth);
// Bonus for prior countermove that caused the fail low
- else if ( (depth >= 5 || PvNode)
- && !priorCapture)
+ else if (!priorCapture)
{
- //Assign extra bonus if current node is PvNode or cutNode
- //or fail low was really bad
- bool extraBonus = PvNode
- || cutNode
- || bestValue < alpha - 62 * depth;
-
- update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, stat_bonus(depth) * (1 + extraBonus));
+ int bonus = (depth > 5) + (PvNode || cutNode) + (bestValue < alpha - 97 * depth) + ((ss-1)->moveCount > 10);
+ update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, stat_bonus(depth) * bonus);
}
if (PvNode)
bool pvHit, givesCheck, capture;
int moveCount;
+ // Step 1. Initialize node
if (PvNode)
{
(ss+1)->pv = pv;
ss->inCheck = pos.checkers();
moveCount = 0;
- // Check for an immediate draw or maximum ply reached
+ // Step 2. Check for an immediate draw or maximum ply reached
if ( pos.is_draw(ss->ply)
|| ss->ply >= MAX_PLY)
return (ss->ply >= MAX_PLY && !ss->inCheck) ? evaluate(pos) : VALUE_DRAW;
// TT entry depth that we are going to use. Note that in qsearch we use
// only two types of depth in TT: DEPTH_QS_CHECKS or DEPTH_QS_NO_CHECKS.
ttDepth = ss->inCheck || depth >= DEPTH_QS_CHECKS ? DEPTH_QS_CHECKS
- : DEPTH_QS_NO_CHECKS;
- // Transposition table lookup
+ : DEPTH_QS_NO_CHECKS;
+
+ // Step 3. Transposition table lookup
posKey = pos.key();
tte = TT.probe(posKey, ss->ttHit);
ttValue = ss->ttHit ? value_from_tt(tte->value(), ss->ply, pos.rule50_count()) : VALUE_NONE;
ttMove = ss->ttHit ? tte->move() : MOVE_NONE;
pvHit = ss->ttHit && tte->is_pv();
+ // At non-PV nodes we check for an early TT cutoff
if ( !PvNode
&& ss->ttHit
&& tte->depth() >= ttDepth
&& (tte->bound() & (ttValue >= beta ? BOUND_LOWER : BOUND_UPPER)))
return ttValue;
- // Evaluate the position statically
+ // Step 4. Static evaluation of the position
if (ss->inCheck)
{
ss->staticEval = VALUE_NONE;
if ((ss->staticEval = bestValue = tte->eval()) == VALUE_NONE)
ss->staticEval = bestValue = evaluate(pos);
- // ttValue can be used as a better position evaluation (~7 Elo)
+ // ttValue can be used as a better position evaluation (~13 Elo)
if ( ttValue != VALUE_NONE
&& (tte->bound() & (ttValue > bestValue ? BOUND_LOWER : BOUND_UPPER)))
bestValue = ttValue;
if (PvNode && bestValue > alpha)
alpha = bestValue;
- futilityBase = bestValue + 153;
+ futilityBase = bestValue + 168;
}
const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,
int quietCheckEvasions = 0;
- // Loop through the moves until no moves remain or a beta cutoff occurs
+ // Step 5. Loop through all pseudo-legal moves until no moves remain
+ // or a beta cutoff occurs.
while ((move = mp.next_move()) != MOVE_NONE)
{
assert(is_ok(move));
continue;
givesCheck = pos.gives_check(move);
- capture = pos.capture(move);
+ capture = pos.capture_stage(move);
moveCount++;
- // Futility pruning and moveCount pruning (~5 Elo)
- if ( bestValue > VALUE_TB_LOSS_IN_MAX_PLY
- && !givesCheck
+ // Step 6. Pruning.
+ if (bestValue > VALUE_TB_LOSS_IN_MAX_PLY)
+ {
+ // Futility pruning and moveCount pruning (~10 Elo)
+ if ( !givesCheck
&& to_sq(move) != prevSq
&& futilityBase > -VALUE_KNOWN_WIN
&& type_of(move) != PROMOTION)
}
}
- // Do not search moves with negative SEE values (~5 Elo)
- if ( bestValue > VALUE_TB_LOSS_IN_MAX_PLY
- && !pos.see_ge(move))
+ // We prune after 2nd quiet check evasion where being 'in check' is implicitly checked through the counter
+ // and being a 'quiet' apart from being a tt move is assumed after an increment because captures are pushed ahead.
+ if (quietCheckEvasions > 1)
+ break;
+
+ // Continuation history based pruning (~3 Elo)
+ if ( !capture
+ && (*contHist[0])[pos.moved_piece(move)][to_sq(move)] < 0
+ && (*contHist[1])[pos.moved_piece(move)][to_sq(move)] < 0)
+ continue;
+
+ // Do not search moves with bad enough SEE values (~5 Elo)
+ if (!pos.see_ge(move, Value(-110)))
continue;
+ }
// Speculative prefetch as early as possible
prefetch(TT.first_entry(pos.key_after(move)));
+ // Update the current move
ss->currentMove = move;
ss->continuationHistory = &thisThread->continuationHistory[ss->inCheck]
[capture]
[pos.moved_piece(move)]
[to_sq(move)];
- // Continuation history based pruning (~2 Elo)
- if ( !capture
- && bestValue > VALUE_TB_LOSS_IN_MAX_PLY
- && (*contHist[0])[pos.moved_piece(move)][to_sq(move)] < 0
- && (*contHist[1])[pos.moved_piece(move)][to_sq(move)] < 0)
- continue;
-
- // We prune after 2nd quiet check evasion where being 'in check' is implicitly checked through the counter
- // and being a 'quiet' apart from being a tt move is assumed after an increment because captures are pushed ahead.
- if ( bestValue > VALUE_TB_LOSS_IN_MAX_PLY
- && quietCheckEvasions > 1)
- break;
-
quietCheckEvasions += !capture && ss->inCheck;
- // Make and search the move
+ // Step 7. Make and search the move
pos.do_move(move, st, givesCheck);
value = -qsearch<nodeType>(pos, ss+1, -beta, -alpha, depth - 1);
pos.undo_move(move);
assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);
- // Check for a new best move
+ // Step 8. Check for a new best move
if (value > bestValue)
{
bestValue = value;
}
}
+ // Step 9. Check for mate
// All legal moves have been searched. A special case: if we're in check
// and no legal moves were found, it is checkmate.
if (ss->inCheck && bestValue == -VALUE_INFINITE)
PieceType captured = type_of(pos.piece_on(to_sq(bestMove)));
int bonus1 = stat_bonus(depth + 1);
- if (!pos.capture(bestMove))
+ if (!pos.capture_stage(bestMove))
{
- int bonus2 = bestValue > beta + 137 ? bonus1 // larger bonus
+ int bonus2 = bestValue > beta + 153 ? bonus1 // larger bonus
: stat_bonus(depth); // smaller bonus
// Increase stats for the best move in case it was a quiet move