namespace {
// Different node types, used as a template parameter
- enum NodeType { NonPV, PV };
-
- constexpr uint64_t TtHitAverageWindow = 4096;
- constexpr uint64_t TtHitAverageResolution = 1024;
+ enum NodeType { NonPV, PV, Root };
// Futility margin
Value futility_margin(Depth d, bool improving) {
// Reductions lookup table, initialized at startup
int Reductions[MAX_MOVES]; // [depth or moveNumber]
- Depth reduction(bool i, Depth d, int mn) {
+ // LMR reduction in plies: i = improving flag, d = depth, mn = move number.
+ // New parameter rangeReduction: the caller passes true after it has seen
+ // several small (static eval - search value) gaps at this node, adding one
+ // extra ply of reduction ("range reductions", ~3 Elo).
+ Depth reduction(bool i, Depth d, int mn, bool rangeReduction) {
int r = Reductions[d] * Reductions[mn];
- return (r + 534) / 1024 + (!i && r > 904);
+ // Base table-driven reduction, one more ply when not improving and the
+ // table product is large, plus the optional range-reduction ply.
+ return (r + 534) / 1024 + (!i && r > 904) + rangeReduction;
}
constexpr int futility_move_count(bool improving, Depth depth) {
return VALUE_DRAW + Value(2 * (thisThread->nodes & 1) - 1);
}
+ // Check if the current thread is in a search explosion
+ // A thread is considered "explosive" while the running average of double
+ // extensions for either color is greater than 2% (is_greater(2, 100)).
+ // After more than 6M nodes with no non-explosive sample the thread enters
+ // MUST_CALM_DOWN (the caller in search() then caps depth growth); it
+ // returns to EXPLOSION_NONE after 6M nodes with no explosive sample.
+ ExplosionState search_explosion(Thread* thisThread) {
+
+ uint64_t nodesNow = thisThread->nodes;
+ bool explosive = thisThread->doubleExtensionAverage[WHITE].is_greater(2, 100)
+ || thisThread->doubleExtensionAverage[BLACK].is_greater(2, 100);
+
+ // Remember the node count of the most recent explosive / normal sample
+ if (explosive)
+ thisThread->nodesLastExplosive = nodesNow;
+ else
+ thisThread->nodesLastNormal = nodesNow;
+
+ // Trip into the calm-down state after >6M nodes without a normal sample
+ if ( explosive
+ && thisThread->state == EXPLOSION_NONE
+ && nodesNow - thisThread->nodesLastNormal > 6000000)
+ thisThread->state = MUST_CALM_DOWN;
+
+ // Recover after >6M nodes without an explosive sample
+ if ( thisThread->state == MUST_CALM_DOWN
+ && nodesNow - thisThread->nodesLastExplosive > 6000000)
+ thisThread->state = EXPLOSION_NONE;
+
+ return thisThread->state;
+ }
+
// Skill structure is used to implement strength limit
struct Skill {
explicit Skill(int l) : level(l) {}
Move best = MOVE_NONE;
};
- template <NodeType NT>
+ template <NodeType nodeType>
Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, bool cutNode);
- template <NodeType NT>
+ template <NodeType nodeType>
Value qsearch(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth = 0);
Value value_to_tt(Value v, int ply);
void Search::init() {
+ // Precompute the logarithmic Reductions[] table consumed by reduction();
+ // the formula is simplified to a pure log(i) with a retuned 21.9 scale
+ // (previously 21.3 * log(i + 0.25 * log(i))).
for (int i = 1; i < MAX_MOVES; ++i)
- Reductions[i] = int(21.3 * std::log(i + 0.25 * std::log(i)));
+ Reductions[i] = int(21.9 * std::log(i));
}
multiPV = std::max(multiPV, (size_t)4);
multiPV = std::min(multiPV, rootMoves.size());
- ttHitAverage = TtHitAverageWindow * TtHitAverageResolution / 2;
- int ct = int(Options["Contempt"]) * PawnValueEg / 100; // From centipawns
+ ttHitAverage.set(50, 100); // initialize the running average at 50%
+ doubleExtensionAverage[WHITE].set(0, 100); // initialize the running average at 0%
+ doubleExtensionAverage[BLACK].set(0, 100); // initialize the running average at 0%
- // In analysis mode, adjust contempt in accordance with user preference
- if (Limits.infinite || Options["UCI_AnalyseMode"])
- ct = Options["Analysis Contempt"] == "Off" ? 0
- : Options["Analysis Contempt"] == "Both" ? ct
- : Options["Analysis Contempt"] == "White" && us == BLACK ? -ct
- : Options["Analysis Contempt"] == "Black" && us == WHITE ? -ct
- : ct;
-
- // Evaluation score is from the white point of view
- contempt = (us == WHITE ? make_score(ct, ct / 2)
- : -make_score(ct, ct / 2));
+ nodesLastExplosive = nodes;
+ nodesLastNormal = nodes;
+ state = EXPLOSION_NONE;
+ trend = SCORE_ZERO;
int searchAgainCounter = 0;
alpha = std::max(prev - delta,-VALUE_INFINITE);
beta = std::min(prev + delta, VALUE_INFINITE);
- // Adjust contempt based on root move's previousScore (dynamic contempt)
- int dct = ct + (113 - ct / 2) * prev / (abs(prev) + 147);
+ // Adjust trend based on root move's previousScore (dynamic contempt)
+ int tr = 113 * prev / (abs(prev) + 147);
- contempt = (us == WHITE ? make_score(dct, dct / 2)
- : -make_score(dct, dct / 2));
+ trend = (us == WHITE ? make_score(tr, tr / 2)
+ : -make_score(tr, tr / 2));
}
// Start with a small aspiration window and, in the case of a fail
while (true)
{
Depth adjustedDepth = std::max(1, rootDepth - failedHighCnt - searchAgainCounter);
- bestValue = Stockfish::search<PV>(rootPos, ss, alpha, beta, adjustedDepth, false);
+ bestValue = Stockfish::search<Root>(rootPos, ss, alpha, beta, adjustedDepth, false);
// Bring the best move to the front. It is critical that sorting
// is done with a stable algorithm because all the values but the
totBestMoveChanges += th->bestMoveChanges;
th->bestMoveChanges = 0;
}
- double bestMoveInstability = 1 + 2 * totBestMoveChanges / Threads.size();
-
+ double bestMoveInstability = 1.073 + std::max(1.0, 2.25 - 9.9 / rootDepth)
+ * totBestMoveChanges / Threads.size();
double totalTime = Time.optimum() * fallingEval * reduction * bestMoveInstability;
// Cap used time in case of a single legal move for a better viewer experience in tournaments
// search<>() is the main search function for both PV and non-PV nodes
- template <NodeType NT>
+ template <NodeType nodeType>
Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, bool cutNode) {
- constexpr bool PvNode = NT == PV;
- const bool rootNode = PvNode && ss->ply == 0;
+ Thread* thisThread = pos.this_thread();
+
+ // Step 0. Limit search explosion
+ if ( ss->ply > 10
+ && search_explosion(thisThread) == MUST_CALM_DOWN
+ && depth > (ss-1)->depth)
+ depth = (ss-1)->depth;
+
+ constexpr bool PvNode = nodeType != NonPV;
+ constexpr bool rootNode = nodeType == Root;
const Depth maxNextDepth = rootNode ? depth : depth + 1;
// Check if we have an upcoming move which draws by repetition, or
// if the opponent had an alternative move earlier to this position.
- if ( pos.rule50_count() >= 3
+ if ( !rootNode
+ && pos.rule50_count() >= 3
&& alpha < VALUE_DRAW
- && !rootNode
&& pos.has_game_cycle(ss->ply))
{
alpha = value_draw(pos.this_thread());
// Dive into quiescence search when the depth reaches zero
if (depth <= 0)
- return qsearch<NT>(pos, ss, alpha, beta);
+ return qsearch<PvNode ? PV : NonPV>(pos, ss, alpha, beta);
assert(-VALUE_INFINITE <= alpha && alpha < beta && beta <= VALUE_INFINITE);
assert(PvNode || (alpha == beta - 1));
Value bestValue, value, ttValue, eval, maxValue, probCutBeta;
bool givesCheck, improving, didLMR, priorCapture;
bool captureOrPromotion, doFullDepthSearch, moveCountPruning,
- ttCapture, singularQuietLMR;
+ ttCapture, singularQuietLMR, noLMRExtension;
Piece movedPiece;
int moveCount, captureCount, quietCount;
// Step 1. Initialize node
- Thread* thisThread = pos.this_thread();
ss->inCheck = pos.checkers();
priorCapture = pos.captured_piece();
Color us = pos.side_to_move();
assert(0 <= ss->ply && ss->ply < MAX_PLY);
- (ss+1)->ttPv = false;
+ (ss+1)->ttPv = false;
(ss+1)->excludedMove = bestMove = MOVE_NONE;
- (ss+2)->killers[0] = (ss+2)->killers[1] = MOVE_NONE;
- Square prevSq = to_sq((ss-1)->currentMove);
+ (ss+2)->killers[0] = (ss+2)->killers[1] = MOVE_NONE;
+ ss->doubleExtensions = (ss-1)->doubleExtensions;
+ ss->depth = depth;
+ Square prevSq = to_sq((ss-1)->currentMove);
+
+ // Update the running average statistics for double extensions
+ thisThread->doubleExtensionAverage[us].update(ss->depth > (ss-1)->depth);
// Initialize statScore to zero for the grandchildren of the current position.
// So statScore is shared between all grandchildren and only the first grandchild
&& is_ok((ss-1)->currentMove))
thisThread->lowPlyHistory[ss->ply - 1][from_to((ss-1)->currentMove)] << stat_bonus(depth - 5);
- // thisThread->ttHitAverage can be used to approximate the running average of ttHit
- thisThread->ttHitAverage = (TtHitAverageWindow - 1) * thisThread->ttHitAverage / TtHitAverageWindow
- + TtHitAverageResolution * ss->ttHit;
+ // running average of ttHit
+ thisThread->ttHitAverage.update(ss->ttHit);
// At non-PV nodes we check for an early TT cutoff
if ( !PvNode
ss->staticEval = eval = -(ss-1)->staticEval;
// Save static evaluation into transposition table
+ if(!excludedMove)
tte->save(posKey, VALUE_NONE, ss->ttPv, BOUND_NONE, DEPTH_NONE, MOVE_NONE, eval);
}
? ss->staticEval > (ss-4)->staticEval || (ss-4)->staticEval == VALUE_NONE
: ss->staticEval > (ss-2)->staticEval;
- // Step 7. Futility pruning: child node (~50 Elo)
+ // Step 7. Futility pruning: child node (~50 Elo).
+ // The depth condition is important for mate finding.
if ( !PvNode
&& depth < 9
&& eval - futility_margin(depth, improving) >= beta
&& (ss-1)->statScore < 23767
&& eval >= beta
&& eval >= ss->staticEval
- && ss->staticEval >= beta - 20 * depth - 22 * improving + 168 * ss->ttPv + 159
+ && ss->staticEval >= beta - 20 * depth - 22 * improving + 168 * ss->ttPv + 177
&& !excludedMove
&& pos.non_pawn_material(us)
&& (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
assert(eval - beta >= 0);
// Null move dynamic reduction based on depth and value
- Depth R = (1090 + 81 * depth) / 256 + std::min(int(eval - beta) / 205, 3);
+ Depth R = std::min(int(eval - beta) / 205, 3) + depth / 3 + 4;
ss->currentMove = MOVE_NULL;
ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
ss->ttPv = ttPv;
}
- // Step 10. If the position is not in TT, decrease depth by 2
+ // Step 10. If the position is not in TT, decrease depth by 2 or 1 depending on node type
if ( PvNode
&& depth >= 6
&& !ttMove)
depth -= 2;
-moves_loop: // When in check, search starts from here
+ if ( cutNode
+ && depth >= 9
+ && !ttMove)
+ depth--;
+
+moves_loop: // When in check, search starts here
ttCapture = ttMove && pos.capture_or_promotion(ttMove);
+ int rangeReduction = 0;
// Step 11. A small Probcut idea, when we are in check
probCutBeta = beta + 409;
ss->ply);
value = bestValue;
- singularQuietLMR = moveCountPruning = false;
+ singularQuietLMR = moveCountPruning = noLMRExtension = false;
// Indicate PvNodes that will probably fail low if the node was searched
// at a depth equal or greater than the current depth, and the result of this search was a fail low.
// Calculate new depth for this move
newDepth = depth - 1;
- // Step 13. Pruning at shallow depth (~200 Elo)
+ // Step 13. Pruning at shallow depth (~200 Elo). Depth conditions are important for mate finding.
if ( !rootNode
&& pos.non_pawn_material(us)
&& bestValue > VALUE_TB_LOSS_IN_MAX_PLY)
moveCountPruning = moveCount >= futility_move_count(improving, depth);
// Reduced depth of the next LMR search
- int lmrDepth = std::max(newDepth - reduction(improving, depth, moveCount), 0);
+ int lmrDepth = std::max(newDepth - reduction(improving, depth, moveCount, rangeReduction > 2), 0);
if ( captureOrPromotion
|| givesCheck)
else
{
// Continuation history based pruning (~20 Elo)
- if ( lmrDepth < 5
- && (*contHist[0])[movedPiece][to_sq(move)] < CounterMovePruneThreshold
- && (*contHist[1])[movedPiece][to_sq(move)] < CounterMovePruneThreshold)
+ if (lmrDepth < 5
+ && (*contHist[0])[movedPiece][to_sq(move)]
+ + (*contHist[1])[movedPiece][to_sq(move)]
+ + (*contHist[3])[movedPiece][to_sq(move)] < -3000 * depth + 3000)
continue;
// Futility pruning: parent node (~5 Elo)
- if ( lmrDepth < 7
- && !ss->inCheck
- && ss->staticEval + 174 + 157 * lmrDepth <= alpha
- && (*contHist[0])[movedPiece][to_sq(move)]
- + (*contHist[1])[movedPiece][to_sq(move)]
- + (*contHist[3])[movedPiece][to_sq(move)]
- + (*contHist[5])[movedPiece][to_sq(move)] / 3 < 28255)
+ if ( !ss->inCheck
+ && lmrDepth < 8
+ && ss->staticEval + 172 + 145 * lmrDepth <= alpha)
continue;
// Prune moves with negative SEE (~20 Elo)
- if (!pos.see_ge(move, Value(-(30 - std::min(lmrDepth, 18)) * lmrDepth * lmrDepth)))
+ if (!pos.see_ge(move, Value(-21 * lmrDepth * lmrDepth - 21 * lmrDepth)))
continue;
}
}
// then that move is singular and should be extended. To verify this we do
// a reduced search on all the other moves but the ttMove and if the
// result is lower than ttValue minus a margin, then we will extend the ttMove.
- if ( depth >= 7
+ if ( !rootNode
+ && depth >= 7
&& move == ttMove
- && !rootNode
&& !excludedMove // Avoid recursive singular search
/* && ttValue != VALUE_NONE Already implicit in the next condition */
&& abs(ttValue) < VALUE_KNOWN_WIN
&& (tte->bound() & BOUND_LOWER)
&& tte->depth() >= depth - 3)
{
- Value singularBeta = ttValue - 2 * depth;
+ Value singularBeta = ttValue - 3 * depth;
Depth singularDepth = (depth - 1) / 2;
ss->excludedMove = move;
{
extension = 1;
singularQuietLMR = !ttCapture;
- if (!PvNode && value < singularBeta - 93)
+
+ // Avoid search explosion by limiting the number of double extensions
+ if ( !PvNode
+ && value < singularBeta - 75
+ && ss->doubleExtensions <= 6)
+ {
extension = 2;
+ noLMRExtension = true;
+ }
}
// Multi-cut pruning
return singularBeta;
// If the eval of ttMove is greater than beta we try also if there is another
- // move that pushes it over beta, if so also produce a cutoff.
+ // move that pushes it over beta, if so the position also has probably multiple
+ // moves giving fail highs. We will then reduce the ttMove (negative extension).
else if (ttValue >= beta)
{
ss->excludedMove = move;
ss->excludedMove = MOVE_NONE;
if (value >= beta)
- return beta;
+ extension = -2;
}
}
+
+ // Capture extensions for PvNodes and cutNodes
+ else if ( (PvNode || cutNode)
+ && captureOrPromotion
+ && moveCount != 1)
+ extension = 1;
+
+ // Check extensions
else if ( givesCheck
&& depth > 6
- && abs(ss->staticEval) > Value(100))
+ && abs(ss->staticEval) > 100)
+ extension = 1;
+
+ // Quiet ttMove extensions
+ else if ( PvNode
+ && move == ttMove
+ && move == ss->killers[0]
+ && (*contHist[0])[movedPiece][to_sq(move)] >= 10000)
extension = 1;
// Add extension to new depth
newDepth += extension;
+ ss->doubleExtensions = (ss-1)->doubleExtensions + (extension == 2);
// Speculative prefetch as early as possible
prefetch(TT.first_entry(pos.key_after(move)));
|| !ss->ttPv)
&& (!PvNode || ss->ply > 1 || thisThread->id() % 4 != 3))
{
- Depth r = reduction(improving, depth, moveCount);
+ Depth r = reduction(improving, depth, moveCount, rangeReduction > 2);
+
+ if (PvNode)
+ r--;
// Decrease reduction if the ttHit running average is large (~0 Elo)
- if (thisThread->ttHitAverage > 537 * TtHitAverageResolution * TtHitAverageWindow / 1024)
+ if (thisThread->ttHitAverage.is_greater(537, 1024))
r--;
// Decrease reduction if position is or has been on the PV
// Increase reduction at root and non-PV nodes when the best move does not change frequently
if ( (rootNode || !PvNode)
- && thisThread->rootDepth > 10
&& thisThread->bestMoveChanges <= 2)
r++;
r--;
// Increase reduction for cut nodes (~3 Elo)
- if (cutNode)
- r += 1 + !captureOrPromotion;
+ if (cutNode && move != ss->killers[0])
+ r += 2;
- if (!captureOrPromotion)
- {
- // Increase reduction if ttMove is a capture (~3 Elo)
- if (ttCapture)
- r++;
-
- ss->statScore = thisThread->mainHistory[us][from_to(move)]
- + (*contHist[0])[movedPiece][to_sq(move)]
- + (*contHist[1])[movedPiece][to_sq(move)]
- + (*contHist[3])[movedPiece][to_sq(move)]
- - 4923;
-
- // Decrease/increase reduction for moves with a good/bad history (~30 Elo)
- if (!ss->inCheck)
- r -= ss->statScore / 14721;
- }
+ // Increase reduction if ttMove is a capture (~3 Elo)
+ if (ttCapture)
+ r++;
+
+ ss->statScore = thisThread->mainHistory[us][from_to(move)]
+ + (*contHist[0])[movedPiece][to_sq(move)]
+ + (*contHist[1])[movedPiece][to_sq(move)]
+ + (*contHist[3])[movedPiece][to_sq(move)]
+ - 4923;
+
+ // Decrease/increase reduction for moves with a good/bad history (~30 Elo)
+ r -= ss->statScore / 14721;
- // In general we want to cap the LMR depth search at newDepth. But if
- // reductions are really negative and movecount is low, we allow this move
- // to be searched deeper than the first move.
- Depth d = std::clamp(newDepth - r, 1, newDepth + (r < -1 && moveCount <= 5));
+ // In general we want to cap the LMR depth search at newDepth. But if reductions
+ // are really negative and movecount is low, we allow this move to be searched
+ // deeper than the first move (this may lead to hidden double extensions if
+ // newDepth got its own extension before).
+ int deeper = r >= -1 ? 0
+ : noLMRExtension ? 0
+ : moveCount <= 5 ? 1
+ : (depth > 6 && PvNode) ? 1
+ : 0;
+
+ Depth d = std::clamp(newDepth - r, 1, newDepth + deeper);
value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, d, true);
+ // Range reductions (~3 Elo)
+ if (ss->staticEval - value < 30 && depth > 7)
+ rangeReduction++;
+
// If the son is reduced and fails high it will be re-searched at full depth
doFullDepthSearch = value > alpha && d < newDepth;
didLMR = true;
// Bonus for prior countermove that caused the fail low
else if ( (depth >= 3 || PvNode)
&& !priorCapture)
- update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, stat_bonus(depth));
+ update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, stat_bonus(depth) * (1 + (PvNode || cutNode)));
if (PvNode)
bestValue = std::min(bestValue, maxValue);
// qsearch() is the quiescence search function, which is called by the main search
// function with zero depth, or recursively with further decreasing depth per call.
- template <NodeType NT>
+ template <NodeType nodeType>
Value qsearch(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth) {
- constexpr bool PvNode = NT == PV;
+ static_assert(nodeType != Root);
+ constexpr bool PvNode = nodeType == PV;
assert(alpha >= -VALUE_INFINITE && alpha < beta && beta <= VALUE_INFINITE);
assert(PvNode || (alpha == beta - 1));
// Initialize a MovePicker object for the current position, and prepare
// to search the moves. Because the depth is <= 0 here, only captures,
- // queen and checking knight promotions, and other checks(only if depth >= DEPTH_QS_CHECKS)
+ // queen promotions, and other checks (only if depth >= DEPTH_QS_CHECKS)
// will be generated.
MovePicker mp(pos, ttMove, depth, &thisThread->mainHistory,
&thisThread->captureHistory,
{
assert(is_ok(move));
+ // Check for legality
+ if (!pos.legal(move))
+ continue;
+
givesCheck = pos.gives_check(move);
captureOrPromotion = pos.capture_or_promotion(move);
// Speculative prefetch as early as possible
prefetch(TT.first_entry(pos.key_after(move)));
- // Check for legality just before making the move
- if (!pos.legal(move))
- {
- moveCount--;
- continue;
- }
-
ss->currentMove = move;
ss->continuationHistory = &thisThread->continuationHistory[ss->inCheck]
[captureOrPromotion]
// Make and search the move
pos.do_move(move, st, givesCheck);
- value = -qsearch<NT>(pos, ss+1, -beta, -alpha, depth - 1);
+ value = -qsearch<nodeType>(pos, ss+1, -beta, -alpha, depth - 1);
pos.undo_move(move);
assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);