// Late-move-reduction amount for the current move.
// i          - "improving" flag: static eval is better than two plies ago
// d          - remaining search depth
// mn         - move number (order in which the move was searched)
// delta      - size of the current aspiration window (beta - alpha)
// rootDelta  - window size at the root, used to scale the reduction
// Base reduction comes from the precomputed Reductions[] table (indexed by
// depth and move count); a narrow window relative to the root window reduces
// less. Non-improving nodes with a large base reduction get one extra ply.
// NOTE(review): resolved leftover diff markers to the newer hunk (1001).
Depth reduction(bool i, Depth d, int mn, Value delta, Value rootDelta) {
  int r = Reductions[d] * Reductions[mn];
  return (r + 1449 - int(delta) * 1001 / int(rootDelta)) / 1024 + (!i && r > 941);
}
constexpr int futility_move_count(bool improving, Depth depth) {
// History and stats update bonus, based on depth.
// Grows linearly with remaining depth and saturates at 1855 so very deep
// nodes do not dominate the history tables.
// NOTE(review): resolved leftover diff markers to the newer hunk (341 * d).
int stat_bonus(Depth d) {
  return std::min(341 * d - 470, 1855);
}
// Add a small random component to draw evaluations to avoid 3-fold blindness
bestValue = delta = alpha = -VALUE_INFINITE;
beta = VALUE_INFINITE;
+ optimism[WHITE] = optimism[BLACK] = VALUE_ZERO;
if (mainThread)
{
int rootComplexity;
- if (Eval::useNNUE)
- Eval::NNUE::evaluate(rootPos, true, &rootComplexity);
- else
- Eval::evaluate(rootPos, &rootComplexity);
+ Eval::evaluate(rootPos, &rootComplexity);
mainThread->complexity = std::min(1.03 + (rootComplexity - 241) / 1552.0, 1.45);
multiPV = std::min(multiPV, rootMoves.size());
- optimism[us] = optimism[~us] = VALUE_ZERO;
-
int searchAgainCounter = 0;
// Iterative deepening loop until requested to stop or the target depth is reached
Eval::NNUE::hint_common_parent_position(pos);
}
- // Step 11. If the position is not in TT, decrease depth by 3.
+ // Step 11. If the position is not in TT, decrease depth by 2 (or by 4 if the TT entry for the current position was hit and the stored depth is greater than or equal to the current depth).
// Use qsearch if depth is equal or below zero (~9 Elo)
if ( PvNode
&& !ttMove)
- depth -= 3;
+ depth -= 2 + 2 * (ss->ttHit && tte->depth() >= depth);
if (depth <= 0)
return qsearch<PV>(pos, ss, alpha, beta);
lmrDepth = std::max(lmrDepth, 0);
// Prune moves with negative SEE (~4 Elo)
- if (!pos.see_ge(move, Value(-24 * lmrDepth * lmrDepth - 15 * lmrDepth)))
+ if (!pos.see_ge(move, Value(-24 * lmrDepth * lmrDepth - 16 * lmrDepth)))
continue;
}
}
+ (*contHist[0])[movedPiece][to_sq(move)]
+ (*contHist[1])[movedPiece][to_sq(move)]
+ (*contHist[3])[movedPiece][to_sq(move)]
- - 4182;
+ - 4082;
// Decrease/increase reduction for moves with a good/bad history (~25 Elo)
- r -= ss->statScore / (11791 + 3992 * (depth > 6 && depth < 19));
+ r -= ss->statScore / (11079 + 4626 * (depth > 6 && depth < 19));
// Step 17. Late moves reduction / extension (LMR, ~117 Elo)
// We use various heuristics for the sons of a node after the first son has
(ss+1)->pv[0] = MOVE_NONE;
value = -search<PV>(pos, ss+1, -beta, -alpha, newDepth, false);
+
+ if (moveCount > 1 && newDepth >= depth && !capture)
+ update_continuation_histories(ss, movedPiece, to_sq(move), -stat_bonus(newDepth));
}
// Step 19. Undo move