beta = std::min(avg + delta, VALUE_INFINITE);
// Adjust optimism based on root move's averageScore (~4 Elo)
- optimism[us] = 103 * avg / (std::abs(avg) + 119);
- optimism[~us] = -116 * avg / (std::abs(avg) + 123);
+ optimism[us] = 110 * avg / (std::abs(avg) + 121);
+ optimism[~us] = -optimism[us];
// Start with a small aspiration window and, in the case of a fail
// high/low, re-search with a bigger window until we don't fail
if (depth <= 0)
return qsearch<PV>(pos, ss, alpha, beta);
- // For cutNodes without a ttMove, we decrease depth by 2
- // if current depth >= 8.
+ // For cutNodes without a ttMove, we decrease depth by 2 if depth is high enough.
if (cutNode && depth >= 8 && !ttMove)
depth -= 2;
// Note: the depth margin and singularBeta margin are known to have non-linear
// scaling. Their values are optimized to time controls of 180+1.8 and longer
- // so changing them requires tests at this type of time controls.
+ // so changing them requires tests at these types of time controls.
// Recursive singular search is avoided.
if (!rootNode && move == ttMove && !excludedMove
&& depth >= 4 - (thisThread->completedDepth > 24) + 2 * (PvNode && tte->is_pv())
&& tte->depth() >= depth - 3)
{
Value singularBeta = ttValue - (64 + 57 * (ss->ttPv && !PvNode)) * depth / 64;
- Depth singularDepth = (depth - 1) / 2;
+ Depth singularDepth = newDepth / 2;
ss->excludedMove = move;
value =
// We do not know whether the ttMove is singular or can produce a multi-cut,
// so we reduce the ttMove in favor of other moves based on some conditions:
- // If the ttMove is assumed to fail high over currnet beta (~7 Elo)
+ // If the ttMove is assumed to fail high over current beta (~7 Elo)
else if (ttValue >= beta)
extension = -2 - !PvNode;
if ((ss + 1)->cutoffCnt > 3)
r++;
- // Set reduction to 0 for first generated move (ttMove)
+ // Set reduction to 0 for first picked move (ttMove) (~2 Elo)
// Nullifies all previous reduction adjustments to the ttMove, leaving only the history-based adjustments in place
else if (move == ttMove)
r = 0;
// In general we want to cap the LMR depth search at newDepth, but when
// reduction is negative, we allow this move a limited search extension
// beyond the first move depth. This may lead to hidden double extensions.
- Depth d = std::clamp(newDepth - r, 1, newDepth + 1);
+ // To prevent problems when the max value is less than the min value,
+ // std::clamp has been replaced by a more robust implementation.
+ Depth d = std::max(1, std::min(newDepth - r, newDepth + 1));
value = -search<NonPV>(pos, ss + 1, -(alpha + 1), -alpha, d, true);
{
// Adjust full-depth search based on LMR results - if the result
// was good enough search deeper, if it was bad enough search shallower.
- const bool doDeeperSearch = value > (bestValue + 51 + 10 * (newDepth - d));
- const bool doEvenDeeperSearch = value > alpha + 700 && ss->doubleExtensions <= 6;
- const bool doShallowerSearch = value < bestValue + newDepth;
+ const bool doDeeperSearch =
+ value > (bestValue + 51 + 10 * (newDepth - d)); // (~1 Elo)
+ const bool doShallowerSearch = value < bestValue + newDepth; // (~2 Elo)
- ss->doubleExtensions = ss->doubleExtensions + doEvenDeeperSearch;
-
- newDepth += doDeeperSearch - doShallowerSearch + doEvenDeeperSearch;
+ newDepth += doDeeperSearch - doShallowerSearch;
if (newDepth > d)
value = -search<NonPV>(pos, ss + 1, -(alpha + 1), -alpha, newDepth, !cutNode);
bestValue = ttValue;
}
else
- // In case of null move search use previous static eval with a different sign
+ // In case of null move search, use previous static eval with a different sign
ss->staticEval = bestValue =
(ss - 1)->currentMove != MOVE_NULL ? evaluate(pos) : -(ss - 1)->staticEval;