This patch is a parameter tweak that passed both STC and LTC tests.
STC:
LLR: 2.94 (-2.94,2.94) <0.00,2.00>
Total: 80944 W: 21557 L: 21189 D: 38198
Ptnml(0-2): 192, 8883, 22028, 9103, 266
https://tests.stockfishchess.org/tests/view/63b07fe2d421d8f75795a03b
LTC:
LLR: 2.95 (-2.94,2.94) <0.50,2.50>
Total: 30440 W: 8296 L: 8007 D: 14137
Ptnml(0-2): 6, 2893, 9143, 3162, 16
https://tests.stockfishchess.org/tests/view/63b167d02ab1290f961644db
closes https://github.com/official-stockfish/Stockfish/pull/4318
Bench: 4182223
// We use the much less accurate but faster Classical eval when the NNUE
// option is set to false. Otherwise we use the NNUE eval unless the
// PSQ advantage is decisive and several pieces remain. (~3 Elo)
// We use the much less accurate but faster Classical eval when the NNUE
// option is set to false. Otherwise we use the NNUE eval unless the
// PSQ advantage is decisive and several pieces remain. (~3 Elo)
- bool useClassical = !useNNUE || (pos.count<ALL_PIECES>() > 7 && abs(psq) > 1760);
+ bool useClassical = !useNNUE || (pos.count<ALL_PIECES>() > 7 && abs(psq) > 1781);
if (useClassical)
v = Evaluation<NO_TRACE>(pos).value();
if (useClassical)
v = Evaluation<NO_TRACE>(pos).value();
Value nnue = NNUE::evaluate(pos, true, &nnueComplexity);
// Blend nnue complexity with (semi)classical complexity
Value nnue = NNUE::evaluate(pos, true, &nnueComplexity);
// Blend nnue complexity with (semi)classical complexity
- nnueComplexity = ( 412 * nnueComplexity
- + 428 * abs(psq - nnue)
+ nnueComplexity = ( 406 * nnueComplexity
+ + 424 * abs(psq - nnue)
+ (optimism > 0 ? int(optimism) * int(psq - nnue) : 0)
) / 1024;
+ (optimism > 0 ? int(optimism) * int(psq - nnue) : 0)
) / 1024;
if (complexity)
*complexity = nnueComplexity;
if (complexity)
*complexity = nnueComplexity;
- optimism = optimism * (278 + nnueComplexity) / 256;
- v = (nnue * scale + optimism * (scale - 755)) / 1024;
+ optimism = optimism * (272 + nnueComplexity) / 256;
+ v = (nnue * scale + optimism * (scale - 748)) / 1024;
}
// Damp down the evaluation linearly when shuffling
}
// Damp down the evaluation linearly when shuffling
- v = v * (197 - pos.rule50_count()) / 214;
+ v = v * (200 - pos.rule50_count()) / 214;
// Guarantee evaluation does not hit the tablebase range
v = std::clamp(v, VALUE_TB_LOSS_IN_MAX_PLY + 1, VALUE_TB_WIN_IN_MAX_PLY - 1);
// Guarantee evaluation does not hit the tablebase range
v = std::clamp(v, VALUE_TB_LOSS_IN_MAX_PLY + 1, VALUE_TB_WIN_IN_MAX_PLY - 1);
// Futility margin
Value futility_margin(Depth d, bool improving) {
// Futility margin
Value futility_margin(Depth d, bool improving) {
- return Value(165 * (d - improving));
+ return Value(158 * (d - improving));
}
// Reductions lookup table, initialized at startup
}
// Reductions lookup table, initialized at startup
Depth reduction(bool i, Depth d, int mn, Value delta, Value rootDelta) {
int r = Reductions[d] * Reductions[mn];
Depth reduction(bool i, Depth d, int mn, Value delta, Value rootDelta) {
int r = Reductions[d] * Reductions[mn];
- return (r + 1642 - int(delta) * 1024 / int(rootDelta)) / 1024 + (!i && r > 916);
+ return (r + 1460 - int(delta) * 1024 / int(rootDelta)) / 1024 + (!i && r > 937);
}
constexpr int futility_move_count(bool improving, Depth depth) {
}
constexpr int futility_move_count(bool improving, Depth depth) {
// History and stats update bonus, based on depth
int stat_bonus(Depth d) {
// History and stats update bonus, based on depth
int stat_bonus(Depth d) {
- return std::min((12 * d + 282) * d - 349 , 1480);
+ return std::min((11 * d + 284) * d - 363 , 1650);
}
// Add a small random component to draw evaluations to avoid 3-fold blindness
}
// Add a small random component to draw evaluations to avoid 3-fold blindness
multiPV = std::min(multiPV, rootMoves.size());
multiPV = std::min(multiPV, rootMoves.size());
- complexityAverage.set(155, 1);
+ complexityAverage.set(153, 1);
optimism[us] = optimism[~us] = VALUE_ZERO;
optimism[us] = optimism[~us] = VALUE_ZERO;
if (rootDepth >= 4)
{
Value prev = rootMoves[pvIdx].averageScore;
if (rootDepth >= 4)
{
Value prev = rootMoves[pvIdx].averageScore;
- delta = Value(10) + int(prev) * prev / 15620;
+ delta = Value(10) + int(prev) * prev / 15400;
alpha = std::max(prev - delta,-VALUE_INFINITE);
beta = std::min(prev + delta, VALUE_INFINITE);
// Adjust optimism based on root move's previousScore
alpha = std::max(prev - delta,-VALUE_INFINITE);
beta = std::min(prev + delta, VALUE_INFINITE);
// Adjust optimism based on root move's previousScore
- int opt = 118 * prev / (std::abs(prev) + 169);
+ int opt = 116 * prev / (std::abs(prev) + 170);
optimism[ us] = Value(opt);
optimism[~us] = -optimism[us];
}
optimism[ us] = Value(opt);
optimism[~us] = -optimism[us];
}
// Use static evaluation difference to improve quiet move ordering (~4 Elo)
if (is_ok((ss-1)->currentMove) && !(ss-1)->inCheck && !priorCapture)
{
// Use static evaluation difference to improve quiet move ordering (~4 Elo)
if (is_ok((ss-1)->currentMove) && !(ss-1)->inCheck && !priorCapture)
{
- int bonus = std::clamp(-19 * int((ss-1)->staticEval + ss->staticEval), -1914, 1914);
+ int bonus = std::clamp(-19 * int((ss-1)->staticEval + ss->staticEval), -1940, 1940);
thisThread->mainHistory[~us][from_to((ss-1)->currentMove)] << bonus;
}
thisThread->mainHistory[~us][from_to((ss-1)->currentMove)] << bonus;
}
// margin and the improving flag are used in various pruning heuristics.
improvement = (ss-2)->staticEval != VALUE_NONE ? ss->staticEval - (ss-2)->staticEval
: (ss-4)->staticEval != VALUE_NONE ? ss->staticEval - (ss-4)->staticEval
// margin and the improving flag are used in various pruning heuristics.
improvement = (ss-2)->staticEval != VALUE_NONE ? ss->staticEval - (ss-2)->staticEval
: (ss-4)->staticEval != VALUE_NONE ? ss->staticEval - (ss-4)->staticEval
improving = improvement > 0;
// Step 7. Razoring (~1 Elo).
// If eval is really low check with qsearch if it can exceed alpha, if it can't,
// return a fail low.
improving = improvement > 0;
// Step 7. Razoring (~1 Elo).
// If eval is really low check with qsearch if it can exceed alpha, if it can't,
// return a fail low.
- if (eval < alpha - 369 - 254 * depth * depth)
+ if (eval < alpha - 394 - 255 * depth * depth)
{
value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
if (value < alpha)
{
value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
if (value < alpha)
// The depth condition is important for mate finding.
if ( !ss->ttPv
&& depth < 8
// The depth condition is important for mate finding.
if ( !ss->ttPv
&& depth < 8
- && eval - futility_margin(depth, improving) - (ss-1)->statScore / 303 >= beta
+ && eval - futility_margin(depth, improving) - (ss-1)->statScore / 304 >= beta
- && eval < 28031) // larger than VALUE_KNOWN_WIN, but smaller than TB wins
+ && eval < 28580) // larger than VALUE_KNOWN_WIN, but smaller than TB wins
return eval;
// Step 9. Null move search with verification search (~35 Elo)
if ( !PvNode
&& (ss-1)->currentMove != MOVE_NULL
return eval;
// Step 9. Null move search with verification search (~35 Elo)
if ( !PvNode
&& (ss-1)->currentMove != MOVE_NULL
- && (ss-1)->statScore < 17139
+ && (ss-1)->statScore < 18200
&& eval >= beta
&& eval >= ss->staticEval
&& eval >= beta
&& eval >= ss->staticEval
- && ss->staticEval >= beta - 20 * depth - improvement / 13 + 233 + complexity / 25
+ && ss->staticEval >= beta - 20 * depth - improvement / 14 + 235 + complexity / 24
&& !excludedMove
&& pos.non_pawn_material(us)
&& (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
&& !excludedMove
&& pos.non_pawn_material(us)
&& (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
assert(eval - beta >= 0);
// Null move dynamic reduction based on depth, eval and complexity of position
assert(eval - beta >= 0);
// Null move dynamic reduction based on depth, eval and complexity of position
- Depth R = std::min(int(eval - beta) / 168, 7) + depth / 3 + 4 - (complexity > 861);
+ Depth R = std::min(int(eval - beta) / 165, 6) + depth / 3 + 4 - (complexity > 800);
ss->currentMove = MOVE_NULL;
ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
ss->currentMove = MOVE_NULL;
ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
- probCutBeta = beta + 191 - 54 * improving;
+ probCutBeta = beta + 180 - 54 * improving;
// Step 10. ProbCut (~10 Elo)
// If we have a good enough capture and a reduced search returns a value
// Step 10. ProbCut (~10 Elo)
// If we have a good enough capture and a reduced search returns a value
moves_loop: // When in check, search starts here
// Step 12. A small Probcut idea, when we are in check (~4 Elo)
moves_loop: // When in check, search starts here
// Step 12. A small Probcut idea, when we are in check (~4 Elo)
- probCutBeta = beta + 417;
+ probCutBeta = beta + 402;
if ( ss->inCheck
&& !PvNode
&& depth >= 2
if ( ss->inCheck
&& !PvNode
&& depth >= 2
&& !PvNode
&& lmrDepth < 7
&& !ss->inCheck
&& !PvNode
&& lmrDepth < 7
&& !ss->inCheck
- && ss->staticEval + 180 + 201 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
+ && ss->staticEval + 185 + 203 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
+ captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 6 < alpha)
continue;
// SEE based pruning (~11 Elo)
+ captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 6 < alpha)
continue;
// SEE based pruning (~11 Elo)
- if (!pos.see_ge(move, Value(-222) * depth))
+ if (!pos.see_ge(move, Value(-220) * depth))
// Continuation history based pruning (~2 Elo)
if ( lmrDepth < 5
// Continuation history based pruning (~2 Elo)
if ( lmrDepth < 5
- && history < -3875 * (depth - 1))
+ && history < -4180 * (depth - 1))
continue;
history += 2 * thisThread->mainHistory[us][from_to(move)];
continue;
history += 2 * thisThread->mainHistory[us][from_to(move)];
// Futility pruning: parent node (~13 Elo)
if ( !ss->inCheck
&& lmrDepth < 13
// Futility pruning: parent node (~13 Elo)
if ( !ss->inCheck
&& lmrDepth < 13
- && ss->staticEval + 106 + 145 * lmrDepth + history / 52 <= alpha)
+ && ss->staticEval + 103 + 136 * lmrDepth + history / 53 <= alpha)
continue;
// Prune moves with negative SEE (~4 Elo)
continue;
// Prune moves with negative SEE (~4 Elo)
- if (!pos.see_ge(move, Value(-24 * lmrDepth * lmrDepth - 15 * lmrDepth)))
+ if (!pos.see_ge(move, Value(-25 * lmrDepth * lmrDepth - 16 * lmrDepth)))
// Check extensions (~1 Elo)
else if ( givesCheck
&& depth > 9
// Check extensions (~1 Elo)
else if ( givesCheck
&& depth > 9
- && abs(ss->staticEval) > 82)
+ && abs(ss->staticEval) > 78)
extension = 1;
// Quiet ttMove extensions (~1 Elo)
else if ( PvNode
&& move == ttMove
&& move == ss->killers[0]
extension = 1;
// Quiet ttMove extensions (~1 Elo)
else if ( PvNode
&& move == ttMove
&& move == ss->killers[0]
- && (*contHist[0])[movedPiece][to_sq(move)] >= 5177)
+ && (*contHist[0])[movedPiece][to_sq(move)] >= 5600)
+ (*contHist[0])[movedPiece][to_sq(move)]
+ (*contHist[1])[movedPiece][to_sq(move)]
+ (*contHist[3])[movedPiece][to_sq(move)]
+ (*contHist[0])[movedPiece][to_sq(move)]
+ (*contHist[1])[movedPiece][to_sq(move)]
+ (*contHist[3])[movedPiece][to_sq(move)]
// Decrease/increase reduction for moves with a good/bad history (~30 Elo)
// Decrease/increase reduction for moves with a good/bad history (~30 Elo)
- r -= ss->statScore / (13000 + 4152 * (depth > 7 && depth < 19));
+ r -= ss->statScore / (12800 + 4410 * (depth > 7 && depth < 19));
// Step 17. Late moves reduction / extension (LMR, ~117 Elo)
// We use various heuristics for the sons of a node after the first son has
// Step 17. Late moves reduction / extension (LMR, ~117 Elo)
// We use various heuristics for the sons of a node after the first son has
{
// Adjust full depth search based on LMR results - if result
// was good enough search deeper, if it was bad enough search shallower
{
// Adjust full depth search based on LMR results - if result
// was good enough search deeper, if it was bad enough search shallower
- const bool doDeeperSearch = value > (alpha + 64 + 11 * (newDepth - d));
+ const bool doDeeperSearch = value > (alpha + 66 + 11 * (newDepth - d));
const bool doEvenDeeperSearch = value > alpha + 582 && ss->doubleExtensions <= 5;
const bool doShallowerSearch = value < bestValue + newDepth;
const bool doEvenDeeperSearch = value > alpha + 582 && ss->doubleExtensions <= 5;
const bool doShallowerSearch = value < bestValue + newDepth;
quietsSearched, quietCount, capturesSearched, captureCount, depth);
// Bonus for prior countermove that caused the fail low
quietsSearched, quietCount, capturesSearched, captureCount, depth);
// Bonus for prior countermove that caused the fail low
- else if ( (depth >= 5 || PvNode || bestValue < alpha - 62 * depth)
+ else if ( (depth >= 5 || PvNode || bestValue < alpha - 65 * depth)
&& !priorCapture)
{
//Assign extra bonus if current node is PvNode or cutNode
&& !priorCapture)
{
//Assign extra bonus if current node is PvNode or cutNode
bool extraBonus = PvNode
|| cutNode;
bool extraBonus = PvNode
|| cutNode;
- bool doubleExtraBonus = extraBonus && bestValue < alpha - 85 * depth;
+ bool doubleExtraBonus = extraBonus && bestValue < alpha - 88 * depth;
update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, stat_bonus(depth) * (1 + extraBonus + doubleExtraBonus));
}
update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, stat_bonus(depth) * (1 + extraBonus + doubleExtraBonus));
}
if (PvNode && bestValue > alpha)
alpha = bestValue;
if (PvNode && bestValue > alpha)
alpha = bestValue;
- futilityBase = bestValue + 153;
+ futilityBase = bestValue + 158;
}
const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,
}
const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,
if (!pos.capture(bestMove))
{
if (!pos.capture(bestMove))
{
- int bonus2 = bestValue > beta + 137 ? bonus1 // larger bonus
+ int bonus2 = bestValue > beta + 146 ? bonus1 // larger bonus
: stat_bonus(depth); // smaller bonus
// Increase stats for the best move in case it was a quiet move
: stat_bonus(depth); // smaller bonus
// Increase stats for the best move in case it was a quiet move