: networks.big.evaluate(pos, true, &nnueComplexity, false);
// Blend optimism and eval with nnue complexity and material imbalance
- optimism += optimism * (nnueComplexity + std::abs(simpleEval - nnue)) / 512;
- nnue -= nnue * (nnueComplexity + std::abs(simpleEval - nnue)) / 32768;
+ optimism += optimism * (nnueComplexity + std::abs(simpleEval - nnue)) / 524;
+ nnue -= nnue * (nnueComplexity + std::abs(simpleEval - nnue)) / 31950;
int npm = pos.non_pawn_material() / 64;
- int v = (nnue * (915 + npm + 9 * pos.count<PAWN>()) + optimism * (154 + npm)) / 1024;
+ int v = (nnue * (927 + npm + 9 * pos.count<PAWN>()) + optimism * (159 + npm)) / 1000;
// Damp down the evaluation linearly when shuffling
int shuffling = pos.rule50_count();
- v = v * (200 - shuffling) / 214;
+ v = v * (195 - shuffling) / 228;
// Guarantee evaluation does not hit the tablebase range
v = std::clamp(v, VALUE_TB_LOSS_IN_MAX_PLY + 1, VALUE_TB_WIN_IN_MAX_PLY - 1);
m.value += bool(pos.check_squares(pt) & to) * 16384;
// bonus for escaping from capture
- m.value += threatenedPieces & from ? (pt == QUEEN && !(to & threatenedByRook) ? 50000
- : pt == ROOK && !(to & threatenedByMinor) ? 25000
- : !(to & threatenedByPawn) ? 15000
+ m.value += threatenedPieces & from ? (pt == QUEEN && !(to & threatenedByRook) ? 51000
+ : pt == ROOK && !(to & threatenedByMinor) ? 24950
+ : !(to & threatenedByPawn) ? 14450
: 0)
: 0;
// malus for putting piece en prise
m.value -= !(threatenedPieces & from)
- ? (pt == QUEEN ? bool(to & threatenedByRook) * 50000
- + bool(to & threatenedByMinor) * 10000
- : pt == ROOK ? bool(to & threatenedByMinor) * 25000
- : pt != PAWN ? bool(to & threatenedByPawn) * 15000
+ ? (pt == QUEEN ? bool(to & threatenedByRook) * 48150
+ + bool(to & threatenedByMinor) * 10650
+ : pt == ROOK ? bool(to & threatenedByMinor) * 24500
+ : pt != PAWN ? bool(to & threatenedByPawn) * 14950
: 0)
: 0;
}
// Returns the next pseudo-legal move until there are no more
// moves left, picking the move with the highest score from a list of generated moves.
Move MovePicker::next_move(bool skipQuiets) {
- auto quiet_threshold = [](Depth d) { return -3330 * d; };
+ auto quiet_threshold = [](Depth d) { return -3550 * d; };
top:
switch (stage)
// Futility margin
Value futility_margin(Depth d, bool noTtCutNode, bool improving, bool oppWorsening) {
- Value futilityMult = 121 - 43 * noTtCutNode;
- Value improvingDeduction = 3 * improving * futilityMult / 2;
+ Value futilityMult = 122 - 46 * noTtCutNode;
+ Value improvingDeduction = 57 * improving * futilityMult / 32;
Value worseningDeduction = (331 + 45 * improving) * oppWorsening * futilityMult / 1024;
return futilityMult * d - improvingDeduction - worseningDeduction;
// Add correctionHistory value to raw staticEval and guarantee evaluation does not hit the tablebase range
Value to_corrected_static_eval(Value v, const Worker& w, const Position& pos) {
// cv: correction-history entry for the side to move, indexed by the pawn-structure hash.
auto cv = w.correctionHistory[pos.side_to_move()][pawn_structure_index<Correction>(pos)];
// Sign-preserving, quadratic-in-magnitude adjustment; this hunk retunes the
// scaling divisor from 10759 to 11450 (slightly weaker correction per unit of cv).
- v += cv * std::abs(cv) / 10759;
+ v += cv * std::abs(cv) / 11450;
// Clamp so the corrected eval can never be confused with a tablebase win/loss score.
return std::clamp(v, VALUE_TB_LOSS_IN_MAX_PLY + 1, VALUE_TB_WIN_IN_MAX_PLY - 1);
}
// History and stats update bonus, based on depth (capped at a fixed maximum).
int stat_bonus(Depth d) {
    const int uncapped = 249 * d - 327;
    return std::min(uncapped, 1192);
}
// History and stats update malus, based on depth
// NOTE(review): tuning hunk — lowers the malus cap from 1432 to 1254;
// the depth-scaling term (516 * d - 299) is unchanged.
-int stat_malus(Depth d) { return std::min(516 * d - 299, 1432); }
+int stat_malus(Depth d) { return std::min(516 * d - 299, 1254); }
// Add a small random component to draw evaluations to avoid 3-fold blindness
// Add a small random component to draw evaluations to avoid 3-fold blindness.
// The low bits of the node counter act as the (deterministic) noise source.
Value value_draw(size_t nodes) {
    const Value jitter = Value(nodes & 0x2);
    return VALUE_DRAW - 1 + jitter;
}
// Reset aspiration window starting size
Value avg = rootMoves[pvIdx].averageScore;
- delta = 9 + avg * avg / 12804;
+ delta = 9 + avg * avg / 12800;
alpha = std::max(avg - delta, -VALUE_INFINITE);
beta = std::min(avg + delta, VALUE_INFINITE);
// Adjust optimism based on root move's averageScore (~4 Elo)
- optimism[us] = 131 * avg / (std::abs(avg) + 90);
+ optimism[us] = 130 * avg / (std::abs(avg) + 90);
optimism[~us] = -optimism[us];
// Start with a small aspiration window and, in the case of a fail
// high/low, re-search with a bigger window until we don't fail high/low anymore.
h->fill(-71);
for (size_t i = 1; i < reductions.size(); ++i)
- reductions[i] = int((19.02 + std::log(size_t(options["Threads"])) / 2) * std::log(i));
+ reductions[i] = int((19.80 + std::log(size_t(options["Threads"])) / 2) * std::log(i));
}
// Use static evaluation difference to improve quiet move ordering (~9 Elo)
if (((ss - 1)->currentMove).is_ok() && !(ss - 1)->inCheck && !priorCapture)
{
- int bonus = std::clamp(-14 * int((ss - 1)->staticEval + ss->staticEval), -1621, 1237);
+ int bonus = std::clamp(-14 * int((ss - 1)->staticEval + ss->staticEval), -1621, 1238);
bonus = bonus > 0 ? 2 * bonus : bonus / 2;
thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()] << bonus;
if (type_of(pos.piece_on(prevSq)) != PAWN && ((ss - 1)->currentMove).type_of() != PROMOTION)
thisThread->pawnHistory[pawn_structure_index(pos)][pos.piece_on(prevSq)][prevSq]
- << bonus / 4;
+ << bonus / 2;
}
// Set up the improving flag, which is true if current static evaluation is
// Step 11. ProbCut (~10 Elo)
// If we have a good enough capture (or queen promotion) and a reduced search returns a value
// much above beta, we can (almost) safely prune the previous move.
- probCutBeta = beta + 164 - 62 * improving;
+ probCutBeta = beta + 168 - 64 * improving;
if (
!PvNode && depth > 3
&& std::abs(beta) < VALUE_TB_WIN_IN_MAX_PLY
+ (*contHist[3])[movedPiece][move.to_sq()] - 4587;
// Decrease/increase reduction for moves with a good/bad history (~8 Elo)
- r -= ss->statScore / 12372;
+ r -= ss->statScore / 14956;
// Step 17. Late moves reduction / extension (LMR, ~117 Elo)
if (depth >= 2 && moveCount > 1 + rootNode)
Depth Search::Worker::reduction(bool i, Depth d, int mn, int delta) {
// Base LMR reduction from the precomputed `reductions` table, scaled by depth and move number.
int reductionScale = reductions[d] * reductions[mn];
// Hunk retunes only the "not improving" extra-reduction threshold: 952 -> 950.
// The delta/rootDelta term shrinks the reduction when the search window is wide.
- return (reductionScale + 1091 - delta * 759 / rootDelta) / 1024 + (!i && reductionScale > 952);
+ return (reductionScale + 1091 - delta * 759 / rootDelta) / 1024 + (!i && reductionScale > 950);
}
namespace {