// Reset aspiration window starting size
Value avg = rootMoves[pvIdx].averageScore;
- delta = Value(10) + int(avg) * avg / 17470;
+ delta = Value(10) + int(avg) * avg / 15335;
alpha = std::max(avg - delta, -VALUE_INFINITE);
beta = std::min(avg + delta, VALUE_INFINITE);
// Adjust optimism based on root move's averageScore (~4 Elo)
- int opt = 113 * avg / (std::abs(avg) + 109);
- optimism[us] = Value(opt);
- optimism[~us] = -optimism[us];
+ optimism[us] = 103 * (avg + 33) / (std::abs(avg + 34) + 119);
+ optimism[~us] = -116 * (avg + 40) / (std::abs(avg + 12) + 123);
// Start with a small aspiration window and, in the case of a fail
// high/low, re-search with a bigger window until we don't fail
{
int bonus = std::clamp(-18 * int((ss - 1)->staticEval + ss->staticEval), -1812, 1812);
thisThread->mainHistory[~us][from_to((ss - 1)->currentMove)] << bonus;
+ if (type_of(pos.piece_on(prevSq)) != PAWN && type_of((ss - 1)->currentMove) != PROMOTION)
+ thisThread->pawnHistory[pawn_structure(pos)][pos.piece_on(prevSq)][prevSq] << bonus / 4;
}
// Set up the improving flag, which is true if current static evaluation is
{
value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
if (value < alpha)
- {
- if (!priorCapture && prevSq != SQ_NONE)
- {
- int bonus = (depth > 6) + (PvNode || cutNode) + (value < alpha - 658)
- + ((ss - 1)->moveCount > 11);
- update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq,
- stat_bonus(depth) * bonus);
- thisThread->mainHistory[~us][from_to((ss - 1)->currentMove)]
- << stat_bonus(depth) * bonus * 57 / 100;
- }
return value;
- }
}
// Step 8. Futility pruning: child node (~40 Elo)
{
assert(pos.capture_stage(move));
+ // Prefetch the TT entry for the resulting position
+ prefetch(TT.first_entry(pos.key_after(move)));
+
ss->currentMove = move;
ss->continuationHistory =
&thisThread
lmrDepth = std::max(lmrDepth, -1);
// Futility pruning: parent node (~13 Elo)
- if (!ss->inCheck && lmrDepth < 13 && ss->staticEval + 77 + 124 * lmrDepth <= alpha)
+ if (!ss->inCheck && lmrDepth < 13
+ && ss->staticEval + (bestValue < ss->staticEval - 62 ? 123 : 77)
+ + 127 * lmrDepth
+ <= alpha)
continue;
lmrDepth = std::max(lmrDepth, 0);
// Singular extension search (~94 Elo). If all moves but one fail low on a
// search of (alpha-s, beta-s), and just one fails high on (alpha, beta),
// then that move is singular and should be extended. To verify this we do
- // a reduced search on all the other moves but the ttMove and if the result
- // is lower than ttValue minus a margin, then we will extend the ttMove. Note
- // that depth margin and singularBeta margin are known for having non-linear
+ // a reduced search on the position excluding the ttMove and if the result
+ // is lower than ttValue minus a margin, then we will extend the ttMove.
+
+ // Note: the depth margin and singularBeta margin are known for having non-linear
// scaling. Their values are optimized to time controls of 180+1.8 and longer,
// so changing them requires tests at this type of time control.
// Recursive singular search is avoided.
}
// Multi-cut pruning
- // Our ttMove is assumed to fail high, and now we failed high also on a
- // reduced search without the ttMove. So we assume this expected cut-node
- // is not singular, that multiple moves fail high, and we can prune the
- // whole subtree by returning a softbound.
+ // Our ttMove is assumed to fail high based on the bound of the TT entry,
+ // and if after excluding the ttMove with a reduced search we fail high over the original beta,
+ // we assume this expected cut-node is not singular (multiple moves fail high),
+ // and we can prune the whole subtree by returning a softbound.
else if (singularBeta >= beta)
return singularBeta;
- // If the eval of ttMove is greater than beta, reduce it (negative extension) (~7 Elo)
+ // Negative extensions
+ // If other moves failed high over (ttValue - margin) without the ttMove on a reduced search,
+ // but we cannot do multi-cut because (ttValue - margin) is lower than the original beta,
+ // we do not know if the ttMove is singular or can do a multi-cut,
+ // so we reduce the ttMove in favor of other moves based on some conditions:
+
+ // If the ttMove is assumed to fail high over current beta (~7 Elo)
else if (ttValue >= beta)
extension = -2 - !PvNode;
- // If we are on a cutNode, reduce it based on depth (negative extension) (~1 Elo)
+ // If we are on a cutNode but the ttMove is not assumed to fail high over current beta (~1 Elo)
else if (cutNode)
extension = depth < 19 ? -2 : -1;
- // If the eval of ttMove is less than value, reduce it (negative extension) (~1 Elo)
+ // If the ttMove is assumed to fail low over the value of the reduced search (~1 Elo)
else if (ttValue <= value)
extension = -1;
}
update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq,
stat_bonus(depth) * bonus);
thisThread->mainHistory[~us][from_to((ss - 1)->currentMove)]
- << stat_bonus(depth) * bonus * 61 / 100;
+ << stat_bonus(depth) * bonus / 2;
}
if (PvNode)
assert(0 <= ss->ply && ss->ply < MAX_PLY);
- // Decide whether or not to include checks: this fixes also the type of
- // TT entry depth that we are going to use. Note that in qsearch we use
- // only two types of depth in TT: DEPTH_QS_CHECKS or DEPTH_QS_NO_CHECKS.
+ // Decide the replacement and cutoff priority of the qsearch TT entries
ttDepth = ss->inCheck || depth >= DEPTH_QS_CHECKS ? DEPTH_QS_CHECKS : DEPTH_QS_NO_CHECKS;
// Step 3. Transposition table lookup