// Futility margin
Value futility_margin(Depth d, bool improving) {
- return Value(168 * (d - improving));
+ return Value(165 * (d - improving));
}
// Reductions lookup table, initialized at startup
Depth reduction(bool i, Depth d, int mn, Value delta, Value rootDelta) {
int r = Reductions[d] * Reductions[mn];
- return (r + 1463 - int(delta) * 1024 / int(rootDelta)) / 1024 + (!i && r > 1010);
+ return (r + 1642 - int(delta) * 1024 / int(rootDelta)) / 1024 + (!i && r > 916);
}
constexpr int futility_move_count(bool improving, Depth depth) {
// History and stats update bonus, based on depth
int stat_bonus(Depth d) {
- return std::min((9 * d + 270) * d - 311 , 2145);
+ return std::min((12 * d + 282) * d - 349 , 1594);
}
// Add a small random component to draw evaluations to avoid 3-fold blindness
- Value value_draw(Thread* thisThread) {
- return VALUE_DRAW + Value(2 * (thisThread->nodes & 1) - 1);
+ Value value_draw(const Thread* thisThread) {
+ return VALUE_DRAW - 1 + Value(thisThread->nodes & 0x2);
}
// Skill structure is used to implement strength limit. If we have an uci_elo then
Value value_to_tt(Value v, int ply);
Value value_from_tt(Value v, int ply, int r50c);
- void update_pv(Move* pv, Move move, Move* childPv);
+ void update_pv(Move* pv, Move move, const Move* childPv);
void update_continuation_histories(Stack* ss, Piece pc, Square to, int bonus);
void update_quiet_stats(const Position& pos, Stack* ss, Move move, int bonus);
void update_all_stats(const Position& pos, Stack* ss, Move bestMove, Value bestValue, Value beta, Square prevSq,
void Search::init() {
for (int i = 1; i < MAX_MOVES; ++i)
- Reductions[i] = int((20.81 + std::log(Threads.size()) / 2) * std::log(i));
+ Reductions[i] = int((20.26 + std::log(Threads.size()) / 2) * std::log(i));
}
// Send again PV info if we have a new best thread
if (bestThread != this)
- sync_cout << UCI::pv(bestThread->rootPos, bestThread->completedDepth, -VALUE_INFINITE, VALUE_INFINITE) << sync_endl;
+ sync_cout << UCI::pv(bestThread->rootPos, bestThread->completedDepth) << sync_endl;
sync_cout << "bestmove " << UCI::move(bestThread->rootMoves[0].pv[0], rootPos.is_chess960());
multiPV = std::min(multiPV, rootMoves.size());
- complexityAverage.set(202, 1);
+ complexityAverage.set(155, 1);
- trend = SCORE_ZERO;
- optimism[ us] = Value(39);
- optimism[~us] = -optimism[us];
+ optimism[us] = optimism[~us] = VALUE_ZERO;
int searchAgainCounter = 0;
if (rootDepth >= 4)
{
Value prev = rootMoves[pvIdx].averageScore;
- delta = Value(16) + int(prev) * prev / 19178;
+ delta = Value(10) + int(prev) * prev / 15620;
alpha = std::max(prev - delta,-VALUE_INFINITE);
beta = std::min(prev + delta, VALUE_INFINITE);
- // Adjust trend and optimism based on root move's previousScore
- int tr = sigmoid(prev, 3, 8, 90, 125, 1);
- trend = (us == WHITE ? make_score(tr, tr / 2)
- : -make_score(tr, tr / 2));
-
- int opt = sigmoid(prev, 8, 17, 144, 13966, 183);
+ // Adjust optimism based on root move's previousScore
+ int opt = 118 * prev / (std::abs(prev) + 169);
optimism[ us] = Value(opt);
optimism[~us] = -optimism[us];
}
int failedHighCnt = 0;
while (true)
{
- Depth adjustedDepth = std::max(1, rootDepth - failedHighCnt - searchAgainCounter);
+ // Adjust the effective depth searched, while ensuring at least one effective increment for every
+ // four searchAgain steps (see issue #2717).
+ Depth adjustedDepth = std::max(1, rootDepth - failedHighCnt - 3 * (searchAgainCounter + 1) / 4);
bestValue = Stockfish::search<Root>(rootPos, ss, alpha, beta, adjustedDepth, false);
// Bring the best move to the front. It is critical that sorting
&& multiPV == 1
&& (bestValue <= alpha || bestValue >= beta)
&& Time.elapsed() > 3000)
- sync_cout << UCI::pv(rootPos, rootDepth, alpha, beta) << sync_endl;
+ sync_cout << UCI::pv(rootPos, rootDepth) << sync_endl;
// In case of failing low/high increase aspiration window and
// re-search, otherwise exit the loop.
if ( mainThread
&& (Threads.stop || pvIdx + 1 == multiPV || Time.elapsed() > 3000))
- sync_cout << UCI::pv(rootPos, rootDepth, alpha, beta) << sync_endl;
+ sync_cout << UCI::pv(rootPos, rootDepth) << sync_endl;
}
if (!Threads.stop)
&& !Threads.stop
&& !mainThread->stopOnPonderhit)
{
- double fallingEval = (69 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
- + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 781.4;
+ double fallingEval = (71 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
+ + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 656.7;
fallingEval = std::clamp(fallingEval, 0.5, 1.5);
// If the bestMove is stable over several iterations, reduce time accordingly
- timeReduction = lastBestMoveDepth + 10 < completedDepth ? 1.63 : 0.73;
- double reduction = (1.56 + mainThread->previousTimeReduction) / (2.20 * timeReduction);
+ timeReduction = lastBestMoveDepth + 9 < completedDepth ? 1.37 : 0.65;
+ double reduction = (1.4 + mainThread->previousTimeReduction) / (2.15 * timeReduction);
double bestMoveInstability = 1 + 1.7 * totBestMoveChanges / Threads.size();
int complexity = mainThread->complexityAverage.value();
- double complexPosition = std::clamp(1.0 + (complexity - 326) / 1618.1, 0.5, 1.5);
+ double complexPosition = std::min(1.0 + (complexity - 261) / 1738.7, 1.5);
double totalTime = Time.optimum() * fallingEval * reduction * bestMoveInstability * complexPosition;
}
else if ( Threads.increaseDepth
&& !mainThread->ponder
- && Time.elapsed() > totalTime * 0.43)
+ && Time.elapsed() > totalTime * 0.53)
Threads.increaseDepth = false;
else
Threads.increaseDepth = true;
Move ttMove, move, excludedMove, bestMove;
Depth extension, newDepth;
Value bestValue, value, ttValue, eval, maxValue, probCutBeta;
- bool givesCheck, improving, didLMR, priorCapture;
- bool capture, doFullDepthSearch, moveCountPruning, ttCapture;
+ bool givesCheck, improving, priorCapture, singularQuietLMR;
+ bool capture, moveCountPruning, ttCapture;
Piece movedPiece;
int moveCount, captureCount, quietCount, improvement, complexity;
// Step 1. Initialize node
Thread* thisThread = pos.this_thread();
- thisThread->depth = depth;
ss->inCheck = pos.checkers();
priorCapture = pos.captured_piece();
Color us = pos.side_to_move();
// At non-PV nodes we check for an early TT cutoff
if ( !PvNode
&& ss->ttHit
- && tte->depth() > depth - (thisThread->id() % 2 == 1)
+ && tte->depth() > depth - (tte->bound() == BOUND_EXACT)
&& ttValue != VALUE_NONE // Possible in case of TT access race
- && (ttValue >= beta ? (tte->bound() & BOUND_LOWER)
- : (tte->bound() & BOUND_UPPER)))
+ && (tte->bound() & (ttValue >= beta ? BOUND_LOWER : BOUND_UPPER)))
{
// If ttMove is quiet, update move sorting heuristics on TT hit (~1 Elo)
if (ttMove)
// Never assume anything about values stored in TT
ss->staticEval = eval = tte->eval();
if (eval == VALUE_NONE)
- ss->staticEval = eval = evaluate(pos);
-
- // Randomize draw evaluation
- if (eval == VALUE_DRAW)
- eval = value_draw(thisThread);
+ ss->staticEval = eval = evaluate(pos, &complexity);
+ else // Fall back to (semi)classical complexity for TT hits; the NNUE complexity is lost
+ complexity = abs(ss->staticEval - pos.psq_eg_stm());
// ttValue can be used as a better position evaluation (~4 Elo)
if ( ttValue != VALUE_NONE
}
else
{
- ss->staticEval = eval = evaluate(pos);
+ ss->staticEval = eval = evaluate(pos, &complexity);
// Save static evaluation into transposition table
if (!excludedMove)
tte->save(posKey, VALUE_NONE, ss->ttPv, BOUND_NONE, DEPTH_NONE, MOVE_NONE, eval);
}
+ thisThread->complexityAverage.update(complexity);
+
// Use static evaluation difference to improve quiet move ordering (~3 Elo)
if (is_ok((ss-1)->currentMove) && !(ss-1)->inCheck && !priorCapture)
{
- int bonus = std::clamp(-16 * int((ss-1)->staticEval + ss->staticEval), -2000, 2000);
+ int bonus = std::clamp(-19 * int((ss-1)->staticEval + ss->staticEval), -1914, 1914);
thisThread->mainHistory[~us][from_to((ss-1)->currentMove)] << bonus;
}
// margin and the improving flag are used in various pruning heuristics.
improvement = (ss-2)->staticEval != VALUE_NONE ? ss->staticEval - (ss-2)->staticEval
: (ss-4)->staticEval != VALUE_NONE ? ss->staticEval - (ss-4)->staticEval
- : 175;
-
+ : 168;
improving = improvement > 0;
- complexity = abs(ss->staticEval - (us == WHITE ? eg_value(pos.psq_score()) : -eg_value(pos.psq_score())));
-
- thisThread->complexityAverage.update(complexity);
// Step 7. Razoring.
// If eval is really low check with qsearch if it can exceed alpha, if it can't,
// return a fail low.
- if ( !PvNode
- && depth <= 7
- && eval < alpha - 348 - 258 * depth * depth)
+ if (eval < alpha - 369 - 254 * depth * depth)
{
value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
if (value < alpha)
// The depth condition is important for mate finding.
if ( !ss->ttPv
&& depth < 8
- && eval - futility_margin(depth, improving) - (ss-1)->statScore / 256 >= beta
+ && eval - futility_margin(depth, improving) - (ss-1)->statScore / 303 >= beta
&& eval >= beta
- && eval < 26305) // larger than VALUE_KNOWN_WIN, but smaller than TB wins.
+ && eval < 28031) // larger than VALUE_KNOWN_WIN, but smaller than TB wins
return eval;
// Step 9. Null move search with verification search (~22 Elo)
if ( !PvNode
&& (ss-1)->currentMove != MOVE_NULL
- && (ss-1)->statScore < 14695
+ && (ss-1)->statScore < 17139
&& eval >= beta
&& eval >= ss->staticEval
- && ss->staticEval >= beta - 15 * depth - improvement / 15 + 198 + complexity / 28
+ && ss->staticEval >= beta - 20 * depth - improvement / 13 + 233 + complexity / 25
&& !excludedMove
&& pos.non_pawn_material(us)
&& (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
assert(eval - beta >= 0);
// Null move dynamic reduction based on depth, eval and complexity of position
- Depth R = std::min(int(eval - beta) / 147, 5) + depth / 3 + 4 - (complexity > 753);
+ Depth R = std::min(int(eval - beta) / 168, 7) + depth / 3 + 4 - (complexity > 861);
ss->currentMove = MOVE_NULL;
ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
}
}
- probCutBeta = beta + 179 - 46 * improving;
+ probCutBeta = beta + 191 - 54 * improving;
// Step 10. ProbCut (~4 Elo)
// If we have a good enough capture and a reduced search returns a value
{
assert(probCutBeta < VALUE_INFINITE);
- MovePicker mp(pos, ttMove, probCutBeta - ss->staticEval, depth - 3, &captureHistory);
- bool ttPv = ss->ttPv;
- ss->ttPv = false;
+ MovePicker mp(pos, ttMove, probCutBeta - ss->staticEval, &captureHistory);
while ((move = mp.next_move()) != MOVE_NONE)
if (move != excludedMove && pos.legal(move))
if (value >= probCutBeta)
{
- // if transposition table doesn't have equal or more deep info write probCut data into it
- if ( !(ss->ttHit
- && tte->depth() >= depth - 3
- && ttValue != VALUE_NONE))
- tte->save(posKey, value_to_tt(value, ss->ply), ttPv,
- BOUND_LOWER,
- depth - 3, move, ss->staticEval);
+ // Save ProbCut data into transposition table
+ tte->save(posKey, value_to_tt(value, ss->ply), ss->ttPv, BOUND_LOWER, depth - 3, move, ss->staticEval);
return value;
}
}
- ss->ttPv = ttPv;
}
- // Step 11. If the position is not in TT, decrease depth by 2 or 1 depending on node type (~3 Elo)
- if ( PvNode
- && depth >= 3
+ // Step 11. If the position is not in TT, decrease depth by 3.
+ // Use qsearch if depth is equal or below zero (~4 Elo)
+ if ( PvNode
&& !ttMove)
- depth -= 2;
+ depth -= 3;
+
+ if (depth <= 0)
+ return qsearch<PV>(pos, ss, alpha, beta);
- if ( cutNode
- && depth >= 8
+ if ( cutNode
+ && depth >= 9
&& !ttMove)
- depth--;
+ depth -= 2;
moves_loop: // When in check, search starts here
// Step 12. A small Probcut idea, when we are in check (~0 Elo)
- probCutBeta = beta + 481;
+ probCutBeta = beta + 417;
if ( ss->inCheck
&& !PvNode
&& depth >= 2
ss->killers);
value = bestValue;
- moveCountPruning = false;
+ moveCountPruning = singularQuietLMR = false;
// Indicate PvNodes that will probably fail low if the node was searched
// at a depth equal or greater than the current depth, and the result of this search was a fail low.
|| givesCheck)
{
// Futility pruning for captures (~0 Elo)
- if ( !pos.empty(to_sq(move))
- && !givesCheck
+ if ( !givesCheck
&& !PvNode
- && lmrDepth < 6
+ && lmrDepth < 7
&& !ss->inCheck
- && ss->staticEval + 281 + 179 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
+ && ss->staticEval + 180 + 201 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
+ captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 6 < alpha)
continue;
// SEE based pruning (~9 Elo)
- if (!pos.see_ge(move, Value(-203) * depth))
+ if (!pos.see_ge(move, Value(-222) * depth))
continue;
}
else
&& history < -3875 * (depth - 1))
continue;
- history += thisThread->mainHistory[us][from_to(move)];
+ history += 2 * thisThread->mainHistory[us][from_to(move)];
// Futility pruning: parent node (~9 Elo)
if ( !ss->inCheck
- && lmrDepth < 11
- && ss->staticEval + 122 + 138 * lmrDepth + history / 60 <= alpha)
+ && lmrDepth < 13
+ && ss->staticEval + 106 + 145 * lmrDepth + history / 52 <= alpha)
continue;
// Prune moves with negative SEE (~3 Elo)
- if (!pos.see_ge(move, Value(-25 * lmrDepth * lmrDepth - 20 * lmrDepth)))
+ if (!pos.see_ge(move, Value(-24 * lmrDepth * lmrDepth - 15 * lmrDepth)))
continue;
}
}
// a reduced search on all the other moves but the ttMove and if the
// result is lower than ttValue minus a margin, then we will extend the ttMove.
if ( !rootNode
- && depth >= 4 - (thisThread->previousDepth > 27) + 2 * (PvNode && tte->is_pv())
+ && depth >= 4 - (thisThread->previousDepth > 24) + 2 * (PvNode && tte->is_pv())
&& move == ttMove
&& !excludedMove // Avoid recursive singular search
/* && ttValue != VALUE_NONE Already implicit in the next condition */
&& (tte->bound() & BOUND_LOWER)
&& tte->depth() >= depth - 3)
{
- Value singularBeta = ttValue - 3 * depth;
+ Value singularBeta = ttValue - (3 + (ss->ttPv && !PvNode)) * depth;
Depth singularDepth = (depth - 1) / 2;
ss->excludedMove = move;
if (value < singularBeta)
{
extension = 1;
+ singularQuietLMR = !ttCapture;
// Avoid search explosion by limiting the number of double extensions
if ( !PvNode
- && value < singularBeta - 26
- && ss->doubleExtensions <= 8)
+ && value < singularBeta - 25
+ && ss->doubleExtensions <= 9)
+ {
extension = 2;
+ depth += depth < 12;
+ }
}
// Multi-cut pruning
// Check extensions (~1 Elo)
else if ( givesCheck
&& depth > 9
- && abs(ss->staticEval) > 71)
+ && abs(ss->staticEval) > 82)
extension = 1;
// Quiet ttMove extensions (~0 Elo)
else if ( PvNode
&& move == ttMove
&& move == ss->killers[0]
- && (*contHist[0])[movedPiece][to_sq(move)] >= 5491)
+ && (*contHist[0])[movedPiece][to_sq(move)] >= 5177)
extension = 1;
}
// Step 16. Make the move
pos.do_move(move, st, givesCheck);
- bool doDeeperSearch = false;
-
// Step 17. Late moves reduction / extension (LMR, ~98 Elo)
// We use various heuristics for the sons of a node after the first son has
// been searched. In general we would like to reduce them, but there are many
r--;
// Increase reduction for cut nodes (~3 Elo)
- if (cutNode && move != ss->killers[0])
+ if (cutNode)
r += 2;
// Increase reduction if ttMove is a capture (~3 Elo)
if (ttCapture)
r++;
- // Decrease reduction at PvNodes if bestvalue
- // is vastly different from static evaluation
- if (PvNode && !ss->inCheck && abs(ss->staticEval - bestValue) > 250)
- r--;
-
// Decrease reduction for PvNodes based on depth
if (PvNode)
- r -= 1 + 15 / ( 3 + depth );
+ r -= 1 + 11 / (3 + depth);
+
+ // Decrease reduction if ttMove has been singularly extended (~1 Elo)
+ if (singularQuietLMR)
+ r--;
- // Increase reduction if next ply has a lot of fail high else reset count to 0
- if ((ss+1)->cutoffCnt > 3 && !PvNode)
+ // Decrease reduction if we move a threatened piece (~1 Elo)
+ if ( depth > 9
+ && (mp.threatenedPieces & from_sq(move)))
+ r--;
+
+ // Increase reduction if next ply has a lot of fail high
+ if ((ss+1)->cutoffCnt > 3)
r++;
- ss->statScore = thisThread->mainHistory[us][from_to(move)]
+ ss->statScore = 2 * thisThread->mainHistory[us][from_to(move)]
+ (*contHist[0])[movedPiece][to_sq(move)]
+ (*contHist[1])[movedPiece][to_sq(move)]
+ (*contHist[3])[movedPiece][to_sq(move)]
- - 4334;
+ - 4433;
// Decrease/increase reduction for moves with a good/bad history (~30 Elo)
- r -= ss->statScore / 15914;
-
- // In general we want to cap the LMR depth search at newDepth. But if reductions
- // are really negative and movecount is low, we allow this move to be searched
- // deeper than the first move (this may lead to hidden double extensions).
- int deeper = r >= -1 ? 0
- : moveCount <= 4 ? 2
- : PvNode ? 1
- : cutNode && moveCount <= 8 ? 1
- : 0;
+ r -= ss->statScore / (13628 + 4000 * (depth > 7 && depth < 19));
- Depth d = std::clamp(newDepth - r, 1, newDepth + deeper);
+ // In general we want to cap the LMR depth search at newDepth, but when
+ // reduction is negative, we allow this move a limited search extension
+ // beyond the first move depth. This may lead to hidden double extensions.
+ Depth d = std::clamp(newDepth - r, 1, newDepth + 1);
value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, d, true);
- // If the son is reduced and fails high it will be re-searched at full depth
- doFullDepthSearch = value > alpha && d < newDepth;
- doDeeperSearch = value > (alpha + 78 + 11 * (newDepth - d));
- didLMR = true;
- }
- else
- {
- doFullDepthSearch = !PvNode || moveCount > 1;
- didLMR = false;
- }
+ // Do full depth search when reduced LMR search fails high
+ if (value > alpha && d < newDepth)
+ {
+ // Adjust full depth search based on LMR results - if result
+ // was good enough search deeper, if it was bad enough search shallower
+ const bool doDeeperSearch = value > (alpha + 64 + 11 * (newDepth - d));
+ const bool doShallowerSearch = value < bestValue + newDepth;
- // Step 18. Full depth search when LMR is skipped or fails high
- if (doFullDepthSearch)
- {
- value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth + doDeeperSearch, !cutNode);
+ newDepth += doDeeperSearch - doShallowerSearch;
+
+ if (newDepth > d)
+ value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth, !cutNode);
- // If the move passed LMR update its stats
- if (didLMR)
- {
int bonus = value > alpha ? stat_bonus(newDepth)
: -stat_bonus(newDepth);
}
}
+ // Step 18. Full depth search when LMR is skipped
+ else if (!PvNode || moveCount > 1)
+ {
+ value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth, !cutNode);
+ }
+
// For PV nodes only, do a full PV search on the first move or after a fail
// high (in the latter case search only if value < beta), otherwise let the
// parent node fail low with value <= alpha and try another move.
// PV move or new best move?
if (moveCount == 1 || value > alpha)
{
- rm.score = value;
+ rm.score = rm.uciScore = value;
rm.selDepth = thisThread->selDepth;
+ rm.scoreLowerbound = rm.scoreUpperbound = false;
+
+ if (value >= beta) {
+ rm.scoreLowerbound = true;
+ rm.uciScore = beta;
+ }
+ else if (value <= alpha) {
+ rm.scoreUpperbound = true;
+ rm.uciScore = alpha;
+ }
rm.pv.resize(1);
assert((ss+1)->pv);
alpha = value;
// Reduce other moves if we have found at least one score improvement
- if ( depth > 2
- && depth < 7
+ if ( depth > 1
+ && depth < 6
&& beta < VALUE_KNOWN_WIN
&& alpha > -VALUE_KNOWN_WIN)
- depth -= 1;
+ depth -= 1;
assert(depth > 0);
}
}
}
}
- else
- ss->cutoffCnt = 0;
// If the move is worse than some previously searched move, remember it to update its stats later
quietsSearched, quietCount, capturesSearched, captureCount, depth);
// Bonus for prior countermove that caused the fail low
- else if ( (depth >= 4 || PvNode)
+ else if ( (depth >= 5 || PvNode)
&& !priorCapture)
{
//Assign extra bonus if current node is PvNode or cutNode
//or fail low was really bad
bool extraBonus = PvNode
|| cutNode
- || bestValue < alpha - 70 * depth;
+ || bestValue < alpha - 62 * depth;
update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, stat_bonus(depth) * (1 + extraBonus));
}
// qsearch() is the quiescence search function, which is called by the main search
// function with zero depth, or recursively with further decreasing depth per call.
+ // (~155 Elo)
template <NodeType nodeType>
Value qsearch(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth) {
&& ss->ttHit
&& tte->depth() >= ttDepth
&& ttValue != VALUE_NONE // Only in case of TT access race
- && (ttValue >= beta ? (tte->bound() & BOUND_LOWER)
- : (tte->bound() & BOUND_UPPER)))
+ && (tte->bound() & (ttValue >= beta ? BOUND_LOWER : BOUND_UPPER)))
return ttValue;
// Evaluate the position statically
if (PvNode && bestValue > alpha)
alpha = bestValue;
- futilityBase = bestValue + 118;
+ futilityBase = bestValue + 153;
}
const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,
&& futilityBase > -VALUE_KNOWN_WIN
&& type_of(move) != PROMOTION)
{
-
if (moveCount > 2)
continue;
[to_sq(move)];
// Continuation history based pruning (~2 Elo)
- if ( !capture
+ if ( !capture
&& bestValue > VALUE_TB_LOSS_IN_MAX_PLY
- && (*contHist[0])[pos.moved_piece(move)][to_sq(move)] < CounterMovePruneThreshold
- && (*contHist[1])[pos.moved_piece(move)][to_sq(move)] < CounterMovePruneThreshold)
+ && (*contHist[0])[pos.moved_piece(move)][to_sq(move)] < 0
+ && (*contHist[1])[pos.moved_piece(move)][to_sq(move)] < 0)
continue;
- // movecount pruning for quiet check evasions
- if ( bestValue > VALUE_TB_LOSS_IN_MAX_PLY
- && quietCheckEvasions > 1
- && !capture
- && ss->inCheck)
- continue;
+ // We prune after the second quiet check evasion, where being 'in check' is implicitly checked through the counter,
+ // and being a 'quiet' move apart from being a TT move is assumed after an increment because captures are pushed ahead.
+ if ( bestValue > VALUE_TB_LOSS_IN_MAX_PLY
+ && quietCheckEvasions > 1)
+ break;
quietCheckEvasions += !capture && ss->inCheck;
// update_pv() adds current move and appends child pv[]
- void update_pv(Move* pv, Move move, Move* childPv) {
+ void update_pv(Move* pv, Move move, const Move* childPv) {
for (*pv++ = move; childPv && *childPv != MOVE_NONE; )
*pv++ = *childPv++;
void update_all_stats(const Position& pos, Stack* ss, Move bestMove, Value bestValue, Value beta, Square prevSq,
Move* quietsSearched, int quietCount, Move* capturesSearched, int captureCount, Depth depth) {
- int bonus1, bonus2;
Color us = pos.side_to_move();
Thread* thisThread = pos.this_thread();
CapturePieceToHistory& captureHistory = thisThread->captureHistory;
Piece moved_piece = pos.moved_piece(bestMove);
PieceType captured = type_of(pos.piece_on(to_sq(bestMove)));
-
- bonus1 = stat_bonus(depth + 1);
- bonus2 = bestValue > beta + PawnValueMg ? bonus1 // larger bonus
- : stat_bonus(depth); // smaller bonus
+ int bonus1 = stat_bonus(depth + 1);
if (!pos.capture(bestMove))
{
+ int bonus2 = bestValue > beta + 137 ? bonus1 // larger bonus
+ : stat_bonus(depth); // smaller bonus
+
// Increase stats for the best move in case it was a quiet move
update_quiet_stats(pos, ss, bestMove, bonus2);
/// UCI::pv() formats PV information according to the UCI protocol. UCI requires
/// that all (if any) unsearched PV lines are sent using a previous search score.
-string UCI::pv(const Position& pos, Depth depth, Value alpha, Value beta) {
+string UCI::pv(const Position& pos, Depth depth) {
std::stringstream ss;
TimePoint elapsed = Time.elapsed() + 1;
continue;
Depth d = updated ? depth : std::max(1, depth - 1);
- Value v = updated ? rootMoves[i].score : rootMoves[i].previousScore;
+ Value v = updated ? rootMoves[i].uciScore : rootMoves[i].previousScore;
if (v == -VALUE_INFINITE)
v = VALUE_ZERO;
if (Options["UCI_ShowWDL"])
ss << UCI::wdl(v, pos.game_ply());
- if (!tb && i == pvIdx)
- ss << (v >= beta ? " lowerbound" : v <= alpha ? " upperbound" : "");
+ if (i == pvIdx && !tb && updated) // tablebase- and previous-scores are exact
+ ss << (rootMoves[i].scoreLowerbound ? " lowerbound" : (rootMoves[i].scoreUpperbound ? " upperbound" : ""));
ss << " nodes " << nodesSearched
- << " nps " << nodesSearched * 1000 / elapsed;
-
- if (elapsed > 1000) // Earlier makes little sense
- ss << " hashfull " << TT.hashfull();
-
- ss << " tbhits " << tbHits
+ << " nps " << nodesSearched * 1000 / elapsed
+ << " hashfull " << TT.hashfull()
+ << " tbhits " << tbHits
<< " time " << elapsed
<< " pv";