#include "tt.h"
#include "uci.h"
#include "syzygy/tbprobe.h"
+#include "nnue/evaluate_nnue.h"
namespace Stockfish {
// Futility margin: threshold added on top of beta/alpha in futility pruning,
// growing linearly with depth. An "improving" position gets one depth step of
// slack (margin computed as if one ply shallower).
// NOTE(review): resolved leftover diff markers here — kept the updated
// constant (154) and removed the stale old line (158).
Value futility_margin(Depth d, bool improving) {
  return Value(154 * (d - improving));
}
// Late-move-reduction amount, derived from the startup-initialized Reductions
// lookup table.
//   i         - "improving" flag; non-improving nodes get an extra reduction
//               when the raw product is large (r > 941)
//   d         - remaining search depth
//   mn        - move number (later moves are reduced more)
//   delta     - current aspiration window size (beta - alpha)
//   rootDelta - window size at the root; delta/rootDelta shrinks the
//               reduction when the local window is wide relative to the root
// NOTE(review): resolved leftover diff markers — kept the updated constants
// (1449, 937, 941) and dropped the superseded line.
Depth reduction(bool i, Depth d, int mn, Value delta, Value rootDelta) {
  int r = Reductions[d] * Reductions[mn];
  return (r + 1449 - int(delta) * 937 / int(rootDelta)) / 1024 + (!i && r > 941);
}
constexpr int futility_move_count(bool improving, Depth depth) {
// History and stats update bonus, based on depth: grows linearly with the
// search depth of the (fail-high) move and is capped at 1710 so very deep
// confirmations do not dominate the history tables.
// NOTE(review): resolved leftover diff markers — kept the updated formula
// (341 * d - 470, cap 1710) and removed the stale old line.
int stat_bonus(Depth d) {
  return std::min(341 * d - 470, 1710);
}
// Add a small random component to draw evaluations to avoid 3-fold blindness
// Initialize the Reductions lookup table at startup. Entry i is roughly
// proportional to log(i), with a base factor that grows slightly with the
// number of search threads (wider searches tolerate larger reductions).
// Index 0 is intentionally left untouched (log(0) is undefined).
// NOTE(review): resolved leftover diff markers — kept the updated base
// constant (19.47) and removed the stale old line (20.26).
void Search::init() {
  for (int i = 1; i < MAX_MOVES; ++i)
      Reductions[i] = int((19.47 + std::log(Threads.size()) / 2) * std::log(i));
}
ss->pv = pv;
- bestValue = delta = alpha = -VALUE_INFINITE;
- beta = VALUE_INFINITE;
+ bestValue = -VALUE_INFINITE;
if (mainThread)
{
multiPV = std::min(multiPV, rootMoves.size());
- complexityAverage.set(153, 1);
-
- optimism[us] = optimism[~us] = VALUE_ZERO;
-
int searchAgainCounter = 0;
// Iterative deepening loop until requested to stop or the target depth is reached
pvLast = 0;
if (!Threads.increaseDepth)
- searchAgainCounter++;
+ searchAgainCounter++;
// MultiPV loop. We perform a full root search for each PV line
for (pvIdx = 0; pvIdx < multiPV && !Threads.stop; ++pvIdx)
selDepth = 0;
// Reset aspiration window starting size
- if (rootDepth >= 4)
- {
- Value prev = rootMoves[pvIdx].averageScore;
- delta = Value(10) + int(prev) * prev / 15400;
- alpha = std::max(prev - delta,-VALUE_INFINITE);
- beta = std::min(prev + delta, VALUE_INFINITE);
-
- // Adjust optimism based on root move's previousScore
- int opt = 116 * prev / (std::abs(prev) + 170);
- optimism[ us] = Value(opt);
- optimism[~us] = -optimism[us];
- }
+ Value prev = rootMoves[pvIdx].averageScore;
+ delta = Value(10) + int(prev) * prev / 16502;
+ alpha = std::max(prev - delta,-VALUE_INFINITE);
+ beta = std::min(prev + delta, VALUE_INFINITE);
+
+ // Adjust optimism based on root move's previousScore
+ int opt = 120 * prev / (std::abs(prev) + 161);
+ optimism[ us] = Value(opt);
+ optimism[~us] = -optimism[us];
// Start with a small aspiration window and, in the case of a fail
// high/low, re-search with a bigger window until we don't fail
if (!Threads.stop)
completedDepth = rootDepth;
- if (rootMoves[0].pv[0] != lastBestMove) {
- lastBestMove = rootMoves[0].pv[0];
- lastBestMoveDepth = rootDepth;
+ if (rootMoves[0].pv[0] != lastBestMove)
+ {
+ lastBestMove = rootMoves[0].pv[0];
+ lastBestMoveDepth = rootDepth;
}
// Have we found a "mate in x"?
&& !Threads.stop
&& !mainThread->stopOnPonderhit)
{
- double fallingEval = (71 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
- + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 656.7;
+ double fallingEval = (69 + 13 * (mainThread->bestPreviousAverageScore - bestValue)
+ + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 619.6;
fallingEval = std::clamp(fallingEval, 0.5, 1.5);
// If the bestMove is stable over several iterations, reduce time accordingly
- timeReduction = lastBestMoveDepth + 9 < completedDepth ? 1.37 : 0.65;
- double reduction = (1.4 + mainThread->previousTimeReduction) / (2.15 * timeReduction);
- double bestMoveInstability = 1 + 1.7 * totBestMoveChanges / Threads.size();
- int complexity = mainThread->complexityAverage.value();
- double complexPosition = std::min(1.0 + (complexity - 261) / 1738.7, 1.5);
+ timeReduction = lastBestMoveDepth + 8 < completedDepth ? 1.57 : 0.65;
+ double reduction = (1.4 + mainThread->previousTimeReduction) / (2.08 * timeReduction);
+ double bestMoveInstability = 1 + 1.8 * totBestMoveChanges / Threads.size();
- double totalTime = Time.optimum() * fallingEval * reduction * bestMoveInstability * complexPosition;
+ double totalTime = Time.optimum() * fallingEval * reduction * bestMoveInstability;
// Cap used time in case of a single legal move for a better viewer experience in tournaments
// yielding correct scores and sufficiently fast moves.
Threads.stop = true;
}
else if ( !mainThread->ponder
- && Time.elapsed() > totalTime * 0.53)
+ && Time.elapsed() > totalTime * 0.50)
Threads.increaseDepth = false;
else
Threads.increaseDepth = true;
bool givesCheck, improving, priorCapture, singularQuietLMR;
bool capture, moveCountPruning, ttCapture;
Piece movedPiece;
- int moveCount, captureCount, quietCount, improvement, complexity;
+ int moveCount, captureCount, quietCount, improvement;
// Step 1. Initialize node
Thread* thisThread = pos.this_thread();
assert(0 <= ss->ply && ss->ply < MAX_PLY);
- (ss+1)->ttPv = false;
(ss+1)->excludedMove = bestMove = MOVE_NONE;
(ss+2)->killers[0] = (ss+2)->killers[1] = MOVE_NONE;
(ss+2)->cutoffCnt = 0;
ss->doubleExtensions = (ss-1)->doubleExtensions;
- Square prevSq = to_sq((ss-1)->currentMove);
-
- // Initialize statScore to zero for the grandchildren of the current position.
- // So statScore is shared between all grandchildren and only the first grandchild
- // starts with statScore = 0. Later grandchildren start with the last calculated
- // statScore of the previous grandchild. This influences the reduction rules in
- // LMR which are based on the statScore of parent position.
- if (!rootNode)
- (ss+2)->statScore = 0;
+ Square prevSq = is_ok((ss-1)->currentMove) ? to_sq((ss-1)->currentMove) : SQ_NONE;
+ ss->statScore = 0;
// Step 4. Transposition table lookup.
excludedMove = ss->excludedMove;
ttValue = ss->ttHit ? value_from_tt(tte->value(), ss->ply, pos.rule50_count()) : VALUE_NONE;
ttMove = rootNode ? thisThread->rootMoves[thisThread->pvIdx].pv[0]
: ss->ttHit ? tte->move() : MOVE_NONE;
- ttCapture = ttMove && pos.capture(ttMove);
+ ttCapture = ttMove && pos.capture_stage(ttMove);
// At this point, if excluded, skip straight to step 6, static eval. However,
// to save indentation, we list the condition in all code between here and there.
update_quiet_stats(pos, ss, ttMove, stat_bonus(depth));
// Extra penalty for early quiet moves of the previous ply (~0 Elo on STC, ~2 Elo on LTC)
- if ((ss-1)->moveCount <= 2 && !priorCapture)
+ if (prevSq != SQ_NONE && (ss-1)->moveCount <= 2 && !priorCapture)
update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, -stat_bonus(depth + 1));
}
// Penalty for a quiet ttMove that fails low (~1 Elo)
ss->staticEval = eval = VALUE_NONE;
improving = false;
improvement = 0;
- complexity = 0;
goto moves_loop;
}
- else if (excludedMove) {
- // excludeMove implies that we had a ttHit on the containing non-excluded search with ss->staticEval filled from TT
- // However static evals from the TT aren't good enough (-13 elo), presumably due to changing optimism context
- // Recalculate value with current optimism (without updating thread avgComplexity)
- ss->staticEval = eval = evaluate(pos, &complexity);
+ else if (excludedMove)
+ {
+ // Providing the hint that this node's accumulator will be used often brings significant Elo gain (13 Elo)
+ Eval::NNUE::hint_common_parent_position(pos);
+ eval = ss->staticEval;
}
else if (ss->ttHit)
{
// Never assume anything about values stored in TT
ss->staticEval = eval = tte->eval();
if (eval == VALUE_NONE)
- ss->staticEval = eval = evaluate(pos, &complexity);
- else // Fall back to (semi)classical complexity for TT hits, the NNUE complexity is lost
- complexity = abs(ss->staticEval - pos.psq_eg_stm());
+ ss->staticEval = eval = evaluate(pos);
+ else
+ {
+ if (PvNode)
+ Eval::NNUE::hint_common_parent_position(pos);
+ }
// ttValue can be used as a better position evaluation (~7 Elo)
if ( ttValue != VALUE_NONE
}
else
{
- ss->staticEval = eval = evaluate(pos, &complexity);
+ ss->staticEval = eval = evaluate(pos);
// Save static evaluation into transposition table
tte->save(posKey, VALUE_NONE, ss->ttPv, BOUND_NONE, DEPTH_NONE, MOVE_NONE, eval);
}
- thisThread->complexityAverage.update(complexity);
// Use static evaluation difference to improve quiet move ordering (~4 Elo)
if (is_ok((ss-1)->currentMove) && !(ss-1)->inCheck && !priorCapture)
{
- int bonus = std::clamp(-19 * int((ss-1)->staticEval + ss->staticEval), -1940, 1940);
+ int bonus = std::clamp(-19 * int((ss-1)->staticEval + ss->staticEval), -1920, 1920);
thisThread->mainHistory[~us][from_to((ss-1)->currentMove)] << bonus;
}
// margin and the improving flag are used in various pruning heuristics.
improvement = (ss-2)->staticEval != VALUE_NONE ? ss->staticEval - (ss-2)->staticEval
: (ss-4)->staticEval != VALUE_NONE ? ss->staticEval - (ss-4)->staticEval
- : 172;
+ : 156;
improving = improvement > 0;
// Step 7. Razoring (~1 Elo).
// If eval is really low check with qsearch if it can exceed alpha, if it can't,
// return a fail low.
- if (eval < alpha - 394 - 255 * depth * depth)
+ if (eval < alpha - 426 - 256 * depth * depth)
{
value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
if (value < alpha)
// Step 8. Futility pruning: child node (~40 Elo).
// The depth condition is important for mate finding.
if ( !ss->ttPv
- && depth < 8
- && eval - futility_margin(depth, improving) - (ss-1)->statScore / 304 >= beta
+ && depth < 9
+ && eval - futility_margin(depth, improving) - (ss-1)->statScore / 280 >= beta
&& eval >= beta
- && eval < 28580) // larger than VALUE_KNOWN_WIN, but smaller than TB wins
+ && eval < 25128) // larger than VALUE_KNOWN_WIN, but smaller than TB wins
return eval;
// Step 9. Null move search with verification search (~35 Elo)
if ( !PvNode
&& (ss-1)->currentMove != MOVE_NULL
- && (ss-1)->statScore < 18200
+ && (ss-1)->statScore < 18755
&& eval >= beta
&& eval >= ss->staticEval
- && ss->staticEval >= beta - 20 * depth - improvement / 14 + 235 + complexity / 24
+ && ss->staticEval >= beta - 20 * depth - improvement / 13 + 253
&& !excludedMove
&& pos.non_pawn_material(us)
- && (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
+ && (ss->ply >= thisThread->nmpMinPly))
{
assert(eval - beta >= 0);
- // Null move dynamic reduction based on depth, eval and complexity of position
- Depth R = std::min(int(eval - beta) / 165, 6) + depth / 3 + 4 - (complexity > 800);
+ // Null move dynamic reduction based on depth and eval
+ Depth R = std::min(int(eval - beta) / 172, 6) + depth / 3 + 4;
ss->currentMove = MOVE_NULL;
ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
assert(!thisThread->nmpMinPly); // Recursive verification is not allowed
// Do verification search at high depths, with null move pruning disabled
- // for us, until ply exceeds nmpMinPly.
+ // until ply exceeds nmpMinPly.
thisThread->nmpMinPly = ss->ply + 3 * (depth-R) / 4;
- thisThread->nmpColor = us;
Value v = search<NonPV>(pos, ss, beta-1, beta, depth-R, false);
}
}
- probCutBeta = beta + 180 - 54 * improving;
+ probCutBeta = beta + 186 - 54 * improving;
// Step 10. ProbCut (~10 Elo)
- // If we have a good enough capture and a reduced search returns a value
+ // If we have a good enough capture (or queen promotion) and a reduced search returns a value
// much above beta, we can (almost) safely prune the previous move.
if ( !PvNode
&& depth > 4
while ((move = mp.next_move()) != MOVE_NONE)
if (move != excludedMove && pos.legal(move))
{
- assert(pos.capture(move) || promotion_type(move) == QUEEN);
+ assert(pos.capture_stage(move));
ss->currentMove = move;
ss->continuationHistory = &thisThread->continuationHistory[ss->inCheck]
return value;
}
}
+
+ Eval::NNUE::hint_common_parent_position(pos);
}
- // Step 11. If the position is not in TT, decrease depth by 3.
+ // Step 11. If the position is not in TT, decrease depth by 2 (or by 4 if the TT entry for the current position was hit and the stored depth is greater than or equal to the current depth).
// Use qsearch if depth is equal or below zero (~9 Elo)
if ( PvNode
&& !ttMove)
- depth -= 3;
+ depth -= 2 + 2 * (ss->ttHit && tte->depth() >= depth);
if (depth <= 0)
return qsearch<PV>(pos, ss, alpha, beta);
if ( cutNode
- && depth >= 9
+ && depth >= 7
&& !ttMove)
depth -= 2;
moves_loop: // When in check, search starts here
// Step 12. A small Probcut idea, when we are in check (~4 Elo)
- probCutBeta = beta + 402;
+ probCutBeta = beta + 391;
if ( ss->inCheck
&& !PvNode
&& depth >= 2
&& tte->depth() >= depth - 3
&& ttValue >= probCutBeta
&& abs(ttValue) <= VALUE_KNOWN_WIN
- && abs(beta) <= VALUE_KNOWN_WIN
- )
+ && abs(beta) <= VALUE_KNOWN_WIN)
return probCutBeta;
-
const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,
nullptr , (ss-4)->continuationHistory,
nullptr , (ss-6)->continuationHistory };
- Move countermove = thisThread->counterMoves[pos.piece_on(prevSq)][prevSq];
+ Move countermove = prevSq != SQ_NONE ? thisThread->counterMoves[pos.piece_on(prevSq)][prevSq] : MOVE_NONE;
MovePicker mp(pos, ttMove, depth, &thisThread->mainHistory,
&captureHistory,
(ss+1)->pv = nullptr;
extension = 0;
- capture = pos.capture(move);
+ capture = pos.capture_stage(move);
movedPiece = pos.moved_piece(move);
givesCheck = pos.gives_check(move);
Value delta = beta - alpha;
+ Depth r = reduction(improving, depth, moveCount, delta, thisThread->rootDelta);
+
// Step 14. Pruning at shallow depth (~120 Elo). Depth conditions are important for mate finding.
if ( !rootNode
&& pos.non_pawn_material(us)
moveCountPruning = moveCount >= futility_move_count(improving, depth);
// Reduced depth of the next LMR search
- int lmrDepth = std::max(newDepth - reduction(improving, depth, moveCount, delta, thisThread->rootDelta), 0);
+ int lmrDepth = std::max(newDepth - r, 0);
if ( capture
|| givesCheck)
{
// Futility pruning for captures (~2 Elo)
if ( !givesCheck
- && !PvNode
- && lmrDepth < 7
+ && lmrDepth < 6
&& !ss->inCheck
- && ss->staticEval + 185 + 203 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
- + captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 6 < alpha)
+ && ss->staticEval + 182 + 230 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
+ + captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 7 < alpha)
continue;
+ Bitboard occupied;
// SEE based pruning (~11 Elo)
- if (!pos.see_ge(move, Value(-220) * depth))
- continue;
+ if (!pos.see_ge(move, occupied, Value(-206) * depth))
+ {
+ if (depth < 2 - capture)
+ continue;
+ // Don't prune the move if opp. King/Queen/Rook gets a discovered attack during or after the exchanges
+ Bitboard leftEnemies = pos.pieces(~us, KING, QUEEN, ROOK);
+ Bitboard attacks = 0;
+ occupied |= to_sq(move);
+ while (leftEnemies && !attacks)
+ {
+ Square sq = pop_lsb(leftEnemies);
+ attacks = pos.attackers_to(sq, occupied) & pos.pieces(us) & occupied;
+ // Exclude Queen/Rook(s) which were already threatened before SEE (opp King can't be in check when it's our turn)
+ if (attacks && sq != pos.square<KING>(~us) && (pos.attackers_to(sq, pos.pieces()) & pos.pieces(us)))
+ attacks = 0;
+ }
+ if (!attacks)
+ continue;
+ }
}
else
{
// Continuation history based pruning (~2 Elo)
if ( lmrDepth < 5
- && history < -4180 * (depth - 1))
+ && history < -4405 * (depth - 1))
continue;
history += 2 * thisThread->mainHistory[us][from_to(move)];
- lmrDepth += history / 7208;
+ lmrDepth += history / 7278;
lmrDepth = std::max(lmrDepth, -2);
// Futility pruning: parent node (~13 Elo)
if ( !ss->inCheck
&& lmrDepth < 13
- && ss->staticEval + 103 + 136 * lmrDepth <= alpha)
+ && ss->staticEval + 103 + 138 * lmrDepth <= alpha)
continue;
lmrDepth = std::max(lmrDepth, 0);
// Prune moves with negative SEE (~4 Elo)
- if (!pos.see_ge(move, Value(-25 * lmrDepth * lmrDepth - 16 * lmrDepth)))
+ if (!pos.see_ge(move, Value(-24 * lmrDepth * lmrDepth - 16 * lmrDepth)))
continue;
}
}
// a reduced search on all the other moves but the ttMove and if the
// result is lower than ttValue minus a margin, then we will extend the ttMove.
if ( !rootNode
- && depth >= 4 - (thisThread->completedDepth > 22) + 2 * (PvNode && tte->is_pv())
+ && depth >= 4 - (thisThread->completedDepth > 21) + 2 * (PvNode && tte->is_pv())
&& move == ttMove
&& !excludedMove // Avoid recursive singular search
/* && ttValue != VALUE_NONE Already implicit in the next condition */
&& (tte->bound() & BOUND_LOWER)
&& tte->depth() >= depth - 3)
{
- Value singularBeta = ttValue - (3 + (ss->ttPv && !PvNode)) * depth;
+ Value singularBeta = ttValue - (3 + 2 * (ss->ttPv && !PvNode)) * depth / 2;
Depth singularDepth = (depth - 1) / 2;
ss->excludedMove = move;
- // the search with excludedMove will update ss->staticEval
value = search<NonPV>(pos, ss, singularBeta - 1, singularBeta, singularDepth, cutNode);
ss->excludedMove = MOVE_NONE;
&& ss->doubleExtensions <= 10)
{
extension = 2;
- depth += depth < 12;
+ depth += depth < 13;
}
}
else if (singularBeta >= beta)
return singularBeta;
- // If the eval of ttMove is greater than beta, we reduce it (negative extension)
+ // If the eval of ttMove is greater than beta, we reduce it (negative extension) (~7 Elo)
else if (ttValue >= beta)
- extension = -2;
+ extension = -2 - !PvNode;
- // If the eval of ttMove is less than alpha and value, we reduce it (negative extension)
- else if (ttValue <= alpha && ttValue <= value)
+ // If the eval of ttMove is less than value, we reduce it (negative extension) (~1 Elo)
+ else if (ttValue <= value)
+ extension = -1;
+
+ // If the eval of ttMove is less than alpha, we reduce it (negative extension) (~1 Elo)
+ else if (ttValue <= alpha)
extension = -1;
}
// Check extensions (~1 Elo)
else if ( givesCheck
- && depth > 9
- && abs(ss->staticEval) > 78)
+ && depth > 10
+ && abs(ss->staticEval) > 88)
extension = 1;
// Quiet ttMove extensions (~1 Elo)
else if ( PvNode
&& move == ttMove
&& move == ss->killers[0]
- && (*contHist[0])[movedPiece][to_sq(move)] >= 5600)
+ && (*contHist[0])[movedPiece][to_sq(move)] >= 5705)
extension = 1;
}
// Step 16. Make the move
pos.do_move(move, st, givesCheck);
- Depth r = reduction(improving, depth, moveCount, delta, thisThread->rootDelta);
-
// Decrease reduction if position is or has been on the PV
// and node is not likely to fail low. (~3 Elo)
if ( ss->ttPv
if (ttCapture)
r++;
- // Decrease reduction for PvNodes based on depth
+ // Decrease reduction for PvNodes based on depth (~2 Elo)
if (PvNode)
- r -= 1 + 11 / (3 + depth);
+ r -= 1 + 12 / (3 + depth);
// Decrease reduction if ttMove has been singularly extended (~1 Elo)
if (singularQuietLMR)
r--;
- // Decrease reduction if we move a threatened piece (~1 Elo)
- if ( depth > 9
- && (mp.threatenedPieces & from_sq(move)))
- r--;
-
- // Increase reduction if next ply has a lot of fail high
+ // Increase reduction if next ply has a lot of fail high (~5 Elo)
if ((ss+1)->cutoffCnt > 3)
r++;
- // Decrease reduction if move is a killer and we have a good history
- if (move == ss->killers[0]
- && (*contHist[0])[movedPiece][to_sq(move)] >= 3600)
+ else if (move == ttMove)
r--;
ss->statScore = 2 * thisThread->mainHistory[us][from_to(move)]
+ (*contHist[0])[movedPiece][to_sq(move)]
+ (*contHist[1])[movedPiece][to_sq(move)]
+ (*contHist[3])[movedPiece][to_sq(move)]
- - 4467;
+ - 4082;
- // Decrease/increase reduction for moves with a good/bad history (~30 Elo)
- r -= ss->statScore / (12800 + 4410 * (depth > 7 && depth < 19));
+ // Decrease/increase reduction for moves with a good/bad history (~25 Elo)
+ r -= ss->statScore / (11079 + 4626 * (depth > 6 && depth < 19));
// Step 17. Late moves reduction / extension (LMR, ~117 Elo)
// We use various heuristics for the sons of a node after the first son has
{
// Adjust full depth search based on LMR results - if result
// was good enough search deeper, if it was bad enough search shallower
- const bool doDeeperSearch = value > (alpha + 66 + 11 * (newDepth - d));
- const bool doEvenDeeperSearch = value > alpha + 582 && ss->doubleExtensions <= 5;
+ const bool doDeeperSearch = value > (alpha + 58 + 12 * (newDepth - d));
+ const bool doEvenDeeperSearch = value > alpha + 588 && ss->doubleExtensions <= 5;
const bool doShallowerSearch = value < bestValue + newDepth;
ss->doubleExtensions = ss->doubleExtensions + doEvenDeeperSearch;
if (newDepth > d)
value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth, !cutNode);
- int bonus = value > alpha ? stat_bonus(newDepth)
- : -stat_bonus(newDepth);
+ int bonus = value <= alpha ? -stat_bonus(newDepth)
+ : value >= beta ? stat_bonus(newDepth)
+ : 0;
update_continuation_histories(ss, movedPiece, to_sq(move), bonus);
}
// Step 18. Full depth search when LMR is skipped. If expected reduction is high, reduce its depth by 1.
else if (!PvNode || moveCount > 1)
{
- // Increase reduction for cut nodes and not ttMove (~1 Elo)
- if (!ttMove && cutNode)
- r += 2;
+ // Increase reduction for cut nodes and not ttMove (~1 Elo)
+ if (!ttMove && cutNode)
+ r += 2;
- value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth - (r > 4), !cutNode);
+ value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth - (r > 4), !cutNode);
}
// For PV nodes only, do a full PV search on the first move or after a fail
rm.selDepth = thisThread->selDepth;
rm.scoreLowerbound = rm.scoreUpperbound = false;
- if (value >= beta) {
- rm.scoreLowerbound = true;
- rm.uciScore = beta;
+ if (value >= beta)
+ {
+ rm.scoreLowerbound = true;
+ rm.uciScore = beta;
}
- else if (value <= alpha) {
- rm.scoreUpperbound = true;
- rm.uciScore = alpha;
+ else if (value <= alpha)
+ {
+ rm.scoreUpperbound = true;
+ rm.uciScore = alpha;
}
+
rm.pv.resize(1);
assert((ss+1)->pv);
if (PvNode && value < beta) // Update alpha! Always alpha < beta
{
- alpha = value;
-
- // Reduce other moves if we have found at least one score improvement
+ // Reduce other moves if we have found at least one score improvement (~1 Elo)
if ( depth > 1
- && depth < 6
- && beta < VALUE_KNOWN_WIN
- && alpha > -VALUE_KNOWN_WIN)
+ && beta < 12535
+ && value > -12535)
depth -= 1;
assert(depth > 0);
+ alpha = value;
}
else
{
quietsSearched, quietCount, capturesSearched, captureCount, depth);
// Bonus for prior countermove that caused the fail low
- else if (!priorCapture)
+ else if (!priorCapture && prevSq != SQ_NONE)
{
- // Extra bonuses for PV/Cut nodes or bad fail lows
- int bonus = (depth > 4) + (PvNode || cutNode) + (bestValue < alpha - 88 * depth);
+ int bonus = (depth > 5) + (PvNode || cutNode) + (bestValue < alpha - 97 * depth) + ((ss-1)->moveCount > 10);
update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, stat_bonus(depth) * bonus);
}
bestValue = std::min(bestValue, maxValue);
// If no good move is found and the previous position was ttPv, then the previous
- // opponent move is probably good and the new position is added to the search tree.
+ // opponent move is probably good and the new position is added to the search tree. (~7 Elo)
if (bestValue <= alpha)
ss->ttPv = ss->ttPv || ((ss-1)->ttPv && depth > 3);
// qsearch() is the quiescence search function, which is called by the main search
// function with zero depth, or recursively with further decreasing depth per call.
- // (~155 elo)
+ // (~155 Elo)
template <NodeType nodeType>
Value qsearch(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth) {
// TT entry depth that we are going to use. Note that in qsearch we use
// only two types of depth in TT: DEPTH_QS_CHECKS or DEPTH_QS_NO_CHECKS.
ttDepth = ss->inCheck || depth >= DEPTH_QS_CHECKS ? DEPTH_QS_CHECKS
- : DEPTH_QS_NO_CHECKS;
+ : DEPTH_QS_NO_CHECKS;
+
// Step 3. Transposition table lookup
posKey = pos.key();
tte = TT.probe(posKey, ss->ttHit);
if (PvNode && bestValue > alpha)
alpha = bestValue;
- futilityBase = bestValue + 158;
+ futilityBase = bestValue + 168;
}
const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,
// to search the moves. Because the depth is <= 0 here, only captures,
// queen promotions, and other checks (only if depth >= DEPTH_QS_CHECKS)
// will be generated.
- Square prevSq = to_sq((ss-1)->currentMove);
+ Square prevSq = (ss-1)->currentMove != MOVE_NULL ? to_sq((ss-1)->currentMove) : SQ_NONE;
MovePicker mp(pos, ttMove, depth, &thisThread->mainHistory,
&thisThread->captureHistory,
contHist,
continue;
givesCheck = pos.gives_check(move);
- capture = pos.capture(move);
+ capture = pos.capture_stage(move);
moveCount++;
continue;
// Do not search moves with bad enough SEE values (~5 Elo)
- if (!pos.see_ge(move, Value(-108)))
+ if (!pos.see_ge(move, Value(-110)))
continue;
-
}
// Speculative prefetch as early as possible
Thread* thisThread = pos.this_thread();
CapturePieceToHistory& captureHistory = thisThread->captureHistory;
Piece moved_piece = pos.moved_piece(bestMove);
- PieceType captured = type_of(pos.piece_on(to_sq(bestMove)));
+ PieceType captured;
+
int bonus1 = stat_bonus(depth + 1);
- if (!pos.capture(bestMove))
+ if (!pos.capture_stage(bestMove))
{
- int bonus2 = bestValue > beta + 146 ? bonus1 // larger bonus
+ int bonus2 = bestValue > beta + 153 ? bonus1 // larger bonus
: stat_bonus(depth); // smaller bonus
// Increase stats for the best move in case it was a quiet move
}
}
else
+ {
// Increase stats for the best move in case it was a capture move
+ captured = type_of(pos.piece_on(to_sq(bestMove)));
captureHistory[moved_piece][to_sq(bestMove)][captured] << bonus1;
+ }
// Extra penalty for a quiet early move that was not a TT move or
// main killer move in previous ply when it gets refuted.
- if ( ((ss-1)->moveCount == 1 + (ss-1)->ttHit || ((ss-1)->currentMove == (ss-1)->killers[0]))
+ if ( prevSq != SQ_NONE
+ && ((ss-1)->moveCount == 1 + (ss-1)->ttHit || ((ss-1)->currentMove == (ss-1)->killers[0]))
&& !pos.captured_piece())
update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, -bonus1);