// Futility margin used by the static-eval (reverse futility) pruning step.
// Grows linearly with remaining depth; an improving static evaluation
// effectively reduces the depth by one ply, shrinking the margin.
// (Resolved leftover patch markers: kept the new 165 multiplier.)
Value futility_margin(Depth d, bool improving) {
  return Value(165 * (d - improving));
}
// Late-move-reduction amount, derived from the precomputed Reductions[]
// lookup table (indexed by depth and move number). The aspiration-window
// half-width 'delta' relative to 'rootDelta' scales the reduction down when
// the window is wide; non-improving nodes with a large raw product get one
// extra ply of reduction.
// (Resolved leftover patch markers: kept the new 1642/916 constants.)
Depth reduction(bool i, Depth d, int mn, Value delta, Value rootDelta) {
  int r = Reductions[d] * Reductions[mn];
  return (r + 1642 - int(delta) * 1024 / int(rootDelta)) / 1024 + (!i && r > 916);
}
// Maximum number of moves to search at low depth before move-count based
// pruning kicks in; roughly quadratic in depth, halved when the static
// evaluation is not improving.
// (Resolved leftover patch markers: kept the new explicit-ternary form.)
constexpr int futility_move_count(bool improving, Depth depth) {
  return improving ? (3 + depth * depth)
                   : (3 + depth * depth) / 2;
}
// History and stats update bonus, based on depth: a quadratic-in-depth
// bonus capped at 1594 so very deep searches don't saturate the history
// tables too quickly.
// (Resolved leftover patch markers: kept the new 12/282/349/1594 constants.)
int stat_bonus(Depth d) {
  return std::min((12 * d + 282) * d - 349 , 1594);
}
// Add a small random component to draw evaluations to avoid 3-fold blindness
- Value value_draw(Thread* thisThread) {
- return VALUE_DRAW + Value(2 * (thisThread->nodes & 1) - 1);
+ Value value_draw(const Thread* thisThread) {
+ return VALUE_DRAW - 1 + Value(thisThread->nodes & 0x2);
}
// Skill structure is used to implement strength limit. If we have an uci_elo then
Value value_to_tt(Value v, int ply);
Value value_from_tt(Value v, int ply, int r50c);
- void update_pv(Move* pv, Move move, Move* childPv);
+ void update_pv(Move* pv, Move move, const Move* childPv);
void update_continuation_histories(Stack* ss, Piece pc, Square to, int bonus);
void update_quiet_stats(const Position& pos, Stack* ss, Move move, int bonus);
void update_all_stats(const Position& pos, Stack* ss, Move bestMove, Value bestValue, Value beta, Square prevSq,
// Search::init() is called at startup to precompute the Reductions[] lookup
// table used by reduction(). The logarithmic shape gives larger reductions
// for later moves / higher depths; the thread-count term slightly increases
// reductions when more search threads are running.
// (Resolved leftover patch markers: kept the new 20.26 base constant.)
void Search::init() {
  for (int i = 1; i < MAX_MOVES; ++i)
      Reductions[i] = int((20.26 + std::log(Threads.size()) / 2) * std::log(i));
}
bestPreviousScore = bestThread->rootMoves[0].score;
bestPreviousAverageScore = bestThread->rootMoves[0].averageScore;
+ for (Thread* th : Threads)
+ th->previousDepth = bestThread->completedDepth;
+
// Send again PV info if we have a new best thread
if (bestThread != this)
sync_cout << UCI::pv(bestThread->rootPos, bestThread->completedDepth, -VALUE_INFINITE, VALUE_INFINITE) << sync_endl;
multiPV = std::min(multiPV, rootMoves.size());
- complexityAverage.set(202, 1);
+ complexityAverage.set(155, 1);
- trend = SCORE_ZERO;
- optimism[ us] = Value(39);
- optimism[~us] = -optimism[us];
+ trend = SCORE_ZERO;
+ optimism[us] = optimism[~us] = VALUE_ZERO;
int searchAgainCounter = 0;
if (rootDepth >= 4)
{
Value prev = rootMoves[pvIdx].averageScore;
- delta = Value(16) + int(prev) * prev / 19178;
+ delta = Value(10) + int(prev) * prev / 15620;
alpha = std::max(prev - delta,-VALUE_INFINITE);
beta = std::min(prev + delta, VALUE_INFINITE);
// Adjust trend and optimism based on root move's previousScore
- int tr = sigmoid(prev, 3, 8, 90, 125, 1);
+ int tr = 116 * prev / (std::abs(prev) + 89);
trend = (us == WHITE ? make_score(tr, tr / 2)
: -make_score(tr, tr / 2));
- int opt = sigmoid(prev, 8, 17, 144, 13966, 183);
+ int opt = 118 * prev / (std::abs(prev) + 169);
optimism[ us] = Value(opt);
optimism[~us] = -optimism[us];
}
int failedHighCnt = 0;
while (true)
{
- Depth adjustedDepth = std::max(1, rootDepth - failedHighCnt - searchAgainCounter);
+ // Adjust the effective depth searched, but ensuring at least one effective increment for every
+ // four searchAgain steps (see issue #2717).
+ Depth adjustedDepth = std::max(1, rootDepth - failedHighCnt - 3 * (searchAgainCounter + 1) / 4);
bestValue = Stockfish::search<Root>(rootPos, ss, alpha, beta, adjustedDepth, false);
// Bring the best move to the front. It is critical that sorting
&& !Threads.stop
&& !mainThread->stopOnPonderhit)
{
- double fallingEval = (69 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
- + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 781.4;
+ double fallingEval = (71 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
+ + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 656.7;
fallingEval = std::clamp(fallingEval, 0.5, 1.5);
// If the bestMove is stable over several iterations, reduce time accordingly
- timeReduction = lastBestMoveDepth + 10 < completedDepth ? 1.63 : 0.73;
- double reduction = (1.56 + mainThread->previousTimeReduction) / (2.20 * timeReduction);
- double bestMoveInstability = 1.073 + std::max(1.0, 2.25 - 9.9 / rootDepth)
- * totBestMoveChanges / Threads.size();
+ timeReduction = lastBestMoveDepth + 9 < completedDepth ? 1.37 : 0.65;
+ double reduction = (1.4 + mainThread->previousTimeReduction) / (2.15 * timeReduction);
+ double bestMoveInstability = 1 + 1.7 * totBestMoveChanges / Threads.size();
int complexity = mainThread->complexityAverage.value();
- double complexPosition = std::clamp(1.0 + (complexity - 326) / 1618.1, 0.5, 1.5);
+ double complexPosition = std::min(1.0 + (complexity - 261) / 1738.7, 1.5);
double totalTime = Time.optimum() * fallingEval * reduction * bestMoveInstability * complexPosition;
}
else if ( Threads.increaseDepth
&& !mainThread->ponder
- && Time.elapsed() > totalTime * 0.43)
+ && Time.elapsed() > totalTime * 0.53)
Threads.increaseDepth = false;
else
Threads.increaseDepth = true;
Move ttMove, move, excludedMove, bestMove;
Depth extension, newDepth;
Value bestValue, value, ttValue, eval, maxValue, probCutBeta;
- bool givesCheck, improving, didLMR, priorCapture;
- bool captureOrPromotion, doFullDepthSearch, moveCountPruning, ttCapture;
+ bool givesCheck, improving, priorCapture, singularQuietLMR;
+ bool capture, moveCountPruning, ttCapture;
Piece movedPiece;
- int moveCount, captureCount, quietCount, bestMoveCount, improvement, complexity;
+ int moveCount, captureCount, quietCount, improvement, complexity;
// Step 1. Initialize node
Thread* thisThread = pos.this_thread();
ss->inCheck = pos.checkers();
priorCapture = pos.captured_piece();
Color us = pos.side_to_move();
- moveCount = bestMoveCount = captureCount = quietCount = ss->moveCount = 0;
+ moveCount = captureCount = quietCount = ss->moveCount = 0;
bestValue = -VALUE_INFINITE;
maxValue = VALUE_INFINITE;
(ss+1)->ttPv = false;
(ss+1)->excludedMove = bestMove = MOVE_NONE;
(ss+2)->killers[0] = (ss+2)->killers[1] = MOVE_NONE;
+ (ss+2)->cutoffCnt = 0;
ss->doubleExtensions = (ss-1)->doubleExtensions;
- ss->depth = depth;
Square prevSq = to_sq((ss-1)->currentMove);
// Initialize statScore to zero for the grandchildren of the current position.
ttValue = ss->ttHit ? value_from_tt(tte->value(), ss->ply, pos.rule50_count()) : VALUE_NONE;
ttMove = rootNode ? thisThread->rootMoves[thisThread->pvIdx].pv[0]
: ss->ttHit ? tte->move() : MOVE_NONE;
- ttCapture = ttMove && pos.capture_or_promotion(ttMove);
+ ttCapture = ttMove && pos.capture(ttMove);
if (!excludedMove)
ss->ttPv = PvNode || (ss->ttHit && tte->is_pv());
// At non-PV nodes we check for an early TT cutoff
if ( !PvNode
&& ss->ttHit
- && tte->depth() > depth - (thisThread->id() % 2 == 1)
+ && tte->depth() > depth - (tte->bound() == BOUND_EXACT)
&& ttValue != VALUE_NONE // Possible in case of TT access race
- && (ttValue >= beta ? (tte->bound() & BOUND_LOWER)
- : (tte->bound() & BOUND_UPPER)))
+ && (tte->bound() & (ttValue >= beta ? BOUND_LOWER : BOUND_UPPER)))
{
// If ttMove is quiet, update move sorting heuristics on TT hit (~1 Elo)
if (ttMove)
// Never assume anything about values stored in TT
ss->staticEval = eval = tte->eval();
if (eval == VALUE_NONE)
- ss->staticEval = eval = evaluate(pos);
-
- // Randomize draw evaluation
- if (eval == VALUE_DRAW)
- eval = value_draw(thisThread);
+ ss->staticEval = eval = evaluate(pos, &complexity);
+ else // Fall back to (semi)classical complexity for TT hits, the NNUE complexity is lost
+ complexity = abs(ss->staticEval - pos.psq_eg_stm());
// ttValue can be used as a better position evaluation (~4 Elo)
if ( ttValue != VALUE_NONE
}
else
{
- ss->staticEval = eval = evaluate(pos);
+ ss->staticEval = eval = evaluate(pos, &complexity);
// Save static evaluation into transposition table
if (!excludedMove)
tte->save(posKey, VALUE_NONE, ss->ttPv, BOUND_NONE, DEPTH_NONE, MOVE_NONE, eval);
}
+ thisThread->complexityAverage.update(complexity);
+
// Use static evaluation difference to improve quiet move ordering (~3 Elo)
if (is_ok((ss-1)->currentMove) && !(ss-1)->inCheck && !priorCapture)
{
- int bonus = std::clamp(-16 * int((ss-1)->staticEval + ss->staticEval), -2000, 2000);
+ int bonus = std::clamp(-19 * int((ss-1)->staticEval + ss->staticEval), -1914, 1914);
thisThread->mainHistory[~us][from_to((ss-1)->currentMove)] << bonus;
}
// margin and the improving flag are used in various pruning heuristics.
improvement = (ss-2)->staticEval != VALUE_NONE ? ss->staticEval - (ss-2)->staticEval
: (ss-4)->staticEval != VALUE_NONE ? ss->staticEval - (ss-4)->staticEval
- : 175;
-
+ : 168;
improving = improvement > 0;
- complexity = abs(ss->staticEval - (us == WHITE ? eg_value(pos.psq_score()) : -eg_value(pos.psq_score())));
-
- thisThread->complexityAverage.update(complexity);
// Step 7. Razoring.
// If eval is really low check with qsearch if it can exceed alpha, if it can't,
// return a fail low.
- if ( !PvNode
- && depth <= 7
- && eval < alpha - 348 - 258 * depth * depth)
+ if (eval < alpha - 369 - 254 * depth * depth)
{
value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
if (value < alpha)
// The depth condition is important for mate finding.
if ( !ss->ttPv
&& depth < 8
- && eval - futility_margin(depth, improving) - (ss-1)->statScore / 256 >= beta
+ && eval - futility_margin(depth, improving) - (ss-1)->statScore / 303 >= beta
&& eval >= beta
- && eval < 26305) // larger than VALUE_KNOWN_WIN, but smaller than TB wins.
+ && eval < 28031) // larger than VALUE_KNOWN_WIN, but smaller than TB wins
return eval;
// Step 9. Null move search with verification search (~22 Elo)
if ( !PvNode
&& (ss-1)->currentMove != MOVE_NULL
- && (ss-1)->statScore < 14695
+ && (ss-1)->statScore < 17139
&& eval >= beta
&& eval >= ss->staticEval
- && ss->staticEval >= beta - 15 * depth - improvement / 15 + 198 + complexity / 28
+ && ss->staticEval >= beta - 20 * depth - improvement / 13 + 233 + complexity / 25
&& !excludedMove
&& pos.non_pawn_material(us)
&& (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
assert(eval - beta >= 0);
// Null move dynamic reduction based on depth, eval and complexity of position
- Depth R = std::min(int(eval - beta) / 147, 5) + depth / 3 + 4 - (complexity > 753);
+ Depth R = std::min(int(eval - beta) / 168, 7) + depth / 3 + 4 - (complexity > 861);
ss->currentMove = MOVE_NULL;
ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
}
}
- probCutBeta = beta + 179 - 46 * improving;
+ probCutBeta = beta + 191 - 54 * improving;
// Step 10. ProbCut (~4 Elo)
// If we have a good enough capture and a reduced search returns a value
{
assert(probCutBeta < VALUE_INFINITE);
- MovePicker mp(pos, ttMove, probCutBeta - ss->staticEval, &captureHistory);
- bool ttPv = ss->ttPv;
- ss->ttPv = false;
+ MovePicker mp(pos, ttMove, probCutBeta - ss->staticEval, depth - 3, &captureHistory);
while ((move = mp.next_move()) != MOVE_NONE)
if (move != excludedMove && pos.legal(move))
{
- assert(pos.capture_or_promotion(move));
-
- captureOrPromotion = true;
+ assert(pos.capture(move) || promotion_type(move) == QUEEN);
ss->currentMove = move;
ss->continuationHistory = &thisThread->continuationHistory[ss->inCheck]
- [captureOrPromotion]
+ [true]
[pos.moved_piece(move)]
[to_sq(move)];
if (value >= probCutBeta)
{
- // if transposition table doesn't have equal or more deep info write probCut data into it
- if ( !(ss->ttHit
- && tte->depth() >= depth - 3
- && ttValue != VALUE_NONE))
- tte->save(posKey, value_to_tt(value, ss->ply), ttPv,
- BOUND_LOWER,
- depth - 3, move, ss->staticEval);
+ // Save ProbCut data into transposition table
+ tte->save(posKey, value_to_tt(value, ss->ply), ss->ttPv, BOUND_LOWER, depth - 3, move, ss->staticEval);
return value;
}
}
- ss->ttPv = ttPv;
}
- // Step 11. If the position is not in TT, decrease depth by 2 or 1 depending on node type (~3 Elo)
- if ( PvNode
- && depth >= 3
+ // Step 11. If the position is not in TT, decrease depth by 3.
+ // Use qsearch if depth is equal or below zero (~4 Elo)
+ if ( PvNode
&& !ttMove)
- depth -= 2;
+ depth -= 3;
+
+ if (depth <= 0)
+ return qsearch<PV>(pos, ss, alpha, beta);
- if ( cutNode
- && depth >= 8
+ if ( cutNode
+ && depth >= 9
&& !ttMove)
- depth--;
+ depth -= 2;
moves_loop: // When in check, search starts here
// Step 12. A small Probcut idea, when we are in check (~0 Elo)
- probCutBeta = beta + 481;
+ probCutBeta = beta + 417;
if ( ss->inCheck
&& !PvNode
&& depth >= 2
ss->killers);
value = bestValue;
- moveCountPruning = false;
+ moveCountPruning = singularQuietLMR = false;
// Indicate PvNodes that will probably fail low if the node was searched
// at a depth equal or greater than the current depth, and the result of this search was a fail low.
(ss+1)->pv = nullptr;
extension = 0;
- captureOrPromotion = pos.capture_or_promotion(move);
+ capture = pos.capture(move);
movedPiece = pos.moved_piece(move);
givesCheck = pos.gives_check(move);
// Reduced depth of the next LMR search
int lmrDepth = std::max(newDepth - reduction(improving, depth, moveCount, delta, thisThread->rootDelta), 0);
- if ( captureOrPromotion
+ if ( capture
|| givesCheck)
{
// Futility pruning for captures (~0 Elo)
- if ( !pos.empty(to_sq(move))
- && !givesCheck
+ if ( !givesCheck
&& !PvNode
- && lmrDepth < 6
+ && lmrDepth < 7
&& !ss->inCheck
- && ss->staticEval + 281 + 179 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
+ && ss->staticEval + 180 + 201 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
+ captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 6 < alpha)
continue;
// SEE based pruning (~9 Elo)
- if (!pos.see_ge(move, Value(-203) * depth))
+ if (!pos.see_ge(move, Value(-222) * depth))
continue;
}
else
&& history < -3875 * (depth - 1))
continue;
- history += thisThread->mainHistory[us][from_to(move)];
+ history += 2 * thisThread->mainHistory[us][from_to(move)];
// Futility pruning: parent node (~9 Elo)
if ( !ss->inCheck
- && lmrDepth < 11
- && ss->staticEval + 122 + 138 * lmrDepth + history / 60 <= alpha)
+ && lmrDepth < 13
+ && ss->staticEval + 106 + 145 * lmrDepth + history / 52 <= alpha)
continue;
// Prune moves with negative SEE (~3 Elo)
- if (!pos.see_ge(move, Value(-25 * lmrDepth * lmrDepth - 20 * lmrDepth)))
+ if (!pos.see_ge(move, Value(-24 * lmrDepth * lmrDepth - 15 * lmrDepth)))
continue;
}
}
// a reduced search on all the other moves but the ttMove and if the
// result is lower than ttValue minus a margin, then we will extend the ttMove.
if ( !rootNode
- && depth >= 4 + 2 * (PvNode && tte->is_pv())
+ && depth >= 4 - (thisThread->previousDepth > 24) + 2 * (PvNode && tte->is_pv())
&& move == ttMove
&& !excludedMove // Avoid recursive singular search
/* && ttValue != VALUE_NONE Already implicit in the next condition */
&& (tte->bound() & BOUND_LOWER)
&& tte->depth() >= depth - 3)
{
- Value singularBeta = ttValue - 3 * depth;
+ Value singularBeta = ttValue - (3 + (ss->ttPv && !PvNode)) * depth;
Depth singularDepth = (depth - 1) / 2;
ss->excludedMove = move;
if (value < singularBeta)
{
extension = 1;
+ singularQuietLMR = !ttCapture;
// Avoid search explosion by limiting the number of double extensions
if ( !PvNode
- && value < singularBeta - 26
- && ss->doubleExtensions <= 8)
+ && value < singularBeta - 25
+ && ss->doubleExtensions <= 9)
extension = 2;
}
// If the eval of ttMove is greater than beta, we reduce it (negative extension)
else if (ttValue >= beta)
extension = -2;
+
+ // If the eval of ttMove is less than alpha and value, we reduce it (negative extension)
+ else if (ttValue <= alpha && ttValue <= value)
+ extension = -1;
}
// Check extensions (~1 Elo)
else if ( givesCheck
&& depth > 9
- && abs(ss->staticEval) > 71)
+ && abs(ss->staticEval) > 82)
extension = 1;
// Quiet ttMove extensions (~0 Elo)
else if ( PvNode
&& move == ttMove
&& move == ss->killers[0]
- && (*contHist[0])[movedPiece][to_sq(move)] >= 5491)
+ && (*contHist[0])[movedPiece][to_sq(move)] >= 5177)
extension = 1;
}
// Update the current move (this must be done after singular extension search)
ss->currentMove = move;
ss->continuationHistory = &thisThread->continuationHistory[ss->inCheck]
- [captureOrPromotion]
+ [capture]
[movedPiece]
[to_sq(move)];
// Step 16. Make the move
pos.do_move(move, st, givesCheck);
- bool doDeeperSearch = false;
-
// Step 17. Late moves reduction / extension (LMR, ~98 Elo)
// We use various heuristics for the sons of a node after the first son has
// been searched. In general we would like to reduce them, but there are many
if ( depth >= 2
&& moveCount > 1 + (PvNode && ss->ply <= 1)
&& ( !ss->ttPv
- || !captureOrPromotion
+ || !capture
|| (cutNode && (ss-1)->moveCount > 1)))
{
Depth r = reduction(improving, depth, moveCount, delta, thisThread->rootDelta);
- // Decrease reduction at some PvNodes (~2 Elo)
- if ( PvNode
- && bestMoveCount <= 3)
- r--;
-
// Decrease reduction if position is or has been on the PV
// and node is not likely to fail low. (~3 Elo)
if ( ss->ttPv
r--;
// Increase reduction for cut nodes (~3 Elo)
- if (cutNode && move != ss->killers[0])
+ if (cutNode)
r += 2;
// Increase reduction if ttMove is a capture (~3 Elo)
if (ttCapture)
r++;
- ss->statScore = thisThread->mainHistory[us][from_to(move)]
+ // Decrease reduction for PvNodes based on depth
+ if (PvNode)
+ r -= 1 + 11 / (3 + depth);
+
+ // Decrease reduction if ttMove has been singularly extended (~1 Elo)
+ if (singularQuietLMR)
+ r--;
+
+ // Dicrease reduction if we move a threatened piece (~1 Elo)
+ if ( depth > 9
+ && (mp.threatenedPieces & from_sq(move)))
+ r--;
+
+ // Increase reduction if next ply has a lot of fail high
+ if ((ss+1)->cutoffCnt > 3 && !PvNode)
+ r++;
+
+ ss->statScore = 2 * thisThread->mainHistory[us][from_to(move)]
+ (*contHist[0])[movedPiece][to_sq(move)]
+ (*contHist[1])[movedPiece][to_sq(move)]
+ (*contHist[3])[movedPiece][to_sq(move)]
- - 4334;
+ - 4433;
// Decrease/increase reduction for moves with a good/bad history (~30 Elo)
- r -= ss->statScore / 15914;
-
- // In general we want to cap the LMR depth search at newDepth. But if reductions
- // are really negative and movecount is low, we allow this move to be searched
- // deeper than the first move (this may lead to hidden double extensions).
- int deeper = r >= -1 ? 0
- : moveCount <= 4 ? 2
- : PvNode && depth > 4 ? 1
- : cutNode && moveCount <= 8 ? 1
- : 0;
+ r -= ss->statScore / 13628;
- Depth d = std::clamp(newDepth - r, 1, newDepth + deeper);
+ // In general we want to cap the LMR depth search at newDepth, but when
+ // reduction is negative, we allow this move a limited search extension
+ // beyond the first move depth. This may lead to hidden double extensions.
+ Depth d = std::clamp(newDepth - r, 1, newDepth + 1);
value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, d, true);
- // If the son is reduced and fails high it will be re-searched at full depth
- doFullDepthSearch = value > alpha && d < newDepth;
- doDeeperSearch = value > (alpha + 78 + 11 * (newDepth - d));
- didLMR = true;
- }
- else
- {
- doFullDepthSearch = !PvNode || moveCount > 1;
- didLMR = false;
- }
-
- // Step 18. Full depth search when LMR is skipped or fails high
- if (doFullDepthSearch)
- {
- value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth + doDeeperSearch, !cutNode);
-
- // If the move passed LMR update its stats
- if (didLMR)
+ // Do full depth search when reduced LMR search fails high
+ if (value > alpha && d < newDepth)
{
+ const bool doDeeperSearch = value > (alpha + 64 + 11 * (newDepth - d));
+ value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth + doDeeperSearch, !cutNode);
+
int bonus = value > alpha ? stat_bonus(newDepth)
: -stat_bonus(newDepth);
- if (captureOrPromotion)
+ if (capture)
bonus /= 6;
update_continuation_histories(ss, movedPiece, to_sq(move), bonus);
}
}
+ // Step 18. Full depth search when LMR is skipped
+ else if (!PvNode || moveCount > 1)
+ {
+ value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth, !cutNode);
+ }
+
// For PV nodes only, do a full PV search on the first move or after a fail
// high (in the latter case search only if value < beta), otherwise let the
// parent node fail low with value <= alpha and try another move.
if (PvNode && value < beta) // Update alpha! Always alpha < beta
{
alpha = value;
- bestMoveCount++;
+
+ // Reduce other moves if we have found at least one score improvement
+ if ( depth > 1
+ && depth < 6
+ && beta < VALUE_KNOWN_WIN
+ && alpha > -VALUE_KNOWN_WIN)
+ depth -= 1;
+
+ assert(depth > 0);
}
else
{
+ ss->cutoffCnt++;
assert(value >= beta); // Fail high
break;
}
}
}
+ else
+ ss->cutoffCnt = 0;
+
// If the move is worse than some previously searched move, remember it to update its stats later
if (move != bestMove)
{
- if (captureOrPromotion && captureCount < 32)
+ if (capture && captureCount < 32)
capturesSearched[captureCount++] = move;
- else if (!captureOrPromotion && quietCount < 64)
+ else if (!capture && quietCount < 64)
quietsSearched[quietCount++] = move;
}
}
quietsSearched, quietCount, capturesSearched, captureCount, depth);
// Bonus for prior countermove that caused the fail low
- else if ( (depth >= 4 || PvNode)
+ else if ( (depth >= 5 || PvNode)
&& !priorCapture)
{
//Assign extra bonus if current node is PvNode or cutNode
//or fail low was really bad
bool extraBonus = PvNode
|| cutNode
- || bestValue < alpha - 70 * depth;
+ || bestValue < alpha - 62 * depth;
update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, stat_bonus(depth) * (1 + extraBonus));
}
// opponent move is probably good and the new position is added to the search tree.
if (bestValue <= alpha)
ss->ttPv = ss->ttPv || ((ss-1)->ttPv && depth > 3);
- // Otherwise, a counter move has been found and if the position is the last leaf
- // in the search tree, remove the position from the search tree.
- else if (depth > 3)
- ss->ttPv = ss->ttPv && (ss+1)->ttPv;
// Write gathered information in transposition table
if (!excludedMove && !(rootNode && thisThread->pvIdx))
// qsearch() is the quiescence search function, which is called by the main search
// function with zero depth, or recursively with further decreasing depth per call.
+ // (~155 elo)
template <NodeType nodeType>
Value qsearch(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth) {
Move ttMove, move, bestMove;
Depth ttDepth;
Value bestValue, value, ttValue, futilityValue, futilityBase;
- bool pvHit, givesCheck, captureOrPromotion;
+ bool pvHit, givesCheck, capture;
int moveCount;
if (PvNode)
&& ss->ttHit
&& tte->depth() >= ttDepth
&& ttValue != VALUE_NONE // Only in case of TT access race
- && (ttValue >= beta ? (tte->bound() & BOUND_LOWER)
- : (tte->bound() & BOUND_UPPER)))
+ && (tte->bound() & (ttValue >= beta ? BOUND_LOWER : BOUND_UPPER)))
return ttValue;
// Evaluate the position statically
if (PvNode && bestValue > alpha)
alpha = bestValue;
- futilityBase = bestValue + 118;
+ futilityBase = bestValue + 153;
}
const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,
continue;
givesCheck = pos.gives_check(move);
- captureOrPromotion = pos.capture_or_promotion(move);
+ capture = pos.capture(move);
moveCount++;
ss->currentMove = move;
ss->continuationHistory = &thisThread->continuationHistory[ss->inCheck]
- [captureOrPromotion]
+ [capture]
[pos.moved_piece(move)]
[to_sq(move)];
// Continuation history based pruning (~2 Elo)
- if ( !captureOrPromotion
+ if ( !capture
&& bestValue > VALUE_TB_LOSS_IN_MAX_PLY
- && (*contHist[0])[pos.moved_piece(move)][to_sq(move)] < CounterMovePruneThreshold
- && (*contHist[1])[pos.moved_piece(move)][to_sq(move)] < CounterMovePruneThreshold)
+ && (*contHist[0])[pos.moved_piece(move)][to_sq(move)] < 0
+ && (*contHist[1])[pos.moved_piece(move)][to_sq(move)] < 0)
continue;
// movecount pruning for quiet check evasions
- if ( bestValue > VALUE_TB_LOSS_IN_MAX_PLY
+ if ( bestValue > VALUE_TB_LOSS_IN_MAX_PLY
&& quietCheckEvasions > 1
- && !captureOrPromotion
+ && !capture
&& ss->inCheck)
continue;
- quietCheckEvasions += !captureOrPromotion && ss->inCheck;
+ quietCheckEvasions += !capture && ss->inCheck;
// Make and search the move
pos.do_move(move, st, givesCheck);
// update_pv() adds current move and appends child pv[]
- void update_pv(Move* pv, Move move, Move* childPv) {
+ void update_pv(Move* pv, Move move, const Move* childPv) {
for (*pv++ = move; childPv && *childPv != MOVE_NONE; )
*pv++ = *childPv++;
void update_all_stats(const Position& pos, Stack* ss, Move bestMove, Value bestValue, Value beta, Square prevSq,
Move* quietsSearched, int quietCount, Move* capturesSearched, int captureCount, Depth depth) {
- int bonus1, bonus2;
Color us = pos.side_to_move();
Thread* thisThread = pos.this_thread();
CapturePieceToHistory& captureHistory = thisThread->captureHistory;
Piece moved_piece = pos.moved_piece(bestMove);
PieceType captured = type_of(pos.piece_on(to_sq(bestMove)));
+ int bonus1 = stat_bonus(depth + 1);
- bonus1 = stat_bonus(depth + 1);
- bonus2 = bestValue > beta + PawnValueMg ? bonus1 // larger bonus
+ if (!pos.capture(bestMove))
+ {
+ int bonus2 = bestValue > beta + 137 ? bonus1 // larger bonus
: stat_bonus(depth); // smaller bonus
- if (!pos.capture_or_promotion(bestMove))
- {
// Increase stats for the best move in case it was a quiet move
update_quiet_stats(pos, ss, bestMove, bonus2);
ss << (v >= beta ? " lowerbound" : v <= alpha ? " upperbound" : "");
ss << " nodes " << nodesSearched
- << " nps " << nodesSearched * 1000 / elapsed;
-
- if (elapsed > 1000) // Earlier makes little sense
- ss << " hashfull " << TT.hashfull();
-
- ss << " tbhits " << tbHits
+ << " nps " << nodesSearched * 1000 / elapsed
+ << " hashfull " << TT.hashfull()
+ << " tbhits " << tbHits
<< " time " << elapsed
<< " pv";