X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=src%2Fsearch.cpp;h=160031384838abdc9ba8b5c882dc754a04bb5cd0;hb=15d47a2b3821b92c4d048f39f7f43c301299d365;hp=d5416e407599d03e9c46b56b64297f6de5be57c7;hpb=cf3dbcb5acd66efaaa84fa1e24ce7afb062fba08;p=stockfish diff --git a/src/search.cpp b/src/search.cpp index d5416e40..16003138 100644 --- a/src/search.cpp +++ b/src/search.cpp @@ -77,7 +77,7 @@ enum NodeType { // Futility margin Value futility_margin(Depth d, bool noTtCutNode, bool improving) { - return Value((126 - 42 * noTtCutNode) * (d - improving)); + return Value((125 - 43 * noTtCutNode) * (d - improving)); } // Reductions lookup table initialized at startup @@ -85,8 +85,8 @@ int Reductions[MAX_MOVES]; // [depth or moveNumber] Depth reduction(bool i, Depth d, int mn, Value delta, Value rootDelta) { int reductionScale = Reductions[d] * Reductions[mn]; - return (reductionScale + 1560 - int(delta) * 945 / int(rootDelta)) / 1024 - + (!i && reductionScale > 791); + return (reductionScale + 1487 - int(delta) * 976 / int(rootDelta)) / 1024 + + (!i && reductionScale > 808); } constexpr int futility_move_count(bool improving, Depth depth) { @@ -94,7 +94,10 @@ constexpr int futility_move_count(bool improving, Depth depth) { } // History and stats update bonus, based on depth -int stat_bonus(Depth d) { return std::min(334 * d - 531, 1538); } +int stat_bonus(Depth d) { return std::min(291 * d - 350, 1200); } + +// History and stats update malus, based on depth +int stat_malus(Depth d) { return std::min(361 * d - 361, 1182); } // Add a small random component to draw evaluations to avoid 3-fold blindness Value value_draw(const Thread* thisThread) { @@ -279,9 +282,9 @@ void MainThread::search() { // consumed, the user stops the search, or the maximum search depth is reached. void Thread::search() { - // Allocate stack with extra size to allow access from (ss-7) to (ss+2): - // (ss-7) is needed for update_continuation_histories(ss-1) which accesses (ss-6), - // (ss+2) is needed for initialization of statScore and killers. + // Allocate stack with extra size to allow access from (ss - 7) to (ss + 2): + // (ss - 7) is needed for update_continuation_histories(ss - 1) which accesses (ss - 6), + // (ss + 2) is needed for initialization of cutOffCnt and killers. Stack stack[MAX_PLY + 10], *ss = stack + 7; Move pv[MAX_PLY + 1]; Value alpha, beta, delta; @@ -363,14 +366,13 @@ void Thread::search() { selDepth = 0; // Reset aspiration window starting size - Value prev = rootMoves[pvIdx].averageScore; - delta = Value(10) + int(prev) * prev / 17470; - alpha = std::max(prev - delta, -VALUE_INFINITE); - beta = std::min(prev + delta, VALUE_INFINITE); - - // Adjust optimism based on root move's previousScore (~4 Elo) - int opt = 113 * prev / (std::abs(prev) + 109); - optimism[us] = Value(opt); + Value avg = rootMoves[pvIdx].averageScore; + delta = Value(10) + int(avg) * avg / 15335; + alpha = std::max(avg - delta, -VALUE_INFINITE); + beta = std::min(avg + delta, VALUE_INFINITE); + + // Adjust optimism based on root move's averageScore (~4 Elo) + optimism[us] = 110 * avg / (std::abs(avg) + 121); optimism[~us] = -optimism[us]; // Start with a small aspiration window and, in the case of a fail @@ -379,8 +381,8 @@ void Thread::search() { int failedHighCnt = 0; while (true) { - // Adjust the effective depth searched, but ensure at least one effective increment for every - // four searchAgain steps (see issue #2717). 
+ // Adjust the effective depth searched, but ensure at least one effective increment + // for every four searchAgain steps (see issue #2717). Depth adjustedDepth = std::max(1, rootDepth - failedHighCnt - 3 * (searchAgainCounter + 1) / 4); bestValue = Stockfish::search(rootPos, ss, alpha, beta, adjustedDepth, false); @@ -582,7 +584,7 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo : value_draw(pos.this_thread()); // Step 3. Mate distance pruning. Even if we mate at the next move our score - // would be at best mate_in(ss->ply+1), but if alpha is already bigger because + // would be at best mate_in(ss->ply + 1), but if alpha is already bigger because // a shorter mate was found upward in the tree then there is no need to search // because we will never beat the current alpha. Same logic but with reversed // signs apply also in the opposite condition of being mated instead of giving @@ -633,15 +635,16 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo if (!ttCapture) update_quiet_stats(pos, ss, ttMove, stat_bonus(depth)); - // Extra penalty for early quiet moves of the previous ply (~0 Elo on STC, ~2 Elo on LTC) + // Extra penalty for early quiet moves of + // the previous ply (~0 Elo on STC, ~2 Elo on LTC). if (prevSq != SQ_NONE && (ss - 1)->moveCount <= 2 && !priorCapture) update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq, - -stat_bonus(depth + 1)); + -stat_malus(depth + 1)); } // Penalty for a quiet ttMove that fails low (~1 Elo) else if (!ttCapture) { - int penalty = -stat_bonus(depth); + int penalty = -stat_malus(depth); thisThread->mainHistory[us][from_to(ttMove)] << penalty; update_continuation_histories(ss, pos.moved_piece(ttMove), to_sq(ttMove), penalty); } @@ -715,7 +718,8 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo } else if (excludedMove) { - // Providing the hint that this node's accumulator will be used often brings significant Elo gain (~13 Elo) + // Providing the hint that this node's accumulator will be used often + // brings significant Elo gain (~13 Elo). Eval::NNUE::hint_common_parent_position(pos); eval = ss->staticEval; } @@ -742,8 +746,10 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo // Use static evaluation difference to improve quiet move ordering (~4 Elo) if (is_ok((ss - 1)->currentMove) && !(ss - 1)->inCheck && !priorCapture) { - int bonus = std::clamp(-18 * int((ss - 1)->staticEval + ss->staticEval), -1812, 1812); + int bonus = std::clamp(-14 * int((ss - 1)->staticEval + ss->staticEval), -1449, 1449); thisThread->mainHistory[~us][from_to((ss - 1)->currentMove)] << bonus; + if (type_of(pos.piece_on(prevSq)) != PAWN && type_of((ss - 1)->currentMove) != PROMOTION) + thisThread->pawnHistory[pawn_structure(pos)][pos.piece_on(prevSq)][prevSq] << bonus / 4; } // Set up the improving flag, which is true if current static evaluation is @@ -759,7 +765,7 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo // If eval is really low check with qsearch if it can exceed alpha, if it can't, // return a fail low. // Adjust razor margin according to cutoffCnt. 
(~1 Elo) - if (eval < alpha - 492 - (257 - 200 * ((ss + 1)->cutoffCnt > 3)) * depth * depth) + if (eval < alpha - 474 - (270 - 174 * ((ss + 1)->cutoffCnt > 3)) * depth * depth) { value = qsearch(pos, ss, alpha - 1, alpha); if (value < alpha) @@ -773,7 +779,7 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo - (ss - 1)->statScore / 321 >= beta && eval >= beta && eval < 29462 // smaller than TB wins - && !(!ttCapture && ttMove)) + && (!ttMove || ttCapture)) return eval; // Step 9. Null move search with verification search (~35 Elo) @@ -817,15 +823,18 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo } } - // Step 10. If the position doesn't have a ttMove, decrease depth by 2 - // (or by 4 if the TT entry for the current position was hit and the stored depth is greater than or equal to the current depth). - // Use qsearch if depth is equal or below zero (~9 Elo) + // Step 10. Internal iterative reductions (~9 Elo) + // For PV nodes without a ttMove, we decrease depth by 2, + // or by 4 if the current position is present in the TT and + // the stored depth is greater than or equal to the current depth. + // Use qsearch if depth <= 0. if (PvNode && !ttMove) depth -= 2 + 2 * (ss->ttHit && tte->depth() >= depth); if (depth <= 0) return qsearch(pos, ss, alpha, beta); + // For cutNodes without a ttMove, we decrease depth by 2 if depth is high enough. if (cutNode && depth >= 8 && !ttMove) depth -= 2; @@ -852,6 +861,9 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo { assert(pos.capture_stage(move)); + // Prefetch the TT entry for the resulting position + prefetch(TT.first_entry(pos.key_after(move))); + ss->currentMove = move; ss->continuationHistory = &thisThread @@ -901,7 +913,7 @@ moves_loop: // When in check, search starts here prevSq != SQ_NONE ? 
thisThread->counterMoves[pos.piece_on(prevSq)][prevSq] : MOVE_NONE; MovePicker mp(pos, ttMove, depth, &thisThread->mainHistory, &captureHistory, contHist, - countermove, ss->killers); + &thisThread->pawnHistory, countermove, ss->killers); value = bestValue; moveCountPruning = singularQuietLMR = false; @@ -967,13 +979,15 @@ moves_loop: // When in check, search starts here if (capture || givesCheck) { // Futility pruning for captures (~2 Elo) - if (!givesCheck && lmrDepth < 7 && !ss->inCheck - && ss->staticEval + 188 + 206 * lmrDepth + PieceValue[pos.piece_on(to_sq(move))] - + captureHistory[movedPiece][to_sq(move)] - [type_of(pos.piece_on(to_sq(move)))] - / 7 - < alpha) - continue; + if (!givesCheck && lmrDepth < 7 && !ss->inCheck) + { + Piece capturedPiece = pos.piece_on(to_sq(move)); + int futilityEval = + ss->staticEval + 239 + 291 * lmrDepth + PieceValue[capturedPiece] + + captureHistory[movedPiece][to_sq(move)][type_of(capturedPiece)] / 7; + if (futilityEval < alpha) + continue; + } // SEE based pruning for captures and checks (~11 Elo) if (!pos.see_ge(move, Value(-185) * depth)) @@ -983,25 +997,29 @@ moves_loop: // When in check, search starts here { int history = (*contHist[0])[movedPiece][to_sq(move)] + (*contHist[1])[movedPiece][to_sq(move)] - + (*contHist[3])[movedPiece][to_sq(move)]; + + (*contHist[3])[movedPiece][to_sq(move)] + + thisThread->pawnHistory[pawn_structure(pos)][movedPiece][to_sq(move)]; // Continuation history based pruning (~2 Elo) - if (lmrDepth < 6 && history < -3232 * depth) + if (lmrDepth < 6 && history < -3645 * depth) continue; history += 2 * thisThread->mainHistory[us][from_to(move)]; - lmrDepth += history / 5793; - lmrDepth = std::max(lmrDepth, -2); + lmrDepth += history / 7836; + lmrDepth = std::max(lmrDepth, -1); // Futility pruning: parent node (~13 Elo) - if (!ss->inCheck && lmrDepth < 13 && ss->staticEval + 115 + 122 * lmrDepth <= alpha) + if (!ss->inCheck && lmrDepth < 13 + && ss->staticEval + (bestValue < ss->staticEval - 62 ? 123 : 77) + + 127 * lmrDepth + <= alpha) continue; lmrDepth = std::max(lmrDepth, 0); // Prune moves with negative SEE (~4 Elo) - if (!pos.see_ge(move, Value(-27 * lmrDepth * lmrDepth))) + if (!pos.see_ge(move, Value(-26 * lmrDepth * lmrDepth))) continue; } } @@ -1013,19 +1031,20 @@ moves_loop: // When in check, search starts here // Singular extension search (~94 Elo). If all moves but one fail low on a // search of (alpha-s, beta-s), and just one fails high on (alpha, beta), // then that move is singular and should be extended. To verify this we do - // a reduced search on all the other moves but the ttMove and if the result - // is lower than ttValue minus a margin, then we will extend the ttMove. Note - // that depth margin and singularBeta margin are known for having non-linear + // a reduced search on the position excluding the ttMove and if the result + // is lower than ttValue minus a margin, then we will extend the ttMove. + + // Note: the depth margin and singularBeta margin are known for having non-linear // scaling. Their values are optimized to time controls of 180+1.8 and longer - // so changing them requires tests at this type of time controls. - if (!rootNode + // so changing them requires tests at these types of time controls. + // Recursive singular search is avoided. 
+ if (!rootNode && move == ttMove && !excludedMove && depth >= 4 - (thisThread->completedDepth > 24) + 2 * (PvNode && tte->is_pv()) - && move == ttMove && !excludedMove // Avoid recursive singular search && abs(ttValue) < VALUE_TB_WIN_IN_MAX_PLY && (tte->bound() & BOUND_LOWER) && tte->depth() >= depth - 3) { Value singularBeta = ttValue - (64 + 57 * (ss->ttPv && !PvNode)) * depth / 64; - Depth singularDepth = (depth - 1) / 2; + Depth singularDepth = newDepth / 2; ss->excludedMove = move; value = @@ -1046,22 +1065,28 @@ moves_loop: // When in check, search starts here } // Multi-cut pruning - // Our ttMove is assumed to fail high, and now we failed high also on a - // reduced search without the ttMove. So we assume this expected cut-node - // is not singular, that multiple moves fail high, and we can prune the - // whole subtree by returning a softbound. + // Our ttMove is assumed to fail high based on the bound of the TT entry, + // and if after excluding the ttMove with a reduced search we fail high over the original beta, + // we assume this expected cut-node is not singular (multiple moves fail high), + // and we can prune the whole subtree by returning a softbound. else if (singularBeta >= beta) return singularBeta; - // If the eval of ttMove is greater than beta, we reduce it (negative extension) (~7 Elo) + // Negative extensions + // If other moves failed high over (ttValue - margin) without the ttMove on a reduced search, + // but we cannot do multi-cut because (ttValue - margin) is lower than the original beta, + // we do not know if the ttMove is singular or can do a multi-cut, + // so we reduce the ttMove in favor of other moves based on some conditions: + + // If the ttMove is assumed to fail high over current beta (~7 Elo) else if (ttValue >= beta) extension = -2 - !PvNode; - // If we are on a cutNode, reduce it based on depth (negative extension) (~1 Elo) + // If we are on a cutNode but the ttMove is not assumed to fail high over current beta (~1 Elo) else if (cutNode) extension = depth < 19 ? 
-2 : -1; - // If the eval of ttMove is less than value, we reduce it (negative extension) (~1 Elo) + // If the ttMove is assumed to fail low over the value of the reduced search (~1 Elo) else if (ttValue <= value) extension = -1; } @@ -1074,6 +1099,12 @@ moves_loop: // When in check, search starts here else if (PvNode && move == ttMove && move == ss->killers[0] && (*contHist[0])[movedPiece][to_sq(move)] >= 4194) extension = 1; + + // Recapture extensions (~1 Elo) + else if (PvNode && move == ttMove && to_sq(move) == prevSq + && captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] + > 4000) + extension = 1; } // Add extension to new depth @@ -1111,7 +1142,7 @@ moves_loop: // When in check, search starts here if (PvNode) r--; - // Decrease reduction if ttMove has been singularly extended (~1 Elo) + // Decrease reduction if a quiet ttMove has been singularly extended (~1 Elo) if (singularQuietLMR) r--; @@ -1123,9 +1154,10 @@ moves_loop: // When in check, search starts here if ((ss + 1)->cutoffCnt > 3) r++; - // Decrease reduction for first generated move (ttMove) + // Set reduction to 0 for first picked move (ttMove) (~2 Elo) + // Nullifies all previous reduction adjustments to ttMove and leaves only history to do them else if (move == ttMove) - r--; + r = 0; ss->statScore = 2 * thisThread->mainHistory[us][from_to(move)] + (*contHist[0])[movedPiece][to_sq(move)] @@ -1133,19 +1165,21 @@ moves_loop: // When in check, search starts here + (*contHist[3])[movedPiece][to_sq(move)] - 3848; // Decrease/increase reduction for moves with a good/bad history (~25 Elo) - r -= ss->statScore / (10216 + 3855 * (depth > 5 && depth < 23)); + r -= ss->statScore / 14200; // Step 17. Late moves reduction / extension (LMR, ~117 Elo) // We use various heuristics for the sons of a node after the first son has // been searched. In general, we would like to reduce them, but there are many // cases where we extend a son if it has good chances to be "interesting". - if (depth >= 2 && moveCount > 1 + (PvNode && ss->ply <= 1) + if (depth >= 2 && moveCount > 1 + rootNode && (!ss->ttPv || !capture || (cutNode && (ss - 1)->moveCount > 1))) { // In general we want to cap the LMR depth search at newDepth, but when // reduction is negative, we allow this move a limited search extension // beyond the first move depth. This may lead to hidden double extensions. - Depth d = std::clamp(newDepth - r, 1, newDepth + 1); + // To prevent problems when the max value is less than the min value, + // std::clamp has been replaced by a more robust implementation. + Depth d = std::max(1, std::min(newDepth - r, newDepth + 1)); value = -search(pos, ss + 1, -(alpha + 1), -alpha, d, true); @@ -1154,18 +1188,15 @@ moves_loop: // When in check, search starts here { // Adjust full-depth search based on LMR results - if the result // was good enough search deeper, if it was bad enough search shallower. 
- const bool doDeeperSearch = value > (bestValue + 51 + 10 * (newDepth - d)); - const bool doEvenDeeperSearch = value > alpha + 700 && ss->doubleExtensions <= 6; - const bool doShallowerSearch = value < bestValue + newDepth; - - ss->doubleExtensions = ss->doubleExtensions + doEvenDeeperSearch; + const bool doDeeperSearch = value > (bestValue + 50 + 2 * newDepth); // (~1 Elo) + const bool doShallowerSearch = value < bestValue + newDepth; // (~2 Elo) - newDepth += doDeeperSearch - doShallowerSearch + doEvenDeeperSearch; + newDepth += doDeeperSearch - doShallowerSearch; if (newDepth > d) value = -search(pos, ss + 1, -(alpha + 1), -alpha, newDepth, !cutNode); - int bonus = value <= alpha ? -stat_bonus(newDepth) + int bonus = value <= alpha ? -stat_malus(newDepth) : value >= beta ? stat_bonus(newDepth) : 0; @@ -1176,7 +1207,7 @@ moves_loop: // When in check, search starts here // Step 18. Full-depth search when LMR is skipped else if (!PvNode || moveCount > 1) { - // Increase reduction for cut nodes and not ttMove (~1 Elo) + // Increase reduction for cut nodes without ttMove (~1 Elo) if (!ttMove && cutNode) r += 2; @@ -1311,8 +1342,8 @@ moves_loop: // When in check, search starts here // Bonus for prior countermove that caused the fail low else if (!priorCapture && prevSq != SQ_NONE) { - int bonus = (depth > 6) + (PvNode || cutNode) + (bestValue < alpha - 653) - + ((ss - 1)->moveCount > 11); + int bonus = (depth > 6) + (PvNode || cutNode) + (bestValue < alpha - 657) + + ((ss - 1)->moveCount > 10); update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq, stat_bonus(depth) * bonus); thisThread->mainHistory[~us][from_to((ss - 1)->currentMove)] @@ -1394,9 +1425,7 @@ Value qsearch(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth) { assert(0 <= ss->ply && ss->ply < MAX_PLY); - // Decide whether or not to include checks: this fixes also the type of - // TT entry depth that we are going to use. Note that in qsearch we use - // only two types of depth in TT: DEPTH_QS_CHECKS or DEPTH_QS_NO_CHECKS. + // Decide the replacement and cutoff priority of the qsearch TT entries ttDepth = ss->inCheck || depth >= DEPTH_QS_CHECKS ? DEPTH_QS_CHECKS : DEPTH_QS_NO_CHECKS; // Step 3. Transposition table lookup @@ -1429,7 +1458,7 @@ Value qsearch(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth) { bestValue = ttValue; } else - // In case of null move search use previous static eval with a different sign + // In case of null move search, use previous static eval with a different sign ss->staticEval = bestValue = (ss - 1)->currentMove != MOVE_NULL ? evaluate(pos) : -(ss - 1)->staticEval; @@ -1458,7 +1487,7 @@ Value qsearch(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth) { // will be generated. Square prevSq = is_ok((ss - 1)->currentMove) ? 
to_sq((ss - 1)->currentMove) : SQ_NONE; MovePicker mp(pos, ttMove, depth, &thisThread->mainHistory, &thisThread->captureHistory, - contHist, prevSq); + contHist, &thisThread->pawnHistory); int quietCheckEvasions = 0; @@ -1658,6 +1687,7 @@ void update_all_stats(const Position& pos, PieceType captured; int quietMoveBonus = stat_bonus(depth + 1); + int quietMoveMalus = stat_malus(depth); if (!pos.capture_stage(bestMove)) { @@ -1666,13 +1696,21 @@ void update_all_stats(const Position& pos, // Increase stats for the best move in case it was a quiet move update_quiet_stats(pos, ss, bestMove, bestMoveBonus); + thisThread->pawnHistory[pawn_structure(pos)][moved_piece][to_sq(bestMove)] + << quietMoveBonus; + + int moveMalus = bestValue > beta + 168 ? quietMoveMalus // larger malus + : stat_malus(depth); // smaller malus // Decrease stats for all non-best quiet moves for (int i = 0; i < quietCount; ++i) { - thisThread->mainHistory[us][from_to(quietsSearched[i])] << -bestMoveBonus; + thisThread->pawnHistory[pawn_structure(pos)][pos.moved_piece(quietsSearched[i])] + [to_sq(quietsSearched[i])] + << -moveMalus; + thisThread->mainHistory[us][from_to(quietsSearched[i])] << -moveMalus; update_continuation_histories(ss, pos.moved_piece(quietsSearched[i]), - to_sq(quietsSearched[i]), -bestMoveBonus); + to_sq(quietsSearched[i]), -moveMalus); } } else @@ -1688,20 +1726,20 @@ void update_all_stats(const Position& pos, && ((ss - 1)->moveCount == 1 + (ss - 1)->ttHit || ((ss - 1)->currentMove == (ss - 1)->killers[0])) && !pos.captured_piece()) - update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq, -quietMoveBonus); + update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq, -quietMoveMalus); // Decrease stats for all non-best capture moves for (int i = 0; i < captureCount; ++i) { moved_piece = pos.moved_piece(capturesSearched[i]); captured = type_of(pos.piece_on(to_sq(capturesSearched[i]))); - captureHistory[moved_piece][to_sq(capturesSearched[i])][captured] << -quietMoveBonus; + captureHistory[moved_piece][to_sq(capturesSearched[i])][captured] << -quietMoveMalus; } } // Updates histories of the move pairs formed -// by moves at ply -1, -2, -4, and -6 with current move. +// by moves at ply -1, -2, -3, -4, and -6 with current move. void update_continuation_histories(Stack* ss, Piece pc, Square to, int bonus) { for (int i : {1, 2, 3, 4, 6}) @@ -1860,9 +1898,9 @@ string UCI::pv(const Position& pos, Depth depth) { } -// Called in case we have no ponder move -// before exiting the search, for instance, in case we stop the search during a -// fail high at root. We try hard to have a ponder move to return to the GUI, +// Called in case we have no ponder move before exiting the search, +// for instance, in case we stop the search during a fail high at root. +// We try hard to have a ponder move to return to the GUI, // otherwise in case of 'ponder on' we have nothing to think about. bool RootMove::extract_ponder_from_tt(Position& pos) {
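
For reference, below is a minimal standalone sketch (not part of the patch) that mirrors the retuned formulas visible in the + and - lines above: the futility margin constants and the new stat_bonus / stat_malus split. Value and Depth are simplified to plain int here, and the old futility constants are kept alongside the new ones purely for comparison.

    #include <algorithm>
    #include <cstdio>

    // Old and new futility margins, copied from the - and + lines of this diff.
    int futility_margin_old(int d, bool noTtCutNode, bool improving) {
        return (126 - 42 * noTtCutNode) * (d - improving);
    }
    int futility_margin_new(int d, bool noTtCutNode, bool improving) {
        return (125 - 43 * noTtCutNode) * (d - improving);
    }

    // New depth-based history bonus and the newly split-off malus.
    int stat_bonus(int d) { return std::min(291 * d - 350, 1200); }
    int stat_malus(int d) { return std::min(361 * d - 361, 1182); }

    int main() {
        for (int d = 1; d <= 8; ++d)
            std::printf("d=%d  margin(old)=%d  margin(new)=%d  bonus=%d  malus=%d\n",
                        d,
                        futility_margin_old(d, /*noTtCutNode=*/false, /*improving=*/false),
                        futility_margin_new(d, /*noTtCutNode=*/false, /*improving=*/false),
                        stat_bonus(d), stat_malus(d));
        return 0;
    }

Running the sketch shows that the separate malus exceeds the bonus at shallow depths (e.g. 361 vs 232 at d=2) and that both saturate at their caps by roughly depth 5 to 6, which is the asymmetry the new stat_malus introduces for the history-penalty paths changed in this patch.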