X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=src%2Fsearch.cpp;h=160031384838abdc9ba8b5c882dc754a04bb5cd0;hb=15d47a2b3821b92c4d048f39f7f43c301299d365;hp=65b27d16d4f4f68c02350f8f8d65a9bdfc54abfb;hpb=d30af4f669fa9a47e26a54967c571ffa7987d660;p=stockfish

diff --git a/src/search.cpp b/src/search.cpp
index 65b27d16..16003138 100644
--- a/src/search.cpp
+++ b/src/search.cpp
@@ -94,7 +94,10 @@ constexpr int futility_move_count(bool improving, Depth depth) {
 }
 
 // History and stats update bonus, based on depth
-int stat_bonus(Depth d) { return std::min(357 * d - 483, 1511); }
+int stat_bonus(Depth d) { return std::min(291 * d - 350, 1200); }
+
+// History and stats update malus, based on depth
+int stat_malus(Depth d) { return std::min(361 * d - 361, 1182); }
 
 // Add a small random component to draw evaluations to avoid 3-fold blindness
 Value value_draw(const Thread* thisThread) {
@@ -364,13 +367,12 @@ void Thread::search() {
 
             // Reset aspiration window starting size
             Value avg = rootMoves[pvIdx].averageScore;
-            delta     = Value(10) + int(avg) * avg / 17470;
+            delta     = Value(10) + int(avg) * avg / 15335;
             alpha     = std::max(avg - delta, -VALUE_INFINITE);
             beta      = std::min(avg + delta, VALUE_INFINITE);
 
             // Adjust optimism based on root move's averageScore (~4 Elo)
-            int opt      = 113 * avg / (std::abs(avg) + 109);
-            optimism[us] = Value(opt);
+            optimism[us]  = 110 * avg / (std::abs(avg) + 121);
             optimism[~us] = -optimism[us];
 
             // Start with a small aspiration window and, in the case of a fail
@@ -637,12 +639,12 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo
                 // the previous ply (~0 Elo on STC, ~2 Elo on LTC).
                 if (prevSq != SQ_NONE && (ss - 1)->moveCount <= 2 && !priorCapture)
                     update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq,
-                                                  -stat_bonus(depth + 1));
+                                                  -stat_malus(depth + 1));
             }
             // Penalty for a quiet ttMove that fails low (~1 Elo)
             else if (!ttCapture)
             {
-                int penalty = -stat_bonus(depth);
+                int penalty = -stat_malus(depth);
                 thisThread->mainHistory[us][from_to(ttMove)] << penalty;
                 update_continuation_histories(ss, pos.moved_piece(ttMove), to_sq(ttMove), penalty);
             }
@@ -744,8 +746,10 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo
     // Use static evaluation difference to improve quiet move ordering (~4 Elo)
     if (is_ok((ss - 1)->currentMove) && !(ss - 1)->inCheck && !priorCapture)
     {
-        int bonus = std::clamp(-18 * int((ss - 1)->staticEval + ss->staticEval), -1812, 1812);
+        int bonus = std::clamp(-14 * int((ss - 1)->staticEval + ss->staticEval), -1449, 1449);
         thisThread->mainHistory[~us][from_to((ss - 1)->currentMove)] << bonus;
+        if (type_of(pos.piece_on(prevSq)) != PAWN && type_of((ss - 1)->currentMove) != PROMOTION)
+            thisThread->pawnHistory[pawn_structure(pos)][pos.piece_on(prevSq)][prevSq] << bonus / 4;
     }
 
     // Set up the improving flag, which is true if current static evaluation is
@@ -765,15 +769,7 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo
     {
         value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
         if (value < alpha)
-        {
-            if (!priorCapture && prevSq != SQ_NONE)
-            {
-                int bonus = (depth > 6) + (PvNode || cutNode) + (value < alpha - 658) + ((ss-1)->moveCount > 11);
-                update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, stat_bonus(depth) * bonus);
-                thisThread->mainHistory[~us][from_to((ss-1)->currentMove)] << stat_bonus(depth) * bonus * 57 / 100;
-            }
             return value;
-        }
     }
 
     // Step 8. Futility pruning: child node (~40 Elo)
@@ -827,16 +823,18 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo
         }
     }
 
-    // Step 10. If the position doesn't have a ttMove, decrease depth by 2,
-    // or by 4 if the TT entry for the current position was hit and
+    // Step 10. Internal iterative reductions (~9 Elo)
+    // For PV nodes without a ttMove, we decrease depth by 2,
+    // or by 4 if the current position is present in the TT and
     // the stored depth is greater than or equal to the current depth.
-    // Use qsearch if depth is equal or below zero (~9 Elo)
+    // Use qsearch if depth <= 0.
     if (PvNode && !ttMove)
         depth -= 2 + 2 * (ss->ttHit && tte->depth() >= depth);
 
     if (depth <= 0)
         return qsearch<PV>(pos, ss, alpha, beta);
 
+    // For cutNodes without a ttMove, we decrease depth by 2 if depth is high enough.
     if (cutNode && depth >= 8 && !ttMove)
         depth -= 2;
 
@@ -856,14 +854,16 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo
     {
         assert(probCutBeta < VALUE_INFINITE);
 
-        MovePicker mp(pos, ttMove, probCutBeta - ss->staticEval, &captureHistory,
-                      thisThread->pawnHistory);
+        MovePicker mp(pos, ttMove, probCutBeta - ss->staticEval, &captureHistory);
 
         while ((move = mp.next_move()) != MOVE_NONE)
             if (move != excludedMove && pos.legal(move))
             {
                 assert(pos.capture_stage(move));
 
+                // Prefetch the TT entry for the resulting position
+                prefetch(TT.first_entry(pos.key_after(move)));
+
                 ss->currentMove = move;
                 ss->continuationHistory =
                   &thisThread
@@ -913,7 +913,7 @@ moves_loop: // When in check, search starts here
       prevSq != SQ_NONE ? thisThread->counterMoves[pos.piece_on(prevSq)][prevSq] : MOVE_NONE;
 
     MovePicker mp(pos, ttMove, depth, &thisThread->mainHistory, &captureHistory, contHist,
-                  thisThread->pawnHistory, countermove, ss->killers);
+                  &thisThread->pawnHistory, countermove, ss->killers);
 
     value            = bestValue;
     moveCountPruning = singularQuietLMR = false;
@@ -1010,7 +1010,10 @@ moves_loop: // When in check, search starts here
                 lmrDepth = std::max(lmrDepth, -1);
 
                 // Futility pruning: parent node (~13 Elo)
-                if (!ss->inCheck && lmrDepth < 13 && ss->staticEval + 77 + 124 * lmrDepth <= alpha)
+                if (!ss->inCheck && lmrDepth < 13
+                    && ss->staticEval + (bestValue < ss->staticEval - 62 ? 123 : 77)
+                           + 127 * lmrDepth
+                         <= alpha)
                     continue;
 
                 lmrDepth = std::max(lmrDepth, 0);
@@ -1028,11 +1031,12 @@ moves_loop: // When in check, search starts here
             // Singular extension search (~94 Elo). If all moves but one fail low on a
             // search of (alpha-s, beta-s), and just one fails high on (alpha, beta),
             // then that move is singular and should be extended. To verify this we do
-            // a reduced search on all the other moves but the ttMove and if the result
-            // is lower than ttValue minus a margin, then we will extend the ttMove. Note
-            // that depth margin and singularBeta margin are known for having non-linear
+            // a reduced search on the position excluding the ttMove and if the result
+            // is lower than ttValue minus a margin, then we will extend the ttMove.
+
+            // Note: the depth margin and singularBeta margin are known for having non-linear
             // scaling. Their values are optimized to time controls of 180+1.8 and longer
-            // so changing them requires tests at this type of time controls.
+            // so changing them requires tests at these types of time controls.
             // Recursive singular search is avoided.
             if (!rootNode && move == ttMove && !excludedMove
                 && depth >= 4 - (thisThread->completedDepth > 24) + 2 * (PvNode && tte->is_pv())
@@ -1040,7 +1044,7 @@
                 && tte->depth() >= depth - 3)
             {
                 Value singularBeta  = ttValue - (64 + 57 * (ss->ttPv && !PvNode)) * depth / 64;
-                Depth singularDepth = (depth - 1) / 2;
+                Depth singularDepth = newDepth / 2;
 
                 ss->excludedMove = move;
                 value =
@@ -1061,22 +1065,28 @@ moves_loop: // When in check, search starts here
                 }
 
                 // Multi-cut pruning
-                // Our ttMove is assumed to fail high, and now we failed high also on a
-                // reduced search without the ttMove. So we assume this expected cut-node
-                // is not singular, that multiple moves fail high, and we can prune the
-                // whole subtree by returning a softbound.
+                // Our ttMove is assumed to fail high based on the bound of the TT entry,
+                // and if after excluding the ttMove with a reduced search we fail high over the original beta,
+                // we assume this expected cut-node is not singular (multiple moves fail high),
+                // and we can prune the whole subtree by returning a softbound.
                 else if (singularBeta >= beta)
                     return singularBeta;
 
-                // If the eval of ttMove is greater than beta, reduce it (negative extension) (~7 Elo)
+                // Negative extensions
+                // If other moves failed high over (ttValue - margin) without the ttMove on a reduced search,
+                // but we cannot do multi-cut because (ttValue - margin) is lower than the original beta,
+                // we do not know if the ttMove is singular or can do a multi-cut,
+                // so we reduce the ttMove in favor of other moves based on some conditions:
+
+                // If the ttMove is assumed to fail high over current beta (~7 Elo)
                 else if (ttValue >= beta)
                     extension = -2 - !PvNode;
 
-                // If we are on a cutNode, reduce it based on depth (negative extension) (~1 Elo)
+                // If we are on a cutNode but the ttMove is not assumed to fail high over current beta (~1 Elo)
                 else if (cutNode)
                     extension = depth < 19 ? -2 : -1;
 
-                // If the eval of ttMove is less than value, reduce it (negative extension) (~1 Elo)
+                // If the ttMove is assumed to fail low over the value of the reduced search (~1 Elo)
                 else if (ttValue <= value)
                     extension = -1;
             }
@@ -1089,6 +1099,12 @@ moves_loop: // When in check, search starts here
             else if (PvNode && move == ttMove && move == ss->killers[0]
                      && (*contHist[0])[movedPiece][to_sq(move)] >= 4194)
                 extension = 1;
+
+            // Recapture extensions (~1 Elo)
+            else if (PvNode && move == ttMove && to_sq(move) == prevSq
+                     && captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))]
+                          > 4000)
+                extension = 1;
         }
 
         // Add extension to new depth
@@ -1126,7 +1142,7 @@ moves_loop: // When in check, search starts here
         if (PvNode)
             r--;
 
-        // Decrease reduction if ttMove has been singularly extended (~1 Elo)
+        // Decrease reduction if a quiet ttMove has been singularly extended (~1 Elo)
        if (singularQuietLMR)
            r--;
 
@@ -1138,9 +1154,10 @@ moves_loop: // When in check, search starts here
         if ((ss + 1)->cutoffCnt > 3)
             r++;
 
-        // Decrease reduction for first generated move (ttMove)
+        // Set reduction to 0 for first picked move (ttMove) (~2 Elo)
+        // Nullifies all previous reduction adjustments to ttMove and leaves only history to do them
         else if (move == ttMove)
-            r--;
+            r = 0;
 
         ss->statScore = 2 * thisThread->mainHistory[us][from_to(move)]
                       + (*contHist[0])[movedPiece][to_sq(move)]
@@ -1148,19 +1165,21 @@ moves_loop: // When in check, search starts here
                       + (*contHist[3])[movedPiece][to_sq(move)] - 3848;
 
         // Decrease/increase reduction for moves with a good/bad history (~25 Elo)
-        r -= ss->statScore / (10216 + 3855 * (depth > 5 && depth < 23));
+        r -= ss->statScore / 14200;
 
         // Step 17. Late moves reduction / extension (LMR, ~117 Elo)
         // We use various heuristics for the sons of a node after the first son has
         // been searched. In general, we would like to reduce them, but there are many
         // cases where we extend a son if it has good chances to be "interesting".
-        if (depth >= 2 && moveCount > 1 + (PvNode && ss->ply <= 1)
+        if (depth >= 2 && moveCount > 1 + rootNode
            && (!ss->ttPv || !capture || (cutNode && (ss - 1)->moveCount > 1)))
        {
            // In general we want to cap the LMR depth search at newDepth, but when
            // reduction is negative, we allow this move a limited search extension
            // beyond the first move depth. This may lead to hidden double extensions.
-            Depth d = std::clamp(newDepth - r, 1, newDepth + 1);
+            // To prevent problems when the max value is less than the min value,
+            // std::clamp has been replaced by a more robust implementation.
+            Depth d = std::max(1, std::min(newDepth - r, newDepth + 1));
 
            value = -search<NonPV>(pos, ss + 1, -(alpha + 1), -alpha, d, true);
 
@@ -1169,18 +1188,15 @@ moves_loop: // When in check, search starts here
            {
                // Adjust full-depth search based on LMR results - if the result
                // was good enough search deeper, if it was bad enough search shallower.
-                const bool doDeeperSearch = value > (bestValue + 51 + 10 * (newDepth - d));
-                const bool doEvenDeeperSearch = value > alpha + 700 && ss->doubleExtensions <= 6;
-                const bool doShallowerSearch = value < bestValue + newDepth;
-
-                ss->doubleExtensions = ss->doubleExtensions + doEvenDeeperSearch;
+                const bool doDeeperSearch    = value > (bestValue + 50 + 2 * newDepth);  // (~1 Elo)
+                const bool doShallowerSearch = value < bestValue + newDepth;             // (~2 Elo)
 
-                newDepth += doDeeperSearch - doShallowerSearch + doEvenDeeperSearch;
+                newDepth += doDeeperSearch - doShallowerSearch;
 
                if (newDepth > d)
                    value = -search<NonPV>(pos, ss + 1, -(alpha + 1), -alpha, newDepth, !cutNode);
 
-                int bonus = value <= alpha ? -stat_bonus(newDepth)
+                int bonus = value <= alpha ? -stat_malus(newDepth)
                          : value >= beta  ? stat_bonus(newDepth)
                                           : 0;
 
@@ -1191,7 +1207,7 @@ moves_loop: // When in check, search starts here
         // Step 18. Full-depth search when LMR is skipped
         else if (!PvNode || moveCount > 1)
         {
-            // Increase reduction for cut nodes and not ttMove (~1 Elo)
+            // Increase reduction for cut nodes without ttMove (~1 Elo)
             if (!ttMove && cutNode)
                 r += 2;
 
@@ -1331,7 +1347,7 @@ moves_loop: // When in check, search starts here
         update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq,
                                       stat_bonus(depth) * bonus);
         thisThread->mainHistory[~us][from_to((ss - 1)->currentMove)]
-          << stat_bonus(depth) * bonus * 61 / 100;
+          << stat_bonus(depth) * bonus / 2;
     }
 
     if (PvNode)
@@ -1409,9 +1425,7 @@ Value qsearch(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth) {
 
     assert(0 <= ss->ply && ss->ply < MAX_PLY);
 
-    // Decide whether or not to include checks: this fixes also the type of
-    // TT entry depth that we are going to use. Note that in qsearch we use
-    // only two types of depth in TT: DEPTH_QS_CHECKS or DEPTH_QS_NO_CHECKS.
+    // Decide the replacement and cutoff priority of the qsearch TT entries
     ttDepth = ss->inCheck || depth >= DEPTH_QS_CHECKS ? DEPTH_QS_CHECKS : DEPTH_QS_NO_CHECKS;
 
     // Step 3. Transposition table lookup
@@ -1444,7 +1458,7 @@ Value qsearch(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth) {
                 bestValue = ttValue;
         }
         else
-            // In case of null move search use previous static eval with a different sign
+            // In case of null move search, use previous static eval with a different sign
             ss->staticEval = bestValue =
               (ss - 1)->currentMove != MOVE_NULL ? evaluate(pos) : -(ss - 1)->staticEval;
 
@@ -1473,7 +1487,7 @@ Value qsearch(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth) {
     // will be generated.
     Square prevSq = is_ok((ss - 1)->currentMove) ? to_sq((ss - 1)->currentMove) : SQ_NONE;
     MovePicker mp(pos, ttMove, depth, &thisThread->mainHistory, &thisThread->captureHistory,
-                  contHist, thisThread->pawnHistory, prevSq);
+                  contHist, &thisThread->pawnHistory);
 
     int quietCheckEvasions = 0;
 
@@ -1673,6 +1687,7 @@ void update_all_stats(const Position& pos,
     PieceType captured;
 
     int quietMoveBonus = stat_bonus(depth + 1);
+    int quietMoveMalus = stat_malus(depth);
 
     if (!pos.capture_stage(bestMove))
     {
@@ -1684,15 +1699,18 @@ void update_all_stats(const Position& pos,
         thisThread->pawnHistory[pawn_structure(pos)][moved_piece][to_sq(bestMove)]
           << quietMoveBonus;
 
+        int moveMalus = bestValue > beta + 168 ? quietMoveMalus      // larger malus
+                                               : stat_malus(depth);  // smaller malus
+
         // Decrease stats for all non-best quiet moves
         for (int i = 0; i < quietCount; ++i)
         {
             thisThread->pawnHistory[pawn_structure(pos)][pos.moved_piece(quietsSearched[i])]
                                    [to_sq(quietsSearched[i])]
-              << -bestMoveBonus;
-            thisThread->mainHistory[us][from_to(quietsSearched[i])] << -bestMoveBonus;
-            update_continuation_histories(ss, pos.moved_piece(quietsSearched[i]),
-                                          to_sq(quietsSearched[i]), -bestMoveBonus);
+              << -moveMalus;
+            thisThread->mainHistory[us][from_to(quietsSearched[i])] << -moveMalus;
+            update_continuation_histories(ss, pos.moved_piece(quietsSearched[i]),
+                                          to_sq(quietsSearched[i]), -moveMalus);
         }
     }
     else
@@ -1708,20 +1726,20 @@ void update_all_stats(const Position& pos,
         && ((ss - 1)->moveCount == 1 + (ss - 1)->ttHit
             || ((ss - 1)->currentMove == (ss - 1)->killers[0]))
         && !pos.captured_piece())
-        update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq, -quietMoveBonus);
+        update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq, -quietMoveMalus);
 
     // Decrease stats for all non-best capture moves
     for (int i = 0; i < captureCount; ++i)
    {
        moved_piece = pos.moved_piece(capturesSearched[i]);
        captured    = type_of(pos.piece_on(to_sq(capturesSearched[i])));
-        captureHistory[moved_piece][to_sq(capturesSearched[i])][captured] << -quietMoveBonus;
+        captureHistory[moved_piece][to_sq(capturesSearched[i])][captured] << -quietMoveMalus;
    }
 }
 
 
 // Updates histories of the move pairs formed
-// by moves at ply -1, -2, -4, and -6 with current move.
+// by moves at ply -1, -2, -3, -4, and -6 with current move.
 void update_continuation_histories(Stack* ss, Piece pc, Square to, int bonus) {
 
     for (int i : {1, 2, 3, 4, 6})
@@ -1880,9 +1898,9 @@ string UCI::pv(const Position& pos, Depth depth) {
 }
 
 
-// Called in case we have no ponder move
-// before exiting the search, for instance, in case we stop the search during a
-// fail high at root. We try hard to have a ponder move to return to the GUI,
+// Called in case we have no ponder move before exiting the search,
+// for instance, in case we stop the search during a fail high at root.
+// We try hard to have a ponder move to return to the GUI,
 // otherwise in case of 'ponder on' we have nothing to think about.
 bool RootMove::extract_ponder_from_tt(Position& pos) {
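
As a standalone illustration of the bonus/malus split above (not part of the patch), the two formulas from the @@ -94 hunk can be tabulated to see how the penalty now grows and saturates separately from the bonus. Only the two formulas are taken from the diff; the surrounding main() and printing are illustrative.

// Sketch of the stat_bonus / stat_malus pair introduced in this diff.
// The formulas are copied from the patch; everything else is illustrative only.
#include <algorithm>
#include <cstdio>

int stat_bonus(int d) { return std::min(291 * d - 350, 1200); }
int stat_malus(int d) { return std::min(361 * d - 361, 1182); }

int main() {
    for (int d = 1; d <= 8; ++d)
        std::printf("depth %d: bonus %5d  malus %5d\n", d, stat_bonus(d), stat_malus(d));
}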
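
For the aspiration-window retune in the @@ -364 hunk, here is a minimal sketch of how the initial window and the side-to-move optimism follow from a root move's average score, using only the constants visible in the diff. The clamping to VALUE_INFINITE and the fail-high/fail-low re-search loop are omitted, and the sample averages are made up for illustration.

// Illustrative only: derive the initial aspiration window and optimism
// from an average root score, using the constants from the @@ -364 hunk.
#include <cstdio>
#include <cstdlib>
#include <initializer_list>

int main() {
    for (int avg : {0, 50, 150, 400}) {
        int delta    = 10 + avg * avg / 15335;              // new window seed
        int alpha    = avg - delta;
        int beta     = avg + delta;
        int optimism = 110 * avg / (std::abs(avg) + 121);   // side-to-move optimism
        std::printf("avg %4d -> window [%d, %d], optimism %d\n", avg, alpha, beta, optimism);
    }
}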
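
The LMR hunk swaps std::clamp for a max/min form because std::clamp(v, lo, hi) is undefined when hi < lo, which is exactly the "max value less than the min value" case the new comment mentions (here, newDepth + 1 falling below 1, presumably after negative extensions shrink newDepth). Below is a hedged sketch of that replacement; the sample inputs are hypothetical and only chosen to show the bounds interacting.

// Sketch of the std::clamp replacement from the LMR hunk: max/min stays
// well defined even when the upper bound (newDepth + 1) drops below the
// lower bound (1), where std::clamp would be undefined behavior.
#include <algorithm>
#include <cstdio>

int clamped_lmr_depth(int newDepth, int r) {
    return std::max(1, std::min(newDepth - r, newDepth + 1));
}

int main() {
    std::printf("%d\n", clamped_lmr_depth(5, 2));   // 3: inside [1, 6]
    std::printf("%d\n", clamped_lmr_depth(5, -3));  // 6: capped at newDepth + 1
    std::printf("%d\n", clamped_lmr_depth(-1, 1));  // 1: lower bound wins although hi < lo
}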