X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=src%2Fsearch.cpp;h=160031384838abdc9ba8b5c882dc754a04bb5cd0;hb=15d47a2b3821b92c4d048f39f7f43c301299d365;hp=43d78892f60d7e6374a9e5c9dcd410cb5b3676a9;hpb=b7b7800e2b752c93131e03c31d7456f18b392a7c;p=stockfish diff --git a/src/search.cpp b/src/search.cpp index 43d78892..16003138 100644 --- a/src/search.cpp +++ b/src/search.cpp @@ -77,7 +77,7 @@ enum NodeType { // Futility margin Value futility_margin(Depth d, bool noTtCutNode, bool improving) { - return Value((126 - 42 * noTtCutNode) * (d - improving)); + return Value((125 - 43 * noTtCutNode) * (d - improving)); } // Reductions lookup table initialized at startup @@ -85,8 +85,8 @@ int Reductions[MAX_MOVES]; // [depth or moveNumber] Depth reduction(bool i, Depth d, int mn, Value delta, Value rootDelta) { int reductionScale = Reductions[d] * Reductions[mn]; - return (reductionScale + 1560 - int(delta) * 945 / int(rootDelta)) / 1024 - + (!i && reductionScale > 791); + return (reductionScale + 1487 - int(delta) * 976 / int(rootDelta)) / 1024 + + (!i && reductionScale > 808); } constexpr int futility_move_count(bool improving, Depth depth) { @@ -94,7 +94,10 @@ constexpr int futility_move_count(bool improving, Depth depth) { } // History and stats update bonus, based on depth -int stat_bonus(Depth d) { return std::min(334 * d - 531, 1538); } +int stat_bonus(Depth d) { return std::min(291 * d - 350, 1200); } + +// History and stats update malus, based on depth +int stat_malus(Depth d) { return std::min(361 * d - 361, 1182); } // Add a small random component to draw evaluations to avoid 3-fold blindness Value value_draw(const Thread* thisThread) { @@ -148,7 +151,7 @@ void update_all_stats(const Position& pos, int captureCount, Depth depth); -// perft() is our utility to verify move generation. All the leaf nodes up +// Utility to verify move generation. All the leaf nodes up // to the given depth are generated and counted, and the sum is returned. template uint64_t perft(Position& pos, Depth depth) { @@ -179,8 +182,7 @@ uint64_t perft(Position& pos, Depth depth) { } // namespace -// Search::init() is called at startup to initialize various lookup tables - +// Called at startup to initialize various lookup tables void Search::init() { for (int i = 1; i < MAX_MOVES; ++i) @@ -188,8 +190,7 @@ void Search::init() { } -// Search::clear() resets search state to its initial value - +// Resets search state to its initial value void Search::clear() { Threads.main()->wait_for_search_finished(); @@ -201,9 +202,8 @@ void Search::clear() { } -// MainThread::search() is started when the program receives the UCI 'go' +// Called when the program receives the UCI 'go' // command. It searches from the root position and outputs the "bestmove". - void MainThread::search() { if (Limits.perft) @@ -277,15 +277,14 @@ void MainThread::search() { } -// Thread::search() is the main iterative deepening loop. It calls search() +// Main iterative deepening loop. It calls search() // repeatedly with increasing depth until the allocated thinking time has been // consumed, the user stops the search, or the maximum search depth is reached. - void Thread::search() { - // Allocate stack with extra size to allow access from (ss-7) to (ss+2): - // (ss-7) is needed for update_continuation_histories(ss-1) which accesses (ss-6), - // (ss+2) is needed for initialization of statScore and killers. 
+ // Allocate stack with extra size to allow access from (ss - 7) to (ss + 2): + // (ss - 7) is needed for update_continuation_histories(ss - 1) which accesses (ss - 6), + // (ss + 2) is needed for initialization of cutOffCnt and killers. Stack stack[MAX_PLY + 10], *ss = stack + 7; Move pv[MAX_PLY + 1]; Value alpha, beta, delta; @@ -367,14 +366,13 @@ void Thread::search() { selDepth = 0; // Reset aspiration window starting size - Value prev = rootMoves[pvIdx].averageScore; - delta = Value(10) + int(prev) * prev / 17470; - alpha = std::max(prev - delta, -VALUE_INFINITE); - beta = std::min(prev + delta, VALUE_INFINITE); - - // Adjust optimism based on root move's previousScore (~4 Elo) - int opt = 113 * prev / (std::abs(prev) + 109); - optimism[us] = Value(opt); + Value avg = rootMoves[pvIdx].averageScore; + delta = Value(10) + int(avg) * avg / 15335; + alpha = std::max(avg - delta, -VALUE_INFINITE); + beta = std::min(avg + delta, VALUE_INFINITE); + + // Adjust optimism based on root move's averageScore (~4 Elo) + optimism[us] = 110 * avg / (std::abs(avg) + 121); optimism[~us] = -optimism[us]; // Start with a small aspiration window and, in the case of a fail @@ -383,8 +381,8 @@ void Thread::search() { int failedHighCnt = 0; while (true) { - // Adjust the effective depth searched, but ensure at least one effective increment for every - // four searchAgain steps (see issue #2717). + // Adjust the effective depth searched, but ensure at least one effective increment + // for every four searchAgain steps (see issue #2717). Depth adjustedDepth = std::max(1, rootDepth - failedHighCnt - 3 * (searchAgainCounter + 1) / 4); bestValue = Stockfish::search(rootPos, ss, alpha, beta, adjustedDepth, false); @@ -471,15 +469,15 @@ void Thread::search() { // Do we have time for the next iteration? Can we stop searching now? if (Limits.use_time_management() && !Threads.stop && !mainThread->stopOnPonderhit) { - double fallingEval = (69 + 13 * (mainThread->bestPreviousAverageScore - bestValue) + double fallingEval = (66 + 14 * (mainThread->bestPreviousAverageScore - bestValue) + 6 * (mainThread->iterValue[iterIdx] - bestValue)) - / 619.6; + / 583.0; fallingEval = std::clamp(fallingEval, 0.5, 1.5); // If the bestMove is stable over several iterations, reduce time accordingly - timeReduction = lastBestMoveDepth + 8 < completedDepth ? 1.57 : 0.65; - double reduction = (1.4 + mainThread->previousTimeReduction) / (2.08 * timeReduction); - double bestMoveInstability = 1 + 1.8 * totBestMoveChanges / Threads.size(); + timeReduction = lastBestMoveDepth + 8 < completedDepth ? 1.56 : 0.69; + double reduction = (1.4 + mainThread->previousTimeReduction) / (2.03 * timeReduction); + double bestMoveInstability = 1 + 1.79 * totBestMoveChanges / Threads.size(); double totalTime = Time.optimum() * fallingEval * reduction * bestMoveInstability; @@ -521,8 +519,7 @@ void Thread::search() { namespace { -// search<>() is the main search function for both PV and non-PV nodes - +// Main search function for both PV and non-PV nodes template Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, bool cutNode) { @@ -587,7 +584,7 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo : value_draw(pos.this_thread()); // Step 3. Mate distance pruning. 
Even if we mate at the next move our score - // would be at best mate_in(ss->ply+1), but if alpha is already bigger because + // would be at best mate_in(ss->ply + 1), but if alpha is already bigger because // a shorter mate was found upward in the tree then there is no need to search // because we will never beat the current alpha. Same logic but with reversed // signs apply also in the opposite condition of being mated instead of giving @@ -638,15 +635,16 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo if (!ttCapture) update_quiet_stats(pos, ss, ttMove, stat_bonus(depth)); - // Extra penalty for early quiet moves of the previous ply (~0 Elo on STC, ~2 Elo on LTC) + // Extra penalty for early quiet moves of + // the previous ply (~0 Elo on STC, ~2 Elo on LTC). if (prevSq != SQ_NONE && (ss - 1)->moveCount <= 2 && !priorCapture) update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq, - -stat_bonus(depth + 1)); + -stat_malus(depth + 1)); } // Penalty for a quiet ttMove that fails low (~1 Elo) else if (!ttCapture) { - int penalty = -stat_bonus(depth); + int penalty = -stat_malus(depth); thisThread->mainHistory[us][from_to(ttMove)] << penalty; update_continuation_histories(ss, pos.moved_piece(ttMove), to_sq(ttMove), penalty); } @@ -720,7 +718,8 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo } else if (excludedMove) { - // Providing the hint that this node's accumulator will be used often brings significant Elo gain (~13 Elo) + // Providing the hint that this node's accumulator will be used often + // brings significant Elo gain (~13 Elo). Eval::NNUE::hint_common_parent_position(pos); eval = ss->staticEval; } @@ -747,8 +746,10 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo // Use static evaluation difference to improve quiet move ordering (~4 Elo) if (is_ok((ss - 1)->currentMove) && !(ss - 1)->inCheck && !priorCapture) { - int bonus = std::clamp(-18 * int((ss - 1)->staticEval + ss->staticEval), -1812, 1812); + int bonus = std::clamp(-14 * int((ss - 1)->staticEval + ss->staticEval), -1449, 1449); thisThread->mainHistory[~us][from_to((ss - 1)->currentMove)] << bonus; + if (type_of(pos.piece_on(prevSq)) != PAWN && type_of((ss - 1)->currentMove) != PROMOTION) + thisThread->pawnHistory[pawn_structure(pos)][pos.piece_on(prevSq)][prevSq] << bonus / 4; } // Set up the improving flag, which is true if current static evaluation is @@ -764,7 +765,7 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo // If eval is really low check with qsearch if it can exceed alpha, if it can't, // return a fail low. // Adjust razor margin according to cutoffCnt. (~1 Elo) - if (eval < alpha - 492 - (257 - 200 * ((ss + 1)->cutoffCnt > 3)) * depth * depth) + if (eval < alpha - 474 - (270 - 174 * ((ss + 1)->cutoffCnt > 3)) * depth * depth) { value = qsearch(pos, ss, alpha - 1, alpha); if (value < alpha) @@ -778,7 +779,7 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo - (ss - 1)->statScore / 321 >= beta && eval >= beta && eval < 29462 // smaller than TB wins - && !(!ttCapture && ttMove)) + && (!ttMove || ttCapture)) return eval; // Step 9. Null move search with verification search (~35 Elo) @@ -822,15 +823,18 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo } } - // Step 10. 
If the position doesn't have a ttMove, decrease depth by 2 - // (or by 4 if the TT entry for the current position was hit and the stored depth is greater than or equal to the current depth). - // Use qsearch if depth is equal or below zero (~9 Elo) + // Step 10. Internal iterative reductions (~9 Elo) + // For PV nodes without a ttMove, we decrease depth by 2, + // or by 4 if the current position is present in the TT and + // the stored depth is greater than or equal to the current depth. + // Use qsearch if depth <= 0. if (PvNode && !ttMove) depth -= 2 + 2 * (ss->ttHit && tte->depth() >= depth); if (depth <= 0) return qsearch(pos, ss, alpha, beta); + // For cutNodes without a ttMove, we decrease depth by 2 if depth is high enough. if (cutNode && depth >= 8 && !ttMove) depth -= 2; @@ -857,6 +861,9 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo { assert(pos.capture_stage(move)); + // Prefetch the TT entry for the resulting position + prefetch(TT.first_entry(pos.key_after(move))); + ss->currentMove = move; ss->continuationHistory = &thisThread @@ -906,7 +913,7 @@ moves_loop: // When in check, search starts here prevSq != SQ_NONE ? thisThread->counterMoves[pos.piece_on(prevSq)][prevSq] : MOVE_NONE; MovePicker mp(pos, ttMove, depth, &thisThread->mainHistory, &captureHistory, contHist, - countermove, ss->killers); + &thisThread->pawnHistory, countermove, ss->killers); value = bestValue; moveCountPruning = singularQuietLMR = false; @@ -972,13 +979,15 @@ moves_loop: // When in check, search starts here if (capture || givesCheck) { // Futility pruning for captures (~2 Elo) - if (!givesCheck && lmrDepth < 7 && !ss->inCheck - && ss->staticEval + 188 + 206 * lmrDepth + PieceValue[pos.piece_on(to_sq(move))] - + captureHistory[movedPiece][to_sq(move)] - [type_of(pos.piece_on(to_sq(move)))] - / 7 - < alpha) - continue; + if (!givesCheck && lmrDepth < 7 && !ss->inCheck) + { + Piece capturedPiece = pos.piece_on(to_sq(move)); + int futilityEval = + ss->staticEval + 239 + 291 * lmrDepth + PieceValue[capturedPiece] + + captureHistory[movedPiece][to_sq(move)][type_of(capturedPiece)] / 7; + if (futilityEval < alpha) + continue; + } // SEE based pruning for captures and checks (~11 Elo) if (!pos.see_ge(move, Value(-185) * depth)) @@ -988,25 +997,29 @@ moves_loop: // When in check, search starts here { int history = (*contHist[0])[movedPiece][to_sq(move)] + (*contHist[1])[movedPiece][to_sq(move)] - + (*contHist[3])[movedPiece][to_sq(move)]; + + (*contHist[3])[movedPiece][to_sq(move)] + + thisThread->pawnHistory[pawn_structure(pos)][movedPiece][to_sq(move)]; // Continuation history based pruning (~2 Elo) - if (lmrDepth < 6 && history < -3232 * depth) + if (lmrDepth < 6 && history < -3645 * depth) continue; history += 2 * thisThread->mainHistory[us][from_to(move)]; - lmrDepth += history / 5793; - lmrDepth = std::max(lmrDepth, -2); + lmrDepth += history / 7836; + lmrDepth = std::max(lmrDepth, -1); // Futility pruning: parent node (~13 Elo) - if (!ss->inCheck && lmrDepth < 13 && ss->staticEval + 115 + 122 * lmrDepth <= alpha) + if (!ss->inCheck && lmrDepth < 13 + && ss->staticEval + (bestValue < ss->staticEval - 62 ? 
123 : 77) + + 127 * lmrDepth + <= alpha) continue; lmrDepth = std::max(lmrDepth, 0); // Prune moves with negative SEE (~4 Elo) - if (!pos.see_ge(move, Value(-27 * lmrDepth * lmrDepth))) + if (!pos.see_ge(move, Value(-26 * lmrDepth * lmrDepth))) continue; } } @@ -1018,19 +1031,20 @@ moves_loop: // When in check, search starts here // Singular extension search (~94 Elo). If all moves but one fail low on a // search of (alpha-s, beta-s), and just one fails high on (alpha, beta), // then that move is singular and should be extended. To verify this we do - // a reduced search on all the other moves but the ttMove and if the result - // is lower than ttValue minus a margin, then we will extend the ttMove. Note - // that depth margin and singularBeta margin are known for having non-linear + // a reduced search on the position excluding the ttMove and if the result + // is lower than ttValue minus a margin, then we will extend the ttMove. + + // Note: the depth margin and singularBeta margin are known for having non-linear // scaling. Their values are optimized to time controls of 180+1.8 and longer - // so changing them requires tests at this type of time controls. - if (!rootNode + // so changing them requires tests at these types of time controls. + // Recursive singular search is avoided. + if (!rootNode && move == ttMove && !excludedMove && depth >= 4 - (thisThread->completedDepth > 24) + 2 * (PvNode && tte->is_pv()) - && move == ttMove && !excludedMove // Avoid recursive singular search && abs(ttValue) < VALUE_TB_WIN_IN_MAX_PLY && (tte->bound() & BOUND_LOWER) && tte->depth() >= depth - 3) { Value singularBeta = ttValue - (64 + 57 * (ss->ttPv && !PvNode)) * depth / 64; - Depth singularDepth = (depth - 1) / 2; + Depth singularDepth = newDepth / 2; ss->excludedMove = move; value = @@ -1051,22 +1065,28 @@ moves_loop: // When in check, search starts here } // Multi-cut pruning - // Our ttMove is assumed to fail high, and now we failed high also on a - // reduced search without the ttMove. So we assume this expected cut-node - // is not singular, that multiple moves fail high, and we can prune the - // whole subtree by returning a softbound. + // Our ttMove is assumed to fail high based on the bound of the TT entry, + // and if after excluding the ttMove with a reduced search we fail high over the original beta, + // we assume this expected cut-node is not singular (multiple moves fail high), + // and we can prune the whole subtree by returning a softbound. else if (singularBeta >= beta) return singularBeta; - // If the eval of ttMove is greater than beta, we reduce it (negative extension) (~7 Elo) + // Negative extensions + // If other moves failed high over (ttValue - margin) without the ttMove on a reduced search, + // but we cannot do multi-cut because (ttValue - margin) is lower than the original beta, + // we do not know if the ttMove is singular or can do a multi-cut, + // so we reduce the ttMove in favor of other moves based on some conditions: + + // If the ttMove is assumed to fail high over current beta (~7 Elo) else if (ttValue >= beta) extension = -2 - !PvNode; - // If we are on a cutNode, reduce it based on depth (negative extension) (~1 Elo) + // If we are on a cutNode but the ttMove is not assumed to fail high over current beta (~1 Elo) else if (cutNode) extension = depth < 19 ? 
-2 : -1; - // If the eval of ttMove is less than value, we reduce it (negative extension) (~1 Elo) + // If the ttMove is assumed to fail low over the value of the reduced search (~1 Elo) else if (ttValue <= value) extension = -1; } @@ -1079,6 +1099,12 @@ moves_loop: // When in check, search starts here else if (PvNode && move == ttMove && move == ss->killers[0] && (*contHist[0])[movedPiece][to_sq(move)] >= 4194) extension = 1; + + // Recapture extensions (~1 Elo) + else if (PvNode && move == ttMove && to_sq(move) == prevSq + && captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] + > 4000) + extension = 1; } // Add extension to new depth @@ -1116,7 +1142,7 @@ moves_loop: // When in check, search starts here if (PvNode) r--; - // Decrease reduction if ttMove has been singularly extended (~1 Elo) + // Decrease reduction if a quiet ttMove has been singularly extended (~1 Elo) if (singularQuietLMR) r--; @@ -1128,9 +1154,10 @@ moves_loop: // When in check, search starts here if ((ss + 1)->cutoffCnt > 3) r++; - // Decrease reduction for first generated move (ttMove) + // Set reduction to 0 for first picked move (ttMove) (~2 Elo) + // Nullifies all previous reduction adjustments to ttMove and leaves only history to do them else if (move == ttMove) - r--; + r = 0; ss->statScore = 2 * thisThread->mainHistory[us][from_to(move)] + (*contHist[0])[movedPiece][to_sq(move)] @@ -1138,19 +1165,21 @@ moves_loop: // When in check, search starts here + (*contHist[3])[movedPiece][to_sq(move)] - 3848; // Decrease/increase reduction for moves with a good/bad history (~25 Elo) - r -= ss->statScore / (10216 + 3855 * (depth > 5 && depth < 23)); + r -= ss->statScore / 14200; // Step 17. Late moves reduction / extension (LMR, ~117 Elo) // We use various heuristics for the sons of a node after the first son has // been searched. In general, we would like to reduce them, but there are many // cases where we extend a son if it has good chances to be "interesting". - if (depth >= 2 && moveCount > 1 + (PvNode && ss->ply <= 1) + if (depth >= 2 && moveCount > 1 + rootNode && (!ss->ttPv || !capture || (cutNode && (ss - 1)->moveCount > 1))) { // In general we want to cap the LMR depth search at newDepth, but when // reduction is negative, we allow this move a limited search extension // beyond the first move depth. This may lead to hidden double extensions. - Depth d = std::clamp(newDepth - r, 1, newDepth + 1); + // To prevent problems when the max value is less than the min value, + // std::clamp has been replaced by a more robust implementation. + Depth d = std::max(1, std::min(newDepth - r, newDepth + 1)); value = -search(pos, ss + 1, -(alpha + 1), -alpha, d, true); @@ -1159,18 +1188,15 @@ moves_loop: // When in check, search starts here { // Adjust full-depth search based on LMR results - if the result // was good enough search deeper, if it was bad enough search shallower. 
- const bool doDeeperSearch = value > (bestValue + 51 + 10 * (newDepth - d)); - const bool doEvenDeeperSearch = value > alpha + 700 && ss->doubleExtensions <= 6; - const bool doShallowerSearch = value < bestValue + newDepth; + const bool doDeeperSearch = value > (bestValue + 50 + 2 * newDepth); // (~1 Elo) + const bool doShallowerSearch = value < bestValue + newDepth; // (~2 Elo) - ss->doubleExtensions = ss->doubleExtensions + doEvenDeeperSearch; - - newDepth += doDeeperSearch - doShallowerSearch + doEvenDeeperSearch; + newDepth += doDeeperSearch - doShallowerSearch; if (newDepth > d) value = -search(pos, ss + 1, -(alpha + 1), -alpha, newDepth, !cutNode); - int bonus = value <= alpha ? -stat_bonus(newDepth) + int bonus = value <= alpha ? -stat_malus(newDepth) : value >= beta ? stat_bonus(newDepth) : 0; @@ -1181,7 +1207,7 @@ moves_loop: // When in check, search starts here // Step 18. Full-depth search when LMR is skipped else if (!PvNode || moveCount > 1) { - // Increase reduction for cut nodes and not ttMove (~1 Elo) + // Increase reduction for cut nodes without ttMove (~1 Elo) if (!ttMove && cutNode) r += 2; @@ -1316,8 +1342,8 @@ moves_loop: // When in check, search starts here // Bonus for prior countermove that caused the fail low else if (!priorCapture && prevSq != SQ_NONE) { - int bonus = (depth > 6) + (PvNode || cutNode) + (bestValue < alpha - 653) - + ((ss - 1)->moveCount > 11); + int bonus = (depth > 6) + (PvNode || cutNode) + (bestValue < alpha - 657) + + ((ss - 1)->moveCount > 10); update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq, stat_bonus(depth) * bonus); thisThread->mainHistory[~us][from_to((ss - 1)->currentMove)] @@ -1346,7 +1372,7 @@ moves_loop: // When in check, search starts here } -// qsearch() is the quiescence search function, which is called by the main search +// Quiescence search function, which is called by the main search // function with zero depth, or recursively with further decreasing depth per call. // (~155 Elo) template @@ -1399,9 +1425,7 @@ Value qsearch(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth) { assert(0 <= ss->ply && ss->ply < MAX_PLY); - // Decide whether or not to include checks: this fixes also the type of - // TT entry depth that we are going to use. Note that in qsearch we use - // only two types of depth in TT: DEPTH_QS_CHECKS or DEPTH_QS_NO_CHECKS. + // Decide the replacement and cutoff priority of the qsearch TT entries ttDepth = ss->inCheck || depth >= DEPTH_QS_CHECKS ? DEPTH_QS_CHECKS : DEPTH_QS_NO_CHECKS; // Step 3. Transposition table lookup @@ -1434,7 +1458,7 @@ Value qsearch(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth) { bestValue = ttValue; } else - // In case of null move search use previous static eval with a different sign + // In case of null move search, use previous static eval with a different sign ss->staticEval = bestValue = (ss - 1)->currentMove != MOVE_NULL ? evaluate(pos) : -(ss - 1)->staticEval; @@ -1463,7 +1487,7 @@ Value qsearch(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth) { // will be generated. Square prevSq = is_ok((ss - 1)->currentMove) ? 
to_sq((ss - 1)->currentMove) : SQ_NONE; MovePicker mp(pos, ttMove, depth, &thisThread->mainHistory, &thisThread->captureHistory, - contHist, prevSq); + contHist, &thisThread->pawnHistory); int quietCheckEvasions = 0; @@ -1593,10 +1617,9 @@ Value qsearch(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth) { } -// value_to_tt() adjusts a mate or TB score from "plies to mate from the root" +// Adjusts a mate or TB score from "plies to mate from the root" // to "plies to mate from the current position". Standard scores are unchanged. // The function is called before storing a value in the transposition table. - Value value_to_tt(Value v, int ply) { assert(v != VALUE_NONE); @@ -1605,12 +1628,11 @@ Value value_to_tt(Value v, int ply) { } -// value_from_tt() is the inverse of value_to_tt(): it adjusts a mate or TB score +// Inverse of value_to_tt(): it adjusts a mate or TB score // from the transposition table (which refers to the plies to mate/be mated from // current position) to "plies to mate/be mated (TB win/loss) from the root". // However, to avoid potentially false mate scores related to the 50 moves rule // and the graph history interaction problem, we return an optimal TB score instead. - Value value_from_tt(Value v, int ply, int r50c) { if (v == VALUE_NONE) @@ -1636,8 +1658,7 @@ Value value_from_tt(Value v, int ply, int r50c) { } -// update_pv() adds current move and appends child pv[] - +// Adds current move and appends child pv[] void update_pv(Move* pv, Move move, const Move* childPv) { for (*pv++ = move; childPv && *childPv != MOVE_NONE;) @@ -1646,8 +1667,7 @@ void update_pv(Move* pv, Move move, const Move* childPv) { } -// update_all_stats() updates stats at the end of search() when a bestMove is found - +// Updates stats at the end of search() when a bestMove is found void update_all_stats(const Position& pos, Stack* ss, Move bestMove, @@ -1667,6 +1687,7 @@ void update_all_stats(const Position& pos, PieceType captured; int quietMoveBonus = stat_bonus(depth + 1); + int quietMoveMalus = stat_malus(depth); if (!pos.capture_stage(bestMove)) { @@ -1675,13 +1696,21 @@ void update_all_stats(const Position& pos, // Increase stats for the best move in case it was a quiet move update_quiet_stats(pos, ss, bestMove, bestMoveBonus); + thisThread->pawnHistory[pawn_structure(pos)][moved_piece][to_sq(bestMove)] + << quietMoveBonus; + + int moveMalus = bestValue > beta + 168 ? 
quietMoveMalus // larger malus + : stat_malus(depth); // smaller malus // Decrease stats for all non-best quiet moves for (int i = 0; i < quietCount; ++i) { - thisThread->mainHistory[us][from_to(quietsSearched[i])] << -bestMoveBonus; + thisThread->pawnHistory[pawn_structure(pos)][pos.moved_piece(quietsSearched[i])] + [to_sq(quietsSearched[i])] + << -moveMalus; + thisThread->mainHistory[us][from_to(quietsSearched[i])] << -moveMalus; update_continuation_histories(ss, pos.moved_piece(quietsSearched[i]), - to_sq(quietsSearched[i]), -bestMoveBonus); + to_sq(quietsSearched[i]), -moveMalus); } } else @@ -1697,21 +1726,20 @@ void update_all_stats(const Position& pos, && ((ss - 1)->moveCount == 1 + (ss - 1)->ttHit || ((ss - 1)->currentMove == (ss - 1)->killers[0])) && !pos.captured_piece()) - update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq, -quietMoveBonus); + update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq, -quietMoveMalus); // Decrease stats for all non-best capture moves for (int i = 0; i < captureCount; ++i) { moved_piece = pos.moved_piece(capturesSearched[i]); captured = type_of(pos.piece_on(to_sq(capturesSearched[i]))); - captureHistory[moved_piece][to_sq(capturesSearched[i])][captured] << -quietMoveBonus; + captureHistory[moved_piece][to_sq(capturesSearched[i])][captured] << -quietMoveMalus; } } -// update_continuation_histories() updates histories of the move pairs formed -// by moves at ply -1, -2, -4, and -6 with current move. - +// Updates histories of the move pairs formed +// by moves at ply -1, -2, -3, -4, and -6 with current move. void update_continuation_histories(Stack* ss, Piece pc, Square to, int bonus) { for (int i : {1, 2, 3, 4, 6}) @@ -1725,8 +1753,7 @@ void update_continuation_histories(Stack* ss, Piece pc, Square to, int bonus) { } -// update_quiet_stats() updates move sorting heuristics - +// Updates move sorting heuristics void update_quiet_stats(const Position& pos, Stack* ss, Move move, int bonus) { // Update killers @@ -1751,7 +1778,6 @@ void update_quiet_stats(const Position& pos, Stack* ss, Move move, int bonus) { // When playing with strength handicap, choose the best move among a set of RootMoves // using a statistical rule dependent on 'level'. Idea by Heinz van Saanen. - Move Skill::pick_best(size_t multiPV) { const RootMoves& rootMoves = Threads.main()->rootMoves; @@ -1786,9 +1812,8 @@ Move Skill::pick_best(size_t multiPV) { } // namespace -// MainThread::check_time() is used to print debug info and, more importantly, +// Used to print debug info and, more importantly, // to detect when we are out of available time and thus stop the search. - void MainThread::check_time() { if (--callsCnt > 0) @@ -1819,9 +1844,8 @@ void MainThread::check_time() { } -// UCI::pv() formats PV information according to the UCI protocol. UCI requires +// Formats PV information according to the UCI protocol. UCI requires // that all (if any) unsearched PV lines are sent using a previous search score. - string UCI::pv(const Position& pos, Depth depth) { std::stringstream ss; @@ -1874,11 +1898,10 @@ string UCI::pv(const Position& pos, Depth depth) { } -// RootMove::extract_ponder_from_tt() is called in case we have no ponder move -// before exiting the search, for instance, in case we stop the search during a -// fail high at root. We try hard to have a ponder move to return to the GUI, +// Called in case we have no ponder move before exiting the search, +// for instance, in case we stop the search during a fail high at root. 
+// We try hard to have a ponder move to return to the GUI,
 // otherwise in case of 'ponder on' we have nothing to think about.
-
 bool RootMove::extract_ponder_from_tt(Position& pos) {
 
     StateInfo st;
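
Editor's note (not part of the patch): the depth-scaling helpers retuned above are small enough to evaluate in isolation. The standalone C++ sketch below copies the new futility_margin(), stat_bonus() and stat_malus() formulas verbatim from the hunks above, with Stockfish's Value and Depth types reduced to plain int purely for illustration; it only shows how the newly separated malus saturates one ply earlier than the bonus.

// Standalone sketch: depth scaling of the retuned helpers (not part of the patch).
// Value/Depth are plain ints here; the constants are copied from the hunks above.
#include <algorithm>
#include <cstdio>

int futility_margin(int d, bool noTtCutNode, bool improving) {
    return (125 - 43 * noTtCutNode) * (d - improving);
}

int stat_bonus(int d) { return std::min(291 * d - 350, 1200); }
int stat_malus(int d) { return std::min(361 * d - 361, 1182); }

int main() {
    for (int d = 1; d <= 8; ++d)
        std::printf("d=%d  margin=%4d  bonus=%5d  malus=%5d\n",
                    d, futility_margin(d, false, false), stat_bonus(d), stat_malus(d));
    // The bonus caps at 1200 from depth 6; the malus caps at 1182 already from depth 5.
    return 0;
}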
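A similar standalone sketch for the retuned aspiration-window setup in Thread::search(): delta, the initial [alpha, beta] window and the optimism term are computed exactly as in the patch from the root move's averageScore. VALUE_INFINITE is assumed here to match Stockfish's 32001; all values are in internal evaluation units.

// Standalone sketch: initial aspiration window and optimism as a function of
// the root move's averageScore (formulas copied from Thread::search() above).
#include <algorithm>
#include <cstdio>
#include <cstdlib>

int main() {
    const int VALUE_INFINITE = 32001;  // assumed to match Stockfish's constant
    for (int avg : {-300, 0, 100, 300, 600, 1200}) {
        int delta    = 10 + avg * avg / 15335;
        int alpha    = std::max(avg - delta, -VALUE_INFINITE);
        int beta     = std::min(avg + delta, VALUE_INFINITE);
        int optimism = 110 * avg / (std::abs(avg) + 121);
        std::printf("avg=%5d  window=[%6d, %6d]  optimism=%4d\n", avg, alpha, beta, optimism);
    }
    return 0;
}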
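Finally, a minimal sketch of why the LMR depth computation swaps std::clamp for a max/min composition, as the new comment in the patch hints: std::clamp(v, lo, hi) is undefined behaviour whenever hi < lo, i.e. whenever newDepth + 1 drops below the lower bound of 1, while the rewritten form simply returns 1. The helper name clamped_lmr_depth is hypothetical and exists only for this demonstration.

// Standalone sketch: the rewritten LMR depth clamp. std::clamp(v, lo, hi)
// requires lo <= hi, which the old form could violate when newDepth + 1 < 1;
// the max/min composition used in the patch degrades gracefully to 1 instead.
#include <algorithm>
#include <cstdio>

// Hypothetical helper mirroring the expression from the patch.
int clamped_lmr_depth(int newDepth, int r) {
    return std::max(1, std::min(newDepth - r, newDepth + 1));
}

int main() {
    std::printf("%d\n", clamped_lmr_depth(10, 3));   // 7: normal reduction
    std::printf("%d\n", clamped_lmr_depth(10, -2));  // 11: capped at newDepth + 1
    std::printf("%d\n", clamped_lmr_depth(-1, 0));   // 1: std::clamp(-1, 1, 0) would be UB
    return 0;
}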