X-Git-Url: https://git.sesse.net/?p=stockfish;a=blobdiff_plain;f=src%2Fsearch.cpp;h=fef1b518160abf00e7e6e4ac14a7e656d2ed4046;hp=5306bb9a0084b2ee50ae7b4fc83cbd7324ba2bd0;hb=d558f8a673b56b32ab6da8050f41b9e02fe1758b;hpb=785b70809783430ff1e0bf856dac3b9bfa6fe826

diff --git a/src/search.cpp b/src/search.cpp
index 5306bb9a..fef1b518 100644
--- a/src/search.cpp
+++ b/src/search.cpp
@@ -152,7 +152,7 @@ namespace {
 void Search::init() {

   for (int i = 1; i < MAX_MOVES; ++i)
-      Reductions[i] = int(21.3 * std::log(i + 0.25 * std::log(i)));
+      Reductions[i] = int(21.9 * std::log(i));
 }


@@ -312,19 +312,7 @@ void Thread::search() {
   multiPV = std::min(multiPV, rootMoves.size());
   ttHitAverage = TtHitAverageWindow * TtHitAverageResolution / 2;

-  int ct = int(Options["Contempt"]) * PawnValueEg / 100; // From centipawns
-
-  // In analysis mode, adjust contempt in accordance with user preference
-  if (Limits.infinite || Options["UCI_AnalyseMode"])
-      ct =  Options["Analysis Contempt"] == "Off"  ? 0
-          : Options["Analysis Contempt"] == "Both" ? ct
-          : Options["Analysis Contempt"] == "White" && us == BLACK ? -ct
-          : Options["Analysis Contempt"] == "Black" && us == WHITE ? -ct
-          : ct;
-
-  // Evaluation score is from the white point of view
-  contempt = (us == WHITE ?  make_score(ct, ct / 2)
-                          : -make_score(ct, ct / 2));
+  trend = SCORE_ZERO;

   int searchAgainCounter = 0;

@@ -370,11 +358,11 @@ void Thread::search() {
               alpha = std::max(prev - delta,-VALUE_INFINITE);
               beta  = std::min(prev + delta, VALUE_INFINITE);

-              // Adjust contempt based on root move's previousScore (dynamic contempt)
-              int dct = ct + (113 - ct / 2) * prev / (abs(prev) + 147);
+              // Adjust trend based on root move's previousScore (dynamic contempt)
+              int tr = 113 * prev / (abs(prev) + 147);

-              contempt = (us == WHITE ?  make_score(dct, dct / 2)
-                                      : -make_score(dct, dct / 2));
+              trend = (us == WHITE ?  make_score(tr, tr / 2)
+                                   : -make_score(tr, tr / 2));
           }

           // Start with a small aspiration window and, in the case of a fail
@@ -610,10 +598,11 @@ namespace {

     assert(0 <= ss->ply && ss->ply < MAX_PLY);

-    (ss+1)->ttPv = false;
+    (ss+1)->ttPv         = false;
     (ss+1)->excludedMove = bestMove = MOVE_NONE;
-    (ss+2)->killers[0] = (ss+2)->killers[1] = MOVE_NONE;
-    Square prevSq = to_sq((ss-1)->currentMove);
+    (ss+2)->killers[0]   = (ss+2)->killers[1] = MOVE_NONE;
+    ss->doubleExtensions = (ss-1)->doubleExtensions;
+    Square prevSq        = to_sq((ss-1)->currentMove);

     // Initialize statScore to zero for the grandchildren of the current position.
     // So statScore is shared between all grandchildren and only the first grandchild
@@ -771,6 +760,7 @@ namespace {
             ss->staticEval = eval = -(ss-1)->staticEval;

         // Save static evaluation into transposition table
+        if(!excludedMove)
         tte->save(posKey, VALUE_NONE, ss->ttPv, BOUND_NONE, DEPTH_NONE, MOVE_NONE, eval);
     }

@@ -791,7 +781,6 @@ namespace {

     // Step 7. Futility pruning: child node (~50 Elo)
     if (   !PvNode
-        && depth < 9
         && eval - futility_margin(depth, improving) >= beta
         && eval < VALUE_KNOWN_WIN) // Do not return unproven wins
         return eval;
@@ -953,6 +942,7 @@ moves_loop: // When in check, search starts from here

     value = bestValue;
     singularQuietLMR = moveCountPruning = false;
+    bool doubleExtension = false;

     // Indicate PvNodes that will probably fail low if the node was searched
     // at a depth equal or greater than the current depth, and the result of this search was a fail low.
@@ -1032,8 +1022,7 @@ moves_loop: // When in check, search starts from here
                   continue;

               // Futility pruning: parent node (~5 Elo)
-              if (   lmrDepth < 7
-                  && !ss->inCheck
+              if (   !ss->inCheck
                   && ss->staticEval + 174 + 157 * lmrDepth <= alpha
                   &&  (*contHist[0])[movedPiece][to_sq(move)]
                     + (*contHist[1])[movedPiece][to_sq(move)]
@@ -1074,8 +1063,15 @@ moves_loop: // When in check, search starts from here
           {
               extension = 1;
               singularQuietLMR = !ttCapture;
-              if (!PvNode && value < singularBeta - 93)
+
+              // Avoid search explosion by limiting the number of double extensions to at most 3
+              if (   !PvNode
+                  && value < singularBeta - 93
+                  && ss->doubleExtensions < 3)
+              {
                   extension = 2;
+                  doubleExtension = true;
+              }
           }

           // Multi-cut pruning
@@ -1105,6 +1101,7 @@ moves_loop: // When in check, search starts from here

       // Add extension to new depth
       newDepth += extension;
+      ss->doubleExtensions = (ss-1)->doubleExtensions + (extension == 2);

       // Speculative prefetch as early as possible
       prefetch(TT.first_entry(pos.key_after(move)));
@@ -1147,7 +1144,6 @@ moves_loop: // When in check, search starts from here

           // Increase reduction at root and non-PV nodes when the best move does not change frequently
           if (   (rootNode || !PvNode)
-              && thisThread->rootDepth > 10
               && thisThread->bestMoveChanges <= 2)
               r++;

@@ -1160,8 +1156,8 @@ moves_loop: // When in check, search starts from here
               r--;

           // Increase reduction for cut nodes (~3 Elo)
-          if (cutNode)
-              r += 1 + !captureOrPromotion;
+          if (cutNode && move != ss->killers[0])
+              r += 2;

           if (!captureOrPromotion)
           {
@@ -1182,8 +1178,8 @@ moves_loop: // When in check, search starts from here

           // In general we want to cap the LMR depth search at newDepth. But if
           // reductions are really negative and movecount is low, we allow this move
-          // to be searched deeper than the first move.
-          Depth d = std::clamp(newDepth - r, 1, newDepth + (r < -1 && moveCount <= 5));
+          // to be searched deeper than the first move, unless ttMove was extended by 2.
+          Depth d = std::clamp(newDepth - r, 1, newDepth + (r < -1 && moveCount <= 5 && !doubleExtension));

           value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, d, true);
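
Note on the numeric changes above: the short standalone C++ sketch below is not part of the patch, and the helper names reduction_entry and trend_bonus are invented for illustration. It only reproduces the arithmetic of two touched lines, tabulating the simplified reduction formula int(21.9 * std::log(i)) and the dynamic trend term 113 * prev / (abs(prev) + 147) for a few sample inputs, to show how the trend bonus saturates toward +/-113 as the previous root score grows. In the engine the trend value then feeds make_score(tr, tr / 2), so the midgame component is weighted twice as strongly as the endgame one.

// Standalone illustration of two formulas touched by this diff; only the
// arithmetic mirrors the patched lines in search.cpp, the function names
// below are made up for this example.
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <initializer_list>

// New reduction table entry: Reductions[i] = int(21.9 * std::log(i))
int reduction_entry(int i) {
    return int(21.9 * std::log(i));
}

// Dynamic trend term derived from the previous root score (internal units):
// tr = 113 * prev / (abs(prev) + 147), saturating toward +/-113
int trend_bonus(int prev) {
    return 113 * prev / (std::abs(prev) + 147);
}

int main() {
    std::printf("moveCount -> base reduction units\n");
    for (int i : {1, 2, 4, 8, 16, 32, 64})
        std::printf("%3d -> %3d\n", i, reduction_entry(i));

    std::printf("\npreviousScore -> trend midgame component\n");
    for (int prev : {-400, -100, 0, 100, 400, 1000})
        std::printf("%5d -> %4d\n", prev, trend_bonus(prev));

    return 0;
}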