From 42b7dbcb5e20ae9015122601522be8b455787a4a Mon Sep 17 00:00:00 2001
From: xoto10
Date: Sat, 13 Jun 2020 09:54:07 +0100
Subject: [PATCH 1/1] Tuned values for search constants

Tuned search constants after many search patches since the last
successful tune.

1st LTC @ 60+0.6 th 1 :
LLR: 2.97 (-2.94,2.94) {0.25,1.75}
Total: 57656 W: 7369 L: 7036 D: 43251
Ptnml(0-2): 393, 5214, 17336, 5437, 448
https://tests.stockfishchess.org/tests/view/5ee1e074f29b40b0fc95af19

SMP LTC @ 20+0.2 th 8 :
LLR: 2.95 (-2.94,2.94) {0.25,1.75}
Total: 83576 W: 9731 L: 9341 D: 64504
Ptnml(0-2): 464, 7062, 26369, 7406, 487
https://tests.stockfishchess.org/tests/view/5ee35a21f29b40b0fc95b008

The changes were rebased on top of a successful patch by Viz (see #2734),
and two different ways of doing this were tested. The successful test
modified the constants in the patch by Viz in a similar manner to the
tuning run:

LTC (rebased) @ 60+0.6 th 1 :
LLR: 2.94 (-2.94,2.94) {0.25,1.75}
Total: 193384 W: 24241 L: 23521 D: 145622
Ptnml(0-2): 1309, 17497, 58472, 17993, 1421
https://tests.stockfishchess.org/tests/view/5ee43319ca6c451633a995f9

Further work: the recent patch to quantize the eval (#2733) affects
search quite a bit, so doing another tune in, say, three months' time
might be a good idea.

closes https://github.com/official-stockfish/Stockfish/pull/2735

Bench 4246971
---
 src/search.cpp | 58 +++++++++++++++++++++++++-------------------------
 1 file changed, 29 insertions(+), 29 deletions(-)

diff --git a/src/search.cpp b/src/search.cpp
index f5887f3f..5ad650d2 100644
--- a/src/search.cpp
+++ b/src/search.cpp
@@ -65,9 +65,9 @@ namespace {
   constexpr uint64_t TtHitAverageResolution = 1024;
 
   // Razor and futility margins
-  constexpr int RazorMargin = 531;
+  constexpr int RazorMargin = 516;
   Value futility_margin(Depth d, bool improving) {
-    return Value(217 * (d - improving));
+    return Value(224 * (d - improving));
   }
 
   // Reductions lookup table, initialized at startup
@@ -75,16 +75,16 @@ namespace {
 
   Depth reduction(bool i, Depth d, int mn) {
     int r = Reductions[d] * Reductions[mn];
-    return (r + 511) / 1024 + (!i && r > 1007);
+    return (r + 529) / 1024 + (!i && r > 1050);
   }
 
   constexpr int futility_move_count(bool improving, Depth depth) {
-    return (4 + depth * depth) / (2 - improving);
+    return (3 + depth * depth) / (2 - improving);
   }
 
   // History and stats update bonus, based on depth
   int stat_bonus(Depth d) {
-    return d > 15 ? -8 : 19 * d * d + 155 * d - 132;
+    return d > 15 ? 28 : 19 * d * d + 135 * d - 136;
   }
 
   // Add a small random component to draw evaluations to avoid 3fold-blindness
@@ -194,7 +194,7 @@ namespace {
 void Search::init() {
 
   for (int i = 1; i < MAX_MOVES; ++i)
-      Reductions[i] = int((24.8 + std::log(Threads.size())) * std::log(i));
+      Reductions[i] = int((24.9 + std::log(Threads.size())) * std::log(i));
 }
 
 
@@ -408,7 +408,7 @@ void Thread::search() {
           beta  = std::min(prev + delta, VALUE_INFINITE);
 
           // Adjust contempt based on root move's previousScore (dynamic contempt)
-          int dct = ct + (102 - ct / 2) * prev / (abs(prev) + 157);
+          int dct = ct + (104 - ct / 2) * prev / (abs(prev) + 143);
 
           contempt = (us == WHITE ?  make_score(dct, dct / 2)
                                  : -make_score(dct, dct / 2));
@@ -506,13 +506,13 @@ void Thread::search() {
           && !Threads.stop
           && !mainThread->stopOnPonderhit)
       {
-          double fallingEval = (332 + 6 * (mainThread->bestPreviousScore - bestValue)
-                                    + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 704.0;
+          double fallingEval = (293 + 6 * (mainThread->bestPreviousScore - bestValue)
+                                    + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 742.0;
           fallingEval = Utility::clamp(fallingEval, 0.5, 1.5);
 
           // If the bestMove is stable over several iterations, reduce time accordingly
-          timeReduction = lastBestMoveDepth + 9 < completedDepth ? 1.94 : 0.91;
-          double reduction = (1.41 + mainThread->previousTimeReduction) / (2.27 * timeReduction);
+          timeReduction = lastBestMoveDepth + 10 < completedDepth ? 1.93 : 0.96;
+          double reduction = (1.36 + mainThread->previousTimeReduction) / (2.21 * timeReduction);
 
           // Use part of the gained time from a previous stable move for the current move
           for (Thread* th : Threads)
@@ -537,7 +537,7 @@ void Thread::search() {
           }
           else if (   Threads.increaseDepth
                    && !mainThread->ponder
-                   && Time.elapsed() > totalTime * 0.6)
+                   && Time.elapsed() > totalTime * 0.57)
                    Threads.increaseDepth = false;
           else
                    Threads.increaseDepth = true;
       }
@@ -819,10 +819,10 @@ namespace {
     // Step 9. Null move search with verification search (~40 Elo)
     if (   !PvNode
         && (ss-1)->currentMove != MOVE_NULL
-        && (ss-1)->statScore < 23397
+        && (ss-1)->statScore < 24714
         && eval >= beta
         && eval >= ss->staticEval
-        && ss->staticEval >= beta - 32 * depth - 30 * improving + 120 * ttPv + 292
+        && ss->staticEval >= beta - 29 * depth - 31 * improving + 119 * ttPv + 299
         && !excludedMove
         && pos.non_pawn_material(us)
         && (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
@@ -830,7 +830,7 @@ namespace {
         assert(eval - beta >= 0);
 
         // Null move dynamic reduction based on depth and value
-        Depth R = (854 + 68 * depth) / 258 + std::min(int(eval - beta) / 192, 3);
+        Depth R = (793 + 70 * depth) / 252 + std::min(int(eval - beta) / 192, 3);
 
         ss->currentMove = MOVE_NULL;
         ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
@@ -870,10 +870,10 @@ namespace {
     // If we have a good enough capture and a reduced search returns a value
     // much above beta, we can (almost) safely prune the previous move.
     if (   !PvNode
-        && depth >= 5
+        && depth > 5
         && abs(beta) < VALUE_TB_WIN_IN_MAX_PLY)
     {
-        Value raisedBeta = beta + 189 - 45 * improving;
+        Value raisedBeta = beta + 182 - 48 * improving;
         assert(raisedBeta < VALUE_INFINITE);
         MovePicker mp(pos, ttMove, raisedBeta - ss->staticEval, &captureHistory);
         int probCutCount = 0;
@@ -904,7 +904,7 @@ namespace {
 
                 // If the qsearch held, perform the regular search
                 if (value >= raisedBeta)
-                    value = -search<NonPV>(pos, ss+1, -raisedBeta, -raisedBeta+1, depth - 4, !cutNode);
+                    value = -search<NonPV>(pos, ss+1, -raisedBeta, -raisedBeta+1, depth - 5, !cutNode);
 
                 pos.undo_move(move);
 
@@ -1003,15 +1003,15 @@ moves_loop: // When in check, search starts from here
               // Futility pruning: parent node (~5 Elo)
               if (   lmrDepth < 6
                   && !ss->inCheck
-                  && ss->staticEval + 235 + 172 * lmrDepth <= alpha
+                  && ss->staticEval + 252 + 176 * lmrDepth <= alpha
                   &&  (*contHist[0])[movedPiece][to_sq(move)]
                     + (*contHist[1])[movedPiece][to_sq(move)]
                     + (*contHist[3])[movedPiece][to_sq(move)]
-                    + (*contHist[5])[movedPiece][to_sq(move)] / 2 < 31400)
+                    + (*contHist[5])[movedPiece][to_sq(move)] / 2 < 30251)
                   continue;
 
               // Prune moves with negative SEE (~20 Elo)
-              if (!pos.see_ge(move, Value(-(32 - std::min(lmrDepth, 18)) * lmrDepth * lmrDepth)))
+              if (!pos.see_ge(move, Value(-(31 - std::min(lmrDepth, 18)) * lmrDepth * lmrDepth)))
                   continue;
           }
           else
@@ -1027,11 +1027,11 @@ moves_loop: // When in check, search starts from here
                   && lmrDepth < 6
                   && !(PvNode && abs(bestValue) < 2)
                   && !ss->inCheck
-                  && ss->staticEval + 270 + 384 * lmrDepth + PieceValue[MG][type_of(pos.piece_on(to_sq(move)))] <= alpha)
+                  && ss->staticEval + 264 + 397 * lmrDepth + PieceValue[MG][type_of(pos.piece_on(to_sq(move)))] <= alpha)
                   continue;
 
               // See based pruning
-              if (!pos.see_ge(move, Value(-194) * depth)) // (~25 Elo)
+              if (!pos.see_ge(move, Value(-192) * depth)) // (~25 Elo)
                   continue;
           }
       }
@@ -1144,12 +1144,12 @@ moves_loop: // When in check, search starts from here
                   || moveCountPruning
                   || ss->staticEval + PieceValue[EG][pos.captured_piece()] <= alpha
                   || cutNode
-                  || thisThread->ttHitAverage < 375 * TtHitAverageResolution * TtHitAverageWindow / 1024))
+                  || thisThread->ttHitAverage < 399 * TtHitAverageResolution * TtHitAverageWindow / 1024))
           {
              Depth r = reduction(improving, depth, moveCount);
 
              // Decrease reduction if the ttHit running average is large
-             if (thisThread->ttHitAverage > 500 * TtHitAverageResolution * TtHitAverageWindow / 1024)
+             if (thisThread->ttHitAverage > 492 * TtHitAverageResolution * TtHitAverageWindow / 1024)
                  r--;
 
              // Reduction if other threads are searching this position.
@@ -1195,14 +1195,14 @@ moves_loop: // When in check, search starts from here
                              - 4926;
 
               // Decrease/increase reduction by comparing opponent's stat score (~10 Elo)
-              if (ss->statScore >= -102 && (ss-1)->statScore < -114)
+              if (ss->statScore >= -99 && (ss-1)->statScore < -116)
                   r--;
 
-              else if ((ss-1)->statScore >= -116 && ss->statScore < -154)
+              else if ((ss-1)->statScore >= -117 && ss->statScore < -150)
                   r++;
 
               // Decrease/increase reduction for moves with a good/bad history (~30 Elo)
-              r -= ss->statScore / 16434;
+              r -= ss->statScore / 15896;
           }
           else
           {
@@ -1474,7 +1474,7 @@ moves_loop: // When in check, search starts from here
         if (PvNode && bestValue > alpha)
             alpha = bestValue;
 
-        futilityBase = bestValue + 154;
+        futilityBase = bestValue + 138;
     }
 
     const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,
-- 
2.39.2
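
Illustrative addendum, not part of the patch: a minimal standalone sketch of how the retuned late-move-reduction constants map depth and move number to a reduction. It assumes a single search thread (Threads.size() == 1), uses plain int in place of Stockfish's Depth type, and MAX_MOVES = 256 as in the real code; the file name and the example depth/move number are arbitrary.

// reduction_sketch.cpp -- illustrative only, simplified from src/search.cpp
#include <cmath>
#include <cstdio>

constexpr int MAX_MOVES = 256;
static int Reductions[MAX_MOVES];   // indexed by depth or by move number

// Mirrors Search::init() for a single thread: std::log(1) == 0.
static void init_reductions() {
    for (int i = 1; i < MAX_MOVES; ++i)
        Reductions[i] = int((24.9 + std::log(1.0)) * std::log(i));
}

// Mirrors reduction(): i = improving flag, d = depth, mn = move number.
// Non-improving nodes with a large enough product get one extra ply of reduction.
static int reduction(bool i, int d, int mn) {
    int r = Reductions[d] * Reductions[mn];
    return (r + 529) / 1024 + (!i && r > 1050);
}

int main() {
    init_reductions();
    // e.g. the 10th move at depth 12 in a non-improving node
    std::printf("reduction = %d plies\n", reduction(false, 12, 10));
    return 0;
}

With these constants the sketch reduces the 10th move at depth 12 in a non-improving node by four plies; the tune mainly nudges the rounding constant (511 -> 529) and the extra-reduction threshold (1007 -> 1050) rather than changing the overall shape of the table.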