X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=src%2Fsearch.cpp;h=9b747e78e41fd96f86e1592b3b2bf88b50cf79c3;hb=675f6a038ba98b6b906a4767f009cf6fe91b0c52;hp=37277ec418bf4556a99c19c17aa62ed0ac40c669;hpb=5304b561ab96ae1c025d98cf4a6d138daa11374d;p=stockfish

diff --git a/src/search.cpp b/src/search.cpp
index 37277ec4..9b747e78 100644
--- a/src/search.cpp
+++ b/src/search.cpp
@@ -81,7 +81,7 @@ namespace {
 
   // History and stats update bonus, based on depth
   int stat_bonus(Depth d) {
-    return std::min((9 * d + 270) * d - 311 , 2145);
+    return std::min((8 * d + 240) * d - 276 , 1907);
   }
 
   // Add a small random component to draw evaluations to avoid 3-fold blindness
@@ -307,7 +307,7 @@ void Thread::search() {
 
   multiPV = std::min(multiPV, rootMoves.size());
 
-  complexityAverage.set(202, 1);
+  complexityAverage.set(174, 1);
 
   trend         = SCORE_ZERO;
   optimism[ us] = Value(39);
@@ -373,7 +373,9 @@ void Thread::search() {
           int failedHighCnt = 0;
           while (true)
           {
-              Depth adjustedDepth = std::max(1, rootDepth - failedHighCnt - searchAgainCounter);
+              // Adjust the effective depth searched, but ensuring at least one effective increment for every
+              // four searchAgain steps (see issue #2717).
+              Depth adjustedDepth = std::max(1, rootDepth - failedHighCnt - 3 * (searchAgainCounter + 1) / 4);
               bestValue = Stockfish::search<Root>(rootPos, ss, alpha, beta, adjustedDepth, false);
 
               // Bring the best move to the front. It is critical that sorting
@@ -472,7 +474,7 @@ void Thread::search() {
           double reduction = (1.56 + mainThread->previousTimeReduction) / (2.20 * timeReduction);
           double bestMoveInstability = 1 + 1.7 * totBestMoveChanges / Threads.size();
           int complexity = mainThread->complexityAverage.value();
-          double complexPosition = std::clamp(1.0 + (complexity - 326) / 1618.1, 0.5, 1.5);
+          double complexPosition = std::min(1.0 + (complexity - 277) / 1819.1, 1.5);
 
           double totalTime = Time.optimum() * fallingEval * reduction * bestMoveInstability * complexPosition;
 
@@ -635,7 +637,7 @@ namespace {
     // At non-PV nodes we check for an early TT cutoff
     if (  !PvNode
         && ss->ttHit
-        && tte->depth() > depth - ((int)thisThread->id() & 0x1)
+        && tte->depth() > depth - ((int)thisThread->id() & 0x1) - (tte->bound() == BOUND_EXACT)
         && ttValue != VALUE_NONE // Possible in case of TT access race
         && (tte->bound() & (ttValue >= beta ? BOUND_LOWER : BOUND_UPPER)))
     {
@@ -736,7 +738,9 @@ namespace {
         // Never assume anything about values stored in TT
         ss->staticEval = eval = tte->eval();
         if (eval == VALUE_NONE)
-            ss->staticEval = eval = evaluate(pos);
+            ss->staticEval = eval = evaluate(pos, &complexity);
+        else // Fall back to (semi)classical complexity for TT hits, the NNUE complexity is lost
+            complexity = abs(ss->staticEval - pos.psq_eg_stm());
 
         // Randomize draw evaluation
         if (eval == VALUE_DRAW)
@@ -749,13 +753,15 @@ namespace {
     }
     else
     {
-        ss->staticEval = eval = evaluate(pos);
+        ss->staticEval = eval = evaluate(pos, &complexity);
 
         // Save static evaluation into transposition table
         if (!excludedMove)
             tte->save(posKey, VALUE_NONE, ss->ttPv, BOUND_NONE, DEPTH_NONE, MOVE_NONE, eval);
     }
 
+    thisThread->complexityAverage.update(complexity);
+
     // Use static evaluation difference to improve quiet move ordering (~3 Elo)
     if (is_ok((ss-1)->currentMove) && !(ss-1)->inCheck && !priorCapture)
     {
@@ -770,11 +776,7 @@ namespace {
     improvement =   (ss-2)->staticEval != VALUE_NONE ? ss->staticEval - (ss-2)->staticEval
                   : (ss-4)->staticEval != VALUE_NONE ? ss->staticEval - (ss-4)->staticEval
                   :                                    175;
-
     improving = improvement > 0;
-    complexity = abs(ss->staticEval - (us == WHITE ? eg_value(pos.psq_score()) : -eg_value(pos.psq_score())));
-
-    thisThread->complexityAverage.update(complexity);
 
     // Step 7. Razoring.
     // If eval is really low check with qsearch if it can exceed alpha, if it can't,
@@ -803,7 +805,7 @@ namespace {
         && (ss-1)->statScore < 14695
         && eval >= beta
         && eval >= ss->staticEval
-        && ss->staticEval >= beta - 15 * depth - improvement / 15 + 198 + complexity / 28
+        && ss->staticEval >= beta - 15 * depth - improvement / 15 + 201 + complexity / 24
        && !excludedMove
        && pos.non_pawn_material(us)
        && (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
@@ -811,7 +813,7 @@ namespace {
         assert(eval - beta >= 0);
 
         // Null move dynamic reduction based on depth, eval and complexity of position
-        Depth R = std::min(int(eval - beta) / 147, 5) + depth / 3 + 4 - (complexity > 753);
+        Depth R = std::min(int(eval - beta) / 147, 5) + depth / 3 + 4 - (complexity > 650);
 
         ss->currentMove = MOVE_NULL;
         ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
@@ -1031,7 +1033,7 @@ moves_loop: // When in check, search starts here
                   && history < -3875 * (depth - 1))
                       continue;
 
-                  history += thisThread->mainHistory[us][from_to(move)];
+                  history += 2 * thisThread->mainHistory[us][from_to(move)];
 
                   // Futility pruning: parent node (~9 Elo)
                   if (   !ss->inCheck
@@ -1154,7 +1156,7 @@ moves_loop: // When in check, search starts here
                 r--;
 
             // Increase reduction for cut nodes (~3 Elo)
-            if (cutNode && move != ss->killers[0])
+            if (cutNode)
                 r += 2;
 
             // Increase reduction if ttMove is a capture (~3 Elo)
@@ -1169,7 +1171,7 @@ moves_loop: // When in check, search starts here
             if ((ss+1)->cutoffCnt > 3 && !PvNode)
                 r++;
 
-            ss->statScore =  thisThread->mainHistory[us][from_to(move)]
+            ss->statScore =  2 * thisThread->mainHistory[us][from_to(move)]
                            + (*contHist[0])[movedPiece][to_sq(move)]
                            + (*contHist[1])[movedPiece][to_sq(move)]
                            + (*contHist[3])[movedPiece][to_sq(move)]
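
The arithmetic effect of two of the retuned formulas above can be checked in isolation. The following standalone sketch is not Stockfish code: Depth is approximated by a plain int, and the helper names (stat_bonus_old/new, adjusted_depth_old/new) are invented for this illustration. It compares the old and new stat_bonus values and the old and new adjustedDepth reductions, showing that the new iterative-deepening form reduces effective depth by only three plies per four searchAgain steps, as the comment in the diff describes.

#include <algorithm>
#include <cstdio>

// Old and new history/stats bonus from the stat_bonus() hunk above.
int stat_bonus_old(int d) { return std::min((9 * d + 270) * d - 311, 2145); }
int stat_bonus_new(int d) { return std::min((8 * d + 240) * d - 276, 1907); }

// Old and new effective depth from the adjustedDepth hunk above; the new form
// loses only three plies of effective depth per four searchAgain steps.
int adjusted_depth_old(int rootDepth, int failedHighCnt, int searchAgainCounter) {
    return std::max(1, rootDepth - failedHighCnt - searchAgainCounter);
}
int adjusted_depth_new(int rootDepth, int failedHighCnt, int searchAgainCounter) {
    return std::max(1, rootDepth - failedHighCnt - 3 * (searchAgainCounter + 1) / 4);
}

int main() {
    for (int d = 1; d <= 16; d += 5)
        std::printf("depth %2d: stat_bonus %5d -> %5d\n",
                    d, stat_bonus_old(d), stat_bonus_new(d));
    for (int sac = 0; sac <= 8; sac += 2)
        std::printf("searchAgainCounter %d: adjustedDepth %2d -> %2d (rootDepth 20, no fail highs)\n",
                    sac, adjusted_depth_old(20, 0, sac), adjusted_depth_new(20, 0, sac));
    return 0;
}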