X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=src%2Fsearch.cpp;h=343c098a587dbb6cf3d8b8eb6aae946d178d733b;hb=98965c139df1483a3d684ee8bc7a60dc4b95efa1;hp=3008079e9c4ade413f2ee829ba5af2a7b8ad4f42;hpb=eae0f8dd066b31102b6663a60c36fffccf4e1269;p=stockfish

diff --git a/src/search.cpp b/src/search.cpp
index 3008079e..343c098a 100644
--- a/src/search.cpp
+++ b/src/search.cpp
@@ -63,7 +63,7 @@ namespace {
 
   // Futility margin
   Value futility_margin(Depth d, bool improving) {
-    return Value(168 * (d - improving));
+    return Value(165 * (d - improving));
   }
 
   // Reductions lookup table, initialized at startup
@@ -71,21 +71,22 @@ namespace {
 
   Depth reduction(bool i, Depth d, int mn, Value delta, Value rootDelta) {
     int r = Reductions[d] * Reductions[mn];
-    return (r + 1463 - int(delta) * 1024 / int(rootDelta)) / 1024 + (!i && r > 1010);
+    return (r + 1642 - int(delta) * 1024 / int(rootDelta)) / 1024 + (!i && r > 916);
   }
 
   constexpr int futility_move_count(bool improving, Depth depth) {
-    return (3 + depth * depth) / (2 - improving);
+    return improving ? (3 + depth * depth)
+                     : (3 + depth * depth) / 2;
   }
 
   // History and stats update bonus, based on depth
   int stat_bonus(Depth d) {
-    return std::min((9 * d + 270) * d - 311 , 2145);
+    return std::min((12 * d + 282) * d - 349 , 1480);
   }
 
   // Add a small random component to draw evaluations to avoid 3-fold blindness
-  Value value_draw(Thread* thisThread) {
-    return VALUE_DRAW + Value(2 * (thisThread->nodes & 1) - 1);
+  Value value_draw(const Thread* thisThread) {
+    return VALUE_DRAW - 1 + Value(thisThread->nodes & 0x2);
   }
 
   // Skill structure is used to implement strength limit. If we have an uci_elo then
@@ -115,7 +116,7 @@ namespace {
 
   Value value_to_tt(Value v, int ply);
   Value value_from_tt(Value v, int ply, int r50c);
-  void update_pv(Move* pv, Move move, Move* childPv);
+  void update_pv(Move* pv, Move move, const Move* childPv);
   void update_continuation_histories(Stack* ss, Piece pc, Square to, int bonus);
   void update_quiet_stats(const Position& pos, Stack* ss, Move move, int bonus);
   void update_all_stats(const Position& pos, Stack* ss, Move bestMove, Value bestValue, Value beta, Square prevSq,
@@ -157,7 +158,7 @@ namespace {
 void Search::init() {
 
   for (int i = 1; i < MAX_MOVES; ++i)
-      Reductions[i] = int((20.81 + std::log(Threads.size()) / 2) * std::log(i));
+      Reductions[i] = int((20.26 + std::log(Threads.size()) / 2) * std::log(i));
 }
 
 
@@ -238,9 +239,12 @@ void MainThread::search() {
   bestPreviousScore = bestThread->rootMoves[0].score;
   bestPreviousAverageScore = bestThread->rootMoves[0].averageScore;
 
+  for (Thread* th : Threads)
+      th->previousDepth = bestThread->completedDepth;
+
   // Send again PV info if we have a new best thread
   if (bestThread != this)
-      sync_cout << UCI::pv(bestThread->rootPos, bestThread->completedDepth, -VALUE_INFINITE, VALUE_INFINITE) << sync_endl;
+      sync_cout << UCI::pv(bestThread->rootPos, bestThread->completedDepth) << sync_endl;
 
   sync_cout << "bestmove " << UCI::move(bestThread->rootMoves[0].pv[0], rootPos.is_chess960());
 
@@ -303,11 +307,9 @@ void Thread::search() {
 
   multiPV = std::min(multiPV, rootMoves.size());
 
-  complexityAverage.set(202, 1);
+  complexityAverage.set(155, 1);
 
-  trend         = SCORE_ZERO;
-  optimism[ us] = Value(39);
-  optimism[~us] = -optimism[us];
+  optimism[us] = optimism[~us] = VALUE_ZERO;
 
   int searchAgainCounter = 0;
 
@@ -349,16 +351,12 @@ void Thread::search() {
           if (rootDepth >= 4)
           {
               Value prev = rootMoves[pvIdx].averageScore;
-              delta = Value(16) + int(prev) * prev / 19178;
+              delta = Value(10) + int(prev) * prev / 15620;
               alpha = std::max(prev - delta,-VALUE_INFINITE);
               beta  = std::min(prev + delta, VALUE_INFINITE);
 
-              // Adjust trend and optimism based on root move's previousScore
-              int tr = sigmoid(prev, 3, 8, 90, 125, 1);
-              trend = (us == WHITE ?  make_score(tr, tr / 2)
-                                   : -make_score(tr, tr / 2));
-
-              int opt = sigmoid(prev, 8, 17, 144, 13966, 183);
+              // Adjust optimism based on root move's previousScore
+              int opt = 118 * prev / (std::abs(prev) + 169);
               optimism[ us] = Value(opt);
               optimism[~us] = -optimism[us];
           }
@@ -369,7 +367,9 @@ void Thread::search() {
           int failedHighCnt = 0;
           while (true)
           {
-              Depth adjustedDepth = std::max(1, rootDepth - failedHighCnt - searchAgainCounter);
+              // Adjust the effective depth searched, but ensuring at least one effective increment for every
+              // four searchAgain steps (see issue #2717).
+              Depth adjustedDepth = std::max(1, rootDepth - failedHighCnt - 3 * (searchAgainCounter + 1) / 4);
               bestValue = Stockfish::search<Root>(rootPos, ss, alpha, beta, adjustedDepth, false);
 
               // Bring the best move to the front. It is critical that sorting
@@ -392,7 +392,7 @@ void Thread::search() {
                   && multiPV == 1
                   && (bestValue <= alpha || bestValue >= beta)
                   && Time.elapsed() > 3000)
-                  sync_cout << UCI::pv(rootPos, rootDepth, alpha, beta) << sync_endl;
+                  sync_cout << UCI::pv(rootPos, rootDepth) << sync_endl;
 
               // In case of failing low/high increase aspiration window and
               // re-search, otherwise exit the loop.
@@ -423,7 +423,7 @@ void Thread::search() {
 
           if (   mainThread
               && (Threads.stop || pvIdx + 1 == multiPV || Time.elapsed() > 3000))
-              sync_cout << UCI::pv(rootPos, rootDepth, alpha, beta) << sync_endl;
+              sync_cout << UCI::pv(rootPos, rootDepth) << sync_endl;
       }
 
       if (!Threads.stop)
@@ -459,17 +459,16 @@ void Thread::search() {
           && !Threads.stop
           && !mainThread->stopOnPonderhit)
       {
-          double fallingEval = (69 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
-                                   +  6 * (mainThread->iterValue[iterIdx] - bestValue)) / 781.4;
+          double fallingEval = (71 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
+                                   +  6 * (mainThread->iterValue[iterIdx] - bestValue)) / 656.7;
           fallingEval = std::clamp(fallingEval, 0.5, 1.5);
 
           // If the bestMove is stable over several iterations, reduce time accordingly
-          timeReduction = lastBestMoveDepth + 10 < completedDepth ? 1.63 : 0.73;
-          double reduction = (1.56 + mainThread->previousTimeReduction) / (2.20 * timeReduction);
-          double bestMoveInstability = 1.073 + std::max(1.0, 2.25 - 9.9 / rootDepth)
-                                              * totBestMoveChanges / Threads.size();
+          timeReduction = lastBestMoveDepth + 9 < completedDepth ? 1.37 : 0.65;
+          double reduction = (1.4 + mainThread->previousTimeReduction) / (2.15 * timeReduction);
+          double bestMoveInstability = 1 + 1.7 * totBestMoveChanges / Threads.size();
           int complexity = mainThread->complexityAverage.value();
-          double complexPosition = std::clamp(1.0 + (complexity - 326) / 1618.1, 0.5, 1.5);
+          double complexPosition = std::min(1.0 + (complexity - 261) / 1738.7, 1.5);
 
           double totalTime = Time.optimum() * fallingEval * reduction * bestMoveInstability * complexPosition;
 
@@ -490,7 +489,7 @@ void Thread::search() {
           }
           else if (   Threads.increaseDepth
                    && !mainThread->ponder
-                   && Time.elapsed() > totalTime * 0.43)
+                   && Time.elapsed() > totalTime * 0.53)
                    Threads.increaseDepth = false;
           else
                    Threads.increaseDepth = true;
@@ -553,17 +552,17 @@ namespace {
     Move ttMove, move, excludedMove, bestMove;
     Depth extension, newDepth;
     Value bestValue, value, ttValue, eval, maxValue, probCutBeta;
-    bool givesCheck, improving, didLMR, priorCapture;
-    bool captureOrPromotion, doFullDepthSearch, moveCountPruning, ttCapture;
+    bool givesCheck, improving, priorCapture, singularQuietLMR;
+    bool capture, moveCountPruning, ttCapture;
     Piece movedPiece;
-    int moveCount, captureCount, quietCount, bestMoveCount, improvement, complexity;
+    int moveCount, captureCount, quietCount, improvement, complexity;
 
     // Step 1. Initialize node
     Thread* thisThread = pos.this_thread();
     ss->inCheck        = pos.checkers();
     priorCapture       = pos.captured_piece();
     Color us           = pos.side_to_move();
-    moveCount          = bestMoveCount = captureCount = quietCount = ss->moveCount = 0;
+    moveCount          = captureCount = quietCount = ss->moveCount = 0;
     bestValue          = -VALUE_INFINITE;
     maxValue           = VALUE_INFINITE;
 
@@ -603,8 +602,8 @@ namespace {
     (ss+1)->ttPv         = false;
     (ss+1)->excludedMove = bestMove = MOVE_NONE;
     (ss+2)->killers[0]   = (ss+2)->killers[1] = MOVE_NONE;
+    (ss+2)->cutoffCnt    = 0;
     ss->doubleExtensions = (ss-1)->doubleExtensions;
-    ss->depth            = depth;
     Square prevSq        = to_sq((ss-1)->currentMove);
 
     // Initialize statScore to zero for the grandchildren of the current position.
@@ -624,17 +623,16 @@ namespace {
     ttValue = ss->ttHit ? value_from_tt(tte->value(), ss->ply, pos.rule50_count()) : VALUE_NONE;
     ttMove =  rootNode ? thisThread->rootMoves[thisThread->pvIdx].pv[0]
             : ss->ttHit    ? tte->move() : MOVE_NONE;
-    ttCapture = ttMove && pos.capture_or_promotion(ttMove);
+    ttCapture = ttMove && pos.capture(ttMove);
     if (!excludedMove)
         ss->ttPv = PvNode || (ss->ttHit && tte->is_pv());
 
     // At non-PV nodes we check for an early TT cutoff
     if (  !PvNode
         && ss->ttHit
-        && tte->depth() > depth - (thisThread->id() % 2 == 1)
+        && tte->depth() > depth - (tte->bound() == BOUND_EXACT)
        && ttValue != VALUE_NONE // Possible in case of TT access race
-        && (ttValue >= beta ? (tte->bound() & BOUND_LOWER)
-                            : (tte->bound() & BOUND_UPPER)))
+        && (tte->bound() & (ttValue >= beta ? BOUND_LOWER : BOUND_UPPER)))
     {
         // If ttMove is quiet, update move sorting heuristics on TT hit (~1 Elo)
         if (ttMove)
@@ -733,11 +731,9 @@ namespace {
         // Never assume anything about values stored in TT
         ss->staticEval = eval = tte->eval();
         if (eval == VALUE_NONE)
-            ss->staticEval = eval = evaluate(pos);
-
-        // Randomize draw evaluation
-        if (eval == VALUE_DRAW)
-            eval = value_draw(thisThread);
+            ss->staticEval = eval = evaluate(pos, &complexity);
+        else // Fall back to (semi)classical complexity for TT hits, the NNUE complexity is lost
+            complexity = abs(ss->staticEval - pos.psq_eg_stm());
 
         // ttValue can be used as a better position evaluation (~4 Elo)
         if (    ttValue != VALUE_NONE
@@ -746,17 +742,19 @@ namespace {
     }
     else
     {
-        ss->staticEval = eval = evaluate(pos);
+        ss->staticEval = eval = evaluate(pos, &complexity);
 
         // Save static evaluation into transposition table
         if (!excludedMove)
             tte->save(posKey, VALUE_NONE, ss->ttPv, BOUND_NONE, DEPTH_NONE, MOVE_NONE, eval);
     }
 
+    thisThread->complexityAverage.update(complexity);
+
     // Use static evaluation difference to improve quiet move ordering (~3 Elo)
     if (is_ok((ss-1)->currentMove) && !(ss-1)->inCheck && !priorCapture)
    {
-        int bonus = std::clamp(-16 * int((ss-1)->staticEval + ss->staticEval), -2000, 2000);
+        int bonus = std::clamp(-19 * int((ss-1)->staticEval + ss->staticEval), -1914, 1914);
        thisThread->mainHistory[~us][from_to((ss-1)->currentMove)] << bonus;
    }
 
@@ -766,19 +764,13 @@ namespace {
     // margin and the improving flag are used in various pruning heuristics.
     improvement =   (ss-2)->staticEval != VALUE_NONE ? ss->staticEval - (ss-2)->staticEval
                   : (ss-4)->staticEval != VALUE_NONE ? ss->staticEval - (ss-4)->staticEval
-                  :                                    175;
-
+                  :                                    168;
     improving = improvement > 0;
-    complexity = abs(ss->staticEval - (us == WHITE ? eg_value(pos.psq_score()) : -eg_value(pos.psq_score())));
-
-    thisThread->complexityAverage.update(complexity);
 
     // Step 7. Razoring.
     // If eval is really low check with qsearch if it can exceed alpha, if it can't,
     // return a fail low.
-    if (   !PvNode
-        && depth <= 7
-        && eval < alpha - 348 - 258 * depth * depth)
+    if (eval < alpha - 369 - 254 * depth * depth)
     {
         value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
         if (value < alpha)
@@ -789,18 +781,18 @@ namespace {
     // The depth condition is important for mate finding.
     if (   !ss->ttPv
         && depth < 8
-        && eval - futility_margin(depth, improving) - (ss-1)->statScore / 256 >= beta
+        && eval - futility_margin(depth, improving) - (ss-1)->statScore / 303 >= beta
         && eval >= beta
-        && eval < 26305) // larger than VALUE_KNOWN_WIN, but smaller than TB wins.
+        && eval < 28031) // larger than VALUE_KNOWN_WIN, but smaller than TB wins
         return eval;
 
     // Step 9. Null move search with verification search (~22 Elo)
     if (   !PvNode
         && (ss-1)->currentMove != MOVE_NULL
-        && (ss-1)->statScore < 14695
+        && (ss-1)->statScore < 17139
         && eval >= beta
         && eval >= ss->staticEval
-        && ss->staticEval >= beta - 15 * depth - improvement / 15 + 198 + complexity / 28
+        && ss->staticEval >= beta - 20 * depth - improvement / 13 + 233 + complexity / 25
         && !excludedMove
         && pos.non_pawn_material(us)
         && (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
@@ -808,7 +800,7 @@ namespace {
         assert(eval - beta >= 0);
 
         // Null move dynamic reduction based on depth, eval and complexity of position
-        Depth R = std::min(int(eval - beta) / 147, 5) + depth / 3 + 4 - (complexity > 753);
+        Depth R = std::min(int(eval - beta) / 168, 7) + depth / 3 + 4 - (complexity > 861);
 
         ss->currentMove = MOVE_NULL;
         ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
@@ -844,7 +836,7 @@ namespace {
         }
     }
 
-    probCutBeta = beta + 179 - 46 * improving;
+    probCutBeta = beta + 191 - 54 * improving;
 
     // Step 10. ProbCut (~4 Elo)
     // If we have a good enough capture and a reduced search returns a value
     // much above beta, we can (almost) safely prune the previous move.
@@ -863,20 +855,16 @@ namespace {
     {
         assert(probCutBeta < VALUE_INFINITE);
 
-        MovePicker mp(pos, ttMove, probCutBeta - ss->staticEval, depth - 3, &captureHistory);
-        bool ttPv = ss->ttPv;
-        ss->ttPv = false;
+        MovePicker mp(pos, ttMove, probCutBeta - ss->staticEval, &captureHistory);
 
         while ((move = mp.next_move()) != MOVE_NONE)
             if (move != excludedMove && pos.legal(move))
             {
-                assert(pos.capture_or_promotion(move));
-
-                captureOrPromotion = true;
+                assert(pos.capture(move) || promotion_type(move) == QUEEN);
 
                 ss->currentMove = move;
                 ss->continuationHistory = &thisThread->continuationHistory[ss->inCheck]
-                                                                          [captureOrPromotion]
+                                                                          [true]
                                                                           [pos.moved_piece(move)]
                                                                           [to_sq(move)];
 
@@ -893,34 +881,31 @@ namespace {
                 if (value >= probCutBeta)
                 {
-                    // if transposition table doesn't have equal or more deep info write probCut data into it
-                    if ( !(ss->ttHit
-                       && tte->depth() >= depth - 3
-                       && ttValue != VALUE_NONE))
-                        tte->save(posKey, value_to_tt(value, ss->ply), ttPv,
-                            BOUND_LOWER,
-                            depth - 3, move, ss->staticEval);
+                    // Save ProbCut data into transposition table
+                    tte->save(posKey, value_to_tt(value, ss->ply), ss->ttPv, BOUND_LOWER, depth - 3, move, ss->staticEval);
                     return value;
                 }
             }
 
-        ss->ttPv = ttPv;
     }
 
-    // Step 11. If the position is not in TT, decrease depth by 2 or 1 depending on node type (~3 Elo)
-    if (   PvNode
-        && depth >= 3
+    // Step 11. If the position is not in TT, decrease depth by 3.
+    // Use qsearch if depth is equal or below zero (~4 Elo)
+    if (    PvNode
         && !ttMove)
-        depth -= 2;
+        depth -= 3;
 
-    if (   cutNode
-        && depth >= 8
+    if (depth <= 0)
+        return qsearch<PV>(pos, ss, alpha, beta);
+
+    if (    cutNode
+        && depth >= 9
         && !ttMove)
-        depth--;
+        depth -= 2;
 
 moves_loop: // When in check, search starts here
 
     // Step 12. A small Probcut idea, when we are in check (~0 Elo)
-    probCutBeta = beta + 481;
+    probCutBeta = beta + 417;
     if (   ss->inCheck
         && !PvNode
         && depth >= 2
@@ -947,7 +932,7 @@ moves_loop: // When in check, search starts here
                                       ss->killers);
 
     value = bestValue;
-    moveCountPruning = false;
+    moveCountPruning = singularQuietLMR = false;
 
     // Indicate PvNodes that will probably fail low if the node was searched
     // at a depth equal or greater than the current depth, and the result of this search was a fail low.
@@ -987,7 +972,7 @@ moves_loop: // When in check, search starts here
       (ss+1)->pv = nullptr;
 
       extension = 0;
-      captureOrPromotion = pos.capture_or_promotion(move);
+      capture = pos.capture(move);
       movedPiece = pos.moved_piece(move);
       givesCheck = pos.gives_check(move);
 
@@ -1007,21 +992,20 @@ moves_loop: // When in check, search starts here
              // Reduced depth of the next LMR search
              int lmrDepth = std::max(newDepth - reduction(improving, depth, moveCount, delta, thisThread->rootDelta), 0);
 
-              if (   captureOrPromotion
+              if (   capture
                  || givesCheck)
              {
                  // Futility pruning for captures (~0 Elo)
-                  if (   !pos.empty(to_sq(move))
-                      && !givesCheck
+                  if (   !givesCheck
                      && !PvNode
-                      && lmrDepth < 6
+                      && lmrDepth < 7
                      && !ss->inCheck
-                      && ss->staticEval + 281 + 179 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
+                      && ss->staticEval + 180 + 201 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
                       + captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 6 < alpha)
                      continue;
 
                  // SEE based pruning (~9 Elo)
-                  if (!pos.see_ge(move, Value(-203) * depth))
+                  if (!pos.see_ge(move, Value(-222) * depth))
                      continue;
              }
              else
@@ -1035,16 +1019,16 @@ moves_loop: // When in check, search starts here
                      && history < -3875 * (depth - 1))
                      continue;
 
-                  history += thisThread->mainHistory[us][from_to(move)];
+                  history += 2 * thisThread->mainHistory[us][from_to(move)];
 
                  // Futility pruning: parent node (~9 Elo)
                  if (   !ss->inCheck
-                      && lmrDepth < 11
-                      && ss->staticEval + 122 + 138 * lmrDepth + history / 60 <= alpha)
+                      && lmrDepth < 13
+                      && ss->staticEval + 106 + 145 * lmrDepth + history / 52 <= alpha)
                      continue;
 
                  // Prune moves with negative SEE (~3 Elo)
-                  if (!pos.see_ge(move, Value(-25 * lmrDepth * lmrDepth - 20 * lmrDepth)))
+                  if (!pos.see_ge(move, Value(-24 * lmrDepth * lmrDepth - 15 * lmrDepth)))
                      continue;
              }
          }
@@ -1059,7 +1043,7 @@ moves_loop: // When in check, search starts here
          // a reduced search on all the other moves but the ttMove and if the
          // result is lower than ttValue minus a margin, then we will extend the ttMove.
          if (   !rootNode
-              && depth >= 4 + 2 * (PvNode && tte->is_pv())
+              && depth >= 4 - (thisThread->previousDepth > 24) + 2 * (PvNode && tte->is_pv())
              && move == ttMove
              && !excludedMove // Avoid recursive singular search
           /* && ttValue != VALUE_NONE Already implicit in the next condition */
@@ -1067,7 +1051,7 @@ moves_loop: // When in check, search starts here
              && (tte->bound() & BOUND_LOWER)
              && tte->depth() >= depth - 3)
          {
-              Value singularBeta = ttValue - 3 * depth;
+              Value singularBeta = ttValue - (3 + (ss->ttPv && !PvNode)) * depth;
              Depth singularDepth = (depth - 1) / 2;
 
              ss->excludedMove = move;
@@ -1077,11 +1061,12 @@ moves_loop: // When in check, search starts here
              if (value < singularBeta)
              {
                  extension = 1;
+                  singularQuietLMR = !ttCapture;
 
                  // Avoid search explosion by limiting the number of double extensions
                  if (   !PvNode
-                      && value < singularBeta - 26
-                      && ss->doubleExtensions <= 8)
+                      && value < singularBeta - 25
+                      && ss->doubleExtensions <= 9)
                      extension = 2;
              }
 
@@ -1096,19 +1081,23 @@ moves_loop: // When in check, search starts here
              // If the eval of ttMove is greater than beta, we reduce it (negative extension)
              else if (ttValue >= beta)
                  extension = -2;
+
+              // If the eval of ttMove is less than alpha and value, we reduce it (negative extension)
+              else if (ttValue <= alpha && ttValue <= value)
+                  extension = -1;
          }
 
          // Check extensions (~1 Elo)
          else if (   givesCheck
                   && depth > 9
-                   && abs(ss->staticEval) > 71)
+                   && abs(ss->staticEval) > 82)
              extension = 1;
 
          // Quiet ttMove extensions (~0 Elo)
          else if (   PvNode
                   && move == ttMove
                   && move == ss->killers[0]
-                   && (*contHist[0])[movedPiece][to_sq(move)] >= 5491)
+                   && (*contHist[0])[movedPiece][to_sq(move)] >= 5177)
              extension = 1;
      }
 
@@ -1122,15 +1111,13 @@ moves_loop: // When in check, search starts here
      // Update the current move (this must be done after singular extension search)
      ss->currentMove = move;
      ss->continuationHistory = &thisThread->continuationHistory[ss->inCheck]
-                                                                [captureOrPromotion]
+                                                                [capture]
                                                                [movedPiece]
                                                                [to_sq(move)];
 
      // Step 16. Make the move
      pos.do_move(move, st, givesCheck);
 
-      bool doDeeperSearch = false;
-
      // Step 17. Late moves reduction / extension (LMR, ~98 Elo)
      // We use various heuristics for the sons of a node after the first son has
      // been searched. In general we would like to reduce them, but there are many
@@ -1138,16 +1125,11 @@ moves_loop: // When in check, search starts here
      if (    depth >= 2
          &&  moveCount > 1 + (PvNode && ss->ply <= 1)
          && (   !ss->ttPv
-              || !captureOrPromotion
+              || !capture
              || (cutNode && (ss-1)->moveCount > 1)))
      {
          Depth r = reduction(improving, depth, moveCount, delta, thisThread->rootDelta);
 
-          // Decrease reduction at some PvNodes (~2 Elo)
-          if (   PvNode
-              && bestMoveCount <= 3)
-              r--;
-
          // Decrease reduction if position is or has been on the PV
          // and node is not likely to fail low. (~3 Elo)
          if (   ss->ttPv
@@ -1159,69 +1141,76 @@ moves_loop: // When in check, search starts here
              r--;
 
          // Increase reduction for cut nodes (~3 Elo)
-          if (cutNode && move != ss->killers[0])
+          if (cutNode)
              r += 2;
 
          // Increase reduction if ttMove is a capture (~3 Elo)
          if (ttCapture)
              r++;
 
-          // Decrease reduction at PvNodes if bestvalue
-          // is vastly different from static evaluation
-          if (PvNode && !ss->inCheck && abs(ss->staticEval - bestValue) > 250)
+          // Decrease reduction for PvNodes based on depth
+          if (PvNode)
+              r -= 1 + 11 / (3 + depth);
+
+          // Decrease reduction if ttMove has been singularly extended (~1 Elo)
+          if (singularQuietLMR)
              r--;
 
-          ss->statScore =  thisThread->mainHistory[us][from_to(move)]
+          // Decrease reduction if we move a threatened piece (~1 Elo)
+          if (   depth > 9
+              && (mp.threatenedPieces & from_sq(move)))
+              r--;
+
+          // Increase reduction if next ply has a lot of fail high
+          if ((ss+1)->cutoffCnt > 3)
+              r++;
+
+          ss->statScore =  2 * thisThread->mainHistory[us][from_to(move)]
                         + (*contHist[0])[movedPiece][to_sq(move)]
                         + (*contHist[1])[movedPiece][to_sq(move)]
                         + (*contHist[3])[movedPiece][to_sq(move)]
-                         - 4334;
+                         - 4433;
 
          // Decrease/increase reduction for moves with a good/bad history (~30 Elo)
-          r -= ss->statScore / 15914;
-
-          // In general we want to cap the LMR depth search at newDepth. But if reductions
-          // are really negative and movecount is low, we allow this move to be searched
-          // deeper than the first move (this may lead to hidden double extensions).
-          int deeper =   r >= -1                   ? 0
-                       : moveCount <= 4            ? 2
-                       : PvNode && depth > 4       ? 1
-                       : cutNode && moveCount <= 8 ? 1
-                       :                             0;
+          r -= ss->statScore / (13000 + 4152 * (depth > 7 && depth < 19));
 
-          Depth d = std::clamp(newDepth - r, 1, newDepth + deeper);
+          // In general we want to cap the LMR depth search at newDepth, but when
+          // reduction is negative, we allow this move a limited search extension
+          // beyond the first move depth. This may lead to hidden double extensions.
+          Depth d = std::clamp(newDepth - r, 1, newDepth + 1);
 
          value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, d, true);
 
-          // If the son is reduced and fails high it will be re-searched at full depth
-          doFullDepthSearch = value > alpha && d < newDepth;
-          doDeeperSearch = value > (alpha + 78 + 11 * (newDepth - d));
-          didLMR = true;
-      }
-      else
-      {
-          doFullDepthSearch = !PvNode || moveCount > 1;
-          didLMR = false;
-      }
+          // Do full depth search when reduced LMR search fails high
+          if (value > alpha && d < newDepth)
+          {
+              // Adjust full depth search based on LMR results - if result
+              // was good enough search deeper, if it was bad enough search shallower
+              const bool doDeeperSearch = value > (alpha + 64 + 11 * (newDepth - d));
+              const bool doEvenDeeperSearch = value > alpha + 582;
+              const bool doShallowerSearch = value < bestValue + newDepth;
 
-      // Step 18. Full depth search when LMR is skipped or fails high
-      if (doFullDepthSearch)
-      {
-          value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth + doDeeperSearch, !cutNode);
+              newDepth += doDeeperSearch - doShallowerSearch + doEvenDeeperSearch;
+
+              if (newDepth > d)
+                  value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth, !cutNode);
 
-          // If the move passed LMR update its stats
-          if (didLMR)
-          {
              int bonus = value > alpha ?  stat_bonus(newDepth)
                                        : -stat_bonus(newDepth);
 
-              if (captureOrPromotion)
+              if (capture)
                  bonus /= 6;
 
              update_continuation_histories(ss, movedPiece, to_sq(move), bonus);
          }
      }
 
+      // Step 18. Full depth search when LMR is skipped
+      else if (!PvNode || moveCount > 1)
+      {
+          value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth, !cutNode);
+      }
+
      // For PV nodes only, do a full PV search on the first move or after a fail
      // high (in the latter case search only if value < beta), otherwise let the
      // parent node fail low with value <= alpha and try another move.
@@ -1258,6 +1247,8 @@ moves_loop: // When in check, search starts here
          {
              rm.score = value;
              rm.selDepth = thisThread->selDepth;
+              rm.scoreLowerbound = value >= beta;
+              rm.scoreUpperbound = value <= alpha;
              rm.pv.resize(1);
 
              assert((ss+1)->pv);
@@ -1293,23 +1284,33 @@ moves_loop: // When in check, search starts here
              if (PvNode && value < beta) // Update alpha! Always alpha < beta
              {
                  alpha = value;
-                  bestMoveCount++;
+
+                  // Reduce other moves if we have found at least one score improvement
+                  if (   depth > 1
+                      && depth < 6
+                      && beta  <  VALUE_KNOWN_WIN
+                      && alpha > -VALUE_KNOWN_WIN)
+                      depth -= 1;
+
+                  assert(depth > 0);
              }
              else
              {
+                  ss->cutoffCnt++;
                  assert(value >= beta); // Fail high
                  break;
              }
          }
      }
 
+
      // If the move is worse than some previously searched move, remember it to update its stats later
      if (move != bestMove)
      {
-          if (captureOrPromotion && captureCount < 32)
+          if (capture && captureCount < 32)
              capturesSearched[captureCount++] = move;
 
-          else if (!captureOrPromotion && quietCount < 64)
+          else if (!capture && quietCount < 64)
              quietsSearched[quietCount++] = move;
      }
 
@@ -1340,14 +1341,14 @@ moves_loop: // When in check, search starts here
                         quietsSearched, quietCount, capturesSearched, captureCount, depth);
 
    // Bonus for prior countermove that caused the fail low
-    else if (   (depth >= 4 || PvNode)
+    else if (   (depth >= 5 || PvNode)
             && !priorCapture)
    {
        //Assign extra bonus if current node is PvNode or cutNode
        //or fail low was really bad
        bool extraBonus =    PvNode
                          || cutNode
-                          || bestValue < alpha - 70 * depth;
+                          || bestValue < alpha - 62 * depth;
 
        update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, stat_bonus(depth) * (1 + extraBonus));
    }
@@ -1359,10 +1360,6 @@ moves_loop: // When in check, search starts here
    // opponent move is probably good and the new position is added to the search tree.
    if (bestValue <= alpha)
        ss->ttPv = ss->ttPv || ((ss-1)->ttPv && depth > 3);
-    // Otherwise, a counter move has been found and if the position is the last leaf
-    // in the search tree, remove the position from the search tree.
-    else if (depth > 3)
-        ss->ttPv = ss->ttPv && (ss+1)->ttPv;
 
    // Write gathered information in transposition table
    if (!excludedMove && !(rootNode && thisThread->pvIdx))
@@ -1379,6 +1376,7 @@ moves_loop: // When in check, search starts here
 
  // qsearch() is the quiescence search function, which is called by the main search
  // function with zero depth, or recursively with further decreasing depth per call.
+  // (~155 elo)
  template <NodeType nodeType>
  Value qsearch(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth) {
 
@@ -1398,7 +1396,7 @@ moves_loop: // When in check, search starts here
    Move ttMove, move, bestMove;
    Depth ttDepth;
    Value bestValue, value, ttValue, futilityValue, futilityBase;
-    bool pvHit, givesCheck, captureOrPromotion;
+    bool pvHit, givesCheck, capture;
    int moveCount;
 
    if (PvNode)
@@ -1435,8 +1433,7 @@ moves_loop: // When in check, search starts here
        && ss->ttHit
        && tte->depth() >= ttDepth
        && ttValue != VALUE_NONE // Only in case of TT access race
-        && (ttValue >= beta ? (tte->bound() & BOUND_LOWER)
-                            : (tte->bound() & BOUND_UPPER)))
+        && (tte->bound() & (ttValue >= beta ? BOUND_LOWER : BOUND_UPPER)))
        return ttValue;
 
    // Evaluate the position statically
@@ -1478,7 +1475,7 @@ moves_loop: // When in check, search starts here
        if (PvNode && bestValue > alpha)
            alpha = bestValue;
 
-        futilityBase = bestValue + 118;
+        futilityBase = bestValue + 153;
    }
 
    const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,
@@ -1507,7 +1504,7 @@ moves_loop: // When in check, search starts here
          continue;
 
      givesCheck = pos.gives_check(move);
-      captureOrPromotion = pos.capture_or_promotion(move);
+      capture = pos.capture(move);
 
      moveCount++;
 
@@ -1547,25 +1544,24 @@ moves_loop: // When in check, search starts here
 
      ss->currentMove = move;
      ss->continuationHistory = &thisThread->continuationHistory[ss->inCheck]
-                                                                [captureOrPromotion]
+                                                                [capture]
                                                                [pos.moved_piece(move)]
                                                                [to_sq(move)];
 
      // Continuation history based pruning (~2 Elo)
-      if (  !captureOrPromotion
+      if (  !capture
          && bestValue > VALUE_TB_LOSS_IN_MAX_PLY
-          && (*contHist[0])[pos.moved_piece(move)][to_sq(move)] < CounterMovePruneThreshold
-          && (*contHist[1])[pos.moved_piece(move)][to_sq(move)] < CounterMovePruneThreshold)
+          && (*contHist[0])[pos.moved_piece(move)][to_sq(move)] < 0
+          && (*contHist[1])[pos.moved_piece(move)][to_sq(move)] < 0)
          continue;
 
-      // movecount pruning for quiet check evasions
-      if (   bestValue > VALUE_TB_LOSS_IN_MAX_PLY
-          && quietCheckEvasions > 1
-          && !captureOrPromotion
-          && ss->inCheck)
-          continue;
+      // We prune after 2nd quiet check evasion where being 'in check' is implicitly checked through the counter
+      // and being a 'quiet' apart from being a tt move is assumed after an increment because captures are pushed ahead.
+      if (   bestValue > VALUE_TB_LOSS_IN_MAX_PLY
+          && quietCheckEvasions > 1)
+          break;
 
-      quietCheckEvasions += !captureOrPromotion && ss->inCheck;
+      quietCheckEvasions += !capture && ss->inCheck;
 
      // Make and search the move
      pos.do_move(move, st, givesCheck);
@@ -1660,7 +1656,7 @@ moves_loop: // When in check, search starts here
 
  // update_pv() adds current move and appends child pv[]
 
-  void update_pv(Move* pv, Move move, Move* childPv) {
+  void update_pv(Move* pv, Move move, const Move* childPv) {
 
    for (*pv++ = move; childPv && *childPv != MOVE_NONE; )
        *pv++ = *childPv++;
@@ -1673,19 +1669,18 @@ moves_loop: // When in check, search starts here
  void update_all_stats(const Position& pos, Stack* ss, Move bestMove, Value bestValue, Value beta, Square prevSq,
                        Move* quietsSearched, int quietCount, Move* capturesSearched, int captureCount, Depth depth) {
 
-    int bonus1, bonus2;
    Color us = pos.side_to_move();
    Thread* thisThread = pos.this_thread();
    CapturePieceToHistory& captureHistory = thisThread->captureHistory;
    Piece moved_piece = pos.moved_piece(bestMove);
    PieceType captured = type_of(pos.piece_on(to_sq(bestMove)));
+    int bonus1 = stat_bonus(depth + 1);
 
-    bonus1 = stat_bonus(depth + 1);
-    bonus2 = bestValue > beta + PawnValueMg ? bonus1               // larger bonus
+    if (!pos.capture(bestMove))
+    {
+        int bonus2 = bestValue > beta + 137 ? bonus1               // larger bonus
                                            : stat_bonus(depth);   // smaller bonus
 
-    if (!pos.capture_or_promotion(bestMove))
-    {
        // Increase stats for the best move in case it was a quiet move
        update_quiet_stats(pos, ss, bestMove, bonus2);
@@ -1828,7 +1823,7 @@ void MainThread::check_time() {
/// UCI::pv() formats PV information according to the UCI protocol. UCI requires
/// that all (if any) unsearched PV lines are sent using a previous search score.
 
-string UCI::pv(const Position& pos, Depth depth, Value alpha, Value beta) {
+string UCI::pv(const Position& pos, Depth depth) {
 
  std::stringstream ss;
  TimePoint elapsed = Time.elapsed() + 1;
@@ -1866,16 +1861,13 @@ string UCI::pv(const Position& pos, Depth depth, Value alpha, Value beta) {
      if (Options["UCI_ShowWDL"])
          ss << UCI::wdl(v, pos.game_ply());
 
-      if (!tb && i == pvIdx)
-          ss << (v >= beta ? " lowerbound" : v <= alpha ? " upperbound" : "");
+      if (i == pvIdx && !tb && updated) // tablebase- and previous-scores are exact
+          ss << (rootMoves[i].scoreLowerbound ? " lowerbound" : (rootMoves[i].scoreUpperbound ? " upperbound" : ""));
 
      ss << " nodes "    << nodesSearched
-         << " nps "      << nodesSearched * 1000 / elapsed;
-
-      if (elapsed > 1000) // Earlier makes little sense
-          ss << " hashfull " << TT.hashfull();
-
-      ss << " tbhits "   << tbHits
+         << " nps "      << nodesSearched * 1000 / elapsed
+         << " hashfull " << TT.hashfull()
+         << " tbhits "   << tbHits
         << " time "     << elapsed
         << " pv";