X-Git-Url: https://git.sesse.net/?p=stockfish;a=blobdiff_plain;f=src%2Fsearch.cpp;h=e1ac91c3f58e6a3a22f07477ed62b3c893bb9f9d;hp=378c3d20f1f693d48ec2df4eba1a5ed68f6fc06e;hb=6e1cb6e45b63c21ee818fe3f8266bdbcc3aaceb5;hpb=bd358533a4ac2296a6cf7f89ae0f288d91f68029 diff --git a/src/search.cpp b/src/search.cpp index 378c3d20..e1ac91c3 100644 --- a/src/search.cpp +++ b/src/search.cpp @@ -53,26 +53,6 @@ namespace { /// Types - // IterationInfoType stores search results for each iteration - // - // Because we use relatively small (dynamic) aspiration window, - // there happens many fail highs and fail lows in root. And - // because we don't do researches in those cases, "value" stored - // here is not necessarily exact. Instead in case of fail high/low - // we guess what the right value might be and store our guess - // as a "speculated value" and then move on. Speculated values are - // used just to calculate aspiration window width, so also if are - // not exact is not big a problem. - - struct IterationInfoType { - - IterationInfoType(Value v = Value(0), Value sv = Value(0)) - : value(v), speculatedValue(sv) {} - - Value value, speculatedValue; - }; - - // The BetaCounterType class is used to order moves at ply one. // Apart for the first one that has its score, following moves // normally have score -VALUE_INFINITE, so are ordered according @@ -146,9 +126,6 @@ namespace { // Search depth at iteration 1 const Depth InitialDepth = OnePly; - // Depth limit for selective search - const Depth SelectiveDepth = 7 * OnePly; - // Use internal iterative deepening? const bool UseIIDAtPVNodes = true; const bool UseIIDAtNonPVNodes = true; @@ -162,17 +139,6 @@ namespace { // better than the second best move. const Value EasyMoveMargin = Value(0x200); - // Problem margin. If the score of the first move at iteration N+1 has - // dropped by more than this since iteration N, the boolean variable - // "Problem" is set to true, which will make the program spend some extra - // time looking for a better move. - const Value ProblemMargin = Value(0x28); - - // No problem margin. If the boolean "Problem" is true, and a new move - // is found at the root which is less than NoProblemMargin worse than the - // best move from the previous iteration, Problem is set back to false. - const Value NoProblemMargin = Value(0x14); - // Null move margin. A null move search will not be done if the static // evaluation of the position is more than NullMoveMargin below beta. const Value NullMoveMargin = Value(0x200); @@ -181,15 +147,6 @@ namespace { // remaining ones we will extend it. const Value SingleReplyMargin = Value(0x20); - // Margins for futility pruning in the quiescence search, and at frontier - // and near frontier nodes. - const Value FutilityMarginQS = Value(0x80); - - Value FutilityMargins[2 * PLY_MAX_PLUS_2]; // Initialized at startup. 
- - // Each move futility margin is decreased - const Value IncrementalFutilityMargin = Value(0x8); - // Depth limit for razoring const Depth RazorDepth = 4 * OnePly; @@ -213,7 +170,7 @@ namespace { BetaCounterType BetaCounter; // Scores and number of times the best move changed for each iteration - IterationInfoType IterationInfo[PLY_MAX_PLUS_2]; + Value ValueByIteration[PLY_MAX_PLUS_2]; int BestMoveChangesByIteration[PLY_MAX_PLUS_2]; // Search window management @@ -229,7 +186,7 @@ namespace { int MaxSearchTime, AbsoluteMaxSearchTime, ExtraSearchTime, ExactMaxTime; bool UseTimeManagement, InfiniteSearch, PonderSearch, StopOnPonderhit; bool AbortSearch, Quit; - bool FailHigh, FailLow, Problem; + bool AspirationFailLow; // Show current line? bool ShowCurrentLine; @@ -238,9 +195,21 @@ namespace { bool UseLogFile; std::ofstream LogFile; - // Natural logarithmic lookup table and its getter function - float lnArray[512]; - inline float ln(int i) { return lnArray[i]; } + // Futility lookup tables and their getter functions + const Value FutilityMarginQS = Value(0x80); + int32_t FutilityMarginsMatrix[14][64]; // [depth][moveNumber] + int FutilityMoveCountArray[32]; // [depth] + + inline Value futility_margin(Depth d, int mn) { return (Value) (d < 14? FutilityMarginsMatrix[Max(d, 0)][Min(mn, 63)] : 2*VALUE_INFINITE); } + inline int futility_move_count(Depth d) { return (d < 32? FutilityMoveCountArray[d] : 512); } + + // Reduction lookup tables and their getter functions + // Initialized at startup + int8_t PVReductionMatrix[64][64]; // [depth][moveNumber] + int8_t NonPVReductionMatrix[64][64]; // [depth][moveNumber] + + inline Depth pv_reduction(Depth d, int mn) { return (Depth) PVReductionMatrix[Min(d / 2, 63)][Min(mn, 63)]; } + inline Depth nonpv_reduction(Depth d, int mn) { return (Depth) NonPVReductionMatrix[Min(d / 2, 63)][Min(mn, 63)]; } // MP related variables int ActiveThreads = 1; @@ -288,13 +257,10 @@ namespace { bool ok_to_prune(const Position& pos, Move m, Move threat); bool ok_to_use_TT(const TTEntry* tte, Depth depth, Value beta, int ply); Value refine_eval(const TTEntry* tte, Value defaultEval, int ply); - void reduction_parameters(float base, float Inhibitor, Depth depth, float& logLimit, float& gradient); - Depth reduction(int moveCount, const float LogLimit, const float BaseRed, const float Gradient); void update_history(const Position& pos, Move move, Depth depth, Move movesSearched[], int moveCount); void update_killers(Move m, SearchStack& ss); void update_gains(const Position& pos, Move move, Value before, Value after); - bool fail_high_ply_1(); int current_search_time(); int nps(); void poll(); @@ -370,7 +336,7 @@ bool think(const Position& pos, bool infinite, bool ponder, int side_to_move, // Initialize global search variables Idle = StopOnPonderhit = AbortSearch = Quit = false; - FailHigh = FailLow = Problem = false; + AspirationFailLow = false; NodesSincePoll = 0; SearchStartTime = get_system_time(); ExactMaxTime = maxTime; @@ -381,7 +347,7 @@ bool think(const Position& pos, bool infinite, bool ponder, int side_to_move, UseTimeManagement = !ExactMaxTime && !MaxDepth && !MaxNodes && !InfiniteSearch; // Look for a book move, only during games, not tests - if (UseTimeManagement && !ponder && get_option_value_bool("OwnBook")) + if (UseTimeManagement && get_option_value_bool("OwnBook")) { Move bookMove; if (get_option_value_string("Book File") != OpeningBook.file_name()) @@ -390,6 +356,9 @@ bool think(const Position& pos, bool infinite, bool ponder, int side_to_move, bookMove 
= OpeningBook.get_move(pos); if (bookMove != MOVE_NONE) { + if (PonderSearch) + wait_for_stop_or_ponderhit(); + cout << "bestmove " << bookMove << endl; return true; } @@ -398,7 +367,6 @@ bool think(const Position& pos, bool infinite, bool ponder, int side_to_move, for (int i = 0; i < THREAD_MAX; i++) { Threads[i].nodes = 0ULL; - Threads[i].failHighPly1 = false; } if (button_was_pressed("New Game")) @@ -531,7 +499,6 @@ bool think(const Position& pos, bool infinite, bool ponder, int side_to_move, // We're ready to start thinking. Call the iterative deepening loop function Value v = id_loop(pos, searchMoves); - if (UseLSNFiltering) { // Step 1. If this is sudden death game and our position is hopeless, @@ -572,20 +539,29 @@ void init_threads() { pthread_t pthread[1]; #endif - // Init our logarithmic lookup table - for (i = 0; i < 512; i++) - lnArray[i] = float(log(double(i))); // log() returns base-e logarithm - - for (i = 0; i < THREAD_MAX; i++) - Threads[i].activeSplitPoints = 0; + // Init our reduction lookup tables + for (i = 1; i < 64; i++) // i == depth + for (int j = 1; j < 64; j++) // j == moveNumber + { + double pvRed = 0.5 + log(double(i)) * log(double(j)) / 6.0; + double nonPVRed = 0.5 + log(double(i)) * log(double(j)) / 3.0; + PVReductionMatrix[i][j] = (int8_t) ( pvRed >= 1.0 ? floor( pvRed * int(OnePly)) : 0); + NonPVReductionMatrix[i][j] = (int8_t) (nonPVRed >= 1.0 ? floor(nonPVRed * int(OnePly)) : 0); + } // Init futility margins array - FutilityMargins[0] = FutilityMargins[1] = Value(0); + for (i = 0; i < 14; i++) // i == depth (OnePly = 2) + for (int j = 0; j < 64; j++) // j == moveNumber + { + FutilityMarginsMatrix[i][j] = (i < 2 ? 0 : 112 * bitScanReverse32(i * i / 2)) - 8 * j; // FIXME: test using log instead of BSR + } - for (i = 2; i < 2 * PLY_MAX_PLUS_2; i++) - { - FutilityMargins[i] = Value(112 * bitScanReverse32(i * i / 2)); // FIXME: test using log instead of BSR - } + // Init futility move count array + for (i = 0; i < 32; i++) // i == depth (OnePly = 2) + FutilityMoveCountArray[i] = 3 + (1 << (3 * i / 8)); + + for (i = 0; i < THREAD_MAX; i++) + Threads[i].activeSplitPoints = 0; // Initialize global locks lock_init(&MPLock, NULL); @@ -695,6 +671,7 @@ namespace { // searchMoves are verified, copied, scored and sorted RootMoveList rml(p, searchMoves); + // Handle special case of searching on a mate/stale position if (rml.move_count() == 0) { if (PonderSearch) @@ -716,7 +693,7 @@ namespace { TT.new_search(); H.clear(); init_ss_array(ss); - IterationInfo[1] = IterationInfoType(rml.get_move_score(0), rml.get_move_score(0)); + ValueByIteration[1] = rml.get_move_score(0); Iteration = 1; // Is one move significantly better than others after initial scoring ? 
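
The init_threads() changes above replace the old ln() lookup and reduction_parameters() helpers with precomputed tables. The following standalone sketch rebuilds those tables with the same formulas so their shape can be inspected offline; OnePly == 2 (depth counted in half-plies) and the loop-based msb() stand-in for bitScanReverse32() are assumptions for illustration, not the engine's code.

#include <cmath>
#include <cstdio>

const int OnePly = 2;                 // assumption: depth is measured in half-plies

// Stand-in for the engine's bitScanReverse32(): index of the highest set bit.
int msb(unsigned int x) { int r = 0; while (x >>= 1) r++; return r; }

int main() {
    static int pvRed[64][64], nonPVRed[64][64], futMargin[14][64], futMoveCount[32];

    for (int d = 1; d < 64; d++)               // d == depth
        for (int mn = 1; mn < 64; mn++) {      // mn == move number
            double pv    = 0.5 + std::log(double(d)) * std::log(double(mn)) / 6.0;
            double nonPV = 0.5 + std::log(double(d)) * std::log(double(mn)) / 3.0;
            pvRed[d][mn]    = pv    >= 1.0 ? int(std::floor(pv    * OnePly)) : 0;
            nonPVRed[d][mn] = nonPV >= 1.0 ? int(std::floor(nonPV * OnePly)) : 0;
        }

    for (int d = 0; d < 14; d++)               // d == depth in half-plies
        for (int mn = 0; mn < 64; mn++)
            futMargin[d][mn] = (d < 2 ? 0 : 112 * msb(d * d / 2)) - 8 * mn;

    for (int d = 0; d < 32; d++)
        futMoveCount[d] = 3 + (1 << (3 * d / 8));

    // The pv_reduction()/nonpv_reduction() getters index with depth/2, i.e. full plies,
    // so the reduction applied to move 20 at 8 plies of remaining depth is:
    std::printf("non-PV reduction at 8 plies, move 20: %d half-plies\n", nonPVRed[8][20]);
    std::printf("futility margin at depth 6, move 5:   %d\n", futMargin[6][5]);
    std::printf("futility move count at depth 6:       %d\n", futMoveCount[6]);
    return 0;
}
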
@@ -740,18 +717,16 @@ namespace { // Calculate dynamic search window based on previous iterations Value alpha, beta; - if (MultiPV == 1 && Iteration >= 6 && abs(IterationInfo[Iteration - 1].value) < VALUE_KNOWN_WIN) + if (MultiPV == 1 && Iteration >= 6 && abs(ValueByIteration[Iteration - 1]) < VALUE_KNOWN_WIN) { - int prevDelta1 = IterationInfo[Iteration - 1].speculatedValue - IterationInfo[Iteration - 2].speculatedValue; - int prevDelta2 = IterationInfo[Iteration - 2].speculatedValue - IterationInfo[Iteration - 3].speculatedValue; + int prevDelta1 = ValueByIteration[Iteration - 1] - ValueByIteration[Iteration - 2]; + int prevDelta2 = ValueByIteration[Iteration - 2] - ValueByIteration[Iteration - 3]; - int delta = Max(abs(prevDelta1) + abs(prevDelta2) / 2, 16); + AspirationDelta = Max(abs(prevDelta1) + abs(prevDelta2) / 2, 16); + AspirationDelta = (AspirationDelta + 7) / 8 * 8; // Round to match grainSize - delta = (delta + 7) / 8 * 8; // Round to match grainSize - AspirationDelta = delta; - - alpha = Max(IterationInfo[Iteration - 1].value - delta, -VALUE_INFINITE); - beta = Min(IterationInfo[Iteration - 1].value + delta, VALUE_INFINITE); + alpha = Max(ValueByIteration[Iteration - 1] - AspirationDelta, -VALUE_INFINITE); + beta = Min(ValueByIteration[Iteration - 1] + AspirationDelta, VALUE_INFINITE); } else { @@ -770,39 +745,12 @@ namespace { break; // Value cannot be trusted. Break out immediately! //Save info about search result - Value speculatedValue; - bool fHigh = false; - bool fLow = false; - Value delta = value - IterationInfo[Iteration - 1].value; - - if (value >= beta) - { - assert(delta > 0); - - fHigh = true; - speculatedValue = value + delta; - BestMoveChangesByIteration[Iteration] += 2; // Allocate more time - } - else if (value <= alpha) - { - assert(value == alpha); - assert(delta < 0); - - fLow = true; - speculatedValue = value + delta; - BestMoveChangesByIteration[Iteration] += 3; // Allocate more time - } else - speculatedValue = value; - - speculatedValue = Min(Max(speculatedValue, -VALUE_INFINITE), VALUE_INFINITE); - IterationInfo[Iteration] = IterationInfoType(value, speculatedValue); + ValueByIteration[Iteration] = value; // Drop the easy move if it differs from the new best move if (ss[0].pv[0] != EasyMove) EasyMove = MOVE_NONE; - Problem = false; - if (UseTimeManagement) { // Time to stop? 
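
A minimal standalone sketch of the window management introduced in the hunk above: the initial delta is derived from the last three iteration scores, and a later research widens the window geometrically. VALUE_INFINITE and the sample scores are assumptions chosen only for illustration.

#include <algorithm>
#include <cstdio>
#include <cstdlib>

const int VALUE_INFINITE = 30001;                 // assumed to match the engine's constant

// Window sizing as in id_loop() above, from the scores of the last three iterations.
void aspiration_window(int vPrev3, int vPrev2, int vPrev1, int& alpha, int& beta, int& delta) {
    int d1 = vPrev1 - vPrev2;
    int d2 = vPrev2 - vPrev3;
    delta = std::max(std::abs(d1) + std::abs(d2) / 2, 16);
    delta = (delta + 7) / 8 * 8;                  // round up to the 8-point grain size
    alpha = std::max(vPrev1 - delta, -VALUE_INFINITE);
    beta  = std::min(vPrev1 + delta,  VALUE_INFINITE);
}

int main() {
    int alpha, beta, delta;
    aspiration_window(24, 40, 31, alpha, beta, delta);    // swings of -9 and +16
    std::printf("initial window: [%d, %d], delta %d\n", alpha, beta, delta);  // [7, 55], 24

    // On a fail high, root_search() widens beta geometrically (fail lows mirror this on
    // the alpha side), so a stubborn fail high quickly reopens the full window.
    for (int researchCount = 1; researchCount <= 3; researchCount++) {
        beta = std::min(beta + delta * (1 << researchCount), VALUE_INFINITE);
        std::printf("research %d: beta = %d\n", researchCount, beta);
    }
    return 0;
}
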
@@ -815,15 +763,13 @@ namespace { // Stop search early when the last two iterations returned a mate score if ( Iteration >= 6 - && abs(IterationInfo[Iteration].value) >= abs(VALUE_MATE) - 100 - && abs(IterationInfo[Iteration-1].value) >= abs(VALUE_MATE) - 100) + && abs(ValueByIteration[Iteration]) >= abs(VALUE_MATE) - 100 + && abs(ValueByIteration[Iteration-1]) >= abs(VALUE_MATE) - 100) stopSearch = true; // Stop search early if one move seems to be much better than the rest int64_t nodes = nodes_searched(); if ( Iteration >= 8 - && !fLow - && !fHigh && EasyMove == ss[0].pv[0] && ( ( rml.get_move_cumulative_nodes(0) > (nodes * 85) / 100 && current_search_time() > MaxSearchTime / 16) @@ -906,207 +852,116 @@ namespace { Value root_search(Position& pos, SearchStack ss[], RootMoveList& rml, Value& oldAlpha, Value& beta) { - Value alpha = oldAlpha; + int64_t nodes; + Move move; + StateInfo st; + Depth depth, ext, newDepth; Value value; CheckInfo ci(pos); int researchCount = 0; + bool moveIsCheck, captureOrPromotion, dangerous; + Value alpha = oldAlpha; bool isCheck = pos.is_check(); // Evaluate the position statically EvalInfo ei; - if (!isCheck) - ss[0].eval = evaluate(pos, ei, 0); - else - ss[0].eval = VALUE_NONE; + ss[0].eval = !isCheck ? evaluate(pos, ei, 0) : VALUE_NONE; - while(1) // Fail low loop + while (1) // Fail low loop { - // Loop through all the moves in the root move list - for (int i = 0; i < rml.move_count() && !AbortSearch; i++) - { - if (alpha >= beta) + // Loop through all the moves in the root move list + for (int i = 0; i < rml.move_count() && !AbortSearch; i++) { - // We failed high, invalidate and skip next moves, leave node-counters - // and beta-counters as they are and quickly return, we will try to do - // a research at the next iteration with a bigger aspiration window. - rml.set_move_score(i, -VALUE_INFINITE); - continue; - } - int64_t nodes; - Move move; - StateInfo st; - Depth depth, ext, newDepth; - - RootMoveNumber = i + 1; - FailHigh = false; - - // Save the current node count before the move is searched - nodes = nodes_searched(); - - // Reset beta cut-off counters - BetaCounter.clear(); - - // Pick the next root move, and print the move and the move number to - // the standard output. - move = ss[0].currentMove = rml.get_move(i); - - if (current_search_time() >= 1000) - cout << "info currmove " << move - << " currmovenumber " << RootMoveNumber << endl; - - // Decide search depth for this move - bool moveIsCheck = pos.move_is_check(move); - bool captureOrPromotion = pos.move_is_capture_or_promotion(move); - bool dangerous; - depth = (Iteration - 2) * OnePly + InitialDepth; - ext = extension(pos, move, true, captureOrPromotion, moveIsCheck, false, false, &dangerous); - newDepth = depth + ext; + if (alpha >= beta) + { + // We failed high, invalidate and skip next moves, leave node-counters + // and beta-counters as they are and quickly return, we will try to do + // a research at the next iteration with a bigger aspiration window. 
+ rml.set_move_score(i, -VALUE_INFINITE); + continue; + } - value = - VALUE_INFINITE; + RootMoveNumber = i + 1; - // Precalculate reduction parameters - float LogLimit, Gradient, BaseReduction = 0.5; - reduction_parameters(BaseReduction, 6.0, depth, LogLimit, Gradient); + // Save the current node count before the move is searched + nodes = nodes_searched(); - while (1) // Fail high loop - { + // Reset beta cut-off counters + BetaCounter.clear(); - // Make the move, and search it - pos.do_move(move, st, ci, moveIsCheck); + // Pick the next root move, and print the move and the move number to + // the standard output. + move = ss[0].currentMove = rml.get_move(i); - if (i < MultiPV || value > alpha) - { - // Aspiration window is disabled in multi-pv case - if (MultiPV > 1) - alpha = -VALUE_INFINITE; + if (current_search_time() >= 1000) + cout << "info currmove " << move + << " currmovenumber " << RootMoveNumber << endl; - value = -search_pv(pos, ss, -beta, -alpha, newDepth, 1, 0); + // Decide search depth for this move + moveIsCheck = pos.move_is_check(move); + captureOrPromotion = pos.move_is_capture_or_promotion(move); + depth = (Iteration - 2) * OnePly + InitialDepth; + ext = extension(pos, move, true, captureOrPromotion, moveIsCheck, false, false, &dangerous); + newDepth = depth + ext; - // If the value has dropped a lot compared to the last iteration, - // set the boolean variable Problem to true. This variable is used - // for time managment: When Problem is true, we try to complete the - // current iteration before playing a move. - Problem = ( Iteration >= 2 - && value <= IterationInfo[Iteration - 1].value - ProblemMargin); + value = - VALUE_INFINITE; - if (Problem && StopOnPonderhit) - StopOnPonderhit = false; - } - else - { - // Try to reduce non-pv search depth by one ply if move seems not problematic, - // if the move fails high will be re-searched at full depth. - bool doFullDepthSearch = true; - - if ( depth >= 3*OnePly // FIXME was newDepth - && !dangerous - && !captureOrPromotion - && !move_is_castle(move)) + while (1) // Fail high loop { - ss[0].reduction = reduction(RootMoveNumber - MultiPV + 1, LogLimit, BaseReduction, Gradient); - if (ss[0].reduction) - { - value = -search(pos, ss, -alpha, newDepth-ss[0].reduction, 1, true, 0); - doFullDepthSearch = (value > alpha); - } - } - if (doFullDepthSearch) - { - ss[0].reduction = Depth(0); - value = -search(pos, ss, -alpha, newDepth, 1, true, 0); + // Make the move, and search it + pos.do_move(move, st, ci, moveIsCheck); - if (value > alpha) + if (i < MultiPV || value > alpha) { - // Fail high! Set the boolean variable FailHigh to true, and - // re-search the move using a PV search. The variable FailHigh - // is used for time managment: We try to avoid aborting the - // search prematurely during a fail high research. - FailHigh = true; + // Aspiration window is disabled in multi-pv case + if (MultiPV > 1) + alpha = -VALUE_INFINITE; + value = -search_pv(pos, ss, -beta, -alpha, newDepth, 1, 0); } - } - } - - pos.undo_move(move); - - if (AbortSearch || value < beta) - break; // We are not failing high - - // We are failing high and going to do a research. It's important to update score - // before research in case we run out of time while researching. - rml.set_move_score(i, value); - update_pv(ss, 0); - TT.extract_pv(pos, ss[0].pv, PLY_MAX); - rml.set_move_pv(i, ss[0].pv); - - // Print search information to the standard output - cout << "info depth " << Iteration - << " score " << value_to_string(value) - << ((value >= beta) ? 
" lowerbound" : - ((value <= alpha)? " upperbound" : "")) - << " time " << current_search_time() - << " nodes " << nodes_searched() - << " nps " << nps() - << " pv "; - - for (int j = 0; ss[0].pv[j] != MOVE_NONE && j < PLY_MAX; j++) - cout << ss[0].pv[j] << " "; - - cout << endl; - - if (UseLogFile) - { - ValueType type = (value >= beta ? VALUE_TYPE_LOWER - : (value <= alpha ? VALUE_TYPE_UPPER : VALUE_TYPE_EXACT)); - - LogFile << pretty_pv(pos, current_search_time(), Iteration, - nodes_searched(), value, type, ss[0].pv) << endl; - } - - // Prepare for research - researchCount++; - beta = Min(beta + AspirationDelta * (1 << researchCount), VALUE_INFINITE); - - } // End of fail high loop - - // Finished searching the move. If AbortSearch is true, the search - // was aborted because the user interrupted the search or because we - // ran out of time. In this case, the return value of the search cannot - // be trusted, and we break out of the loop without updating the best - // move and/or PV. - if (AbortSearch) - break; - - // Remember beta-cutoff and searched nodes counts for this move. The - // info is used to sort the root moves at the next iteration. - int64_t our, their; - BetaCounter.read(pos.side_to_move(), our, their); - rml.set_beta_counters(i, our, their); - rml.set_move_nodes(i, nodes_searched() - nodes); - - assert(value >= -VALUE_INFINITE && value <= VALUE_INFINITE); + else + { + // Try to reduce non-pv search depth by one ply if move seems not problematic, + // if the move fails high will be re-searched at full depth. + bool doFullDepthSearch = true; + + if ( depth >= 3*OnePly // FIXME was newDepth + && !dangerous + && !captureOrPromotion + && !move_is_castle(move)) + { + ss[0].reduction = pv_reduction(depth, RootMoveNumber - MultiPV + 1); + if (ss[0].reduction) + { + value = -search(pos, ss, -alpha, newDepth-ss[0].reduction, 1, true, 0); + doFullDepthSearch = (value > alpha); + } + } + + if (doFullDepthSearch) + { + ss[0].reduction = Depth(0); + value = -search(pos, ss, -alpha, newDepth, 1, true, 0); + + if (value > alpha) + value = -search_pv(pos, ss, -beta, -alpha, newDepth, 1, 0); + } + } - if (value <= alpha && i >= MultiPV) - rml.set_move_score(i, -VALUE_INFINITE); - else - { - // PV move or new best move! + pos.undo_move(move); - // Update PV - rml.set_move_score(i, value); - update_pv(ss, 0); - TT.extract_pv(pos, ss[0].pv, PLY_MAX); - rml.set_move_pv(i, ss[0].pv); + // Can we exit fail high loop ? + if (AbortSearch || value < beta) + break; - if (MultiPV == 1) - { - // We record how often the best move has been changed in each - // iteration. This information is used for time managment: When - // the best move changes frequently, we allocate some more time. - if (i > 0) - BestMoveChangesByIteration[Iteration]++; + // We are failing high and going to do a research. It's important to update score + // before research in case we run out of time while researching. + rml.set_move_score(i, value); + update_pv(ss, 0); + TT.extract_pv(pos, ss[0].pv, PLY_MAX); + rml.set_move_pv(i, ss[0].pv); // Print search information to the standard output cout << "info depth " << Iteration @@ -1131,48 +986,114 @@ namespace { LogFile << pretty_pv(pos, current_search_time(), Iteration, nodes_searched(), value, type, ss[0].pv) << endl; } - if (value > alpha) - alpha = value; - // Reset the global variable Problem to false if the value isn't too - // far below the final value from the last iteration. 
- if (value > IterationInfo[Iteration - 1].value - NoProblemMargin) - Problem = false; - } - else // MultiPV > 1 + // Prepare for a research after a fail high, each time with a wider window + researchCount++; + beta = Min(beta + AspirationDelta * (1 << researchCount), VALUE_INFINITE); + + } // End of fail high loop + + // Finished searching the move. If AbortSearch is true, the search + // was aborted because the user interrupted the search or because we + // ran out of time. In this case, the return value of the search cannot + // be trusted, and we break out of the loop without updating the best + // move and/or PV. + if (AbortSearch) + break; + + // Remember beta-cutoff and searched nodes counts for this move. The + // info is used to sort the root moves at the next iteration. + int64_t our, their; + BetaCounter.read(pos.side_to_move(), our, their); + rml.set_beta_counters(i, our, their); + rml.set_move_nodes(i, nodes_searched() - nodes); + + assert(value >= -VALUE_INFINITE && value <= VALUE_INFINITE); + + if (value <= alpha && i >= MultiPV) + rml.set_move_score(i, -VALUE_INFINITE); + else { - rml.sort_multipv(i); - for (int j = 0; j < Min(MultiPV, rml.move_count()); j++) + // PV move or new best move! + + // Update PV + rml.set_move_score(i, value); + update_pv(ss, 0); + TT.extract_pv(pos, ss[0].pv, PLY_MAX); + rml.set_move_pv(i, ss[0].pv); + + if (MultiPV == 1) { - cout << "info multipv " << j + 1 - << " score " << value_to_string(rml.get_move_score(j)) - << " depth " << ((j <= i)? Iteration : Iteration - 1) - << " time " << current_search_time() + // We record how often the best move has been changed in each + // iteration. This information is used for time managment: When + // the best move changes frequently, we allocate some more time. + if (i > 0) + BestMoveChangesByIteration[Iteration]++; + + // Print search information to the standard output + cout << "info depth " << Iteration + << " score " << value_to_string(value) + << ((value >= beta) ? " lowerbound" : + ((value <= alpha)? " upperbound" : "")) + << " time " << current_search_time() << " nodes " << nodes_searched() - << " nps " << nps() + << " nps " << nps() << " pv "; - for (int k = 0; rml.get_move_pv(j, k) != MOVE_NONE && k < PLY_MAX; k++) - cout << rml.get_move_pv(j, k) << " "; + for (int j = 0; ss[0].pv[j] != MOVE_NONE && j < PLY_MAX; j++) + cout << ss[0].pv[j] << " "; cout << endl; + + if (UseLogFile) + { + ValueType type = (value >= beta ? VALUE_TYPE_LOWER + : (value <= alpha ? VALUE_TYPE_UPPER : VALUE_TYPE_EXACT)); + + LogFile << pretty_pv(pos, current_search_time(), Iteration, + nodes_searched(), value, type, ss[0].pv) << endl; + } + if (value > alpha) + alpha = value; } - alpha = rml.get_move_score(Min(i, MultiPV-1)); - } - } // PV move or new best move + else // MultiPV > 1 + { + rml.sort_multipv(i); + for (int j = 0; j < Min(MultiPV, rml.move_count()); j++) + { + cout << "info multipv " << j + 1 + << " score " << value_to_string(rml.get_move_score(j)) + << " depth " << ((j <= i)? 
Iteration : Iteration - 1) + << " time " << current_search_time() + << " nodes " << nodes_searched() + << " nps " << nps() + << " pv "; + + for (int k = 0; rml.get_move_pv(j, k) != MOVE_NONE && k < PLY_MAX; k++) + cout << rml.get_move_pv(j, k) << " "; + + cout << endl; + } + alpha = rml.get_move_score(Min(i, MultiPV-1)); + } + } // PV move or new best move - assert(alpha >= oldAlpha); + assert(alpha >= oldAlpha); - FailLow = (alpha == oldAlpha); - } + AspirationFailLow = (alpha == oldAlpha); - if (AbortSearch || alpha > oldAlpha) - break; // End search, we are not failing low + if (AspirationFailLow && StopOnPonderhit) + StopOnPonderhit = false; + } - // Prepare for research - researchCount++; - alpha = Max(alpha - AspirationDelta * (1 << researchCount), -VALUE_INFINITE); - oldAlpha = alpha; + // Can we exit fail low loop ? + if (AbortSearch || alpha > oldAlpha) + break; + + // Prepare for a research after a fail low, each time with a wider window + researchCount++; + alpha = Max(alpha - AspirationDelta * (1 << researchCount), -VALUE_INFINITE); + oldAlpha = alpha; } // Fail low loop @@ -1259,10 +1180,6 @@ namespace { CheckInfo ci(pos); MovePicker mp = MovePicker(pos, ttMove, depth, H, &ss[ply]); - // Precalculate reduction parameters - float LogLimit, Gradient, BaseReduction = 0.5; - reduction_parameters(BaseReduction, 6.0, depth, LogLimit, Gradient); - // Loop through all legal moves until no moves remain or a beta cutoff // occurs. while ( alpha < beta @@ -1320,8 +1237,8 @@ namespace { && !captureOrPromotion && !move_is_castle(move) && !move_is_killer(move, ss[ply])) - { - ss[ply].reduction = reduction(moveCount, LogLimit, BaseReduction, Gradient); + { + ss[ply].reduction = pv_reduction(depth, moveCount); if (ss[ply].reduction) { value = -search(pos, ss, -alpha, newDepth-ss[ply].reduction, ply+1, true, threadID); @@ -1334,19 +1251,7 @@ namespace { ss[ply].reduction = Depth(0); value = -search(pos, ss, -alpha, newDepth, ply+1, true, threadID); if (value > alpha && value < beta) - { - // When the search fails high at ply 1 while searching the first - // move at the root, set the flag failHighPly1. This is used for - // time managment: We don't want to stop the search early in - // such cases, because resolving the fail high at ply 1 could - // result in a big drop in score at the root. - if (ply == 1 && RootMoveNumber == 1) - Threads[threadID].failHighPly1 = true; - - // A fail high occurred. Re-search at full window (pv search) value = -search_pv(pos, ss, -beta, -alpha, newDepth, ply+1, threadID); - Threads[threadID].failHighPly1 = false; - } } } pos.undo_move(move); @@ -1364,13 +1269,6 @@ namespace { if (value == value_mate_in(ply + 1)) ss[ply].mateKiller = move; } - // If we are at ply 1, and we are searching the first root move at - // ply 0, set the 'Problem' variable if the score has dropped a lot - // (from the computer's point of view) since the previous iteration. - if ( ply == 1 - && Iteration >= 2 - && -value <= IterationInfo[Iteration-1].value - ProblemMargin) - Problem = true; } // Split? 
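
The reduced-search / zero-window / full-window cascade that search_pv() now drives through the pv_reduction() table is easy to misread, so here is a stripped-down sketch of just that control flow. The callbacks are placeholders rather than the engine's search() and search_pv(), and the negamax sign flips are deliberately left out.

#include <functional>

// 1) reduced-depth search with a zero window, 2) full-depth zero-window research if the
// reduced search beats alpha, 3) full-window PV research only if that result lands
// strictly inside the (alpha, beta) window.
int lmr_cascade(int newDepth, int reduction, int alpha, int beta,
                const std::function<int(int, int, int)>& zeroWindowSearch, // (depth, alpha, beta)
                const std::function<int(int, int, int)>& pvSearch) {
    bool doFullDepthSearch = true;
    int value = 0;

    if (reduction > 0) {
        value = zeroWindowSearch(newDepth - reduction, alpha, alpha + 1);
        doFullDepthSearch = (value > alpha);         // reduced search failed high?
    }
    if (doFullDepthSearch) {
        value = zeroWindowSearch(newDepth, alpha, alpha + 1);
        if (value > alpha && value < beta)           // inside the window: pay for the PV search
            value = pvSearch(newDepth, alpha, beta);
    }
    return value;
}

int main() {
    // Toy stand-ins that return fixed scores, only to show the call shape.
    auto zeroWin = [](int, int alpha, int) { return alpha + 5; };
    auto pvWin   = [](int, int, int)       { return 42; };
    return lmr_cascade(8, 2, 10, 100, zeroWin, pvWin) == 42 ? 0 : 1;
}
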
@@ -1475,9 +1373,6 @@ namespace { isCheck = pos.is_check(); - // Calculate depth dependant futility pruning parameters - const int FutilityMoveCountMargin = 3 + (1 << (3 * int(depth) / 8)); - // Evaluate the position statically if (!isCheck) { @@ -1490,18 +1385,19 @@ namespace { } ss[ply].eval = staticValue; - futilityValue = staticValue + FutilityMargins[int(depth)]; //FIXME: Remove me, only for split + futilityValue = staticValue + futility_margin(depth, 0); //FIXME: Remove me, only for split staticValue = refine_eval(tte, staticValue, ply); // Enhance accuracy with TT value if possible update_gains(pos, ss[ply - 1].currentMove, ss[ply - 1].eval, ss[ply].eval); } - // Do a "stand pat". If we are above beta by a good margin then - // return immediately. + // Static null move pruning. We're betting that the opponent doesn't have + // a move that will reduce the score by more than FutilityMargins[int(depth)] + // if we do a null move. if ( !isCheck && allowNullmove && depth < RazorDepth - && staticValue - FutilityMargins[int(depth)] >= beta) - return staticValue - FutilityMargins[int(depth)]; + && staticValue - futility_margin(depth, 0) >= beta) + return staticValue - futility_margin(depth, 0); // Null move search if ( allowNullmove @@ -1573,7 +1469,7 @@ namespace { { search(pos, ss, beta, Min(depth/2, depth-2*OnePly), ply, false, threadID); ttMove = ss[ply].pv[ply]; - tte = TT.retrieve(pos.get_key()); + tte = TT.retrieve(posKey); } // Initialize a MovePicker object for the current position, and prepare @@ -1581,10 +1477,6 @@ namespace { MovePicker mp = MovePicker(pos, ttMove, depth, H, &ss[ply]); CheckInfo ci(pos); - // Precalculate reduction parameters - float LogLimit, Gradient, BaseReduction = 0.5; - reduction_parameters(BaseReduction, 3.0, depth, LogLimit, Gradient); - // Loop through all legal moves until no moves remain or a beta cutoff occurs while ( bestValue < beta && (move = mp.get_next_move()) != MOVE_NONE @@ -1629,36 +1521,6 @@ namespace { // Update current move movesSearched[moveCount++] = ss[ply].currentMove = move; - // Futility pruning for captures - // FIXME: test disabling 'Futility pruning for captures' - // FIXME: test with 'newDepth < RazorDepth' - Color them = opposite_color(pos.side_to_move()); - - if ( !isCheck - && newDepth < SelectiveDepth - && !dangerous - && pos.move_is_capture(move) - && !pos.move_is_check(move, ci) - && !move_is_promotion(move) - && move != ttMove - && !move_is_ep(move) - && (pos.type_of_piece_on(move_to(move)) != PAWN || !pos.pawn_is_passed(them, move_to(move)))) // Do not prune passed pawn captures - { - int preFutilityValueMargin = 0; - - if (newDepth >= OnePly) - preFutilityValueMargin = FutilityMargins[int(newDepth)]; - - Value futilityCaptureValue = ss[ply].eval + pos.endgame_value_of_piece_on(move_to(move)) + preFutilityValueMargin + ei.futilityMargin + 90; - - if (futilityCaptureValue < beta) - { - if (futilityCaptureValue > bestValue) - bestValue = futilityCaptureValue; - continue; - } - } - // Futility pruning if ( !isCheck && !dangerous @@ -1667,35 +1529,20 @@ namespace { && move != ttMove) { // Move count based pruning - if ( moveCount >= FutilityMoveCountMargin + if ( moveCount >= futility_move_count(depth) && ok_to_prune(pos, move, ss[ply].threatMove) && bestValue > value_mated_in(PLY_MAX)) continue; // Value based pruning - Depth predictedDepth = newDepth; + Depth predictedDepth = newDepth - nonpv_reduction(depth, moveCount); //FIXME: We are ignoring condition: depth >= 3*OnePly, BUG?? 
+ futilityValueScaled = ss[ply].eval + futility_margin(predictedDepth, moveCount) + H.gain(pos.piece_on(move_from(move)), move_to(move)) + 45; - //FIXME: We are ignoring condition: depth >= 3*OnePly, BUG?? - ss[ply].reduction = reduction(moveCount, LogLimit, BaseReduction, Gradient); - if (ss[ply].reduction) - predictedDepth -= ss[ply].reduction; - - if (predictedDepth < SelectiveDepth) + if (futilityValueScaled < beta) { - int preFutilityValueMargin = 0; - if (predictedDepth >= OnePly) - preFutilityValueMargin = FutilityMargins[int(predictedDepth)]; - - preFutilityValueMargin += H.gain(pos.piece_on(move_from(move)), move_from(move), move_to(move)) + 45; - - futilityValueScaled = ss[ply].eval + preFutilityValueMargin - moveCount * IncrementalFutilityMargin; - - if (futilityValueScaled < beta) - { - if (futilityValueScaled > bestValue) - bestValue = futilityValueScaled; - continue; - } + if (futilityValueScaled > bestValue) + bestValue = futilityValueScaled; + continue; } } @@ -1712,7 +1559,7 @@ namespace { && !move_is_castle(move) && !move_is_killer(move, ss[ply])) { - ss[ply].reduction = reduction(moveCount, LogLimit, BaseReduction, Gradient); + ss[ply].reduction = nonpv_reduction(depth, moveCount); if (ss[ply].reduction) { value = -search(pos, ss, -(beta-1), newDepth-ss[ply].reduction, ply+1, true, threadID); @@ -1805,6 +1652,7 @@ namespace { const TTEntry* tte = NULL; int moveCount = 0; bool pvNode = (beta - alpha != 1); + Value oldAlpha = alpha; // Initialize, and make an early exit in case of an aborted search, // an instant draw, maximum ply reached, etc. @@ -1853,7 +1701,7 @@ namespace { if (bestValue >= beta) { // Store the score to avoid a future costly evaluation() call - if (!isCheck && !tte && ei.futilityMargin == 0) + if (!isCheck && !tte && ei.futilityMargin[pos.side_to_move()] == 0) TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_EV_LO, Depth(-127*OnePly), MOVE_NONE); return bestValue; @@ -1872,7 +1720,7 @@ namespace { MovePicker mp = MovePicker(pos, ttMove, deepChecks ? Depth(0) : depth, H); CheckInfo ci(pos); enoughMaterial = pos.non_pawn_material(pos.side_to_move()) > RookValueMidgame; - futilityBase = staticValue + FutilityMarginQS + ei.futilityMargin; + futilityBase = staticValue + FutilityMarginQS + ei.futilityMargin[pos.side_to_move()]; // Loop through the moves until no moves remain or a beta cutoff // occurs. @@ -1948,14 +1796,14 @@ namespace { // Update transposition table Depth d = (depth == Depth(0) ? Depth(0) : Depth(-1)); - if (bestValue < beta) + if (bestValue <= oldAlpha) { // If bestValue isn't changed it means it is still the static evaluation // of the node, so keep this info to avoid a future evaluation() call. - ValueType type = (bestValue == staticValue && !ei.futilityMargin ? VALUE_TYPE_EV_UP : VALUE_TYPE_UPPER); + ValueType type = (bestValue == staticValue && !ei.futilityMargin[pos.side_to_move()] ? 
VALUE_TYPE_EV_UP : VALUE_TYPE_UPPER); TT.store(pos.get_key(), value_to_tt(bestValue, ply), type, d, MOVE_NONE); } - else + else if (bestValue >= beta) { move = ss[ply].pv[ply]; TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_LOWER, d, move); @@ -1964,6 +1812,8 @@ namespace { if (!pos.move_is_capture_or_promotion(move)) update_killers(move, ss[ply]); } + else + TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_EXACT, d, ss[ply].pv[ply]); assert(bestValue > -VALUE_INFINITE && bestValue < VALUE_INFINITE); @@ -1991,15 +1841,9 @@ namespace { Move move; int moveCount; bool isCheck = pos.is_check(); - bool useFutilityPruning = sp->depth < SelectiveDepth + bool useFutilityPruning = sp->depth < 7 * OnePly //FIXME: sync with search && !isCheck; - const int FutilityMoveCountMargin = 3 + (1 << (3 * int(sp->depth) / 8)); - - // Precalculate reduction parameters - float LogLimit, Gradient, BaseReduction = 0.5; - reduction_parameters(BaseReduction, 3.0, sp->depth, LogLimit, Gradient); - while ( lock_grab_bool(&(sp->lock)) && sp->bestValue < sp->beta && !thread_should_stop(threadID) @@ -2026,13 +1870,13 @@ namespace { && !captureOrPromotion) { // Move count based pruning - if ( moveCount >= FutilityMoveCountMargin + if ( moveCount >= futility_move_count(sp->depth) && ok_to_prune(pos, move, ss[sp->ply].threatMove) && sp->bestValue > value_mated_in(PLY_MAX)) continue; // Value based pruning - Value futilityValueScaled = sp->futilityValue - moveCount * IncrementalFutilityMargin; + Value futilityValueScaled = sp->futilityValue - moveCount * 8; //FIXME: sync with search if (futilityValueScaled < sp->beta) { @@ -2060,7 +1904,7 @@ namespace { && !move_is_castle(move) && !move_is_killer(move, ss[sp->ply])) { - ss[sp->ply].reduction = reduction(moveCount, LogLimit, BaseReduction, Gradient); + ss[sp->ply].reduction = nonpv_reduction(sp->depth, moveCount); if (ss[sp->ply].reduction) { value = -search(pos, ss, -(sp->beta-1), newDepth-ss[sp->ply].reduction, sp->ply+1, true, threadID); @@ -2140,10 +1984,6 @@ namespace { int moveCount; Move move; - // Precalculate reduction parameters - float LogLimit, Gradient, BaseReduction = 0.5; - reduction_parameters(BaseReduction, 6.0, sp->depth, LogLimit, Gradient); - while ( lock_grab_bool(&(sp->lock)) && sp->alpha < sp->beta && !thread_should_stop(threadID) @@ -2177,7 +2017,7 @@ namespace { && !move_is_castle(move) && !move_is_killer(move, ss[sp->ply])) { - ss[sp->ply].reduction = reduction(moveCount, LogLimit, BaseReduction, Gradient); + ss[sp->ply].reduction = pv_reduction(sp->depth, moveCount); if (ss[sp->ply].reduction) { Value localAlpha = sp->alpha; @@ -2194,14 +2034,6 @@ namespace { if (value > localAlpha && value < sp->beta) { - // When the search fails high at ply 1 while searching the first - // move at the root, set the flag failHighPly1. This is used for - // time managment: We don't want to stop the search early in - // such cases, because resolving the fail high at ply 1 could - // result in a big drop in score at the root. - if (sp->ply == 1 && RootMoveNumber == 1) - Threads[threadID].failHighPly1 = true; - // If another thread has failed high then sp->alpha has been increased // to be higher or equal then beta, if so, avoid to start a PV search. 
localAlpha = sp->alpha; @@ -2209,8 +2041,6 @@ namespace { value = -search_pv(pos, ss, -sp->beta, -localAlpha, newDepth, sp->ply+1, threadID); else assert(thread_should_stop(threadID)); - - Threads[threadID].failHighPly1 = false; } } pos.undo_move(move); @@ -2248,13 +2078,6 @@ namespace { if (value == value_mate_in(sp->ply + 1)) ss[sp->ply].mateKiller = move; } - // If we are at ply 1, and we are searching the first root move at - // ply 0, set the 'Problem' variable if the score has dropped a lot - // (from the computer's point of view) since the previous iteration. - if ( sp->ply == 1 - && Iteration >= 2 - && -value <= IterationInfo[Iteration-1].value - ProblemMargin) - Problem = true; } lock_release(&(sp->lock)); } @@ -2308,7 +2131,9 @@ namespace { RootMoveList::RootMoveList(Position& pos, Move searchMoves[]) : count(0) { + SearchStack ss[PLY_MAX_PLUS_2]; MoveStack mlist[MaxRootMoves]; + StateInfo st; bool includeAllMoves = (searchMoves[0] == MOVE_NONE); // Generate all legal moves @@ -2326,16 +2151,13 @@ namespace { continue; // Find a quick score for the move - StateInfo st; - SearchStack ss[PLY_MAX_PLUS_2]; init_ss_array(ss); - + pos.do_move(cur->move, st); moves[count].move = cur->move; - pos.do_move(moves[count].move, st); moves[count].score = -qsearch(pos, ss, -VALUE_INFINITE, VALUE_INFINITE, Depth(0), 1, 0); - pos.undo_move(moves[count].move); - moves[count].pv[0] = moves[count].move; + moves[count].pv[0] = cur->move; moves[count].pv[1] = MOVE_NONE; + pos.undo_move(cur->move); count++; } sort(); @@ -2705,34 +2527,6 @@ namespace { } - // reduction_parameters() precalculates some parameters used later by reduction. Becasue - // floating point operations are involved we try to recalculate reduction at each move, but - // we do the most consuming computation only once per node. - - void reduction_parameters(float baseReduction, float reductionInhibitor, Depth depth, float& logLimit, float& gradient) - { - // Precalculate some parameters to avoid to calculate the following formula for each move: - // - // red = baseReduction + ln(moveCount) * ln(depth / 2) / reductionInhibitor; - // - logLimit = depth > OnePly ? (1 - baseReduction) * reductionInhibitor / ln(depth / 2) : 1000; - gradient = depth > OnePly ? ln(depth / 2) / reductionInhibitor : 0; - } - - - // reduction() returns reduction in plies based on moveCount and depth. - // Reduction is always at least one ply. - - Depth reduction(int moveCount, float logLimit, float baseReduction, float gradient) { - - if (ln(moveCount) < logLimit) - return Depth(0); - - float red = baseReduction + ln(moveCount) * gradient; - return Depth(int(floor(red * int(OnePly)))); - } - - // update_history() registers a good move that produced a beta-cutoff // in history and marks as failures all the other moves of that ply. @@ -2781,21 +2575,7 @@ namespace { && pos.captured_piece() == NO_PIECE_TYPE && !move_is_castle(m) && !move_is_promotion(m)) - H.set_gain(pos.piece_on(move_to(m)), move_from(m), move_to(m), -(before + after)); - } - - - // fail_high_ply_1() checks if some thread is currently resolving a fail - // high at ply 1 at the node below the first root node. This information - // is used for time management. 
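
Since update_gains() above now keys the gain table by the moved piece and destination square only, the sign convention it relies on may be worth spelling out. Both evaluations are taken from the side to move at their own ply, so `before` is seen by the mover and `after` by the opponent one ply later; the mover's own gain is therefore -after - before. The helper below only illustrates that arithmetic and is not the History API.

// Worked example: before = +20 (mover slightly better); after = -35 from the opponent's
// point of view, i.e. the mover improved to +35; gain = -(20 + (-35)) = +15.
inline int move_gain(int evalBeforeMoverPOV, int evalAfterOpponentPOV) {
    return -(evalBeforeMoverPOV + evalAfterOpponentPOV);
}
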
- - bool fail_high_ply_1() { - - for (int i = 0; i < ActiveThreads; i++) - if (Threads[i].failHighPly1) - return true; - - return false; + H.set_gain(pos.piece_on(move_to(m)), move_to(m), -(before + after)); } @@ -2885,18 +2665,11 @@ namespace { return; bool stillAtFirstMove = RootMoveNumber == 1 - && !FailLow + && !AspirationFailLow && t > MaxSearchTime + ExtraSearchTime; - bool noProblemFound = !FailHigh - && !FailLow - && !fail_high_ply_1() - && !Problem - && t > 6 * (MaxSearchTime + ExtraSearchTime); - bool noMoreTime = t > AbsoluteMaxSearchTime - || stillAtFirstMove //FIXME: We are not checking any problem flags, BUG? - || noProblemFound; + || stillAtFirstMove; if ( (Iteration >= 3 && UseTimeManagement && noMoreTime) || (ExactMaxTime && t >= ExactMaxTime) @@ -2915,18 +2688,11 @@ namespace { PonderSearch = false; bool stillAtFirstMove = RootMoveNumber == 1 - && !FailLow + && !AspirationFailLow && t > MaxSearchTime + ExtraSearchTime; - bool noProblemFound = !FailHigh - && !FailLow - && !fail_high_ply_1() - && !Problem - && t > 6 * (MaxSearchTime + ExtraSearchTime); - bool noMoreTime = t > AbsoluteMaxSearchTime - || stillAtFirstMove - || noProblemFound; + || stillAtFirstMove; if (Iteration >= 3 && UseTimeManagement && (noMoreTime || StopOnPonderhit)) AbortSearch = true; @@ -3242,7 +3008,7 @@ namespace { for (int i = 0; i < ActiveThreads; i++) if (i == master || splitPoint->slaves[i]) { - memcpy(splitPoint->sstack[i] + ply - 1, sstck + ply - 1, 3 * sizeof(SearchStack)); + memcpy(splitPoint->sstack[i] + ply - 1, sstck + ply - 1, 4 * sizeof(SearchStack)); Threads[i].workIsWaiting = true; // This makes the slave to exit from idle_loop() }
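
With the Problem/FailHigh bookkeeping removed, the stop test shared by poll() and ponderhit() reduces to the predicate below. Parameter names mirror the globals in the patch and times are in milliseconds; this is only a restatement for readability, not engine code.

bool no_more_time(int t, int maxSearchTime, int extraSearchTime,
                  int absoluteMaxSearchTime, int rootMoveNumber, bool aspirationFailLow) {
    bool stillAtFirstMove =    rootMoveNumber == 1
                            && !aspirationFailLow
                            && t > maxSearchTime + extraSearchTime;

    return t > absoluteMaxSearchTime || stillAtFirstMove;
}
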