// Early-stop margin. To stop the search early, a reduced-depth search
// that excludes the best move must confirm the best move is at least
// this much better than all the alternatives.
- const Value EasyMoveMargin = Value(0x200);
+ const Value EasyMoveMargin = Value(0x150);
/// Namespace variables
Value bestValues[PLY_MAX_PLUS_2];
int bestMoveChanges[PLY_MAX_PLUS_2];
int depth, aspirationDelta;
- Value value, alpha, beta;
- Move bestMove, easyMove, skillBest, skillPonder;
+ Value bestValue, alpha, beta;
+ Move bestMove, skillBest, skillPonder;
+ bool bestMoveNeverChanged = true;
// Initialize stuff before a new search
memset(ss, 0, 4 * sizeof(SearchStack));
TT.new_search();
H.clear();
- *ponderMove = bestMove = easyMove = skillBest = skillPonder = MOVE_NONE;
+ *ponderMove = bestMove = skillBest = skillPonder = MOVE_NONE;
depth = aspirationDelta = 0;
- value = alpha = -VALUE_INFINITE, beta = VALUE_INFINITE;
+ bestValue = alpha = -VALUE_INFINITE, beta = VALUE_INFINITE;
ss->currentMove = MOVE_NULL; // Hack to skip update gains
// Moves to search are verified and copied
do {
// Search starts from ss+1 to allow referencing (ss-1). This is
// needed by update gains and ss copy when splitting at Root.
- value = search<Root>(pos, ss+1, alpha, beta, depth * ONE_PLY);
+ bestValue = search<Root>(pos, ss+1, alpha, beta, depth * ONE_PLY);
// Bring to front the best move. It is critical that sorting is
// done with a stable algorithm so that equal-valued moves keep
// their relative order. In case we have found an exact score and
// we are going to leave the fail high/low loop then reorder the
// PV moves, otherwise leave the last PV move in its position so
// to be searched again. Of course this is needed only in MultiPV
// search.
- if (MultiPVIdx && value > alpha && value < beta)
+ if (MultiPVIdx && bestValue > alpha && bestValue < beta)
sort<RootMove>(Rml.begin(), Rml.begin() + MultiPVIdx);
// Send full PV info to GUI if we are going to leave the loop or
// if we have a fail high/low and we are deep in the search. UCI
// protocol requires to send all the PV lines also if they are still
// to be searched, and so they refer to the previous search's score.
- if ((value > alpha && value < beta) || elapsed_search_time() > 2000)
+ if ((bestValue > alpha && bestValue < beta) || elapsed_search_time() > 2000)
for (int i = 0; i < std::min(UCIMultiPV, (int)Rml.size()); i++)
{
bool updated = (i <= MultiPVIdx);
// In case of failing high/low increase aspiration window and
// research, otherwise exit the fail high/low loop.
- if (value >= beta)
+ if (bestValue >= beta)
{
beta = std::min(beta + aspirationDelta, VALUE_INFINITE);
aspirationDelta += aspirationDelta / 2;
}
- else if (value <= alpha)
+ else if (bestValue <= alpha)
{
AspirationFailLow = true;
StopOnPonderhit = false;
else
break;
- } while (abs(value) < VALUE_KNOWN_WIN);
+ } while (abs(bestValue) < VALUE_KNOWN_WIN);
}
// Collect info about search result
bestMove = Rml[0].pv[0];
*ponderMove = Rml[0].pv[1];
- bestValues[depth] = value;
+ bestValues[depth] = bestValue;
bestMoveChanges[depth] = Rml.bestMoveChanges;
// Skills: Do we need to pick now the best and the ponder moves ?
if (Options["Use Search Log"].value<bool>())
{
Log log(Options["Search Log Filename"].value<string>());
- log << pretty_pv(pos, depth, value, elapsed_search_time(), &Rml[0].pv[0]) << endl;
+ log << pretty_pv(pos, depth, bestValue, elapsed_search_time(), &Rml[0].pv[0]) << endl;
}
- // Init easyMove at first iteration or drop it if differs from the best move
- if (depth == 1 && (Rml.size() == 1 || Rml[0].score > Rml[1].score + EasyMoveMargin))
- easyMove = bestMove;
- else if (bestMove != easyMove)
- easyMove = MOVE_NONE;
+ // Filter out startup noise when monitoring best move stability
+ if (depth > 2 && bestMoveChanges[depth])
+ bestMoveNeverChanged = false;
// Check for some early stop condition
if (!StopRequest && Limits.useTimeManagement())
{
- // Easy move: Stop search early if one move seems to be much better
- // than the others or if there is only a single legal move. Also in
- // the latter case search to some depth anyway to get a proper score.
- if ( depth >= 7
- && easyMove == bestMove
- && ( Rml.size() == 1
- ||( Rml[0].nodes > (pos.nodes_searched() * 85) / 100
- && elapsed_search_time() > TimeMgr.available_time() / 16)
- ||( Rml[0].nodes > (pos.nodes_searched() * 98) / 100
- && elapsed_search_time() > TimeMgr.available_time() / 32)))
- StopRequest = true;
-
// Take in account some extra time if the best move has changed
if (depth > 4 && depth < 50)
TimeMgr.pv_instability(bestMoveChanges[depth], bestMoveChanges[depth - 1]);
if (elapsed_search_time() > (TimeMgr.available_time() * 62) / 100)
StopRequest = true;
+ // Stop search early if one move seems to be much better than others
+ if ( depth >= 10
+ && !StopRequest
+ && ( bestMoveNeverChanged
+ || elapsed_search_time() > (TimeMgr.available_time() * 40) / 100))
+ {
+ Value rBeta = bestValue - EasyMoveMargin;
+ (ss+1)->excludedMove = bestMove;
+ (ss+1)->skipNullMove = true;
+ Value v = search<NonPV>(pos, ss+1, rBeta - 1, rBeta, (depth * ONE_PLY) / 2);
+ (ss+1)->skipNullMove = false;
+ (ss+1)->excludedMove = MOVE_NONE;
+
+ if (v < rBeta)
+ StopRequest = true;
+ }
+
// If we are allowed to ponder do not stop the search now but keep pondering
if (StopRequest && Limits.ponder)
{
<< " currmovenumber " << moveCount + MultiPVIdx << endl;
}
- // At Root and at first iteration do a PV search on all the moves to score root moves
- isPvMove = (PvNode && moveCount <= (RootNode && depth <= ONE_PLY ? MAX_MOVES : 1));
+ isPvMove = (PvNode && moveCount <= 1);
givesCheck = pos.move_gives_check(move, ci);
captureOrPromotion = pos.is_capture_or_promotion(move);