/// Types
- // IterationInfoType stores search results for each iteration
- //
- // Because we use relatively small (dynamic) aspiration window,
- // there happens many fail highs and fail lows in root. And
- // because we don't do researches in those cases, "value" stored
- // here is not necessarily exact. Instead in case of fail high/low
- // we guess what the right value might be and store our guess
- // as a "speculated value" and then move on. Speculated values are
- // used just to calculate aspiration window width, so also if are
- // not exact is not big a problem.
-
- struct IterationInfoType {
-
- IterationInfoType(Value v = Value(0), Value sv = Value(0))
- : value(v), speculatedValue(sv) {}
-
- Value value, speculatedValue;
- };
-
-
// The BetaCounterType class is used to order moves at ply one.
// Apart for the first one that has its score, following moves
// normally have score -VALUE_INFINITE, so are ordered according
BetaCounterType BetaCounter;
// Scores and number of times the best move changed for each iteration
- IterationInfoType IterationInfo[PLY_MAX_PLUS_2];
+ Value ValueByIteration[PLY_MAX_PLUS_2];
int BestMoveChangesByIteration[PLY_MAX_PLUS_2];
// Search window management
// We're ready to start thinking. Call the iterative deepening loop function
Value v = id_loop(pos, searchMoves);
-
if (UseLSNFiltering)
{
// Step 1. If this is sudden death game and our position is hopeless,
// searchMoves are verified, copied, scored and sorted
RootMoveList rml(p, searchMoves);
+ // Handle special case of searching on a mate/stalemate position
if (rml.move_count() == 0)
{
if (PonderSearch)
TT.new_search();
H.clear();
init_ss_array(ss);
- IterationInfo[1] = IterationInfoType(rml.get_move_score(0), rml.get_move_score(0));
+ ValueByIteration[1] = rml.get_move_score(0);
Iteration = 1;
// Is one move significantly better than others after initial scoring ?
// Calculate dynamic search window based on previous iterations
Value alpha, beta;
- if (MultiPV == 1 && Iteration >= 6 && abs(IterationInfo[Iteration - 1].value) < VALUE_KNOWN_WIN)
+ if (MultiPV == 1 && Iteration >= 6 && abs(ValueByIteration[Iteration - 1]) < VALUE_KNOWN_WIN)
{
- int prevDelta1 = IterationInfo[Iteration - 1].speculatedValue - IterationInfo[Iteration - 2].speculatedValue;
- int prevDelta2 = IterationInfo[Iteration - 2].speculatedValue - IterationInfo[Iteration - 3].speculatedValue;
+ int prevDelta1 = ValueByIteration[Iteration - 1] - ValueByIteration[Iteration - 2];
+ int prevDelta2 = ValueByIteration[Iteration - 2] - ValueByIteration[Iteration - 3];
- int delta = Max(abs(prevDelta1) + abs(prevDelta2) / 2, 16);
+ AspirationDelta = Max(abs(prevDelta1) + abs(prevDelta2) / 2, 16);
+ AspirationDelta = (AspirationDelta + 7) / 8 * 8; // Round to match grainSize
- delta = (delta + 7) / 8 * 8; // Round to match grainSize
- AspirationDelta = delta;
-
- alpha = Max(IterationInfo[Iteration - 1].value - delta, -VALUE_INFINITE);
- beta = Min(IterationInfo[Iteration - 1].value + delta, VALUE_INFINITE);
+ alpha = Max(ValueByIteration[Iteration - 1] - AspirationDelta, -VALUE_INFINITE);
+ beta = Min(ValueByIteration[Iteration - 1] + AspirationDelta, VALUE_INFINITE);
}
else
{
break; // Value cannot be trusted. Break out immediately!
//Save info about search result
- Value speculatedValue;
- bool fHigh = false;
- bool fLow = false;
- Value delta = value - IterationInfo[Iteration - 1].value;
-
- if (value >= beta)
- {
- assert(delta > 0);
-
- fHigh = true;
- speculatedValue = value + delta;
- BestMoveChangesByIteration[Iteration] += 2; // Allocate more time
- }
- else if (value <= alpha)
- {
- assert(value == alpha);
- assert(delta < 0);
-
- fLow = true;
- speculatedValue = value + delta;
- BestMoveChangesByIteration[Iteration] += 3; // Allocate more time
- } else
- speculatedValue = value;
-
- speculatedValue = Min(Max(speculatedValue, -VALUE_INFINITE), VALUE_INFINITE);
- IterationInfo[Iteration] = IterationInfoType(value, speculatedValue);
+ ValueByIteration[Iteration] = value;
// Drop the easy move if it differs from the new best move
if (ss[0].pv[0] != EasyMove)
// Stop search early when the last two iterations returned a mate score
if ( Iteration >= 6
- && abs(IterationInfo[Iteration].value) >= abs(VALUE_MATE) - 100
- && abs(IterationInfo[Iteration-1].value) >= abs(VALUE_MATE) - 100)
+ && abs(ValueByIteration[Iteration]) >= abs(VALUE_MATE) - 100
+ && abs(ValueByIteration[Iteration-1]) >= abs(VALUE_MATE) - 100)
stopSearch = true;
// Stop search early if one move seems to be much better than the rest
int64_t nodes = nodes_searched();
if ( Iteration >= 8
- && !fLow
- && !fHigh
&& EasyMove == ss[0].pv[0]
&& ( ( rml.get_move_cumulative_nodes(0) > (nodes * 85) / 100
&& current_search_time() > MaxSearchTime / 16)
Value root_search(Position& pos, SearchStack ss[], RootMoveList& rml, Value& oldAlpha, Value& beta) {
- Value alpha = oldAlpha;
+ int64_t nodes;
+ Move move;
+ StateInfo st;
+ Depth depth, ext, newDepth;
Value value;
CheckInfo ci(pos);
int researchCount = 0;
+ bool moveIsCheck, captureOrPromotion, dangerous;
+ Value alpha = oldAlpha;
bool isCheck = pos.is_check();
// Evaluate the position statically
EvalInfo ei;
- if (!isCheck)
- ss[0].eval = evaluate(pos, ei, 0);
- else
- ss[0].eval = VALUE_NONE;
+ ss[0].eval = !isCheck ? evaluate(pos, ei, 0) : VALUE_NONE;
- while(1) // Fail low loop
+ while (1) // Fail low loop
{
// Loop through all the moves in the root move list
rml.set_move_score(i, -VALUE_INFINITE);
continue;
}
- int64_t nodes;
- Move move;
- StateInfo st;
- Depth depth, ext, newDepth;
RootMoveNumber = i + 1;
FailHigh = false;
<< " currmovenumber " << RootMoveNumber << endl;
// Decide search depth for this move
- bool moveIsCheck = pos.move_is_check(move);
- bool captureOrPromotion = pos.move_is_capture_or_promotion(move);
- bool dangerous;
- depth = (Iteration - 2) * OnePly + InitialDepth;
+ moveIsCheck = pos.move_is_check(move);
+ captureOrPromotion = pos.move_is_capture_or_promotion(move);
+ depth = (Iteration - 2) * OnePly + InitialDepth;
ext = extension(pos, move, true, captureOrPromotion, moveIsCheck, false, false, &dangerous);
newDepth = depth + ext;
// for time managment: When Problem is true, we try to complete the
// current iteration before playing a move.
Problem = ( Iteration >= 2
- && value <= IterationInfo[Iteration - 1].value - ProblemMargin);
+ && value <= ValueByIteration[Iteration - 1] - ProblemMargin);
if (Problem && StopOnPonderhit)
StopOnPonderhit = false;
pos.undo_move(move);
+ // Can we exit fail high loop ?
if (AbortSearch || value < beta)
- break; // We are not failing high
+ break;
// We are failing high and going to do a research. It's important to update score
// before research in case we run out of time while researching.
nodes_searched(), value, type, ss[0].pv) << endl;
}
- // Prepare for research
+ // Prepare for a research after a fail high, each time with a wider window
researchCount++;
beta = Min(beta + AspirationDelta * (1 << researchCount), VALUE_INFINITE);
// Reset the global variable Problem to false if the value isn't too
// far below the final value from the last iteration.
- if (value > IterationInfo[Iteration - 1].value - NoProblemMargin)
+ if (value > ValueByIteration[Iteration - 1] - NoProblemMargin)
Problem = false;
}
else // MultiPV > 1
FailLow = (alpha == oldAlpha);
}
+ // Can we exit fail low loop ?
if (AbortSearch || alpha > oldAlpha)
- break; // End search, we are not failing low
+ break;
- // Prepare for research
+ // Prepare for a research after a fail low, each time with a wider window
researchCount++;
alpha = Max(alpha - AspirationDelta * (1 << researchCount), -VALUE_INFINITE);
oldAlpha = alpha;
&& !captureOrPromotion
&& !move_is_castle(move)
&& !move_is_killer(move, ss[ply]))
- {
+ {
ss[ply].reduction = reduction(moveCount, LogLimit, BaseReduction, Gradient);
if (ss[ply].reduction)
{
// (from the computer's point of view) since the previous iteration.
if ( ply == 1
&& Iteration >= 2
- && -value <= IterationInfo[Iteration-1].value - ProblemMargin)
+ && -value <= ValueByIteration[Iteration-1] - ProblemMargin)
Problem = true;
}
update_gains(pos, ss[ply - 1].currentMove, ss[ply - 1].eval, ss[ply].eval);
}
- // Do a "stand pat". If we are above beta by a good margin then
- // return immediately.
+ // Static null move pruning. We're betting that the opponent doesn't have
+ // a move that will reduce the score by more than FutilityMargins[int(depth)]
+ // if we do a null move.
if ( !isCheck
&& allowNullmove
&& depth < RazorDepth
if (bestValue >= beta)
{
// Store the score to avoid a future costly evaluation() call
- if (!isCheck && !tte && ei.futilityMargin == 0)
+ if (!isCheck && !tte && ei.futilityMargin[pos.side_to_move()] == 0)
TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_EV_LO, Depth(-127*OnePly), MOVE_NONE);
return bestValue;
MovePicker mp = MovePicker(pos, ttMove, deepChecks ? Depth(0) : depth, H);
CheckInfo ci(pos);
enoughMaterial = pos.non_pawn_material(pos.side_to_move()) > RookValueMidgame;
- futilityBase = staticValue + FutilityMarginQS + ei.futilityMargin;
+ futilityBase = staticValue + FutilityMarginQS + ei.futilityMargin[pos.side_to_move()];
// Loop through the moves until no moves remain or a beta cutoff
// occurs.
{
// If bestValue isn't changed it means it is still the static evaluation
// of the node, so keep this info to avoid a future evaluation() call.
- ValueType type = (bestValue == staticValue && !ei.futilityMargin ? VALUE_TYPE_EV_UP : VALUE_TYPE_UPPER);
+ ValueType type = (bestValue == staticValue && !ei.futilityMargin[pos.side_to_move()] ? VALUE_TYPE_EV_UP : VALUE_TYPE_UPPER);
TT.store(pos.get_key(), value_to_tt(bestValue, ply), type, d, MOVE_NONE);
}
else if (bestValue >= beta)
// (from the computer's point of view) since the previous iteration.
if ( sp->ply == 1
&& Iteration >= 2
- && -value <= IterationInfo[Iteration-1].value - ProblemMargin)
+ && -value <= ValueByIteration[Iteration-1] - ProblemMargin)
Problem = true;
}
lock_release(&(sp->lock));
RootMoveList::RootMoveList(Position& pos, Move searchMoves[]) : count(0) {
+ SearchStack ss[PLY_MAX_PLUS_2];
MoveStack mlist[MaxRootMoves];
+ StateInfo st;
bool includeAllMoves = (searchMoves[0] == MOVE_NONE);
// Generate all legal moves
continue;
// Find a quick score for the move
- StateInfo st;
- SearchStack ss[PLY_MAX_PLUS_2];
init_ss_array(ss);
-
+ pos.do_move(cur->move, st);
moves[count].move = cur->move;
- pos.do_move(moves[count].move, st);
moves[count].score = -qsearch(pos, ss, -VALUE_INFINITE, VALUE_INFINITE, Depth(0), 1, 0);
- pos.undo_move(moves[count].move);
- moves[count].pv[0] = moves[count].move;
+ moves[count].pv[0] = cur->move;
moves[count].pv[1] = MOVE_NONE;
+ pos.undo_move(cur->move);
count++;
}
sort();
// reduction() returns reduction in plies based on moveCount and depth.
// Reduction is always at least one ply.
- Depth reduction(int moveCount, float logLimit, float baseReduction, float gradient) {
+ Depth reduction(int moveCount, float logLimit, float baseReduction, float gradient) {
if (ln(moveCount) < logLimit)
return Depth(0);