/// Types
- // IterationInfoType stores search results for each iteration
- //
- // Because we use relatively small (dynamic) aspiration window,
- // there happens many fail highs and fail lows in root. And
- // because we don't do researches in those cases, "value" stored
- // here is not necessarily exact. Instead in case of fail high/low
- // we guess what the right value might be and store our guess
- // as a "speculated value" and then move on. Speculated values are
- // used just to calculate aspiration window width, so it is
- // not a big problem if they are not exact.
-
- struct IterationInfoType {
-
- IterationInfoType(Value v = Value(0), Value sv = Value(0))
- : value(v), speculatedValue(sv) {}
-
- Value value, speculatedValue;
- };
-
-
// The BetaCounterType class is used to order moves at ply one.
// Apart from the first one, which has its score, the following moves
// normally have score -VALUE_INFINITE, so are ordered according
BetaCounterType BetaCounter;
// Scores and number of times the best move changed for each iteration
- IterationInfoType IterationInfo[PLY_MAX_PLUS_2];
+ Value ValueByIteration[PLY_MAX_PLUS_2];
int BestMoveChangesByIteration[PLY_MAX_PLUS_2];
// Search window management
std::ofstream LogFile;
// Natural logarithmic lookup table and its getter function
- double lnArray[512];
- inline double ln(int i) { return lnArray[i]; }
+ float lnArray[512];
+ inline float ln(int i) { return lnArray[i]; }
// MP related variables
int ActiveThreads = 1;
bool ok_to_prune(const Position& pos, Move m, Move threat);
bool ok_to_use_TT(const TTEntry* tte, Depth depth, Value beta, int ply);
Value refine_eval(const TTEntry* tte, Value defaultEval, int ply);
- void reduction_parameters(double base, double Inhibitor, Depth depth, double& logLimit, double& gradient);
- Depth reduction(int moveCount, const double LogLimit, const double BaseRed, const double Gradient);
+ void reduction_parameters(float base, float Inhibitor, Depth depth, float& logLimit, float& gradient);
+ Depth reduction(int moveCount, const float LogLimit, const float BaseRed, const float Gradient);
void update_history(const Position& pos, Move move, Depth depth, Move movesSearched[], int moveCount);
void update_killers(Move m, SearchStack& ss);
void update_gains(const Position& pos, Move move, Value before, Value after);
// We're ready to start thinking. Call the iterative deepening loop function
Value v = id_loop(pos, searchMoves);
-
if (UseLSNFiltering)
{
// Step 1. If this is sudden death game and our position is hopeless,
// Init our logarithmic lookup table
for (i = 0; i < 512; i++)
- lnArray[i] = log(double(i)); // log() returns base-e logarithm
+ lnArray[i] = float(log(double(i))); // log() returns base-e logarithm
for (i = 0; i < THREAD_MAX; i++)
Threads[i].activeSplitPoints = 0;
// searchMoves are verified, copied, scored and sorted
RootMoveList rml(p, searchMoves);
+ // Handle special case of searching on a mate/stale position
if (rml.move_count() == 0)
{
if (PonderSearch)
TT.new_search();
H.clear();
init_ss_array(ss);
- IterationInfo[1] = IterationInfoType(rml.get_move_score(0), rml.get_move_score(0));
+ ValueByIteration[1] = rml.get_move_score(0);
Iteration = 1;
// Is one move significantly better than others after initial scoring ?
// Calculate dynamic search window based on previous iterations
Value alpha, beta;
- if (MultiPV == 1 && Iteration >= 6 && abs(IterationInfo[Iteration - 1].value) < VALUE_KNOWN_WIN)
+ if (MultiPV == 1 && Iteration >= 6 && abs(ValueByIteration[Iteration - 1]) < VALUE_KNOWN_WIN)
{
- int prevDelta1 = IterationInfo[Iteration - 1].speculatedValue - IterationInfo[Iteration - 2].speculatedValue;
- int prevDelta2 = IterationInfo[Iteration - 2].speculatedValue - IterationInfo[Iteration - 3].speculatedValue;
-
- int delta = Max(abs(prevDelta1) + abs(prevDelta2) / 2, 16);
+ int prevDelta1 = ValueByIteration[Iteration - 1] - ValueByIteration[Iteration - 2];
+ int prevDelta2 = ValueByIteration[Iteration - 2] - ValueByIteration[Iteration - 3];
- delta = (delta + 7) / 8 * 8; // Round to match grainSize
- AspirationDelta = delta;
+ AspirationDelta = Max(abs(prevDelta1) + abs(prevDelta2) / 2, 16);
+ AspirationDelta = (AspirationDelta + 7) / 8 * 8; // Round to match grainSize
- alpha = Max(IterationInfo[Iteration - 1].value - delta, -VALUE_INFINITE);
- beta = Min(IterationInfo[Iteration - 1].value + delta, VALUE_INFINITE);
+ alpha = Max(ValueByIteration[Iteration - 1] - AspirationDelta, -VALUE_INFINITE);
+ beta = Min(ValueByIteration[Iteration - 1] + AspirationDelta, VALUE_INFINITE);
}
else
{
break; // Value cannot be trusted. Break out immediately!
//Save info about search result
- Value speculatedValue;
- bool fHigh = false;
- bool fLow = false;
- Value delta = value - IterationInfo[Iteration - 1].value;
-
- if (value >= beta)
- {
- assert(delta > 0);
-
- fHigh = true;
- speculatedValue = value + delta;
- BestMoveChangesByIteration[Iteration] += 2; // Allocate more time
- }
- else if (value <= alpha)
- {
- assert(value == alpha);
- assert(delta < 0);
-
- fLow = true;
- speculatedValue = value + delta;
- BestMoveChangesByIteration[Iteration] += 3; // Allocate more time
- } else
- speculatedValue = value;
-
- speculatedValue = Min(Max(speculatedValue, -VALUE_INFINITE), VALUE_INFINITE);
- IterationInfo[Iteration] = IterationInfoType(value, speculatedValue);
+ ValueByIteration[Iteration] = value;
// Drop the easy move if it differs from the new best move
if (ss[0].pv[0] != EasyMove)
// Stop search early when the last two iterations returned a mate score
if ( Iteration >= 6
- && abs(IterationInfo[Iteration].value) >= abs(VALUE_MATE) - 100
- && abs(IterationInfo[Iteration-1].value) >= abs(VALUE_MATE) - 100)
+ && abs(ValueByIteration[Iteration]) >= abs(VALUE_MATE) - 100
+ && abs(ValueByIteration[Iteration-1]) >= abs(VALUE_MATE) - 100)
stopSearch = true;
// Stop search early if one move seems to be much better than the rest
int64_t nodes = nodes_searched();
if ( Iteration >= 8
- && !fLow
- && !fHigh
&& EasyMove == ss[0].pv[0]
&& ( ( rml.get_move_cumulative_nodes(0) > (nodes * 85) / 100
&& current_search_time() > MaxSearchTime / 16)
Value root_search(Position& pos, SearchStack ss[], RootMoveList& rml, Value& oldAlpha, Value& beta) {
- Value alpha = oldAlpha;
+ int64_t nodes;
+ Move move;
+ StateInfo st;
+ Depth depth, ext, newDepth;
Value value;
CheckInfo ci(pos);
int researchCount = 0;
+ bool moveIsCheck, captureOrPromotion, dangerous;
+ Value alpha = oldAlpha;
bool isCheck = pos.is_check();
// Evaluate the position statically
EvalInfo ei;
- if (!isCheck)
- ss[0].eval = evaluate(pos, ei, 0);
- else
- ss[0].eval = VALUE_NONE;
+ ss[0].eval = !isCheck ? evaluate(pos, ei, 0) : VALUE_NONE;
- while(1) // Fail low loop
+ while (1) // Fail low loop
{
// Loop through all the moves in the root move list
rml.set_move_score(i, -VALUE_INFINITE);
continue;
}
- int64_t nodes;
- Move move;
- StateInfo st;
- Depth depth, ext, newDepth;
RootMoveNumber = i + 1;
FailHigh = false;
<< " currmovenumber " << RootMoveNumber << endl;
// Decide search depth for this move
- bool moveIsCheck = pos.move_is_check(move);
- bool captureOrPromotion = pos.move_is_capture_or_promotion(move);
- bool dangerous;
- depth = (Iteration - 2) * OnePly + InitialDepth;
+ moveIsCheck = pos.move_is_check(move);
+ captureOrPromotion = pos.move_is_capture_or_promotion(move);
+ depth = (Iteration - 2) * OnePly + InitialDepth;
ext = extension(pos, move, true, captureOrPromotion, moveIsCheck, false, false, &dangerous);
newDepth = depth + ext;
value = - VALUE_INFINITE;
// Precalculate reduction parameters
- double LogLimit, Gradient, BaseReduction = 0.5;
+ float LogLimit, Gradient, BaseReduction = 0.5;
reduction_parameters(BaseReduction, 6.0, depth, LogLimit, Gradient);
while (1) // Fail high loop
// for time managment: When Problem is true, we try to complete the
// current iteration before playing a move.
Problem = ( Iteration >= 2
- && value <= IterationInfo[Iteration - 1].value - ProblemMargin);
+ && value <= ValueByIteration[Iteration - 1] - ProblemMargin);
if (Problem && StopOnPonderhit)
StopOnPonderhit = false;
pos.undo_move(move);
+ // Can we exit fail high loop ?
if (AbortSearch || value < beta)
- break; // We are not failing high
+ break;
// We are failing high and going to do a research. It's important to update score
// before research in case we run out of time while researching.
nodes_searched(), value, type, ss[0].pv) << endl;
}
- // Prepare for research
+ // Prepare for a research after a fail high, each time with a wider window
researchCount++;
beta = Min(beta + AspirationDelta * (1 << researchCount), VALUE_INFINITE);
// Reset the global variable Problem to false if the value isn't too
// far below the final value from the last iteration.
- if (value > IterationInfo[Iteration - 1].value - NoProblemMargin)
+ if (value > ValueByIteration[Iteration - 1] - NoProblemMargin)
Problem = false;
}
else // MultiPV > 1
FailLow = (alpha == oldAlpha);
}
+ // Can we exit fail low loop ?
if (AbortSearch || alpha > oldAlpha)
- break; // End search, we are not failing low
+ break;
- // Prepare for research
+ // Prepare for a research after a fail low, each time with a wider window
researchCount++;
alpha = Max(alpha - AspirationDelta * (1 << researchCount), -VALUE_INFINITE);
oldAlpha = alpha;
MovePicker mp = MovePicker(pos, ttMove, depth, H, &ss[ply]);
// Precalculate reduction parameters
- double LogLimit, Gradient, BaseReduction = 0.5;
+ float LogLimit, Gradient, BaseReduction = 0.5;
reduction_parameters(BaseReduction, 6.0, depth, LogLimit, Gradient);
// Loop through all legal moves until no moves remain or a beta cutoff
&& !captureOrPromotion
&& !move_is_castle(move)
&& !move_is_killer(move, ss[ply]))
- {
+ {
ss[ply].reduction = reduction(moveCount, LogLimit, BaseReduction, Gradient);
if (ss[ply].reduction)
{
// (from the computer's point of view) since the previous iteration.
if ( ply == 1
&& Iteration >= 2
- && -value <= IterationInfo[Iteration-1].value - ProblemMargin)
+ && -value <= ValueByIteration[Iteration-1] - ProblemMargin)
Problem = true;
}
update_gains(pos, ss[ply - 1].currentMove, ss[ply - 1].eval, ss[ply].eval);
}
- // Do a "stand pat". If we are above beta by a good margin then
- // return immediately.
- // FIXME: test with added condition 'allowNullmove || depth <= OnePly' and !value_is_mate(beta)
- // FIXME: test with modified condition 'depth < RazorDepth'
+ // Static null move pruning. We're betting that the opponent doesn't have
+ // a move that will reduce the score by more than FutilityMargins[int(depth)]
+ // if we do a null move.
if ( !isCheck
- && depth < SelectiveDepth
+ && allowNullmove
+ && depth < RazorDepth
&& staticValue - FutilityMargins[int(depth)] >= beta)
return staticValue - FutilityMargins[int(depth)];
CheckInfo ci(pos);
// Precalculate reduction parameters
- double LogLimit, Gradient, BaseReduction = 0.5;
+ float LogLimit, Gradient, BaseReduction = 0.5;
reduction_parameters(BaseReduction, 3.0, depth, LogLimit, Gradient);
// Loop through all legal moves until no moves remain or a beta cutoff occurs
// Update current move
movesSearched[moveCount++] = ss[ply].currentMove = move;
- // Futility pruning for captures
- // FIXME: test disabling 'Futility pruning for captures'
- // FIXME: test with 'newDepth < RazorDepth'
- Color them = opposite_color(pos.side_to_move());
-
- if ( !isCheck
- && newDepth < SelectiveDepth
- && !dangerous
- && pos.move_is_capture(move)
- && !pos.move_is_check(move, ci)
- && !move_is_promotion(move)
- && move != ttMove
- && !move_is_ep(move)
- && (pos.type_of_piece_on(move_to(move)) != PAWN || !pos.pawn_is_passed(them, move_to(move)))) // Do not prune passed pawn captures
- {
- int preFutilityValueMargin = 0;
-
- if (newDepth >= OnePly)
- preFutilityValueMargin = FutilityMargins[int(newDepth)];
-
- Value futilityCaptureValue = ss[ply].eval + pos.endgame_value_of_piece_on(move_to(move)) + preFutilityValueMargin + ei.futilityMargin + 90;
-
- if (futilityCaptureValue < beta)
- {
- if (futilityCaptureValue > bestValue)
- bestValue = futilityCaptureValue;
- continue;
- }
- }
-
// Futility pruning
if ( !isCheck
&& !dangerous
const TTEntry* tte = NULL;
int moveCount = 0;
bool pvNode = (beta - alpha != 1);
+ Value oldAlpha = alpha;
// Initialize, and make an early exit in case of an aborted search,
// an instant draw, maximum ply reached, etc.
if (bestValue >= beta)
{
// Store the score to avoid a future costly evaluation() call
- if (!isCheck && !tte && ei.futilityMargin == 0)
+ if (!isCheck && !tte && ei.futilityMargin[pos.side_to_move()] == 0)
TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_EV_LO, Depth(-127*OnePly), MOVE_NONE);
return bestValue;
MovePicker mp = MovePicker(pos, ttMove, deepChecks ? Depth(0) : depth, H);
CheckInfo ci(pos);
enoughMaterial = pos.non_pawn_material(pos.side_to_move()) > RookValueMidgame;
- futilityBase = staticValue + FutilityMarginQS + ei.futilityMargin;
+ futilityBase = staticValue + FutilityMarginQS + ei.futilityMargin[pos.side_to_move()];
// Loop through the moves until no moves remain or a beta cutoff
// occurs.
// Update transposition table
Depth d = (depth == Depth(0) ? Depth(0) : Depth(-1));
- if (bestValue < beta)
+ if (bestValue <= oldAlpha)
{
// If bestValue isn't changed it means it is still the static evaluation
// of the node, so keep this info to avoid a future evaluation() call.
- ValueType type = (bestValue == staticValue && !ei.futilityMargin ? VALUE_TYPE_EV_UP : VALUE_TYPE_UPPER);
+ ValueType type = (bestValue == staticValue && !ei.futilityMargin[pos.side_to_move()] ? VALUE_TYPE_EV_UP : VALUE_TYPE_UPPER);
TT.store(pos.get_key(), value_to_tt(bestValue, ply), type, d, MOVE_NONE);
}
- else
+ else if (bestValue >= beta)
{
move = ss[ply].pv[ply];
TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_LOWER, d, move);
if (!pos.move_is_capture_or_promotion(move))
update_killers(move, ss[ply]);
}
+ else
+ TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_EXACT, d, ss[ply].pv[ply]);
assert(bestValue > -VALUE_INFINITE && bestValue < VALUE_INFINITE);
const int FutilityMoveCountMargin = 3 + (1 << (3 * int(sp->depth) / 8));
// Precalculate reduction parameters
- double LogLimit, Gradient, BaseReduction = 0.5;
+ float LogLimit, Gradient, BaseReduction = 0.5;
reduction_parameters(BaseReduction, 3.0, sp->depth, LogLimit, Gradient);
while ( lock_grab_bool(&(sp->lock))
Move move;
// Precalculate reduction parameters
- double LogLimit, Gradient, BaseReduction = 0.5;
+ float LogLimit, Gradient, BaseReduction = 0.5;
reduction_parameters(BaseReduction, 6.0, sp->depth, LogLimit, Gradient);
while ( lock_grab_bool(&(sp->lock))
// (from the computer's point of view) since the previous iteration.
if ( sp->ply == 1
&& Iteration >= 2
- && -value <= IterationInfo[Iteration-1].value - ProblemMargin)
+ && -value <= ValueByIteration[Iteration-1] - ProblemMargin)
Problem = true;
}
lock_release(&(sp->lock));
RootMoveList::RootMoveList(Position& pos, Move searchMoves[]) : count(0) {
+ SearchStack ss[PLY_MAX_PLUS_2];
MoveStack mlist[MaxRootMoves];
+ StateInfo st;
bool includeAllMoves = (searchMoves[0] == MOVE_NONE);
// Generate all legal moves
continue;
// Find a quick score for the move
- StateInfo st;
- SearchStack ss[PLY_MAX_PLUS_2];
init_ss_array(ss);
-
+ pos.do_move(cur->move, st);
moves[count].move = cur->move;
- pos.do_move(moves[count].move, st);
moves[count].score = -qsearch(pos, ss, -VALUE_INFINITE, VALUE_INFINITE, Depth(0), 1, 0);
- pos.undo_move(moves[count].move);
- moves[count].pv[0] = moves[count].move;
+ moves[count].pv[0] = cur->move;
moves[count].pv[1] = MOVE_NONE;
+ pos.undo_move(cur->move);
count++;
}
sort();
// floating point operations are involved we try to recalculate reduction at each move, but
// we do the most consuming computation only once per node.
- void reduction_parameters(double baseReduction, double reductionInhibitor, Depth depth, double& logLimit, double& gradient)
+ void reduction_parameters(float baseReduction, float reductionInhibitor, Depth depth, float& logLimit, float& gradient)
{
// Precalculate some parameters to avoid to calculate the following formula for each move:
//
// red = baseReduction + ln(moveCount) * ln(depth / 2) / reductionInhibitor;
//
- logLimit = depth > OnePly ? (1.0 - baseReduction) * reductionInhibitor / ln(depth / 2) : 1000.0;
- gradient = depth > OnePly ? ln(depth / 2) / reductionInhibitor : 0.0;
+ logLimit = depth > OnePly ? (1 - baseReduction) * reductionInhibitor / ln(depth / 2) : 1000;
+ gradient = depth > OnePly ? ln(depth / 2) / reductionInhibitor : 0;
}
// reduction() returns reduction in plies based on moveCount and depth.
// Reduction is always at least one ply.
- Depth reduction(int moveCount, double logLimit, double baseReduction, double gradient) {
+ Depth reduction(int moveCount, float logLimit, float baseReduction, float gradient) {
if (ln(moveCount) < logLimit)
return Depth(0);
- double red = baseReduction + ln(moveCount) * gradient;
+ float red = baseReduction + ln(moveCount) * gradient;
return Depth(int(floor(red * int(OnePly))));
}