/// Types
- // IterationInfoType stores search results for each iteration
- //
- // Because we use relatively small (dynamic) aspiration window,
- // there happens many fail highs and fail lows in root. And
- // because we don't do researches in those cases, "value" stored
- // here is not necessarily exact. Instead in case of fail high/low
- // we guess what the right value might be and store our guess
- // as a "speculated value" and then move on. Speculated values are
- // used just to calculate aspiration window width, so also if are
- // not exact is not big a problem.
-
- struct IterationInfoType {
-
- IterationInfoType(Value v = Value(0), Value sv = Value(0))
- : value(v), speculatedValue(sv) {}
-
- Value value, speculatedValue;
- };
-
-
// The BetaCounterType class is used to order moves at ply one.
// Apart for the first one that has its score, following moves
// normally have score -VALUE_INFINITE, so are ordered according
// and near frontier nodes.
const Value FutilityMarginQS = Value(0x80);
+ Value FutilityMargins[2 * PLY_MAX_PLUS_2]; // Initialized at startup.
+
// Each move futility margin is decreased
const Value IncrementalFutilityMargin = Value(0x8);
BetaCounterType BetaCounter;
// Scores and number of times the best move changed for each iteration
- IterationInfoType IterationInfo[PLY_MAX_PLUS_2];
+ Value ValueByIteration[PLY_MAX_PLUS_2];
int BestMoveChangesByIteration[PLY_MAX_PLUS_2];
+ // Search window management
+ int AspirationDelta;
+
// MultiPV mode
int MultiPV;
std::ofstream LogFile;
// Natural logarithmic lookup table and its getter function
- double lnArray[512];
- inline double ln(int i) { return lnArray[i]; }
+ float lnArray[512];
+ inline float ln(int i) { return lnArray[i]; }
// MP related variables
int ActiveThreads = 1;
/// Functions
Value id_loop(const Position& pos, Move searchMoves[]);
- Value root_search(Position& pos, SearchStack ss[], RootMoveList& rml, Value alpha, Value beta);
+ Value root_search(Position& pos, SearchStack ss[], RootMoveList& rml, Value& oldAlpha, Value& beta);
Value search_pv(Position& pos, SearchStack ss[], Value alpha, Value beta, Depth depth, int ply, int threadID);
Value search(Position& pos, SearchStack ss[], Value beta, Depth depth, int ply, bool allowNullmove, int threadID, Move excludedMove = MOVE_NONE);
Value qsearch(Position& pos, SearchStack ss[], Value alpha, Value beta, Depth depth, int ply, int threadID);
bool ok_to_prune(const Position& pos, Move m, Move threat);
bool ok_to_use_TT(const TTEntry* tte, Depth depth, Value beta, int ply);
Value refine_eval(const TTEntry* tte, Value defaultEval, int ply);
+ void reduction_parameters(float base, float Inhibitor, Depth depth, float& logLimit, float& gradient);
+ Depth reduction(int moveCount, const float LogLimit, const float BaseRed, const float Gradient);
void update_history(const Position& pos, Move move, Depth depth, Move movesSearched[], int moveCount);
void update_killers(Move m, SearchStack& ss);
void update_gains(const Position& pos, Move move, Value before, Value after);
// We're ready to start thinking. Call the iterative deepening loop function
Value v = id_loop(pos, searchMoves);
-
if (UseLSNFiltering)
{
// Step 1. If this is sudden death game and our position is hopeless,
// Init our logarithmic lookup table
for (i = 0; i < 512; i++)
- lnArray[i] = log(double(i)); // log() returns base-e logarithm
+ lnArray[i] = float(log(double(i))); // log() returns base-e logarithm
for (i = 0; i < THREAD_MAX; i++)
Threads[i].activeSplitPoints = 0;
+ // Init futility margins array
+ FutilityMargins[0] = FutilityMargins[1] = Value(0);
+
+ for (i = 2; i < 2 * PLY_MAX_PLUS_2; i++)
+ {
+ FutilityMargins[i] = Value(112 * bitScanReverse32(i * i / 2)); // FIXME: test using log instead of BSR
+ }
+
// Initialize global locks
lock_init(&MPLock, NULL);
lock_init(&IOLock, NULL);
// searchMoves are verified, copied, scored and sorted
RootMoveList rml(p, searchMoves);
+ // Handle special case of searching on a mate/stale position
if (rml.move_count() == 0)
{
if (PonderSearch)
TT.new_search();
H.clear();
init_ss_array(ss);
- IterationInfo[1] = IterationInfoType(rml.get_move_score(0), rml.get_move_score(0));
+ ValueByIteration[1] = rml.get_move_score(0);
Iteration = 1;
// Is one move significantly better than others after initial scoring ?
// Calculate dynamic search window based on previous iterations
Value alpha, beta;
- if (MultiPV == 1 && Iteration >= 6 && abs(IterationInfo[Iteration - 1].value) < VALUE_KNOWN_WIN)
+ if (MultiPV == 1 && Iteration >= 6 && abs(ValueByIteration[Iteration - 1]) < VALUE_KNOWN_WIN)
{
- int prevDelta1 = IterationInfo[Iteration - 1].speculatedValue - IterationInfo[Iteration - 2].speculatedValue;
- int prevDelta2 = IterationInfo[Iteration - 2].speculatedValue - IterationInfo[Iteration - 3].speculatedValue;
+ int prevDelta1 = ValueByIteration[Iteration - 1] - ValueByIteration[Iteration - 2];
+ int prevDelta2 = ValueByIteration[Iteration - 2] - ValueByIteration[Iteration - 3];
- int delta = Max(2 * abs(prevDelta1) + abs(prevDelta2), ProblemMargin);
+ AspirationDelta = Max(abs(prevDelta1) + abs(prevDelta2) / 2, 16);
+ AspirationDelta = (AspirationDelta + 7) / 8 * 8; // Round to match grainSize
- alpha = Max(IterationInfo[Iteration - 1].value - delta, -VALUE_INFINITE);
- beta = Min(IterationInfo[Iteration - 1].value + delta, VALUE_INFINITE);
+ alpha = Max(ValueByIteration[Iteration - 1] - AspirationDelta, -VALUE_INFINITE);
+ beta = Min(ValueByIteration[Iteration - 1] + AspirationDelta, VALUE_INFINITE);
}
else
{
break; // Value cannot be trusted. Break out immediately!
//Save info about search result
- Value speculatedValue;
- bool fHigh = false;
- bool fLow = false;
- Value delta = value - IterationInfo[Iteration - 1].value;
-
- if (value >= beta)
- {
- assert(delta > 0);
-
- fHigh = true;
- speculatedValue = value + delta;
- BestMoveChangesByIteration[Iteration] += 2; // Allocate more time
- }
- else if (value <= alpha)
- {
- assert(value == alpha);
- assert(delta < 0);
-
- fLow = true;
- speculatedValue = value + delta;
- BestMoveChangesByIteration[Iteration] += 3; // Allocate more time
- } else
- speculatedValue = value;
-
- speculatedValue = Min(Max(speculatedValue, -VALUE_INFINITE), VALUE_INFINITE);
- IterationInfo[Iteration] = IterationInfoType(value, speculatedValue);
+ ValueByIteration[Iteration] = value;
// Drop the easy move if it differs from the new best move
if (ss[0].pv[0] != EasyMove)
// Stop search early when the last two iterations returned a mate score
if ( Iteration >= 6
- && abs(IterationInfo[Iteration].value) >= abs(VALUE_MATE) - 100
- && abs(IterationInfo[Iteration-1].value) >= abs(VALUE_MATE) - 100)
+ && abs(ValueByIteration[Iteration]) >= abs(VALUE_MATE) - 100
+ && abs(ValueByIteration[Iteration-1]) >= abs(VALUE_MATE) - 100)
stopSearch = true;
// Stop search early if one move seems to be much better than the rest
int64_t nodes = nodes_searched();
if ( Iteration >= 8
- && !fLow
- && !fHigh
&& EasyMove == ss[0].pv[0]
&& ( ( rml.get_move_cumulative_nodes(0) > (nodes * 85) / 100
&& current_search_time() > MaxSearchTime / 16)
// similar to search_pv except that it uses a different move ordering
// scheme and prints some information to the standard output.
- Value root_search(Position& pos, SearchStack ss[], RootMoveList& rml, Value alpha, Value beta) {
+ Value root_search(Position& pos, SearchStack ss[], RootMoveList& rml, Value& oldAlpha, Value& beta) {
- Value oldAlpha = alpha;
- Value value = -VALUE_INFINITE;
+ int64_t nodes;
+ Move move;
+ StateInfo st;
+ Depth depth, ext, newDepth;
+ Value value;
CheckInfo ci(pos);
+ int researchCount = 0;
+ bool moveIsCheck, captureOrPromotion, dangerous;
+ Value alpha = oldAlpha;
bool isCheck = pos.is_check();
// Evaluate the position statically
EvalInfo ei;
- if (!isCheck)
- ss[0].eval = evaluate(pos, ei, 0);
- else
- ss[0].eval = VALUE_NONE;
+ ss[0].eval = !isCheck ? evaluate(pos, ei, 0) : VALUE_NONE;
+
+ while (1) // Fail low loop
+ {
// Loop through all the moves in the root move list
for (int i = 0; i < rml.move_count() && !AbortSearch; i++)
rml.set_move_score(i, -VALUE_INFINITE);
continue;
}
- int64_t nodes;
- Move move;
- StateInfo st;
- Depth depth, ext, newDepth;
RootMoveNumber = i + 1;
FailHigh = false;
<< " currmovenumber " << RootMoveNumber << endl;
// Decide search depth for this move
- bool moveIsCheck = pos.move_is_check(move);
- bool captureOrPromotion = pos.move_is_capture_or_promotion(move);
- bool dangerous;
- depth = (Iteration - 2) * OnePly + InitialDepth;
+ moveIsCheck = pos.move_is_check(move);
+ captureOrPromotion = pos.move_is_capture_or_promotion(move);
+ depth = (Iteration - 2) * OnePly + InitialDepth;
ext = extension(pos, move, true, captureOrPromotion, moveIsCheck, false, false, &dangerous);
newDepth = depth + ext;
+ value = - VALUE_INFINITE;
+
+ // Precalculate reduction parameters
+ float LogLimit, Gradient, BaseReduction = 0.5;
+ reduction_parameters(BaseReduction, 6.0, depth, LogLimit, Gradient);
+
+ while (1) // Fail high loop
+ {
+
// Make the move, and search it
pos.do_move(move, st, ci, moveIsCheck);
- if (i < MultiPV)
+ if (i < MultiPV || value > alpha)
{
// Aspiration window is disabled in multi-pv case
if (MultiPV > 1)
// for time managment: When Problem is true, we try to complete the
// current iteration before playing a move.
Problem = ( Iteration >= 2
- && value <= IterationInfo[Iteration - 1].value - ProblemMargin);
+ && value <= ValueByIteration[Iteration - 1] - ProblemMargin);
if (Problem && StopOnPonderhit)
StopOnPonderhit = false;
&& !captureOrPromotion
&& !move_is_castle(move))
{
- double red = 0.5 + ln(RootMoveNumber - MultiPV + 1) * ln(depth / 2) / 6.0;
- if (red >= 1.0)
+ ss[0].reduction = reduction(RootMoveNumber - MultiPV + 1, LogLimit, BaseReduction, Gradient);
+ if (ss[0].reduction)
{
- ss[0].reduction = Depth(int(floor(red * int(OnePly))));
value = -search(pos, ss, -alpha, newDepth-ss[0].reduction, 1, true, 0);
doFullDepthSearch = (value > alpha);
}
if (doFullDepthSearch)
{
+ ss[0].reduction = Depth(0);
value = -search(pos, ss, -alpha, newDepth, 1, true, 0);
if (value > alpha)
pos.undo_move(move);
+ // Can we exit fail high loop ?
+ if (AbortSearch || value < beta)
+ break;
+
+ // We are failing high and going to do a research. It's important to update score
+ // before research in case we run out of time while researching.
+ rml.set_move_score(i, value);
+ update_pv(ss, 0);
+ TT.extract_pv(pos, ss[0].pv, PLY_MAX);
+ rml.set_move_pv(i, ss[0].pv);
+
+ // Print search information to the standard output
+ cout << "info depth " << Iteration
+ << " score " << value_to_string(value)
+ << ((value >= beta) ? " lowerbound" :
+ ((value <= alpha)? " upperbound" : ""))
+ << " time " << current_search_time()
+ << " nodes " << nodes_searched()
+ << " nps " << nps()
+ << " pv ";
+
+ for (int j = 0; ss[0].pv[j] != MOVE_NONE && j < PLY_MAX; j++)
+ cout << ss[0].pv[j] << " ";
+
+ cout << endl;
+
+ if (UseLogFile)
+ {
+ ValueType type = (value >= beta ? VALUE_TYPE_LOWER
+ : (value <= alpha ? VALUE_TYPE_UPPER : VALUE_TYPE_EXACT));
+
+ LogFile << pretty_pv(pos, current_search_time(), Iteration,
+ nodes_searched(), value, type, ss[0].pv) << endl;
+ }
+
+ // Prepare for a research after a fail high, each time with a wider window
+ researchCount++;
+ beta = Min(beta + AspirationDelta * (1 << researchCount), VALUE_INFINITE);
+
+ } // End of fail high loop
+
// Finished searching the move. If AbortSearch is true, the search
// was aborted because the user interrupted the search or because we
// ran out of time. In this case, the return value of the search cannot
// Reset the global variable Problem to false if the value isn't too
// far below the final value from the last iteration.
- if (value > IterationInfo[Iteration - 1].value - NoProblemMargin)
+ if (value > ValueByIteration[Iteration - 1] - NoProblemMargin)
Problem = false;
}
else // MultiPV > 1
FailLow = (alpha == oldAlpha);
}
+
+ // Can we exit fail low loop ?
+ if (AbortSearch || alpha > oldAlpha)
+ break;
+
+ // Prepare for a research after a fail low, each time with a wider window
+ researchCount++;
+ alpha = Max(alpha - AspirationDelta * (1 << researchCount), -VALUE_INFINITE);
+ oldAlpha = alpha;
+
+ } // Fail low loop
+
return alpha;
}
CheckInfo ci(pos);
MovePicker mp = MovePicker(pos, ttMove, depth, H, &ss[ply]);
+ // Precalculate reduction parameters
+ float LogLimit, Gradient, BaseReduction = 0.5;
+ reduction_parameters(BaseReduction, 6.0, depth, LogLimit, Gradient);
+
// Loop through all legal moves until no moves remain or a beta cutoff
// occurs.
while ( alpha < beta
&& !move_is_castle(move)
&& !move_is_killer(move, ss[ply]))
{
- double red = 0.5 + ln(moveCount) * ln(depth / 2) / 6.0;
- if (red >= 1.0)
- {
- ss[ply].reduction = Depth(int(floor(red * int(OnePly))));
- value = -search(pos, ss, -alpha, newDepth-ss[ply].reduction, ply+1, true, threadID);
- doFullDepthSearch = (value > alpha);
- }
+ ss[ply].reduction = reduction(moveCount, LogLimit, BaseReduction, Gradient);
+ if (ss[ply].reduction)
+ {
+ value = -search(pos, ss, -alpha, newDepth-ss[ply].reduction, ply+1, true, threadID);
+ doFullDepthSearch = (value > alpha);
+ }
}
if (doFullDepthSearch) // Go with full depth non-pv search
// (from the computer's point of view) since the previous iteration.
if ( ply == 1
&& Iteration >= 2
- && -value <= IterationInfo[Iteration-1].value - ProblemMargin)
+ && -value <= ValueByIteration[Iteration-1] - ProblemMargin)
Problem = true;
}
// Calculate depth dependant futility pruning parameters
const int FutilityMoveCountMargin = 3 + (1 << (3 * int(depth) / 8));
- const int PostFutilityValueMargin = 112 * bitScanReverse32(int(depth) * int(depth) / 2);
// Evaluate the position statically
if (!isCheck)
}
ss[ply].eval = staticValue;
- futilityValue = staticValue + PostFutilityValueMargin; //FIXME: Remove me, only for split
+ futilityValue = staticValue + FutilityMargins[int(depth)]; //FIXME: Remove me, only for split
staticValue = refine_eval(tte, staticValue, ply); // Enhance accuracy with TT value if possible
update_gains(pos, ss[ply - 1].currentMove, ss[ply - 1].eval, ss[ply].eval);
}
- // Do a "stand pat". If we are above beta by a good margin then
- // return immediately.
- // FIXME: test with added condition 'allowNullmove || depth <= OnePly' and !value_is_mate(beta)
- // FIXME: test with modified condition 'depth < RazorDepth'
+ // Static null move pruning. We're betting that the opponent doesn't have
+ // a move that will reduce the score by more than FutilityMargins[int(depth)]
+ // if we do a null move.
if ( !isCheck
- && depth < SelectiveDepth
- && staticValue - PostFutilityValueMargin >= beta)
- return staticValue - PostFutilityValueMargin;
+ && allowNullmove
+ && depth < RazorDepth
+ && staticValue - FutilityMargins[int(depth)] >= beta)
+ return staticValue - FutilityMargins[int(depth)];
// Null move search
if ( allowNullmove
MovePicker mp = MovePicker(pos, ttMove, depth, H, &ss[ply]);
CheckInfo ci(pos);
+ // Precalculate reduction parameters
+ float LogLimit, Gradient, BaseReduction = 0.5;
+ reduction_parameters(BaseReduction, 3.0, depth, LogLimit, Gradient);
+
// Loop through all legal moves until no moves remain or a beta cutoff occurs
while ( bestValue < beta
&& (move = mp.get_next_move()) != MOVE_NONE
// Update current move
movesSearched[moveCount++] = ss[ply].currentMove = move;
- // Futility pruning for captures
- // FIXME: test disabling 'Futility pruning for captures'
- // FIXME: test with 'newDepth < RazorDepth'
- Color them = opposite_color(pos.side_to_move());
-
- if ( !isCheck
- && newDepth < SelectiveDepth
- && !dangerous
- && pos.move_is_capture(move)
- && !pos.move_is_check(move, ci)
- && !move_is_promotion(move)
- && move != ttMove
- && !move_is_ep(move)
- && (pos.type_of_piece_on(move_to(move)) != PAWN || !pos.pawn_is_passed(them, move_to(move)))) // Do not prune passed pawn captures
- {
- int preFutilityValueMargin = 0;
-
- if (newDepth >= OnePly)
- preFutilityValueMargin = 112 * bitScanReverse32(int(newDepth) * int(newDepth) / 2);
-
- Value futilityCaptureValue = ss[ply].eval + pos.endgame_value_of_piece_on(move_to(move)) + preFutilityValueMargin + ei.futilityMargin + 90;
-
- if (futilityCaptureValue < beta)
- {
- if (futilityCaptureValue > bestValue)
- bestValue = futilityCaptureValue;
- continue;
- }
- }
-
// Futility pruning
if ( !isCheck
&& !dangerous
// Value based pruning
Depth predictedDepth = newDepth;
- //FIXME HACK: awful code duplication
- double red = 0.5 + ln(moveCount) * ln(depth / 2) / 3.0;
- if (red >= 1.0)
- predictedDepth -= int(floor(red * int(OnePly)));
+ //FIXME: We are ignoring condition: depth >= 3*OnePly, BUG??
+ ss[ply].reduction = reduction(moveCount, LogLimit, BaseReduction, Gradient);
+ if (ss[ply].reduction)
+ predictedDepth -= ss[ply].reduction;
if (predictedDepth < SelectiveDepth)
{
int preFutilityValueMargin = 0;
if (predictedDepth >= OnePly)
- preFutilityValueMargin = 112 * bitScanReverse32(int(predictedDepth) * int(predictedDepth) / 2);
+ preFutilityValueMargin = FutilityMargins[int(predictedDepth)];
preFutilityValueMargin += H.gain(pos.piece_on(move_from(move)), move_from(move), move_to(move)) + 45;
&& !dangerous
&& !captureOrPromotion
&& !move_is_castle(move)
- && !move_is_killer(move, ss[ply])
- /* && move != ttMove*/)
+ && !move_is_killer(move, ss[ply]))
{
- double red = 0.5 + ln(moveCount) * ln(depth / 2) / 3.0;
- if (red >= 1.0)
+ ss[ply].reduction = reduction(moveCount, LogLimit, BaseReduction, Gradient);
+ if (ss[ply].reduction)
{
- ss[ply].reduction = Depth(int(floor(red * int(OnePly))));
value = -search(pos, ss, -(beta-1), newDepth-ss[ply].reduction, ply+1, true, threadID);
doFullDepthSearch = (value >= beta);
}
const TTEntry* tte = NULL;
int moveCount = 0;
bool pvNode = (beta - alpha != 1);
+ Value oldAlpha = alpha;
// Initialize, and make an early exit in case of an aborted search,
// an instant draw, maximum ply reached, etc.
if (bestValue >= beta)
{
// Store the score to avoid a future costly evaluation() call
- if (!isCheck && !tte && ei.futilityMargin == 0)
+ if (!isCheck && !tte && ei.futilityMargin[pos.side_to_move()] == 0)
TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_EV_LO, Depth(-127*OnePly), MOVE_NONE);
return bestValue;
MovePicker mp = MovePicker(pos, ttMove, deepChecks ? Depth(0) : depth, H);
CheckInfo ci(pos);
enoughMaterial = pos.non_pawn_material(pos.side_to_move()) > RookValueMidgame;
- futilityBase = staticValue + FutilityMarginQS + ei.futilityMargin;
+ futilityBase = staticValue + FutilityMarginQS + ei.futilityMargin[pos.side_to_move()];
// Loop through the moves until no moves remain or a beta cutoff
// occurs.
// Update transposition table
Depth d = (depth == Depth(0) ? Depth(0) : Depth(-1));
- if (bestValue < beta)
+ if (bestValue <= oldAlpha)
{
// If bestValue isn't changed it means it is still the static evaluation
// of the node, so keep this info to avoid a future evaluation() call.
- ValueType type = (bestValue == staticValue && !ei.futilityMargin ? VALUE_TYPE_EV_UP : VALUE_TYPE_UPPER);
+ ValueType type = (bestValue == staticValue && !ei.futilityMargin[pos.side_to_move()] ? VALUE_TYPE_EV_UP : VALUE_TYPE_UPPER);
TT.store(pos.get_key(), value_to_tt(bestValue, ply), type, d, MOVE_NONE);
}
- else
+ else if (bestValue >= beta)
{
move = ss[ply].pv[ply];
TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_LOWER, d, move);
if (!pos.move_is_capture_or_promotion(move))
update_killers(move, ss[ply]);
}
+ else
+ TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_EXACT, d, ss[ply].pv[ply]);
assert(bestValue > -VALUE_INFINITE && bestValue < VALUE_INFINITE);
const int FutilityMoveCountMargin = 3 + (1 << (3 * int(sp->depth) / 8));
+ // Precalculate reduction parameters
+ float LogLimit, Gradient, BaseReduction = 0.5;
+ reduction_parameters(BaseReduction, 3.0, sp->depth, LogLimit, Gradient);
+
while ( lock_grab_bool(&(sp->lock))
&& sp->bestValue < sp->beta
&& !thread_should_stop(threadID)
&& !move_is_castle(move)
&& !move_is_killer(move, ss[sp->ply]))
{
- double red = 0.5 + ln(moveCount) * ln(sp->depth / 2) / 3.0;
- if (red >= 1.0)
+ ss[sp->ply].reduction = reduction(moveCount, LogLimit, BaseReduction, Gradient);
+ if (ss[sp->ply].reduction)
{
- ss[sp->ply].reduction = Depth(int(floor(red * int(OnePly))));
value = -search(pos, ss, -(sp->beta-1), newDepth-ss[sp->ply].reduction, sp->ply+1, true, threadID);
doFullDepthSearch = (value >= sp->beta);
}
assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);
if (thread_should_stop(threadID))
+ {
+ lock_grab(&(sp->lock));
break;
+ }
// New best move?
if (value > sp->bestValue) // Less then 2% of cases
int moveCount;
Move move;
+ // Precalculate reduction parameters
+ float LogLimit, Gradient, BaseReduction = 0.5;
+ reduction_parameters(BaseReduction, 6.0, sp->depth, LogLimit, Gradient);
+
while ( lock_grab_bool(&(sp->lock))
&& sp->alpha < sp->beta
&& !thread_should_stop(threadID)
&& !move_is_castle(move)
&& !move_is_killer(move, ss[sp->ply]))
{
- double red = 0.5 + ln(moveCount) * ln(sp->depth / 2) / 6.0;
- if (red >= 1.0)
+ ss[sp->ply].reduction = reduction(moveCount, LogLimit, BaseReduction, Gradient);
+ if (ss[sp->ply].reduction)
{
Value localAlpha = sp->alpha;
- ss[sp->ply].reduction = Depth(int(floor(red * int(OnePly))));
value = -search(pos, ss, -localAlpha, newDepth-ss[sp->ply].reduction, sp->ply+1, true, threadID);
doFullDepthSearch = (value > localAlpha);
}
assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);
if (thread_should_stop(threadID))
+ {
+ lock_grab(&(sp->lock));
break;
+ }
// New best move?
if (value > sp->bestValue) // Less then 2% of cases
// (from the computer's point of view) since the previous iteration.
if ( sp->ply == 1
&& Iteration >= 2
- && -value <= IterationInfo[Iteration-1].value - ProblemMargin)
+ && -value <= ValueByIteration[Iteration-1] - ProblemMargin)
Problem = true;
}
lock_release(&(sp->lock));
RootMoveList::RootMoveList(Position& pos, Move searchMoves[]) : count(0) {
+ SearchStack ss[PLY_MAX_PLUS_2];
MoveStack mlist[MaxRootMoves];
+ StateInfo st;
bool includeAllMoves = (searchMoves[0] == MOVE_NONE);
// Generate all legal moves
continue;
// Find a quick score for the move
- StateInfo st;
- SearchStack ss[PLY_MAX_PLUS_2];
init_ss_array(ss);
-
+ pos.do_move(cur->move, st);
moves[count].move = cur->move;
- pos.do_move(moves[count].move, st);
moves[count].score = -qsearch(pos, ss, -VALUE_INFINITE, VALUE_INFINITE, Depth(0), 1, 0);
- pos.undo_move(moves[count].move);
- moves[count].pv[0] = moves[count].move;
+ moves[count].pv[0] = cur->move;
moves[count].pv[1] = MOVE_NONE;
+ pos.undo_move(cur->move);
count++;
}
sort();
return defaultEval;
}
+
+ // reduction_parameters() precalculates some parameters used later by reduction(). Because
+ // floating point operations are involved, the reduction still has to be evaluated for each
+ // move, but the most expensive part of the computation is done only once per node.
+
+ void reduction_parameters(float baseReduction, float reductionInhibitor, Depth depth, float& logLimit, float& gradient)
+ {
+ // Precalculate logLimit and gradient so that the per-move formula
+ //
+ // red = baseReduction + ln(moveCount) * ln(depth / 2) / reductionInhibitor;
+ //
+ // reduces to one compare plus one multiply-add inside reduction().
+ // For depth <= OnePly no reduction is wanted: logLimit = 1000 makes the
+ // "ln(moveCount) < logLimit" test in reduction() always succeed.
+ logLimit = depth > OnePly ? (1 - baseReduction) * reductionInhibitor / ln(depth / 2) : 1000;
+ gradient = depth > OnePly ? ln(depth / 2) / reductionInhibitor : 0;
+ }
+
+
+ // reduction() returns the reduction in plies based on moveCount and on the
+ // parameters precalculated by reduction_parameters(). It returns Depth(0)
+ // when the move should not be reduced; a non-zero reduction is always at
+ // least one ply (guaranteed by the logLimit threshold below).
+
+ Depth reduction(int moveCount, float logLimit, float baseReduction, float gradient) {
+
+ if (ln(moveCount) < logLimit)
+ return Depth(0);
+
+ float red = baseReduction + ln(moveCount) * gradient;
+ return Depth(int(floor(red * int(OnePly))));
+ }
+
+
// update_history() registers a good move that produced a beta-cutoff
// in history and marks as failures all the other moves of that ply.