// and near frontier nodes.
const Value FutilityMarginQS = Value(0x80);
+ Value FutilityMargins[2 * PLY_MAX_PLUS_2]; // Initialized at startup.
+
// Each move futility margin is decreased
const Value IncrementalFutilityMargin = Value(0x8);
std::ofstream LogFile;
// Natural logarithmic lookup table and its getter function
- double lnArray[512];
- inline double ln(int i) { return lnArray[i]; }
+ float lnArray[512];
+ inline float ln(int i) { return lnArray[i]; }
// MP related variables
int ActiveThreads = 1;
bool ok_to_prune(const Position& pos, Move m, Move threat);
bool ok_to_use_TT(const TTEntry* tte, Depth depth, Value beta, int ply);
Value refine_eval(const TTEntry* tte, Value defaultEval, int ply);
- Depth calculate_reduction(double baseReduction, int moveCount, Depth depth, double reductionInhibitor);
+ void reduction_parameters(float base, float Inhibitor, Depth depth, float& logLimit, float& gradient);
+ Depth reduction(int moveCount, const float LogLimit, const float BaseRed, const float Gradient);
void update_history(const Position& pos, Move move, Depth depth, Move movesSearched[], int moveCount);
void update_killers(Move m, SearchStack& ss);
void update_gains(const Position& pos, Move move, Value before, Value after);
// Init our logarithmic lookup table
for (i = 0; i < 512; i++)
- lnArray[i] = log(double(i)); // log() returns base-e logarithm
+ lnArray[i] = float(log(double(i))); // log() returns base-e logarithm
for (i = 0; i < THREAD_MAX; i++)
Threads[i].activeSplitPoints = 0;
+ // Init futility margins array
+ FutilityMargins[0] = FutilityMargins[1] = Value(0);
+
+ for (i = 2; i < 2 * PLY_MAX_PLUS_2; i++)
+ {
+ FutilityMargins[i] = Value(112 * bitScanReverse32(i * i / 2)); // FIXME: test using log instead of BSR
+ }
+
// Initialize global locks
lock_init(&MPLock, NULL);
lock_init(&IOLock, NULL);
value = - VALUE_INFINITE;
+ // Precalculate reduction parameters
+ float LogLimit, Gradient, BaseReduction = 0.5;
+ reduction_parameters(BaseReduction, 6.0, depth, LogLimit, Gradient);
+
while (1) // Fail high loop
{
&& !captureOrPromotion
&& !move_is_castle(move))
{
- ss[0].reduction = calculate_reduction(0.5, RootMoveNumber - MultiPV + 1, depth, 6.0);
+ ss[0].reduction = reduction(RootMoveNumber - MultiPV + 1, LogLimit, BaseReduction, Gradient);
if (ss[0].reduction)
{
value = -search(pos, ss, -alpha, newDepth-ss[0].reduction, 1, true, 0);
CheckInfo ci(pos);
MovePicker mp = MovePicker(pos, ttMove, depth, H, &ss[ply]);
+ // Precalculate reduction parameters
+ float LogLimit, Gradient, BaseReduction = 0.5;
+ reduction_parameters(BaseReduction, 6.0, depth, LogLimit, Gradient);
+
// Loop through all legal moves until no moves remain or a beta cutoff
// occurs.
while ( alpha < beta
&& !captureOrPromotion
&& !move_is_castle(move)
&& !move_is_killer(move, ss[ply]))
- {
- ss[ply].reduction = calculate_reduction(0.5, moveCount, depth, 6.0);
+ {
+ ss[ply].reduction = reduction(moveCount, LogLimit, BaseReduction, Gradient);
if (ss[ply].reduction)
{
value = -search(pos, ss, -alpha, newDepth-ss[ply].reduction, ply+1, true, threadID);
// Calculate depth dependant futility pruning parameters
const int FutilityMoveCountMargin = 3 + (1 << (3 * int(depth) / 8));
- const int PostFutilityValueMargin = 112 * bitScanReverse32(int(depth) * int(depth) / 2);
// Evaluate the position statically
if (!isCheck)
}
ss[ply].eval = staticValue;
- futilityValue = staticValue + PostFutilityValueMargin; //FIXME: Remove me, only for split
+ futilityValue = staticValue + FutilityMargins[int(depth)]; //FIXME: Remove me, only for split
staticValue = refine_eval(tte, staticValue, ply); // Enhance accuracy with TT value if possible
update_gains(pos, ss[ply - 1].currentMove, ss[ply - 1].eval, ss[ply].eval);
}
// Do a "stand pat". If we are above beta by a good margin then
// return immediately.
- // FIXME: test with added condition 'allowNullmove || depth <= OnePly' and !value_is_mate(beta)
- // FIXME: test with modified condition 'depth < RazorDepth'
if ( !isCheck
- && depth < SelectiveDepth
- && staticValue - PostFutilityValueMargin >= beta)
- return staticValue - PostFutilityValueMargin;
+ && allowNullmove
+ && depth < RazorDepth
+ && staticValue - FutilityMargins[int(depth)] >= beta)
+ return staticValue - FutilityMargins[int(depth)];
// Null move search
if ( allowNullmove
MovePicker mp = MovePicker(pos, ttMove, depth, H, &ss[ply]);
CheckInfo ci(pos);
+ // Precalculate reduction parameters
+ float LogLimit, Gradient, BaseReduction = 0.5;
+ reduction_parameters(BaseReduction, 3.0, depth, LogLimit, Gradient);
+
// Loop through all legal moves until no moves remain or a beta cutoff occurs
while ( bestValue < beta
&& (move = mp.get_next_move()) != MOVE_NONE
// Update current move
movesSearched[moveCount++] = ss[ply].currentMove = move;
- // Futility pruning for captures
- // FIXME: test disabling 'Futility pruning for captures'
- // FIXME: test with 'newDepth < RazorDepth'
- Color them = opposite_color(pos.side_to_move());
-
- if ( !isCheck
- && newDepth < SelectiveDepth
- && !dangerous
- && pos.move_is_capture(move)
- && !pos.move_is_check(move, ci)
- && !move_is_promotion(move)
- && move != ttMove
- && !move_is_ep(move)
- && (pos.type_of_piece_on(move_to(move)) != PAWN || !pos.pawn_is_passed(them, move_to(move)))) // Do not prune passed pawn captures
- {
- int preFutilityValueMargin = 0;
-
- if (newDepth >= OnePly)
- preFutilityValueMargin = 112 * bitScanReverse32(int(newDepth) * int(newDepth) / 2);
-
- Value futilityCaptureValue = ss[ply].eval + pos.endgame_value_of_piece_on(move_to(move)) + preFutilityValueMargin + ei.futilityMargin + 90;
-
- if (futilityCaptureValue < beta)
- {
- if (futilityCaptureValue > bestValue)
- bestValue = futilityCaptureValue;
- continue;
- }
- }
-
// Futility pruning
if ( !isCheck
&& !dangerous
Depth predictedDepth = newDepth;
//FIXME: We are ignoring condition: depth >= 3*OnePly, BUG??
- ss[ply].reduction = calculate_reduction(0.5, moveCount, depth, 3.0);
+ ss[ply].reduction = reduction(moveCount, LogLimit, BaseReduction, Gradient);
if (ss[ply].reduction)
predictedDepth -= ss[ply].reduction;
{
int preFutilityValueMargin = 0;
if (predictedDepth >= OnePly)
- preFutilityValueMargin = 112 * bitScanReverse32(int(predictedDepth) * int(predictedDepth) / 2);
+ preFutilityValueMargin = FutilityMargins[int(predictedDepth)];
preFutilityValueMargin += H.gain(pos.piece_on(move_from(move)), move_from(move), move_to(move)) + 45;
&& !move_is_castle(move)
&& !move_is_killer(move, ss[ply]))
{
- ss[ply].reduction = calculate_reduction(0.5, moveCount, depth, 3.0);
+ ss[ply].reduction = reduction(moveCount, LogLimit, BaseReduction, Gradient);
if (ss[ply].reduction)
{
value = -search(pos, ss, -(beta-1), newDepth-ss[ply].reduction, ply+1, true, threadID);
const TTEntry* tte = NULL;
int moveCount = 0;
bool pvNode = (beta - alpha != 1);
+ Value oldAlpha = alpha;
// Initialize, and make an early exit in case of an aborted search,
// an instant draw, maximum ply reached, etc.
// Update transposition table
Depth d = (depth == Depth(0) ? Depth(0) : Depth(-1));
- if (bestValue < beta)
+ if (bestValue <= oldAlpha)
{
// If bestValue isn't changed it means it is still the static evaluation
// of the node, so keep this info to avoid a future evaluation() call.
ValueType type = (bestValue == staticValue && !ei.futilityMargin ? VALUE_TYPE_EV_UP : VALUE_TYPE_UPPER);
TT.store(pos.get_key(), value_to_tt(bestValue, ply), type, d, MOVE_NONE);
}
- else
+ else if (bestValue >= beta)
{
move = ss[ply].pv[ply];
TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_LOWER, d, move);
if (!pos.move_is_capture_or_promotion(move))
update_killers(move, ss[ply]);
}
+ else
+ TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_EXACT, d, ss[ply].pv[ply]);
assert(bestValue > -VALUE_INFINITE && bestValue < VALUE_INFINITE);
const int FutilityMoveCountMargin = 3 + (1 << (3 * int(sp->depth) / 8));
+ // Precalculate reduction parameters
+ float LogLimit, Gradient, BaseReduction = 0.5;
+ reduction_parameters(BaseReduction, 3.0, sp->depth, LogLimit, Gradient);
+
while ( lock_grab_bool(&(sp->lock))
&& sp->bestValue < sp->beta
&& !thread_should_stop(threadID)
&& !move_is_castle(move)
&& !move_is_killer(move, ss[sp->ply]))
{
- ss[sp->ply].reduction = calculate_reduction(0.5, moveCount, sp->depth, 3.0);
+ ss[sp->ply].reduction = reduction(moveCount, LogLimit, BaseReduction, Gradient);
if (ss[sp->ply].reduction)
{
value = -search(pos, ss, -(sp->beta-1), newDepth-ss[sp->ply].reduction, sp->ply+1, true, threadID);
int moveCount;
Move move;
+ // Precalculate reduction parameters
+ float LogLimit, Gradient, BaseReduction = 0.5;
+ reduction_parameters(BaseReduction, 6.0, sp->depth, LogLimit, Gradient);
+
while ( lock_grab_bool(&(sp->lock))
&& sp->alpha < sp->beta
&& !thread_should_stop(threadID)
&& !move_is_castle(move)
&& !move_is_killer(move, ss[sp->ply]))
{
- ss[sp->ply].reduction = calculate_reduction(0.5, moveCount, sp->depth, 6.0);
+ ss[sp->ply].reduction = reduction(moveCount, LogLimit, BaseReduction, Gradient);
if (ss[sp->ply].reduction)
{
Value localAlpha = sp->alpha;
return defaultEval;
}
- // calculate_reduction() returns reduction in plies based on
- // moveCount and depth. Reduction is always at least one ply.
- Depth calculate_reduction(double baseReduction, int moveCount, Depth depth, double reductionInhibitor) {
+ // reduction_parameters() precalculates some parameters used later by reduction(). Because
+ // floating point operations are involved we still recalculate the reduction at each move,
+ // but we do the most costly computation only once per node.
- double red = baseReduction + ln(moveCount) * ln(depth / 2) / reductionInhibitor;
+ void reduction_parameters(float baseReduction, float reductionInhibitor, Depth depth, float& logLimit, float& gradient)
+ {
+ // Precalculate some parameters to avoid calculating the following formula for each move:
+ //
+ // red = baseReduction + ln(moveCount) * ln(depth / 2) / reductionInhibitor;
+ //
+ logLimit = depth > OnePly ? (1 - baseReduction) * reductionInhibitor / ln(depth / 2) : 1000;
+ gradient = depth > OnePly ? ln(depth / 2) / reductionInhibitor : 0;
+ }
- if (red >= 1.0)
- return Depth(int(floor(red * int(OnePly))));
- else
+
+ // reduction() returns the reduction in plies based on moveCount and depth.
+ // It returns zero when below the precalculated log limit; a non-zero
+ // reduction is always at least one ply.
+
+ Depth reduction(int moveCount, float logLimit, float baseReduction, float gradient) {
+
+ if (ln(moveCount) < logLimit)
return Depth(0);
+ float red = baseReduction + ln(moveCount) * gradient;
+ return Depth(int(floor(red * int(OnePly))));
}
+
// update_history() registers a good move that produced a beta-cutoff
// in history and marks as failures all the other moves of that ply.