bool thread_should_stop(int threadID) const;
void wake_sleeping_threads();
void put_threads_to_sleep();
- void idle_loop(int threadID, SplitPoint* waitSp);
+ void idle_loop(int threadID, SplitPoint* sp);
bool split(const Position& pos, SearchStack* ss, int ply, Value* alpha, const Value beta, Value* bestValue,
Depth depth, bool mateThreat, int* moves, MovePicker* mp, int master, bool pvNode);
Depth extension(const Position&, Move, bool, bool, bool, bool, bool, bool*);
bool ok_to_do_nullmove(const Position& pos);
bool ok_to_prune(const Position& pos, Move m, Move threat);
- bool ok_to_use_TT(const TTEntry* tte, Depth depth, Value beta, int ply, bool allowNullmove);
+ bool ok_to_use_TT(const TTEntry* tte, Depth depth, Value beta, int ply);
Value refine_eval(const TTEntry* tte, Value defaultEval, int ply);
void update_history(const Position& pos, Move move, Depth depth, Move movesSearched[], int moveCount);
void update_killers(Move m, SearchStack& ss);
for (int i = 1; i < 64; i++) // i == depth (OnePly = 1)
for (int j = 1; j < 64; j++) // j == moveNumber
{
- double pvRed = 0.5 + log(double(i)) * log(double(j)) / 6.0;
- double nonPVRed = 0.5 + log(double(i)) * log(double(j)) / 3.0;
+ double pvRed = log(double(i)) * log(double(j)) / 3.0;
+ double nonPVRed = log(double(i)) * log(double(j)) / 1.5;
PVReductionMatrix[i][j] = (int8_t) ( pvRed >= 1.0 ? floor( pvRed * int(OnePly)) : 0);
NonPVReductionMatrix[i][j] = (int8_t) (nonPVRed >= 1.0 ? floor(nonPVRed * int(OnePly)) : 0);
}
for (int j = 0; j < 64; j++) // j == moveNumber
{
// FIXME: test using log instead of BSR
- FutilityMarginsMatrix[i][j] = (i < 2 ? 0 : 112 * bitScanReverse32(i * i / 2)) - 8 * j;
+ FutilityMarginsMatrix[i][j] = (i < 2 ? 0 : 112 * bitScanReverse32(i * i / 2)) - 8 * j + 45;
}
// Init futility move count array
tte = TT.retrieve(posKey);
ttMove = (tte ? tte->move() : MOVE_NONE);
- if (tte && ok_to_use_TT(tte, depth, beta, ply, allowNullmove))
+ if (tte && ok_to_use_TT(tte, depth, beta, ply))
{
+ // Refresh tte entry to avoid aging
+ TT.store(posKey, tte->value(), tte->type(), tte->depth(), ttMove);
+
ss[ply].currentMove = ttMove; // Can be MOVE_NONE
return value_from_tt(tte->value(), ply);
}
if (nullValue >= value_mate_in(PLY_MAX))
nullValue = beta;
- // Do zugzwang verification search for high depths, don't store in TT
- // if search was stopped.
- if ( ( depth < 6 * OnePly
- || search(pos, ss, beta, depth-5*OnePly, ply, false, threadID) >= beta)
- && !AbortSearch
- && !TM.thread_should_stop(threadID))
- {
- assert(value_to_tt(nullValue, ply) == nullValue);
+ if (depth < 6 * OnePly)
+ return nullValue;
- TT.store(posKey, nullValue, VALUE_TYPE_NS_LO, depth, MOVE_NONE);
+ // Do zugzwang verification search
+ Value v = search(pos, ss, beta, depth-5*OnePly, ply, false, threadID);
+ if (v >= beta)
return nullValue;
- }
} else {
// The null move failed low, which means that we may be faced with
// some kind of threat. If the previous move was reduced, check if
// Value based pruning
Depth predictedDepth = newDepth - nonpv_reduction(depth, moveCount); // We illogically ignore reduction condition depth >= 3*OnePly
futilityValueScaled = ss[ply].eval + futility_margin(predictedDepth, moveCount)
- + H.gain(pos.piece_on(move_from(move)), move_to(move)) + 45;
+ + H.gain(pos.piece_on(move_from(move)), move_to(move));
if (futilityValueScaled < beta)
{
tte = TT.retrieve(pos.get_key());
ttMove = (tte ? tte->move() : MOVE_NONE);
- if (!pvNode && tte && ok_to_use_TT(tte, depth, beta, ply, true))
+ if (!pvNode && tte && ok_to_use_TT(tte, depth, beta, ply))
{
assert(tte->type() != VALUE_TYPE_EVAL);
if (bestValue >= beta)
{
// Store the score to avoid a future costly evaluation() call
- if (!isCheck && !tte && ei.futilityMargin[pos.side_to_move()] == 0)
+ if (!isCheck && !tte && ei.kingDanger[pos.side_to_move()] == 0)
TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_EV_LO, Depth(-127*OnePly), MOVE_NONE);
return bestValue;
MovePicker mp = MovePicker(pos, ttMove, deepChecks ? Depth(0) : depth, H);
CheckInfo ci(pos);
enoughMaterial = pos.non_pawn_material(pos.side_to_move()) > RookValueMidgame;
- futilityBase = staticValue + FutilityMarginQS + ei.futilityMargin[pos.side_to_move()];
+ futilityBase = staticValue + FutilityMarginQS + ei.kingDanger[pos.side_to_move()];
// Loop through the moves until no moves remain or a beta cutoff occurs
while ( alpha < beta
{
// If bestValue isn't changed it means it is still the static evaluation
// of the node, so keep this info to avoid a future evaluation() call.
- ValueType type = (bestValue == staticValue && !ei.futilityMargin[pos.side_to_move()] ? VALUE_TYPE_EV_UP : VALUE_TYPE_UPPER);
+ ValueType type = (bestValue == staticValue && !ei.kingDanger[pos.side_to_move()] ? VALUE_TYPE_EV_UP : VALUE_TYPE_UPPER);
TT.store(pos.get_key(), value_to_tt(bestValue, ply), type, d, MOVE_NONE);
}
else if (bestValue >= beta)
// Value based pruning
Depth predictedDepth = newDepth - nonpv_reduction(sp->depth, moveCount);
futilityValueScaled = ss[sp->ply].eval + futility_margin(predictedDepth, moveCount)
- + H.gain(pos.piece_on(move_from(move)), move_to(move)) + 45;
+ + H.gain(pos.piece_on(move_from(move)), move_to(move));
if (futilityValueScaled < sp->beta)
{
}
- // ok_to_use_TT() returns true if a transposition table score can be used at a
- // given point in search. To avoid zugzwang issues TT cutoffs at the root node
- // of a null move verification search are not allowed if the TT value was found
- // by a null search, this is implemented testing allowNullmove and TT entry type.
+ // ok_to_use_TT() returns true if a transposition table score
+ // can be used at a given point in search.
- bool ok_to_use_TT(const TTEntry* tte, Depth depth, Value beta, int ply, bool allowNullmove) {
+ bool ok_to_use_TT(const TTEntry* tte, Depth depth, Value beta, int ply) {
Value v = value_from_tt(tte->value(), ply);
- return (allowNullmove || !(tte->type() & VALUE_TYPE_NULL))
-
- && ( tte->depth() >= depth
+ return ( tte->depth() >= depth
|| v >= Max(value_mate_in(PLY_MAX), beta)
|| v < Min(value_mated_in(PLY_MAX), beta))
// idle_loop() is where the threads are parked when they have no work to do.
- // The parameter "waitSp", if non-NULL, is a pointer to an active SplitPoint
+ // The parameter 'sp', if non-NULL, is a pointer to an active SplitPoint
// object for which the current thread is the master.
- void ThreadsManager::idle_loop(int threadID, SplitPoint* waitSp) {
+ void ThreadsManager::idle_loop(int threadID, SplitPoint* sp) {
assert(threadID >= 0 && threadID < MAX_THREADS);
// master should exit as the last one.
if (AllThreadsShouldExit)
{
- assert(!waitSp);
+ assert(!sp);
threads[threadID].state = THREAD_TERMINATED;
return;
}
// instead of wasting CPU time polling for work.
while (AllThreadsShouldSleep || threadID >= ActiveThreads)
{
- assert(!waitSp);
+ assert(!sp);
assert(threadID != 0);
threads[threadID].state = THREAD_SLEEPING;
// If this thread is the master of a split point and all threads have
// finished their work at this split point, return from the idle loop.
- if (waitSp != NULL && waitSp->cpus == 0)
+ if (sp && sp->cpus == 0)
{
+ // Because sp->cpus is decremented under lock protection,
+ // be sure sp->lock has been released before proceeding.
+ lock_grab(&(sp->lock));
+ lock_release(&(sp->lock));
+
assert(threads[threadID].state == THREAD_AVAILABLE);
threads[threadID].state = THREAD_SEARCHING;