void wake_sleeping_threads();
void put_threads_to_sleep();
void idle_loop(int threadID, SplitPoint* sp);
+
+ template <bool Fake>
bool split(const Position& pos, SearchStack* ss, int ply, Value* alpha, const Value beta, Value* bestValue,
Depth depth, bool mateThreat, int* moves, MovePicker* mp, int master, bool pvNode);
template <NodeType PvNode>
Value search(Position& pos, SearchStack ss[], Value alpha, Value beta, Depth depth, int ply, bool allowNullmove, int threadID, Move excludedMove = MOVE_NONE);
+ template <NodeType PvNode>
+ Value qsearch(Position& pos, SearchStack ss[], Value alpha, Value beta, Depth depth, int ply, int threadID);
+
template <NodeType PvNode>
Depth extension(const Position& pos, Move m, bool captureOrPromotion, bool moveIsCheck, bool singleEvasion, bool mateThreat, bool* dangerous);
- Value qsearch(Position& pos, SearchStack ss[], Value alpha, Value beta, Depth depth, int ply, int threadID);
void sp_search(SplitPoint* sp, int threadID);
void sp_search_pv(SplitPoint* sp, int threadID);
void init_node(SearchStack ss[], int ply, int threadID);
// search<>() is the main search function for both PV and non-PV nodes
template <NodeType PvNode>
- Value search(Position& pos, SearchStack ss[], Value alpha, Value beta,
- Depth depth, int ply, bool allowNullmove, int threadID, Move excludedMove) {
+ Value search(Position& pos, SearchStack ss[], Value alpha, Value beta, Depth depth,
+ int ply, bool allowNullmove, int threadID, Move excludedMove) {
assert(alpha >= -VALUE_INFINITE && alpha <= VALUE_INFINITE);
assert(beta > alpha && beta <= VALUE_INFINITE);
+ assert(PvNode || alpha == beta - 1);
assert(ply >= 0 && ply < PLY_MAX);
assert(threadID >= 0 && threadID < TM.active_threads());
oldAlpha = alpha;
if (depth < OnePly)
- return qsearch(pos, ss, alpha, beta, Depth(0), ply, threadID);
+ return qsearch<PvNode>(pos, ss, alpha, beta, Depth(0), ply, threadID);
// Step 1. Initialize node and poll
// Polling can abort search.
isCheck = pos.is_check();
if (!isCheck)
{
- if (!PvNode && tte && (tte->type() & VALUE_TYPE_EVAL))
+ if (tte && (tte->type() & VALUE_TYPE_EVAL))
ss[ply].eval = value_from_tt(tte->value(), ply);
else
ss[ply].eval = evaluate(pos, ei, threadID);
&& !pos.has_pawn_on_7th(pos.side_to_move()))
{
Value rbeta = beta - razor_margin(depth);
- Value v = qsearch(pos, ss, rbeta-1, rbeta, Depth(0), ply, threadID);
+ Value v = qsearch<NonPV>(pos, ss, rbeta-1, rbeta, Depth(0), ply, threadID);
if (v < rbeta)
// Logically we should return (v + razor_margin(depth)), but
// surprisingly this did slightly weaker in tests.
if (abs(ttValue) < VALUE_KNOWN_WIN)
{
- Value excValue = search<NonPV>(pos, ss, ttValue - SingularExtensionMargin - 1, ttValue - SingularExtensionMargin, depth / 2, ply, false, threadID, move);
+ Value b = ttValue - SingularExtensionMargin;
+ Value v = search<NonPV>(pos, ss, b - 1, b, depth / 2, ply, false, threadID, move);
- if (excValue < ttValue - SingularExtensionMargin)
+ if (v < ttValue - SingularExtensionMargin)
ext = OnePly;
}
}
continue;
// Value based pruning
- Depth predictedDepth = newDepth - reduction<NonPV>(depth, moveCount); // We illogically ignore reduction condition depth >= 3*OnePly
+ Depth predictedDepth = newDepth - reduction<NonPV>(depth, moveCount); // FIXME We illogically ignore reduction condition depth >= 3*OnePly
futilityValueScaled = ss[ply].eval + futility_margin(predictedDepth, moveCount)
+ H.gain(pos.piece_on(move_from(move)), move_to(move));
value = -search<PV>(pos, ss, -beta, -alpha, newDepth, ply+1, false, threadID);
else
{
- // Step 14. Reduced search
- // if the move fails high will be re-searched at full depth.
- bool doFullDepthSearch = true;
-
- if ( depth >= 3 * OnePly
- && !dangerous
- && !captureOrPromotion
- && !move_is_castle(move)
- && !move_is_killer(move, ss[ply]))
- {
- ss[ply].reduction = reduction<PvNode>(depth, moveCount);
- if (ss[ply].reduction)
- {
- value = -search<NonPV>(pos, ss, -(alpha+1), -alpha, newDepth-ss[ply].reduction, ply+1, true, threadID);
- doFullDepthSearch = (value > alpha);
- }
- }
-
- // Step 15. Full depth search
- if (doFullDepthSearch)
- {
- ss[ply].reduction = Depth(0);
- value = -search<NonPV>(pos, ss, -(alpha+1), -alpha, newDepth, ply+1, true, threadID);
+ // Step 14. Reduced search
+ // If the move fails high it will be re-searched at full depth.
+ bool doFullDepthSearch = true;
+
+ if ( depth >= 3 * OnePly
+ && !dangerous
+ && !captureOrPromotion
+ && !move_is_castle(move)
+ && !move_is_killer(move, ss[ply]))
+ {
+ ss[ply].reduction = reduction<PvNode>(depth, moveCount);
+ if (ss[ply].reduction)
+ {
+ value = -search<NonPV>(pos, ss, -(alpha+1), -alpha, newDepth-ss[ply].reduction, ply+1, true, threadID);
+ doFullDepthSearch = (value > alpha);
+ }
+ }
- // Step extra. pv search (only in PV nodes)
- if (PvNode && value > alpha && value < beta)
- value = -search<PV>(pos, ss, -beta, -alpha, newDepth, ply+1, false, threadID);
- }
+ // Step 15. Full depth search
+ if (doFullDepthSearch)
+ {
+ ss[ply].reduction = Depth(0);
+ value = -search<NonPV>(pos, ss, -(alpha+1), -alpha, newDepth, ply+1, true, threadID);
+
+ // Step extra. pv search (only in PV nodes)
+ // Search only for possible new PV nodes; if instead value >= beta then
+ // the parent node fails low with value <= alpha and tries another move.
+ if (PvNode && value > alpha && value < beta)
+ value = -search<PV>(pos, ss, -beta, -alpha, newDepth, ply+1, false, threadID);
+ }
}
// Step 16. Undo move
&& TM.available_thread_exists(threadID)
&& !AbortSearch
&& !TM.thread_should_stop(threadID)
- && TM.split(pos, ss, ply, &alpha, beta, &bestValue,
+ && TM.split<false>(pos, ss, ply, &alpha, beta, &bestValue,
+ depth, mateThreat, &moveCount, &mp, threadID, PvNode))
+ break;
+
+ // Debug aid: fake split exercises sp_search() in single thread mode — comment out this block to disable
+ if ( bestValue < beta
+ && depth >= 4
+ && Iteration <= 99
+ && !AbortSearch
+ && !TM.thread_should_stop(threadID)
+ && TM.split<true>(pos, ss, ply, &alpha, beta, &bestValue,
depth, mateThreat, &moveCount, &mp, threadID, PvNode))
break;
}
// search function when the remaining depth is zero (or, to be more precise,
// less than OnePly).
+ template <NodeType PvNode>
Value qsearch(Position& pos, SearchStack ss[], Value alpha, Value beta,
Depth depth, int ply, int threadID) {
assert(alpha >= -VALUE_INFINITE && alpha <= VALUE_INFINITE);
assert(beta >= -VALUE_INFINITE && beta <= VALUE_INFINITE);
+ assert(PvNode || alpha == beta - 1);
assert(depth <= 0);
assert(ply >= 0 && ply < PLY_MAX);
assert(threadID >= 0 && threadID < TM.active_threads());
bool isCheck, enoughMaterial, moveIsCheck, evasionPrunable;
const TTEntry* tte = NULL;
int moveCount = 0;
- bool pvNode = (beta - alpha != 1);
Value oldAlpha = alpha;
// Initialize, and make an early exit in case of an aborted search,
tte = TT.retrieve(pos.get_key());
ttMove = (tte ? tte->move() : MOVE_NONE);
- if (!pvNode && tte && ok_to_use_TT(tte, depth, beta, ply))
+ if (!PvNode && tte && ok_to_use_TT(tte, depth, beta, ply))
{
assert(tte->type() != VALUE_TYPE_EVAL);
ss[ply].currentMove = move;
// Futility pruning
- if ( enoughMaterial
+ if ( !PvNode
+ && enoughMaterial
&& !isCheck
- && !pvNode
&& !moveIsCheck
&& move != ttMove
&& !move_is_promotion(move)
&& !pos.can_castle(pos.side_to_move());
// Don't search moves with negative SEE values
- if ( (!isCheck || evasionPrunable)
- && !pvNode
+ if ( !PvNode
+ && (!isCheck || evasionPrunable)
&& move != ttMove
&& !move_is_promotion(move)
&& pos.see_sign(move) < 0)
// Make and search the move
pos.do_move(move, st, ci, moveIsCheck);
- value = -qsearch(pos, ss, -beta, -alpha, depth-OnePly, ply+1, threadID);
+ value = -qsearch<PvNode>(pos, ss, -beta, -alpha, depth-OnePly, ply+1, threadID);
pos.undo_move(move);
assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);
void sp_search(SplitPoint* sp, int threadID) {
assert(threadID >= 0 && threadID < TM.active_threads());
- assert(TM.active_threads() > 1);
+ //assert(TM.active_threads() > 1);
StateInfo st;
Move move;
void sp_search_pv(SplitPoint* sp, int threadID) {
assert(threadID >= 0 && threadID < TM.active_threads());
- assert(TM.active_threads() > 1);
+ //assert(TM.active_threads() > 1);
StateInfo st;
Move move;
// threads have returned from sp_search_pv (or, equivalently, when
// splitPoint->cpus becomes 0), split() returns true.
+ template <bool Fake>
bool ThreadsManager::split(const Position& p, SearchStack* sstck, int ply,
Value* alpha, const Value beta, Value* bestValue,
Depth depth, bool mateThreat, int* moves, MovePicker* mp, int master, bool pvNode) {
assert(sstck != NULL);
assert(ply >= 0 && ply < PLY_MAX);
assert(*bestValue >= -VALUE_INFINITE);
- assert( ( pvNode && *bestValue <= *alpha)
- || (!pvNode && *bestValue < beta ));
- assert(!pvNode || *alpha < beta);
+ assert(*bestValue <= *alpha);
+ assert(*alpha < beta);
assert(beta <= VALUE_INFINITE);
assert(depth > Depth(0));
assert(master >= 0 && master < ActiveThreads);
- assert(ActiveThreads > 1);
+ assert(Fake || ActiveThreads > 1);
SplitPoint* splitPoint;
// If no other thread is available to help us, or if we have too many
// active split points, don't split.
- if ( !available_thread_exists(master)
+ if ( (!Fake && !available_thread_exists(master))
|| threads[master].activeSplitPoints >= ACTIVE_SPLIT_POINTS_MAX)
{
lock_release(&MPLock);
// Allocate available threads setting state to THREAD_BOOKED
for (int i = 0; i < ActiveThreads && splitPoint->cpus < MaxThreadsPerSplitPoint; i++)
- if (thread_is_available(i, master))
+ if (!Fake && thread_is_available(i, master))
{
threads[i].state = THREAD_BOOKED;
threads[i].splitPoint = splitPoint;
splitPoint->cpus++;
}
- assert(splitPoint->cpus > 1);
+ assert(Fake || splitPoint->cpus > 1);
// We can release the lock because slave threads are already booked and master is not available
lock_release(&MPLock);
init_ss_array(ss);
pos.do_move(cur->move, st);
moves[count].move = cur->move;
- moves[count].score = -qsearch(pos, ss, -VALUE_INFINITE, VALUE_INFINITE, Depth(0), 1, 0);
+ moves[count].score = -qsearch<PV>(pos, ss, -VALUE_INFINITE, VALUE_INFINITE, Depth(0), 1, 0);
moves[count].pv[0] = cur->move;
moves[count].pv[1] = MOVE_NONE;
pos.undo_move(cur->move);