namespace {
- /// Types
+ // Types
enum NodeType { NonPV, PV };
// Set to true to force running with one thread.
int active_threads() const { return ActiveThreads; }
void set_active_threads(int newActiveThreads) { ActiveThreads = newActiveThreads; }
void incrementNodeCounter(int threadID) { threads[threadID].nodes++; }
- void incrementBetaCounter(Color us, Depth d, int threadID) { threads[threadID].betaCutOffs[us] += unsigned(d); }
void resetNodeCounters();
- void resetBetaCounters();
int64_t nodes_searched() const;
- void get_beta_counters(Color us, int64_t& our, int64_t& their) const;
bool available_thread_exists(int master) const;
bool thread_is_available(int slave, int master) const;
bool thread_should_stop(int threadID) const;
- void wake_sleeping_threads();
+ void wake_sleeping_thread(int threadID);
void put_threads_to_sleep();
void idle_loop(int threadID, SplitPoint* sp);
template <bool Fake>
void split(const Position& pos, SearchStack* ss, int ply, Value* alpha, const Value beta, Value* bestValue,
- Depth depth, Move threatMove, bool mateThreat, int* moveCount, MovePicker* mp, bool pvNode);
+ Depth depth, Move threatMove, bool mateThreat, int moveCount, MovePicker* mp, bool pvNode);
private:
friend void poll();
Lock MPLock, WaitLock;
#if !defined(_MSC_VER)
- pthread_cond_t WaitCond;
+ pthread_cond_t WaitCond[MAX_THREADS];
#else
HANDLE SitIdleEvent[MAX_THREADS];
#endif
struct RootMove {
- RootMove() { nodes = cumulativeNodes = ourBeta = theirBeta = 0ULL; }
+ RootMove() : mp_score(0), nodes(0) {}
// RootMove::operator<() is the comparison function used when
// sorting the moves. A move m1 is considered to be better
// than a move m2 if it has a higher score, or if the moves
// have equal score but m1 has the higher mp_score.
bool operator<(const RootMove& m) const {
- return score != m.score ? score < m.score : theirBeta <= m.theirBeta;
+ return score != m.score ? score < m.score : mp_score <= m.mp_score;
}
Move move;
Value score;
- int64_t nodes, cumulativeNodes, ourBeta, theirBeta;
+ int mp_score;
+ int64_t nodes;
Move pv[PLY_MAX_PLUS_2];
};
public:
RootMoveList(Position& pos, Move searchMoves[]);
+ Move move(int moveNum) const { return moves[moveNum].move; }
+ Move move_pv(int moveNum, int i) const { return moves[moveNum].pv[i]; }
int move_count() const { return count; }
- Move get_move(int moveNum) const { return moves[moveNum].move; }
- Value get_move_score(int moveNum) const { return moves[moveNum].score; }
+ Value move_score(int moveNum) const { return moves[moveNum].score; }
+ int64_t move_nodes(int moveNum) const { return moves[moveNum].nodes; }
+ void add_move_nodes(int moveNum, int64_t nodes) { moves[moveNum].nodes += nodes; }
void set_move_score(int moveNum, Value score) { moves[moveNum].score = score; }
- Move get_move_pv(int moveNum, int i) const { return moves[moveNum].pv[i]; }
- int64_t get_move_cumulative_nodes(int moveNum) const { return moves[moveNum].cumulativeNodes; }
- void set_move_nodes(int moveNum, int64_t nodes);
- void set_beta_counters(int moveNum, int64_t our, int64_t their);
void set_move_pv(int moveNum, const Move pv[]);
+ void score_moves(const Position& pos);
void sort();
void sort_multipv(int n);
private:
- static const int MaxRootMoves = 500;
- RootMove moves[MaxRootMoves];
+ RootMove moves[MOVES_MAX];
int count;
};
+ // When formatting a move for std::cout we must know if we are in Chess960
+ // or not. To keep using the handy operator<<() on the move, the trick is to
+ // embed this flag in the stream itself. The function-like named enum set960 is
+ // used as a custom manipulator and the stream's internal general-purpose array,
+ // accessed through ios_base::iword(), is used to pass the flag to the move's
+ // operator<<() that will use it to properly format castling moves.
+ enum set960 {};
+
+ std::ostream& operator<< (std::ostream& os, const set960& m) {
+
+ os.iword(0) = int(m);
+ return os;
+ }
+
+
/// Adjustments
// Step 6. Razoring
// Dynamic razoring margin based on depth
inline Value razor_margin(Depth d) { return Value(0x200 + 0x10 * int(d)); }
- // Step 8. Null move search with verification search
-
- // Null move margin. A null move search will not be done if the static
- // evaluation of the position is more than NullMoveMargin below beta.
- const Value NullMoveMargin = Value(0x200);
-
// Maximum depth for use of dynamic threat detection when null move fails low
const Depth ThreatDepth = 5 * ONE_PLY;
const Value FutilityMarginQS = Value(0x80);
// Futility lookup tables (initialized at startup) and their getter functions
- int32_t FutilityMarginsMatrix[16][64]; // [depth][moveNumber]
+ Value FutilityMarginsMatrix[16][64]; // [depth][moveNumber]
int FutilityMoveCountArray[32]; // [depth]
- inline Value futility_margin(Depth d, int mn) { return Value(d < 7 * ONE_PLY ? FutilityMarginsMatrix[Max(d, 1)][Min(mn, 63)] : 2 * VALUE_INFINITE); }
+ inline Value futility_margin(Depth d, int mn) { return d < 7 * ONE_PLY ? FutilityMarginsMatrix[Max(d, 1)][Min(mn, 63)] : 2 * VALUE_INFINITE; }
inline int futility_move_count(Depth d) { return d < 16 * ONE_PLY ? FutilityMoveCountArray[d] : 512; }
// Step 14. Reduced search
Value id_loop(const Position& pos, Move searchMoves[]);
Value root_search(Position& pos, SearchStack* ss, Move* pv, RootMoveList& rml, Value* alphaPtr, Value* betaPtr);
- template <NodeType PvNode>
+ template <NodeType PvNode, bool SpNode>
Value search(Position& pos, SearchStack* ss, Value alpha, Value beta, Depth depth, int ply);
template <NodeType PvNode>
- Value qsearch(Position& pos, SearchStack* ss, Value alpha, Value beta, Depth depth, int ply);
+ inline Value search(Position& pos, SearchStack* ss, Value alpha, Value beta, Depth depth, int ply) {
+ return search<PvNode, false>(pos, ss, alpha, beta, depth, ply);
+ }
template <NodeType PvNode>
- void sp_search(SplitPoint* sp, int threadID);
+ Value qsearch(Position& pos, SearchStack* ss, Value alpha, Value beta, Depth depth, int ply);
template <NodeType PvNode>
Depth extension(const Position& pos, Move m, bool captureOrPromotion, bool moveIsCheck, bool singleEvasion, bool mateThreat, bool* dangerous);
bool value_is_mate(Value value);
Value value_to_tt(Value v, int ply);
Value value_from_tt(Value v, int ply);
- bool move_is_killer(Move m, SearchStack* ss);
bool ok_to_use_TT(const TTEntry* tte, Depth depth, Value beta, int ply);
bool connected_threat(const Position& pos, Move m, Move threat);
Value refine_eval(const TTEntry* tte, Value defaultEval, int ply);
// Init futility margins array
for (d = 1; d < 16; d++) for (mc = 0; mc < 64; mc++)
- FutilityMarginsMatrix[d][mc] = 112 * int(log(double(d * d) / 2) / log(2.0) + 1.001) - 8 * mc + 45;
+ FutilityMarginsMatrix[d][mc] = Value(112 * int(log(double(d * d) / 2) / log(2.0) + 1.001) - 8 * mc + 45);
// Init futility move count array
for (d = 0; d < 32; d++)
- FutilityMoveCountArray[d] = 3 + (1 << (3 * d / 8));
+ FutilityMoveCountArray[d] = int(3.001 + 0.25 * pow(d, 2.0));
}
int perft(Position& pos, Depth depth)
{
- MoveStack mlist[256];
+ MoveStack mlist[MOVES_MAX];
StateInfo st;
Move m;
int sum = 0;
MinimumSplitDepth = get_option_value_int("Minimum Split Depth") * ONE_PLY;
MaxThreadsPerSplitPoint = get_option_value_int("Maximum Number of Threads per Split Point");
MultiPV = get_option_value_int("MultiPV");
- Chess960 = get_option_value_bool("UCI_Chess960");
UseLogFile = get_option_value_bool("Use Search Log");
if (UseLogFile)
init_eval(ThreadsMgr.active_threads());
}
- // Wake up sleeping threads
- ThreadsMgr.wake_sleeping_threads();
+ // Wake up needed threads
+ for (int i = 1; i < newActiveThreads; i++)
+ ThreadsMgr.wake_sleeping_thread(i);
// Set thinking time
int myTime = time[pos.side_to_move()];
// Print RootMoveList startup scoring to the standard output,
// so to output information also for iteration 1.
- cout << "info depth " << 1
+ cout << set960(p.is_chess960()) // Is enough to set once at the beginning
+ << "info depth " << 1
<< "\ninfo depth " << 1
- << " score " << value_to_uci(rml.get_move_score(0))
+ << " score " << value_to_uci(rml.move_score(0))
<< " time " << current_search_time()
<< " nodes " << ThreadsMgr.nodes_searched()
<< " nps " << nps()
- << " pv " << rml.get_move(0) << "\n";
+ << " pv " << rml.move(0) << "\n";
// Initialize
TT.new_search();
H.clear();
init_ss_array(ss, PLY_MAX_PLUS_2);
pv[0] = pv[1] = MOVE_NONE;
- ValueByIteration[1] = rml.get_move_score(0);
+ ValueByIteration[1] = rml.move_score(0);
Iteration = 1;
// Is one move significantly better than others after initial scoring ?
if ( rml.move_count() == 1
- || rml.get_move_score(0) > rml.get_move_score(1) + EasyMoveMargin)
- EasyMove = rml.get_move(0);
+ || rml.move_score(0) > rml.move_score(1) + EasyMoveMargin)
+ EasyMove = rml.move(0);
// Iterative deepening loop
while (Iteration < PLY_MAX)
int64_t nodes = ThreadsMgr.nodes_searched();
if ( Iteration >= 8
&& EasyMove == pv[0]
- && ( ( rml.get_move_cumulative_nodes(0) > (nodes * 85) / 100
+ && ( ( rml.move_nodes(0) > (nodes * 85) / 100
&& current_search_time() > TimeMgr.available_time() / 16)
- ||( rml.get_move_cumulative_nodes(0) > (nodes * 98) / 100
+ ||( rml.move_nodes(0) > (nodes * 98) / 100
&& current_search_time() > TimeMgr.available_time() / 32)))
stopSearch = true;
// Add some extra time if the best move has changed during the last two iterations
if (Iteration > 5 && Iteration <= 50)
- TimeMgr.pv_unstability(BestMoveChangesByIteration[Iteration],
+ TimeMgr.pv_instability(BestMoveChangesByIteration[Iteration],
BestMoveChangesByIteration[Iteration-1]);
// Stop search if most of MaxSearchTime is consumed at the end of the
// Print the best move and the ponder move to the standard output
if (pv[0] == MOVE_NONE)
{
- pv[0] = rml.get_move(0);
+ pv[0] = rml.move(0);
pv[1] = MOVE_NONE;
}
<< move_to_san(p, pv[1]) // Works also with MOVE_NONE
<< endl;
}
- return rml.get_move_score(0);
+ return rml.move_score(0);
}
Value root_search(Position& pos, SearchStack* ss, Move* pv, RootMoveList& rml, Value* alphaPtr, Value* betaPtr) {
- EvalInfo ei;
StateInfo st;
CheckInfo ci(pos);
int64_t nodes;
// Step 5. Evaluate the position statically
// At root we do this only to get reference value for child nodes
- ss->eval = isCheck ? VALUE_NONE : evaluate(pos, ei);
+ ss->evalMargin = VALUE_NONE;
+ ss->eval = isCheck ? VALUE_NONE : evaluate(pos, ss->evalMargin);
// Step 6. Razoring (omitted at root)
// Step 7. Static null move pruning (omitted at root)
while (1)
{
// Sort the moves before to (re)search
+ rml.score_moves(pos);
rml.sort();
// Step 10. Loop through all moves in the root move list
// Save the current node count before the move is searched
nodes = ThreadsMgr.nodes_searched();
- // Reset beta cut-off counters
- ThreadsMgr.resetBetaCounters();
-
// Pick the next root move, and print the move and the move number to
// the standard output.
- move = ss->currentMove = rml.get_move(i);
+ move = ss->currentMove = rml.move(i);
if (current_search_time() >= 1000)
cout << "info currmove " << move
if (AbortSearch)
break;
- // Remember beta-cutoff and searched nodes counts for this move. The
- // info is used to sort the root moves for the next iteration.
- int64_t our, their;
- ThreadsMgr.get_beta_counters(pos.side_to_move(), our, their);
- rml.set_beta_counters(i, our, their);
- rml.set_move_nodes(i, ThreadsMgr.nodes_searched() - nodes);
+ // Remember searched nodes counts for this move
+ rml.add_move_nodes(i, ThreadsMgr.nodes_searched() - nodes);
assert(value >= -VALUE_INFINITE && value <= VALUE_INFINITE);
assert(value < beta);
for (int j = 0; j < Min(MultiPV, rml.move_count()); j++)
{
cout << "info multipv " << j + 1
- << " score " << value_to_uci(rml.get_move_score(j))
+ << " score " << value_to_uci(rml.move_score(j))
<< " depth " << (j <= i ? Iteration : Iteration - 1)
<< " time " << current_search_time()
<< " nodes " << ThreadsMgr.nodes_searched()
<< " nps " << nps()
<< " pv ";
- for (int k = 0; rml.get_move_pv(j, k) != MOVE_NONE && k < PLY_MAX; k++)
- cout << rml.get_move_pv(j, k) << " ";
+ for (int k = 0; rml.move_pv(j, k) != MOVE_NONE && k < PLY_MAX; k++)
+ cout << rml.move_pv(j, k) << " ";
cout << endl;
}
- alpha = rml.get_move_score(Min(i, MultiPV - 1));
+ alpha = rml.move_score(Min(i, MultiPV - 1));
}
} // PV move or new best move
}
- // search<>() is the main search function for both PV and non-PV nodes
+ // search<>() is the main search function for both PV and non-PV nodes and for
+ // normal and SplitPoint nodes. When called just after a split point the search
+ // is simpler because we have already probed the hash table, done a null move
+ // search, and searched the first move before splitting, so we don't have to repeat
+ // all this work again. We also don't need to store anything to the hash table
+ // here: This is taken care of after we return from the split point.
- template <NodeType PvNode>
+ template <NodeType PvNode, bool SpNode>
Value search(Position& pos, SearchStack* ss, Value alpha, Value beta, Depth depth, int ply) {
assert(alpha >= -VALUE_INFINITE && alpha <= VALUE_INFINITE);
assert(ply > 0 && ply < PLY_MAX);
assert(pos.thread() >= 0 && pos.thread() < ThreadsMgr.active_threads());
- Move movesSearched[256];
- EvalInfo ei;
+ Move movesSearched[MOVES_MAX];
StateInfo st;
const TTEntry *tte;
Key posKey;
Move ttMove, move, excludedMove, threatMove;
Depth ext, newDepth;
Value bestValue, value, oldAlpha;
- Value refinedValue, nullValue, futilityValueScaled; // Non-PV specific
+ Value refinedValue, nullValue, futilityBase, futilityValueScaled; // Non-PV specific
bool isCheck, singleEvasion, singularExtensionNode, moveIsCheck, captureOrPromotion, dangerous;
bool mateThreat = false;
int moveCount = 0;
int threadID = pos.thread();
+ SplitPoint* sp = NULL;
refinedValue = bestValue = value = -VALUE_INFINITE;
oldAlpha = alpha;
+ isCheck = pos.is_check();
+
+ if (SpNode)
+ {
+ sp = ss->sp;
+ tte = NULL;
+ ttMove = excludedMove = MOVE_NONE;
+ threatMove = sp->threatMove;
+ mateThreat = sp->mateThreat;
+ goto split_point_start;
+ }
// Step 1. Initialize node and poll. Polling can abort search
ThreadsMgr.incrementNodeCounter(threadID);
// Step 2. Check for aborted search and immediate draw
if (AbortSearch || ThreadsMgr.thread_should_stop(threadID))
- return VALUE_ZERO;
+ return VALUE_DRAW;
if (pos.is_draw() || ply >= PLY_MAX - 1)
return VALUE_DRAW;
if (!PvNode && tte && ok_to_use_TT(tte, depth, beta, ply))
{
// Refresh tte entry to avoid aging
- TT.store(posKey, tte->value(), tte->type(), tte->depth(), ttMove, tte->static_value(), tte->king_danger());
+ TT.store(posKey, tte->value(), tte->type(), tte->depth(), ttMove, tte->static_value(), tte->static_value_margin());
ss->bestMove = ttMove; // Can be MOVE_NONE
return value_from_tt(tte->value(), ply);
// Step 5. Evaluate the position statically and
// update gain statistics of parent move.
- isCheck = pos.is_check();
if (isCheck)
- ss->eval = VALUE_NONE;
+ ss->eval = ss->evalMargin = VALUE_NONE;
else if (tte)
{
assert(tte->static_value() != VALUE_NONE);
ss->eval = tte->static_value();
- ei.kingDanger[pos.side_to_move()] = tte->king_danger();
+ ss->evalMargin = tte->static_value_margin();
refinedValue = refine_eval(tte, ss->eval, ply);
}
else
{
- refinedValue = ss->eval = evaluate(pos, ei);
- TT.store(posKey, VALUE_NONE, VALUE_TYPE_NONE, DEPTH_NONE, MOVE_NONE, ss->eval, ei.kingDanger[pos.side_to_move()]);
+ refinedValue = ss->eval = evaluate(pos, ss->evalMargin);
+ TT.store(posKey, VALUE_NONE, VALUE_TYPE_NONE, DEPTH_NONE, MOVE_NONE, ss->eval, ss->evalMargin);
}
// Save gain for the parent non-capture move
return refinedValue - futility_margin(depth, 0);
// Step 8. Null move search with verification search (is omitted in PV nodes)
- // When we jump directly to qsearch() we do a null move only if static value is
- // at least beta. Otherwise we do a null move if static value is not more than
- // NullMoveMargin under beta.
if ( !PvNode
&& !ss->skipNullMove
&& depth > ONE_PLY
&& !isCheck
- && refinedValue >= beta - (depth >= 4 * ONE_PLY ? NullMoveMargin : 0)
+ && refinedValue >= beta
&& !value_is_mate(beta)
&& pos.non_pawn_material(pos.side_to_move()))
{
if (PvNode)
mateThreat = pos.has_mate_threat();
+split_point_start: // At split points actual search starts from here
+
// Initialize a MovePicker object for the current position
- MovePicker mp = MovePicker(pos, ttMove, depth, H, ss, (PvNode ? -VALUE_INFINITE : beta));
+ // FIXME: currently the MovePicker c'tor is needlessly called also at SplitPoint nodes
+ MovePicker mpBase = MovePicker(pos, ttMove, depth, H, ss, (PvNode ? -VALUE_INFINITE : beta));
+ MovePicker& mp = SpNode ? *sp->mp : mpBase;
CheckInfo ci(pos);
ss->bestMove = MOVE_NONE;
- singleEvasion = isCheck && mp.number_of_evasions() == 1;
- singularExtensionNode = depth >= SingularExtensionDepth[PvNode]
+ singleEvasion = !SpNode && isCheck && mp.number_of_evasions() == 1;
+ futilityBase = ss->eval + ss->evalMargin;
+ singularExtensionNode = !SpNode
+ && depth >= SingularExtensionDepth[PvNode]
&& tte
&& tte->move()
&& !excludedMove // Do not allow recursive singular extension search
- && is_lower_bound(tte->type())
+ && (tte->type() & VALUE_TYPE_LOWER)
&& tte->depth() >= depth - 3 * ONE_PLY;
+ if (SpNode)
+ {
+ lock_grab(&(sp->lock));
+ bestValue = sp->bestValue;
+ }
// Step 10. Loop through moves
// Loop through all legal moves until no moves remain or a beta cutoff occurs
&& (move = mp.get_next_move()) != MOVE_NONE
&& !ThreadsMgr.thread_should_stop(threadID))
{
+ if (SpNode)
+ {
+ moveCount = ++sp->moveCount;
+ lock_release(&(sp->lock));
+ }
+
assert(move_is_ok(move));
if (move == excludedMove)
// Move count based pruning
if ( moveCount >= futility_move_count(depth)
&& !(threatMove && connected_threat(pos, move, threatMove))
- && bestValue > value_mated_in(PLY_MAX))
+ && bestValue > value_mated_in(PLY_MAX)) // FIXME bestValue is racy
+ {
+ if (SpNode)
+ lock_grab(&(sp->lock));
+
continue;
+ }
// Value based pruning
// We illogically ignore reduction condition depth >= 3*ONE_PLY for predicted depth,
// but fixing this made program slightly weaker.
Depth predictedDepth = newDepth - reduction<NonPV>(depth, moveCount);
- futilityValueScaled = ss->eval + futility_margin(predictedDepth, moveCount)
+ futilityValueScaled = futilityBase + futility_margin(predictedDepth, moveCount)
+ H.gain(pos.piece_on(move_from(move)), move_to(move));
if (futilityValueScaled < beta)
{
- if (futilityValueScaled > bestValue)
+ if (SpNode)
+ {
+ lock_grab(&(sp->lock));
+ if (futilityValueScaled > sp->bestValue)
+ sp->bestValue = bestValue = futilityValueScaled;
+ }
+ else if (futilityValueScaled > bestValue)
bestValue = futilityValueScaled;
+
continue;
}
}
// Step extra. pv search (only in PV nodes)
// The first move in list is the expected PV
- if (PvNode && moveCount == 1)
+ if (!SpNode && PvNode && moveCount == 1)
value = newDepth < ONE_PLY ? -qsearch<PV>(pos, ss+1, -beta, -alpha, DEPTH_ZERO, ply+1)
: - search<PV>(pos, ss+1, -beta, -alpha, newDepth, ply+1);
else
&& !captureOrPromotion
&& !dangerous
&& !move_is_castle(move)
- && !move_is_killer(move, ss))
+ && !(ss->killers[0] == move || ss->killers[1] == move))
{
ss->reduction = reduction<PvNode>(depth, moveCount);
if (ss->reduction)
{
+ alpha = SpNode ? sp->alpha : alpha;
Depth d = newDepth - ss->reduction;
value = d < ONE_PLY ? -qsearch<NonPV>(pos, ss+1, -(alpha+1), -alpha, DEPTH_ZERO, ply+1)
: - search<NonPV>(pos, ss+1, -(alpha+1), -alpha, d, ply+1);
assert(newDepth - ONE_PLY >= ONE_PLY);
ss->reduction = ONE_PLY;
+ alpha = SpNode ? sp->alpha : alpha;
value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth-ss->reduction, ply+1);
doFullDepthSearch = (value > alpha);
}
// Step 15. Full depth search
if (doFullDepthSearch)
{
+ alpha = SpNode ? sp->alpha : alpha;
value = newDepth < ONE_PLY ? -qsearch<NonPV>(pos, ss+1, -(alpha+1), -alpha, DEPTH_ZERO, ply+1)
: - search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth, ply+1);
assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);
// Step 17. Check for new best move
- if (value > bestValue)
+ if (SpNode)
+ {
+ lock_grab(&(sp->lock));
+ bestValue = sp->bestValue;
+ alpha = sp->alpha;
+ }
+
+ if (value > bestValue && !(SpNode && ThreadsMgr.thread_should_stop(threadID)))
{
bestValue = value;
if (value > alpha)
{
+ if (SpNode && (!PvNode || value >= beta))
+ sp->stopRequest = true;
+
if (PvNode && value < beta) // We want always alpha < beta
alpha = value;
ss->bestMove = move;
}
+ if (SpNode)
+ {
+ sp->bestValue = bestValue;
+ sp->alpha = alpha;
+ sp->parentSstack->bestMove = ss->bestMove;
+ }
}
// Step 18. Check for split
- if ( depth >= MinimumSplitDepth
+ if ( !SpNode
+ && depth >= MinimumSplitDepth
&& ThreadsMgr.active_threads() > 1
&& bestValue < beta
&& ThreadsMgr.available_thread_exists(threadID)
&& !ThreadsMgr.thread_should_stop(threadID)
&& Iteration <= 99)
ThreadsMgr.split<FakeSplit>(pos, ss, ply, &alpha, beta, &bestValue, depth,
- threatMove, mateThreat, &moveCount, &mp, PvNode);
+ threatMove, mateThreat, moveCount, &mp, PvNode);
+ }
+
+ if (SpNode)
+ {
+ /* Here we have the lock still grabbed */
+ sp->slaves[threadID] = 0;
+ lock_release(&(sp->lock));
+ return bestValue;
}
// Step 19. Check for mate and stalemate
ValueType vt = (bestValue <= oldAlpha ? VALUE_TYPE_UPPER : bestValue >= beta ? VALUE_TYPE_LOWER : VALUE_TYPE_EXACT);
move = (bestValue <= oldAlpha ? MOVE_NONE : ss->bestMove);
- TT.store(posKey, value_to_tt(bestValue, ply), vt, depth, move, ss->eval, ei.kingDanger[pos.side_to_move()]);
+ TT.store(posKey, value_to_tt(bestValue, ply), vt, depth, move, ss->eval, ss->evalMargin);
// Update killers and history only for non capture moves that fails high
- if (bestValue >= beta)
+ if ( bestValue >= beta
+ && !pos.move_is_capture_or_promotion(move))
{
- ThreadsMgr.incrementBetaCounter(pos.side_to_move(), depth, threadID);
- if (!pos.move_is_capture_or_promotion(move))
- {
update_history(pos, move, depth, movesSearched, moveCount);
update_killers(move, ss);
- }
}
assert(bestValue > -VALUE_INFINITE && bestValue < VALUE_INFINITE);
assert(ply > 0 && ply < PLY_MAX);
assert(pos.thread() >= 0 && pos.thread() < ThreadsMgr.active_threads());
- EvalInfo ei;
StateInfo st;
Move ttMove, move;
- Value bestValue, value, futilityValue, futilityBase;
+ Value bestValue, value, evalMargin, futilityValue, futilityBase;
bool isCheck, deepChecks, enoughMaterial, moveIsCheck, evasionPrunable;
const TTEntry* tte;
Value oldAlpha = alpha;
if (isCheck)
{
bestValue = futilityBase = -VALUE_INFINITE;
- ss->eval = VALUE_NONE;
+ ss->eval = evalMargin = VALUE_NONE;
deepChecks = enoughMaterial = false;
}
else
{
assert(tte->static_value() != VALUE_NONE);
- ei.kingDanger[pos.side_to_move()] = tte->king_danger();
- bestValue = tte->static_value();
+ evalMargin = tte->static_value_margin();
+ ss->eval = bestValue = tte->static_value();
}
else
- bestValue = evaluate(pos, ei);
+ ss->eval = bestValue = evaluate(pos, evalMargin);
- ss->eval = bestValue;
update_gains(pos, (ss-1)->currentMove, (ss-1)->eval, ss->eval);
// Stand pat. Return immediately if static value is at least beta
if (bestValue >= beta)
{
if (!tte)
- TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_LOWER, DEPTH_NONE, MOVE_NONE, ss->eval, ei.kingDanger[pos.side_to_move()]);
+ TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_LOWER, DEPTH_NONE, MOVE_NONE, ss->eval, evalMargin);
return bestValue;
}
deepChecks = (depth == -ONE_PLY && bestValue >= beta - PawnValueMidgame / 8);
// Futility pruning parameters, not needed when in check
- futilityBase = bestValue + FutilityMarginQS + ei.kingDanger[pos.side_to_move()];
+ futilityBase = ss->eval + evalMargin + FutilityMarginQS;
enoughMaterial = pos.non_pawn_material(pos.side_to_move()) > RookValueMidgame;
}
}
}
- // Detect blocking evasions that are candidate to be pruned
+ // Detect non-capture evasions that are candidate to be pruned
evasionPrunable = isCheck
&& bestValue > value_mated_in(PLY_MAX)
&& !pos.move_is_capture(move)
- && pos.type_of_piece_on(move_from(move)) != KING
&& !pos.can_castle(pos.side_to_move());
// Don't search moves with negative SEE values
// Update transposition table
Depth d = (depth == DEPTH_ZERO ? DEPTH_ZERO : DEPTH_ZERO - ONE_PLY);
ValueType vt = (bestValue <= oldAlpha ? VALUE_TYPE_UPPER : bestValue >= beta ? VALUE_TYPE_LOWER : VALUE_TYPE_EXACT);
- TT.store(pos.get_key(), value_to_tt(bestValue, ply), vt, d, ss->bestMove, ss->eval, ei.kingDanger[pos.side_to_move()]);
-
- // Update killers only for checking moves that fails high
- if ( bestValue >= beta
- && !pos.move_is_capture_or_promotion(ss->bestMove))
- update_killers(ss->bestMove, ss);
+ TT.store(pos.get_key(), value_to_tt(bestValue, ply), vt, d, ss->bestMove, ss->eval, evalMargin);
assert(bestValue > -VALUE_INFINITE && bestValue < VALUE_INFINITE);
}
- // sp_search() is used to search from a split point. This function is called
- // by each thread working at the split point. It is similar to the normal
- // search() function, but simpler. Because we have already probed the hash
- // table, done a null move search, and searched the first move before
- // splitting, we don't have to repeat all this work in sp_search(). We
- // also don't need to store anything to the hash table here: This is taken
- // care of after we return from the split point.
-
- template <NodeType PvNode>
- void sp_search(SplitPoint* sp, int threadID) {
-
- assert(threadID >= 0 && threadID < ThreadsMgr.active_threads());
- assert(ThreadsMgr.active_threads() > 1);
-
- StateInfo st;
- Move move;
- Depth ext, newDepth;
- Value value;
- Value futilityValueScaled; // NonPV specific
- bool isCheck, moveIsCheck, captureOrPromotion, dangerous;
- int moveCount;
- value = -VALUE_INFINITE;
-
- Position pos(*sp->pos, threadID);
- CheckInfo ci(pos);
- SearchStack* ss = sp->sstack[threadID] + 1;
- isCheck = pos.is_check();
-
- // Step 10. Loop through moves
- // Loop through all legal moves until no moves remain or a beta cutoff occurs
- lock_grab(&(sp->lock));
-
- while ( sp->bestValue < sp->beta
- && (move = sp->mp->get_next_move()) != MOVE_NONE
- && !ThreadsMgr.thread_should_stop(threadID))
- {
- moveCount = ++sp->moveCount;
- lock_release(&(sp->lock));
-
- assert(move_is_ok(move));
-
- moveIsCheck = pos.move_is_check(move, ci);
- captureOrPromotion = pos.move_is_capture_or_promotion(move);
-
- // Step 11. Decide the new search depth
- ext = extension<PvNode>(pos, move, captureOrPromotion, moveIsCheck, false, sp->mateThreat, &dangerous);
- newDepth = sp->depth - ONE_PLY + ext;
-
- // Update current move
- ss->currentMove = move;
-
- // Step 12. Futility pruning (is omitted in PV nodes)
- if ( !PvNode
- && !captureOrPromotion
- && !isCheck
- && !dangerous
- && !move_is_castle(move))
- {
- // Move count based pruning
- if ( moveCount >= futility_move_count(sp->depth)
- && !(sp->threatMove && connected_threat(pos, move, sp->threatMove))
- && sp->bestValue > value_mated_in(PLY_MAX))
- {
- lock_grab(&(sp->lock));
- continue;
- }
-
- // Value based pruning
- Depth predictedDepth = newDepth - reduction<NonPV>(sp->depth, moveCount);
- futilityValueScaled = ss->eval + futility_margin(predictedDepth, moveCount)
- + H.gain(pos.piece_on(move_from(move)), move_to(move));
-
- if (futilityValueScaled < sp->beta)
- {
- lock_grab(&(sp->lock));
-
- if (futilityValueScaled > sp->bestValue)
- sp->bestValue = futilityValueScaled;
- continue;
- }
- }
-
- // Step 13. Make the move
- pos.do_move(move, st, ci, moveIsCheck);
-
- // Step 14. Reduced search
- // If the move fails high will be re-searched at full depth.
- bool doFullDepthSearch = true;
-
- if ( !captureOrPromotion
- && !dangerous
- && !move_is_castle(move)
- && !move_is_killer(move, ss))
- {
- ss->reduction = reduction<PvNode>(sp->depth, moveCount);
- if (ss->reduction)
- {
- Value localAlpha = sp->alpha;
- Depth d = newDepth - ss->reduction;
- value = d < ONE_PLY ? -qsearch<NonPV>(pos, ss+1, -(localAlpha+1), -localAlpha, DEPTH_ZERO, sp->ply+1)
- : - search<NonPV>(pos, ss+1, -(localAlpha+1), -localAlpha, d, sp->ply+1);
-
- doFullDepthSearch = (value > localAlpha);
- }
-
- // The move failed high, but if reduction is very big we could
- // face a false positive, retry with a less aggressive reduction,
- // if the move fails high again then go with full depth search.
- if (doFullDepthSearch && ss->reduction > 2 * ONE_PLY)
- {
- assert(newDepth - ONE_PLY >= ONE_PLY);
-
- ss->reduction = ONE_PLY;
- Value localAlpha = sp->alpha;
- value = -search<NonPV>(pos, ss+1, -(localAlpha+1), -localAlpha, newDepth-ss->reduction, sp->ply+1);
- doFullDepthSearch = (value > localAlpha);
- }
- ss->reduction = DEPTH_ZERO; // Restore original reduction
- }
-
- // Step 15. Full depth search
- if (doFullDepthSearch)
- {
- Value localAlpha = sp->alpha;
- value = newDepth < ONE_PLY ? -qsearch<NonPV>(pos, ss+1, -(localAlpha+1), -localAlpha, DEPTH_ZERO, sp->ply+1)
- : - search<NonPV>(pos, ss+1, -(localAlpha+1), -localAlpha, newDepth, sp->ply+1);
-
- // Step extra. pv search (only in PV nodes)
- // Search only for possible new PV nodes, if instead value >= beta then
- // parent node fails low with value <= alpha and tries another move.
- if (PvNode && value > localAlpha && value < sp->beta)
- value = newDepth < ONE_PLY ? -qsearch<PV>(pos, ss+1, -sp->beta, -sp->alpha, DEPTH_ZERO, sp->ply+1)
- : - search<PV>(pos, ss+1, -sp->beta, -sp->alpha, newDepth, sp->ply+1);
- }
-
- // Step 16. Undo move
- pos.undo_move(move);
-
- assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);
-
- // Step 17. Check for new best move
- lock_grab(&(sp->lock));
-
- if (value > sp->bestValue && !ThreadsMgr.thread_should_stop(threadID))
- {
- sp->bestValue = value;
-
- if (sp->bestValue > sp->alpha)
- {
- if (!PvNode || value >= sp->beta)
- sp->stopRequest = true;
-
- if (PvNode && value < sp->beta) // This guarantees that always: sp->alpha < sp->beta
- sp->alpha = value;
-
- sp->parentSstack->bestMove = ss->bestMove = move;
- }
- }
- }
-
- /* Here we have the lock still grabbed */
-
- sp->slaves[threadID] = 0;
-
- lock_release(&(sp->lock));
- }
-
-
// connected_moves() tests whether two moves are 'connected' in the sense
// that the first move somehow made the second move possible (for instance
// if the moving piece is the same in both moves). The first move is assumed
}
- // move_is_killer() checks if the given move is among the killer moves
-
- bool move_is_killer(Move m, SearchStack* ss) {
-
- if (ss->killers[0] == m || ss->killers[1] == m)
- return true;
-
- return false;
- }
-
-
// extension() decides whether a move should be searched with normal depth,
// or with extended depth. Certain classes of moves (checking moves, in
// particular) are searched with bigger depth than ordinary moves and in
|| v >= Max(value_mate_in(PLY_MAX), beta)
|| v < Min(value_mated_in(PLY_MAX), beta))
- && ( (is_lower_bound(tte->type()) && v >= beta)
- || (is_upper_bound(tte->type()) && v < beta));
+ && ( ((tte->type() & VALUE_TYPE_LOWER) && v >= beta)
+ || ((tte->type() & VALUE_TYPE_UPPER) && v < beta));
}
Value v = value_from_tt(tte->value(), ply);
- if ( (is_lower_bound(tte->type()) && v >= defaultEval)
- || (is_upper_bound(tte->type()) && v < defaultEval))
+ if ( ((tte->type() & VALUE_TYPE_LOWER) && v >= defaultEval)
+ || ((tte->type() & VALUE_TYPE_UPPER) && v < defaultEval))
return v;
return defaultEval;
void update_history(const Position& pos, Move move, Depth depth,
Move movesSearched[], int moveCount) {
-
Move m;
H.success(pos.piece_on(move_from(move)), move_to(move), depth);
ss->excludedMove = MOVE_NONE;
ss->skipNullMove = false;
ss->reduction = DEPTH_ZERO;
+ ss->sp = NULL;
if (i < 3)
ss->killers[0] = ss->killers[1] = ss->mateKiller = MOVE_NONE;
StateInfo st;
TTEntry* tte;
Position p(pos, pos.thread());
- EvalInfo ei;
- Value v;
+ Value v, m = VALUE_NONE;
for (int i = 0; pv[i] != MOVE_NONE; i++)
{
tte = TT.retrieve(p.get_key());
if (!tte || tte->move() != pv[i])
{
- v = (p.is_check() ? VALUE_NONE : evaluate(p, ei));
- TT.store(p.get_key(), VALUE_NONE, VALUE_TYPE_NONE, DEPTH_NONE, pv[i], v, ei.kingDanger[pos.side_to_move()]);
+ v = (p.is_check() ? VALUE_NONE : evaluate(p, m));
+ TT.store(p.get_key(), VALUE_NONE, VALUE_TYPE_NONE, DEPTH_NONE, pv[i], v, m);
}
p.do_move(pv[i], st);
}
threads[i].nodes = 0ULL;
}
- void ThreadsManager::resetBetaCounters() {
-
- for (int i = 0; i < MAX_THREADS; i++)
- threads[i].betaCutOffs[WHITE] = threads[i].betaCutOffs[BLACK] = 0ULL;
- }
-
int64_t ThreadsManager::nodes_searched() const {
int64_t result = 0ULL;
return result;
}
- void ThreadsManager::get_beta_counters(Color us, int64_t& our, int64_t& their) const {
-
- our = their = 0UL;
- for (int i = 0; i < MAX_THREADS; i++)
- {
- our += threads[i].betaCutOffs[us];
- their += threads[i].betaCutOffs[opposite_color(us)];
- }
- }
-
// idle_loop() is where the threads are parked when they have no work to do.
// The parameter 'sp', if non-NULL, is a pointer to an active SplitPoint
#if !defined(_MSC_VER)
lock_grab(&WaitLock);
if (AllThreadsShouldSleep || threadID >= ActiveThreads)
- pthread_cond_wait(&WaitCond, &WaitLock);
+ pthread_cond_wait(&WaitCond[threadID], &WaitLock);
lock_release(&WaitLock);
#else
WaitForSingleObject(SitIdleEvent[threadID], INFINITE);
threads[threadID].state = THREAD_SEARCHING;
- if (threads[threadID].splitPoint->pvNode)
- sp_search<PV>(threads[threadID].splitPoint, threadID);
+ // Here we call search() with SplitPoint template parameter set to true
+ SplitPoint* tsp = threads[threadID].splitPoint;
+ Position pos(*tsp->pos, threadID);
+ SearchStack* ss = tsp->sstack[threadID] + 1;
+ ss->sp = tsp;
+
+ if (tsp->pvNode)
+ search<PV, true>(pos, ss, tsp->alpha, tsp->beta, tsp->depth, tsp->ply);
else
- sp_search<NonPV>(threads[threadID].splitPoint, threadID);
+ search<NonPV, true>(pos, ss, tsp->alpha, tsp->beta, tsp->depth, tsp->ply);
assert(threads[threadID].state == THREAD_SEARCHING);
lock_grab(&(sp->lock));
lock_release(&(sp->lock));
+ // In the helpful master concept a master can help only a sub-tree, and
+ // because all work here is finished, the master cannot be booked.
assert(threads[threadID].state == THREAD_AVAILABLE);
threads[threadID].state = THREAD_SEARCHING;
lock_init(&MPLock);
lock_init(&WaitLock);
+ for (i = 0; i < MAX_THREADS; i++)
#if !defined(_MSC_VER)
- pthread_cond_init(&WaitCond, NULL);
+ pthread_cond_init(&WaitCond[i], NULL);
#else
- for (i = 0; i < MAX_THREADS; i++)
SitIdleEvent[i] = CreateEvent(0, FALSE, FALSE, 0);
#endif
void ThreadsManager::exit_threads() {
- ActiveThreads = MAX_THREADS; // HACK
- AllThreadsShouldSleep = true; // HACK
- wake_sleeping_threads();
+ AllThreadsShouldExit = true; // Let the woken up threads exit idle_loop()
+ ActiveThreads = MAX_THREADS; // Prevent any woken up thread from going back to sleep
- // This makes the threads to exit idle_loop()
- AllThreadsShouldExit = true;
-
- // Wait for thread termination
+ // Wake up all the threads and wait for termination
for (int i = 1; i < MAX_THREADS; i++)
+ {
+ wake_sleeping_thread(i);
while (threads[i].state != THREAD_TERMINATED) {}
+ }
// Now we can safely destroy the locks
for (int i = 0; i < MAX_THREADS; i++)
assert(threadID >= 0 && threadID < ActiveThreads);
- SplitPoint* sp;
+ SplitPoint* sp = threads[threadID].splitPoint;
- for (sp = threads[threadID].splitPoint; sp && !sp->stopRequest; sp = sp->parent) {}
+ for ( ; sp && !sp->stopRequest; sp = sp->parent) {}
return sp != NULL;
}
// Make a local copy to be sure doesn't change under our feet
int localActiveSplitPoints = threads[slave].activeSplitPoints;
- if (localActiveSplitPoints == 0)
- // No active split points means that the thread is available as
- // a slave for any other thread.
- return true;
-
- if (ActiveThreads == 2)
+ // No active split points means that the thread is available as
+ // a slave for any other thread.
+ if (localActiveSplitPoints == 0 || ActiveThreads == 2)
return true;
// Apply the "helpful master" concept if possible. Use localActiveSplitPoints
template <bool Fake>
void ThreadsManager::split(const Position& p, SearchStack* ss, int ply, Value* alpha,
const Value beta, Value* bestValue, Depth depth, Move threatMove,
- bool mateThreat, int* moveCount, MovePicker* mp, bool pvNode) {
+ bool mateThreat, int moveCount, MovePicker* mp, bool pvNode) {
assert(p.is_ok());
assert(ply > 0 && ply < PLY_MAX);
assert(*bestValue >= -VALUE_INFINITE);
splitPoint.pvNode = pvNode;
splitPoint.bestValue = *bestValue;
splitPoint.mp = mp;
- splitPoint.moveCount = *moveCount;
+ splitPoint.moveCount = moveCount;
splitPoint.pos = &p;
splitPoint.parentSstack = ss;
for (i = 0; i < ActiveThreads; i++)
}
- // wake_sleeping_threads() wakes up all sleeping threads when it is time
+ // wake_sleeping_thread() wakes up the given sleeping thread when it is time
// to start a new search from the root.
- void ThreadsManager::wake_sleeping_threads() {
+ void ThreadsManager::wake_sleeping_thread(int threadID) {
- assert(AllThreadsShouldSleep);
- assert(ActiveThreads > 0);
+ assert(threadID > 0);
+ assert(threads[threadID].state == THREAD_SLEEPING);
- AllThreadsShouldSleep = false;
-
- if (ActiveThreads == 1)
- return;
+ AllThreadsShouldSleep = false; // Prevent the woken up thread from going back to sleep
#if !defined(_MSC_VER)
- pthread_mutex_lock(&WaitLock);
- pthread_cond_broadcast(&WaitCond);
- pthread_mutex_unlock(&WaitLock);
+ pthread_mutex_lock(&WaitLock);
+ pthread_cond_signal(&WaitCond[threadID]);
+ pthread_mutex_unlock(&WaitLock);
#else
- for (int i = 1; i < MAX_THREADS; i++)
- SetEvent(SitIdleEvent[i]);
+ SetEvent(SitIdleEvent[threadID]);
#endif
-
}
void ThreadsManager::put_threads_to_sleep() {
- assert(!AllThreadsShouldSleep);
+ assert(!AllThreadsShouldSleep || ActiveThreads == 1);
// This makes the threads to go to sleep
AllThreadsShouldSleep = true;
// RootMoveList c'tor
- RootMoveList::RootMoveList(Position& pos, Move searchMoves[]) : count(0) {
+ RootMoveList::RootMoveList(Position& pos, Move searchMoves[]) {
SearchStack ss[PLY_MAX_PLUS_2];
- MoveStack mlist[MaxRootMoves];
+ MoveStack mlist[MOVES_MAX];
StateInfo st;
bool includeAllMoves = (searchMoves[0] == MOVE_NONE);
// Initialize search stack
init_ss_array(ss, PLY_MAX_PLUS_2);
- ss[0].currentMove = ss[0].bestMove = MOVE_NONE;
- ss[0].eval = VALUE_NONE;
+ ss[0].eval = ss[0].evalMargin = VALUE_NONE;
+ count = 0;
// Generate all legal moves
MoveStack* last = generate_moves(pos, mlist);
continue;
// Find a quick score for the move
+ moves[count].move = ss[0].currentMove = moves[count].pv[0] = cur->move;
+ moves[count].pv[1] = MOVE_NONE;
pos.do_move(cur->move, st);
- ss[0].currentMove = cur->move;
- moves[count].move = cur->move;
moves[count].score = -qsearch<PV>(pos, ss+1, -VALUE_INFINITE, VALUE_INFINITE, DEPTH_ZERO, 1);
- moves[count].pv[0] = cur->move;
- moves[count].pv[1] = MOVE_NONE;
pos.undo_move(cur->move);
count++;
}
sort();
}
+ // Score root moves using the same criterion as in the main search; the moves
+ // are scored according to the order in which MovePicker returns them.
- // RootMoveList simple methods definitions
-
- void RootMoveList::set_move_nodes(int moveNum, int64_t nodes) {
+ void RootMoveList::score_moves(const Position& pos)
+ {
+ Move move;
+ int score = 1000;
+ MovePicker mp = MovePicker(pos, MOVE_NONE, ONE_PLY, H);
- moves[moveNum].nodes = nodes;
- moves[moveNum].cumulativeNodes += nodes;
+ while ((move = mp.get_next_move()) != MOVE_NONE)
+ for (int i = 0; i < count; i++)
+ if (moves[i].move == move)
+ {
+ moves[i].mp_score = score--;
+ break;
+ }
}
- void RootMoveList::set_beta_counters(int moveNum, int64_t our, int64_t their) {
-
- moves[moveNum].ourBeta = our;
- moves[moveNum].theirBeta = their;
- }
+ // RootMoveList simple methods definitions
void RootMoveList::set_move_pv(int moveNum, const Move pv[]) {
}
}
-} // namspace
+} // namespace