int active_threads() const { return ActiveThreads; }
void set_active_threads(int newActiveThreads) { ActiveThreads = newActiveThreads; }
- void set_stop_request(int threadID) { threads[threadID].stop = true; }
void incrementNodeCounter(int threadID) { threads[threadID].nodes++; }
void incrementBetaCounter(Color us, Depth d, int threadID) { threads[threadID].betaCutOffs[us] += unsigned(d); }
- void print_current_line(SearchStack ss[], int ply, int threadID);
void resetNodeCounters();
void resetBetaCounters();
int64_t nodes_searched() const;
void get_beta_counters(Color us, int64_t& our, int64_t& their) const;
- bool idle_thread_exists(int master) const;
+ bool available_thread_exists(int master) const;
bool thread_is_available(int slave, int master) const;
bool thread_should_stop(int threadID) const;
void wake_sleeping_threads();
void put_threads_to_sleep();
void idle_loop(int threadID, SplitPoint* waitSp);
- bool split(const Position& pos, SearchStack* ss, int ply, Value* alpha, Value* beta, Value* bestValue,
- const Value futilityValue, Depth depth, int* moves, MovePicker* mp, int master, bool pvNode);
+ bool split(const Position& pos, SearchStack* ss, int ply, Value* alpha, const Value beta, Value* bestValue,
+ Depth depth, int* moves, MovePicker* mp, int master, bool pvNode);
private:
- friend void poll();
+ friend void poll(SearchStack ss[], int ply);
int ActiveThreads;
- bool AllThreadsShouldExit, AllThreadsShouldSleep;
- Thread threads[THREAD_MAX];
- SplitPoint SplitPointStack[THREAD_MAX][ACTIVE_SPLIT_POINTS_MAX];
+ volatile bool AllThreadsShouldExit, AllThreadsShouldSleep;
+ Thread threads[MAX_THREADS];
+ SplitPoint SplitPointStack[MAX_THREADS][ACTIVE_SPLIT_POINTS_MAX];
- Lock MPLock, IOLock;
+ Lock MPLock;
#if !defined(_MSC_VER)
pthread_cond_t WaitCond;
pthread_mutex_t WaitLock;
#else
- HANDLE SitIdleEvent[THREAD_MAX];
+ HANDLE SitIdleEvent[MAX_THREADS];
#endif
};
};
- /// Constants
+ /// Adjustments
- // Search depth at iteration 1
- const Depth InitialDepth = OnePly;
+ // Step 6. Razoring
- // Use internal iterative deepening?
- const bool UseIIDAtPVNodes = true;
- const bool UseIIDAtNonPVNodes = true;
+ // Maximum depth for razoring
+ const Depth RazorDepth = 4 * OnePly;
- // Internal iterative deepening margin. At Non-PV moves, when
- // UseIIDAtNonPVNodes is true, we do an internal iterative deepening
- // search when the static evaluation is at most IIDMargin below beta.
- const Value IIDMargin = Value(0x100);
+ // Dynamic razoring margin based on depth
+ inline Value razor_margin(Depth d) { return Value(0x200 + 0x10 * d); }
- // Easy move margin. An easy move candidate must be at least this much
- // better than the second best move.
- const Value EasyMoveMargin = Value(0x200);
+ // Step 8. Null move search with verification search
// Null move margin. A null move search will not be done if the static
// evaluation of the position is more than NullMoveMargin below beta.
const Value NullMoveMargin = Value(0x200);
- // If the TT move is at least SingleReplyMargin better then the
- // remaining ones we will extend it.
- const Value SingleReplyMargin = Value(0x20);
+ // Maximum depth for use of dynamic threat detection when null move fails low
+ const Depth ThreatDepth = 5 * OnePly;
- // Depth limit for razoring
- const Depth RazorDepth = 4 * OnePly;
+ // Step 9. Internal iterative deepening
- /// Lookup tables initialized at startup
+ // Minimum depth for use of internal iterative deepening
+ const Depth IIDDepthAtPVNodes = 5 * OnePly;
+ const Depth IIDDepthAtNonPVNodes = 8 * OnePly;
- // Reduction lookup tables and their getter functions
- int8_t PVReductionMatrix[64][64]; // [depth][moveNumber]
- int8_t NonPVReductionMatrix[64][64]; // [depth][moveNumber]
+ // Internal iterative deepening margin. At Non-PV nodes
+ // we do an internal iterative deepening
+ // search when the static evaluation is at most IIDMargin below beta.
+ const Value IIDMargin = Value(0x100);
- inline Depth pv_reduction(Depth d, int mn) { return (Depth) PVReductionMatrix[Min(d / 2, 63)][Min(mn, 63)]; }
- inline Depth nonpv_reduction(Depth d, int mn) { return (Depth) NonPVReductionMatrix[Min(d / 2, 63)][Min(mn, 63)]; }
+ // Step 11. Decide the new search depth
- // Futility lookup tables and their getter functions
+ // Extensions. Configurable UCI options.
+ // Array index 0 is used at non-PV nodes, index 1 at PV nodes.
+ Depth CheckExtension[2], SingleEvasionExtension[2], PawnPushTo7thExtension[2];
+ Depth PassedPawnExtension[2], PawnEndgameExtension[2], MateThreatExtension[2];
+
+ // Minimum depth for use of singular extension
+ const Depth SingularExtensionDepthAtPVNodes = 6 * OnePly;
+ const Depth SingularExtensionDepthAtNonPVNodes = 8 * OnePly;
+
+ // If the TT move is at least SingularExtensionMargin better than the
+ // remaining ones we will extend it.
+ const Value SingularExtensionMargin = Value(0x20);
+
+ // Step 12. Futility pruning
+
+ // Futility margin for quiescence search
const Value FutilityMarginQS = Value(0x80);
+
+ // Futility lookup tables (initialized at startup) and their getter functions
int32_t FutilityMarginsMatrix[14][64]; // [depth][moveNumber]
int FutilityMoveCountArray[32]; // [depth]
inline Value futility_margin(Depth d, int mn) { return Value(d < 7*OnePly ? FutilityMarginsMatrix[Max(d, 0)][Min(mn, 63)] : 2 * VALUE_INFINITE); }
inline int futility_move_count(Depth d) { return d < 16*OnePly ? FutilityMoveCountArray[d] : 512; }
- /// Variables initialized by UCI options
+ // Step 14. Reduced search
+
+ // Reduction lookup tables (initialized at startup) and their getter functions
+ int8_t PVReductionMatrix[64][64]; // [depth][moveNumber]
+ int8_t NonPVReductionMatrix[64][64]; // [depth][moveNumber]
+
+ inline Depth pv_reduction(Depth d, int mn) { return (Depth) PVReductionMatrix[Min(d / 2, 63)][Min(mn, 63)]; }
+ inline Depth nonpv_reduction(Depth d, int mn) { return (Depth) NonPVReductionMatrix[Min(d / 2, 63)][Min(mn, 63)]; }
+
+ // Step. Common adjustments
+
+ // Search depth at iteration 1
+ const Depth InitialDepth = OnePly;
- // Depth limit for use of dynamic threat detection
- Depth ThreatDepth;
+ // Easy move margin. An easy move candidate must be at least this much
+ // better than the second best move.
+ const Value EasyMoveMargin = Value(0x200);
// Last seconds noise filtering (LSN)
const bool UseLSNFiltering = true;
const Value LSNValue = value_from_centipawns(200);
bool loseOnTime = false;
- // Extensions. Array index 0 is used at non-PV nodes, index 1 at PV nodes.
- Depth CheckExtension[2], SingleEvasionExtension[2], PawnPushTo7thExtension[2];
- Depth PassedPawnExtension[2], PawnEndgameExtension[2], MateThreatExtension[2];
+
+ /// Global variables
// Iteration counters
int Iteration;
int current_search_time();
int nps();
- void poll();
+ void poll(SearchStack ss[], int ply);
void ponderhit();
void wait_for_stop_or_ponderhit();
void init_ss_array(SearchStack ss[]);
MateThreatExtension[1] = Depth(get_option_value_int("Mate Threat Extension (PV nodes)"));
MateThreatExtension[0] = Depth(get_option_value_int("Mate Threat Extension (non-PV nodes)"));
- ThreatDepth = get_option_value_int("Threat Depth") * OnePly;
-
Chess960 = get_option_value_bool("UCI_Chess960");
ShowCurrentLine = get_option_value_bool("UCI_ShowCurrLine");
UseLogFile = get_option_value_bool("Use Search Log");
// Wake up sleeping threads
TM.wake_sleeping_threads();
- for (int i = 1; i < TM.active_threads(); i++)
- assert(TM.thread_is_available(i, 0));
-
// Set thinking time
int myTime = time[side_to_move];
int myIncrement = increment[side_to_move];
assert(threadID >= 0 && threadID < TM.active_threads());
Move movesSearched[256];
+ EvalInfo ei;
StateInfo st;
const TTEntry* tte;
Move ttMove, move;
Depth ext, newDepth;
- Value oldAlpha, value;
- bool isCheck, mateThreat, singleEvasion, moveIsCheck, captureOrPromotion, dangerous;
+ Value bestValue, value, oldAlpha;
+ bool isCheck, singleEvasion, moveIsCheck, captureOrPromotion, dangerous;
+ bool mateThreat = false;
int moveCount = 0;
- Value bestValue = value = -VALUE_INFINITE;
+ bestValue = value = -VALUE_INFINITE;
if (depth < OnePly)
return qsearch(pos, ss, alpha, beta, Depth(0), ply, threadID);
- // Initialize, and make an early exit in case of an aborted search,
- // an instant draw, maximum ply reached, etc.
+ // Step 1. Initialize node and poll
+ // Polling can abort search.
init_node(ss, ply, threadID);
- // After init_node() that calls poll()
+ // Step 2. Check for aborted search and immediate draw
if (AbortSearch || TM.thread_should_stop(threadID))
return Value(0);
if (pos.is_draw() || ply >= PLY_MAX - 1)
return VALUE_DRAW;
- // Mate distance pruning
+ // Step 3. Mate distance pruning
oldAlpha = alpha;
alpha = Max(value_mated_in(ply), alpha);
beta = Min(value_mate_in(ply+1), beta);
if (alpha >= beta)
return alpha;
- // Transposition table lookup. At PV nodes, we don't use the TT for
- // pruning, but only for move ordering. This is to avoid problems in
- // the following areas:
+ // Step 4. Transposition table lookup
+ // At PV nodes, we don't use the TT for pruning, but only for move ordering.
+ // This is to avoid problems in the following areas:
//
// * Repetition draw detection
// * Fifty move rule detection
// * Searching for a mate
// * Printing of full PV line
- //
tte = TT.retrieve(pos.get_key());
ttMove = (tte ? tte->move() : MOVE_NONE);
- // Go with internal iterative deepening if we don't have a TT move
- if ( UseIIDAtPVNodes
- && depth >= 5*OnePly
+ // Step 5. Evaluate the position statically
+ // At PV nodes we do this only to update gain statistics
+ isCheck = pos.is_check();
+ if (!isCheck)
+ {
+ ss[ply].eval = evaluate(pos, ei, threadID);
+ update_gains(pos, ss[ply - 1].currentMove, ss[ply - 1].eval, ss[ply].eval);
+ }
+
+ // Step 6. Razoring (is omitted in PV nodes)
+ // Step 7. Static null move pruning (is omitted in PV nodes)
+ // Step 8. Null move search with verification search (is omitted in PV nodes)
+
+ // Step 9. Internal iterative deepening
+ if ( depth >= IIDDepthAtPVNodes
&& ttMove == MOVE_NONE)
{
search_pv(pos, ss, alpha, beta, depth-2*OnePly, ply, threadID);
tte = TT.retrieve(pos.get_key());
}
- isCheck = pos.is_check();
- if (!isCheck)
- {
- // Update gain statistics of the previous move that lead
- // us in this position.
- EvalInfo ei;
- ss[ply].eval = evaluate(pos, ei, threadID);
- update_gains(pos, ss[ply - 1].currentMove, ss[ply - 1].eval, ss[ply].eval);
- }
+ // Step 10. Loop through moves
+ // Loop through all legal moves until no moves remain or a beta cutoff occurs
- // Initialize a MovePicker object for the current position, and prepare
- // to search all moves
+ // Initialize a MovePicker object for the current position
mateThreat = pos.has_mate_threat(opposite_color(pos.side_to_move()));
- CheckInfo ci(pos);
MovePicker mp = MovePicker(pos, ttMove, depth, H, &ss[ply]);
+ CheckInfo ci(pos);
- // Loop through all legal moves until no moves remain or a beta cutoff
- // occurs.
while ( alpha < beta
&& (move = mp.get_next_move()) != MOVE_NONE
&& !TM.thread_should_stop(threadID))
moveIsCheck = pos.move_is_check(move, ci);
captureOrPromotion = pos.move_is_capture_or_promotion(move);
- // Decide the new search depth
+ // Step 11. Decide the new search depth
ext = extension(pos, move, true, captureOrPromotion, moveIsCheck, singleEvasion, mateThreat, &dangerous);
// Singular extension search. We extend the TT move if its value is much better than
// its siblings. To verify this we do a reduced search on all the other moves but the
// ttMove, if result is lower then ttValue minus a margin then we extend ttMove.
- if ( depth >= 6 * OnePly
+ if ( depth >= SingularExtensionDepthAtPVNodes
&& tte
&& move == tte->move()
&& ext < OnePly
if (abs(ttValue) < VALUE_KNOWN_WIN)
{
- Value excValue = search(pos, ss, ttValue - SingleReplyMargin, depth / 2, ply, false, threadID, move);
+ Value excValue = search(pos, ss, ttValue - SingularExtensionMargin, depth / 2, ply, false, threadID, move);
- if (excValue < ttValue - SingleReplyMargin)
+ if (excValue < ttValue - SingularExtensionMargin)
ext = OnePly;
}
}
newDepth = depth - OnePly + ext;
- // Update current move
+ // Update current move (this must be done after singular extension search)
movesSearched[moveCount++] = ss[ply].currentMove = move;
- // Make and search the move
+ // Step 12. Futility pruning (is omitted in PV nodes)
+
+ // Step 13. Make the move
pos.do_move(move, st, ci, moveIsCheck);
- if (moveCount == 1) // The first move in list is the PV
+ // Step extra. pv search (only in PV nodes)
+ // The first move in list is the expected PV
+ if (moveCount == 1)
value = -search_pv(pos, ss, -beta, -alpha, newDepth, ply+1, threadID);
else
{
- // Try to reduce non-pv search depth by one ply if move seems not problematic,
+ // Step 14. Reduced search
// if the move fails high will be re-searched at full depth.
bool doFullDepthSearch = true;
}
}
- if (doFullDepthSearch) // Go with full depth non-pv search
+ // Step 15. Full depth search
+ if (doFullDepthSearch)
{
ss[ply].reduction = Depth(0);
value = -search(pos, ss, -alpha, newDepth, ply+1, true, threadID);
+
+ // Step extra. pv search (only in PV nodes)
if (value > alpha && value < beta)
value = -search_pv(pos, ss, -beta, -alpha, newDepth, ply+1, threadID);
}
}
+
+ // Step 16. Undo move
pos.undo_move(move);
assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);
- // New best move?
+ // Step 17. Check for new best move
if (value > bestValue)
{
bestValue = value;
}
}
- // Split?
+ // Step 18. Check for split
if ( TM.active_threads() > 1
&& bestValue < beta
&& depth >= MinimumSplitDepth
&& Iteration <= 99
- && TM.idle_thread_exists(threadID)
+ && TM.available_thread_exists(threadID)
&& !AbortSearch
&& !TM.thread_should_stop(threadID)
- && TM.split(pos, ss, ply, &alpha, &beta, &bestValue, VALUE_NONE,
+ && TM.split(pos, ss, ply, &alpha, beta, &bestValue,
depth, &moveCount, &mp, threadID, true))
break;
}
- // All legal moves have been searched. A special case: If there were
+ // Step 19. Check for mate and stalemate
+ // All legal moves have been searched and if there were
// no legal moves, it must be mate or stalemate.
if (moveCount == 0)
return (isCheck ? value_mated_in(ply) : VALUE_DRAW);
+ // Step 20. Update tables
// If the search is not aborted, update the transposition table,
// history counters, and killer moves.
if (AbortSearch || TM.thread_should_stop(threadID))
const TTEntry* tte;
Move ttMove, move;
Depth ext, newDepth;
- Value bestValue, staticValue, nullValue, value, futilityValue, futilityValueScaled;
+ Value bestValue, refinedValue, nullValue, value, futilityValueScaled;
bool isCheck, singleEvasion, moveIsCheck, captureOrPromotion, dangerous;
bool mateThreat = false;
int moveCount = 0;
- futilityValue = staticValue = bestValue = value = -VALUE_INFINITE;
+ refinedValue = bestValue = value = -VALUE_INFINITE;
if (depth < OnePly)
return qsearch(pos, ss, beta-1, beta, Depth(0), ply, threadID);
- // Initialize, and make an early exit in case of an aborted search,
- // an instant draw, maximum ply reached, etc.
+ // Step 1. Initialize node and poll
+ // Polling can abort search.
init_node(ss, ply, threadID);
- // After init_node() that calls poll()
+ // Step 2. Check for aborted search and immediate draw
if (AbortSearch || TM.thread_should_stop(threadID))
return Value(0);
if (pos.is_draw() || ply >= PLY_MAX - 1)
return VALUE_DRAW;
- // Mate distance pruning
+ // Step 3. Mate distance pruning
if (value_mated_in(ply) >= beta)
return beta;
if (value_mate_in(ply + 1) < beta)
return beta - 1;
+ // Step 4. Transposition table lookup
+
// We don't want the score of a partial search to overwrite a previous full search
- // TT value, so we use a different position key in case of an excluded move exsists.
+ // TT value, so we use a different position key in case an excluded move exists.
Key posKey = excludedMove ? pos.get_exclusion_key() : pos.get_key();
- // Transposition table lookup
tte = TT.retrieve(posKey);
ttMove = (tte ? tte->move() : MOVE_NONE);
return value_from_tt(tte->value(), ply);
}
+ // Step 5. Evaluate the position statically
isCheck = pos.is_check();
- // Evaluate the position statically
if (!isCheck)
{
if (tte && (tte->type() & VALUE_TYPE_EVAL))
- staticValue = value_from_tt(tte->value(), ply);
+ ss[ply].eval = value_from_tt(tte->value(), ply);
else
- staticValue = evaluate(pos, ei, threadID);
+ ss[ply].eval = evaluate(pos, ei, threadID);
- ss[ply].eval = staticValue;
- futilityValue = staticValue + futility_margin(depth, 0); //FIXME: Remove me, only for split
- staticValue = refine_eval(tte, staticValue, ply); // Enhance accuracy with TT value if possible
+ refinedValue = refine_eval(tte, ss[ply].eval, ply); // Enhance accuracy with TT value if possible
update_gains(pos, ss[ply - 1].currentMove, ss[ply - 1].eval, ss[ply].eval);
}
- // Static null move pruning. We're betting that the opponent doesn't have
- // a move that will reduce the score by more than FutilityMargins[int(depth)]
- // if we do a null move.
+ // Step 6. Razoring
+ if ( !value_is_mate(beta)
+ && !isCheck
+ && depth < RazorDepth
+ && refinedValue < beta - razor_margin(depth)
+ && ss[ply - 1].currentMove != MOVE_NULL
+ && ttMove == MOVE_NONE
+ && !pos.has_pawn_on_7th(pos.side_to_move()))
+ {
+ Value rbeta = beta - razor_margin(depth);
+ Value v = qsearch(pos, ss, rbeta-1, rbeta, Depth(0), ply, threadID);
+ if (v < rbeta)
+ return v; //FIXME: Logically should be: return (v + razor_margin(depth));
+ }
+
+ // Step 7. Static null move pruning
+ // We're betting that the opponent doesn't have a move that will reduce
+ // the score by more than futility_margin(depth) if we do a null move.
if ( !isCheck
&& allowNullmove
&& depth < RazorDepth
- && staticValue - futility_margin(depth, 0) >= beta)
- return staticValue - futility_margin(depth, 0);
+ && refinedValue - futility_margin(depth, 0) >= beta)
+ return refinedValue - futility_margin(depth, 0);
- // Null move search
+ // Step 8. Null move search with verification search
+ // When we jump directly to qsearch() we do a null move only if static value is
+ // at least beta. Otherwise we do a null move if static value is not more than
+ // NullMoveMargin under beta.
if ( allowNullmove
&& depth > OnePly
&& !isCheck
&& !value_is_mate(beta)
&& ok_to_do_nullmove(pos)
- && staticValue >= beta - NullMoveMargin)
+ && refinedValue >= beta - (depth >= 4 * OnePly ? NullMoveMargin : 0))
{
ss[ply].currentMove = MOVE_NULL;
int R = 3 + (depth >= 5 * OnePly ? depth / 8 : 0);
// Null move dynamic reduction based on value
- if (staticValue - beta > PawnValueMidgame)
+ if (refinedValue - beta > PawnValueMidgame)
R++;
nullValue = -search(pos, ss, -(beta-1), depth-R*OnePly, ply+1, false, threadID);
return beta - 1;
}
}
- // Null move search not allowed, try razoring
- else if ( !value_is_mate(beta)
- && !isCheck
- && depth < RazorDepth
- && staticValue < beta - (NullMoveMargin + 16 * depth)
- && ss[ply - 1].currentMove != MOVE_NULL
- && ttMove == MOVE_NONE
- && !pos.has_pawn_on_7th(pos.side_to_move()))
- {
- Value rbeta = beta - (NullMoveMargin + 16 * depth);
- Value v = qsearch(pos, ss, rbeta-1, rbeta, Depth(0), ply, threadID);
- if (v < rbeta)
- return v;
- }
- // Go with internal iterative deepening if we don't have a TT move
- if (UseIIDAtNonPVNodes && ttMove == MOVE_NONE && depth >= 8*OnePly &&
- !isCheck && ss[ply].eval >= beta - IIDMargin)
+ // Step 9. Internal iterative deepening
+ if ( depth >= IIDDepthAtNonPVNodes
+ && ttMove == MOVE_NONE
+ && !isCheck
+ && ss[ply].eval >= beta - IIDMargin)
{
- search(pos, ss, beta, Min(depth/2, depth-2*OnePly), ply, false, threadID);
+ search(pos, ss, beta, depth/2, ply, false, threadID);
ttMove = ss[ply].pv[ply];
tte = TT.retrieve(posKey);
}
- // Initialize a MovePicker object for the current position, and prepare
- // to search all moves.
+ // Step 10. Loop through moves
+ // Loop through all legal moves until no moves remain or a beta cutoff occurs
+
+ // Initialize a MovePicker object for the current position
MovePicker mp = MovePicker(pos, ttMove, depth, H, &ss[ply]);
CheckInfo ci(pos);
- // Loop through all legal moves until no moves remain or a beta cutoff occurs
while ( bestValue < beta
&& (move = mp.get_next_move()) != MOVE_NONE
&& !TM.thread_should_stop(threadID))
singleEvasion = (isCheck && mp.number_of_evasions() == 1);
captureOrPromotion = pos.move_is_capture_or_promotion(move);
- // Decide the new search depth
+ // Step 11. Decide the new search depth
ext = extension(pos, move, false, captureOrPromotion, moveIsCheck, singleEvasion, mateThreat, &dangerous);
// Singular extension search. We extend the TT move if its value is much better than
// its siblings. To verify this we do a reduced search on all the other moves but the
// ttMove, if result is lower then ttValue minus a margin then we extend ttMove.
- if ( depth >= 8 * OnePly
+ if ( depth >= SingularExtensionDepthAtNonPVNodes
&& tte
&& move == tte->move()
&& !excludedMove // Do not allow recursive single-reply search
if (abs(ttValue) < VALUE_KNOWN_WIN)
{
- Value excValue = search(pos, ss, ttValue - SingleReplyMargin, depth / 2, ply, false, threadID, move);
+ Value excValue = search(pos, ss, ttValue - SingularExtensionMargin, depth / 2, ply, false, threadID, move);
- if (excValue < ttValue - SingleReplyMargin)
+ if (excValue < ttValue - SingularExtensionMargin)
ext = OnePly;
}
}
newDepth = depth - OnePly + ext;
- // Update current move
+ // Update current move (this must be done after singular extension search)
movesSearched[moveCount++] = ss[ply].currentMove = move;
- // Futility pruning
+ // Step 12. Futility pruning
if ( !isCheck
&& !dangerous
&& !captureOrPromotion
continue;
// Value based pruning
- Depth predictedDepth = newDepth - nonpv_reduction(depth, moveCount); //FIXME: We are ignoring condition: depth >= 3*OnePly, BUG??
+ Depth predictedDepth = newDepth - nonpv_reduction(depth, moveCount); // We illogically ignore reduction condition depth >= 3*OnePly
futilityValueScaled = ss[ply].eval + futility_margin(predictedDepth, moveCount)
+ H.gain(pos.piece_on(move_from(move)), move_to(move)) + 45;
}
}
- // Make and search the move
+ // Step 13. Make the move
pos.do_move(move, st, ci, moveIsCheck);
- // Try to reduce non-pv search depth by one ply if move seems not problematic,
+ // Step 14. Reduced search
// if the move fails high will be re-searched at full depth.
bool doFullDepthSearch = true;
}
}
- if (doFullDepthSearch) // Go with full depth non-pv search
+ // Step 15. Full depth search
+ if (doFullDepthSearch)
{
ss[ply].reduction = Depth(0);
value = -search(pos, ss, -(beta-1), newDepth, ply+1, true, threadID);
}
+
+ // Step 16. Undo move
pos.undo_move(move);
assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);
- // New best move?
+ // Step 17. Check for new best move
if (value > bestValue)
{
bestValue = value;
ss[ply].mateKiller = move;
}
- // Split?
+ // Step 18. Check for split
if ( TM.active_threads() > 1
&& bestValue < beta
&& depth >= MinimumSplitDepth
&& Iteration <= 99
- && TM.idle_thread_exists(threadID)
+ && TM.available_thread_exists(threadID)
&& !AbortSearch
&& !TM.thread_should_stop(threadID)
- && TM.split(pos, ss, ply, &beta, &beta, &bestValue, futilityValue, //FIXME: SMP & futilityValue
+ && TM.split(pos, ss, ply, NULL, beta, &bestValue,
depth, &moveCount, &mp, threadID, false))
break;
}
- // All legal moves have been searched. A special case: If there were
+ // Step 19. Check for mate and stalemate
+ // All legal moves have been searched and if there were
// no legal moves, it must be mate or stalemate.
+ // If one move was excluded return fail low.
if (!moveCount)
return excludedMove ? beta - 1 : (pos.is_check() ? value_mated_in(ply) : VALUE_DRAW);
+ // Step 20. Update tables
// If the search is not aborted, update the transposition table,
// history counters, and killer moves.
if (AbortSearch || TM.thread_should_stop(threadID))
// Don't search moves with negative SEE values
if ( (!isCheck || evasionPrunable)
+ && !pvNode
&& move != ttMove
&& !move_is_promotion(move)
&& pos.see_sign(move) < 0)
// splitting, we don't have to repeat all this work in sp_search(). We
// also don't need to store anything to the hash table here: This is taken
// care of after we return from the split point.
+ // FIXME: We are currently ignoring mateThreat flag here
void sp_search(SplitPoint* sp, int threadID) {
assert(threadID >= 0 && threadID < TM.active_threads());
assert(TM.active_threads() > 1);
+ StateInfo st;
+ Move move;
+ Depth ext, newDepth;
+ Value value, futilityValueScaled;
+ bool isCheck, moveIsCheck, captureOrPromotion, dangerous;
+ int moveCount;
+ value = -VALUE_INFINITE;
+
Position pos(*sp->pos);
CheckInfo ci(pos);
SearchStack* ss = sp->sstack[threadID];
- Value value = -VALUE_INFINITE;
- Move move;
- int moveCount;
- bool isCheck = pos.is_check();
- bool useFutilityPruning = sp->depth < 7 * OnePly //FIXME: sync with search
- && !isCheck;
+ isCheck = pos.is_check();
+
+ // Step 10. Loop through moves
+ // Loop through all legal moves until no moves remain or a beta cutoff occurs
+ lock_grab(&(sp->lock));
- while ( lock_grab_bool(&(sp->lock))
- && sp->bestValue < sp->beta
+ while ( sp->bestValue < sp->beta
&& !TM.thread_should_stop(threadID)
&& (move = sp->mp->get_next_move()) != MOVE_NONE)
{
assert(move_is_ok(move));
- bool moveIsCheck = pos.move_is_check(move, ci);
- bool captureOrPromotion = pos.move_is_capture_or_promotion(move);
+ moveIsCheck = pos.move_is_check(move, ci);
+ captureOrPromotion = pos.move_is_capture_or_promotion(move);
- ss[sp->ply].currentMove = move;
+ // Step 11. Decide the new search depth
+ ext = extension(pos, move, false, captureOrPromotion, moveIsCheck, false, false, &dangerous);
+ newDepth = sp->depth - OnePly + ext;
- // Decide the new search depth
- bool dangerous;
- Depth ext = extension(pos, move, false, captureOrPromotion, moveIsCheck, false, false, &dangerous);
- Depth newDepth = sp->depth - OnePly + ext;
+ // Update current move
+ ss[sp->ply].currentMove = move;
- // Prune?
- if ( useFutilityPruning
+ // Step 12. Futility pruning
+ if ( !isCheck
&& !dangerous
- && !captureOrPromotion)
+ && !captureOrPromotion
+ && !move_is_castle(move))
{
// Move count based pruning
if ( moveCount >= futility_move_count(sp->depth)
&& ok_to_prune(pos, move, ss[sp->ply].threatMove)
&& sp->bestValue > value_mated_in(PLY_MAX))
+ {
+ lock_grab(&(sp->lock));
continue;
+ }
// Value based pruning
- Value futilityValueScaled = sp->futilityValue - moveCount * 8; //FIXME: sync with search
+ Depth predictedDepth = newDepth - nonpv_reduction(sp->depth, moveCount);
+ futilityValueScaled = ss[sp->ply].eval + futility_margin(predictedDepth, moveCount)
+ + H.gain(pos.piece_on(move_from(move)), move_to(move)) + 45;
if (futilityValueScaled < sp->beta)
{
- if (futilityValueScaled > sp->bestValue) // Less then 1% of cases
- {
- lock_grab(&(sp->lock));
- if (futilityValueScaled > sp->bestValue)
- sp->bestValue = futilityValueScaled;
- lock_release(&(sp->lock));
- }
+ lock_grab(&(sp->lock));
+
+ if (futilityValueScaled > sp->bestValue)
+ sp->bestValue = futilityValueScaled;
continue;
}
}
- // Make and search the move.
- StateInfo st;
+ // Step 13. Make the move
pos.do_move(move, st, ci, moveIsCheck);
- // Try to reduce non-pv search depth by one ply if move seems not problematic,
+ // Step 14. Reduced search
// if the move fails high will be re-searched at full depth.
bool doFullDepthSearch = true;
if (ss[sp->ply].reduction)
{
value = -search(pos, ss, -(sp->beta-1), newDepth-ss[sp->ply].reduction, sp->ply+1, true, threadID);
- doFullDepthSearch = (value >= sp->beta);
+ doFullDepthSearch = (value >= sp->beta && !TM.thread_should_stop(threadID));
}
}
- if (doFullDepthSearch) // Go with full depth non-pv search
+ // Step 15. Full depth search
+ if (doFullDepthSearch)
{
ss[sp->ply].reduction = Depth(0);
value = -search(pos, ss, -(sp->beta - 1), newDepth, sp->ply+1, true, threadID);
}
+
+ // Step 16. Undo move
pos.undo_move(move);
assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);
- if (TM.thread_should_stop(threadID))
- {
- lock_grab(&(sp->lock));
- break;
- }
+ // Step 17. Check for new best move
+ lock_grab(&(sp->lock));
- // New best move?
- if (value > sp->bestValue) // Less then 2% of cases
+ if (value > sp->bestValue && !TM.thread_should_stop(threadID))
{
- lock_grab(&(sp->lock));
- if (value > sp->bestValue && !TM.thread_should_stop(threadID))
+ sp->bestValue = value;
+ if (sp->bestValue >= sp->beta)
{
- sp->bestValue = value;
- if (sp->bestValue >= sp->beta)
- {
- sp_update_pv(sp->parentSstack, ss, sp->ply);
- for (int i = 0; i < TM.active_threads(); i++)
- if (i != threadID && (i == sp->master || sp->slaves[i]))
- TM.set_stop_request(i);
-
- sp->finished = true;
- }
+ sp->stopRequest = true;
+ sp_update_pv(sp->parentSstack, ss, sp->ply);
}
- lock_release(&(sp->lock));
}
}
/* Here we have the lock still grabbed */
- // If this is the master thread and we have been asked to stop because of
- // a beta cutoff higher up in the tree, stop all slave threads. Note that
- // thread_should_stop(threadID) does not imply that 'stop' flag is set, so
- // do this explicitly now, under lock protection.
- if (sp->master == threadID && TM.thread_should_stop(threadID))
- for (int i = 0; i < TM.active_threads(); i++)
- if (sp->slaves[i] || i == threadID)
- TM.set_stop_request(i);
-
- sp->cpus--;
sp->slaves[threadID] = 0;
+ sp->cpus--;
lock_release(&(sp->lock));
}
// don't have to repeat all this work in sp_search_pv(). We also don't
// need to store anything to the hash table here: This is taken care of
// after we return from the split point.
+ // FIXME: We are ignoring mateThreat flag!
void sp_search_pv(SplitPoint* sp, int threadID) {
assert(threadID >= 0 && threadID < TM.active_threads());
assert(TM.active_threads() > 1);
+ StateInfo st;
+ Move move;
+ Depth ext, newDepth;
+ Value value;
+ bool moveIsCheck, captureOrPromotion, dangerous;
+ int moveCount;
+ value = -VALUE_INFINITE;
+
Position pos(*sp->pos);
CheckInfo ci(pos);
SearchStack* ss = sp->sstack[threadID];
- Value value = -VALUE_INFINITE;
- int moveCount;
- Move move;
- while ( lock_grab_bool(&(sp->lock))
- && sp->alpha < sp->beta
+ // Step 10. Loop through moves
+ // Loop through all legal moves until no moves remain or a beta cutoff occurs
+ lock_grab(&(sp->lock));
+
+ while ( sp->alpha < sp->beta
&& !TM.thread_should_stop(threadID)
&& (move = sp->mp->get_next_move()) != MOVE_NONE)
{
assert(move_is_ok(move));
- bool moveIsCheck = pos.move_is_check(move, ci);
- bool captureOrPromotion = pos.move_is_capture_or_promotion(move);
+ moveIsCheck = pos.move_is_check(move, ci);
+ captureOrPromotion = pos.move_is_capture_or_promotion(move);
+
+ // Step 11. Decide the new search depth
+ ext = extension(pos, move, true, captureOrPromotion, moveIsCheck, false, false, &dangerous);
+ newDepth = sp->depth - OnePly + ext;
+ // Update current move
ss[sp->ply].currentMove = move;
- // Decide the new search depth
- bool dangerous;
- Depth ext = extension(pos, move, true, captureOrPromotion, moveIsCheck, false, false, &dangerous);
- Depth newDepth = sp->depth - OnePly + ext;
+ // Step 12. Futility pruning (is omitted in PV nodes)
- // Make and search the move.
- StateInfo st;
+ // Step 13. Make the move
pos.do_move(move, st, ci, moveIsCheck);
- // Try to reduce non-pv search depth by one ply if move seems not problematic,
+ // Step 14. Reduced search
// if the move fails high will be re-searched at full depth.
bool doFullDepthSearch = true;
{
Value localAlpha = sp->alpha;
value = -search(pos, ss, -localAlpha, newDepth-ss[sp->ply].reduction, sp->ply+1, true, threadID);
- doFullDepthSearch = (value > localAlpha);
+ doFullDepthSearch = (value > localAlpha && !TM.thread_should_stop(threadID));
}
}
- if (doFullDepthSearch) // Go with full depth non-pv search
+ // Step 15. Full depth search
+ if (doFullDepthSearch)
{
Value localAlpha = sp->alpha;
ss[sp->ply].reduction = Depth(0);
value = -search(pos, ss, -localAlpha, newDepth, sp->ply+1, true, threadID);
- if (value > localAlpha && value < sp->beta)
+ if (value > localAlpha && value < sp->beta && !TM.thread_should_stop(threadID))
{
// If another thread has failed high then sp->alpha has been increased
// to be higher or equal then beta, if so, avoid to start a PV search.
localAlpha = sp->alpha;
if (localAlpha < sp->beta)
value = -search_pv(pos, ss, -sp->beta, -localAlpha, newDepth, sp->ply+1, threadID);
- else
- assert(TM.thread_should_stop(threadID));
- }
+ }
}
+
+ // Step 16. Undo move
pos.undo_move(move);
assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);
- if (TM.thread_should_stop(threadID))
- {
- lock_grab(&(sp->lock));
- break;
- }
+ // Step 17. Check for new best move
+ lock_grab(&(sp->lock));
- // New best move?
- if (value > sp->bestValue) // Less then 2% of cases
+ if (value > sp->bestValue && !TM.thread_should_stop(threadID))
{
- lock_grab(&(sp->lock));
- if (value > sp->bestValue && !TM.thread_should_stop(threadID))
+ sp->bestValue = value;
+ if (value > sp->alpha)
{
- sp->bestValue = value;
- if (value > sp->alpha)
- {
- // Ask threads to stop before to modify sp->alpha
- if (value >= sp->beta)
- {
- for (int i = 0; i < TM.active_threads(); i++)
- if (i != threadID && (i == sp->master || sp->slaves[i]))
- TM.set_stop_request(i);
-
- sp->finished = true;
- }
-
- sp->alpha = value;
-
- sp_update_pv(sp->parentSstack, ss, sp->ply);
- if (value == value_mate_in(sp->ply + 1))
- ss[sp->ply].mateKiller = move;
- }
+ // Ask threads to stop before modifying sp->alpha
+ if (value >= sp->beta)
+ sp->stopRequest = true;
+
+ sp->alpha = value;
+
+ sp_update_pv(sp->parentSstack, ss, sp->ply);
+ if (value == value_mate_in(sp->ply + 1))
+ ss[sp->ply].mateKiller = move;
}
- lock_release(&(sp->lock));
}
}
/* Here we have the lock still grabbed */
- // If this is the master thread and we have been asked to stop because of
- // a beta cutoff higher up in the tree, stop all slave threads. Note that
- // thread_should_stop(threadID) does not imply that 'stop' flag is set, so
- // do this explicitly now, under lock protection.
- if (sp->master == threadID && TM.thread_should_stop(threadID))
- for (int i = 0; i < TM.active_threads(); i++)
- if (sp->slaves[i] || i == threadID)
- TM.set_stop_request(i);
-
- sp->cpus--;
sp->slaves[threadID] = 0;
+ sp->cpus--;
lock_release(&(sp->lock));
}
NodesSincePoll++;
if (NodesSincePoll >= NodesBetweenPolls)
{
- poll();
+ poll(ss, ply);
NodesSincePoll = 0;
}
}
ss[ply].init(ply);
ss[ply + 2].initKillers();
- TM.print_current_line(ss, ply, threadID);
}
// looks at the time consumed so far and decides if it's time to abort the
// search.
- void poll() {
+ void poll(SearchStack ss[], int ply) {
static int lastInfoTime;
int t = current_search_time();
else if (t - lastInfoTime >= 1000)
{
lastInfoTime = t;
- lock_grab(&TM.IOLock);
if (dbg_show_mean)
dbg_print_mean();
cout << "info nodes " << TM.nodes_searched() << " nps " << nps()
<< " time " << t << " hashfull " << TT.full() << endl;
- lock_release(&TM.IOLock);
+ // We only support current line printing in single-thread mode
+ if (ShowCurrentLine && TM.active_threads() == 1)
+ {
+ cout << "info currline";
+ for (int p = 0; p < ply; p++)
+ cout << " " << ss[p].currentMove;
- if (ShowCurrentLine)
- TM.threads[0].printCurrentLineRequest = true;
+ cout << endl;
+ }
}
// Should we stop the search?
void ThreadsManager::resetNodeCounters() {
- for (int i = 0; i < THREAD_MAX; i++)
+ for (int i = 0; i < MAX_THREADS; i++)
threads[i].nodes = 0ULL;
}
void ThreadsManager::resetBetaCounters() {
- for (int i = 0; i < THREAD_MAX; i++)
+ for (int i = 0; i < MAX_THREADS; i++)
threads[i].betaCutOffs[WHITE] = threads[i].betaCutOffs[BLACK] = 0ULL;
}
void ThreadsManager::get_beta_counters(Color us, int64_t& our, int64_t& their) const {
our = their = 0UL;
- for (int i = 0; i < THREAD_MAX; i++)
+ for (int i = 0; i < MAX_THREADS; i++)
{
our += threads[i].betaCutOffs[us];
their += threads[i].betaCutOffs[opposite_color(us)];
void ThreadsManager::idle_loop(int threadID, SplitPoint* waitSp) {
- assert(threadID >= 0 && threadID < THREAD_MAX);
+ assert(threadID >= 0 && threadID < MAX_THREADS);
- threads[threadID].running = true;
-
- while (!AllThreadsShouldExit || threadID == 0)
+ while (true)
{
+ // Slave threads can exit as soon as AllThreadsShouldExit is raised;
+ // the master should be the last one to exit.
+ if (AllThreadsShouldExit)
+ {
+ assert(!waitSp);
+ threads[threadID].state = THREAD_TERMINATED;
+ return;
+ }
+
// If we are not thinking, wait for a condition to be signaled
// instead of wasting CPU time polling for work.
- while ( threadID != 0
- && !AllThreadsShouldExit
- && (AllThreadsShouldSleep || threadID >= ActiveThreads))
+ while (AllThreadsShouldSleep || threadID >= ActiveThreads)
{
-
- threads[threadID].sleeping = true;
+ assert(!waitSp);
+ assert(threadID != 0);
+ threads[threadID].state = THREAD_SLEEPING;
#if !defined(_MSC_VER)
pthread_mutex_lock(&WaitLock);
if (AllThreadsShouldSleep || threadID >= ActiveThreads)
pthread_cond_wait(&WaitCond, &WaitLock);
-
pthread_mutex_unlock(&WaitLock);
#else
WaitForSingleObject(SitIdleEvent[threadID], INFINITE);
#endif
}
- // Out of the while loop to avoid races in case thread is woken up but
- // while condition still holds true so that is put to sleep again.
- threads[threadID].sleeping = false;
+ // If thread has just woken up, mark it as available
+ if (threads[threadID].state == THREAD_SLEEPING)
+ threads[threadID].state = THREAD_AVAILABLE;
// If this thread has been assigned work, launch a search
- if (threads[threadID].workIsWaiting)
+ if (threads[threadID].state == THREAD_WORKISWAITING)
{
- assert(!threads[threadID].idle);
+ assert(!AllThreadsShouldExit && !AllThreadsShouldSleep);
+
+ threads[threadID].state = THREAD_SEARCHING;
- threads[threadID].workIsWaiting = false;
if (threads[threadID].splitPoint->pvNode)
sp_search_pv(threads[threadID].splitPoint, threadID);
else
sp_search(threads[threadID].splitPoint, threadID);
- threads[threadID].idle = true;
+ assert(threads[threadID].state == THREAD_SEARCHING);
+
+ threads[threadID].state = THREAD_AVAILABLE;
}
// If this thread is the master of a split point and all threads have
// finished their work at this split point, return from the idle loop.
if (waitSp != NULL && waitSp->cpus == 0)
+ {
+ assert(threads[threadID].state == THREAD_AVAILABLE);
+
+ threads[threadID].state = THREAD_SEARCHING;
return;
+ }
}
-
- threads[threadID].running = false;
}
// Initialize global locks
lock_init(&MPLock, NULL);
- lock_init(&IOLock, NULL);
// Initialize SplitPointStack locks
- for (int i = 0; i < THREAD_MAX; i++)
+ for (i = 0; i < MAX_THREADS; i++)
for (int j = 0; j < ACTIVE_SPLIT_POINTS_MAX; j++)
{
SplitPointStack[i][j].parent = NULL;
pthread_mutex_init(&WaitLock, NULL);
pthread_cond_init(&WaitCond, NULL);
#else
- for (i = 0; i < THREAD_MAX; i++)
+ for (i = 0; i < MAX_THREADS; i++)
SitIdleEvent[i] = CreateEvent(0, FALSE, FALSE, 0);
#endif
// Threads will be put to sleep as soon as created
AllThreadsShouldSleep = true;
- // All threads except the main thread should be initialized to idle state
+ // All threads except the main thread should be initialized to THREAD_AVAILABLE
ActiveThreads = 1;
- for (i = 1; i < THREAD_MAX; i++)
- threads[i].idle = true;
+ threads[0].state = THREAD_SEARCHING;
+ for (i = 1; i < MAX_THREADS; i++)
+ threads[i].state = THREAD_AVAILABLE;
// Launch the helper threads
- for (i = 1; i < THREAD_MAX; i++)
+ for (i = 1; i < MAX_THREADS; i++)
{
#if !defined(_MSC_VER)
}
// Wait until the thread has finished launching and is gone to sleep
- while (!threads[i].running || !threads[i].sleeping);
+ while (threads[i].state != THREAD_SLEEPING);
}
}
void ThreadsManager::exit_threads() {
- ActiveThreads = THREAD_MAX; // HACK
+ ActiveThreads = MAX_THREADS; // HACK
AllThreadsShouldSleep = true; // HACK
wake_sleeping_threads();
+
+ // This makes the threads exit idle_loop()
AllThreadsShouldExit = true;
- for (int i = 1; i < THREAD_MAX; i++)
- {
- threads[i].stop = true;
- while (threads[i].running);
- }
+
+ // Wait for thread termination
+ for (int i = 1; i < MAX_THREADS; i++)
+ while (threads[i].state != THREAD_TERMINATED);
// Now we can safely destroy the locks
- for (int i = 0; i < THREAD_MAX; i++)
+ for (int i = 0; i < MAX_THREADS; i++)
for (int j = 0; j < ACTIVE_SPLIT_POINTS_MAX; j++)
lock_destroy(&(SplitPointStack[i][j].lock));
}
- // thread_should_stop() checks whether the thread with a given threadID has
- // been asked to stop, directly or indirectly. This can happen if a beta
- // cutoff has occurred in the thread's currently active split point, or in
- // some ancestor of the current split point.
+ // thread_should_stop() checks whether the thread should stop its search.
+ // This can happen if a beta cutoff has occurred in the thread's currently
+ // active split point, or in some ancestor of the current split point.
bool ThreadsManager::thread_should_stop(int threadID) const {
SplitPoint* sp;
- if (threads[threadID].stop)
- return true;
-
- if (ActiveThreads <= 2)
- return false;
-
- for (sp = threads[threadID].splitPoint; sp != NULL; sp = sp->parent)
- if (sp->finished)
- return true;
-
- return false;
+ for (sp = threads[threadID].splitPoint; sp && !sp->stopRequest; sp = sp->parent);
+ return sp != NULL;
}
assert(master >= 0 && master < ActiveThreads);
assert(ActiveThreads > 1);
- if (!threads[slave].idle || slave == master)
+ if (threads[slave].state != THREAD_AVAILABLE || slave == master)
return false;
// Make a local copy to be sure doesn't change under our feet
}
- // idle_thread_exists() tries to find an idle thread which is available as
+ // available_thread_exists() tries to find an idle thread which is available as
// a slave for the thread with threadID "master".
- bool ThreadsManager::idle_thread_exists(int master) const {
+ bool ThreadsManager::available_thread_exists(int master) const {
assert(master >= 0 && master < ActiveThreads);
assert(ActiveThreads > 1);
// splitPoint->cpus becomes 0), split() returns true.
bool ThreadsManager::split(const Position& p, SearchStack* sstck, int ply,
- Value* alpha, Value* beta, Value* bestValue, const Value futilityValue,
+ Value* alpha, const Value beta, Value* bestValue,
Depth depth, int* moves, MovePicker* mp, int master, bool pvNode) {
assert(p.is_ok());
assert(sstck != NULL);
assert(ply >= 0 && ply < PLY_MAX);
- assert(*bestValue >= -VALUE_INFINITE && *bestValue <= *alpha);
- assert(!pvNode || *alpha < *beta);
- assert(*beta <= VALUE_INFINITE);
+ assert(*bestValue >= -VALUE_INFINITE);
+ assert( ( pvNode && *bestValue <= *alpha)
+ || (!pvNode && *bestValue < beta ));
+ assert(!pvNode || *alpha < beta);
+ assert(beta <= VALUE_INFINITE);
assert(depth > Depth(0));
assert(master >= 0 && master < ActiveThreads);
assert(ActiveThreads > 1);
// If no other thread is available to help us, or if we have too many
// active split points, don't split.
- if ( !idle_thread_exists(master)
+ if ( !available_thread_exists(master)
|| threads[master].activeSplitPoints >= ACTIVE_SPLIT_POINTS_MAX)
{
lock_release(&MPLock);
}
// Pick the next available split point object from the split point stack
- splitPoint = SplitPointStack[master] + threads[master].activeSplitPoints;
- threads[master].activeSplitPoints++;
+ splitPoint = &SplitPointStack[master][threads[master].activeSplitPoints];
// Initialize the split point object
splitPoint->parent = threads[master].splitPoint;
- splitPoint->finished = false;
+ splitPoint->stopRequest = false;
splitPoint->ply = ply;
splitPoint->depth = depth;
- splitPoint->alpha = pvNode ? *alpha : (*beta - 1);
- splitPoint->beta = *beta;
+ splitPoint->alpha = pvNode ? *alpha : beta - 1;
+ splitPoint->beta = beta;
splitPoint->pvNode = pvNode;
splitPoint->bestValue = *bestValue;
- splitPoint->futilityValue = futilityValue;
splitPoint->master = master;
splitPoint->mp = mp;
splitPoint->moves = *moves;
for (int i = 0; i < ActiveThreads; i++)
splitPoint->slaves[i] = 0;
- threads[master].idle = false;
- threads[master].stop = false;
threads[master].splitPoint = splitPoint;
+ threads[master].activeSplitPoints++;
- // Allocate available threads setting idle flag to false
+ // If we are here it means we are not available
+ assert(threads[master].state != THREAD_AVAILABLE);
+
+ // Allocate available threads, setting their state to THREAD_BOOKED
for (int i = 0; i < ActiveThreads && splitPoint->cpus < MaxThreadsPerSplitPoint; i++)
if (thread_is_available(i, master))
{
- threads[i].idle = false;
- threads[i].stop = false;
+ threads[i].state = THREAD_BOOKED;
threads[i].splitPoint = splitPoint;
splitPoint->slaves[i] = 1;
splitPoint->cpus++;
assert(splitPoint->cpus > 1);
- // We can release the lock because master and slave threads are already booked
+ // We can release the lock because slave threads are already booked and master is not available
lock_release(&MPLock);
// Tell the threads that they have work to do. This will make them leave
if (i == master || splitPoint->slaves[i])
{
memcpy(splitPoint->sstack[i] + ply - 1, sstck + ply - 1, 4 * sizeof(SearchStack));
- threads[i].workIsWaiting = true; // This makes the slave to exit from idle_loop()
+
+ assert(i == master || threads[i].state == THREAD_BOOKED);
+
+ threads[i].state = THREAD_WORKISWAITING; // This makes the slave to exit from idle_loop()
}
// Everything is set up. The master thread enters the idle loop, from
- // which it will instantly launch a search, because its workIsWaiting
- // slot is 'true'. We send the split point as a second parameter to the
+ // which it will instantly launch a search, because its state is
+ // THREAD_WORKISWAITING. We send the split point as a second parameter to the
// idle loop, which means that the main thread will return from the idle
// loop when all threads have finished their work at this split point
// (i.e. when splitPoint->cpus == 0).
if (pvNode)
*alpha = splitPoint->alpha;
- *beta = splitPoint->beta;
*bestValue = splitPoint->bestValue;
- threads[master].stop = false;
- threads[master].idle = false;
threads[master].activeSplitPoints--;
threads[master].splitPoint = splitPoint->parent;
if (ActiveThreads == 1)
return;
- for (int i = 1; i < ActiveThreads; i++)
- {
- assert(threads[i].sleeping == true);
-
- threads[i].idle = true;
- threads[i].workIsWaiting = false;
- }
-
#if !defined(_MSC_VER)
pthread_mutex_lock(&WaitLock);
pthread_cond_broadcast(&WaitCond);
pthread_mutex_unlock(&WaitLock);
#else
- for (int i = 1; i < THREAD_MAX; i++)
+ for (int i = 1; i < MAX_THREADS; i++)
SetEvent(SitIdleEvent[i]);
#endif
- // Wait for the threads to be all woken up
- for (int i = 1; i < ActiveThreads; i++)
- while (threads[i].sleeping);
}
// put_threads_to_sleep() makes all the threads go to sleep just before
- // to leave think(), at the end of the search. threads should have already
+ // to leave think(), at the end of the search. Threads should have already
// finished the job and should be idle.
void ThreadsManager::put_threads_to_sleep() {
assert(!AllThreadsShouldSleep);
+ // This makes the threads go to sleep
AllThreadsShouldSleep = true;
-
- // Wait for the threads to be all sleeping
- for (int i = 1; i < ActiveThreads; i++)
- while (!threads[i].sleeping);
- }
-
-
- // print_current_line() prints _once_ the current line of search for a
- // given thread and then setup the print request for the next thread.
- // Called when the UCI option UCI_ShowCurrLine is 'true'.
-
- void ThreadsManager::print_current_line(SearchStack ss[], int ply, int threadID) {
-
- assert(ply >= 0 && ply < PLY_MAX);
- assert(threadID >= 0 && threadID < ActiveThreads);
-
- if (!threads[threadID].printCurrentLineRequest)
- return;
-
- // One shot only
- threads[threadID].printCurrentLineRequest = false;
-
- if (!threads[threadID].idle)
- {
- lock_grab(&IOLock);
- cout << "info currline " << (threadID + 1);
- for (int p = 0; p < ply; p++)
- cout << " " << ss[p].currentMove;
-
- cout << endl;
- lock_release(&IOLock);
- }
-
- // Setup print request for the next thread ID
- if (threadID + 1 < ActiveThreads)
- threads[threadID + 1].printCurrentLineRequest = true;
}
-
/// The RootMoveList class
// RootMoveList c'tor