int active_threads() const { return ActiveThreads; }
void set_active_threads(int newActiveThreads) { ActiveThreads = newActiveThreads; }
- void set_stop_request(int threadID) { threads[threadID].stopRequest = true; }
void incrementNodeCounter(int threadID) { threads[threadID].nodes++; }
void incrementBetaCounter(Color us, Depth d, int threadID) { threads[threadID].betaCutOffs[us] += unsigned(d); }
void print_current_line(SearchStack ss[], int ply, int threadID);
void resetBetaCounters();
int64_t nodes_searched() const;
void get_beta_counters(Color us, int64_t& our, int64_t& their) const;
- bool idle_thread_exists(int master) const;
+ bool available_thread_exists(int master) const;
bool thread_is_available(int slave, int master) const;
bool thread_should_stop(int threadID) const;
void wake_sleeping_threads();
void put_threads_to_sleep();
void idle_loop(int threadID, SplitPoint* waitSp);
- bool split(const Position& pos, SearchStack* ss, int ply, Value* alpha, Value* beta, Value* bestValue,
- const Value futilityValue, Depth depth, int* moves, MovePicker* mp, int master, bool pvNode);
+ bool split(const Position& pos, SearchStack* ss, int ply, Value* alpha, const Value beta, Value* bestValue,
+ Depth depth, int* moves, MovePicker* mp, int master, bool pvNode);
private:
friend void poll();
int ActiveThreads;
- bool AllThreadsShouldExit, AllThreadsShouldSleep;
- Thread threads[THREAD_MAX];
- SplitPoint SplitPointStack[THREAD_MAX][ACTIVE_SPLIT_POINTS_MAX];
+ volatile bool AllThreadsShouldExit, AllThreadsShouldSleep;
+ Thread threads[MAX_THREADS];
+ SplitPoint SplitPointStack[MAX_THREADS][ACTIVE_SPLIT_POINTS_MAX];
Lock MPLock, IOLock;
pthread_cond_t WaitCond;
pthread_mutex_t WaitLock;
#else
- HANDLE SitIdleEvent[THREAD_MAX];
+ HANDLE SitIdleEvent[MAX_THREADS];
#endif
};
// Wake up sleeping threads
TM.wake_sleeping_threads();
- for (int i = 1; i < TM.active_threads(); i++)
- assert(TM.thread_is_available(i, 0));
-
// Set thinking time
int myTime = time[side_to_move];
int myIncrement = increment[side_to_move];
if (depth < OnePly)
return qsearch(pos, ss, alpha, beta, Depth(0), ply, threadID);
- // Initialize, and make an early exit in case of an aborted search,
- // an instant draw, maximum ply reached, etc.
+ // Step 1. Initialize node and poll
+ // Polling can abort search.
init_node(ss, ply, threadID);
- // After init_node() that calls poll()
+ // Step 2. Check for aborted search and immediate draw
if (AbortSearch || TM.thread_should_stop(threadID))
return Value(0);
if (pos.is_draw() || ply >= PLY_MAX - 1)
return VALUE_DRAW;
- // Mate distance pruning
+ // Step 3. Mate distance pruning
oldAlpha = alpha;
alpha = Max(value_mated_in(ply), alpha);
beta = Min(value_mate_in(ply+1), beta);
if (alpha >= beta)
return alpha;
- // Transposition table lookup. At PV nodes, we don't use the TT for
- // pruning, but only for move ordering. This is to avoid problems in
- // the following areas:
+ // Step 4. Transposition table lookup
+ // At PV nodes, we don't use the TT for pruning, but only for move ordering.
+ // This is to avoid problems in the following areas:
//
// * Repetition draw detection
// * Fifty move rule detection
// * Searching for a mate
// * Printing of full PV line
- //
tte = TT.retrieve(pos.get_key());
ttMove = (tte ? tte->move() : MOVE_NONE);
- // Go with internal iterative deepening if we don't have a TT move
+ // Step 5. Evaluate the position statically
+ // At PV nodes we do this only to update gain statistics
+ isCheck = pos.is_check();
+ if (!isCheck)
+ {
+ EvalInfo ei;
+ ss[ply].eval = evaluate(pos, ei, threadID);
+ update_gains(pos, ss[ply - 1].currentMove, ss[ply - 1].eval, ss[ply].eval);
+ }
+
+ // Step 6. Razoring (is omitted in PV nodes)
+ // Step 7. Static null move pruning (is omitted in PV nodes)
+ // Step 8. Null move search with verification search (is omitted in PV nodes)
+
+ // Step 9. Internal iterative deepening
if ( UseIIDAtPVNodes
&& depth >= 5*OnePly
&& ttMove == MOVE_NONE)
tte = TT.retrieve(pos.get_key());
}
- isCheck = pos.is_check();
- if (!isCheck)
- {
- // Update gain statistics of the previous move that lead
- // us in this position.
- EvalInfo ei;
- ss[ply].eval = evaluate(pos, ei, threadID);
- update_gains(pos, ss[ply - 1].currentMove, ss[ply - 1].eval, ss[ply].eval);
- }
+ // Step 10. Loop through moves
+ // Loop through all legal moves until no moves remain or a beta cutoff occurs
- // Initialize a MovePicker object for the current position, and prepare
- // to search all moves
+ // Initialize a MovePicker object for the current position
mateThreat = pos.has_mate_threat(opposite_color(pos.side_to_move()));
- CheckInfo ci(pos);
MovePicker mp = MovePicker(pos, ttMove, depth, H, &ss[ply]);
+ CheckInfo ci(pos);
- // Loop through all legal moves until no moves remain or a beta cutoff
- // occurs.
while ( alpha < beta
&& (move = mp.get_next_move()) != MOVE_NONE
&& !TM.thread_should_stop(threadID))
moveIsCheck = pos.move_is_check(move, ci);
captureOrPromotion = pos.move_is_capture_or_promotion(move);
- // Decide the new search depth
+ // Step 11. Decide the new search depth
ext = extension(pos, move, true, captureOrPromotion, moveIsCheck, singleEvasion, mateThreat, &dangerous);
// Singular extension search. We extend the TT move if its value is much better than
newDepth = depth - OnePly + ext;
- // Update current move
+ // Update current move (this must be done after singular extension search)
movesSearched[moveCount++] = ss[ply].currentMove = move;
- // Make and search the move
+ // Step 12. Futility pruning (is omitted in PV nodes)
+
+ // Step 13. Make the move
pos.do_move(move, st, ci, moveIsCheck);
- if (moveCount == 1) // The first move in list is the PV
+ // Step extra. pv search (only in PV nodes)
+ // The first move in the list is the expected PV
+ if (moveCount == 1)
value = -search_pv(pos, ss, -beta, -alpha, newDepth, ply+1, threadID);
else
{
- // Try to reduce non-pv search depth by one ply if move seems not problematic,
+ // Step 14. Reduced search
// if the move fails high will be re-searched at full depth.
bool doFullDepthSearch = true;
}
}
- if (doFullDepthSearch) // Go with full depth non-pv search
+ // Step 15. Full depth search
+ if (doFullDepthSearch)
{
ss[ply].reduction = Depth(0);
value = -search(pos, ss, -alpha, newDepth, ply+1, true, threadID);
+
+ // Step extra. pv search (only in PV nodes)
if (value > alpha && value < beta)
value = -search_pv(pos, ss, -beta, -alpha, newDepth, ply+1, threadID);
}
}
+
+ // Step 16. Undo move
pos.undo_move(move);
assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);
- // New best move?
+ // Step 17. Check for new best move
if (value > bestValue)
{
bestValue = value;
}
}
- // Split?
+ // Step 18. Check for split
if ( TM.active_threads() > 1
&& bestValue < beta
&& depth >= MinimumSplitDepth
&& Iteration <= 99
- && TM.idle_thread_exists(threadID)
+ && TM.available_thread_exists(threadID)
&& !AbortSearch
&& !TM.thread_should_stop(threadID)
- && TM.split(pos, ss, ply, &alpha, &beta, &bestValue, VALUE_NONE,
+ && TM.split(pos, ss, ply, &alpha, beta, &bestValue,
depth, &moveCount, &mp, threadID, true))
break;
}
- // All legal moves have been searched. A special case: If there were
+ // Step 19. Check for mate and stalemate
+ // All legal moves have been searched and if there were
// no legal moves, it must be mate or stalemate.
if (moveCount == 0)
return (isCheck ? value_mated_in(ply) : VALUE_DRAW);
+ // Step 20. Update tables
// If the search is not aborted, update the transposition table,
// history counters, and killer moves.
if (AbortSearch || TM.thread_should_stop(threadID))
const TTEntry* tte;
Move ttMove, move;
Depth ext, newDepth;
- Value bestValue, staticValue, nullValue, value, futilityValue, futilityValueScaled;
+ Value bestValue, refinedValue, nullValue, value, futilityValueScaled;
bool isCheck, singleEvasion, moveIsCheck, captureOrPromotion, dangerous;
bool mateThreat = false;
int moveCount = 0;
- futilityValue = staticValue = bestValue = value = -VALUE_INFINITE;
+ refinedValue = bestValue = value = -VALUE_INFINITE;
if (depth < OnePly)
return qsearch(pos, ss, beta-1, beta, Depth(0), ply, threadID);
- // Initialize, and make an early exit in case of an aborted search,
- // an instant draw, maximum ply reached, etc.
+ // Step 1. Initialize node and poll
+ // Polling can abort search.
init_node(ss, ply, threadID);
- // After init_node() that calls poll()
+ // Step 2. Check for aborted search and immediate draw
if (AbortSearch || TM.thread_should_stop(threadID))
return Value(0);
if (pos.is_draw() || ply >= PLY_MAX - 1)
return VALUE_DRAW;
- // Mate distance pruning
+ // Step 3. Mate distance pruning
if (value_mated_in(ply) >= beta)
return beta;
if (value_mate_in(ply + 1) < beta)
return beta - 1;
+ // Step 4. Transposition table lookup
+
// We don't want the score of a partial search to overwrite a previous full search
- // TT value, so we use a different position key in case of an excluded move exsists.
+ // TT value, so we use a different position key in case an excluded move exists.
Key posKey = excludedMove ? pos.get_exclusion_key() : pos.get_key();
- // Transposition table lookup
tte = TT.retrieve(posKey);
ttMove = (tte ? tte->move() : MOVE_NONE);
return value_from_tt(tte->value(), ply);
}
+ // Step 5. Evaluate the position statically
isCheck = pos.is_check();
- // Evaluate the position statically
if (!isCheck)
{
if (tte && (tte->type() & VALUE_TYPE_EVAL))
- staticValue = value_from_tt(tte->value(), ply);
+ ss[ply].eval = value_from_tt(tte->value(), ply);
else
- staticValue = evaluate(pos, ei, threadID);
+ ss[ply].eval = evaluate(pos, ei, threadID);
- ss[ply].eval = staticValue;
- futilityValue = staticValue + futility_margin(depth, 0); //FIXME: Remove me, only for split
- staticValue = refine_eval(tte, staticValue, ply); // Enhance accuracy with TT value if possible
+ refinedValue = refine_eval(tte, ss[ply].eval, ply); // Enhance accuracy with TT value if possible
update_gains(pos, ss[ply - 1].currentMove, ss[ply - 1].eval, ss[ply].eval);
}
- // Static null move pruning. We're betting that the opponent doesn't have
- // a move that will reduce the score by more than FutilityMargins[int(depth)]
- // if we do a null move.
+ // Step 6. Razoring
+ if ( !value_is_mate(beta)
+ && !isCheck
+ && depth < RazorDepth
+ && refinedValue < beta - (0x200 + 16 * depth)
+ && ss[ply - 1].currentMove != MOVE_NULL
+ && ttMove == MOVE_NONE
+ && !pos.has_pawn_on_7th(pos.side_to_move()))
+ {
+ Value rbeta = beta - (0x200 + 16 * depth);
+ Value v = qsearch(pos, ss, rbeta-1, rbeta, Depth(0), ply, threadID);
+ if (v < rbeta)
+ return v; //FIXME: Logically should be: return (v + 0x200 + 16 * depth);
+ }
+
+ // Step 7. Static null move pruning
+ // We're betting that the opponent doesn't have a move that will reduce
+ // the score by more than futility_margin(depth) if we do a null move.
if ( !isCheck
&& allowNullmove
&& depth < RazorDepth
- && staticValue - futility_margin(depth, 0) >= beta)
- return staticValue - futility_margin(depth, 0);
+ && refinedValue - futility_margin(depth, 0) >= beta)
+ return refinedValue - futility_margin(depth, 0);
- // Null move search
+ // Step 8. Null move search with verification search
+ // When we jump directly to qsearch() we do a null move only if static value is
+ // at least beta. Otherwise we do a null move if static value is not more than
+ // NullMoveMargin under beta.
if ( allowNullmove
&& depth > OnePly
&& !isCheck
&& !value_is_mate(beta)
&& ok_to_do_nullmove(pos)
- && staticValue >= beta - NullMoveMargin)
+ && refinedValue >= beta - (depth >= 4 * OnePly ? NullMoveMargin : 0))
{
ss[ply].currentMove = MOVE_NULL;
int R = 3 + (depth >= 5 * OnePly ? depth / 8 : 0);
// Null move dynamic reduction based on value
- if (staticValue - beta > PawnValueMidgame)
+ if (refinedValue - beta > PawnValueMidgame)
R++;
nullValue = -search(pos, ss, -(beta-1), depth-R*OnePly, ply+1, false, threadID);
return beta - 1;
}
}
- // Null move search not allowed, try razoring
- else if ( !value_is_mate(beta)
- && !isCheck
- && depth < RazorDepth
- && staticValue < beta - (NullMoveMargin + 16 * depth)
- && ss[ply - 1].currentMove != MOVE_NULL
- && ttMove == MOVE_NONE
- && !pos.has_pawn_on_7th(pos.side_to_move()))
- {
- Value rbeta = beta - (NullMoveMargin + 16 * depth);
- Value v = qsearch(pos, ss, rbeta-1, rbeta, Depth(0), ply, threadID);
- if (v < rbeta)
- return v;
- }
- // Go with internal iterative deepening if we don't have a TT move
+ // Step 9. Internal iterative deepening
if (UseIIDAtNonPVNodes && ttMove == MOVE_NONE && depth >= 8*OnePly &&
!isCheck && ss[ply].eval >= beta - IIDMargin)
{
- search(pos, ss, beta, Min(depth/2, depth-2*OnePly), ply, false, threadID);
+ search(pos, ss, beta, depth/2, ply, false, threadID);
ttMove = ss[ply].pv[ply];
tte = TT.retrieve(posKey);
}
- // Initialize a MovePicker object for the current position, and prepare
- // to search all moves.
+ // Step 10. Loop through moves
+ // Loop through all legal moves until no moves remain or a beta cutoff occurs
+
+ // Initialize a MovePicker object for the current position
MovePicker mp = MovePicker(pos, ttMove, depth, H, &ss[ply]);
CheckInfo ci(pos);
- // Loop through all legal moves until no moves remain or a beta cutoff occurs
while ( bestValue < beta
&& (move = mp.get_next_move()) != MOVE_NONE
&& !TM.thread_should_stop(threadID))
singleEvasion = (isCheck && mp.number_of_evasions() == 1);
captureOrPromotion = pos.move_is_capture_or_promotion(move);
- // Decide the new search depth
+ // Step 11. Decide the new search depth
ext = extension(pos, move, false, captureOrPromotion, moveIsCheck, singleEvasion, mateThreat, &dangerous);
// Singular extension search. We extend the TT move if its value is much better than
newDepth = depth - OnePly + ext;
- // Update current move
+ // Update current move (this must be done after singular extension search)
movesSearched[moveCount++] = ss[ply].currentMove = move;
- // Futility pruning
+ // Step 12. Futility pruning
if ( !isCheck
&& !dangerous
&& !captureOrPromotion
}
}
- // Make and search the move
+ // Step 13. Make the move
pos.do_move(move, st, ci, moveIsCheck);
- // Try to reduce non-pv search depth by one ply if move seems not problematic,
+ // Step 14. Reduced search
// if the move fails high will be re-searched at full depth.
bool doFullDepthSearch = true;
}
}
- if (doFullDepthSearch) // Go with full depth non-pv search
+ // Step 15. Full depth search
+ if (doFullDepthSearch)
{
ss[ply].reduction = Depth(0);
value = -search(pos, ss, -(beta-1), newDepth, ply+1, true, threadID);
}
+
+ // Step 16. Undo move
pos.undo_move(move);
assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);
- // New best move?
+ // Step 17. Check for new best move
if (value > bestValue)
{
bestValue = value;
ss[ply].mateKiller = move;
}
- // Split?
+ // Step 18. Check for split
if ( TM.active_threads() > 1
&& bestValue < beta
&& depth >= MinimumSplitDepth
&& Iteration <= 99
- && TM.idle_thread_exists(threadID)
+ && TM.available_thread_exists(threadID)
&& !AbortSearch
&& !TM.thread_should_stop(threadID)
- && TM.split(pos, ss, ply, &beta, &beta, &bestValue, futilityValue, //FIXME: SMP & futilityValue
+ && TM.split(pos, ss, ply, NULL, beta, &bestValue,
depth, &moveCount, &mp, threadID, false))
break;
}
- // All legal moves have been searched. A special case: If there were
+ // Step 19. Check for mate and stalemate
+ // All legal moves have been searched and if there were
// no legal moves, it must be mate or stalemate.
+ // If one move was excluded, return fail low.
if (!moveCount)
return excludedMove ? beta - 1 : (pos.is_check() ? value_mated_in(ply) : VALUE_DRAW);
+ // Step 20. Update tables
// If the search is not aborted, update the transposition table,
// history counters, and killer moves.
if (AbortSearch || TM.thread_should_stop(threadID))
// Don't search moves with negative SEE values
if ( (!isCheck || evasionPrunable)
+ && !pvNode
&& move != ttMove
&& !move_is_promotion(move)
&& pos.see_sign(move) < 0)
Position pos(*sp->pos);
CheckInfo ci(pos);
SearchStack* ss = sp->sstack[threadID];
+ StateInfo st;
Value value = -VALUE_INFINITE;
Move move;
int moveCount;
bool isCheck = pos.is_check();
- bool useFutilityPruning = sp->depth < 7 * OnePly //FIXME: sync with search
- && !isCheck;
- while ( lock_grab_bool(&(sp->lock))
- && sp->bestValue < sp->beta
+ // Step 10. Loop through moves
+ // Loop through all legal moves until no moves remain or a beta cutoff occurs
+ lock_grab(&(sp->lock));
+
+ while ( sp->bestValue < sp->beta
&& !TM.thread_should_stop(threadID)
&& (move = sp->mp->get_next_move()) != MOVE_NONE)
{
bool moveIsCheck = pos.move_is_check(move, ci);
bool captureOrPromotion = pos.move_is_capture_or_promotion(move);
- ss[sp->ply].currentMove = move;
-
- // Decide the new search depth
+ // Step 11. Decide the new search depth
bool dangerous;
Depth ext = extension(pos, move, false, captureOrPromotion, moveIsCheck, false, false, &dangerous);
Depth newDepth = sp->depth - OnePly + ext;
- // Prune?
- if ( useFutilityPruning
+ // Update current move
+ ss[sp->ply].currentMove = move;
+
+ // Step 12. Futility pruning
+ if ( !isCheck
&& !dangerous
- && !captureOrPromotion)
+ && !captureOrPromotion
+ && !move_is_castle(move))
{
// Move count based pruning
if ( moveCount >= futility_move_count(sp->depth)
&& ok_to_prune(pos, move, ss[sp->ply].threatMove)
&& sp->bestValue > value_mated_in(PLY_MAX))
+ {
+ lock_grab(&(sp->lock));
continue;
+ }
// Value based pruning
- Value futilityValueScaled = sp->futilityValue - moveCount * 8; //FIXME: sync with search
+ Depth predictedDepth = newDepth - nonpv_reduction(sp->depth, moveCount);
+ Value futilityValueScaled = ss[sp->ply].eval + futility_margin(predictedDepth, moveCount)
+ + H.gain(pos.piece_on(move_from(move)), move_to(move)) + 45;
if (futilityValueScaled < sp->beta)
{
- if (futilityValueScaled > sp->bestValue) // Less then 1% of cases
- {
- lock_grab(&(sp->lock));
- if (futilityValueScaled > sp->bestValue)
- sp->bestValue = futilityValueScaled;
- lock_release(&(sp->lock));
- }
+ lock_grab(&(sp->lock));
+
+ if (futilityValueScaled > sp->bestValue)
+ sp->bestValue = futilityValueScaled;
continue;
}
}
- // Make and search the move.
- StateInfo st;
+ // Step 13. Make the move
pos.do_move(move, st, ci, moveIsCheck);
- // Try to reduce non-pv search depth by one ply if move seems not problematic,
+ // Step 14. Reduced search
// if the move fails high will be re-searched at full depth.
bool doFullDepthSearch = true;
if (ss[sp->ply].reduction)
{
value = -search(pos, ss, -(sp->beta-1), newDepth-ss[sp->ply].reduction, sp->ply+1, true, threadID);
- doFullDepthSearch = (value >= sp->beta);
+ doFullDepthSearch = (value >= sp->beta && !TM.thread_should_stop(threadID));
}
}
- if (doFullDepthSearch) // Go with full depth non-pv search
+ // Step 15. Full depth search
+ if (doFullDepthSearch)
{
ss[sp->ply].reduction = Depth(0);
value = -search(pos, ss, -(sp->beta - 1), newDepth, sp->ply+1, true, threadID);
}
+
+ // Step 16. Undo move
pos.undo_move(move);
assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);
- if (TM.thread_should_stop(threadID))
- {
- lock_grab(&(sp->lock));
- break;
- }
+ // Step 17. Check for new best move
+ lock_grab(&(sp->lock));
- // New best move?
- if (value > sp->bestValue) // Less then 2% of cases
+ if (value > sp->bestValue && !TM.thread_should_stop(threadID))
{
- lock_grab(&(sp->lock));
- if (value > sp->bestValue && !TM.thread_should_stop(threadID))
+ sp->bestValue = value;
+ if (sp->bestValue >= sp->beta)
{
- sp->bestValue = value;
- if (sp->bestValue >= sp->beta)
- {
- sp_update_pv(sp->parentSstack, ss, sp->ply);
- for (int i = 0; i < TM.active_threads(); i++)
- if (i != threadID && (i == sp->master || sp->slaves[i]))
- TM.set_stop_request(i);
-
- sp->finished = true;
- }
+ sp->stopRequest = true;
+ sp_update_pv(sp->parentSstack, ss, sp->ply);
}
- lock_release(&(sp->lock));
}
}
/* Here we have the lock still grabbed */
- // If this is the master thread and we have been asked to stop because of
- // a beta cutoff higher up in the tree, stop all slave threads. Note that
- // thread_should_stop(threadID) does not imply that 'stop' flag is set, so
- // do this explicitly now, under lock protection.
- if (sp->master == threadID && TM.thread_should_stop(threadID))
- for (int i = 0; i < TM.active_threads(); i++)
- if (sp->slaves[i] || i == threadID)
- TM.set_stop_request(i);
-
- sp->cpus--;
sp->slaves[threadID] = 0;
+ sp->cpus--;
lock_release(&(sp->lock));
}
Position pos(*sp->pos);
CheckInfo ci(pos);
SearchStack* ss = sp->sstack[threadID];
+ StateInfo st;
Value value = -VALUE_INFINITE;
int moveCount;
Move move;
- while ( lock_grab_bool(&(sp->lock))
- && sp->alpha < sp->beta
+ // Step 10. Loop through moves
+ // Loop through all legal moves until no moves remain or a beta cutoff occurs
+ lock_grab(&(sp->lock));
+
+ while ( sp->alpha < sp->beta
&& !TM.thread_should_stop(threadID)
&& (move = sp->mp->get_next_move()) != MOVE_NONE)
{
bool moveIsCheck = pos.move_is_check(move, ci);
bool captureOrPromotion = pos.move_is_capture_or_promotion(move);
- ss[sp->ply].currentMove = move;
-
- // Decide the new search depth
+ // Step 11. Decide the new search depth
bool dangerous;
Depth ext = extension(pos, move, true, captureOrPromotion, moveIsCheck, false, false, &dangerous);
Depth newDepth = sp->depth - OnePly + ext;
- // Make and search the move.
- StateInfo st;
+ // Update current move
+ ss[sp->ply].currentMove = move;
+
+ // Step 12. Futility pruning (is omitted in PV nodes)
+
+ // Step 13. Make the move
pos.do_move(move, st, ci, moveIsCheck);
- // Try to reduce non-pv search depth by one ply if move seems not problematic,
+ // Step 14. Reduced search
// if the move fails high will be re-searched at full depth.
bool doFullDepthSearch = true;
{
Value localAlpha = sp->alpha;
value = -search(pos, ss, -localAlpha, newDepth-ss[sp->ply].reduction, sp->ply+1, true, threadID);
- doFullDepthSearch = (value > localAlpha);
+ doFullDepthSearch = (value > localAlpha && !TM.thread_should_stop(threadID));
}
}
- if (doFullDepthSearch) // Go with full depth non-pv search
+ // Step 15. Full depth search
+ if (doFullDepthSearch)
{
Value localAlpha = sp->alpha;
ss[sp->ply].reduction = Depth(0);
value = -search(pos, ss, -localAlpha, newDepth, sp->ply+1, true, threadID);
- if (value > localAlpha && value < sp->beta)
+ if (value > localAlpha && value < sp->beta && !TM.thread_should_stop(threadID))
{
// If another thread has failed high then sp->alpha has been increased
// to be higher or equal then beta, if so, avoid to start a PV search.
localAlpha = sp->alpha;
if (localAlpha < sp->beta)
value = -search_pv(pos, ss, -sp->beta, -localAlpha, newDepth, sp->ply+1, threadID);
- else
- assert(TM.thread_should_stop(threadID));
- }
+ }
}
+
+ // Step 16. Undo move
pos.undo_move(move);
assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);
- if (TM.thread_should_stop(threadID))
- {
- lock_grab(&(sp->lock));
- break;
- }
+ // Step 17. Check for new best move
+ lock_grab(&(sp->lock));
- // New best move?
- if (value > sp->bestValue) // Less then 2% of cases
+ if (value > sp->bestValue && !TM.thread_should_stop(threadID))
{
- lock_grab(&(sp->lock));
- if (value > sp->bestValue && !TM.thread_should_stop(threadID))
+ sp->bestValue = value;
+ if (value > sp->alpha)
{
- sp->bestValue = value;
- if (value > sp->alpha)
- {
- // Ask threads to stop before to modify sp->alpha
- if (value >= sp->beta)
- {
- for (int i = 0; i < TM.active_threads(); i++)
- if (i != threadID && (i == sp->master || sp->slaves[i]))
- TM.set_stop_request(i);
-
- sp->finished = true;
- }
-
- sp->alpha = value;
-
- sp_update_pv(sp->parentSstack, ss, sp->ply);
- if (value == value_mate_in(sp->ply + 1))
- ss[sp->ply].mateKiller = move;
- }
+ // Ask threads to stop before to modify sp->alpha
+ if (value >= sp->beta)
+ sp->stopRequest = true;
+
+ sp->alpha = value;
+
+ sp_update_pv(sp->parentSstack, ss, sp->ply);
+ if (value == value_mate_in(sp->ply + 1))
+ ss[sp->ply].mateKiller = move;
}
- lock_release(&(sp->lock));
}
}
/* Here we have the lock still grabbed */
- // If this is the master thread and we have been asked to stop because of
- // a beta cutoff higher up in the tree, stop all slave threads. Note that
- // thread_should_stop(threadID) does not imply that 'stop' flag is set, so
- // do this explicitly now, under lock protection.
- if (sp->master == threadID && TM.thread_should_stop(threadID))
- for (int i = 0; i < TM.active_threads(); i++)
- if (sp->slaves[i] || i == threadID)
- TM.set_stop_request(i);
-
- sp->cpus--;
sp->slaves[threadID] = 0;
+ sp->cpus--;
lock_release(&(sp->lock));
}
void ThreadsManager::resetNodeCounters() {
- for (int i = 0; i < THREAD_MAX; i++)
+ for (int i = 0; i < MAX_THREADS; i++)
threads[i].nodes = 0ULL;
}
void ThreadsManager::resetBetaCounters() {
- for (int i = 0; i < THREAD_MAX; i++)
+ for (int i = 0; i < MAX_THREADS; i++)
threads[i].betaCutOffs[WHITE] = threads[i].betaCutOffs[BLACK] = 0ULL;
}
void ThreadsManager::get_beta_counters(Color us, int64_t& our, int64_t& their) const {
our = their = 0UL;
- for (int i = 0; i < THREAD_MAX; i++)
+ for (int i = 0; i < MAX_THREADS; i++)
{
our += threads[i].betaCutOffs[us];
their += threads[i].betaCutOffs[opposite_color(us)];
void ThreadsManager::idle_loop(int threadID, SplitPoint* waitSp) {
- assert(threadID >= 0 && threadID < THREAD_MAX);
+ assert(threadID >= 0 && threadID < MAX_THREADS);
- threads[threadID].running = true;
-
- while (!AllThreadsShouldExit || threadID == 0)
+ while (true)
{
+ // Slave threads can exit as soon as AllThreadsShouldExit is set;
+ // the master must be the last one to exit.
+ if (AllThreadsShouldExit)
+ {
+ assert(!waitSp);
+ threads[threadID].state = THREAD_TERMINATED;
+ return;
+ }
+
// If we are not thinking, wait for a condition to be signaled
// instead of wasting CPU time polling for work.
- while ( threadID != 0
- && !AllThreadsShouldExit
- && (AllThreadsShouldSleep || threadID >= ActiveThreads))
+ while (AllThreadsShouldSleep || threadID >= ActiveThreads)
{
-
- threads[threadID].sleeping = true;
+ assert(!waitSp);
+ assert(threadID != 0);
+ threads[threadID].state = THREAD_SLEEPING;
#if !defined(_MSC_VER)
pthread_mutex_lock(&WaitLock);
if (AllThreadsShouldSleep || threadID >= ActiveThreads)
pthread_cond_wait(&WaitCond, &WaitLock);
-
pthread_mutex_unlock(&WaitLock);
#else
WaitForSingleObject(SitIdleEvent[threadID], INFINITE);
#endif
}
- // Out of the while loop to avoid races in case thread is woken up but
- // while condition still holds true so that is put to sleep again.
- threads[threadID].sleeping = false;
+ // If thread has just woken up, mark it as available
+ if (threads[threadID].state == THREAD_SLEEPING)
+ threads[threadID].state = THREAD_AVAILABLE;
// If this thread has been assigned work, launch a search
- if (threads[threadID].workIsWaiting)
+ if (threads[threadID].state == THREAD_WORKISWAITING)
{
- assert(!threads[threadID].idle);
+ assert(!AllThreadsShouldExit && !AllThreadsShouldSleep);
+
+ threads[threadID].state = THREAD_SEARCHING;
- threads[threadID].workIsWaiting = false;
if (threads[threadID].splitPoint->pvNode)
sp_search_pv(threads[threadID].splitPoint, threadID);
else
sp_search(threads[threadID].splitPoint, threadID);
- threads[threadID].idle = true;
+ assert(threads[threadID].state == THREAD_SEARCHING);
+
+ threads[threadID].state = THREAD_AVAILABLE;
}
// If this thread is the master of a split point and all threads have
// finished their work at this split point, return from the idle loop.
if (waitSp != NULL && waitSp->cpus == 0)
+ {
+ assert(threads[threadID].state == THREAD_AVAILABLE);
+
+ threads[threadID].state = THREAD_SEARCHING;
return;
+ }
}
-
- threads[threadID].running = false;
}
lock_init(&IOLock, NULL);
// Initialize SplitPointStack locks
- for (int i = 0; i < THREAD_MAX; i++)
+ for (i = 0; i < MAX_THREADS; i++)
for (int j = 0; j < ACTIVE_SPLIT_POINTS_MAX; j++)
{
SplitPointStack[i][j].parent = NULL;
pthread_mutex_init(&WaitLock, NULL);
pthread_cond_init(&WaitCond, NULL);
#else
- for (i = 0; i < THREAD_MAX; i++)
+ for (i = 0; i < MAX_THREADS; i++)
SitIdleEvent[i] = CreateEvent(0, FALSE, FALSE, 0);
#endif
// Threads will be put to sleep as soon as created
AllThreadsShouldSleep = true;
- // All threads except the main thread should be initialized to idle state
+ // All threads except the main thread should be initialized to THREAD_AVAILABLE
ActiveThreads = 1;
- for (i = 1; i < THREAD_MAX; i++)
- threads[i].idle = true;
+ threads[0].state = THREAD_SEARCHING;
+ for (i = 1; i < MAX_THREADS; i++)
+ threads[i].state = THREAD_AVAILABLE;
// Launch the helper threads
- for (i = 1; i < THREAD_MAX; i++)
+ for (i = 1; i < MAX_THREADS; i++)
{
#if !defined(_MSC_VER)
}
// Wait until the thread has finished launching and is gone to sleep
- while (!threads[i].running || !threads[i].sleeping);
+ while (threads[i].state != THREAD_SLEEPING);
}
}
void ThreadsManager::exit_threads() {
- ActiveThreads = THREAD_MAX; // HACK
+ ActiveThreads = MAX_THREADS; // HACK
AllThreadsShouldSleep = true; // HACK
wake_sleeping_threads();
+
+ // This makes the threads exit idle_loop()
AllThreadsShouldExit = true;
- for (int i = 1; i < THREAD_MAX; i++)
- {
- threads[i].stopRequest = true;
- while (threads[i].running);
- }
+
+ // Wait for thread termination
+ for (int i = 1; i < MAX_THREADS; i++)
+ while (threads[i].state != THREAD_TERMINATED);
// Now we can safely destroy the locks
- for (int i = 0; i < THREAD_MAX; i++)
+ for (int i = 0; i < MAX_THREADS; i++)
for (int j = 0; j < ACTIVE_SPLIT_POINTS_MAX; j++)
lock_destroy(&(SplitPointStack[i][j].lock));
}
- // thread_should_stop() checks whether the thread with a given threadID has
- // been asked to stop, directly or indirectly. This can happen if a beta
- // cutoff has occurred in the thread's currently active split point, or in
- // some ancestor of the current split point.
+ // thread_should_stop() checks whether the thread should stop its search.
+ // This can happen if a beta cutoff has occurred in the thread's currently
+ // active split point, or in some ancestor of the current split point.
bool ThreadsManager::thread_should_stop(int threadID) const {
SplitPoint* sp;
- if (threads[threadID].stopRequest)
- return true;
-
- if (ActiveThreads <= 2)
- return false;
-
- for (sp = threads[threadID].splitPoint; sp != NULL; sp = sp->parent)
- if (sp->finished)
- return true;
-
- return false;
+ for (sp = threads[threadID].splitPoint; sp && !sp->stopRequest; sp = sp->parent);
+ return sp != NULL;
}
assert(master >= 0 && master < ActiveThreads);
assert(ActiveThreads > 1);
- if (!threads[slave].idle || slave == master)
+ if (threads[slave].state != THREAD_AVAILABLE || slave == master)
return false;
// Make a local copy to be sure doesn't change under our feet
}
- // idle_thread_exists() tries to find an idle thread which is available as
+ // available_thread_exists() tries to find an idle thread which is available as
// a slave for the thread with threadID "master".
- bool ThreadsManager::idle_thread_exists(int master) const {
+ bool ThreadsManager::available_thread_exists(int master) const {
assert(master >= 0 && master < ActiveThreads);
assert(ActiveThreads > 1);
// splitPoint->cpus becomes 0), split() returns true.
bool ThreadsManager::split(const Position& p, SearchStack* sstck, int ply,
- Value* alpha, Value* beta, Value* bestValue, const Value futilityValue,
+ Value* alpha, const Value beta, Value* bestValue,
Depth depth, int* moves, MovePicker* mp, int master, bool pvNode) {
assert(p.is_ok());
assert(sstck != NULL);
assert(ply >= 0 && ply < PLY_MAX);
- assert(*bestValue >= -VALUE_INFINITE && *bestValue <= *alpha);
- assert(!pvNode || *alpha < *beta);
- assert(*beta <= VALUE_INFINITE);
+ assert(*bestValue >= -VALUE_INFINITE);
+ assert( ( pvNode && *bestValue <= *alpha)
+ || (!pvNode && *bestValue < beta ));
+ assert(!pvNode || *alpha < beta);
+ assert(beta <= VALUE_INFINITE);
assert(depth > Depth(0));
assert(master >= 0 && master < ActiveThreads);
assert(ActiveThreads > 1);
// If no other thread is available to help us, or if we have too many
// active split points, don't split.
- if ( !idle_thread_exists(master)
+ if ( !available_thread_exists(master)
|| threads[master].activeSplitPoints >= ACTIVE_SPLIT_POINTS_MAX)
{
lock_release(&MPLock);
}
// Pick the next available split point object from the split point stack
- splitPoint = SplitPointStack[master] + threads[master].activeSplitPoints;
- threads[master].activeSplitPoints++;
+ splitPoint = &SplitPointStack[master][threads[master].activeSplitPoints];
// Initialize the split point object
splitPoint->parent = threads[master].splitPoint;
- splitPoint->finished = false;
+ splitPoint->stopRequest = false;
splitPoint->ply = ply;
splitPoint->depth = depth;
- splitPoint->alpha = pvNode ? *alpha : (*beta - 1);
- splitPoint->beta = *beta;
+ splitPoint->alpha = pvNode ? *alpha : beta - 1;
+ splitPoint->beta = beta;
splitPoint->pvNode = pvNode;
splitPoint->bestValue = *bestValue;
- splitPoint->futilityValue = futilityValue;
splitPoint->master = master;
splitPoint->mp = mp;
splitPoint->moves = *moves;
for (int i = 0; i < ActiveThreads; i++)
splitPoint->slaves[i] = 0;
- threads[master].idle = false;
- threads[master].stopRequest = false;
threads[master].splitPoint = splitPoint;
+ threads[master].activeSplitPoints++;
+
+ // If we are here it means we are not available
+ assert(threads[master].state != THREAD_AVAILABLE);
- // Allocate available threads setting idle flag to false
+ // Allocate available threads setting state to THREAD_BOOKED
for (int i = 0; i < ActiveThreads && splitPoint->cpus < MaxThreadsPerSplitPoint; i++)
if (thread_is_available(i, master))
{
- threads[i].idle = false;
- threads[i].stopRequest = false;
+ threads[i].state = THREAD_BOOKED;
threads[i].splitPoint = splitPoint;
splitPoint->slaves[i] = 1;
splitPoint->cpus++;
assert(splitPoint->cpus > 1);
- // We can release the lock because master and slave threads are already booked
+ // We can release the lock because slave threads are already booked and master is not available
lock_release(&MPLock);
// Tell the threads that they have work to do. This will make them leave
if (i == master || splitPoint->slaves[i])
{
memcpy(splitPoint->sstack[i] + ply - 1, sstck + ply - 1, 4 * sizeof(SearchStack));
- threads[i].workIsWaiting = true; // This makes the slave to exit from idle_loop()
+
+ assert(i == master || threads[i].state == THREAD_BOOKED);
+
+ threads[i].state = THREAD_WORKISWAITING; // This makes the slave exit from idle_loop()
}
// Everything is set up. The master thread enters the idle loop, from
- // which it will instantly launch a search, because its workIsWaiting
- // slot is 'true'. We send the split point as a second parameter to the
+ // which it will instantly launch a search, because its state is
+ // THREAD_WORKISWAITING. We send the split point as a second parameter to the
// idle loop, which means that the main thread will return from the idle
// loop when all threads have finished their work at this split point
// (i.e. when splitPoint->cpus == 0).
if (pvNode)
*alpha = splitPoint->alpha;
- *beta = splitPoint->beta;
*bestValue = splitPoint->bestValue;
- threads[master].stopRequest = false;
- threads[master].idle = false;
threads[master].activeSplitPoints--;
threads[master].splitPoint = splitPoint->parent;
return;
for (int i = 1; i < ActiveThreads; i++)
- {
- assert(threads[i].sleeping == true);
-
- threads[i].idle = true;
- threads[i].workIsWaiting = false;
- }
+ assert(threads[i].state == THREAD_SLEEPING);
#if !defined(_MSC_VER)
pthread_mutex_lock(&WaitLock);
pthread_cond_broadcast(&WaitCond);
pthread_mutex_unlock(&WaitLock);
#else
- for (int i = 1; i < THREAD_MAX; i++)
+ for (int i = 1; i < MAX_THREADS; i++)
SetEvent(SitIdleEvent[i]);
#endif
- // Wait for the threads to be all woken up
- for (int i = 1; i < ActiveThreads; i++)
- while (threads[i].sleeping);
}
// put_threads_to_sleep() makes all the threads go to sleep just before
- // to leave think(), at the end of the search. threads should have already
+ // to leave think(), at the end of the search. Threads should have already
// finished the job and should be idle.
void ThreadsManager::put_threads_to_sleep() {
assert(!AllThreadsShouldSleep);
+ // This makes the threads go to sleep
AllThreadsShouldSleep = true;
- // Wait for the threads to be all sleeping and reset flags
- // to a known state.
+ // Reset flags to a known state.
for (int i = 1; i < ActiveThreads; i++)
{
- while (!threads[i].sleeping);
-
- assert(threads[i].idle);
- assert(threads[i].running);
- assert(!threads[i].workIsWaiting);
-
- // These two flags can be in a random state
- threads[i].stopRequest = threads[i].printCurrentLineRequest = false;
+ // This flag can be in a random state
+ threads[i].printCurrentLineRequest = false;
}
}
// One shot only
threads[threadID].printCurrentLineRequest = false;
- if (!threads[threadID].idle)
+ if (threads[threadID].state == THREAD_SEARCHING)
{
lock_grab(&IOLock);
cout << "info currline " << (threadID + 1);