int active_threads() const { return ActiveThreads; }
void set_active_threads(int newActiveThreads) { ActiveThreads = newActiveThreads; }
- void set_stop_request(int threadID) { threads[threadID].stop = true; }
void incrementNodeCounter(int threadID) { threads[threadID].nodes++; }
void incrementBetaCounter(Color us, Depth d, int threadID) { threads[threadID].betaCutOffs[us] += unsigned(d); }
void print_current_line(SearchStack ss[], int ply, int threadID);
void resetBetaCounters();
int64_t nodes_searched() const;
void get_beta_counters(Color us, int64_t& our, int64_t& their) const;
- bool idle_thread_exists(int master) const;
+ bool available_thread_exists(int master) const;
bool thread_is_available(int slave, int master) const;
- bool thread_should_stop(int threadID);
+ bool thread_should_stop(int threadID) const;
void wake_sleeping_threads();
void put_threads_to_sleep();
void idle_loop(int threadID, SplitPoint* waitSp);
- bool split(const Position& pos, SearchStack* ss, int ply, Value* alpha, Value* beta, Value* bestValue,
+ bool split(const Position& pos, SearchStack* ss, int ply, Value* alpha, const Value beta, Value* bestValue,
const Value futilityValue, Depth depth, int* moves, MovePicker* mp, int master, bool pvNode);
private:
friend void poll();
int ActiveThreads;
- bool AllThreadsShouldExit, AllThreadsShouldSleep;
- Thread threads[THREAD_MAX];
- SplitPoint SplitPointStack[THREAD_MAX][ACTIVE_SPLIT_POINTS_MAX];
+ volatile bool AllThreadsShouldExit, AllThreadsShouldSleep;
+ Thread threads[MAX_THREADS];
+ SplitPoint SplitPointStack[MAX_THREADS][ACTIVE_SPLIT_POINTS_MAX];
Lock MPLock, IOLock;
pthread_cond_t WaitCond;
pthread_mutex_t WaitLock;
#else
- HANDLE SitIdleEvent[THREAD_MAX];
+ HANDLE SitIdleEvent[MAX_THREADS];
#endif
};
&& bestValue < beta
&& depth >= MinimumSplitDepth
&& Iteration <= 99
- && TM.idle_thread_exists(threadID)
+ && TM.available_thread_exists(threadID)
&& !AbortSearch
&& !TM.thread_should_stop(threadID)
- && TM.split(pos, ss, ply, &alpha, &beta, &bestValue, VALUE_NONE,
+ && TM.split(pos, ss, ply, &alpha, beta, &bestValue, VALUE_NONE,
depth, &moveCount, &mp, threadID, true))
break;
}
&& !isCheck
&& !value_is_mate(beta)
&& ok_to_do_nullmove(pos)
- && staticValue >= beta - NullMoveMargin)
+ && staticValue >= beta - (depth >= 4 * OnePly ? NullMoveMargin : 0))
{
ss[ply].currentMove = MOVE_NULL;
&& bestValue < beta
&& depth >= MinimumSplitDepth
&& Iteration <= 99
- && TM.idle_thread_exists(threadID)
+ && TM.available_thread_exists(threadID)
&& !AbortSearch
&& !TM.thread_should_stop(threadID)
- && TM.split(pos, ss, ply, &beta, &beta, &bestValue, futilityValue, //FIXME: SMP & futilityValue
+ && TM.split(pos, ss, ply, NULL, beta, &bestValue, futilityValue, //FIXME: SMP & futilityValue
depth, &moveCount, &mp, threadID, false))
break;
}
// Don't search moves with negative SEE values
if ( (!isCheck || evasionPrunable)
+ && !pvNode
&& move != ttMove
&& !move_is_promotion(move)
&& pos.see_sign(move) < 0)
assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);
- if (TM.thread_should_stop(threadID))
- {
- lock_grab(&(sp->lock));
- break;
- }
-
// New best move?
if (value > sp->bestValue) // Less then 2% of cases
{
sp->bestValue = value;
if (sp->bestValue >= sp->beta)
{
+ sp->stopRequest = true;
sp_update_pv(sp->parentSstack, ss, sp->ply);
- for (int i = 0; i < TM.active_threads(); i++)
- if (i != threadID && (i == sp->master || sp->slaves[i]))
- TM.set_stop_request(i);
-
- sp->finished = true;
}
}
lock_release(&(sp->lock));
/* Here we have the lock still grabbed */
- // If this is the master thread and we have been asked to stop because of
- // a beta cutoff higher up in the tree, stop all slave threads.
- if (sp->master == threadID && TM.thread_should_stop(threadID))
- for (int i = 0; i < TM.active_threads(); i++)
- if (sp->slaves[i])
- TM.set_stop_request(i);
-
sp->cpus--;
sp->slaves[threadID] = 0;
assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);
- if (TM.thread_should_stop(threadID))
- {
- lock_grab(&(sp->lock));
- break;
- }
-
// New best move?
if (value > sp->bestValue) // Less then 2% of cases
{
{
// Ask threads to stop before to modify sp->alpha
if (value >= sp->beta)
- {
- for (int i = 0; i < TM.active_threads(); i++)
- if (i != threadID && (i == sp->master || sp->slaves[i]))
- TM.set_stop_request(i);
-
- sp->finished = true;
- }
+ sp->stopRequest = true;
sp->alpha = value;
/* Here we have the lock still grabbed */
- // If this is the master thread and we have been asked to stop because of
- // a beta cutoff higher up in the tree, stop all slave threads.
- if (sp->master == threadID && TM.thread_should_stop(threadID))
- for (int i = 0; i < TM.active_threads(); i++)
- if (sp->slaves[i])
- TM.set_stop_request(i);
-
sp->cpus--;
sp->slaves[threadID] = 0;
// resetNodeCounters() zeroes the searched-nodes counter of every thread slot.
void ThreadsManager::resetNodeCounters() {
// Walk the whole thread array (constant renamed THREAD_MAX -> MAX_THREADS),
// not just ActiveThreads, so inactive slots cannot carry stale counts.
- for (int i = 0; i < THREAD_MAX; i++)
+ for (int i = 0; i < MAX_THREADS; i++)
threads[i].nodes = 0ULL;
}
// resetBetaCounters() zeroes the per-color beta-cutoff counters of every
// thread slot (both WHITE and BLACK entries).
void ThreadsManager::resetBetaCounters() {
// Full MAX_THREADS sweep for the same reason as resetNodeCounters(): clear
// every slot, active or not.
- for (int i = 0; i < THREAD_MAX; i++)
+ for (int i = 0; i < MAX_THREADS; i++)
threads[i].betaCutOffs[WHITE] = threads[i].betaCutOffs[BLACK] = 0ULL;
}
void ThreadsManager::get_beta_counters(Color us, int64_t& our, int64_t& their) const {
our = their = 0UL;
- for (int i = 0; i < THREAD_MAX; i++)
+ for (int i = 0; i < MAX_THREADS; i++)
{
our += threads[i].betaCutOffs[us];
their += threads[i].betaCutOffs[opposite_color(us)];
// idle_loop() is where a thread parks when it has no work: it sleeps on a
// condition variable (or Win32 event) until woken, runs a split-point search
// when one is handed to it, and — when called by a split point's master with
// waitSp != NULL — returns once all slaves of that split point are done
// (waitSp->cpus == 0). This change collapses the old boolean flags
// (running/sleeping/workIsWaiting/idle) into the single threads[].state enum.
void ThreadsManager::idle_loop(int threadID, SplitPoint* waitSp) {
- assert(threadID >= 0 && threadID < THREAD_MAX);
-
- threads[threadID].running = true;
+ assert(threadID >= 0 && threadID < MAX_THREADS);
- while (!AllThreadsShouldExit || threadID == 0)
+ while (true)
{
+ // Slave threads can exit as soon as AllThreadsShouldExit is raised;
+ // the master (waitSp != NULL) must exit last.
+ if (AllThreadsShouldExit && !waitSp)
+ {
+ threads[threadID].state = THREAD_TERMINATED;
+ return;
+ }
+
// If we are not thinking, wait for a condition to be signaled
// instead of wasting CPU time polling for work.
while ( threadID != 0
&& !AllThreadsShouldExit
&& (AllThreadsShouldSleep || threadID >= ActiveThreads))
{
-
- threads[threadID].sleeping = true;
+ threads[threadID].state = THREAD_SLEEPING;
#if !defined(_MSC_VER)
pthread_mutex_lock(&WaitLock);
- if (AllThreadsShouldSleep || threadID >= ActiveThreads)
- pthread_cond_wait(&WaitCond, &WaitLock);
-
+ pthread_cond_wait(&WaitCond, &WaitLock);
pthread_mutex_unlock(&WaitLock);
#else
WaitForSingleObject(SitIdleEvent[threadID], INFINITE);
#endif
+ // State is already changed by wake_sleeping_threads()
+ assert(threads[threadID].state == THREAD_AVAILABLE || threadID >= ActiveThreads);
}
- // Out of the while loop to avoid races in case thread is woken up but
- // while condition still holds true so that is put to sleep again.
- threads[threadID].sleeping = false;
-
// If this thread has been assigned work, launch a search
- if (threads[threadID].workIsWaiting)
+ if (threads[threadID].state == THREAD_WORKISWAITING)
{
- assert(!threads[threadID].idle);
+ assert(!AllThreadsShouldExit);
+
+ threads[threadID].state = THREAD_SEARCHING;
- threads[threadID].workIsWaiting = false;
// Dispatch on the split point's node type set up by split().
if (threads[threadID].splitPoint->pvNode)
sp_search_pv(threads[threadID].splitPoint, threadID);
else
sp_search(threads[threadID].splitPoint, threadID);
- threads[threadID].idle = true;
+ assert(threads[threadID].state == THREAD_SEARCHING);
+
+ threads[threadID].state = THREAD_AVAILABLE;
}
// If this thread is the master of a split point and all threads have
// finished their work at this split point, return from the idle loop.
if (waitSp != NULL && waitSp->cpus == 0)
+ {
+ assert( threads[threadID].state == THREAD_AVAILABLE
+ || threads[threadID].state == THREAD_SEARCHING);
+
+ // The master resumes searching above the split point it created.
+ threads[threadID].state = THREAD_SEARCHING;
return;
+ }
}
-
- threads[threadID].running = false;
}
lock_init(&IOLock, NULL);
// Initialize SplitPointStack locks
- for (int i = 0; i < THREAD_MAX; i++)
+ for (i = 0; i < MAX_THREADS; i++)
for (int j = 0; j < ACTIVE_SPLIT_POINTS_MAX; j++)
{
SplitPointStack[i][j].parent = NULL;
pthread_mutex_init(&WaitLock, NULL);
pthread_cond_init(&WaitCond, NULL);
#else
- for (i = 0; i < THREAD_MAX; i++)
+ for (i = 0; i < MAX_THREADS; i++)
SitIdleEvent[i] = CreateEvent(0, FALSE, FALSE, 0);
#endif
// Threads will be put to sleep as soon as created
AllThreadsShouldSleep = true;
- // All threads except the main thread should be initialized to idle state
+ // All threads except the main thread should be initialized to THREAD_AVAILABLE
ActiveThreads = 1;
- for (i = 1; i < THREAD_MAX; i++)
- threads[i].idle = true;
+ threads[0].state = THREAD_SEARCHING;
+ for (i = 1; i < MAX_THREADS; i++)
+ threads[i].state = THREAD_AVAILABLE;
// Launch the helper threads
- for (i = 1; i < THREAD_MAX; i++)
+ for (i = 1; i < MAX_THREADS; i++)
{
#if !defined(_MSC_VER)
}
// Wait until the thread has finished launching and is gone to sleep
- while (!threads[i].running || !threads[i].sleeping);
+ while (threads[i].state != THREAD_SLEEPING);
}
}
// exit_threads() shuts down all helper threads and destroys the split point
// locks. The old per-thread stop/running flags are replaced by waiting for
// each thread to reach THREAD_TERMINATED in idle_loop().
void ThreadsManager::exit_threads() {
// HACK: pretend every slot is active so wake_sleeping_threads() (which
// loops up to ActiveThreads) wakes all helpers, even currently idle ones.
- ActiveThreads = THREAD_MAX; // HACK
+ ActiveThreads = MAX_THREADS; // HACK
AllThreadsShouldSleep = true; // HACK
wake_sleeping_threads();
+
+ // This makes the threads exit idle_loop()
AllThreadsShouldExit = true;
- for (int i = 1; i < THREAD_MAX; i++)
- {
- threads[i].stop = true;
- while (threads[i].running);
- }
+
+ // Busy-wait (no join) until each helper has marked itself terminated
+ for (int i = 1; i < MAX_THREADS; i++)
+ while (threads[i].state != THREAD_TERMINATED);
// Now we can safely destroy the locks
- for (int i = 0; i < THREAD_MAX; i++)
+ for (int i = 0; i < MAX_THREADS; i++)
for (int j = 0; j < ACTIVE_SPLIT_POINTS_MAX; j++)
lock_destroy(&(SplitPointStack[i][j].lock));
}
- // thread_should_stop() checks whether the thread with a given threadID has
- // been asked to stop, directly or indirectly. This can happen if a beta
- // cutoff has occurred in the thread's currently active split point, or in
- // some ancestor of the current split point.
+ // thread_should_stop() checks whether the thread should stop its search.
+ // This can happen if a beta cutoff has occurred in the thread's currently
+ // active split point, or in some ancestor of the current split point.
+ // The per-thread 'stop' flag is gone: a stop is now signalled purely by
+ // a stopRequest raised on some split point in the thread's ancestry,
+ // which also lets the function become const.
- bool ThreadsManager::thread_should_stop(int threadID) {
+ bool ThreadsManager::thread_should_stop(int threadID) const {
assert(threadID >= 0 && threadID < ActiveThreads);
SplitPoint* sp;
- if (threads[threadID].stop)
- return true;
-
- if (ActiveThreads <= 2)
- return false;
-
- for (sp = threads[threadID].splitPoint; sp != NULL; sp = sp->parent)
- if (sp->finished)
- {
- threads[threadID].stop = true;
- return true;
- }
-
- return false;
+ // Walk up the split point chain; loop body intentionally empty. The loop
+ // exits at the first ancestor with stopRequest set, or at NULL if none.
+ for (sp = threads[threadID].splitPoint; sp && !sp->stopRequest; sp = sp->parent);
+ return sp != NULL;
}
assert(master >= 0 && master < ActiveThreads);
assert(ActiveThreads > 1);
- if (!threads[slave].idle || slave == master)
+ if (threads[slave].state != THREAD_AVAILABLE || slave == master)
return false;
// Make a local copy to be sure doesn't change under our feet
}
- // idle_thread_exists() tries to find an idle thread which is available as
+ // available_thread_exists() tries to find an idle thread which is available as
// a slave for the thread with threadID "master".
- bool ThreadsManager::idle_thread_exists(int master) const {
+ bool ThreadsManager::available_thread_exists(int master) const {
assert(master >= 0 && master < ActiveThreads);
assert(ActiveThreads > 1);
// splitPoint->cpus becomes 0), split() returns true.
bool ThreadsManager::split(const Position& p, SearchStack* sstck, int ply,
- Value* alpha, Value* beta, Value* bestValue, const Value futilityValue,
+ Value* alpha, const Value beta, Value* bestValue, const Value futilityValue,
Depth depth, int* moves, MovePicker* mp, int master, bool pvNode) {
assert(p.is_ok());
// If no other thread is available to help us, or if we have too many
// active split points, don't split.
- if ( !idle_thread_exists(master)
+ if ( !available_thread_exists(master)
|| threads[master].activeSplitPoints >= ACTIVE_SPLIT_POINTS_MAX)
{
lock_release(&MPLock);
}
// Pick the next available split point object from the split point stack
- splitPoint = SplitPointStack[master] + threads[master].activeSplitPoints;
- threads[master].activeSplitPoints++;
+ splitPoint = &SplitPointStack[master][threads[master].activeSplitPoints];
// Initialize the split point object
splitPoint->parent = threads[master].splitPoint;
- splitPoint->finished = false;
+ splitPoint->stopRequest = false;
splitPoint->ply = ply;
splitPoint->depth = depth;
- splitPoint->alpha = pvNode ? *alpha : (*beta - 1);
- splitPoint->beta = *beta;
+ splitPoint->alpha = pvNode ? *alpha : beta - 1;
+ splitPoint->beta = beta;
splitPoint->pvNode = pvNode;
splitPoint->bestValue = *bestValue;
splitPoint->futilityValue = futilityValue;
for (int i = 0; i < ActiveThreads; i++)
splitPoint->slaves[i] = 0;
- threads[master].idle = false;
- threads[master].stop = false;
threads[master].splitPoint = splitPoint;
+ threads[master].activeSplitPoints++;
+
+ // If we are here it means we are not available
+ assert(threads[master].state != THREAD_AVAILABLE);
- // Allocate available threads setting idle flag to false
+ // Allocate available threads setting state to THREAD_BOOKED
for (int i = 0; i < ActiveThreads && splitPoint->cpus < MaxThreadsPerSplitPoint; i++)
if (thread_is_available(i, master))
{
- threads[i].idle = false;
- threads[i].stop = false;
+ threads[i].state = THREAD_BOOKED;
threads[i].splitPoint = splitPoint;
splitPoint->slaves[i] = 1;
splitPoint->cpus++;
assert(splitPoint->cpus > 1);
- // We can release the lock because master and slave threads are already booked
+ // We can release the lock because slave threads are already booked and master is not available
lock_release(&MPLock);
// Tell the threads that they have work to do. This will make them leave
if (i == master || splitPoint->slaves[i])
{
memcpy(splitPoint->sstack[i] + ply - 1, sstck + ply - 1, 4 * sizeof(SearchStack));
- threads[i].workIsWaiting = true; // This makes the slave to exit from idle_loop()
+
+ assert(i == master || threads[i].state == THREAD_BOOKED);
+
+ threads[i].state = THREAD_WORKISWAITING; // This makes the slave to exit from idle_loop()
}
// Everything is set up. The master thread enters the idle loop, from
- // which it will instantly launch a search, because its workIsWaiting
- // slot is 'true'. We send the split point as a second parameter to the
+ // which it will instantly launch a search, because its state is
+ // THREAD_WORKISWAITING. We send the split point as a second parameter to the
// idle loop, which means that the main thread will return from the idle
// loop when all threads have finished their work at this split point
// (i.e. when splitPoint->cpus == 0).
if (pvNode)
*alpha = splitPoint->alpha;
- *beta = splitPoint->beta;
*bestValue = splitPoint->bestValue;
- threads[master].stop = false;
- threads[master].idle = false;
threads[master].activeSplitPoints--;
threads[master].splitPoint = splitPoint->parent;
for (int i = 1; i < ActiveThreads; i++)
{
- assert(threads[i].sleeping == true);
+ assert(threads[i].state == THREAD_SLEEPING);
- threads[i].idle = true;
- threads[i].workIsWaiting = false;
+ threads[i].state = THREAD_AVAILABLE;
}
#if !defined(_MSC_VER)
pthread_cond_broadcast(&WaitCond);
pthread_mutex_unlock(&WaitLock);
#else
- for (int i = 1; i < THREAD_MAX; i++)
+ for (int i = 1; i < MAX_THREADS; i++)
SetEvent(SitIdleEvent[i]);
#endif
- // Wait for the threads to be all woken up
- for (int i = 1; i < ActiveThreads; i++)
- while (threads[i].sleeping);
}
// put_threads_to_sleep() makes all the threads go to sleep just before
- // to leave think(), at the end of the search. threads should have already
+ // to leave think(), at the end of the search. Threads should have already
// finished the job and should be idle.
void ThreadsManager::put_threads_to_sleep() {
assert(!AllThreadsShouldSleep);
+ // Raising this flag sends the threads to sleep inside idle_loop()
AllThreadsShouldSleep = true;
- // Wait for the threads to be all sleeping
+ // Wait for the threads to be all sleeping and reset flags
+ // to a known state.
for (int i = 1; i < ActiveThreads; i++)
- while (!threads[i].sleeping);
- }
+ {
+ // Busy-wait until the helper reaches THREAD_SLEEPING
+ while (threads[i].state != THREAD_SLEEPING);
+ // This flag can be in a random state
+ threads[i].printCurrentLineRequest = false;
+ }
+ }
// print_current_line() prints _once_ the current line of search for a
// given thread and then setup the print request for the next thread.
// One shot only
threads[threadID].printCurrentLineRequest = false;
- if (!threads[threadID].idle)
+ if (threads[threadID].state == THREAD_SEARCHING)
{
lock_grab(&IOLock);
cout << "info currline " << (threadID + 1);