X-Git-Url: https://git.sesse.net/?p=stockfish;a=blobdiff_plain;f=src%2Fthread.cpp;h=358ced5dec3bf26eb823af4bd82ad8041801ca89;hp=3b87a72415ddd0fd3fc57ba345a55c3af78fbc97;hb=2ef5b4066e649c6ce3b10aa5f1bff7525246646d;hpb=b1768c115cf2bbe7ed6f89dc53a8db85b4442353

diff --git a/src/thread.cpp b/src/thread.cpp
index 3b87a724..358ced5d 100644
--- a/src/thread.cpp
+++ b/src/thread.cpp
@@ -141,7 +141,7 @@ void Thread::wait_for_stop_or_ponderhit() {
 
 bool Thread::cutoff_occurred() const {
 
-  for (SplitPoint* sp = splitPoint; sp; sp = sp->parent)
+  for (SplitPoint* sp = curSplitPoint; sp; sp = sp->parent)
       if (sp->cutoff)
           return true;
 
@@ -163,11 +163,11 @@ bool Thread::is_available_to(int master) const {
 
   // Make a local copy to be sure doesn't become zero under our feet while
   // testing next condition and so leading to an out of bound access.
-  int sp_count = activeSplitPoints;
+  int spCnt = splitPointsCnt;
 
   // No active split points means that the thread is available as a slave for any
   // other thread otherwise apply the "helpful master" concept if possible.
-  return !sp_count || (splitPoints[sp_count - 1].slavesMask & (1ULL << master));
+  return !spCnt || (splitPoints[spCnt - 1].slavesMask & (1ULL << master));
 }
 
 
@@ -216,6 +216,8 @@ void ThreadsManager::set_size(int cnt) {
 
 void ThreadsManager::init() {
 
+  read_uci_options();
+
   cond_init(sleepCond);
   lock_init(splitLock);
 
@@ -224,7 +226,7 @@ void ThreadsManager::init() {
       lock_init(threads[i].sleepLock);
       cond_init(threads[i].sleepCond);
 
-      for (int j = 0; j < MAX_ACTIVE_SPLIT_POINTS; j++)
+      for (int j = 0; j < MAX_SPLITPOINTS_PER_THREAD; j++)
           lock_init(threads[i].splitPoints[j].lock);
   }
 
@@ -264,7 +266,7 @@ void ThreadsManager::exit() {
       lock_destroy(threads[i].sleepLock);
       cond_destroy(threads[i].sleepCond);
 
-      for (int j = 0; j < MAX_ACTIVE_SPLIT_POINTS; j++)
+      for (int j = 0; j < MAX_SPLITPOINTS_PER_THREAD; j++)
           lock_destroy(threads[i].splitPoints[j].lock);
   }
 
@@ -299,8 +301,8 @@ bool ThreadsManager::available_slave_exists(int master) const {
 
 template <bool Fake>
 Value ThreadsManager::split(Position& pos, Stack* ss, Value alpha, Value beta,
-                            Value bestValue, Depth depth, Move threatMove,
-                            int moveCount, MovePicker* mp, int nodeType) {
+                            Value bestValue, Move* bestMove, Depth depth,
+                            Move threatMove, int moveCount, MovePicker *mp, int nodeType) {
   assert(pos.pos_is_ok());
   assert(bestValue > -VALUE_INFINITE);
   assert(bestValue <= alpha);
@@ -313,17 +315,18 @@ Value ThreadsManager::split(Position& pos, Stack* ss, Value alpha, Value beta,
   int master = pos.thread();
   Thread& masterThread = threads[master];
 
-  if (masterThread.activeSplitPoints >= MAX_ACTIVE_SPLIT_POINTS)
+  if (masterThread.splitPointsCnt >= MAX_SPLITPOINTS_PER_THREAD)
       return bestValue;
 
   // Pick the next available split point from the split point stack
-  SplitPoint* sp = &masterThread.splitPoints[masterThread.activeSplitPoints];
+  SplitPoint* sp = &masterThread.splitPoints[masterThread.splitPointsCnt++];
 
-  sp->parent = masterThread.splitPoint;
+  sp->parent = masterThread.curSplitPoint;
   sp->master = master;
   sp->cutoff = false;
   sp->slavesMask = 1ULL << master;
   sp->depth = depth;
+  sp->bestMove = *bestMove;
   sp->threatMove = threatMove;
   sp->alpha = alpha;
   sp->beta = beta;
@@ -337,6 +340,7 @@ Value ThreadsManager::split(Position& pos, Stack* ss, Value alpha, Value beta,
 
   assert(masterThread.is_searching);
 
+  masterThread.curSplitPoint = sp;
   int slavesCnt = 0;
 
   // Try to allocate available threads and ask them to start searching setting
@@ -349,7 +353,7 @@ Value ThreadsManager::split(Position& pos, Stack* ss, Value alpha, Value beta,
       if (threads[i].is_available_to(master))
       {
          sp->slavesMask |= 1ULL << i;
-          threads[i].splitPoint = sp;
+          threads[i].curSplitPoint = sp;
          threads[i].is_searching = true; // Slave leaves idle_loop()
 
          if (useSleepingThreads)
@@ -359,9 +363,6 @@ Value ThreadsManager::split(Position& pos, Stack* ss, Value alpha, Value beta,
               break;
       }
 
-  masterThread.splitPoint = sp;
-  masterThread.activeSplitPoints++;
-
   lock_release(splitLock);
   lock_release(sp->lock);
 
@@ -371,18 +372,25 @@ Value ThreadsManager::split(Position& pos, Stack* ss, Value alpha, Value beta,
   // the thread will return from the idle loop when all slaves have finished
   // their work at this split point.
   if (slavesCnt || Fake)
+  {
       masterThread.idle_loop(sp);
 
+      // In helpful master concept a master can help only a sub-tree of its split
+      // point, and because here is all finished is not possible master is booked.
+      assert(!masterThread.is_searching);
+  }
+
   // We have returned from the idle loop, which means that all threads are
-  // finished. Note that setting is_searching and decreasing activeSplitPoints is
+  // finished. Note that setting is_searching and decreasing splitPointsCnt is
   // done under lock protection to avoid a race with Thread::is_available_to().
   lock_grab(sp->lock); // To protect sp->nodes
   lock_grab(splitLock);
 
   masterThread.is_searching = true;
-  masterThread.activeSplitPoints--;
-  masterThread.splitPoint = sp->parent;
+  masterThread.splitPointsCnt--;
+  masterThread.curSplitPoint = sp->parent;
   pos.set_nodes_searched(pos.nodes_searched() + sp->nodes);
+  *bestMove = sp->bestMove;
 
   lock_release(splitLock);
   lock_release(sp->lock);
@@ -391,8 +399,8 @@ Value ThreadsManager::split(Position& pos, Stack* ss, Value alpha, Value beta,
 }
 
 // Explicit template instantiations
-template Value ThreadsManager::split<false>(Position&, Stack*, Value, Value, Value, Depth, Move, int, MovePicker*, int);
-template Value ThreadsManager::split<true>(Position&, Stack*, Value, Value, Value, Depth, Move, int, MovePicker*, int);
+template Value ThreadsManager::split<false>(Position&, Stack*, Value, Value, Value, Move*, Depth, Move, int, MovePicker*, int);
+template Value ThreadsManager::split<true>(Position&, Stack*, Value, Value, Value, Move*, Depth, Move, int, MovePicker*, int);
 
 // ThreadsManager::set_timer() is used to set the timer to trigger after msec
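
Editor's note (not part of the patch): the following is a minimal, self-contained C++ sketch of the "helpful master" test that the patched Thread::is_available_to() performs, and of why a local copy of splitPointsCnt is taken before indexing splitPoints[]. MiniSplitPoint, MiniThread and the main() driver are hypothetical simplifications for illustration only, not Stockfish's real Thread or SplitPoint types.

    #include <cstdint>
    #include <cstdio>

    const int MAX_SPLITPOINTS_PER_THREAD = 8;

    struct MiniSplitPoint {
        std::uint64_t slavesMask;  // bit i set => thread i participates at this split point
    };

    struct MiniThread {
        MiniSplitPoint splitPoints[MAX_SPLITPOINTS_PER_THREAD];
        volatile int splitPointsCnt;  // changed concurrently by the owning thread in the real engine

        // A thread with no active split points may help anyone; otherwise it may
        // only help a master that is already a slave of its most recent split
        // point (the "helpful master" concept). The local copy guards the array
        // index against splitPointsCnt dropping to zero between test and access.
        bool is_available_to(int master) const {
            int spCnt = splitPointsCnt;
            return !spCnt || (splitPoints[spCnt - 1].slavesMask & (1ULL << master));
        }
    };

    int main() {
        MiniThread t;
        t.splitPointsCnt = 1;
        t.splitPoints[0].slavesMask = (1ULL << 0) | (1ULL << 2); // master 0, slave 2

        std::printf("available to thread 2: %d\n", (int)t.is_available_to(2)); // prints 1
        std::printf("available to thread 3: %d\n", (int)t.is_available_to(3)); // prints 0
        return 0;
    }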
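
Editor's note (not part of the patch): a second small sketch, under the assumption of a much simplified split point, of the bestMove round-trip this patch introduces: the caller passes Move* bestMove into split(), the current best move is seeded into the split point, slaves may improve it while searching, and the master copies it back out after idle_loop() returns. SimpleSplitPoint, fake_parallel_search() and simple_split() are illustrative stand-ins, not Stockfish code.

    #include <cstdio>

    typedef int Move;
    typedef int Value;

    struct SimpleSplitPoint {
        Move  bestMove;
        Value bestValue;
    };

    // Stand-in for the slaves' work: pretend one of them found a better move.
    static void fake_parallel_search(SimpleSplitPoint* sp) {
        sp->bestMove = 42;
        sp->bestValue = 100;
    }

    // Mirrors the shape of the patched ThreadsManager::split(): bestMove is an
    // in/out parameter stored in the split point and read back at the end.
    static Value simple_split(Value bestValue, Move* bestMove) {
        SimpleSplitPoint sp;
        sp.bestMove = *bestMove;    // sp->bestMove = *bestMove;  (seed)
        sp.bestValue = bestValue;

        fake_parallel_search(&sp);  // masterThread.idle_loop(sp) in the real code

        *bestMove = sp.bestMove;    // *bestMove = sp->bestMove;  (copy back)
        return sp.bestValue;
    }

    int main() {
        Move bestMove = 7;
        Value v = simple_split(10, &bestMove);
        std::printf("bestValue=%d bestMove=%d\n", v, bestMove); // prints 100 and 42
        return 0;
    }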