diff --git a/src/search.cpp b/src/search.cpp
index ffe36126..6e0552be 100644
--- a/src/search.cpp
+++ b/src/search.cpp
@@ -1042,7 +1042,9 @@ moves_loop: // When in check and at SpNode search starts from here
           &&  Threads.size() >= 2
           &&  depth >= Threads.minimumSplitDepth
           && (   !thisThread->activeSplitPoint
-              || !thisThread->activeSplitPoint->allSlavesSearching)
+              || !thisThread->activeSplitPoint->allSlavesSearching
+              || (   Threads.size() > MAX_SLAVES_PER_SPLITPOINT
+                  && thisThread->activeSplitPoint->slavesMask.count() == MAX_SLAVES_PER_SPLITPOINT))
           &&  thisThread->splitPointsSize < MAX_SPLITPOINTS_PER_THREAD)
       {
           assert(bestValue > -VALUE_INFINITE && bestValue < beta);
@@ -1522,9 +1524,9 @@ void Thread::idle_loop() {
 
   // Pointer 'this_sp' is not null only if we are called from split(), and not
   // at the thread creation. This means we are the split point's master.
-  SplitPoint* this_sp = splitPointsSize ? activeSplitPoint : NULL;
+  SplitPoint* this_sp = activeSplitPoint;
 
-  assert(!this_sp || (this_sp->masterThread == this && searching));
+  assert(!this_sp || (this_sp->master == this && searching));
 
   while (!exit)
   {
@@ -1534,6 +1536,7 @@ void Thread::idle_loop() {
           Threads.mutex.lock();
 
           assert(activeSplitPoint);
+          SplitPoint* sp = activeSplitPoint;
 
           Threads.mutex.unlock();
@@ -1572,11 +1575,11 @@ void Thread::idle_loop() {
 
               // Wake up the master thread so to allow it to return from the idle
               // loop in case we are the last slave of the split point.
-              if (   this != sp->masterThread
-                  && sp->slavesMask.none())
+              if (this != sp->master && sp->slavesMask.none())
               {
-                  assert(!sp->masterThread->searching);
-                  sp->masterThread->notify_one();
+                  assert(!sp->master->searching);
+
+                  sp->master->notify_one();
               }
 
               // After releasing the lock we can't access any SplitPoint related data
@@ -1586,37 +1589,62 @@ void Thread::idle_loop() {
 
           // Try to late join to another split point if none of its slaves has
           // already finished.
-          if (Threads.size() > 2)
-              for (size_t i = 0; i < Threads.size(); ++i)
+          SplitPoint* bestSp = NULL;
+          int bestScore = INT_MAX;
+
+          for (size_t i = 0; i < Threads.size(); ++i)
+          {
+              const size_t size = Threads[i]->splitPointsSize; // Local copy
+              sp = size ? &Threads[i]->splitPoints[size - 1] : NULL;
+
+              if (   sp
+                  && sp->allSlavesSearching
+                  && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
+                  && available_to(Threads[i]))
               {
-                  const int size = Threads[i]->splitPointsSize; // Local copy
-                  sp = size ? &Threads[i]->splitPoints[size - 1] : NULL;
+                  assert(this != Threads[i]);
+                  assert(!(this_sp && this_sp->slavesMask.none()));
+                  assert(Threads.size() > 2);
+
+                  // Prefer to join an SP with few parents to reduce the probability
+                  // that a cut-off occurs above us, wasting our work.
+                  int level = -1;
+                  for (SplitPoint* spp = Threads[i]->activeSplitPoint; spp; spp = spp->parentSplitPoint)
+                      level++;
 
-                  if (   sp
-                      && sp->allSlavesSearching
-                      && available_to(Threads[i]))
+                  int score = level * 256 * 256 + (int)sp->slavesMask.count() * 256 - sp->depth * 1;
+
+                  if (score < bestScore)
                   {
-                      // Recheck the conditions under lock protection
-                      Threads.mutex.lock();
-                      sp->mutex.lock();
-
-                      if (   sp->allSlavesSearching
-                          && available_to(Threads[i]))
-                      {
-                          sp->slavesMask.set(idx);
-                          activeSplitPoint = sp;
-                          searching = true;
-                      }
-
-                      sp->mutex.unlock();
-                      Threads.mutex.unlock();
-
-                      break; // Just a single attempt
+                      bestSp = sp;
+                      bestScore = score;
                   }
               }
+          }
+
+          if (bestSp)
+          {
+              sp = bestSp;
+
+              // Recheck the conditions under lock protection
+              Threads.mutex.lock();
+              sp->mutex.lock();
+
+              if (   sp->allSlavesSearching
+                  && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
+                  && available_to(sp->master))
+              {
+                  sp->slavesMask.set(idx);
+                  activeSplitPoint = sp;
+                  searching = true;
+              }
+
+              sp->mutex.unlock();
+              Threads.mutex.unlock();
+          }
       }
 
-      // Grab the lock to avoid races with Thread::notify_one()
+      // Avoid races with notify_one() fired from the last slave of the split point
      mutex.lock();
 
      // If we are master and all slaves have finished then exit idle_loop
@@ -1678,7 +1706,7 @@ void check_time() {
  // Loop across all split points and sum accumulated SplitPoint nodes plus
  // all the currently active positions nodes.
  for (size_t i = 0; i < Threads.size(); ++i)
-      for (int j = 0; j < Threads[i]->splitPointsSize; ++j)
+      for (size_t j = 0; j < Threads[i]->splitPointsSize; ++j)
      {
          SplitPoint& sp = Threads[i]->splitPoints[j];
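The first hunk widens the condition under which a master thread may open a new split point: besides the two existing cases (no active split point, or some slave of it already finished), it now also allows a re-split when the active split point is saturated and there are spare threads beyond the per-split-point cap. A minimal sketch of just that predicate follows; SplitPointView, mayResplit, and the value 4 for MAX_SLAVES_PER_SPLITPOINT are illustrative stand-ins, and the real check additionally requires sufficient depth, at least two threads, and a free slot in splitPoints.

    #include <cstddef>

    // Simplified stand-in for SplitPoint; only the fields the predicate reads.
    struct SplitPointView {
        bool allSlavesSearching;  // True while every attached slave is busy
        size_t slaveCount;        // Mirrors sp->slavesMask.count()
    };

    const size_t MAX_SLAVES_PER_SPLITPOINT = 4;  // Assumed value, for illustration

    // Returns true when a master thread may open a further split point.
    bool mayResplit(const SplitPointView* active, size_t threadCount)
    {
        if (!active)                      return true;  // No active split point yet
        if (!active->allSlavesSearching)  return true;  // A slave already ran out of work
        // New case from the patch: the active split point is full, and there are
        // threads beyond the per-split-point cap that could take the new work.
        return threadCount > MAX_SLAVES_PER_SPLITPOINT
            && active->slaveCount == MAX_SLAVES_PER_SPLITPOINT;
    }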
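The rewritten late-join logic replaces "grab the first candidate and stop" with a scored scan over every thread's topmost split point, taking the lowest score. The nesting level dominates (weight 256*256), then the number of already-attached slaves (weight 256), and remaining depth breaks ties in favour of deeper work. Below is a runnable sketch of that scoring pass alone; Candidate and joinScore are hypothetical names, and the candidates are assumed to have already passed the allSlavesSearching, slave-cap, and available_to checks.

    #include <climits>
    #include <iostream>
    #include <vector>

    // Hypothetical flattened view of one candidate split point.
    struct Candidate {
        int level;       // Ancestor split points above it (fewer = cut-off less likely)
        int slaveCount;  // Slaves already helping (sp->slavesMask.count())
        int depth;       // Remaining search depth at the split point
    };

    // Same weighting as the patch: level dominates, then slave count,
    // and depth is subtracted so deeper split points score slightly better.
    int joinScore(const Candidate& c)
    {
        return c.level * 256 * 256 + c.slaveCount * 256 - c.depth;
    }

    int main()
    {
        std::vector<Candidate> cands = {
            { 2, 1, 20 },  // Deep in the split-point tree: risky despite few slaves
            { 0, 3, 8  },  // Top-level split point, already well staffed
            { 0, 1, 8  },  // Top-level and understaffed: should win
        };

        int best = -1, bestScore = INT_MAX;
        for (int i = 0; i < (int)cands.size(); ++i)
            if (joinScore(cands[i]) < bestScore)
            {
                bestScore = joinScore(cands[i]);
                best = i;
            }

        std::cout << "would join candidate " << best
                  << " (score " << bestScore << ")\n";  // Prints candidate 2
    }

The 1 : 256 : 256*256 weighting makes the three criteria effectively lexicographic for realistic depths and thread counts, which is presumably why the patch can encode the whole preference in a single integer comparison.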