X-Git-Url: https://git.sesse.net/?p=stockfish;a=blobdiff_plain;f=src%2Fsearch.cpp;h=6e0552be64566baac7f6e8d908520447b2ff5f38;hp=dcb89d75caf89733567141297faaf3594271210c;hb=5fd5453e594ee6cadf4a3c502bc53d28cfe5aa8b;hpb=dccaa145d2b57999aa3e368c7c9203731b4e9685

diff --git a/src/search.cpp b/src/search.cpp
index dcb89d75..6e0552be 100644
--- a/src/search.cpp
+++ b/src/search.cpp
@@ -1043,8 +1043,8 @@ moves_loop: // When in check and at SpNode search starts from here
               && depth >= Threads.minimumSplitDepth
               && (   !thisThread->activeSplitPoint
                   || !thisThread->activeSplitPoint->allSlavesSearching
-                  || (   int(Threads.size()) > MAX_SLAVES_PER_SPLITPOINT
-                      && thisThread->activeSplitPoint->slavesCount == MAX_SLAVES_PER_SPLITPOINT))
+                  || (   Threads.size() > MAX_SLAVES_PER_SPLITPOINT
+                      && thisThread->activeSplitPoint->slavesMask.count() == MAX_SLAVES_PER_SPLITPOINT))
               && thisThread->splitPointsSize < MAX_SPLITPOINTS_PER_THREAD)
           {
               assert(bestValue > -VALUE_INFINITE && bestValue < beta);
@@ -1524,9 +1524,9 @@ void Thread::idle_loop() {
 
   // Pointer 'this_sp' is not null only if we are called from split(), and not
   // at the thread creation. This means we are the split point's master.
-  SplitPoint* this_sp = splitPointsSize ? activeSplitPoint : NULL;
+  SplitPoint* this_sp = activeSplitPoint;
 
-  assert(!this_sp || (this_sp->masterThread == this && searching));
+  assert(!this_sp || (this_sp->master == this && searching));
 
   while (!exit)
   {
@@ -1536,6 +1536,7 @@ void Thread::idle_loop() {
           Threads.mutex.lock();
 
           assert(activeSplitPoint);
+
           SplitPoint* sp = activeSplitPoint;
 
           Threads.mutex.unlock();
@@ -1574,11 +1575,11 @@ void Thread::idle_loop() {
 
           // Wake up the master thread so to allow it to return from the idle
           // loop in case we are the last slave of the split point.
-          if (   this != sp->masterThread
-              && sp->slavesMask.none())
+          if (this != sp->master && sp->slavesMask.none())
           {
-              assert(!sp->masterThread->searching);
-              sp->masterThread->notify_one();
+              assert(!sp->master->searching);
+
+              sp->master->notify_one();
           }
 
           // After releasing the lock we can't access any SplitPoint related data
@@ -1588,63 +1589,62 @@ void Thread::idle_loop() {
 
           // Try to late join to another split point if none of its slaves has
           // already finished.
-          if (Threads.size() > 2)
+          SplitPoint* bestSp = NULL;
+          int bestScore = INT_MAX;
+
+          for (size_t i = 0; i < Threads.size(); ++i)
           {
-              SplitPoint *bestSp = NULL;
-              int bestThread = 0;
-              int bestScore = INT_MAX;
+              const size_t size = Threads[i]->splitPointsSize; // Local copy
+              sp = size ? &Threads[i]->splitPoints[size - 1] : NULL;
 
-              for (size_t i = 0; i < Threads.size(); ++i)
+              if (   sp
+                  && sp->allSlavesSearching
+                  && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
+                  && available_to(Threads[i]))
               {
-                  const int size = Threads[i]->splitPointsSize; // Local copy
-                  sp = size ? &Threads[i]->splitPoints[size - 1] : NULL;
+                  assert(this != Threads[i]);
+                  assert(!(this_sp && this_sp->slavesMask.none()));
+                  assert(Threads.size() > 2);
+
+                  // Prefer to join to SP with few parents to reduce the probability
+                  // that a cut-off occurs above us, and hence we waste our work.
+                  int level = -1;
+                  for (SplitPoint* spp = Threads[i]->activeSplitPoint; spp; spp = spp->parentSplitPoint)
+                      level++;
 
-                  if (   sp
-                      && sp->allSlavesSearching
-                      && sp->slavesCount < MAX_SLAVES_PER_SPLITPOINT
-                      && available_to(Threads[i]))
+                  int score = level * 256 * 256 + (int)sp->slavesMask.count() * 256 - sp->depth * 1;
+
+                  if (score < bestScore)
                   {
-                      // Compute the recursive split points chain size
-                      int level = -1;
-                      for (SplitPoint* spp = Threads[i]->activeSplitPoint; spp; spp = spp->parentSplitPoint)
-                          level++;
-
-                      int score = level * 256 * 256 + sp->slavesCount * 256 - sp->depth * 1;
-
-                      if (score < bestScore)
-                      {
-                          bestSp = sp;
-                          bestThread = i;
-                          bestScore = score;
-                      }
+                      bestSp = sp;
+                      bestScore = score;
                   }
               }
+          }
 
-              if (bestSp)
-              {
-                  sp = bestSp;
-
-                  // Recheck the conditions under lock protection
-                  Threads.mutex.lock();
-                  sp->mutex.lock();
+          if (bestSp)
+          {
+              sp = bestSp;
 
-                  if (   sp->allSlavesSearching
-                      && sp->slavesCount < MAX_SLAVES_PER_SPLITPOINT
-                      && available_to(Threads[bestThread]))
-                  {
-                      sp->slavesMask.set(idx);
-                      sp->slavesCount++;
-                      activeSplitPoint = sp;
-                      searching = true;
-                  }
+              // Recheck the conditions under lock protection
+              Threads.mutex.lock();
+              sp->mutex.lock();
 
-                  sp->mutex.unlock();
-                  Threads.mutex.unlock();
+              if (   sp->allSlavesSearching
+                  && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
+                  && available_to(sp->master))
+              {
+                  sp->slavesMask.set(idx);
+                  activeSplitPoint = sp;
+                  searching = true;
               }
+
+              sp->mutex.unlock();
+              Threads.mutex.unlock();
           }
       }
 
-      // Grab the lock to avoid races with Thread::notify_one()
+      // Avoid races with notify_one() fired from last slave of the split point
      mutex.lock();
 
      // If we are master and all slaves have finished then exit idle_loop
@@ -1706,7 +1706,7 @@ void check_time() {
 
  // Loop across all split points and sum accumulated SplitPoint nodes plus
  // all the currently active positions nodes.
  for (size_t i = 0; i < Threads.size(); ++i)
-      for (int j = 0; j < Threads[i]->splitPointsSize; ++j)
+      for (size_t j = 0; j < Threads[i]->splitPointsSize; ++j)
      {
          SplitPoint& sp = Threads[i]->splitPoints[j];
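
For reference, a minimal standalone sketch (not part of the patch) of the late-join scoring that the new Thread::idle_loop code uses to rank candidate split points: a shorter parentSplitPoint chain ("level") dominates, then a lower slavesMask.count(), with greater remaining depth as the final tie-breaker; the lowest score wins. The Candidate struct and the sample numbers below are invented for illustration.

#include <climits>
#include <cstdio>

struct Candidate {          // Hypothetical stand-in for a SplitPoint
    int level;              // Length of the parentSplitPoint chain
    int slaves;             // slavesMask.count() in the real code
    int depth;              // Remaining search depth at the split point
};

int late_join_score(const Candidate& c) {
    // Same packing as the patch: level in the high byte pair, slave
    // count in the middle byte, negated depth as the low-order term.
    return c.level * 256 * 256 + c.slaves * 256 - c.depth * 1;
}

int main() {
    Candidate cands[] = { {2, 1, 20}, {0, 3, 10}, {0, 3, 14} };
    int best = -1, bestScore = INT_MAX;

    for (int i = 0; i < 3; ++i) {
        int score = late_join_score(cands[i]);
        if (score < bestScore) {
            bestScore = score;
            best = i;
        }
    }
    // Picks candidate 2: it ties candidate 1 on level and slave count,
    // but is deeper, so the -depth term gives it the lower score.
    std::printf("join candidate %d (score %d)\n", best, bestScore);
}

Because level is scaled by 256 * 256 and the slave count by 256, a candidate with more parents can never outrank one with fewer, matching the comment in the patch about avoiding work that a cut-off above us would discard.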