X-Git-Url: https://git.sesse.net/?p=stockfish;a=blobdiff_plain;f=src%2Fsearch.cpp;h=43a3175bfe83e309b219f116c8bcc3aded3a0d1b;hp=a04766533b953f27798b7665c4a1d99c76bc4f82;hb=9369f4963d9376820cb5ca4bad66b86e67b0a010;hpb=8d47caa16ec9d2efad44f2638ce7d7637216d281

diff --git a/src/search.cpp b/src/search.cpp
index a0476653..43a3175b 100644
--- a/src/search.cpp
+++ b/src/search.cpp
@@ -1524,9 +1524,9 @@ void Thread::idle_loop() {
 
   // Pointer 'this_sp' is not null only if we are called from split(), and not
   // at the thread creation. This means we are the split point's master.
-  SplitPoint* this_sp = splitPointsSize ? activeSplitPoint : NULL;
+  SplitPoint* this_sp = activeSplitPoint;
 
-  assert(!this_sp || (this_sp->masterThread == this && searching));
+  assert(!this_sp || (this_sp->master == this && searching));
 
   while (!exit)
   {
@@ -1536,6 +1536,7 @@ void Thread::idle_loop() {
           Threads.mutex.lock();
 
           assert(activeSplitPoint);
+
           SplitPoint* sp = activeSplitPoint;
 
           Threads.mutex.unlock();
@@ -1574,11 +1575,11 @@ void Thread::idle_loop() {
 
           // Wake up the master thread so to allow it to return from the idle
           // loop in case we are the last slave of the split point.
-          if (   this != sp->masterThread
-              && sp->slavesMask.none())
+          if (this != sp->master && sp->slavesMask.none())
           {
-              assert(!sp->masterThread->searching);
-              sp->masterThread->notify_one();
+              assert(!sp->master->searching);
+
+              sp->master->notify_one();
           }
 
           // After releasing the lock we can't access any SplitPoint related data
@@ -1589,12 +1590,11 @@ void Thread::idle_loop() {
           // Try to late join to another split point if none of its slaves has
           // already finished.
           SplitPoint* bestSp = NULL;
-          Thread* bestThread = NULL;
-          int bestScore = INT_MAX;
+          int minLevel = INT_MAX;
 
           for (size_t i = 0; i < Threads.size(); ++i)
           {
-              const int size = Threads[i]->splitPointsSize; // Local copy
+              const size_t size = Threads[i]->splitPointsSize; // Local copy
               sp = size ? &Threads[i]->splitPoints[size - 1] : NULL;
 
               if (   sp
@@ -1604,19 +1604,18 @@
               {
                   assert(this != Threads[i]);
                   assert(!(this_sp && this_sp->slavesMask.none()));
+                  assert(Threads.size() > 2);
 
-                  // Compute the recursive split points chain size
-                  int level = -1;
-                  for (SplitPoint* spp = Threads[i]->activeSplitPoint; spp; spp = spp->parentSplitPoint)
+                  // Prefer to join to SP with few parents to reduce the probability
+                  // that a cut-off occurs above us, and hence we waste our work.
+                  int level = 0;
+                  for (SplitPoint* p = Threads[i]->activeSplitPoint; p; p = p->parentSplitPoint)
                       level++;
 
-                  int score = level * 256 * 256 + (int)sp->slavesMask.count() * 256 - sp->depth * 1;
-
-                  if (score < bestScore)
+                  if (level < minLevel)
                   {
                       bestSp = sp;
-                      bestThread = Threads[i];
-                      bestScore = score;
+                      minLevel = level;
                   }
               }
           }
@@ -1631,7 +1630,7 @@
 
               if (   sp->allSlavesSearching
                   && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
-                  && available_to(bestThread))
+                  && available_to(sp->master))
               {
                   sp->slavesMask.set(idx);
                   activeSplitPoint = sp;
@@ -1643,7 +1642,7 @@
           }
       }
 
-      // Grab the lock to avoid races with Thread::notify_one()
+      // Avoid races with notify_one() fired from last slave of the split point
      mutex.lock();
 
      // If we are master and all slaves have finished then exit idle_loop
@@ -1705,7 +1704,7 @@ void check_time() {
 
      // Loop across all split points and sum accumulated SplitPoint nodes plus
      // all the currently active positions nodes.
      for (size_t i = 0; i < Threads.size(); ++i)
-          for (int j = 0; j < Threads[i]->splitPointsSize; ++j)
+          for (size_t j = 0; j < Threads[i]->splitPointsSize; ++j)
          {
              SplitPoint& sp = Threads[i]->splitPoints[j];
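
Note on the late-join change above: the old code ranked candidate split points
by the composite score level * 256 * 256 + slavesMask.count() * 256 - depth,
while the patched code keeps only the dominant term, the length of the owner's
split point chain (this is also why bestThread disappears and the recheck uses
available_to(sp->master)). Below is a minimal, compilable sketch of just that
selection rule. The types are simplified stand-ins, not the real ones from
thread.h, and the real loop additionally requires
sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT and available_to(), then
rechecks everything under lock, as the diff shows.

#include <climits>
#include <cstddef>
#include <vector>

// Simplified stand-ins for the real SplitPoint/Thread types.
struct SplitPoint {
    SplitPoint* parentSplitPoint;   // enclosing split point, NULL at the root
    bool        allSlavesSearching;
};

struct Thread {
    std::vector<SplitPoint> splitPoints;      // active split points, newest last
    SplitPoint*             activeSplitPoint; // innermost one, NULL if idle
};

// Length of a thread's split point chain. Fewer parents means fewer nodes
// above us where a cut-off could make the joined work useless.
static int chain_level(const Thread& th) {
    int level = 0;
    for (SplitPoint* p = th.activeSplitPoint; p; p = p->parentSplitPoint)
        level++;
    return level;
}

// The patched rule: of each thread's newest split point, join the one whose
// owner has the shortest chain (the "if (level < minLevel)" loop above).
SplitPoint* pick_late_join(std::vector<Thread>& threads) {
    SplitPoint* bestSp   = NULL;
    int         minLevel = INT_MAX;

    for (size_t i = 0; i < threads.size(); ++i) {
        const size_t size = threads[i].splitPoints.size();
        SplitPoint* sp = size ? &threads[i].splitPoints[size - 1] : NULL;

        if (sp && sp->allSlavesSearching) {
            int level = chain_level(threads[i]);
            if (level < minLevel) {
                bestSp   = sp;
                minLevel = level;
            }
        }
    }
    return bestSp;
}

The old score could prefer a deep but lightly populated split point over a
shallow one; collapsing it to the chain level alone follows the new comment's
intent (fewer parents, lower probability that a cut-off above us wastes the
work) at the cost of no longer breaking ties by slave count or depth.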