git.sesse.net Git - stockfish/commitdiff
Sync with master
author     Marco Costalba <mcostalba@gmail.com>
           Fri, 20 Feb 2015 09:36:45 +0000 (10:36 +0100)
committer  Marco Costalba <mcostalba@gmail.com>
           Fri, 20 Feb 2015 09:37:29 +0000 (10:37 +0100)
bench: 7911944

src/evaluate.cpp
src/search.cpp
src/thread.cpp
src/thread.h

src/evaluate.cpp: Simple merge (no conflicts, so no combined diff is shown)
diff --cc src/search.cpp
index 9d7c1ef601e6514d09b161ac6683a697cc38c08e,15b93a728de884211a0ed864963010d502c2a9c9..04d67afebf77195f15f8e85758d488ef17fed5d9
@@@ -1579,34 -1588,61 +1581,61 @@@ void Thread::idle_loop() 
  
            // Try to late join to another split point if none of its slaves has
            // already finished.
-           if (Threads.size() > 2)
-               for (size_t i = 0; i < Threads.size(); ++i)
+           SplitPoint* bestSp = NULL;
+           Thread* bestThread = NULL;
+           int bestScore = INT_MAX;
+           for (size_t i = 0; i < Threads.size(); ++i)
+           {
+               const size_t size = Threads[i]->splitPointsSize; // Local copy
 -              sp = size ? &Threads[i]->splitPoints[size - 1] : NULL;
++              sp = size ? &Threads[i]->splitPoints[size - 1] : nullptr;
+               if (   sp
+                   && sp->allSlavesSearching
+                   && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
+                   && available_to(Threads[i]))
                {
-                   const int size = Threads[i]->splitPointsSize; // Local copy
-                   sp = size ? &Threads[i]->splitPoints[size - 1] : nullptr;
+                   assert(this != Threads[i]);
+                   assert(!(this_sp && this_sp->slavesMask.none()));
+                   assert(Threads.size() > 2);
+                   // Prefer to join to SP with few parents to reduce the probability
+                   // that a cut-off occurs above us, and hence we waste our work.
+                   int level = -1;
+                   for (SplitPoint* spp = Threads[i]->activeSplitPoint; spp; spp = spp->parentSplitPoint)
+                       level++;
  
-                   if (   sp
-                       && sp->allSlavesSearching
-                       && available_to(Threads[i]))
+                   int score = level * 256 * 256 + (int)sp->slavesMask.count() * 256 - sp->depth * 1;
+                   if (score < bestScore)
                    {
-                       // Recheck the conditions under lock protection
-                       Threads.mutex.lock();
-                       sp->mutex.lock();
-                       if (   sp->allSlavesSearching
-                           && available_to(Threads[i]))
-                       {
-                            sp->slavesMask.set(idx);
-                            activeSplitPoint = sp;
-                            searching = true;
-                       }
-                       sp->mutex.unlock();
-                       Threads.mutex.unlock();
-                       break; // Just a single attempt
+                       bestSp = sp;
+                       bestThread = Threads[i];
+                       bestScore = score;
                    }
                }
+           }
+           if (bestSp)
+           {
+               sp = bestSp;
+               // Recheck the conditions under lock protection
+               Threads.mutex.lock();
+               sp->mutex.lock();
+               if (   sp->allSlavesSearching
+                   && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
+                   && available_to(bestThread))
+               {
+                   sp->slavesMask.set(idx);
+                   activeSplitPoint = sp;
+                   searching = true;
+               }
+               sp->mutex.unlock();
+               Threads.mutex.unlock();
+           }
        }
  
        // Grab the lock to avoid races with Thread::notify_one()
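
The idle_loop hunk above replaces the old "single attempt" late join with a scored scan over every active split point: a thread about to go idle ranks candidates by how many ancestor split points they have (fewer means a cut-off above is less likely to throw the work away), then by how many slaves are already attached, then by remaining depth, and joins the lowest score. The sketch below restates just that scoring outside the engine; CandidateSp and pick_best are hypothetical names, and only the 256-based weighting is taken from the patch.

    #include <climits>
    #include <cstddef>
    #include <vector>

    // Hypothetical stand-in for the fields the patch reads off a SplitPoint.
    struct CandidateSp {
        int level;       // number of parent split points above this one
        int slaveCount;  // slavesMask.count() in the real code
        int depth;       // remaining search depth at the split point
    };

    // Return the index of the lowest-scoring candidate, or -1 if none.
    int pick_best(const std::vector<CandidateSp>& sps) {
        int best = -1, bestScore = INT_MAX;
        for (std::size_t i = 0; i < sps.size(); ++i) {
            // Same weighting as the patch: level dominates, then slave
            // count, then depth (subtracted, so deeper work wins ties).
            int score = sps[i].level * 256 * 256
                      + sps[i].slaveCount * 256
                      - sps[i].depth;
            if (score < bestScore) {
                bestScore = score;
                best = (int)i;
            }
        }
        return best;
    }

As long as level and slave count stay below 256 and depth stays below 256, the three terms occupy disjoint ranges, so the comparison behaves like a lexicographic ordering on (level, slaveCount, -depth).
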
@@@ -1667,10 -1706,10 +1696,10 @@@ void check_time() 
  
        // Loop across all split points and sum accumulated SplitPoint nodes plus
        // all the currently active positions nodes.
 -      for (size_t i = 0; i < Threads.size(); ++i)
 -          for (size_t j = 0; j < Threads[i]->splitPointsSize; ++j)
 +      for (Thread* th : Threads)
-           for (int i = 0; i < th->splitPointsSize; ++i)
++          for (size_t i = 0; i < th->splitPointsSize; ++i)
            {
 -              SplitPoint& sp = Threads[i]->splitPoints[j];
 +              SplitPoint& sp = th->splitPoints[i];
  
                sp.mutex.lock();
  
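
Note also that the idle_loop hunk snapshots splitPointsSize into a local ("// Local copy") before indexing, because the owning thread may push or pop split points concurrently, while check_time takes each split point's mutex before reading its fields. A stripped-down sketch of the snapshot pattern follows; the Worker type and its int payload are made up for illustration.

    #include <cstddef>

    const std::size_t MAX_SPLITPOINTS_PER_THREAD = 8;

    struct Worker {
        volatile std::size_t splitPointsSize = 0;  // grown/shrunk by the owner
        int splitPoints[MAX_SPLITPOINTS_PER_THREAD] = {};  // ints stand in
                                                           // for SplitPoints
    };

    long sum_nodes(const Worker& w) {
        long total = 0;
        const std::size_t size = w.splitPointsSize; // local copy: stable bound
        for (std::size_t i = 0; i < size; ++i)
            total += w.splitPoints[i];  // the real code locks each SP here
        return total;
    }
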
diff --cc src/thread.cpp
index c25fc129cd4ef0bc570e9377ecfe63765e8fb294,18870021ba3665bd026364a24594ca4bf57c26d8..279d9cfe2cdede4a254a126346530ab3c6925357
@@@ -81,9 -89,10 +81,10 @@@ void ThreadBase::wait_for(volatile cons
  Thread::Thread() /* : splitPoints() */ { // Initialization of non POD broken in MSVC
  
    searching = false;
-   maxPly = splitPointsSize = 0;
+   maxPly = 0;
+   splitPointsSize = 0;
 -  activeSplitPoint = NULL;
 -  activePosition = NULL;
 +  activeSplitPoint = nullptr;
 +  activePosition = nullptr;
    idx = Threads.size(); // Starts from 0
  }
  
@@@ -174,7 -183,8 +175,8 @@@ void Thread::split(Position& pos, Stack
  
    Thread* slave;
  
-   while ((slave = Threads.available_slave(this)) != nullptr)
+   while (    sp.slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
 -         && (slave = Threads.available_slave(this)) != NULL)
++         && (slave = Threads.available_slave(this)) != nullptr)
    {
        sp.slavesMask.set(slave->idx);
        slave->activeSplitPoint = &sp;
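
Together with the new MAX_SLAVES_PER_SPLITPOINT constant in thread.h below, this guard bounds how many threads can attach to one split point. slavesMask is a bitset with one bit per thread, and the master sets its own bit at split time, so count() < 4 admits at most three extra slaves. A toy, self-contained illustration of that counting (the recruit loop and thread indices here are made up):

    #include <bitset>
    #include <cstddef>
    #include <iostream>

    const std::size_t MAX_THREADS = 128;
    const std::size_t MAX_SLAVES_PER_SPLITPOINT = 4;

    int main() {
        std::bitset<MAX_THREADS> slavesMask;
        slavesMask.set(0); // the master occupies one bit from the start

        // Try to recruit idle threads 1..9; the guard stops at 4 bits total.
        for (std::size_t idx = 1;
             idx < 10 && slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT; ++idx)
            slavesMask.set(idx);

        std::cout << slavesMask.count() << " threads attached\n"; // prints 4
    }
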
diff --cc src/thread.h
index 3f902dc17b794a35250f87759e932dd9f0607bd4,04e9a370cb9a5604ef7afa4a586263a3d594e28c..54083d2e5c83af994e0c3f647dc61eacd51d44cf
  
  struct Thread;
  
- const int MAX_THREADS = 128;
- const int MAX_SPLITPOINTS_PER_THREAD = 8;
+ const size_t MAX_THREADS = 128;
+ const size_t MAX_SPLITPOINTS_PER_THREAD = 8;
+ const size_t MAX_SLAVES_PER_SPLITPOINT = 4;
  
 -/// Mutex and ConditionVariable struct are wrappers of the low level locking
 -/// machinery and are modeled after the corresponding C++11 classes.
 -
 -struct Mutex {
 -  Mutex() { lock_init(l); }
 - ~Mutex() { lock_destroy(l); }
 -
 -  void lock() { lock_grab(l); }
 -  void unlock() { lock_release(l); }
 -
 -private:
 -  friend struct ConditionVariable;
 -
 -  Lock l;
 -};
 -
 -struct ConditionVariable {
 -  ConditionVariable() { cond_init(c); }
 - ~ConditionVariable() { cond_destroy(c); }
 -
 -  void wait(Mutex& m) { cond_wait(c, m.l); }
 -  void wait_for(Mutex& m, int ms) { timed_wait(c, m.l, ms); }
 -  void notify_one() { cond_signal(c); }
 -
 -private:
 -  WaitCondition c;
 -};
 -
 -
  /// SplitPoint struct stores information shared by the threads searching in
  /// parallel below the same split point. It is populated at splitting time.
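
The ` -` prefixes mark the Mutex and ConditionVariable wrappers as present only in master, the second parent; the merge result keeps them deleted. Their own removed comment says they were "modeled after the corresponding C++11 classes", so presumably the merged tree relies on the standard library directly. The mapping below is an assumption on that basis, not code from this commit: each removed operation next to its std equivalent.

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    std::mutex m;                // plays the role of the removed Mutex
    std::condition_variable cv;  // plays the role of ConditionVariable
    bool ready = false;

    void waiter() {
        std::unique_lock<std::mutex> lk(m);  // Mutex::lock()/unlock()
        cv.wait(lk, [] { return ready; });   // ConditionVariable::wait()
    }

    void waiter_with_timeout(int ms) {
        std::unique_lock<std::mutex> lk(m);
        cv.wait_for(lk, std::chrono::milliseconds(ms)); // wait_for(m, ms)
    }

    void notifier() {
        { std::lock_guard<std::mutex> lk(m); ready = true; }
        cv.notify_one();                     // notify_one()
    }
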