git.sesse.net Git - stockfish/commitdiff
Sync with master
author Marco Costalba <mcostalba@gmail.com>
Fri, 20 Feb 2015 09:36:45 +0000 (10:36 +0100)
committer Marco Costalba <mcostalba@gmail.com>
Fri, 20 Feb 2015 09:37:29 +0000 (10:37 +0100)
bench: 7911944

src/evaluate.cpp
src/search.cpp
src/thread.cpp
src/thread.h

index 9af22264b89ff00fbdd247eb33468072d750a962..6c46cb4b78e3d393142738a37449e11c7e02f012 100644 (file)
@@ -412,10 +412,10 @@ namespace {
         // attacked and undefended squares around our king and the quality of
         // the pawn shelter (current 'score' value).
         attackUnits =  std::min(74, ei.kingAttackersCount[Them] * ei.kingAttackersWeight[Them])
-                     + 8 * ei.kingAdjacentZoneAttacksCount[Them]
+                     +  8 * ei.kingAdjacentZoneAttacksCount[Them]
                      + 25 * popcount<Max15>(undefended)
-                     +  11 * (ei.pinnedPieces[Us] != 0)
-                     - mg_value(score) * 31 / 256
+                     + 11 * (ei.pinnedPieces[Us] != 0)
+                     - mg_value(score) / 8
                      - !pos.count<QUEEN>(Them) * 60;
 
         // Analyse the enemy's safe queen contact checks. Firstly, find the
index 9d7c1ef601e6514d09b161ac6683a697cc38c08e..04d67afebf77195f15f8e85758d488ef17fed5d9 100644 (file)
@@ -1034,7 +1034,9 @@ moves_loop: // When in check and at SpNode search starts from here
           &&  Threads.size() >= 2
           &&  depth >= Threads.minimumSplitDepth
           &&  (   !thisThread->activeSplitPoint
-               || !thisThread->activeSplitPoint->allSlavesSearching)
+               || !thisThread->activeSplitPoint->allSlavesSearching
+               || (   Threads.size() > MAX_SLAVES_PER_SPLITPOINT
+                   && thisThread->activeSplitPoint->slavesMask.count() == MAX_SLAVES_PER_SPLITPOINT))
           &&  thisThread->splitPointsSize < MAX_SPLITPOINTS_PER_THREAD)
       {
           assert(bestValue > -VALUE_INFINITE && bestValue < beta);
@@ -1579,34 +1581,61 @@ void Thread::idle_loop() {
 
           // Try to late join to another split point if none of its slaves has
           // already finished.
-          if (Threads.size() > 2)
-              for (size_t i = 0; i < Threads.size(); ++i)
+          SplitPoint* bestSp = NULL;
+          Thread* bestThread = NULL;
+          int bestScore = INT_MAX;
+
+          for (size_t i = 0; i < Threads.size(); ++i)
+          {
+              const size_t size = Threads[i]->splitPointsSize; // Local copy
+              sp = size ? &Threads[i]->splitPoints[size - 1] : nullptr;
+
+              if (   sp
+                  && sp->allSlavesSearching
+                  && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
+                  && available_to(Threads[i]))
               {
-                  const int size = Threads[i]->splitPointsSize; // Local copy
-                  sp = size ? &Threads[i]->splitPoints[size - 1] : nullptr;
+                  assert(this != Threads[i]);
+                  assert(!(this_sp && this_sp->slavesMask.none()));
+                  assert(Threads.size() > 2);
+
+                  // Prefer to join to SP with few parents to reduce the probability
+                  // that a cut-off occurs above us, and hence we waste our work.
+                  int level = -1;
+                  for (SplitPoint* spp = Threads[i]->activeSplitPoint; spp; spp = spp->parentSplitPoint)
+                      level++;
 
-                  if (   sp
-                      && sp->allSlavesSearching
-                      && available_to(Threads[i]))
+                  int score = level * 256 * 256 + (int)sp->slavesMask.count() * 256 - sp->depth * 1;
+
+                  if (score < bestScore)
                   {
-                      // Recheck the conditions under lock protection
-                      Threads.mutex.lock();
-                      sp->mutex.lock();
-
-                      if (   sp->allSlavesSearching
-                          && available_to(Threads[i]))
-                      {
-                           sp->slavesMask.set(idx);
-                           activeSplitPoint = sp;
-                           searching = true;
-                      }
-
-                      sp->mutex.unlock();
-                      Threads.mutex.unlock();
-
-                      break; // Just a single attempt
+                      bestSp = sp;
+                      bestThread = Threads[i];
+                      bestScore = score;
                   }
               }
+          }
+
+          if (bestSp)
+          {
+              sp = bestSp;
+
+              // Recheck the conditions under lock protection
+              Threads.mutex.lock();
+              sp->mutex.lock();
+
+              if (   sp->allSlavesSearching
+                  && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
+                  && available_to(bestThread))
+              {
+                  sp->slavesMask.set(idx);
+                  activeSplitPoint = sp;
+                  searching = true;
+              }
+
+              sp->mutex.unlock();
+              Threads.mutex.unlock();
+          }
       }
 
       // Grab the lock to avoid races with Thread::notify_one()
@@ -1668,7 +1697,7 @@ void check_time() {
       // Loop across all split points and sum accumulated SplitPoint nodes plus
       // all the currently active positions nodes.
       for (Thread* th : Threads)
-          for (int i = 0; i < th->splitPointsSize; ++i)
+          for (size_t i = 0; i < th->splitPointsSize; ++i)
           {
               SplitPoint& sp = th->splitPoints[i];
 
index c25fc129cd4ef0bc570e9377ecfe63765e8fb294..279d9cfe2cdede4a254a126346530ab3c6925357 100644 (file)
@@ -81,7 +81,8 @@ void ThreadBase::wait_for(volatile const bool& condition) {
 Thread::Thread() /* : splitPoints() */ { // Initialization of non POD broken in MSVC
 
   searching = false;
-  maxPly = splitPointsSize = 0;
+  maxPly = 0;
+  splitPointsSize = 0;
   activeSplitPoint = nullptr;
   activePosition = nullptr;
   idx = Threads.size(); // Starts from 0
@@ -115,7 +116,7 @@ bool Thread::available_to(const Thread* master) const {
 
   // Make a local copy to be sure it doesn't become zero under our feet while
   // testing next condition and so leading to an out of bounds access.
-  const int size = splitPointsSize;
+  const size_t size = splitPointsSize;
 
   // No split points means that the thread is available as a slave for any
   // other thread otherwise apply the "helpful master" concept if possible.
@@ -174,7 +175,8 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
 
   Thread* slave;
 
-  while ((slave = Threads.available_slave(this)) != nullptr)
+  while (    sp.slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
+         && (slave = Threads.available_slave(this)) != nullptr)
   {
       sp.slavesMask.set(slave->idx);
       slave->activeSplitPoint = &sp;
index 3f902dc17b794a35250f87759e932dd9f0607bd4..54083d2e5c83af994e0c3f647dc61eacd51d44cf 100644 (file)
@@ -34,8 +34,9 @@
 
 struct Thread;
 
-const int MAX_THREADS = 128;
-const int MAX_SPLITPOINTS_PER_THREAD = 8;
+const size_t MAX_THREADS = 128;
+const size_t MAX_SPLITPOINTS_PER_THREAD = 8;
+const size_t MAX_SLAVES_PER_SPLITPOINT = 4;
 
 /// SplitPoint struct stores information shared by the threads searching in
 /// parallel below the same split point. It is populated at splitting time.
@@ -108,7 +109,7 @@ struct Thread : public ThreadBase {
   size_t idx;
   int maxPly;
   SplitPoint* volatile activeSplitPoint;
-  volatile int splitPointsSize;
+  volatile size_t splitPointsSize;
   volatile bool searching;
 };