-// split() does the actual work of distributing the work at a node between
-// several available threads. If it does not succeed in splitting the
-// node (because no idle threads are available, or because we have no unused
-// split point objects), the function immediately returns. If splitting is
-// possible, a SplitPoint object is initialized with all the data that must be
-// copied to the helper threads and we tell our helper threads that they have
-// been assigned work. This will cause them to instantly leave their idle loops and
-// call search(). When all threads have returned from search(), split() returns.
-
-template <bool Fake>
-Value ThreadsManager::split(Position& pos, SearchStack* ss, Value alpha, Value beta,
- Value bestValue, Depth depth, Move threatMove,
- int moveCount, MovePicker* mp, int nodeType) {
- assert(pos.is_ok());
- assert(bestValue >= -VALUE_INFINITE);
- assert(bestValue <= alpha);
- assert(alpha < beta);
- assert(beta <= VALUE_INFINITE);
- assert(depth > DEPTH_ZERO);
- assert(pos.thread() >= 0 && pos.thread() < activeThreads);
- assert(activeThreads > 1);
-
- int i, master = pos.thread();
- Thread& masterThread = threads[master];
-
- // If we already have too many active split points, don't split
- if (masterThread.activeSplitPoints >= MAX_ACTIVE_SPLIT_POINTS)
- return bestValue;
-
- // Pick the next available split point object from the split point stack
- SplitPoint& splitPoint = masterThread.splitPoints[masterThread.activeSplitPoints];
-
- // Initialize the split point object
- splitPoint.parent = masterThread.splitPoint;
- splitPoint.master = master;
- splitPoint.is_betaCutoff = false;
- splitPoint.depth = depth;
- splitPoint.threatMove = threatMove;
- splitPoint.alpha = alpha;
- splitPoint.beta = beta;
- splitPoint.nodeType = nodeType;
- splitPoint.bestValue = bestValue;
- splitPoint.mp = mp;
- splitPoint.moveCount = moveCount;
- splitPoint.pos = &pos;
- splitPoint.nodes = 0;
- splitPoint.ss = ss;
- for (i = 0; i < activeThreads; i++)
- splitPoint.is_slave[i] = false;
-
- // If we are here it means we are not available
- assert(masterThread.state == Thread::SEARCHING);
-
- int booked = 0;
-
- // Try to allocate available threads setting state to Thread::BOOKED, this
- // must be done under lock protection to avoid concurrent allocation of
- // the same slave by another master.
- lock_grab(&threadsLock);
-
- for (i = 0; !Fake && i < activeThreads && booked < maxThreadsPerSplitPoint; i++)
- if (i != master && threads[i].is_available_to(master))
- {
- threads[i].state = Thread::BOOKED;
- threads[i].splitPoint = &splitPoint;
- splitPoint.is_slave[i] = true;
- booked++;
- }
-
- lock_release(&threadsLock);
-
- // We failed to allocate even one slave, return
- if (!Fake && !booked)
- return bestValue;
-
- masterThread.activeSplitPoints++;
- masterThread.splitPoint = &splitPoint;
-
- // Tell the threads that they have some work to do. This will make them leave
- // their idle loop.
- for (i = 0; i < activeThreads; i++)
- if (i == master || splitPoint.is_slave[i])
- {
- assert(i == master || threads[i].state == Thread::BOOKED);
-
- // This makes the slave to exit from idle_loop()
- threads[i].state = Thread::WORKISWAITING;
-
- if (useSleepingThreads && i != master)
- threads[i].wake_up();
- }
-
- // Everything is set up. The master thread enters the idle loop, from
- // which it will instantly launch a search, because its state is
- // THREAD_WORKISWAITING. We send the split point as a second parameter to the
- // idle loop, which means that the main thread will return from the idle
- // loop when all threads have finished their work at this split point.
- idle_loop(master, &splitPoint);
-
- // We have returned from the idle loop, which means that all threads are
- // finished. Note that changing state and decreasing activeSplitPoints is done
- // under lock protection to avoid a race with Thread::is_available_to().
- lock_grab(&threadsLock);
-
- masterThread.state = Thread::SEARCHING;
- masterThread.activeSplitPoints--;
- masterThread.splitPoint = splitPoint.parent;
-
- lock_release(&threadsLock);
-
- pos.set_nodes_searched(pos.nodes_searched() + splitPoint.nodes);
- return splitPoint.bestValue;