- // If this thread has been assigned work, launch a search
- while (searching)
- {
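- // Pick up the split point assigned to us under the global threads
- // lock, then drop the lock before doing any real work.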
- Threads.mutex.lock();
-
- assert(activeSplitPoint);
- SplitPoint* sp = activeSplitPoint;
-
- Threads.mutex.unlock();
-
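- // Use a local stack and a thread-local copy of the split point's
- // position; copy the master's stack context around sp->ss so that
- // (ss-2) through (ss+2) are valid.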
- Stack stack[MAX_PLY+4], *ss = stack+2; // To allow referencing (ss-2) and (ss+2)
- Position pos(*sp->pos, this);
-
- std::memcpy(ss-2, sp->ss-2, 5 * sizeof(Stack));
- ss->splitPoint = sp;
-
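- // The split point's own mutex serializes access to its shared state
- // (slavesMask, allSlavesSearching, node counters).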
- sp->mutex.lock();
-
- assert(activePosition == NULL);
-
- activePosition = &pos;
-
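- // Resume the split point's search with its stored bounds, depth and
- // node type; the 'true' template argument selects the split point
- // (SpNode) variant of search().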
- if (sp->nodeType == NonPV)
- search<NonPV, true>(pos, ss, sp->alpha, sp->beta, sp->depth, sp->cutNode);
-
- else if (sp->nodeType == PV)
- search<PV, true>(pos, ss, sp->alpha, sp->beta, sp->depth, sp->cutNode);
-
- else if (sp->nodeType == Root)
- search<Root, true>(pos, ss, sp->alpha, sp->beta, sp->depth, sp->cutNode);
-
- else
- assert(false);
-
- assert(searching);
-
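- // Still under sp->mutex: mark this thread idle, remove it from the
- // slaves mask and add the nodes it searched to the split point total.
- // Clearing allSlavesSearching also blocks further late joins.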
- searching = false;
- activePosition = NULL;
- sp->slavesMask.reset(idx);
- sp->allSlavesSearching = false;
- sp->nodes += pos.nodes_searched();
-
- // Wake up the master thread to allow it to return from the idle
- // loop if we are the last slave of the split point.
- if ( this != sp->masterThread
- && sp->slavesMask.none())
- {
- assert(!sp->masterThread->searching);
- sp->masterThread->notify_one();
- }
-
- // After releasing the lock we can't safely access any SplitPoint-related
- // data: the split point could be freed out from under us by its master.
- sp->mutex.unlock();
-
- // Try to late-join another split point, but only one on which no slave
- // has finished searching yet.
- SplitPoint* bestSp = NULL;
- Thread* bestThread = NULL;
- int bestScore = INT_MAX;
-
- for (size_t i = 0; i < Threads.size(); ++i)
- {
- const size_t size = Threads[i]->splitPointsSize; // Local copy, may change concurrently
- sp = size ? &Threads[i]->splitPoints[size - 1] : NULL;
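- // Only the most recently created split point of each thread is a
- // candidate for late joining.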
-
- if ( sp
- && sp->allSlavesSearching
- && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
- && available_to(Threads[i]))
- {
- assert(this != Threads[i]);
- assert(!(this_sp && this_sp->slavesMask.none()));