summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
9a542d9)
When calling split, either we immediately return because we are unable to
find available slaves, or we start searching on _all_ the moves
of the node until a cut-off occurs, so that when returning
from split we immediately leave the moves loop.
Because of this we don't need to change alpha inside split() and
we can use a signature similar to search() so to better clarify
that split() is actually a search on the remaining node's moves.
No functional change with faked split.
Signed-off-by: Marco Costalba <mcostalba@gmail.com>
&& Threads.available_slave_exists(pos.thread())
&& !StopRequest
&& !thread.cutoff_occurred())
&& Threads.available_slave_exists(pos.thread())
&& !StopRequest
&& !thread.cutoff_occurred())
- Threads.split<FakeSplit>(pos, ss, &alpha, beta, &bestValue, depth,
- threatMove, moveCount, &mp, NT);
+ bestValue = Threads.split<FakeSplit>(pos, ss, alpha, beta, bestValue, depth,
+ threatMove, moveCount, &mp, NT);
}
// Step 20. Check for mate and stalemate
}
// Step 20. Check for mate and stalemate
// call search(). When all threads have returned from search() then split() returns.
template <bool Fake>
// call search(). When all threads have returned from search() then split() returns.
template <bool Fake>
-void ThreadsManager::split(Position& pos, SearchStack* ss, Value* alpha, const Value beta,
- Value* bestValue, Depth depth, Move threatMove,
- int moveCount, MovePicker* mp, int nodeType) {
+Value ThreadsManager::split(Position& pos, SearchStack* ss, Value alpha, Value beta,
+ Value bestValue, Depth depth, Move threatMove,
+ int moveCount, MovePicker* mp, int nodeType) {
- assert(*bestValue >= -VALUE_INFINITE);
- assert(*bestValue <= *alpha);
- assert(*alpha < beta);
+ assert(bestValue >= -VALUE_INFINITE);
+ assert(bestValue <= alpha);
+ assert(alpha < beta);
assert(beta <= VALUE_INFINITE);
assert(depth > DEPTH_ZERO);
assert(pos.thread() >= 0 && pos.thread() < activeThreads);
assert(beta <= VALUE_INFINITE);
assert(depth > DEPTH_ZERO);
assert(pos.thread() >= 0 && pos.thread() < activeThreads);
// If we already have too many active split points, don't split
if (masterThread.activeSplitPoints >= MAX_ACTIVE_SPLIT_POINTS)
// If we already have too many active split points, don't split
if (masterThread.activeSplitPoints >= MAX_ACTIVE_SPLIT_POINTS)
// Pick the next available split point object from the split point stack
SplitPoint& splitPoint = masterThread.splitPoints[masterThread.activeSplitPoints];
// Pick the next available split point object from the split point stack
SplitPoint& splitPoint = masterThread.splitPoints[masterThread.activeSplitPoints];
splitPoint.is_betaCutoff = false;
splitPoint.depth = depth;
splitPoint.threatMove = threatMove;
splitPoint.is_betaCutoff = false;
splitPoint.depth = depth;
splitPoint.threatMove = threatMove;
- splitPoint.alpha = *alpha;
+ splitPoint.alpha = alpha;
splitPoint.beta = beta;
splitPoint.nodeType = nodeType;
splitPoint.beta = beta;
splitPoint.nodeType = nodeType;
- splitPoint.bestValue = *bestValue;
+ splitPoint.bestValue = bestValue;
splitPoint.mp = mp;
splitPoint.moveCount = moveCount;
splitPoint.pos = &pos;
splitPoint.mp = mp;
splitPoint.moveCount = moveCount;
splitPoint.pos = &pos;
// We failed to allocate even one slave, return
if (!Fake && !booked)
// We failed to allocate even one slave, return
if (!Fake && !booked)
masterThread.activeSplitPoints++;
masterThread.splitPoint = &splitPoint;
masterThread.activeSplitPoints++;
masterThread.splitPoint = &splitPoint;
- // Tell the threads that they have work to do. This will make them leave
+ // Tell the threads that they have some work to do. This will make them leave
// their idle loop.
for (i = 0; i < activeThreads; i++)
if (i == master || splitPoint.is_slave[i])
// their idle loop.
for (i = 0; i < activeThreads; i++)
if (i == master || splitPoint.is_slave[i])
idle_loop(master, &splitPoint);
// We have returned from the idle loop, which means that all threads are
idle_loop(master, &splitPoint);
// We have returned from the idle loop, which means that all threads are
- // finished. Update alpha and bestValue, and return. Note that changing
- // state and decreasing activeSplitPoints is done under lock protection
- // to avoid a race with Thread::is_available_to().
+ // finished. Note that changing state and decreasing activeSplitPoints is done
+ // under lock protection to avoid a race with Thread::is_available_to().
lock_grab(&threadsLock);
masterThread.state = Thread::SEARCHING;
lock_grab(&threadsLock);
masterThread.state = Thread::SEARCHING;
lock_release(&threadsLock);
lock_release(&threadsLock);
- *alpha = splitPoint.alpha;
- *bestValue = splitPoint.bestValue;
pos.set_nodes_searched(pos.nodes_searched() + splitPoint.nodes);
pos.set_nodes_searched(pos.nodes_searched() + splitPoint.nodes);
+ return splitPoint.bestValue;
}
// Explicit template instantiations
}
// Explicit template instantiations
-template void ThreadsManager::split<false>(Position&, SearchStack*, Value*, const Value, Value*, Depth, Move, int, MovePicker*, int);
-template void ThreadsManager::split<true>(Position&, SearchStack*, Value*, const Value, Value*, Depth, Move, int, MovePicker*, int);
+template Value ThreadsManager::split<false>(Position&, SearchStack*, Value, Value, Value, Depth, Move, int, MovePicker*, int);
+template Value ThreadsManager::split<true>(Position&, SearchStack*, Value, Value, Value, Depth, Move, int, MovePicker*, int);
void idle_loop(int threadID, SplitPoint* sp);
template <bool Fake>
void idle_loop(int threadID, SplitPoint* sp);
template <bool Fake>
- void split(Position& pos, SearchStack* ss, Value* alpha, const Value beta, Value* bestValue,
- Depth depth, Move threatMove, int moveCount, MovePicker* mp, int nodeType);
+ Value split(Position& pos, SearchStack* ss, Value alpha, Value beta, Value bestValue,
+ Depth depth, Move threatMove, int moveCount, MovePicker* mp, int nodeType);
private:
Thread threads[MAX_THREADS];
Lock threadsLock;
private:
Thread threads[MAX_THREADS];
Lock threadsLock;