int ActiveThreads;
volatile bool AllThreadsShouldExit, AllThreadsShouldSleep;
Thread threads[MAX_THREADS];
- SplitPoint SplitPointStack[MAX_THREADS][ACTIVE_SPLIT_POINTS_MAX];
Lock MPLock, WaitLock;
int32_t FutilityMarginsMatrix[16][64]; // [depth][moveNumber]
int FutilityMoveCountArray[32]; // [depth]
- inline Value futility_margin(Depth d, int mn) { return Value(d < 7 * OnePly ? FutilityMarginsMatrix[Max(d, 0)][Min(mn, 63)] : 2 * VALUE_INFINITE); }
+ inline Value futility_margin(Depth d, int mn) { return Value(d < 7 * OnePly ? FutilityMarginsMatrix[Max(d, 1)][Min(mn, 63)] : 2 * VALUE_INFINITE); }
inline int futility_move_count(Depth d) { return d < 16 * OnePly ? FutilityMoveCountArray[d] : 512; }
// Step 14. Reduced search
const Value EasyMoveMargin = Value(0x200);
// Last seconds noise filtering (LSN)
- const bool UseLSNFiltering = false;
+ const bool UseLSNFiltering = true;
const int LSNTime = 100; // In milliseconds
const Value LSNValue = value_from_centipawns(200);
bool loseOnTime = false;
}
// Init futility margins array
- for (d = 0; d < 16; d++) for (mc = 0; mc < 64; mc++)
+ for (d = 1; d < 16; d++) for (mc = 0; mc < 64; mc++)
FutilityMarginsMatrix[d][mc] = 112 * int(log(double(d * d) / 2) / log(2.0) + 1.001) - 8 * mc + 45;
// Init futility move count array
// Print final search statistics
cout << "info nodes " << TM.nodes_searched()
<< " nps " << nps()
- << " time " << current_search_time()
- << " hashfull " << TT.full() << endl;
+ << " time " << current_search_time() << endl;
// Print the best move and the ponder move to the standard output
if (pv[0] == MOVE_NONE)
if (nullValue >= value_mate_in(PLY_MAX))
nullValue = beta;
- // Do zugzwang verification search at high depths
if (depth < 6 * OnePly)
return nullValue;
+ // Do verification search at high depths
ss->skipNullMove = true;
- Value v = search<NonPV>(pos, ss, alpha, beta, depth-5*OnePly, ply);
+ Value v = search<NonPV>(pos, ss, alpha, beta, depth-R*OnePly, ply);
ss->skipNullMove = false;
if (v >= beta)
// Step 11. Decide the new search depth
ext = extension<PvNode>(pos, move, captureOrPromotion, moveIsCheck, singleEvasion, mateThreat, &dangerous);
- // Singular extension search. We extend the TT move if its value is much better than
- // its siblings. To verify this we do a reduced search on all the other moves but the
- // ttMove, if result is lower then ttValue minus a margin then we extend ttMove.
+ // Singular extension search. If all moves but one fail low on a search of (alpha-s, beta-s),
+ // and just one fails high on (alpha, beta), then that move is singular and should be extended.
+ // To verify this we do a reduced search on all the other moves but the ttMove, if result is
+ // lower than ttValue minus a margin then we extend ttMove.
if ( singularExtensionNode
&& move == tte->move()
&& ext < OnePly)
Value v = search<NonPV>(pos, ss, b - 1, b, depth / 2, ply);
ss->skipNullMove = false;
ss->excludedMove = MOVE_NONE;
- if (v < ttValue - SingularExtensionMargin)
+ if (v < b)
ext = OnePly;
}
}
dbg_print_hit_rate();
cout << "info nodes " << TM.nodes_searched() << " nps " << nps()
- << " time " << t << " hashfull " << TT.full() << endl;
+ << " time " << t << endl;
}
// Should we stop the search?
SitIdleEvent[i] = CreateEvent(0, FALSE, FALSE, 0);
#endif
- // Initialize SplitPointStack locks
+ // Initialize splitPoints[] locks
for (i = 0; i < MAX_THREADS; i++)
- for (int j = 0; j < ACTIVE_SPLIT_POINTS_MAX; j++)
- lock_init(&(SplitPointStack[i][j].lock), NULL);
+ for (int j = 0; j < MAX_ACTIVE_SPLIT_POINTS; j++)
+ lock_init(&(threads[i].splitPoints[j].lock), NULL);
// Will be set just before program exits to properly end the threads
AllThreadsShouldExit = false;
// Now we can safely destroy the locks
for (int i = 0; i < MAX_THREADS; i++)
- for (int j = 0; j < ACTIVE_SPLIT_POINTS_MAX; j++)
- lock_destroy(&(SplitPointStack[i][j].lock));
+ for (int j = 0; j < MAX_ACTIVE_SPLIT_POINTS; j++)
+ lock_destroy(&(threads[i].splitPoints[j].lock));
lock_destroy(&WaitLock);
lock_destroy(&MPLock);
// Apply the "helpful master" concept if possible. Use localActiveSplitPoints
// that is known to be > 0, instead of threads[slave].activeSplitPoints that
// could have been set to 0 by another thread leading to an out of bound access.
- if (SplitPointStack[slave][localActiveSplitPoints - 1].slaves[master])
+ if (threads[slave].splitPoints[localActiveSplitPoints - 1].slaves[master])
return true;
return false;
assert(p.thread() >= 0 && p.thread() < ActiveThreads);
assert(ActiveThreads > 1);
- int master = p.thread();
+ int i, master = p.thread();
+ Thread& masterThread = threads[master];
lock_grab(&MPLock);
// If no other thread is available to help us, or if we have too many
// active split points, don't split.
if ( !available_thread_exists(master)
- || threads[master].activeSplitPoints >= ACTIVE_SPLIT_POINTS_MAX)
+ || masterThread.activeSplitPoints >= MAX_ACTIVE_SPLIT_POINTS)
{
lock_release(&MPLock);
return;
}
// Pick the next available split point object from the split point stack
- SplitPoint* splitPoint = &SplitPointStack[master][threads[master].activeSplitPoints];
+ SplitPoint& splitPoint = masterThread.splitPoints[masterThread.activeSplitPoints++];
// Initialize the split point object
- splitPoint->parent = threads[master].splitPoint;
- splitPoint->stopRequest = false;
- splitPoint->ply = ply;
- splitPoint->depth = depth;
- splitPoint->mateThreat = mateThreat;
- splitPoint->alpha = *alpha;
- splitPoint->beta = beta;
- splitPoint->pvNode = pvNode;
- splitPoint->bestValue = *bestValue;
- splitPoint->mp = mp;
- splitPoint->moveCount = *moveCount;
- splitPoint->pos = &p;
- splitPoint->parentSstack = ss;
- for (int i = 0; i < ActiveThreads; i++)
- splitPoint->slaves[i] = 0;
-
- threads[master].splitPoint = splitPoint;
- threads[master].activeSplitPoints++;
+ splitPoint.parent = masterThread.splitPoint;
+ splitPoint.stopRequest = false;
+ splitPoint.ply = ply;
+ splitPoint.depth = depth;
+ splitPoint.mateThreat = mateThreat;
+ splitPoint.alpha = *alpha;
+ splitPoint.beta = beta;
+ splitPoint.pvNode = pvNode;
+ splitPoint.bestValue = *bestValue;
+ splitPoint.mp = mp;
+ splitPoint.moveCount = *moveCount;
+ splitPoint.pos = &p;
+ splitPoint.parentSstack = ss;
+ for (i = 0; i < ActiveThreads; i++)
+ splitPoint.slaves[i] = 0;
+
+ masterThread.splitPoint = &splitPoint;
// If we are here it means we are not available
- assert(threads[master].state != THREAD_AVAILABLE);
+ assert(masterThread.state != THREAD_AVAILABLE);
int workersCnt = 1; // At least the master is included
// Allocate available threads setting state to THREAD_BOOKED
- for (int i = 0; !Fake && i < ActiveThreads && workersCnt < MaxThreadsPerSplitPoint; i++)
+ for (i = 0; !Fake && i < ActiveThreads && workersCnt < MaxThreadsPerSplitPoint; i++)
if (thread_is_available(i, master))
{
threads[i].state = THREAD_BOOKED;
- threads[i].splitPoint = splitPoint;
- splitPoint->slaves[i] = 1;
+ threads[i].splitPoint = &splitPoint;
+ splitPoint.slaves[i] = 1;
workersCnt++;
}
// Tell the threads that they have work to do. This will make them leave
// their idle loop. But before copy search stack tail for each thread.
- for (int i = 0; i < ActiveThreads; i++)
- if (i == master || splitPoint->slaves[i])
+ for (i = 0; i < ActiveThreads; i++)
+ if (i == master || splitPoint.slaves[i])
{
- memcpy(splitPoint->sstack[i], ss - 1, 4 * sizeof(SearchStack));
+ memcpy(splitPoint.sstack[i], ss - 1, 4 * sizeof(SearchStack));
assert(i == master || threads[i].state == THREAD_BOOKED);
// THREAD_WORKISWAITING. We send the split point as a second parameter to the
// idle loop, which means that the main thread will return from the idle
// loop when all threads have finished their work at this split point.
- idle_loop(master, splitPoint);
+ idle_loop(master, &splitPoint);
// We have returned from the idle loop, which means that all threads are
// finished. Update alpha and bestValue, and return.
lock_grab(&MPLock);
- *alpha = splitPoint->alpha;
- *bestValue = splitPoint->bestValue;
- threads[master].activeSplitPoints--;
- threads[master].splitPoint = splitPoint->parent;
+ *alpha = splitPoint.alpha;
+ *bestValue = splitPoint.bestValue;
+ masterThread.activeSplitPoints--;
+ masterThread.splitPoint = splitPoint.parent;
lock_release(&MPLock);
}