X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=src%2Fsearch.cpp;h=decd51ce4abedebee717f553b75e983141dfc46c;hb=e63ab4bd0342be592e2490699e9e97ffccf7e0af;hp=3ca08aec2510bc8534bbdb405a0defbfb49c3bbb;hpb=c053f0b838662b2c6fd32beb3999b11c2c968ad9;p=stockfish

diff --git a/src/search.cpp b/src/search.cpp
index 3ca08aec..decd51ce 100644
--- a/src/search.cpp
+++ b/src/search.cpp
@@ -804,8 +804,8 @@ namespace {
     beta = *betaPtr;
     isCheck = pos.is_check();
 
-    // Step 1. Initialize node and poll (omitted at root, but I can see no good reason for this, FIXME)
-    // Step 2. Check for aborted search (omitted at root, because we do not initialize root node)
+    // Step 1. Initialize node and poll (omitted at root, init_ss_array() has already initialized root node)
+    // Step 2. Check for aborted search (omitted at root)
     // Step 3. Mate distance pruning (omitted at root)
     // Step 4. Transposition table lookup (omitted at root)
 
@@ -813,8 +813,6 @@ namespace {
     // At root we do this only to get reference value for child nodes
     if (!isCheck)
         ss[0].eval = evaluate(pos, ei, 0);
-    else
-        ss[0].eval = VALUE_NONE; // HACK because we do not initialize root node
 
     // Step 6. Razoring (omitted at root)
     // Step 7. Static null move pruning (omitted at root)
@@ -1285,7 +1283,9 @@ namespace {
               continue;
 
           // Value based pruning
-          Depth predictedDepth = newDepth - reduction(depth, moveCount); // FIXME We illogically ignore reduction condition depth >= 3*OnePly
+          // We illogically ignore the reduction condition depth >= 3*OnePly for predicted depth,
+          // but fixing this made the program slightly weaker.
+          Depth predictedDepth = newDepth - reduction(depth, moveCount);
           futilityValueScaled =  ss[ply].eval + futility_margin(predictedDepth, moveCount)
                                + H.gain(pos.piece_on(move_from(move)), move_to(move));
 
@@ -1353,6 +1353,7 @@ namespace {
                   alpha = value;
 
               update_pv(ss, ply);
+
               if (value == value_mate_in(ply + 1))
                   ss[ply].mateKiller = move;
           }
@@ -1631,8 +1632,8 @@ namespace {
     lock_grab(&(sp->lock));
 
     while (    sp->bestValue < sp->beta
-           && !TM.thread_should_stop(threadID)
-           && (move = sp->mp->get_next_move()) != MOVE_NONE)
+           && (move = sp->mp->get_next_move()) != MOVE_NONE
+           && !TM.thread_should_stop(threadID))
     {
       moveCount = ++sp->moves;
       lock_release(&(sp->lock));
@@ -1697,7 +1698,7 @@ namespace {
           {
               Value localAlpha = sp->alpha;
               value = -search(pos, ss, -(localAlpha+1), -localAlpha, newDepth-ss[sp->ply].reduction, sp->ply+1, true, threadID);
-              doFullDepthSearch = (value > localAlpha && !TM.thread_should_stop(threadID));
+              doFullDepthSearch = (value > localAlpha);
           }
       }
 
@@ -1708,7 +1709,7 @@ namespace {
           Value localAlpha = sp->alpha;
           value = -search(pos, ss, -(localAlpha+1), -localAlpha, newDepth, sp->ply+1, true, threadID);
 
-          if (PvNode && value > localAlpha && value < sp->beta && !TM.thread_should_stop(threadID))
+          if (PvNode && value > localAlpha && value < sp->beta)
               value = -search(pos, ss, -sp->beta, -sp->alpha, newDepth, sp->ply+1, false, threadID);
       }
 
@@ -1733,9 +1734,6 @@ namespace {
                   sp->alpha = value;
 
               sp_update_pv(sp->parentSstack, ss, sp->ply);
-
-              if (PvNode && value == value_mate_in(sp->ply + 1))
-                  ss[sp->ply].mateKiller = move;
           }
       }
   }
@@ -1743,7 +1741,6 @@ namespace {
   /* Here we have the lock still grabbed */
 
   sp->slaves[threadID] = 0;
-  sp->cpus--;
 
   lock_release(&(sp->lock));
 }
@@ -2409,12 +2406,15 @@ namespace {
             threads[threadID].state = THREAD_AVAILABLE;
         }
 
-        // If this thread is the master of a split point and all threads have
+        // If this thread is the master of a split point and all slaves have
         // finished their work at this split point, return from the idle loop.
-        if (sp && sp->cpus == 0)
+        int i = 0;
+        for ( ; sp && i < ActiveThreads && !sp->slaves[i]; i++) {}
+
+        if (i == ActiveThreads)
         {
-            // Because sp->cpus is decremented under lock protection,
-            // be sure sp->lock has been released before to proceed.
+            // Because sp->slaves[] is reset under lock protection,
+            // be sure sp->lock has been released before returning.
             lock_grab(&(sp->lock));
             lock_release(&(sp->lock));
 
@@ -2596,9 +2596,8 @@ namespace {
   // data that must be copied to the helper threads (the current position and
   // search stack, alpha, beta, the search depth, etc.), and we tell our
   // helper threads that they have been assigned work. This will cause them
-  // to instantly leave their idle loops and call sp_search_pv(). When all
-  // threads have returned from sp_search_pv (or, equivalently, when
-  // splitPoint->cpus becomes 0), split() returns true.
+  // to instantly leave their idle loops and call sp_search(). When all
+  // threads have returned from sp_search(), split() returns true.
 
   template <bool Fake>
   bool ThreadsManager::split(const Position& p, SearchStack* sstck, int ply, Value* alpha,
@@ -2644,7 +2643,6 @@ namespace {
     splitPoint->master = master;
     splitPoint->mp = mp;
     splitPoint->moves = *moves;
-    splitPoint->cpus = 1;
     splitPoint->pos = &p;
     splitPoint->parentSstack = sstck;
     for (int i = 0; i < ActiveThreads; i++)
@@ -2656,17 +2654,19 @@ namespace {
     // If we are here it means we are not available
     assert(threads[master].state != THREAD_AVAILABLE);
 
+    int workersCnt = 1; // At least the master is included
+
     // Allocate available threads setting state to THREAD_BOOKED
-    for (int i = 0; !Fake && i < ActiveThreads && splitPoint->cpus < MaxThreadsPerSplitPoint; i++)
+    for (int i = 0; !Fake && i < ActiveThreads && workersCnt < MaxThreadsPerSplitPoint; i++)
         if (thread_is_available(i, master))
         {
             threads[i].state = THREAD_BOOKED;
             threads[i].splitPoint = splitPoint;
             splitPoint->slaves[i] = 1;
-            splitPoint->cpus++;
+            workersCnt++;
         }
 
-    assert(Fake || splitPoint->cpus > 1);
+    assert(Fake || workersCnt > 1);
 
     // We can release the lock because slave threads are already booked and master is not available
     lock_release(&MPLock);
@@ -2687,8 +2687,7 @@ namespace {
     // which it will instantly launch a search, because its state is
     // THREAD_WORKISWAITING. We send the split point as a second parameter to the
     // idle loop, which means that the main thread will return from the idle
-    // loop when all threads have finished their work at this split point
-    // (i.e. when splitPoint->cpus == 0).
+    // loop when all threads have finished their work at this split point.
     idle_loop(master, splitPoint);
 
     // We have returned from the idle loop, which means that all threads are
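
The bookkeeping change in the patch replaces the shared splitPoint->cpus counter with a scan of the slaves[] flags: each helper clears its own slot when it leaves sp_search(), and the master leaves the idle loop once every slot is clear, while split() only needs a local workersCnt to enforce MaxThreadsPerSplitPoint. What follows is a minimal, self-contained C++ sketch of that idea, not the Stockfish code itself; SplitPoint, MaxThreads, slave_work() and the std::mutex/std::thread plumbing here are simplified stand-ins for the real ThreadsManager machinery.

// split_point_sketch.cpp -- illustrative stand-in for the slaves[] bookkeeping above.
#include <array>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

constexpr int MaxThreads = 4;   // stand-in for ActiveThreads

struct SplitPoint {
    std::mutex lock;                        // plays the role of sp->lock
    std::array<bool, MaxThreads> slaves{};  // plays the role of sp->slaves[]
};

// A helper clears its own slot under the lock when its share of the work is
// done, mirroring "sp->slaves[threadID] = 0" at the end of sp_search().
void slave_work(SplitPoint& sp, int threadID) {
    // ... searching the moves handed out by the shared MovePicker would go here ...
    std::lock_guard<std::mutex> guard(sp.lock);
    sp.slaves[threadID] = false;
}

// The master's completion test: the split point is finished when no slave
// slot is still set. The patch does this scan lock-free in idle_loop() and
// then grabs/releases sp->lock once; here the scan itself takes the lock,
// which is simpler but serves the same purpose in this illustration.
bool all_slaves_finished(SplitPoint& sp) {
    std::lock_guard<std::mutex> guard(sp.lock);
    for (int i = 0; i < MaxThreads; ++i)
        if (sp.slaves[i])
            return false;
    return true;
}

int main() {
    SplitPoint sp;

    // The master books helpers 1..3; its own slot (0) stays cleared, just as
    // split() never sets slaves[master], so the scan only waits on the slaves.
    for (int i = 1; i < MaxThreads; ++i)
        sp.slaves[i] = true;

    std::vector<std::thread> helpers;
    for (int i = 1; i < MaxThreads; ++i)
        helpers.emplace_back(slave_work, std::ref(sp), i);

    // The master normally keeps searching its own moves; here it simply polls
    // until every slave has reported back, then "returns from the idle loop".
    while (!all_slaves_finished(sp))
        std::this_thread::yield();

    for (std::thread& t : helpers)
        t.join();

    std::printf("all slaves finished, master leaves the idle loop\n");
    return 0;
}

One consequence visible in the patch is that there is no longer a shared counter to keep consistent across sp_search(), idle_loop() and split(): completion is derived from slaves[] alone, which is why sp->cpus-- and the cpus-based assertions can simply be deleted.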