X-Git-Url: https://git.sesse.net/?p=stockfish;a=blobdiff_plain;f=src%2Fsearch.cpp;h=18c520f15fd71aebec2becbe72463612b5d84df0;hp=c188469146eb074ae85ea6c72cef191fdaa014a2;hb=e74c2df907d5336d3d2b8ee7748b82270ebbf337;hpb=60c121f3b1ee7d5ced3435cc1718e4e6e6fd8383

diff --git a/src/search.cpp b/src/search.cpp
index c1884691..18c520f1 100644
--- a/src/search.cpp
+++ b/src/search.cpp
@@ -152,19 +152,19 @@ uint64_t Search::perft(Position& pos, Depth depth) {
   CheckInfo ci(pos);
   const bool leaf = (depth == 2 * ONE_PLY);

-  for (const ExtMove& ms : MoveList<LEGAL>(pos))
+  for (const auto& m : MoveList<LEGAL>(pos))
   {
       if (Root && depth <= ONE_PLY)
           cnt = 1, nodes++;
       else
       {
-          pos.do_move(ms.move, st, ci, pos.gives_check(ms.move, ci));
+          pos.do_move(m, st, pos.gives_check(m, ci));
           cnt = leaf ? MoveList<LEGAL>(pos).size() : perft<false>(pos, depth - ONE_PLY);
           nodes += cnt;
-          pos.undo_move(ms.move);
+          pos.undo_move(m);
       }
       if (Root)
-          sync_cout << UCI::move(ms.move, pos.is_chess960()) << ": " << cnt << sync_endl;
+          sync_cout << UCI::move(m, pos.is_chess960()) << ": " << cnt << sync_endl;
   }
   return nodes;
 }
@@ -199,7 +199,7 @@ void Search::think() {

   if (RootMoves.empty())
   {
-      RootMoves.push_back(MOVE_NONE);
+      RootMoves.push_back(RootMove(MOVE_NONE));
       sync_cout << "info depth 0 score "
                 << UCI::value(RootPos.checkers() ? -VALUE_MATE : VALUE_DRAW)
                 << sync_endl;
@@ -694,7 +694,7 @@ namespace {
             if (pos.legal(move, ci.pinned))
             {
                 ss->currentMove = move;
-                pos.do_move(move, st, ci, pos.gives_check(move, ci));
+                pos.do_move(move, st, pos.gives_check(move, ci));
                 value = -search<NonPV, false>(pos, ss+1, -rbeta, -rbeta+1, rdepth, !cutNode);
                 pos.undo_move(move);
                 if (value >= rbeta)
@@ -872,7 +872,7 @@ moves_loop: // When in check and at SpNode search starts from here
       }

       // Speculative prefetch as early as possible
-      prefetch((char*)TT.first_entry(pos.key_after(move)));
+      prefetch(TT.first_entry(pos.key_after(move)));

       // Check for legality just before making the move
       if (!RootNode && !SpNode && !pos.legal(move, ci.pinned))
@@ -886,7 +886,7 @@ moves_loop: // When in check and at SpNode search starts from here
           quietsSearched[quietCount++] = move;

       // Step 14. Make the move
-      pos.do_move(move, st, ci, givesCheck);
+      pos.do_move(move, st, givesCheck);

       // Step 15. Reduced depth search (LMR). If the move fails high it will be
       // re-searched at full depth.
@@ -1034,7 +1034,9 @@ moves_loop: // When in check and at SpNode search starts from here
           &&  Threads.size() >= 2
           &&  depth >= Threads.minimumSplitDepth
           &&  (   !thisThread->activeSplitPoint
-               || !thisThread->activeSplitPoint->allSlavesSearching)
+               || !thisThread->activeSplitPoint->allSlavesSearching
+               || (   Threads.size() > MAX_SLAVES_PER_SPLITPOINT
+                   && thisThread->activeSplitPoint->slavesMask.count() == MAX_SLAVES_PER_SPLITPOINT))
           &&  thisThread->splitPointsSize < MAX_SPLITPOINTS_PER_THREAD)
       {
           assert(bestValue > -VALUE_INFINITE && bestValue < beta);
@@ -1238,7 +1240,7 @@ moves_loop: // When in check and at SpNode search starts from here
               continue;

       // Speculative prefetch as early as possible
-      prefetch((char*)TT.first_entry(pos.key_after(move)));
+      prefetch(TT.first_entry(pos.key_after(move)));

       // Check for legality just before making the move
       if (!pos.legal(move, ci.pinned))
@@ -1247,7 +1249,7 @@ moves_loop: // When in check and at SpNode search starts from here
       ss->currentMove = move;

       // Make and search the move
-      pos.do_move(move, st, ci, givesCheck);
+      pos.do_move(move, st, givesCheck);
       value = givesCheck ? -qsearch<NT,  true>(pos, ss+1, -beta, -alpha, depth - ONE_PLY)
                          : -qsearch<NT, false>(pos, ss+1, -beta, -alpha, depth - ONE_PLY);
       pos.undo_move(move);
@@ -1383,15 +1385,13 @@ moves_loop: // When in check and at SpNode search starts from here
     // then we choose the move with the resulting highest score.
     for (size_t i = 0; i < multiPV; ++i)
     {
-        int score = RootMoves[i].score;
-
         // This is our magic formula
-        score += (  weakness * int(RootMoves[0].score - score)
-                  + variance * (rng.rand<unsigned>() % weakness)) / 128;
+        int push = (  weakness * int(RootMoves[0].score - RootMoves[i].score)
+                    + variance * (rng.rand<unsigned>() % weakness)) / 128;

-        if (score > maxScore)
+        if (RootMoves[i].score + push > maxScore)
         {
-            maxScore = score;
+            maxScore = RootMoves[i].score + push;
             best = RootMoves[i].pv[0];
         }
     }
@@ -1444,7 +1444,7 @@ string UCI::pv(const Position& pos, Depth depth, Value alpha, Value beta) {
          << " nps "       << pos.nodes_searched() * 1000 / elapsed;

       if (elapsed > 1000) // Earlier makes little sense
-         ss << " hashfull " << TT.hashfull();
+          ss << " hashfull " << TT.hashfull();

       ss << " tbhits "    << TB::Hits
          << " time "      << elapsed
@@ -1476,7 +1476,7 @@ void RootMove::insert_pv_in_tt(Position& pos) {
       if (!ttHit || tte->move() != m) // Don't overwrite correct entries
           tte->save(pos.key(), VALUE_NONE, BOUND_NONE, DEPTH_NONE, m, VALUE_NONE, TT.generation());

-      pos.do_move(m, *st++);
+      pos.do_move(m, *st++, pos.gives_check(m, CheckInfo(pos)));
   }

   for (size_t i = pv.size(); i > 0; )
@@ -1496,7 +1496,7 @@ bool RootMove::extract_ponder_from_tt(Position& pos)

     assert(pv.size() == 1);

-    pos.do_move(pv[0], st);
+    pos.do_move(pv[0], st, pos.gives_check(pv[0], CheckInfo(pos)));
     TTEntry* tte = TT.probe(pos.key(), ttHit);
     pos.undo_move(pv[0]);

@@ -1517,9 +1517,9 @@ void Thread::idle_loop() {

   // Pointer 'this_sp' is not null only if we are called from split(), and not
   // at the thread creation. This means we are the split point's master.
-  SplitPoint* this_sp = splitPointsSize ? activeSplitPoint : nullptr;
+  SplitPoint* this_sp = activeSplitPoint;

-  assert(!this_sp || (this_sp->masterThread == this && searching));
+  assert(!this_sp || (this_sp->master == this && searching));

   while (!exit)
   {
@@ -1529,6 +1529,7 @@ void Thread::idle_loop() {
           Threads.mutex.lock();

           assert(activeSplitPoint);
+          SplitPoint* sp = activeSplitPoint;

           Threads.mutex.unlock();

@@ -1567,11 +1568,11 @@ void Thread::idle_loop() {

           // Wake up the master thread so to allow it to return from the idle
           // loop in case we are the last slave of the split point.
-          if (   this != sp->masterThread
-              && sp->slavesMask.none())
+          if (this != sp->master && sp->slavesMask.none())
           {
-              assert(!sp->masterThread->searching);
-              sp->masterThread->notify_one();
+              assert(!sp->master->searching);
+
+              sp->master->notify_one();
           }

           // After releasing the lock we can't access any SplitPoint related data
@@ -1581,37 +1582,62 @@ void Thread::idle_loop() {

           // Try to late join to another split point if none of its slaves has
           // already finished.
-          if (Threads.size() > 2)
-              for (size_t i = 0; i < Threads.size(); ++i)
+          SplitPoint* bestSp = NULL;
+          int bestScore = INT_MAX;
+
+          for (Thread* th : Threads)
+          {
+              const size_t size = th->splitPointsSize; // Local copy
+              sp = size ? &th->splitPoints[size - 1] : nullptr;
+
+              if (   sp
+                  && sp->allSlavesSearching
+                  && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
+                  && available_to(th))
               {
-                  const int size = Threads[i]->splitPointsSize; // Local copy
-                  sp = size ? &Threads[i]->splitPoints[size - 1] : nullptr;
+                  assert(this != th);
+                  assert(!(this_sp && this_sp->slavesMask.none()));
+                  assert(Threads.size() > 2);
+
+                  // Prefer to join to SP with few parents to reduce the probability
+                  // that a cut-off occurs above us, and hence we waste our work.
+                  int level = -1;
+                  for (SplitPoint* spp = th->activeSplitPoint; spp; spp = spp->parentSplitPoint)
+                      level++;

-                  if (   sp
-                      && sp->allSlavesSearching
-                      && available_to(Threads[i]))
+                  int score = level * 256 * 256 + (int)sp->slavesMask.count() * 256 - sp->depth * 1;
+
+                  if (score < bestScore)
                   {
-                      // Recheck the conditions under lock protection
-                      Threads.mutex.lock();
-                      sp->mutex.lock();
-
-                      if (   sp->allSlavesSearching
-                          && available_to(Threads[i]))
-                      {
-                          sp->slavesMask.set(idx);
-                          activeSplitPoint = sp;
-                          searching = true;
-                      }
-
-                      sp->mutex.unlock();
-                      Threads.mutex.unlock();
-
-                      break; // Just a single attempt
+                      bestSp = sp;
+                      bestScore = score;
                   }
               }
+          }
+
+          if (bestSp)
+          {
+              sp = bestSp;
+
+              // Recheck the conditions under lock protection
+              Threads.mutex.lock();
+              sp->mutex.lock();
+
+              if (   sp->allSlavesSearching
+                  && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
+                  && available_to(sp->master))
+              {
+                  sp->slavesMask.set(idx);
+                  activeSplitPoint = sp;
+                  searching = true;
+              }
+
+              sp->mutex.unlock();
+              Threads.mutex.unlock();
+          }
       }

-      // Grab the lock to avoid races with Thread::notify_one()
+      // Avoid races with notify_one() fired from last slave of the split point
       std::unique_lock<std::mutex> lk(mutex);

       // If we are master and all slaves have finished then exit idle_loop
@@ -1670,7 +1696,7 @@ void check_time() {
       // Loop across all split points and sum accumulated SplitPoint nodes plus
       // all the currently active positions nodes.
       for (Thread* th : Threads)
-          for (int i = 0; i < th->splitPointsSize; ++i)
+          for (size_t i = 0; i < th->splitPointsSize; ++i)
           {
               SplitPoint& sp = th->splitPoints[i];
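
The widened split condition in the @@ -1034 hunk can be read as a small predicate: a thread may open a new split point when it has no active split point, when some of its slaves have stopped searching, or (the new case) when the thread pool is larger than MAX_SLAVES_PER_SPLITPOINT and the active split point is already saturated with that many helpers. The sketch below restates the condition with simplified stand-in types and an illustrative constant value; it is not Stockfish code, only a compilable summary of the logic.

  // Compilable summary of the widened split condition (simplified, not Stockfish).
  #include <cassert>
  #include <cstddef>

  constexpr std::size_t MaxSlavesPerSplitPoint = 4; // illustrative value only

  struct ToyActiveSplitPoint {
      bool allSlavesSearching;   // stands in for SplitPoint::allSlavesSearching
      std::size_t slaveCount;    // stands in for slavesMask.count()
  };

  // Mirrors the condition: no active split point, or an idle slave, or a big
  // thread pool whose active split point is already full.
  bool may_split(const ToyActiveSplitPoint* active, std::size_t threadCount) {
      return   !active
            || !active->allSlavesSearching
            || (   threadCount > MaxSlavesPerSplitPoint
                && active->slaveCount == MaxSlavesPerSplitPoint);
  }

  int main() {
      ToyActiveSplitPoint saturated{true, MaxSlavesPerSplitPoint};
      assert( may_split(nullptr, 8));      // no active split point
      assert( may_split(&saturated, 8));   // large pool, saturated split point: now allowed
      assert(!may_split(&saturated, 4));   // small pool: still refused
      return 0;
  }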
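
The Skill::pick_move rewrite in the @@ -1383 hunk replaces the in-place "score +=" update with an explicit "push" bonus; the two formulations are arithmetically identical. The sketch below checks that equivalence with plain ints and illustrative stand-in names (topScore, moveScore, rnd) for RootMoves[0].score, RootMoves[i].score and the PRNG draw; it is not Stockfish code.

  // Equivalence check for the Skill "push" rewrite (simplified, not Stockfish).
  #include <cassert>
  #include <initializer_list>

  // Old form: bias the move's own score in place.
  int biased_score_old(int topScore, int moveScore, int weakness, int variance, int rnd) {
      int score = moveScore;
      score += (  weakness * (topScore - score)
                + variance * (rnd % weakness)) / 128;
      return score;
  }

  // New form: compute the bonus ("push") separately and add it when comparing.
  int biased_score_new(int topScore, int moveScore, int weakness, int variance, int rnd) {
      int push = (  weakness * (topScore - moveScore)
                  + variance * (rnd % weakness)) / 128;
      return moveScore + push;
  }

  int main() {
      // A few arbitrary inputs; both formulations must agree on all of them.
      for (int rnd : {7, 98765, 1234567})
          assert(   biased_score_old(120, 35, 80, 60, rnd)
                 == biased_score_new(120, 35, 80, 60, rnd));
      return 0;
  }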
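
The new late-join loop in the @@ -1581 hunk ranks candidate split points by score = level * 256 * 256 + slaves * 256 - depth and joins the lowest-scoring one: it prefers split points with few ancestors (a cut-off above is then less likely to discard the work), then those with fewer slaves already attached, then those with greater remaining depth. The sketch below reproduces only that ranking with a simplified stand-in struct; it is not Stockfish code.

  // Ranking used to pick a split point to late-join (simplified, not Stockfish).
  #include <climits>
  #include <cstdio>
  #include <initializer_list>

  struct ToySplitPoint {
      const ToySplitPoint* parent;  // stands in for parentSplitPoint
      int slaves;                   // stands in for slavesMask.count()
      int depth;                    // remaining depth at the split point
  };

  // Lower is better: few ancestors first, then few attached slaves, then deeper searches.
  int late_join_score(const ToySplitPoint& sp) {
      int level = -1;
      for (const ToySplitPoint* p = &sp; p; p = p->parent)
          level++;
      return level * 256 * 256 + sp.slaves * 256 - sp.depth;
  }

  int main() {
      ToySplitPoint top   {nullptr, 3, 20};   // no ancestors, three slaves, depth 20
      ToySplitPoint nested{&top,    1, 14};   // one ancestor, one slave, depth 14

      const ToySplitPoint* best = nullptr;
      int bestScore = INT_MAX;
      for (const ToySplitPoint* sp : {&top, &nested})
          if (late_join_score(*sp) < bestScore)
          {
              bestScore = late_join_score(*sp);
              best = sp;
          }

      // The shallow split point wins despite having more slaves attached.
      std::printf("best depth %d, score %d\n", best->depth, bestScore);
      return 0;
  }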