X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=src%2Fsearch.cpp;h=71768fef5bcd294a0d851f98b110cf18bf3c85d8;hb=4a081280ed14629dab0bab1affd1cb4d32e8950a;hp=13c46eca0abce1e830fa2798775b767d5bb98b6d;hpb=2cec7347dbb8e85891ba1fde85736f1732f52dc0;p=stockfish

diff --git a/src/search.cpp b/src/search.cpp
index 13c46eca..71768fef 100644
--- a/src/search.cpp
+++ b/src/search.cpp
@@ -89,8 +89,8 @@ namespace {
     void idle_loop(int threadID, SplitPoint* sp);
 
     template <bool Fake>
-    bool split(const Position& pos, SearchStack* ss, int ply, Value* alpha, const Value beta, Value* bestValue,
-               Depth depth, bool mateThreat, int* moves, MovePicker* mp, int master, bool pvNode);
+    void split(const Position& pos, SearchStack* ss, int ply, Value* alpha, const Value beta, Value* bestValue,
+               Depth depth, bool mateThreat, int* moveCount, MovePicker* mp, int master, bool pvNode);
 
   private:
     friend void poll();
@@ -122,7 +122,7 @@ namespace {
     // RootMove::operator<() is the comparison function used when
     // sorting the moves. A move m1 is considered to be better
     // than a move m2 if it has a higher score, or if the moves
-    // have equal score but m1 has the higher node count.
+    // have equal score but m1 has the higher beta cut-off count.
     bool operator<(const RootMove& m) const {
 
         return score != m.score ? score < m.score : theirBeta <= m.theirBeta;
@@ -804,8 +804,8 @@ namespace {
     beta = *betaPtr;
     isCheck = pos.is_check();
 
-    // Step 1. Initialize node and poll (omitted at root, but I can see no good reason for this, FIXME)
-    // Step 2. Check for aborted search (omitted at root, because we do not initialize root node)
+    // Step 1. Initialize node and poll (omitted at root, init_ss_array() has already initialized root node)
+    // Step 2. Check for aborted search (omitted at root)
     // Step 3. Mate distance pruning (omitted at root)
     // Step 4. Transposition table lookup (omitted at root)
 
@@ -813,8 +813,6 @@ namespace {
     // At root we do this only to get reference value for child nodes
     if (!isCheck)
         ss[0].eval = evaluate(pos, ei, 0);
-    else
-        ss[0].eval = VALUE_NONE; // HACK because we do not initialize root node
 
     // Step 6. Razoring (omitted at root)
     // Step 7. Static null move pruning (omitted at root)
@@ -1097,7 +1095,7 @@ namespace {
     if (!PvNode && tte && ok_to_use_TT(tte, depth, beta, ply))
     {
         // Refresh tte entry to avoid aging
-        TT.store(posKey, tte->value(), tte->type(), tte->depth(), ttMove);
+        TT.store(posKey, tte->value(), tte->type(), tte->depth(), ttMove, tte->static_value(), tte->king_danger());
         ss[ply].currentMove = ttMove; // Can be MOVE_NONE
         return value_from_tt(tte->value(), ply);
     }
@@ -1108,8 +1106,11 @@ namespace {
     isCheck = pos.is_check();
 
     if (!isCheck)
     {
-        if (tte && (tte->type() & VALUE_TYPE_EVAL))
-            ss[ply].eval = value_from_tt(tte->value(), ply);
+        if (tte && tte->static_value() != VALUE_NONE)
+        {
+            ss[ply].eval = tte->static_value();
+            ei.kingDanger[pos.side_to_move()] = tte->king_danger();
+        }
         else
             ss[ply].eval = evaluate(pos, ei, threadID);
@@ -1285,7 +1286,9 @@ namespace {
               continue;
 
           // Value based pruning
-          Depth predictedDepth = newDepth - reduction(depth, moveCount); // FIXME We illogically ignore reduction condition depth >= 3*OnePly
+          // We illogically ignore reduction condition depth >= 3*OnePly for predicted depth,
+          // but fixing this made program slightly weaker.
+          Depth predictedDepth = newDepth - reduction(depth, moveCount);
           futilityValueScaled =  ss[ply].eval + futility_margin(predictedDepth, moveCount)
                                + H.gain(pos.piece_on(move_from(move)), move_to(move));
 
@@ -1353,6 +1356,7 @@ namespace {
                   alpha = value;
                   update_pv(ss, ply);
+
                   if (value == value_mate_in(ply + 1))
                       ss[ply].mateKiller = move;
               }
 
@@ -1365,10 +1369,9 @@ namespace {
           && Iteration <= 99
           && TM.available_thread_exists(threadID)
           && !AbortSearch
-          && !TM.thread_should_stop(threadID)
-          && TM.split(pos, ss, ply, &alpha, beta, &bestValue, depth,
-                      mateThreat, &moveCount, &mp, threadID, PvNode))
-          break;
+          && !TM.thread_should_stop(threadID))
+          TM.split(pos, ss, ply, &alpha, beta, &bestValue, depth,
+                   mateThreat, &moveCount, &mp, threadID, PvNode);
       }
 
       // Step 19. Check for mate and stalemate
@@ -1385,13 +1388,13 @@ namespace {
         return bestValue;
 
     if (bestValue <= oldAlpha)
-        TT.store(posKey, value_to_tt(bestValue, ply), VALUE_TYPE_UPPER, depth, MOVE_NONE);
+        TT.store(posKey, value_to_tt(bestValue, ply), VALUE_TYPE_UPPER, depth, MOVE_NONE, ss[ply].eval, ei.kingDanger[pos.side_to_move()]);
 
     else if (bestValue >= beta)
     {
         TM.incrementBetaCounter(pos.side_to_move(), depth, threadID);
         move = ss[ply].pv[ply];
-        TT.store(posKey, value_to_tt(bestValue, ply), VALUE_TYPE_LOWER, depth, move);
+        TT.store(posKey, value_to_tt(bestValue, ply), VALUE_TYPE_LOWER, depth, move, ss[ply].eval, ei.kingDanger[pos.side_to_move()]);
         if (!pos.move_is_capture_or_promotion(move))
         {
             update_history(pos, move, depth, movesSearched, moveCount);
@@ -1399,7 +1402,7 @@ namespace {
         }
     }
     else
-        TT.store(posKey, value_to_tt(bestValue, ply), VALUE_TYPE_EXACT, depth, ss[ply].pv[ply]);
+        TT.store(posKey, value_to_tt(bestValue, ply), VALUE_TYPE_EXACT, depth, ss[ply].pv[ply], ss[ply].eval, ei.kingDanger[pos.side_to_move()]);
 
     assert(bestValue > -VALUE_INFINITE && bestValue < VALUE_INFINITE);
 
@@ -1449,8 +1452,6 @@ namespace {
     if (!PvNode && tte && ok_to_use_TT(tte, depth, beta, ply))
     {
-        assert(tte->type() != VALUE_TYPE_EVAL);
-
         ss[ply].currentMove = ttMove; // Can be MOVE_NONE
         return value_from_tt(tte->value(), ply);
     }
 
@@ -1460,8 +1461,11 @@ namespace {
     // Evaluate the position statically
     if (isCheck)
         staticValue = -VALUE_INFINITE;
-    else if (tte && (tte->type() & VALUE_TYPE_EVAL))
-        staticValue = value_from_tt(tte->value(), ply);
+    else if (tte && tte->static_value() != VALUE_NONE)
+    {
+        staticValue = tte->static_value();
+        ei.kingDanger[pos.side_to_move()] = tte->king_danger();
+    }
     else
         staticValue = evaluate(pos, ei, threadID);
 
@@ -1478,8 +1482,8 @@ namespace {
     if (bestValue >= beta)
     {
         // Store the score to avoid a future costly evaluation() call
-        if (!isCheck && !tte && ei.kingDanger[pos.side_to_move()] == 0)
-            TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_EV_LO, Depth(-127*OnePly), MOVE_NONE);
+        if (!isCheck && !tte)
+            TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_LOWER, Depth(-127*OnePly), MOVE_NONE, ss[ply].eval, ei.kingDanger[pos.side_to_move()]);
 
         return bestValue;
     }
@@ -1577,20 +1581,19 @@ namespace {
     {
         // If bestValue isn't changed it means it is still the static evaluation
        // of the node, so keep this info to avoid a future evaluation() call.
-        ValueType type = (bestValue == staticValue && !ei.kingDanger[pos.side_to_move()] ? VALUE_TYPE_EV_UP : VALUE_TYPE_UPPER);
-        TT.store(pos.get_key(), value_to_tt(bestValue, ply), type, d, MOVE_NONE);
+        TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_UPPER, d, MOVE_NONE, ss[ply].eval, ei.kingDanger[pos.side_to_move()]);
     }
     else if (bestValue >= beta)
     {
         move = ss[ply].pv[ply];
-        TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_LOWER, d, move);
+        TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_LOWER, d, move, ss[ply].eval, ei.kingDanger[pos.side_to_move()]);
 
         // Update killers only for good checking moves
         if (!pos.move_is_capture_or_promotion(move))
             update_killers(move, ss[ply]);
     }
     else
-        TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_EXACT, d, ss[ply].pv[ply]);
+        TT.store(pos.get_key(), value_to_tt(bestValue, ply), VALUE_TYPE_EXACT, d, ss[ply].pv[ply], ss[ply].eval, ei.kingDanger[pos.side_to_move()]);
 
     assert(bestValue > -VALUE_INFINITE && bestValue < VALUE_INFINITE);
 
@@ -1634,7 +1637,7 @@ namespace {
            && (move = sp->mp->get_next_move()) != MOVE_NONE
            && !TM.thread_should_stop(threadID))
     {
-      moveCount = ++sp->moves;
+      moveCount = ++sp->moveCount;
       lock_release(&(sp->lock));
 
       assert(move_is_ok(move));
@@ -1740,7 +1743,6 @@ namespace {
     /* Here we have the lock still grabbed */
 
     sp->slaves[threadID] = 0;
-    sp->cpus--;
 
     lock_release(&(sp->lock));
   }
@@ -2406,12 +2408,15 @@ namespace {
               threads[threadID].state = THREAD_AVAILABLE;
           }
 
-          // If this thread is the master of a split point and all threads have
+          // If this thread is the master of a split point and all slaves have
           // finished their work at this split point, return from the idle loop.
-          if (sp && sp->cpus == 0)
+          int i = 0;
+          for ( ; sp && i < ActiveThreads && !sp->slaves[i]; i++) {}
+
+          if (i == ActiveThreads)
           {
-              // Because sp->cpus is decremented under lock protection,
-              // be sure sp->lock has been released before to proceed.
+              // Because sp->slaves[] is reset under lock protection,
+              // be sure sp->lock has been released before to return.
              lock_grab(&(sp->lock));
              lock_release(&(sp->lock));
 
@@ -2586,21 +2591,19 @@ namespace {
 
   // split() does the actual work of distributing the work at a node between
-  // several threads at PV nodes. If it does not succeed in splitting the
+  // several available threads. If it does not succeed in splitting the
   // node (because no idle threads are available, or because we have no unused
-  // split point objects), the function immediately returns false. If
-  // splitting is possible, a SplitPoint object is initialized with all the
-  // data that must be copied to the helper threads (the current position and
-  // search stack, alpha, beta, the search depth, etc.), and we tell our
-  // helper threads that they have been assigned work. This will cause them
-  // to instantly leave their idle loops and call sp_search_pv(). When all
-  // threads have returned from sp_search_pv (or, equivalently, when
-  // splitPoint->cpus becomes 0), split() returns true.
+  // split point objects), the function immediately returns. If splitting is
+  // possible, a SplitPoint object is initialized with all the data that must be
+  // copied to the helper threads and we tell our helper threads that they have
+  // been assigned work. This will cause them to instantly leave their idle loops
+  // and call sp_search(). When all threads have returned from sp_search() then
+  // split() returns.
 
   template <bool Fake>
-  bool ThreadsManager::split(const Position& p, SearchStack* sstck, int ply, Value* alpha,
+  void ThreadsManager::split(const Position& p, SearchStack* sstck, int ply, Value* alpha,
                              const Value beta, Value* bestValue, Depth depth, bool mateThreat,
-                             int* moves, MovePicker* mp, int master, bool pvNode) {
+                             int* moveCount, MovePicker* mp, int master, bool pvNode) {
 
     assert(p.is_ok());
     assert(sstck != NULL);
     assert(ply >= 0 && ply < PLY_MAX);
@@ -2612,8 +2615,6 @@ namespace {
     assert(master >= 0 && master < ActiveThreads);
     assert(ActiveThreads > 1);
 
-    SplitPoint* splitPoint;
-
     lock_grab(&MPLock);
 
     // If no other thread is available to help us, or if we have too many
@@ -2622,11 +2623,11 @@ namespace {
         || threads[master].activeSplitPoints >= ACTIVE_SPLIT_POINTS_MAX)
     {
         lock_release(&MPLock);
-        return false;
+        return;
     }
 
     // Pick the next available split point object from the split point stack
-    splitPoint = &SplitPointStack[master][threads[master].activeSplitPoints];
+    SplitPoint* splitPoint = &SplitPointStack[master][threads[master].activeSplitPoints];
 
     // Initialize the split point object
     splitPoint->parent = threads[master].splitPoint;
@@ -2638,10 +2639,8 @@ namespace {
     splitPoint->beta = beta;
     splitPoint->pvNode = pvNode;
     splitPoint->bestValue = *bestValue;
-    splitPoint->master = master;
     splitPoint->mp = mp;
-    splitPoint->moves = *moves;
-    splitPoint->cpus = 1;
+    splitPoint->moveCount = *moveCount;
     splitPoint->pos = &p;
     splitPoint->parentSstack = sstck;
     for (int i = 0; i < ActiveThreads; i++)
@@ -2653,17 +2652,19 @@ namespace {
     // If we are here it means we are not available
     assert(threads[master].state != THREAD_AVAILABLE);
 
+    int workersCnt = 1; // At least the master is included
+
     // Allocate available threads setting state to THREAD_BOOKED
-    for (int i = 0; !Fake && i < ActiveThreads && splitPoint->cpus < MaxThreadsPerSplitPoint; i++)
+    for (int i = 0; !Fake && i < ActiveThreads && workersCnt < MaxThreadsPerSplitPoint; i++)
         if (thread_is_available(i, master))
         {
            threads[i].state = THREAD_BOOKED;
           threads[i].splitPoint = splitPoint;
           splitPoint->slaves[i] = 1;
-           splitPoint->cpus++;
+           workersCnt++;
        }
 
-    assert(Fake || splitPoint->cpus > 1);
+    assert(Fake || workersCnt > 1);
 
     // We can release the lock because slave threads are already booked and master is not available
     lock_release(&MPLock);
@@ -2684,8 +2685,7 @@ namespace {
     // which it will instantly launch a search, because its state is
     // THREAD_WORKISWAITING. We send the split point as a second parameter to the
     // idle loop, which means that the main thread will return from the idle
-    // loop when all threads have finished their work at this split point
-    // (i.e. when splitPoint->cpus == 0).
+    // loop when all threads have finished their work at this split point.
     idle_loop(master, splitPoint);
 
     // We have returned from the idle loop, which means that all threads are
@@ -2698,7 +2698,6 @@ namespace {
     threads[master].splitPoint = splitPoint->parent;
 
     lock_release(&MPLock);
 
-    return true;
   }
 
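Note: the recurring change in the hunks above is that TT.store() now receives the node's static evaluation and king danger as extra arguments, and tte->static_value() / tte->king_danger() are read back instead of the old VALUE_TYPE_EVAL entries, so a later visit of the same position can skip evaluate(). The following is only a minimal, self-contained C++ sketch of that caching idea; CacheEntry, table and evaluate_slow are illustrative stand-ins, not Stockfish's actual types or API.

#include <cstdint>
#include <iostream>
#include <unordered_map>

const int VALUE_NONE = 30002;          // "no value yet" marker, as in the patch

struct CacheEntry {
    int value      = VALUE_NONE;       // search score (unused in this sketch)
    int staticEval = VALUE_NONE;       // cached result of the expensive evaluation
    int kingDanger = 0;                // cached king danger term
};

std::unordered_map<uint64_t, CacheEntry> table;   // stand-in for the transposition table

int evaluate_slow(uint64_t key) {                 // stand-in for evaluate()
    std::cout << "evaluate() called\n";
    return int(key % 100) - 50;
}

int static_eval(uint64_t key) {
    CacheEntry& e = table[key];
    if (e.staticEval == VALUE_NONE)               // first visit: compute and cache
        e.staticEval = evaluate_slow(key);
    return e.staticEval;                          // later visits reuse the stored value
}

int main() {
    std::cout << static_eval(12345) << '\n';      // triggers evaluate_slow()
    std::cout << static_eval(12345) << '\n';      // served from the cache
}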
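Note: the split-point bookkeeping is also reworked. The shared sp->cpus counter is removed; the master now scans SplitPoint::slaves[] in idle_loop() and leaves only when every flag is clear, while the workersCnt local in split() replaces the counter on the allocation side. Below is a small stand-alone sketch of that completion test with a plain array standing in for the real SplitPoint and no actual threading; it mirrors the loop added in idle_loop() but is not the engine's code.

#include <iostream>

const int MAX_THREADS = 8;

struct SplitPointSketch {
    int slaves[MAX_THREADS] = {};      // slaves[i] == 1 while helper thread i still works here
};

// True when no slave flag is set, i.e. all helpers have finished at this split point.
bool all_slaves_finished(const SplitPointSketch& sp, int activeThreads) {
    int i = 0;
    for ( ; i < activeThreads && !sp.slaves[i]; i++) {}
    return i == activeThreads;
}

int main() {
    SplitPointSketch sp;
    sp.slaves[2] = 1;                                  // helper 2 still busy
    std::cout << all_slaves_finished(sp, 4) << '\n';   // prints 0
    sp.slaves[2] = 0;                                  // helper 2 done
    std::cout << all_slaves_finished(sp, 4) << '\n';   // prints 1
}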