<< endl;
}
- for (int i = 0; i < Threads.size(); i++)
- {
- Threads[i].maxPly = 0;
- Threads[i].wake_up();
- }
+ Threads.set_size(Options["Threads"]);
// Set best timer interval to avoid lagging under time pressure. Timer is
// used to check for remaining available thinking time.
// but if we are pondering or in infinite search, we shouldn't print the best
// move before we are told to do so.
if (!Signals.stop && (Limits.ponder || Limits.infinite))
- Threads.wait_for_stop_or_ponderhit();
+ Threads[pos.thread()].wait_for_stop_or_ponderhit();
// Best move could be MOVE_NONE when searching on a stalemate position
cout << "bestmove " << move_to_uci(RootMoves[0].pv[0], Chess960)
if (SkillLevelEnabled && depth == 1 + SkillLevel)
skillBest = do_skill_level();
- if (Options["Use Search Log"])
+ if (!Signals.stop && Options["Use Search Log"])
pv_info_to_log(pos, depth, bestValue, elapsed_time(), &RootMoves[0].pv[0]);
// Filter out startup noise when monitoring best move stability
Key posKey;
Move ttMove, move, excludedMove, threatMove;
Depth ext, newDepth;
- ValueType vt;
+ Bound bt;
Value bestValue, value, oldAlpha;
Value refinedValue, nullValue, futilityBase, futilityValue;
bool isPvMove, inCheck, singularExtensionNode, givesCheck;
// a fail high/low. Biggest advantage at probing at PV nodes is to have a
// smooth experience in analysis mode. We don't probe at Root nodes otherwise
// we should also update RootMoveList to avoid bogus output.
- if (!RootNode && tte && (PvNode ? tte->depth() >= depth && tte->type() == VALUE_TYPE_EXACT
+ if (!RootNode && tte && (PvNode ? tte->depth() >= depth && tte->type() == BOUND_EXACT
: can_return_tt(tte, depth, beta, ss->ply)))
{
TT.refresh(tte);
else
{
refinedValue = ss->eval = evaluate(pos, ss->evalMargin);
- TT.store(posKey, VALUE_NONE, VALUE_TYPE_NONE, DEPTH_NONE, MOVE_NONE, ss->eval, ss->evalMargin);
+ TT.store(posKey, VALUE_NONE, BOUND_NONE, DEPTH_NONE, MOVE_NONE, ss->eval, ss->evalMargin);
}
// Update gain for the parent non-capture move given the static position
&& depth >= SingularExtensionDepth[PvNode]
&& ttMove != MOVE_NONE
&& !excludedMove // Recursive singular search is not allowed
- && (tte->type() & VALUE_TYPE_LOWER)
+ && (tte->type() & BOUND_LOWER)
&& tte->depth() >= depth - 3 * ONE_PLY;
// Step 11. Loop through moves
sp->bestValue = value;
sp->ss->bestMove = move;
sp->alpha = alpha;
- sp->is_betaCutoff = (value >= beta);
+
+ if (value >= beta)
+ sp->cutoff = true;
}
}
if (!SpNode && !Signals.stop && !thread.cutoff_occurred())
{
move = bestValue <= oldAlpha ? MOVE_NONE : ss->bestMove;
- vt = bestValue <= oldAlpha ? VALUE_TYPE_UPPER
- : bestValue >= beta ? VALUE_TYPE_LOWER : VALUE_TYPE_EXACT;
+ bt = bestValue <= oldAlpha ? BOUND_UPPER
+ : bestValue >= beta ? BOUND_LOWER : BOUND_EXACT;
- TT.store(posKey, value_to_tt(bestValue, ss->ply), vt, depth, move, ss->eval, ss->evalMargin);
+ TT.store(posKey, value_to_tt(bestValue, ss->ply), bt, depth, move, ss->eval, ss->evalMargin);
// Update killers and history for non capture cut-off moves
if ( bestValue >= beta
bool inCheck, enoughMaterial, givesCheck, evasionPrunable;
const TTEntry* tte;
Depth ttDepth;
- ValueType vt;
+ Bound bt;
Value oldAlpha = alpha;
ss->bestMove = ss->currentMove = MOVE_NONE;
if (bestValue >= beta)
{
if (!tte)
- TT.store(pos.key(), value_to_tt(bestValue, ss->ply), VALUE_TYPE_LOWER, DEPTH_NONE, MOVE_NONE, ss->eval, evalMargin);
+ TT.store(pos.key(), value_to_tt(bestValue, ss->ply), BOUND_LOWER, DEPTH_NONE, MOVE_NONE, ss->eval, evalMargin);
return bestValue;
}
&& !pos.is_capture_or_promotion(move)
&& ss->eval + PawnValueMidgame / 4 < beta
&& !check_is_dangerous(pos, move, futilityBase, beta, &bestValue))
- {
- if (ss->eval + PawnValueMidgame / 4 > bestValue)
- bestValue = ss->eval + PawnValueMidgame / 4;
-
continue;
- }
// Check for legality only before to do the move
if (!pos.pl_move_is_legal(move, ci.pinned))
// Update transposition table
move = bestValue <= oldAlpha ? MOVE_NONE : ss->bestMove;
- vt = bestValue <= oldAlpha ? VALUE_TYPE_UPPER
- : bestValue >= beta ? VALUE_TYPE_LOWER : VALUE_TYPE_EXACT;
+ bt = bestValue <= oldAlpha ? BOUND_UPPER
+ : bestValue >= beta ? BOUND_LOWER : BOUND_EXACT;
- TT.store(pos.key(), value_to_tt(bestValue, ss->ply), vt, ttDepth, move, ss->eval, evalMargin);
+ TT.store(pos.key(), value_to_tt(bestValue, ss->ply), bt, ttDepth, move, ss->eval, evalMargin);
assert(bestValue > -VALUE_INFINITE && bestValue < VALUE_INFINITE);
return true;
// Rule 2. Queen contact check is very dangerous
- if ( type_of(pc) == QUEEN
- && bit_is_set(kingAtt, to))
+ if (type_of(pc) == QUEEN && (kingAtt & to))
return true;
// Rule 3. Creating new double threats with checks
// Case 3: Moving through the vacated square
p2 = pos.piece_on(f2);
- if ( piece_is_slider(p2)
- && bit_is_set(squares_between(f2, t2), f1))
+ if (piece_is_slider(p2) && (squares_between(f2, t2) & f1))
return true;
// Case 4: The destination square for m2 is defended by the moving piece in m1
p1 = pos.piece_on(t1);
- if (bit_is_set(pos.attacks_from(p1, t1), t2))
+ if (pos.attacks_from(p1, t1) & t2)
return true;
// Case 5: Discovered check, checking piece is the piece moved in m1
ksq = pos.king_square(pos.side_to_move());
- if ( piece_is_slider(p1)
- && bit_is_set(squares_between(t1, ksq), f2))
+ if (piece_is_slider(p1) && (squares_between(t1, ksq) & f2))
{
Bitboard occ = pos.occupied_squares();
- clear_bit(&occ, f2);
- if (bit_is_set(pos.attacks_from(p1, t1, occ), ksq))
+ occ ^= f2;
+ if (pos.attacks_from(p1, t1, occ) & ksq)
return true;
}
return false;
// Case 3: If the moving piece in the threatened move is a slider, don't
// prune safe moves which block its ray.
- if ( piece_is_slider(pos.piece_on(tfrom))
- && bit_is_set(squares_between(tfrom, tto), mto)
- && pos.see_sign(m) >= 0)
+ if ( piece_is_slider(pos.piece_on(tfrom))
+ && (squares_between(tfrom, tto) & mto)
+ && pos.see_sign(m) >= 0)
return true;
return false;
|| v >= std::max(VALUE_MATE_IN_MAX_PLY, beta)
|| v < std::min(VALUE_MATED_IN_MAX_PLY, beta))
- && ( ((tte->type() & VALUE_TYPE_LOWER) && v >= beta)
- || ((tte->type() & VALUE_TYPE_UPPER) && v < beta));
+ && ( ((tte->type() & BOUND_LOWER) && v >= beta)
+ || ((tte->type() & BOUND_UPPER) && v < beta));
}
Value v = value_from_tt(tte->value(), ply);
- if ( ((tte->type() & VALUE_TYPE_LOWER) && v >= defaultEval)
- || ((tte->type() & VALUE_TYPE_UPPER) && v < defaultEval))
+ if ( ((tte->type() & BOUND_LOWER) && v >= defaultEval)
+ || ((tte->type() & BOUND_UPPER) && v < defaultEval))
return v;
return defaultEval;
/// RootMove::extract_pv_from_tt() builds a PV by adding moves from the TT table.
-/// We consider also failing high nodes and not only VALUE_TYPE_EXACT nodes so
-/// to allow to always have a ponder move even when we fail high at root, and
-/// a long PV to print that is important for position analysis.
+/// We consider also failing high nodes and not only BOUND_EXACT nodes so to
+/// allow to always have a ponder move even when we fail high at root, and a
+/// long PV to print that is important for position analysis.
void RootMove::extract_pv_from_tt(Position& pos) {
if (!tte || tte->move() != pv[ply])
{
v = (pos.in_check() ? VALUE_NONE : evaluate(pos, m));
- TT.store(k, VALUE_NONE, VALUE_TYPE_NONE, DEPTH_NONE, pv[ply], v, m);
+ TT.store(k, VALUE_NONE, BOUND_NONE, DEPTH_NONE, pv[ply], v, m);
}
pos.do_move(pv[ply], *st++);
{
assert(!do_sleep && !do_exit);
- // Copy split point position and search stack and call search()
- Stack ss[MAX_PLY_PLUS_2];
+ lock_grab(Threads.splitLock);
+
+ assert(is_searching);
SplitPoint* sp = splitPoint;
+
+ lock_release(Threads.splitLock);
+
+ Stack ss[MAX_PLY_PLUS_2];
Position pos(*sp->pos, threadID);
+ int master = sp->master;
memcpy(ss, sp->ss - 1, 4 * sizeof(Stack));
(ss+1)->sp = sp;
assert(is_searching);
- // We return from search with lock held
+ is_searching = false;
sp->slavesMask &= ~(1ULL << threadID);
sp->nodes += pos.nodes_searched();
- lock_release(sp->lock);
- is_searching = false;
+ // After releasing the lock we cannot access anymore any SplitPoint
+ // related data in a reliable way because it could have been released
+ // under our feet by the sp master.
+ lock_release(sp->lock);
// Wake up master thread so to allow it to return from the idle loop in
// case we are the last slave of the split point.
if ( Threads.use_sleeping_threads()
- && threadID != sp->master
- && !Threads[sp->master].is_searching)
- Threads[sp->master].wake_up();
+ && threadID != master
+ && !Threads[master].is_searching)
+ Threads[master].wake_up();
}
}
+ // Under the helpful-master concept a master can help only within the sub-tree
+ // of its own split point; because everything is finished here, the master
+ // cannot be booked.
+ assert(!is_searching);
}