/// search captures, promotions and some checks) and about how important good
/// move ordering is at the current node.
-MovePicker::MovePicker(const Position& p, bool pv, Move ttm, Depth d,
+MovePicker::MovePicker(const Position& p, Move ttm, Depth d,
const History& h, SearchStack* ss) : pos(p), H(h) {
-
- pvNode = pv;
ttMove = ttm;
if (ss)
{
} else
mateKiller = killer1 = killer2 = MOVE_NONE;
- depth = d;
- movesPicked = 0;
- numOfMoves = 0;
- numOfBadCaptures = 0;
- checkKillers = checkLegal = false;
+ movesPicked = numOfMoves = numOfBadCaptures = 0;
+ checkKillers = checkLegal = finished = false;
if (p.is_check())
phaseIndex = EvasionsPhaseIndex;
- else if (depth > Depth(0))
+ else if (d > Depth(0))
phaseIndex = MainSearchPhaseIndex;
- else if (depth == Depth(0))
+ else if (d == Depth(0))
phaseIndex = QsearchWithChecksPhaseIndex;
else
phaseIndex = QsearchWithoutChecksPhaseIndex;
seeValue = pos.see(m);
if (seeValue >= 0)
{
- if (move_promotion(m))
+ if (move_is_promotion(m))
moves[i].score = QueenValueMidgame;
else
moves[i].score = int(pos.midgame_value_of_piece_on(move_to(m)))
// First score by history; when no history is available, use
// piece/square table values. This seems to be better than a
// random choice when we don't have a history for any move.
- Move m;
+ Piece piece;
+ Square from, to;
int hs;
for (int i = 0; i < numOfMoves; i++)
{
- m = moves[i].move;
- hs = H.move_ordering_score(pos.piece_on(move_from(m)), move_to(m));
+ from = move_from(moves[i].move);
+ to = move_to(moves[i].move);
+ piece = pos.piece_on(from);
+ hs = H.move_ordering_score(piece, to);
// Ensure history is always preferred to pst
if (hs > 0)
hs += 1000;
// pst based scoring
- moves[i].score = hs + pos.mg_pst_delta(m);
+ moves[i].score = hs + pos.pst_delta<Position::MidGame>(piece, from, to);
}
}
for (int i = 0; i < numOfMoves; i++)
{
Move m = moves[i].move;
- if (move_promotion(m))
+ if (move_is_promotion(m))
moves[i].score = QueenValueMidgame;
else
moves[i].score = int(pos.midgame_value_of_piece_on(move_to(m)))