return SCALE_FACTOR_DRAW;
}
}
+
+ // All pawns on same B or G file? Then potential draw
+ if ( (pawnFile == FILE_B || pawnFile == FILE_G)
+ && !(pos.pieces(PAWN) & ~file_bb(pawnFile))
+ && pos.non_pawn_material(weakerSide) == 0
+ && pos.piece_count(weakerSide, PAWN) >= 1)
+ {
+ // Get weaker pawn closest to opponent's queening square
+ Bitboard wkPawns = pos.pieces(weakerSide, PAWN);
+ Square weakerPawnSq = strongerSide == WHITE ? msb(wkPawns) : lsb(wkPawns);
+
+ Square strongerKingSq = pos.king_square(strongerSide);
+ Square weakerKingSq = pos.king_square(weakerSide);
+ Square bishopSq = pos.piece_list(strongerSide, BISHOP)[0];
+
+ // Draw if weaker pawn is on rank 7, bishop can't attack the pawn, and
+ // the weaker king can stop the opponent's king from penetrating.
+ if ( relative_rank(strongerSide, weakerPawnSq) == RANK_7
+ && opposite_colors(bishopSq, weakerPawnSq)
+ && square_distance(weakerPawnSq, weakerKingSq) <= square_distance(weakerPawnSq, strongerKingSq))
+ return SCALE_FACTOR_DRAW;
+ }
+
return SCALE_FACTOR_NONE;
}
+++ /dev/null
-/*
- Stockfish, a UCI chess playing engine derived from Glaurung 2.1
- Copyright (C) 2004-2008 Tord Romstad (Glaurung author)
- Copyright (C) 2008-2012 Marco Costalba, Joona Kiiski, Tord Romstad
-
- Stockfish is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- Stockfish is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-#if !defined(HISTORY_H_INCLUDED)
-#define HISTORY_H_INCLUDED
-
-#include <algorithm>
-#include <cstring>
-
-#include "types.h"
-
-/// The History class stores statistics about how often different moves
-/// have been successful or unsuccessful during the current search. These
-/// statistics are used for reduction and move ordering decisions. History
-/// entries are stored according only to moving piece and destination square,
-/// in particular two moves with different origin but same destination and
-/// same piece will be considered identical.
-
-class History {
-
-public:
- void clear();
- Value value(Piece p, Square to) const;
- void add(Piece p, Square to, Value bonus);
- Value gain(Piece p, Square to) const;
- void update_gain(Piece p, Square to, Value g);
-
- static const Value MaxValue = Value(2000);
-
-private:
- Value history[PIECE_NB][SQUARE_NB];
- Value maxGains[PIECE_NB][SQUARE_NB];
-};
-
-inline void History::clear() {
- memset(history, 0, 16 * 64 * sizeof(Value));
- memset(maxGains, 0, 16 * 64 * sizeof(Value));
-}
-
-inline Value History::value(Piece p, Square to) const {
- return history[p][to];
-}
-
-inline void History::add(Piece p, Square to, Value bonus) {
- if (abs(history[p][to] + bonus) < MaxValue) history[p][to] += bonus;
-}
-
-inline Value History::gain(Piece p, Square to) const {
- return maxGains[p][to];
-}
-
-inline void History::update_gain(Piece p, Square to, Value g) {
- maxGains[p][to] = std::max(g, maxGains[p][to] - 1);
-}
-
-#endif // !defined(HISTORY_H_INCLUDED)
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <algorithm>
#include <cassert>
-#include "movegen.h"
#include "movepick.h"
#include "thread.h"
STOP
};
+ // Our insertion sort, guaranteed to be stable, as is needed
+ // (equal-scored moves must keep the order in which they were
+ // generated; std::sort gives no stability guarantee). Sorts the
+ // range [begin, end) in descending order w.r.t. MoveStack's
+ // operator< (elements for which no later element compares greater
+ // end up first).
+ void insertion_sort(MoveStack* begin, MoveStack* end)
+ {
+ MoveStack tmp, *p, *q;
+
+ for (p = begin + 1; p < end; ++p)
+ {
+ tmp = *p;
+ // Shift each element that compares less than tmp one slot
+ // towards the end, then drop tmp into the freed slot.
+ for (q = p; q != begin && *(q-1) < tmp; --q)
+ *q = *(q-1);
+ *q = tmp;
+ }
+ }
+
// Unary predicate used by std::partition to split positive scores from remaining
// ones so to sort separately the two sets, and with the second sort delayed.
inline bool has_positive_score(const MoveStack& ms) { return ms.score > 0; }
/// move ordering is at the current node.
MovePicker::MovePicker(const Position& p, Move ttm, Depth d, const History& h,
- Search::Stack* s, Value beta) : pos(p), H(h), depth(d) {
+ Search::Stack* s, Value beta) : pos(p), Hist(h), depth(d) {
assert(d > DEPTH_ZERO);
}
MovePicker::MovePicker(const Position& p, Move ttm, Depth d, const History& h,
- Square sq) : pos(p), H(h), cur(moves), end(moves) {
+ Square sq) : pos(p), Hist(h), cur(moves), end(moves) {
assert(d <= DEPTH_ZERO);
}
MovePicker::MovePicker(const Position& p, Move ttm, const History& h, PieceType pt)
- : pos(p), H(h), cur(moves), end(moves) {
+ : pos(p), Hist(h), cur(moves), end(moves) {
assert(!pos.checkers());
}
-/// MovePicker::score_captures(), MovePicker::score_noncaptures() and
-/// MovePicker::score_evasions() assign a numerical move ordering score
-/// to each move in a move list. The moves with highest scores will be
-/// picked first by next_move().
-
-void MovePicker::score_captures() {
+/// score() assign a numerical move ordering score to each move in a move list.
+/// The moves with highest scores will be picked first.
+template<>
+void MovePicker::score<CAPTURES>() {
// Winning and equal captures in the main search are ordered by MVV/LVA.
// Suprisingly, this appears to perform slightly better than SEE based
// move ordering. The reason is probably that in a position with a winning
- type_of(pos.piece_moved(m));
if (type_of(m) == PROMOTION)
- it->score += PieceValue[MG][promotion_type(m)];
+ it->score += PieceValue[MG][promotion_type(m)] - PieceValue[MG][PAWN];
+
+ else if (type_of(m) == ENPASSANT)
+ it->score += PieceValue[MG][PAWN];
}
}
-void MovePicker::score_noncaptures() {
+template<>
+void MovePicker::score<QUIETS>() {
Move m;
for (MoveStack* it = moves; it != end; ++it)
{
m = it->move;
- it->score = H.value(pos.piece_moved(m), to_sq(m));
+ it->score = Hist[pos.piece_moved(m)][to_sq(m)];
}
}
-void MovePicker::score_evasions() {
+template<>
+void MovePicker::score<EVASIONS>() {
// Try good captures ordered by MVV/LVA, then non-captures if destination square
// is not under attack, ordered by history value, then bad-captures and quiet
// moves with a negative SEE. This last group is ordered by the SEE score.
Move m;
int seeScore;
- if (end < moves + 2)
- return;
-
for (MoveStack* it = moves; it != end; ++it)
{
m = it->move;
if ((seeScore = pos.see_sign(m)) < 0)
- it->score = seeScore - History::MaxValue; // Be sure we are at the bottom
+ it->score = seeScore - History::Max; // At the bottom
+
else if (pos.is_capture(m))
it->score = PieceValue[MG][pos.piece_on(to_sq(m))]
- - type_of(pos.piece_moved(m)) + History::MaxValue;
+ - type_of(pos.piece_moved(m)) + History::Max;
else
- it->score = H.value(pos.piece_moved(m), to_sq(m));
+ it->score = Hist[pos.piece_moved(m)][to_sq(m)];
}
}
-/// MovePicker::generate_next() generates, scores and sorts the next bunch of moves,
-/// when there are no more moves to try for the current phase.
+/// generate_next() generates, scores and sorts the next bunch of moves, when
+/// there are no more moves to try for the current phase.
void MovePicker::generate_next() {
case CAPTURES_S1: case CAPTURES_S3: case CAPTURES_S4: case CAPTURES_S5: case CAPTURES_S6:
end = generate<CAPTURES>(pos, moves);
- score_captures();
+ score<CAPTURES>();
return;
case KILLERS_S1:
case QUIETS_1_S1:
endQuiets = end = generate<QUIETS>(pos, moves);
- score_noncaptures();
+ score<QUIETS>();
end = std::partition(cur, end, has_positive_score);
- sort<MoveStack>(cur, end);
+ insertion_sort(cur, end);
return;
case QUIETS_2_S1:
cur = end;
end = endQuiets;
if (depth >= 3 * ONE_PLY)
- sort<MoveStack>(cur, end);
+ insertion_sort(cur, end);
return;
case BAD_CAPTURES_S1:
case EVASIONS_S2:
end = generate<EVASIONS>(pos, moves);
- score_evasions();
+ if (end > moves + 1)
+ score<EVASIONS>();
return;
case QUIET_CHECKS_S3:
}
-/// MovePicker::next_move() is the most important method of the MovePicker class.
-/// It returns a new pseudo legal move every time it is called, until there
-/// are no more moves left. It picks the move with the biggest score from a list
-/// of generated moves taking care not to return the tt move if has already been
-/// searched previously.
+/// next_move() is the most important method of the MovePicker class. It returns
+/// a new pseudo legal move every time it is called, until there are no more moves
+/// left. It picks the move with the biggest score from a list of generated moves,
+/// taking care not to return the ttMove if it has already been searched previously.
template<>
Move MovePicker::next_move<false>() {
/// Version of next_move() to use at split point nodes where the move is grabbed
/// from the split point's shared MovePicker object. This function is not thread
-/// safe so should be lock protected by the caller.
+/// safe so must be lock protected by the caller.
template<>
-Move MovePicker::next_move<true>() { return ss->sp->mp->next_move<false>(); }
+Move MovePicker::next_move<true>() { return ss->sp->movePicker->next_move<false>(); }
#if !defined MOVEPICK_H_INCLUDED
#define MOVEPICK_H_INCLUDED
-#include "history.h"
+#include <algorithm> // For std::max
+#include <cstring> // For memset
+
+#include "movegen.h"
#include "position.h"
#include "search.h"
#include "types.h"
+/// The Stats struct stores moves statistics. According to the template parameter
+/// the class can store both History and Gains type statistics. History records
+/// how often different moves have been successful or unsuccessful during the
+/// current search and is used for reduction and move ordering decisions. Gains
+/// records the move's best evaluation gain from one ply to the next and is used
+/// for pruning decisions. Entries are stored according only to moving piece and
+/// destination square, in particular two moves with different origin but same
+/// destination and same piece will be considered identical.
+template<bool Gain>
+struct Stats {
+
+ // Saturation bound: History updates are skipped once the entry
+ // would reach |Max| (see update() below).
+ static const Value Max = Value(2000);
+
+ // Read-only access to a whole piece row: stats[piece][to_square]
+ const Value* operator[](Piece p) const { return &table[p][0]; }
+ void clear() { memset(table, 0, sizeof(table)); }
+
+ // Gains (Gain == true): keep a slowly decaying maximum of v, so a
+ // single big gain persists but fades by 1 per update.
+ // History (Gain == false): accumulate v, saturating before the
+ // absolute value of the entry reaches Max.
+ void update(Piece p, Square to, Value v) {
+
+ if (Gain)
+ table[p][to] = std::max(v, table[p][to] - 1);
+
+ else if (abs(table[p][to] + v) < Max)
+ table[p][to] += v;
+ }
+
+private:
+ // Indexed by moving piece and destination square only; origin
+ // square is deliberately ignored (see block comment above).
+ Value table[PIECE_NB][SQUARE_NB];
+};
+
+typedef Stats<false> History;
+typedef Stats<true> Gains;
+
+
/// MovePicker class is used to pick one pseudo legal move at a time from the
/// current position. The most important method is next_move(), which returns a
/// new pseudo legal move each time it is called, until there are no moves left,
template<bool SpNode> Move next_move();
private:
- void score_captures();
- void score_noncaptures();
- void score_evasions();
+ template<GenType> void score();
void generate_next();
const Position& pos;
- const History& H;
+ const History& Hist;
Search::Stack* ss;
Depth depth;
Move ttMove;
assert(MoveList<LEGAL>(pos).contains(m));
- Bitboard attackers;
- bool ambiguousMove, ambiguousFile, ambiguousRank;
+ Bitboard others, b;
string san;
Color us = pos.side_to_move();
Square from = from_sq(m);
{
san = PieceToChar[WHITE][pt]; // Upper case
- // Disambiguation if we have more then one piece with destination 'to'
- // note that for pawns is not needed because starting file is explicit.
- ambiguousMove = ambiguousFile = ambiguousRank = false;
+ // Disambiguation if we have more than one piece of type 'pt' that can
+ // reach 'to' with a legal move.
+ others = b = (pos.attacks_from(pc, to) & pos.pieces(us, pt)) ^ from;
- attackers = (pos.attacks_from(pc, to) & pos.pieces(us, pt)) ^ from;
-
- while (attackers)
+ while (b)
{
- Square sq = pop_lsb(&attackers);
-
- // If the move is illegal, the piece is not included in the sub-set
- if (!pos.pl_move_is_legal(make_move(sq, to), pos.pinned_pieces()))
- continue;
-
- ambiguousFile |= file_of(sq) == file_of(from);
- ambiguousRank |= rank_of(sq) == rank_of(from);
- ambiguousMove = true;
+ Move move = make_move(pop_lsb(&b), to);
+ if (!pos.pl_move_is_legal(move, pos.pinned_pieces()))
+ others ^= from_sq(move);
}
- if (ambiguousMove)
+ if (others)
{
- if (!ambiguousFile)
+ if (!(others & file_bb(from)))
san += file_to_char(file_of(from));
- else if (!ambiguousRank)
+ else if (!(others & rank_bb(from)))
san += rank_to_char(rank_of(from));
else
// Update piece list, move the last piece at index[capsq] position and
// shrink the list.
//
- // WARNING: This is a not revresible operation. When we will reinsert the
+ // WARNING: This is a not reversible operation. When we will reinsert the
// captured piece in undo_move() we will put it at the end of the list and
// not in its original place, it means index[] and pieceList[] are not
// guaranteed to be invariant to a do_move() + undo_move() sequence.
#include "book.h"
#include "evaluate.h"
-#include "history.h"
#include "movegen.h"
#include "movepick.h"
#include "notation.h"
TimeManager TimeMgr;
int BestMoveChanges;
Value DrawValue[COLOR_NB];
- History H;
+ History Hist;
+ Gains Gain;
template <NodeType NT>
Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth);
void id_loop(Position& pos);
Value value_to_tt(Value v, int ply);
Value value_from_tt(Value v, int ply);
- bool check_is_dangerous(Position& pos, Move move, Value futilityBase, Value beta);
- bool allows_move(const Position& pos, Move first, Move second);
- bool prevents_move(const Position& pos, Move first, Move second);
+ bool check_is_dangerous(const Position& pos, Move move, Value futilityBase, Value beta);
+ bool allows(const Position& pos, Move first, Move second);
+ bool refutes(const Position& pos, Move first, Move second);
string uci_pv(const Position& pos, int depth, Value alpha, Value beta);
struct Skill {
// Reset the threads, still sleeping: will be wake up at split time
for (size_t i = 0; i < Threads.size(); i++)
- Threads[i].maxPly = 0;
+ Threads[i]->maxPly = 0;
Threads.sleepWhileIdle = Options["Use Sleeping Threads"];
// Set best timer interval to avoid lagging under time pressure. Timer is
// used to check for remaining available thinking time.
- Threads.timer_thread()->msec =
+ Threads.timer->msec =
Limits.use_time_management() ? std::min(100, std::max(TimeMgr.available_time() / 16, TimerResolution)) :
Limits.nodes ? 2 * TimerResolution
: 100;
- Threads.timer_thread()->notify_one(); // Wake up the recurring timer
+ Threads.timer->notify_one(); // Wake up the recurring timer
id_loop(RootPos); // Let's start searching !
- Threads.timer_thread()->msec = 0; // Stop the timer
+ Threads.timer->msec = 0; // Stop the timer
Threads.sleepWhileIdle = true; // Send idle threads to sleep
if (Options["Use Search Log"])
bestValue = delta = -VALUE_INFINITE;
ss->currentMove = MOVE_NULL; // Hack to skip update gains
TT.new_search();
- H.clear();
+ Hist.clear();
+ Gain.clear();
PVSize = Options["MultiPV"];
Skill skill(Options["Skill Level"]);
// we want to keep the same order for all the moves but the new
// PV that goes to the front. Note that in case of MultiPV search
// the already searched PV lines are preserved.
- sort<RootMove>(RootMoves.begin() + PVIdx, RootMoves.end());
+ std::stable_sort(RootMoves.begin() + PVIdx, RootMoves.end());
// Write PV back to transposition table in case the relevant
// entries have been overwritten during the search.
}
// Sort the PV lines searched so far and update the GUI
- sort<RootMove>(RootMoves.begin(), RootMoves.begin() + PVIdx + 1);
+ std::stable_sort(RootMoves.begin(), RootMoves.begin() + PVIdx + 1);
+
if (PVIdx + 1 == PVSize || Time::now() - SearchTime > 3000)
sync_cout << uci_pv(pos, depth, alpha, beta) << sync_endl;
}
else if (tte)
{
// Never assume anything on values stored in TT
- if ( (ss->staticEval = eval = tte->static_value()) == VALUE_NONE
- ||(ss->evalMargin = tte->static_value_margin()) == VALUE_NONE)
+ if ( (ss->staticEval = eval = tte->eval_value()) == VALUE_NONE
+ ||(ss->evalMargin = tte->eval_margin()) == VALUE_NONE)
eval = ss->staticEval = evaluate(pos, ss->evalMargin);
// Can ttValue be used as a better position evaluation?
&& type_of(move) == NORMAL)
{
Square to = to_sq(move);
- H.update_gain(pos.piece_on(to), to, -(ss-1)->staticEval - ss->staticEval);
+ Gain.update(pos.piece_on(to), to, -(ss-1)->staticEval - ss->staticEval);
}
// Step 6. Razoring (is omitted in PV nodes)
if ( depth < 5 * ONE_PLY
&& (ss-1)->reduction
&& threatMove != MOVE_NONE
- && allows_move(pos, (ss-1)->currentMove, threatMove))
+ && allows(pos, (ss-1)->currentMove, threatMove))
return beta - 1;
}
}
assert((ss-1)->currentMove != MOVE_NONE);
assert((ss-1)->currentMove != MOVE_NULL);
- MovePicker mp(pos, ttMove, H, pos.captured_piece_type());
+ MovePicker mp(pos, ttMove, Hist, pos.captured_piece_type());
CheckInfo ci(pos);
while ((move = mp.next_move<false>()) != MOVE_NONE)
split_point_start: // At split points actual search starts from here
- MovePicker mp(pos, ttMove, depth, H, ss, PvNode ? -VALUE_INFINITE : beta);
+ MovePicker mp(pos, ttMove, depth, Hist, ss, PvNode ? -VALUE_INFINITE : beta);
CheckInfo ci(pos);
value = bestValue; // Workaround a bogus 'uninitialized' warning under gcc
singularExtensionNode = !RootNode
// Move count based pruning
if ( depth < 16 * ONE_PLY
&& moveCount >= FutilityMoveCounts[depth]
- && (!threatMove || !prevents_move(pos, move, threatMove)))
+ && (!threatMove || !refutes(pos, move, threatMove)))
{
if (SpNode)
sp->mutex.lock();
// but fixing this made program slightly weaker.
Depth predictedDepth = newDepth - reduction<PvNode>(depth, moveCount);
futilityValue = ss->staticEval + ss->evalMargin + futility_margin(predictedDepth, moveCount)
- + H.gain(pos.piece_moved(move), to_sq(move));
+ + Gain[pos.piece_moved(move)][to_sq(move)];
if (futilityValue < beta)
{
&& !pvMove
&& !captureOrPromotion
&& !dangerous
- && ss->killers[0] != move
- && ss->killers[1] != move)
+ && move != ttMove
+ && move != ss->killers[0]
+ && move != ss->killers[1])
{
ss->reduction = reduction<PvNode>(depth, moveCount);
Depth d = std::max(newDepth - ss->reduction, ONE_PLY);
// Step 19. Check for splitting the search
if ( !SpNode
&& depth >= Threads.minimumSplitDepth
- && Threads.slave_available(thisThread)
+ && Threads.available_slave(thisThread)
&& thisThread->splitPointsSize < MAX_SPLITPOINTS_PER_THREAD)
{
assert(bestValue < beta);
- bestValue = Threads.split<FakeSplit>(pos, ss, alpha, beta, bestValue, &bestMove,
- depth, threatMove, moveCount, mp, NT);
+ thisThread->split<FakeSplit>(pos, ss, alpha, beta, &bestValue, &bestMove,
+ depth, threatMove, moveCount, &mp, NT);
if (bestValue >= beta)
break;
}
// Increase history value of the cut-off move
Value bonus = Value(int(depth) * int(depth));
- H.add(pos.piece_moved(bestMove), to_sq(bestMove), bonus);
+ Hist.update(pos.piece_moved(bestMove), to_sq(bestMove), bonus);
// Decrease history of all the other played non-capture moves
for (int i = 0; i < playedMoveCount - 1; i++)
{
Move m = movesSearched[i];
- H.add(pos.piece_moved(m), to_sq(m), -bonus);
+ Hist.update(pos.piece_moved(m), to_sq(m), -bonus);
}
}
}
if (tte)
{
// Never assume anything on values stored in TT
- if ( (ss->staticEval = bestValue = tte->static_value()) == VALUE_NONE
- ||(ss->evalMargin = tte->static_value_margin()) == VALUE_NONE)
+ if ( (ss->staticEval = bestValue = tte->eval_value()) == VALUE_NONE
+ ||(ss->evalMargin = tte->eval_margin()) == VALUE_NONE)
ss->staticEval = bestValue = evaluate(pos, ss->evalMargin);
}
else
// to search the moves. Because the depth is <= 0 here, only captures,
// queen promotions and checks (only if depth >= DEPTH_QS_CHECKS) will
// be generated.
- MovePicker mp(pos, ttMove, depth, H, to_sq((ss-1)->currentMove));
+ MovePicker mp(pos, ttMove, depth, Hist, to_sq((ss-1)->currentMove));
CheckInfo ci(pos);
// Loop through the moves until no moves remain or a beta cutoff occurs
// check_is_dangerous() tests if a checking move can be pruned in qsearch()
- bool check_is_dangerous(Position& pos, Move move, Value futilityBase, Value beta)
+ bool check_is_dangerous(const Position& pos, Move move, Value futilityBase, Value beta)
{
Piece pc = pos.piece_moved(move);
Square from = from_sq(move);
}
- // allows_move() tests whether the move at previous ply (first) somehow makes a
- // second move possible, for instance if the moving piece is the same in both
- // moves. Normally the second move is the threat move (the best move returned
+ // allows() tests whether the 'first' move at previous ply somehow makes the
+ // 'second' move possible, for instance if the moving piece is the same in
+ // both moves. Normally the second move is the threat (the best move returned
// from a null search that fails low).
- bool allows_move(const Position& pos, Move first, Move second) {
+ bool allows(const Position& pos, Move first, Move second) {
assert(is_ok(first));
assert(is_ok(second));
}
- // prevents_move() tests whether a move (first) is able to defend against an
- // opponent's move (second). In this case will not be pruned. Normally the
- // second move is the threat move (the best move returned from a null search
- // that fails low).
+ // refutes() tests whether a 'first' move is able to defend against a 'second'
+ // opponent's move. In this case will not be pruned. Normally the second move
+ // is the threat (the best move returned from a null search that fails low).
- bool prevents_move(const Position& pos, Move first, Move second) {
+ bool refutes(const Position& pos, Move first, Move second) {
assert(is_ok(first));
assert(is_ok(second));
int selDepth = 0;
for (size_t i = 0; i < Threads.size(); i++)
- if (Threads[i].maxPly > selDepth)
- selDepth = Threads[i].maxPly;
+ if (Threads[i]->maxPly > selDepth)
+ selDepth = Threads[i]->maxPly;
for (size_t i = 0; i < uciPVSize; i++)
{
// at the thread creation. So it means we are the split point's master.
const SplitPoint* this_sp = splitPointsSize ? activeSplitPoint : NULL;
- assert(!this_sp || (this_sp->master == this && searching));
+ assert(!this_sp || (this_sp->masterThread == this && searching));
// If this thread is the master of a split point and all slaves have finished
// their work at this split point, return from the idle loop.
sp->mutex.lock();
- assert(sp->slavesPositions[idx] == NULL);
+ assert(activePosition == NULL);
- sp->slavesPositions[idx] = &pos;
+ activePosition = &pos;
switch (sp->nodeType) {
case Root:
assert(searching);
searching = false;
- sp->slavesPositions[idx] = NULL;
+ activePosition = NULL;
sp->slavesMask &= ~(1ULL << idx);
sp->nodes += pos.nodes_searched();
// Wake up master thread so to allow it to return from the idle loop
// in case we are the last slave of the split point.
if ( Threads.sleepWhileIdle
- && this != sp->master
+ && this != sp->masterThread
&& !sp->slavesMask)
{
- assert(!sp->master->searching);
- sp->master->notify_one();
+ assert(!sp->masterThread->searching);
+ sp->masterThread->notify_one();
}
// After releasing the lock we cannot access anymore any SplitPoint
nodes = RootPos.nodes_searched();
// Loop across all split points and sum accumulated SplitPoint nodes plus
- // all the currently active slaves positions.
+ // all the currently active positions nodes.
for (size_t i = 0; i < Threads.size(); i++)
- for (int j = 0; j < Threads[i].splitPointsSize; j++)
+ for (int j = 0; j < Threads[i]->splitPointsSize; j++)
{
- SplitPoint& sp = Threads[i].splitPoints[j];
+ SplitPoint& sp = Threads[i]->splitPoints[j];
sp.mutex.lock();
Bitboard sm = sp.slavesMask;
while (sm)
{
- Position* pos = sp.slavesPositions[pop_lsb(&sm)];
- nodes += pos ? pos->nodes_searched() : 0;
+ Position* pos = Threads[pop_lsb(&sm)]->activePosition;
+ if (pos)
+ nodes += pos->nodes_searched();
}
sp.mutex.unlock();
/// all non-pv moves.
struct RootMove {
- RootMove(){} // Needed by sort()
RootMove(Move m) : score(-VALUE_INFINITE), prevScore(-VALUE_INFINITE) {
pv.push_back(m); pv.push_back(MOVE_NONE);
}
- bool operator<(const RootMove& m) const { return score < m.score; }
+ bool operator<(const RootMove& m) const { return score > m.score; } // Ascending sort
bool operator==(const Move& m) const { return pv[0] == m; }
void extract_pv_from_tt(Position& pos);
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <algorithm> // For std::count
#include <cassert>
#include <iostream>
// Thread c'tor starts a newly-created thread of execution that will call
// the the virtual function idle_loop(), going immediately to sleep.
-Thread::Thread() : splitPoints() {
+Thread::Thread() /* : splitPoints() */ { // Value-initialization bug in MSVC
searching = exit = false;
maxPly = splitPointsSize = 0;
activeSplitPoint = NULL;
+ activePosition = NULL;
idx = Threads.size();
if (!thread_create(handle, start_routine, this))
bool Thread::cutoff_occurred() const {
- for (SplitPoint* sp = activeSplitPoint; sp; sp = sp->parent)
+ for (SplitPoint* sp = activeSplitPoint; sp; sp = sp->parentSplitPoint)
if (sp->cutoff)
return true;
sleepWhileIdle = true;
timer = new TimerThread();
- threads.push_back(new MainThread());
+ push_back(new MainThread());
read_uci_options();
}
delete timer; // As first because check_time() accesses threads data
- for (size_t i = 0; i < threads.size(); i++)
- delete threads[i];
+ for (iterator it = begin(); it != end(); ++it)
+ delete *it;
}
assert(requested > 0);
- while (threads.size() < requested)
- threads.push_back(new Thread());
+ while (size() < requested)
+ push_back(new Thread());
- while (threads.size() > requested)
+ while (size() > requested)
{
- delete threads.back();
- threads.pop_back();
+ delete back();
+ pop_back();
}
}
// slave_available() tries to find an idle thread which is available as a slave
// for the thread 'master'.
-bool ThreadPool::slave_available(Thread* master) const {
+Thread* ThreadPool::available_slave(Thread* master) const {
- for (size_t i = 0; i < threads.size(); i++)
- if (threads[i]->is_available_to(master))
- return true;
+ for (const_iterator it = begin(); it != end(); ++it)
+ if ((*it)->is_available_to(master))
+ return *it;
- return false;
+ return NULL;
}
// search() then split() returns.
template <bool Fake>
-Value ThreadPool::split(Position& pos, Stack* ss, Value alpha, Value beta,
- Value bestValue, Move* bestMove, Depth depth, Move threatMove,
- int moveCount, MovePicker& mp, int nodeType) {
+void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bestValue,
+ Move* bestMove, Depth depth, Move threatMove, int moveCount,
+ MovePicker* movePicker, int nodeType) {
assert(pos.pos_is_ok());
- assert(bestValue <= alpha && alpha < beta && beta <= VALUE_INFINITE);
- assert(bestValue > -VALUE_INFINITE);
+ assert(*bestValue <= alpha && alpha < beta && beta <= VALUE_INFINITE);
+ assert(*bestValue > -VALUE_INFINITE);
assert(depth >= Threads.minimumSplitDepth);
-
- Thread* master = pos.this_thread();
-
- assert(master->searching);
- assert(master->splitPointsSize < MAX_SPLITPOINTS_PER_THREAD);
+ assert(searching);
+ assert(splitPointsSize < MAX_SPLITPOINTS_PER_THREAD);
// Pick the next available split point from the split point stack
- SplitPoint& sp = master->splitPoints[master->splitPointsSize];
+ SplitPoint& sp = splitPoints[splitPointsSize];
- sp.master = master;
- sp.parent = master->activeSplitPoint;
- sp.slavesMask = 1ULL << master->idx;
+ sp.masterThread = this;
+ sp.parentSplitPoint = activeSplitPoint;
+ sp.slavesMask = 1ULL << idx;
sp.depth = depth;
+ sp.bestValue = *bestValue;
sp.bestMove = *bestMove;
sp.threatMove = threatMove;
sp.alpha = alpha;
sp.beta = beta;
sp.nodeType = nodeType;
- sp.bestValue = bestValue;
- sp.mp = ∓
+ sp.movePicker = movePicker;
sp.moveCount = moveCount;
sp.pos = &pos;
sp.nodes = 0;
// Try to allocate available threads and ask them to start searching setting
// 'searching' flag. This must be done under lock protection to avoid concurrent
// allocation of the same slave by another master.
- mutex.lock();
+ Threads.mutex.lock();
sp.mutex.lock();
- master->splitPointsSize++;
- master->activeSplitPoint = &sp;
+ splitPointsSize++;
+ activeSplitPoint = &sp;
+ activePosition = NULL;
- size_t slavesCnt = 1; // Master is always included
+ size_t slavesCnt = 1; // This thread is always included
+ Thread* slave;
- for (size_t i = 0; i < threads.size() && !Fake; ++i)
- if (threads[i]->is_available_to(master) && ++slavesCnt <= maxThreadsPerSplitPoint)
- {
- sp.slavesMask |= 1ULL << threads[i]->idx;
- threads[i]->activeSplitPoint = &sp;
- threads[i]->searching = true; // Slave leaves idle_loop()
- threads[i]->notify_one(); // Could be sleeping
- }
+ while ( (slave = Threads.available_slave(this)) != NULL
+ && ++slavesCnt <= Threads.maxThreadsPerSplitPoint && !Fake)
+ {
+ sp.slavesMask |= 1ULL << slave->idx;
+ slave->activeSplitPoint = &sp;
+ slave->searching = true; // Slave leaves idle_loop()
+ slave->notify_one(); // Could be sleeping
+ }
sp.mutex.unlock();
- mutex.unlock();
+ Threads.mutex.unlock();
// Everything is set up. The master thread enters the idle loop, from which
// it will instantly launch a search, because its 'searching' flag is set.
// their work at this split point.
if (slavesCnt > 1 || Fake)
{
- master->Thread::idle_loop(); // Force a call to base class idle_loop()
+ Thread::idle_loop(); // Force a call to base class idle_loop()
// In helpful master concept a master can help only a sub-tree of its split
// point, and because here is all finished is not possible master is booked.
- assert(!master->searching);
+ assert(!searching);
+ assert(!activePosition);
}
// We have returned from the idle loop, which means that all threads are
// finished. Note that setting 'searching' and decreasing splitPointsSize is
// done under lock protection to avoid a race with Thread::is_available_to().
- mutex.lock();
+ Threads.mutex.lock();
sp.mutex.lock();
- master->searching = true;
- master->splitPointsSize--;
- master->activeSplitPoint = sp.parent;
+ searching = true;
+ splitPointsSize--;
+ activeSplitPoint = sp.parentSplitPoint;
+ activePosition = &pos;
pos.set_nodes_searched(pos.nodes_searched() + sp.nodes);
*bestMove = sp.bestMove;
+ *bestValue = sp.bestValue;
sp.mutex.unlock();
- mutex.unlock();
-
- return sp.bestValue;
+ Threads.mutex.unlock();
}
// Explicit template instantiations
-template Value ThreadPool::split<false>(Position&, Stack*, Value, Value, Value, Move*, Depth, Move, int, MovePicker&, int);
-template Value ThreadPool::split<true>(Position&, Stack*, Value, Value, Value, Move*, Depth, Move, int, MovePicker&, int);
+template void Thread::split<false>(Position&, Stack*, Value, Value, Value*, Move*, Depth, Move, int, MovePicker*, int);
+template void Thread::split< true>(Position&, Stack*, Value, Value, Value*, Move*, Depth, Move, int, MovePicker*, int);
// wait_for_think_finished() waits for main thread to go to sleep then returns
RootMoves.clear();
for (MoveList<LEGAL> ml(pos); !ml.end(); ++ml)
- if (searchMoves.empty() || count(searchMoves.begin(), searchMoves.end(), ml.move()))
+ if ( searchMoves.empty()
+ || std::count(searchMoves.begin(), searchMoves.end(), ml.move()))
RootMoves.push_back(RootMove(ml.move()));
main_thread()->thinking = true;
// Const data after split point has been setup
const Position* pos;
const Search::Stack* ss;
- Thread* master;
+ Thread* masterThread;
Depth depth;
Value beta;
int nodeType;
Move threatMove;
// Const pointers to shared data
- MovePicker* mp;
- SplitPoint* parent;
+ MovePicker* movePicker;
+ SplitPoint* parentSplitPoint;
// Shared data
Mutex mutex;
- Position* slavesPositions[MAX_THREADS];
volatile uint64_t slavesMask;
volatile int64_t nodes;
volatile Value alpha;
bool is_available_to(Thread* master) const;
void wait_for(volatile const bool& b);
+ template <bool Fake>
+ void split(Position& pos, Search::Stack* ss, Value alpha, Value beta, Value* bestValue, Move* bestMove,
+ Depth depth, Move threatMove, int moveCount, MovePicker* movePicker, int nodeType);
+
SplitPoint splitPoints[MAX_SPLITPOINTS_PER_THREAD];
Material::Table materialTable;
Endgames endgames;
Pawns::Table pawnsTable;
+ Position* activePosition;
size_t idx;
int maxPly;
Mutex mutex;
};
-/// ThreadPool class handles all the threads related stuff like init, starting,
+/// ThreadPool struct handles all the thread-related stuff like init, starting,
/// parking and, the most important, launching a slave thread at a split point.
/// All the access to shared thread data is done through this class.
-class ThreadPool {
+struct ThreadPool : public std::vector<Thread*> {
-public:
void init(); // No c'tor and d'tor, threads rely on globals that should
void exit(); // be initialized and valid during the whole thread lifetime.
- Thread& operator[](size_t id) { return *threads[id]; }
- size_t size() const { return threads.size(); }
- MainThread* main_thread() { return static_cast<MainThread*>(threads[0]); }
- TimerThread* timer_thread() { return timer; }
-
+ MainThread* main_thread() { return static_cast<MainThread*>((*this)[0]); }
void read_uci_options();
- bool slave_available(Thread* master) const;
+ Thread* available_slave(Thread* master) const;
void wait_for_think_finished();
void start_thinking(const Position&, const Search::LimitsType&,
const std::vector<Move>&, Search::StateStackPtr&);
- template <bool Fake>
- Value split(Position& pos, Search::Stack* ss, Value alpha, Value beta, Value bestValue, Move* bestMove,
- Depth depth, Move threatMove, int moveCount, MovePicker& mp, int nodeType);
-
bool sleepWhileIdle;
Depth minimumSplitDepth;
+ size_t maxThreadsPerSplitPoint;
Mutex mutex;
ConditionVariable sleepCondition;
-
-private:
- std::vector<Thread*> threads;
TimerThread* timer;
- size_t maxThreadsPerSplitPoint;
};
extern ThreadPool Threads;
TranspositionTable TT; // Our global transposition table
-TranspositionTable::TranspositionTable() {
-
- size = generation = 0;
- entries = NULL;
-}
-
-TranspositionTable::~TranspositionTable() {
-
- delete [] entries;
-}
-
/// TranspositionTable::set_size() sets the size of the transposition table,
-/// measured in megabytes. Transposition table consists of a power of 2 number of
-/// TTCluster and each cluster consists of ClusterSize number of TTEntries. Each
-/// non-empty entry contains information of exactly one position.
+/// measured in megabytes. Transposition table consists of a power of 2 number
+/// of clusters and each cluster consists of ClusterSize number of TTEntry.
void TranspositionTable::set_size(size_t mbSize) {
- size_t newSize = 1ULL << msb((mbSize << 20) / sizeof(TTCluster));
+ assert(msb((mbSize << 20) / sizeof(TTEntry)) < 32);
- if (newSize == size)
+ uint32_t size = ClusterSize << msb((mbSize << 20) / sizeof(TTEntry[ClusterSize]));
+
+ if (hashMask == size - ClusterSize)
return;
- size = newSize;
- delete [] entries;
- entries = new (std::nothrow) TTCluster[size];
+ hashMask = size - ClusterSize;
+ delete [] table;
+ table = new (std::nothrow) TTEntry[size];
- if (!entries)
+ if (!table)
{
std::cerr << "Failed to allocate " << mbSize
<< "MB for transposition table." << std::endl;
void TranspositionTable::clear() {
- memset(entries, 0, size * sizeof(TTCluster));
+ memset(table, 0, (hashMask + ClusterSize) * sizeof(TTEntry));
}
/// more valuable than a TTEntry t2 if t1 is from the current search and t2 is from
/// a previous search, or if the depth of t1 is bigger than the depth of t2.
-void TranspositionTable::store(const Key posKey, Value v, Bound t, Depth d, Move m, Value statV, Value kingD) {
+void TranspositionTable::store(const Key key, Value v, Bound t, Depth d, Move m, Value statV, Value kingD) {
int c1, c2, c3;
TTEntry *tte, *replace;
- uint32_t posKey32 = posKey >> 32; // Use the high 32 bits as key inside the cluster
+ uint32_t key32 = key >> 32; // Use the high 32 bits as key inside the cluster
- tte = replace = first_entry(posKey);
+ tte = replace = first_entry(key);
- for (int i = 0; i < ClusterSize; i++, tte++)
+ for (unsigned i = 0; i < ClusterSize; i++, tte++)
{
- if (!tte->key() || tte->key() == posKey32) // Empty or overwrite old
+ if (!tte->key() || tte->key() == key32) // Empty or overwrite old
{
// Preserve any existing ttMove
if (m == MOVE_NONE)
m = tte->move();
- tte->save(posKey32, v, t, d, m, generation, statV, kingD);
+ tte->save(key32, v, t, d, m, generation, statV, kingD);
return;
}
if (c1 + c2 + c3 > 0)
replace = tte;
}
- replace->save(posKey32, v, t, d, m, generation, statV, kingD);
+ replace->save(key32, v, t, d, m, generation, statV, kingD);
}
/// transposition table. Returns a pointer to the TTEntry or NULL if
/// position is not found.
-TTEntry* TranspositionTable::probe(const Key posKey) const {
+TTEntry* TranspositionTable::probe(const Key key) const {
- uint32_t posKey32 = posKey >> 32;
- TTEntry* tte = first_entry(posKey);
+ TTEntry* tte = first_entry(key);
+ uint32_t key32 = key >> 32;
- for (int i = 0; i < ClusterSize; i++, tte++)
- if (tte->key() == posKey32)
+ for (unsigned i = 0; i < ClusterSize; i++, tte++)
+ if (tte->key() == key32)
return tte;
return NULL;
}
-
-
-/// TranspositionTable::new_search() is called at the beginning of every new
-/// search. It increments the "generation" variable, which is used to
-/// distinguish transposition table entries from previous searches from
-/// entries from the current search.
-
-void TranspositionTable::new_search() {
- generation++;
-}
class TTEntry {
public:
- void save(uint32_t k, Value v, Bound b, Depth d, Move m, int g, Value statV, Value statM) {
+ void save(uint32_t k, Value v, Bound b, Depth d, Move m, int g, Value ev, Value em) {
key32 = (uint32_t)k;
move16 = (uint16_t)m;
generation8 = (uint8_t)g;
value16 = (int16_t)v;
depth16 = (int16_t)d;
- staticValue = (int16_t)statV;
- staticMargin = (int16_t)statM;
+ evalValue = (int16_t)ev;
+ evalMargin = (int16_t)em;
}
void set_generation(int g) { generation8 = (uint8_t)g; }
- uint32_t key() const { return key32; }
- Depth depth() const { return (Depth)depth16; }
- Move move() const { return (Move)move16; }
- Value value() const { return (Value)value16; }
- Bound type() const { return (Bound)bound; }
- int generation() const { return (int)generation8; }
- Value static_value() const { return (Value)staticValue; }
- Value static_value_margin() const { return (Value)staticMargin; }
+ uint32_t key() const { return key32; }
+ Depth depth() const { return (Depth)depth16; }
+ Move move() const { return (Move)move16; }
+ Value value() const { return (Value)value16; }
+ Bound type() const { return (Bound)bound; }
+ int generation() const { return (int)generation8; }
+ Value eval_value() const { return (Value)evalValue; }
+ Value eval_margin() const { return (Value)evalMargin; }
private:
uint32_t key32;
uint16_t move16;
uint8_t bound, generation8;
- int16_t value16, depth16, staticValue, staticMargin;
+ int16_t value16, depth16, evalValue, evalMargin;
};
-/// This is the number of TTEntry slots for each cluster
-const int ClusterSize = 4;
-
-
-/// TTCluster consists of ClusterSize number of TTEntries. Size of TTCluster
-/// must not be bigger than a cache line size. In case it is less, it should
-/// be padded to guarantee always aligned accesses.
-
-struct TTCluster {
- TTEntry data[ClusterSize];
-};
-
-
-/// The transposition table class. This is basically just a huge array containing
-/// TTCluster objects, and a few methods for writing and reading entries.
+/// A TranspositionTable consists of a power of 2 number of clusters and each
+/// cluster consists of ClusterSize number of TTEntry. Each non-empty entry
+/// contains information of exactly one position. Size of a cluster shall not be
+/// bigger than a cache line size. In case it is less, it should be padded to
+/// guarantee always aligned accesses.
class TranspositionTable {
- TranspositionTable(const TranspositionTable&);
- TranspositionTable& operator=(const TranspositionTable&);
+  static const unsigned ClusterSize = 4; // A cluster is 64 bytes
public:
- TranspositionTable();
- ~TranspositionTable();
+ ~TranspositionTable() { delete [] table; }
+ void new_search() { generation++; }
+
+ TTEntry* probe(const Key key) const;
+ TTEntry* first_entry(const Key key) const;
+ void refresh(const TTEntry* tte) const;
void set_size(size_t mbSize);
void clear();
- void store(const Key posKey, Value v, Bound type, Depth d, Move m, Value statV, Value kingD);
- TTEntry* probe(const Key posKey) const;
- void new_search();
- TTEntry* first_entry(const Key posKey) const;
- void refresh(const TTEntry* tte) const;
+ void store(const Key key, Value v, Bound type, Depth d, Move m, Value statV, Value kingD);
private:
- size_t size;
- TTCluster* entries;
+ uint32_t hashMask;
+ TTEntry* table;
uint8_t generation; // Size must be not bigger then TTEntry::generation8
};
/// a cluster given a position. The lowest order bits of the key are used to
/// get the index of the cluster.
-inline TTEntry* TranspositionTable::first_entry(const Key posKey) const {
+inline TTEntry* TranspositionTable::first_entry(const Key key) const {
- return entries[((uint32_t)posKey) & (size - 1)].data;
+ return table + ((uint32_t)key & hashMask);
}
return ch;
}
-/// Our insertion sort implementation, works with pointers and iterators and is
-/// guaranteed to be stable, as is needed.
-template<typename T, typename K>
-void sort(K begin, K end)
-{
- T tmp;
- K p, q;
-
- for (p = begin + 1; p < end; p++)
- {
- tmp = *p;
- for (q = p; q != begin && *(q-1) < tmp; --q)
- *q = *(q-1);
- *q = tmp;
- }
-}
-
#endif // !defined(TYPES_H_INCLUDED)
void set_option(istringstream& up);
void set_position(Position& pos, istringstream& up);
- void go(Position& pos, istringstream& up);
+ void go(const Position& pos, istringstream& up);
}
// the thinking time and other parameters from the input string, and starts
// the search.
- void go(Position& pos, istringstream& is) {
+ void go(const Position& pos, istringstream& is) {
Search::LimitsType limits;
vector<Move> searchMoves;