// Skill level adjustment
int SkillLevel;
+ bool SkillLevelEnabled;
RKISS RK;
// Multi-threads manager object
bool check_is_dangerous(Position &pos, Move move, Value futilityBase, Value beta, Value *bValue);
bool connected_moves(const Position& pos, Move m1, Move m2);
- bool value_is_mate(Value value);
Value value_to_tt(Value v, int ply);
Value value_from_tt(Value v, int ply);
bool ok_to_use_TT(const TTEntry* tte, Depth depth, Value beta, int ply);
Value refine_eval(const TTEntry* tte, Value defaultEval, int ply);
void update_history(const Position& pos, Move move, Depth depth, Move movesSearched[], int moveCount);
void update_gains(const Position& pos, Move move, Value before, Value after);
+ void do_skill_level(Move* best, Move* ponder);
int current_search_time();
std::string value_to_uci(Value v);
// Do we have to play with skill handicap? In this case enable MultiPV that
// we will use behind the scenes to retrieve a set of possible moves.
- MultiPV = (SkillLevel < 20 ? Max(UCIMultiPV, 4) : UCIMultiPV);
+ SkillLevelEnabled = (SkillLevel < 20);
+ MultiPV = (SkillLevelEnabled ? Max(UCIMultiPV, 4) : UCIMultiPV);
// Set the number of active threads
ThreadsMgr.read_uci_options();
SearchStack ss[PLY_MAX_PLUS_2];
Value bestValues[PLY_MAX_PLUS_2];
int bestMoveChanges[PLY_MAX_PLUS_2];
- int depth, aspirationDelta;
+ int depth, aspirationDelta, skillSamplingDepth;
Value value, alpha, beta;
- Move bestMove, easyMove;
+ Move bestMove, easyMove, skillBest, skillPonder;
// Initialize stuff before a new search
memset(ss, 0, 4 * sizeof(SearchStack));
TT.new_search();
H.clear();
- *ponderMove = bestMove = easyMove = MOVE_NONE;
- depth = aspirationDelta = 0;
+ *ponderMove = bestMove = easyMove = skillBest = skillPonder = MOVE_NONE;
+ depth = aspirationDelta = skillSamplingDepth = 0;
alpha = -VALUE_INFINITE, beta = VALUE_INFINITE;
ss->currentMove = MOVE_NULL; // Hack to skip update_gains()
return MOVE_NONE;
}
+ // Choose a random sampling depth according to SkillLevel so that at low
+ // skills there is a higher risk of picking a blunder.
+ if (SkillLevelEnabled)
+ skillSamplingDepth = 4 + SkillLevel + (RK.rand<unsigned>() % 4);
+
// Iterative deepening loop
while (++depth <= PLY_MAX && (!MaxDepth || depth <= MaxDepth) && !StopRequest)
{
bestValues[depth] = value;
bestMoveChanges[depth] = Rml.bestMoveChanges;
+ // Do we need to pick the best and ponder moves now?
+ if (SkillLevelEnabled && depth == skillSamplingDepth)
+ do_skill_level(&skillBest, &skillPonder);
+
// Send PV line to GUI and to log file
for (int i = 0; i < Min(UCIMultiPV, (int)Rml.size()); i++)
cout << Rml[i].pv_info_to_uci(pos, depth, alpha, beta, i) << endl;
}
}
- // When playing with strength handicap choose best move among the MultiPV set
- // using a statistical rule dependent on SkillLevel. Idea by Heinz van Saanen.
- if (SkillLevel < 20)
+ // When using skills fake best and ponder moves with the sub-optimal ones
+ if (SkillLevelEnabled)
{
- assert(MultiPV > 1);
-
- // Rml list is already sorted by pv_score in descending order
- int s;
- int max_s = -VALUE_INFINITE;
- int size = Min(MultiPV, (int)Rml.size());
- int max = Rml[0].pv_score;
- int var = Min(max - Rml[size - 1].pv_score, PawnValueMidgame);
- int wk = 120 - 2 * SkillLevel;
-
- // PRNG sequence should be non deterministic
- for (int i = abs(get_system_time() % 50); i > 0; i--)
- RK.rand<unsigned>();
-
- // Choose best move. For each move's score we add two terms both dependent
- // on wk, one deterministic and bigger for weaker moves, and one random,
- // then we choose the move with the resulting highest score.
- for (int i = 0; i < size; i++)
- {
- s = Rml[i].pv_score;
+ if (skillBest == MOVE_NONE) // Still unassigned ?
+ do_skill_level(&skillBest, &skillPonder);
- // Don't allow crazy blunders even at very low skills
- if (i > 0 && Rml[i-1].pv_score > s + EasyMoveMargin)
- break;
-
- // This is our magical formula
- s += ((max - s) * wk + var * (RK.rand<unsigned>() % wk)) / 128;
-
- if (s > max_s)
- {
- max_s = s;
- bestMove = Rml[i].pv[0];
- *ponderMove = Rml[i].pv[1];
- }
- }
+ bestMove = skillBest;
+ *ponderMove = skillPonder;
}
return bestMove;
ValueType vt;
Value bestValue, value, oldAlpha;
Value refinedValue, nullValue, futilityBase, futilityValueScaled; // Non-PV specific
- bool isPvMove, isCheck, singularExtensionNode, moveIsCheck, captureOrPromotion, dangerous;
+ bool isPvMove, isCheck, singularExtensionNode, moveIsCheck, captureOrPromotion, dangerous, isBadCap;
bool mateThreat = false;
int moveCount = 0, playedMoveCount = 0;
int threadID = pos.thread();
&& !isCheck
&& refinedValue < beta - razor_margin(depth)
&& ttMove == MOVE_NONE
- && !value_is_mate(beta)
+ && abs(beta) < VALUE_MATE_IN_PLY_MAX
&& !pos.has_pawn_on_7th(pos.side_to_move()))
{
Value rbeta = beta - razor_margin(depth);
&& depth < RazorDepth
&& !isCheck
&& refinedValue >= beta + futility_margin(depth, 0)
- && !value_is_mate(beta)
+ && abs(beta) < VALUE_MATE_IN_PLY_MAX
&& pos.non_pawn_material(pos.side_to_move()))
return refinedValue - futility_margin(depth, 0);
&& depth > ONE_PLY
&& !isCheck
&& refinedValue >= beta
- && !value_is_mate(beta)
+ && abs(beta) < VALUE_MATE_IN_PLY_MAX
&& pos.non_pawn_material(pos.side_to_move()))
{
ss->currentMove = MOVE_NULL;
if (nullValue >= beta)
{
// Do not return unproven mate scores
- if (nullValue >= value_mate_in(PLY_MAX))
+ if (nullValue >= VALUE_MATE_IN_PLY_MAX)
nullValue = beta;
if (depth < 6 * ONE_PLY)
// Move count based pruning
if ( moveCount >= futility_move_count(depth)
&& !(threatMove && connected_threat(pos, move, threatMove))
- && bestValue > value_mated_in(PLY_MAX)) // FIXME bestValue is racy
+ && bestValue > VALUE_MATED_IN_PLY_MAX) // FIXME bestValue is racy
{
if (SpNode)
lock_grab(&(sp->lock));
// Prune moves with negative SEE at low depths
if ( predictedDepth < 2 * ONE_PLY
- && bestValue > value_mated_in(PLY_MAX)
+ && bestValue > VALUE_MATED_IN_PLY_MAX
&& pos.see_sign(move) < 0)
{
if (SpNode)
}
}
+ // Bad capture detection. Will be used by prob-cut search
+ isBadCap = depth >= 3 * ONE_PLY
+ && depth < 8 * ONE_PLY
+ && captureOrPromotion
+ && move != ttMove
+ && !dangerous
+ && !move_is_promotion(move)
+ && abs(alpha) < VALUE_MATE_IN_PLY_MAX
+ && pos.see_sign(move) < 0;
+
// Step 13. Make the move
pos.do_move(move, st, ci, moveIsCheck);
// Step 14. Reduced depth search
// If the move fails high will be re-searched at full depth.
bool doFullDepthSearch = true;
+ alpha = SpNode ? sp->alpha : alpha;
if ( depth >= 3 * ONE_PLY
&& !captureOrPromotion
ss->reduction = DEPTH_ZERO; // Restore original reduction
}
+ // Probcut search for bad captures. If a reduced search returns a value
+ // well below beta then we can (almost) safely prune the bad capture.
+ if (isBadCap)
+ {
+ ss->reduction = 3 * ONE_PLY;
+ Value redAlpha = alpha - 300;
+ Depth d = newDepth - ss->reduction;
+ value = -search<NonPV>(pos, ss+1, -(redAlpha+1), -redAlpha, d, ply+1);
+ doFullDepthSearch = (value > redAlpha);
+ ss->reduction = DEPTH_ZERO; // Restore original reduction
+ }
+
// Step 15. Full depth search
if (doFullDepthSearch)
{
// Detect non-capture evasions that are candidate to be pruned
evasionPrunable = isCheck
- && bestValue > value_mated_in(PLY_MAX)
+ && bestValue > VALUE_MATED_IN_PLY_MAX
&& !pos.move_is_capture(move)
&& !pos.can_castle(pos.side_to_move());
}
- // value_is_mate() checks if the given value is a mate one eventually
- // compensated for the ply.
-
- bool value_is_mate(Value value) {
-
- assert(abs(value) <= VALUE_INFINITE);
-
- return value <= value_mated_in(PLY_MAX)
- || value >= value_mate_in(PLY_MAX);
- }
-
-
// value_to_tt() adjusts a mate score from "plies to mate from the root" to
// "plies to mate from the current ply". Non-mate scores are unchanged.
// The function is called before storing a value to the transposition table.
Value value_to_tt(Value v, int ply) {
- if (v >= value_mate_in(PLY_MAX))
+ if (v >= VALUE_MATE_IN_PLY_MAX)
return v + ply;
- if (v <= value_mated_in(PLY_MAX))
+ if (v <= VALUE_MATED_IN_PLY_MAX)
return v - ply;
return v;
Value value_from_tt(Value v, int ply) {
- if (v >= value_mate_in(PLY_MAX))
+ if (v >= VALUE_MATE_IN_PLY_MAX)
return v - ply;
- if (v <= value_mated_in(PLY_MAX))
+ if (v <= VALUE_MATED_IN_PLY_MAX)
return v + ply;
return v;
*dangerous = true;
}
- if ( PvNode
- && captureOrPromotion
- && pos.type_of_piece_on(move_to(m)) != PAWN
- && pos.see_sign(m) >= 0)
- {
- result += ONE_PLY / 2;
- *dangerous = true;
- }
-
return Min(result, ONE_PLY);
}
Value v = value_from_tt(tte->value(), ply);
return ( tte->depth() >= depth
- || v >= Max(value_mate_in(PLY_MAX), beta)
- || v < Min(value_mated_in(PLY_MAX), beta))
+ || v >= Max(VALUE_MATE_IN_PLY_MAX, beta)
+ || v < Min(VALUE_MATED_IN_PLY_MAX, beta))
&& ( ((tte->type() & VALUE_TYPE_LOWER) && v >= beta)
|| ((tte->type() & VALUE_TYPE_UPPER) && v < beta));
if (abs(v) < VALUE_MATE - PLY_MAX * ONE_PLY)
s << "cp " << int(v) * 100 / int(PawnValueMidgame); // Scale to centipawns
else
- s << "mate " << (v > 0 ? (VALUE_MATE - v + 1) / 2 : -(VALUE_MATE + v) / 2);
+ s << "mate " << (v > 0 ? VALUE_MATE - v + 1 : -VALUE_MATE - v) / 2;
return s.str();
}
// We are line oriented, don't read single chars
std::string command;
- if (!std::getline(std::cin, command))
- command = "quit";
-
- if (command == "quit")
+ if (!std::getline(std::cin, command) || command == "quit")
{
// Quit the program as soon as possible
Pondering = false;
std::string command;
- while (true)
- {
- // Wait for a command from stdin
- if (!std::getline(std::cin, command))
- command = "quit";
+ // Wait for a command from stdin
+ while ( std::getline(std::cin, command)
+ && command != "ponderhit" && command != "stop" && command != "quit") {};
- if (command == "quit")
- {
- QuitRequest = true;
- break;
- }
- else if (command == "ponderhit" || command == "stop")
- break;
- }
+ if (command != "ponderhit" && command != "stop")
+ QuitRequest = true; // Must be "quit" or getline() returned false
}
}
}
+
+ // When playing with strength handicap choose best move among the MultiPV set
+ // using a statistical rule dependent on SkillLevel. Idea by Heinz van Saanen.
+ // Writes the selected move to *best and its expected reply to *ponder.
+ void do_skill_level(Move* best, Move* ponder) {
+
+ assert(MultiPV > 1);
+
+ // Rml list is already sorted by pv_score in descending order
+ // NOTE(review): assumes Rml is non-empty — the iterative deepening loop
+ // appears to guarantee this before the skill-level call sites; confirm.
+ int s;
+ int max_s = -VALUE_INFINITE;
+ int size = Min(MultiPV, (int)Rml.size());
+ int max = Rml[0].pv_score;
+ int var = Min(max - Rml[size - 1].pv_score, PawnValueMidgame);
+ // wk > 0 holds because this function is reached only when SkillLevel < 20
+ // (SkillLevelEnabled), so the modulo and division below are well defined.
+ int wk = 120 - 2 * SkillLevel;
+
+ // PRNG sequence should be non deterministic
+ // Burn a wall-clock-dependent number of PRNG outputs so repeated games
+ // do not replay the same "random" blunders.
+ for (int i = abs(get_system_time() % 50); i > 0; i--)
+ RK.rand<unsigned>();
+
+ // Choose best move. For each move's score we add two terms both dependent
+ // on wk, one deterministic and bigger for weaker moves, and one random,
+ // then we choose the move with the resulting highest score.
+ for (int i = 0; i < size; i++)
+ {
+ s = Rml[i].pv_score;
+
+ // Don't allow crazy blunders even at very low skills
+ if (i > 0 && Rml[i-1].pv_score > s + EasyMoveMargin)
+ break;
+
+ // This is our magical formula
+ s += ((max - s) * wk + var * (RK.rand<unsigned>() % wk)) / 128;
+
+ if (s > max_s)
+ {
+ max_s = s;
+ *best = Rml[i].pv[0];
+ // NOTE(review): pv[1] may be MOVE_NONE for a one-move PV —
+ // presumably callers accept that as "no ponder move"; verify.
+ *ponder = Rml[i].pv[1];
+ }
+ }
+ }
+
} // namespace