// Skill level adjustment
int SkillLevel;
+ bool SkillLevelEnabled;
RKISS RK;
// Multi-threads manager object
Value refine_eval(const TTEntry* tte, Value defaultEval, int ply);
void update_history(const Position& pos, Move move, Depth depth, Move movesSearched[], int moveCount);
void update_gains(const Position& pos, Move move, Value before, Value after);
+ void do_skill_level(Move* best, Move* ponder);
int current_search_time();
std::string value_to_uci(Value v);
// Do we have to play with a skill handicap? If so, enable MultiPV, which
// we will use behind the scenes to retrieve a set of possible moves.
- MultiPV = (SkillLevel < 20 ? Max(UCIMultiPV, 4) : UCIMultiPV);
+ SkillLevelEnabled = (SkillLevel < 20);
+ MultiPV = (SkillLevelEnabled ? Max(UCIMultiPV, 4) : UCIMultiPV);
// Set the number of active threads
ThreadsMgr.read_uci_options();
SearchStack ss[PLY_MAX_PLUS_2];
Value bestValues[PLY_MAX_PLUS_2];
int bestMoveChanges[PLY_MAX_PLUS_2];
- int depth, aspirationDelta;
+ int depth, aspirationDelta, skillSamplingDepth;
Value value, alpha, beta;
- Move bestMove, easyMove;
+ Move bestMove, easyMove, skillBest, skillPonder;
// Initialize stuff before a new search
memset(ss, 0, 4 * sizeof(SearchStack));
TT.new_search();
H.clear();
- *ponderMove = bestMove = easyMove = MOVE_NONE;
- depth = aspirationDelta = 0;
+ *ponderMove = bestMove = easyMove = skillBest = skillPonder = MOVE_NONE;
+ depth = aspirationDelta = skillSamplingDepth = 0;
alpha = -VALUE_INFINITE, beta = VALUE_INFINITE;
ss->currentMove = MOVE_NULL; // Hack to skip update_gains()
return MOVE_NONE;
}
+ // Choose a random sampling depth according to SkillLevel so that at low
+ // skill levels there is a higher risk of picking up a blunder.
+ if (SkillLevelEnabled)
+ skillSamplingDepth = 4 + SkillLevel + (RK.rand<unsigned>() % 4);
+
// Iterative deepening loop
while (++depth <= PLY_MAX && (!MaxDepth || depth <= MaxDepth) && !StopRequest)
{
bestValues[depth] = value;
bestMoveChanges[depth] = Rml.bestMoveChanges;
+ // Do we need to pick the best and ponder moves now?
+ if (SkillLevelEnabled && depth == skillSamplingDepth)
+ do_skill_level(&skillBest, &skillPonder);
+
// Send PV line to GUI and to log file
for (int i = 0; i < Min(UCIMultiPV, (int)Rml.size()); i++)
cout << Rml[i].pv_info_to_uci(pos, depth, alpha, beta, i) << endl;
}
}
- // When playing with strength handicap choose best move among the MultiPV set
- // using a statistical rule dependent on SkillLevel. Idea by Heinz van Saanen.
- if (SkillLevel < 20)
+ // When using skills fake best and ponder moves with the sub-optimal ones
+ if (SkillLevelEnabled)
{
- assert(MultiPV > 1);
-
- // Rml list is already sorted by pv_score in descending order
- int s;
- int max_s = -VALUE_INFINITE;
- int size = Min(MultiPV, (int)Rml.size());
- int max = Rml[0].pv_score;
- int var = Min(max - Rml[size - 1].pv_score, PawnValueMidgame);
- int wk = 120 - 2 * SkillLevel;
-
- // PRNG sequence should be non deterministic
- for (int i = abs(get_system_time() % 50); i > 0; i--)
- RK.rand<unsigned>();
-
- // Choose best move. For each move's score we add two terms both dependent
- // on wk, one deterministic and bigger for weaker moves, and one random,
- // then we choose the move with the resulting highest score.
- for (int i = 0; i < size; i++)
- {
- s = Rml[i].pv_score;
+ if (skillBest == MOVE_NONE) // Still unassigned ?
+ do_skill_level(&skillBest, &skillPonder);
- // Don't allow crazy blunders even at very low skills
- if (i > 0 && Rml[i-1].pv_score > s + EasyMoveMargin)
- break;
-
- // This is our magical formula
- s += ((max - s) * wk + var * (RK.rand<unsigned>() % wk)) / 128;
-
- if (s > max_s)
- {
- max_s = s;
- bestMove = Rml[i].pv[0];
- *ponderMove = Rml[i].pv[1];
- }
- }
+ bestMove = skillBest;
+ *ponderMove = skillPonder;
}
return bestMove;
ValueType vt;
Value bestValue, value, oldAlpha;
Value refinedValue, nullValue, futilityBase, futilityValueScaled; // Non-PV specific
- bool isPvMove, isCheck, singularExtensionNode, moveIsCheck, captureOrPromotion, dangerous;
+ bool isPvMove, isCheck, singularExtensionNode, moveIsCheck, captureOrPromotion, dangerous, isBadCap;
bool mateThreat = false;
int moveCount = 0, playedMoveCount = 0;
int threadID = pos.thread();
}
}
+ // Bad capture detection. Will be used by prob-cut search
+ isBadCap = depth >= 3 * ONE_PLY
+ && depth < 8 * ONE_PLY
+ && captureOrPromotion
+ && move != ttMove
+ && !dangerous
+ && !move_is_promotion(move)
+ && abs(alpha) < VALUE_MATE_IN_PLY_MAX
+ && pos.see_sign(move) < 0;
+
// Step 13. Make the move
pos.do_move(move, st, ci, moveIsCheck);
// Step 14. Reduced depth search
// If the move fails high will be re-searched at full depth.
bool doFullDepthSearch = true;
+ alpha = SpNode ? sp->alpha : alpha;
if ( depth >= 3 * ONE_PLY
&& !captureOrPromotion
ss->reduction = DEPTH_ZERO; // Restore original reduction
}
+ // Probcut search for bad captures. If a reduced search returns a value
+ // well below alpha then we can (almost) safely prune the bad capture.
+ if (isBadCap)
+ {
+ ss->reduction = 3 * ONE_PLY;
+ Value redAlpha = alpha - 300;
+ Depth d = newDepth - ss->reduction;
+ value = -search<NonPV>(pos, ss+1, -(redAlpha+1), -redAlpha, d, ply+1);
+ doFullDepthSearch = (value > redAlpha);
+ ss->reduction = DEPTH_ZERO; // Restore original reduction
+ }
+
// Step 15. Full depth search
if (doFullDepthSearch)
{
}
}
+
+ // When playing with strength handicap choose best move among the MultiPV set
+ // using a statistical rule dependent on SkillLevel. Idea by Heinz van Saanen.
+ // Reads the globals Rml (root move list, sorted by pv_score), MultiPV,
+ // SkillLevel and the RKISS PRNG RK. Writes the chosen move to *best and its
+ // PV continuation to *ponder.
+ // NOTE(review): *ponder is taken from pv[1] — presumably valid whenever
+ // pv[0] is set; confirm behavior for one-move PVs.
+ void do_skill_level(Move* best, Move* ponder) {
+
+ assert(MultiPV > 1);
+
+ // Rml list is already sorted by pv_score in descending order
+ int s;
+ int max_s = -VALUE_INFINITE;
+ int size = Min(MultiPV, (int)Rml.size());
+ int max = Rml[0].pv_score;
+ int var = Min(max - Rml[size - 1].pv_score, PawnValueMidgame);
+ int wk = 120 - 2 * SkillLevel; // weight grows as SkillLevel drops
+
+ // Make the PRNG sequence non-deterministic by discarding a
+ // time-dependent number of draws first
+ for (int i = abs(get_system_time() % 50); i > 0; i--)
+ RK.rand<unsigned>();
+
+ // Choose best move. For each move's score we add two terms both dependent
+ // on wk, one deterministic and bigger for weaker moves, and one random,
+ // then we choose the move with the resulting highest score.
+ for (int i = 0; i < size; i++)
+ {
+ s = Rml[i].pv_score;
+
+ // Don't allow crazy blunders even at very low skills
+ if (i > 0 && Rml[i-1].pv_score > s + EasyMoveMargin)
+ break;
+
+ // This is our magical formula: penalize strong moves and add
+ // bounded noise, both scaled by wk
+ s += ((max - s) * wk + var * (RK.rand<unsigned>() % wk)) / 128;
+
+ if (s > max_s)
+ {
+ max_s = s;
+ *best = Rml[i].pv[0];
+ *ponder = Rml[i].pv[1];
+ }
+ }
+ }
+
} // namespace