complexityAverage.set(155, 1);
- trend = SCORE_ZERO;
- optimism[ us] = Value(37);
- optimism[~us] = -optimism[us];
+ trend = SCORE_ZERO;
+ optimism[us] = optimism[~us] = VALUE_ZERO;
int searchAgainCounter = 0;
beta = std::min(prev + delta, VALUE_INFINITE);
// Adjust trend and optimism based on root move's previousScore
- int tr = sigmoid(prev, 3, 10, 89, 116, 1);
+ int tr = 116 * prev / (std::abs(prev) + 89);
trend = (us == WHITE ? make_score(tr, tr / 2)
: -make_score(tr, tr / 2));
- int opt = sigmoid(prev, 7, 20, 169, 19350, 164);
+ int opt = 118 * prev / (std::abs(prev) + 169);
optimism[ us] = Value(opt);
optimism[~us] = -optimism[us];
}
&& depth < 8
&& eval - futility_margin(depth, improving) - (ss-1)->statScore / 303 >= beta
&& eval >= beta
- && eval < 28031) // larger than VALUE_KNOWN_WIN, but smaller than TB wins.
+ && eval < 28031) // larger than VALUE_KNOWN_WIN, but smaller than TB wins
return eval;
// Step 9. Null move search with verification search (~22 Elo)
if (singularQuietLMR)
r--;
- // Increase reduction if next ply has a lot of fail high else reset count to 0
+ // Decrease reduction if we move a threatened piece (~1 Elo)
+ if ( depth > 9
+ && (mp.threatenedPieces & from_sq(move)))
+ r--;
+
+ // Increase reduction if next ply has a lot of fail high
if ((ss+1)->cutoffCnt > 3 && !PvNode)
r++;
ss << (v >= beta ? " lowerbound" : v <= alpha ? " upperbound" : "");
ss << " nodes " << nodesSearched
- << " nps " << nodesSearched * 1000 / elapsed;
-
- if (elapsed > 1000) // Earlier makes little sense
- ss << " hashfull " << TT.hashfull();
-
- ss << " tbhits " << tbHits
+ << " nps " << nodesSearched * 1000 / elapsed
+ << " hashfull " << TT.hashfull()
+ << " tbhits " << tbHits
<< " time " << elapsed
<< " pv";