WIN = 4
};
- inline Result& operator|=(Result& r, Result v) { return r = Result(r | v); }
+ Result& operator|=(Result& r, Result v) { return r = Result(r | v); }
struct KPKPosition {
KPKPosition() = default;
// bsf_index() returns the index into BSFTable[] to look up the bitscan. Uses
// Matt Taylor's folding for 32 bit case, extended to 64 bit by Kim Walisch.
- FORCE_INLINE unsigned bsf_index(Bitboard b) {
+ unsigned bsf_index(Bitboard b) {
b ^= b - 1;
return Is64Bit ? (b * DeBruijn64) >> 58
: ((unsigned(b) ^ unsigned(b >> 32)) * DeBruijn32) >> 26;
/// piece of type Pt (bishop or rook) placed on 's'. The helper magic_index()
/// looks up the index using the 'magic bitboards' approach.
template<PieceType Pt>
-FORCE_INLINE unsigned magic_index(Square s, Bitboard occupied) {
+inline unsigned magic_index(Square s, Bitboard occupied) {
Bitboard* const Masks = Pt == ROOK ? RookMasks : BishopMasks;
Bitboard* const Magics = Pt == ROOK ? RookMagics : BishopMagics;
# if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
-FORCE_INLINE Square lsb(Bitboard b) {
+inline Square lsb(Bitboard b) {
unsigned long idx;
_BitScanForward64(&idx, b);
return (Square) idx;
}
-FORCE_INLINE Square msb(Bitboard b) {
+inline Square msb(Bitboard b) {
unsigned long idx;
_BitScanReverse64(&idx, b);
return (Square) idx;
# elif defined(__arm__)
// lsb32() finds the least significant set bit of a 32-bit word on ARM:
// 'rbit' reverses the bit order, so a count of leading zeros on the
// reversed value gives the original LSB index. Precondition: v != 0.
inline int lsb32(uint32_t v) {
  __asm__("rbit %0, %1" : "=r"(v) : "r"(v));
  return __builtin_clz(v);
}
-FORCE_INLINE Square msb(Bitboard b) {
+inline Square msb(Bitboard b) {
return (Square) (63 - __builtin_clzll(b));
}
-FORCE_INLINE Square lsb(Bitboard b) {
+inline Square lsb(Bitboard b) {
return (Square) (uint32_t(b) ? lsb32(uint32_t(b)) : 32 + lsb32(uint32_t(b >> 32)));
}
# else // Assumed gcc or compatible compiler
-FORCE_INLINE Square lsb(Bitboard b) { // Assembly code by Heinz van Saanen
+inline Square lsb(Bitboard b) { // Assembly code by Heinz van Saanen
Bitboard idx;
__asm__("bsfq %1, %0": "=r"(idx): "rm"(b) );
return (Square) idx;
}
-FORCE_INLINE Square msb(Bitboard b) {
+inline Square msb(Bitboard b) {
Bitboard idx;
__asm__("bsrq %1, %0": "=r"(idx): "rm"(b) );
return (Square) idx;
/// pop_lsb() finds and clears the least significant bit in a non-zero bitboard
-FORCE_INLINE Square pop_lsb(Bitboard* b) {
+inline Square pop_lsb(Bitboard* b) {
const Square s = lsb(*b);
*b &= *b - 1;
return s;
template<GenType Type, Square Delta>
- inline ExtMove* make_promotions(ExtMove* moveList, Square to, const CheckInfo* ci) {
+ ExtMove* make_promotions(ExtMove* moveList, Square to, const CheckInfo* ci) {
if (Type == CAPTURES || Type == EVASIONS || Type == NON_EVASIONS)
*moveList++ = make<PROMOTION>(to - Delta, to, QUEEN);
}
- template<PieceType Pt, bool Checks> FORCE_INLINE
+ template<PieceType Pt, bool Checks>
ExtMove* generate_moves(const Position& pos, ExtMove* moveList, Color us,
Bitboard target, const CheckInfo* ci) {
}
- template<Color Us, GenType Type> FORCE_INLINE
+ template<Color Us, GenType Type>
ExtMove* generate_all(const Position& pos, ExtMove* moveList, Bitboard target,
const CheckInfo* ci = nullptr) {
// pick_best() finds the best move in the range (begin, end) and moves it to
// the front. It's faster than sorting all the moves in advance when there
// are few moves e.g. the possible captures.
- inline Move pick_best(ExtMove* begin, ExtMove* end)
+ Move pick_best(ExtMove* begin, ExtMove* end)
{
std::swap(*begin, *std::max_element(begin, end));
return *begin;
// valuable attacker for the side to move, remove the attacker we just found
// from the bitboards and scan for new X-ray attacks behind it.
-template<int Pt> FORCE_INLINE
+template<int Pt>
PieceType min_attacker(const Bitboard* bb, const Square& to, const Bitboard& stmAttackers,
Bitboard& occupied, Bitboard& attackers) {
return (PieceType)Pt;
}
-template<> FORCE_INLINE
+template<>
PieceType min_attacker<KING>(const Bitboard*, const Square&, const Bitboard&, Bitboard&, Bitboard&) {
return KING; // No need to update bitboards: it is the last cycle
}
enum NodeType { Root, PV, NonPV };
// Razoring and futility margin based on depth
- inline Value razor_margin(Depth d) { return Value(512 + 32 * d); }
- inline Value futility_margin(Depth d) { return Value(200 * d); }
+ Value razor_margin(Depth d) { return Value(512 + 32 * d); }
+ Value futility_margin(Depth d) { return Value(200 * d); }
// Futility and reductions lookup tables, initialized at startup
int FutilityMoveCounts[2][16]; // [improving][depth]
Depth Reductions[2][2][64][64]; // [pv][improving][depth][moveNumber]
- template <bool PvNode> inline Depth reduction(bool i, Depth d, int mn) {
+ template <bool PvNode> Depth reduction(bool i, Depth d, int mn) {
return Reductions[PvNode][i][std::min(d, 63 * ONE_PLY)][std::min(mn, 63)];
}
# define pext(b, m) (0)
#endif
// The FORCE_INLINE macro has been retired in favour of plain 'inline'.
#ifdef USE_POPCNT
const bool HasPopCnt = true;
#else