-#include "features/index_list.h"
-
-#include <cstring> // std::memset()
-
-namespace Eval::NNUE {
-
- // If vector instructions are enabled, we update and refresh the
- // accumulator tile by tile such that each tile fits in the CPU's
- // vector registers.
- #define VECTOR
-
- #ifdef USE_AVX512
- typedef __m512i vec_t;
- #define vec_load(a) _mm512_load_si512(a)
- #define vec_store(a,b) _mm512_store_si512(a,b)
- #define vec_add_16(a,b) _mm512_add_epi16(a,b)
- #define vec_sub_16(a,b) _mm512_sub_epi16(a,b)
- static constexpr IndexType kNumRegs = 8; // only 8 are needed
-
- #elif USE_AVX2
- typedef __m256i vec_t;
- #define vec_load(a) _mm256_load_si256(a)
- #define vec_store(a,b) _mm256_store_si256(a,b)
- #define vec_add_16(a,b) _mm256_add_epi16(a,b)
- #define vec_sub_16(a,b) _mm256_sub_epi16(a,b)
- static constexpr IndexType kNumRegs = 16;
-
- #elif USE_SSE2
- typedef __m128i vec_t;
- #define vec_load(a) (*(a))
- #define vec_store(a,b) *(a)=(b)
- #define vec_add_16(a,b) _mm_add_epi16(a,b)
- #define vec_sub_16(a,b) _mm_sub_epi16(a,b)
- static constexpr IndexType kNumRegs = Is64Bit ? 16 : 8;
-
- #elif USE_MMX
- typedef __m64 vec_t;
- #define vec_load(a) (*(a))
- #define vec_store(a,b) *(a)=(b)
- #define vec_add_16(a,b) _mm_add_pi16(a,b)
- #define vec_sub_16(a,b) _mm_sub_pi16(a,b)
- static constexpr IndexType kNumRegs = 8;
-
- #elif USE_NEON
- typedef int16x8_t vec_t;
- #define vec_load(a) (*(a))
- #define vec_store(a,b) *(a)=(b)
- #define vec_add_16(a,b) vaddq_s16(a,b)
- #define vec_sub_16(a,b) vsubq_s16(a,b)
- static constexpr IndexType kNumRegs = 16;
-
- #else
- #undef VECTOR
-
- #endif
-
- // Input feature converter
- class FeatureTransformer {
+#include "nnue_common.h"
+
+namespace Stockfish::Eval::NNUE {
+
+using BiasType = std::int16_t;
+using WeightType = std::int16_t;
+using PSQTWeightType = std::int32_t;
+
+// If vector instructions are enabled, we update and refresh the
+// accumulator tile by tile such that each tile fits in the CPU's
+// vector registers.
+#define VECTOR
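+
+// As an illustrative sketch only (the names below are hypothetical, not the
+// actual update code): with NumRegs accumulator registers of type vec_t,
+// one tile is processed roughly as
+//
+//     vec_t acc[NumRegs];                      // load one tile
+//     acc[i] = vec_add_16(acc[i], column[i]);  // for each added feature
+//     acc[i] = vec_sub_16(acc[i], column[i]);  // for each removed feature
+//                                              // then store the tile back
+//
+// so the whole tile stays resident in vector registers across all updates.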
+
+static_assert(PSQTBuckets % 8 == 0,
+ "Per feature PSQT values cannot be processed at granularity lower than 8 at a time.");
+
+#ifdef USE_AVX512
+using vec_t = __m512i;
+using psqt_vec_t = __m256i;
+ #define vec_load(a) _mm512_load_si512(a)
+ #define vec_store(a, b) _mm512_store_si512(a, b)
+ #define vec_add_16(a, b) _mm512_add_epi16(a, b)
+ #define vec_sub_16(a, b) _mm512_sub_epi16(a, b)
+ #define vec_mul_16(a, b) _mm512_mullo_epi16(a, b)
+ #define vec_zero() _mm512_setzero_epi32()
+ #define vec_set_16(a) _mm512_set1_epi16(a)
+ #define vec_max_16(a, b) _mm512_max_epi16(a, b)
+ #define vec_min_16(a, b) _mm512_min_epi16(a, b)
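+// Shift each 16-bit lane right by 7 and saturate-pack the two inputs down
+// to 8-bit lanes. _mm512_packs_epi16 interleaves its results per 128-bit
+// lane, so the qword permute restores a's packed halves followed by b's in
+// contiguous order.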
+inline vec_t vec_msb_pack_16(vec_t a, vec_t b) {
+ vec_t compacted = _mm512_packs_epi16(_mm512_srli_epi16(a, 7), _mm512_srli_epi16(b, 7));
+ return _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 2, 4, 6, 1, 3, 5, 7), compacted);
+}
+ #define vec_load_psqt(a) _mm256_load_si256(a)
+ #define vec_store_psqt(a, b) _mm256_store_si256(a, b)
+ #define vec_add_psqt_32(a, b) _mm256_add_epi32(a, b)
+ #define vec_sub_psqt_32(a, b) _mm256_sub_epi32(a, b)
+ #define vec_zero_psqt() _mm256_setzero_si256()
+ #define NumRegistersSIMD 16
+ #define MaxChunkSize 64
+
+#elif USE_AVX2
+using vec_t = __m256i;
+using psqt_vec_t = __m256i;
+ #define vec_load(a) _mm256_load_si256(a)
+ #define vec_store(a, b) _mm256_store_si256(a, b)
+ #define vec_add_16(a, b) _mm256_add_epi16(a, b)
+ #define vec_sub_16(a, b) _mm256_sub_epi16(a, b)
+ #define vec_mul_16(a, b) _mm256_mullo_epi16(a, b)
+ #define vec_zero() _mm256_setzero_si256()
+ #define vec_set_16(a) _mm256_set1_epi16(a)
+ #define vec_max_16(a, b) _mm256_max_epi16(a, b)
+ #define vec_min_16(a, b) _mm256_min_epi16(a, b)
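+// Same idea as the AVX-512 variant: _mm256_packs_epi16 packs within each
+// 128-bit lane, leaving the 64-bit blocks ordered [a0 b0 a1 b1]; the
+// permute mask 0b11011000 reorders them to [a0 a1 b0 b1].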
+inline vec_t vec_msb_pack_16(vec_t a, vec_t b) {
+ vec_t compacted = _mm256_packs_epi16(_mm256_srli_epi16(a, 7), _mm256_srli_epi16(b, 7));
+ return _mm256_permute4x64_epi64(compacted, 0b11011000);
+}
+ #define vec_load_psqt(a) _mm256_load_si256(a)
+ #define vec_store_psqt(a, b) _mm256_store_si256(a, b)
+ #define vec_add_psqt_32(a, b) _mm256_add_epi32(a, b)
+ #define vec_sub_psqt_32(a, b) _mm256_sub_epi32(a, b)
+ #define vec_zero_psqt() _mm256_setzero_si256()
+ #define NumRegistersSIMD 16
+ #define MaxChunkSize 32
+
+#elif USE_SSE2
+using vec_t = __m128i;
+using psqt_vec_t = __m128i;
+ #define vec_load(a) (*(a))
+ #define vec_store(a, b) *(a) = (b)
+ #define vec_add_16(a, b) _mm_add_epi16(a, b)
+ #define vec_sub_16(a, b) _mm_sub_epi16(a, b)
+ #define vec_mul_16(a, b) _mm_mullo_epi16(a, b)
+ #define vec_zero() _mm_setzero_si128()
+ #define vec_set_16(a) _mm_set1_epi16(a)
+ #define vec_max_16(a, b) _mm_max_epi16(a, b)
+ #define vec_min_16(a, b) _mm_min_epi16(a, b)
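+// In the 128-bit case _mm_packs_epi16 already produces the lanes in order,
+// so no post-pack shuffle is needed.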
+ #define vec_msb_pack_16(a, b) _mm_packs_epi16(_mm_srli_epi16(a, 7), _mm_srli_epi16(b, 7))
+ #define vec_load_psqt(a) (*(a))
+ #define vec_store_psqt(a, b) *(a) = (b)
+ #define vec_add_psqt_32(a, b) _mm_add_epi32(a, b)
+ #define vec_sub_psqt_32(a, b) _mm_sub_epi32(a, b)
+ #define vec_zero_psqt() _mm_setzero_si128()
+ #define NumRegistersSIMD (Is64Bit ? 16 : 8)
+ #define MaxChunkSize 16
+
+#elif USE_NEON
+using vec_t = int16x8_t;
+using psqt_vec_t = int32x4_t;
+ #define vec_load(a) (*(a))
+ #define vec_store(a, b) *(a) = (b)
+ #define vec_add_16(a, b) vaddq_s16(a, b)
+ #define vec_sub_16(a, b) vsubq_s16(a, b)
+ #define vec_mul_16(a, b) vmulq_s16(a, b)
+ #define vec_zero() \
+ vec_t { 0 }
+ #define vec_set_16(a) vdupq_n_s16(a)
+ #define vec_max_16(a, b) vmaxq_s16(a, b)
+ #define vec_min_16(a, b) vminq_s16(a, b)
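+// vshrn_n_s16 shifts each 16-bit lane right by 7 and narrows it to 8 bits
+// in a single instruction. Unlike the x86 variants this truncates instead
+// of saturating, which assumes callers have already clamped the inputs to
+// the representable range.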
+inline vec_t vec_msb_pack_16(vec_t a, vec_t b) {
+ const int8x8_t shifta = vshrn_n_s16(a, 7);
+ const int8x8_t shiftb = vshrn_n_s16(b, 7);
+ const int8x16_t compacted = vcombine_s8(shifta, shiftb);
+ return *reinterpret_cast<const vec_t*>(&compacted);
+}
+ #define vec_load_psqt(a) (*(a))
+ #define vec_store_psqt(a, b) *(a) = (b)
+ #define vec_add_psqt_32(a, b) vaddq_s32(a, b)
+ #define vec_sub_psqt_32(a, b) vsubq_s32(a, b)
+ #define vec_zero_psqt() \
+ psqt_vec_t { 0 }
+ #define NumRegistersSIMD 16
+ #define MaxChunkSize 16
+
+#else
+ #undef VECTOR
+
+#endif
+
+
+#ifdef VECTOR
+
+ // Compute optimal SIMD register count for feature transformer accumulation.
+
+ // We use __m* types as template arguments, which causes GCC to emit warnings
+ // about losing some attribute information. This is irrelevant to us as we
+ // only take their size, so the following pragmas are harmless.
+ #if defined(__GNUC__)
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wignored-attributes"
+ #endif
+
+template<typename SIMDRegisterType, typename LaneType, int NumLanes, int MaxRegisters>
+static constexpr int BestRegisterCount() {
+ #define RegisterSize sizeof(SIMDRegisterType)
+ #define LaneSize sizeof(LaneType)
+
+ static_assert(RegisterSize >= LaneSize);
+ static_assert(MaxRegisters <= NumRegistersSIMD);
+ static_assert(MaxRegisters > 0);
+ static_assert(NumRegistersSIMD > 0);
+ static_assert(RegisterSize % LaneSize == 0);
+ static_assert((NumLanes * LaneSize) % RegisterSize == 0);
+
+ const int ideal = (NumLanes * LaneSize) / RegisterSize;
+ if (ideal <= MaxRegisters)
+ return ideal;
+
+ // Look for the largest divisor of the ideal register count that does not exceed MaxRegisters
+ for (int divisor = MaxRegisters; divisor > 1; --divisor)
+ if (ideal % divisor == 0)
+ return divisor;
+
+ return 1;
+}
+
+static constexpr int NumRegs =
+ BestRegisterCount<vec_t, WeightType, TransformedFeatureDimensions, NumRegistersSIMD>();
+static constexpr int NumPsqtRegs =
+ BestRegisterCount<psqt_vec_t, PSQTWeightType, PSQTBuckets, NumRegistersSIMD>();
+ #if defined(__GNUC__)
+ #pragma GCC diagnostic pop
+ #endif
+#endif
+
+
+// Input feature converter
+class FeatureTransformer {