Reduce SIMD register count from 32 to 16
diff --git a/src/nnue/nnue_feature_transformer.h b/src/nnue/nnue_feature_transformer.h
index 2b6259c328111e0e143d61d534eb80ab934b92a0..902918b2c6ff433656a110c9714481a7b72eac77 100644
--- a/src/nnue/nnue_feature_transformer.h
+++ b/src/nnue/nnue_feature_transformer.h
@@ -1,6 +1,6 @@
 /*
   Stockfish, a UCI chess playing engine derived from Glaurung 2.1
-  Copyright (C) 2004-2020 The Stockfish developers (see AUTHORS file)
+  Copyright (C) 2004-2023 The Stockfish developers (see AUTHORS file)
 
   Stockfish is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
 #ifndef NNUE_FEATURE_TRANSFORMER_H_INCLUDED
 #define NNUE_FEATURE_TRANSFORMER_H_INCLUDED
 
-#include "nnue_common.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+#include <iosfwd>
+#include <utility>
+
+#include "../position.h"
+#include "../types.h"
+#include "nnue_accumulator.h"
 #include "nnue_architecture.h"
-#include "features/index_list.h"
+#include "nnue_common.h"
+
+namespace Stockfish::Eval::NNUE {
+
+  using BiasType       = std::int16_t;
+  using WeightType     = std::int16_t;
+  using PSQTWeightType = std::int32_t;
+
+  // If vector instructions are enabled, we update and refresh the
+  // accumulator tile by tile such that each tile fits in the CPU's
+  // vector registers.
+  #define VECTOR
+
+  static_assert(PSQTBuckets % 8 == 0,
+    "Per-feature PSQT values cannot be processed at a granularity lower than 8 at a time.");
+
+  #ifdef USE_AVX512
+  using vec_t = __m512i;
+  using psqt_vec_t = __m256i;
+  #define vec_load(a) _mm512_load_si512(a)
+  #define vec_store(a,b) _mm512_store_si512(a,b)
+  #define vec_add_16(a,b) _mm512_add_epi16(a,b)
+  #define vec_sub_16(a,b) _mm512_sub_epi16(a,b)
+  #define vec_mul_16(a,b) _mm512_mullo_epi16(a,b)
+  #define vec_zero() _mm512_setzero_epi32()
+  #define vec_set_16(a) _mm512_set1_epi16(a)
+  #define vec_max_16(a,b) _mm512_max_epi16(a,b)
+  #define vec_min_16(a,b) _mm512_min_epi16(a,b)
+  inline vec_t vec_msb_pack_16(vec_t a, vec_t b){
+    vec_t compacted = _mm512_packs_epi16(_mm512_srli_epi16(a,7),_mm512_srli_epi16(b,7));
+    return _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 2, 4, 6, 1, 3, 5, 7), compacted);
+  }
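
One subtlety worth noting: _mm512_packs_epi16 (and _mm256_packs_epi16 below) interleaves its two operands within each 128-bit lane, which is why both variants need a lane-crossing shuffle afterwards to restore plain element order. As a reference for what the operation is meant to compute, here is a scalar model (illustrative only, not part of the patch):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // Scalar semantics of vec_msb_pack_16 over two blocks of N int16
    // values: shift every element right by 7, narrow to int8 with signed
    // saturation, and emit all of a's elements before all of b's. In this
    // file the inputs are products of values clamped to [0, 127], hence
    // non-negative, so the logical shift in the intrinsics acts like an
    // arithmetic one.
    template <std::size_t N>
    void msb_pack_16_reference(const std::int16_t (&a)[N],
                               const std::int16_t (&b)[N],
                               std::int8_t (&out)[2 * N]) {
        auto narrow = [](std::int16_t v) {
            return static_cast<std::int8_t>(std::clamp(v >> 7, -128, 127));
        };
        for (std::size_t i = 0; i < N; ++i) out[i]     = narrow(a[i]);
        for (std::size_t i = 0; i < N; ++i) out[N + i] = narrow(b[i]);
    }
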
+  #define vec_load_psqt(a) _mm256_load_si256(a)
+  #define vec_store_psqt(a,b) _mm256_store_si256(a,b)
+  #define vec_add_psqt_32(a,b) _mm256_add_epi32(a,b)
+  #define vec_sub_psqt_32(a,b) _mm256_sub_epi32(a,b)
+  #define vec_zero_psqt() _mm256_setzero_si256()
+  #define NumRegistersSIMD 16
+  #define MaxChunkSize 64
+
+  #elif USE_AVX2
+  using vec_t = __m256i;
+  using psqt_vec_t = __m256i;
+  #define vec_load(a) _mm256_load_si256(a)
+  #define vec_store(a,b) _mm256_store_si256(a,b)
+  #define vec_add_16(a,b) _mm256_add_epi16(a,b)
+  #define vec_sub_16(a,b) _mm256_sub_epi16(a,b)
+  #define vec_mul_16(a,b) _mm256_mullo_epi16(a,b)
+  #define vec_zero() _mm256_setzero_si256()
+  #define vec_set_16(a) _mm256_set1_epi16(a)
+  #define vec_max_16(a,b) _mm256_max_epi16(a,b)
+  #define vec_min_16(a,b) _mm256_min_epi16(a,b)
+  inline vec_t vec_msb_pack_16(vec_t a, vec_t b){
+    vec_t compacted = _mm256_packs_epi16(_mm256_srli_epi16(a,7), _mm256_srli_epi16(b,7));
+    return _mm256_permute4x64_epi64(compacted, 0b11011000);
+  }
+  #define vec_load_psqt(a) _mm256_load_si256(a)
+  #define vec_store_psqt(a,b) _mm256_store_si256(a,b)
+  #define vec_add_psqt_32(a,b) _mm256_add_epi32(a,b)
+  #define vec_sub_psqt_32(a,b) _mm256_sub_epi32(a,b)
+  #define vec_zero_psqt() _mm256_setzero_si256()
+  #define NumRegistersSIMD 16
+  #define MaxChunkSize 32
+
+  #elif USE_SSE2
+  using vec_t = __m128i;
+  using psqt_vec_t = __m128i;
+  #define vec_load(a) (*(a))
+  #define vec_store(a,b) *(a)=(b)
+  #define vec_add_16(a,b) _mm_add_epi16(a,b)
+  #define vec_sub_16(a,b) _mm_sub_epi16(a,b)
+  #define vec_mul_16(a,b) _mm_mullo_epi16(a,b)
+  #define vec_zero() _mm_setzero_si128()
+  #define vec_set_16(a) _mm_set1_epi16(a)
+  #define vec_max_16(a,b) _mm_max_epi16(a,b)
+  #define vec_min_16(a,b) _mm_min_epi16(a,b)
+  #define vec_msb_pack_16(a,b) _mm_packs_epi16(_mm_srli_epi16(a,7),_mm_srli_epi16(b,7))
+  #define vec_load_psqt(a) (*(a))
+  #define vec_store_psqt(a,b) *(a)=(b)
+  #define vec_add_psqt_32(a,b) _mm_add_epi32(a,b)
+  #define vec_sub_psqt_32(a,b) _mm_sub_epi32(a,b)
+  #define vec_zero_psqt() _mm_setzero_si128()
+  #define NumRegistersSIMD (Is64Bit ? 16 : 8)
+  #define MaxChunkSize 16
+
+  #elif USE_MMX
+  using vec_t = __m64;
+  using psqt_vec_t = __m64;
+  #define vec_load(a) (*(a))
+  #define vec_store(a,b) *(a)=(b)
+  #define vec_add_16(a,b) _mm_add_pi16(a,b)
+  #define vec_sub_16(a,b) _mm_sub_pi16(a,b)
+  #define vec_mul_16(a,b) _mm_mullo_pi16(a,b)
+  #define vec_zero() _mm_setzero_si64()
+  #define vec_set_16(a) _mm_set1_pi16(a)
+  inline vec_t vec_max_16(vec_t a,vec_t b){
+    vec_t comparison = _mm_cmpgt_pi16(a,b);
+    return _mm_or_si64(_mm_and_si64(comparison, a), _mm_andnot_si64(comparison, b));
+  }
+  inline vec_t vec_min_16(vec_t a,vec_t b){
+    vec_t comparison = _mm_cmpgt_pi16(a,b);
+    return _mm_or_si64(_mm_and_si64(comparison, b), _mm_andnot_si64(comparison, a));
+  }
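
Baseline MMX has no packed signed max/min (pmaxsw/pminsw only arrived with the SSE extensions), so vec_max_16 and vec_min_16 are synthesized from a compare plus a branchless select. The scalar idiom behind the and/andnot/or triple, shown for a single lane (illustrative sketch):

    #include <cstdint>

    // select(mask, x, y) == (mask & x) | (~mask & y), with mask either
    // all-ones or all-zeros per lane. Here mask = (a > b), so the result
    // picks a where a > b and b elsewhere, exactly like vec_max_16.
    inline std::int16_t max16_branchless(std::int16_t a, std::int16_t b) {
        const std::uint16_t mask = static_cast<std::uint16_t>(-(a > b));
        const std::uint16_t ua   = static_cast<std::uint16_t>(a);
        const std::uint16_t ub   = static_cast<std::uint16_t>(b);
        return static_cast<std::int16_t>((mask & ua)
                 | (static_cast<std::uint16_t>(~mask) & ub));
    }
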
+  #define vec_msb_pack_16(a,b) _mm_packs_pi16(_mm_srli_pi16(a,7),_mm_srli_pi16(b,7))
+  #define vec_load_psqt(a) (*(a))
+  #define vec_store_psqt(a,b) *(a)=(b)
+  #define vec_add_psqt_32(a,b) _mm_add_pi32(a,b)
+  #define vec_sub_psqt_32(a,b) _mm_sub_pi32(a,b)
+  #define vec_zero_psqt() _mm_setzero_si64()
+  #define vec_cleanup() _mm_empty()
+  #define NumRegistersSIMD 8
+  #define MaxChunkSize 8
+
+  #elif USE_NEON
+  using vec_t = int16x8_t;
+  using psqt_vec_t = int32x4_t;
+  #define vec_load(a) (*(a))
+  #define vec_store(a,b) *(a)=(b)
+  #define vec_add_16(a,b) vaddq_s16(a,b)
+  #define vec_sub_16(a,b) vsubq_s16(a,b)
+  #define vec_mul_16(a,b) vmulq_s16(a,b)
+  #define vec_zero() vec_t{0}
+  #define vec_set_16(a) vdupq_n_s16(a)
+  #define vec_max_16(a,b) vmaxq_s16(a,b)
+  #define vec_min_16(a,b) vminq_s16(a,b)
+  inline vec_t vec_msb_pack_16(vec_t a, vec_t b){
+    const int8x8_t shifta = vshrn_n_s16(a, 7);
+    const int8x8_t shiftb = vshrn_n_s16(b, 7);
+    const int8x16_t compacted = vcombine_s8(shifta,shiftb);
+    return *reinterpret_cast<const vec_t*>(&compacted);
+  }
+  #define vec_load_psqt(a) (*(a))
+  #define vec_store_psqt(a,b) *(a)=(b)
+  #define vec_add_psqt_32(a,b) vaddq_s32(a,b)
+  #define vec_sub_psqt_32(a,b) vsubq_s32(a,b)
+  #define vec_zero_psqt() psqt_vec_t{0}
+  #define NumRegistersSIMD 16
+  #define MaxChunkSize 16
+
+  #else
+  #undef VECTOR
+
+  #endif
+
+
+  #ifdef VECTOR
+
+      // Compute optimal SIMD register count for feature transformer accumulation.
+
+      // We use __m* types as template arguments, which causes GCC to emit warnings
+      // about losing some attribute information. This is irrelevant to us as we
+      // only take their size, so the following pragmas are harmless.
+      #if defined(__GNUC__)
+      #pragma GCC diagnostic push
+      #pragma GCC diagnostic ignored "-Wignored-attributes"
+      #endif
+
+      template <typename SIMDRegisterType,
+                typename LaneType,
+                int      NumLanes,
+                int      MaxRegisters>
+      static constexpr int BestRegisterCount()
+      {
+          #define RegisterSize  sizeof(SIMDRegisterType)
+          #define LaneSize      sizeof(LaneType)
+
+          static_assert(RegisterSize >= LaneSize);
+          static_assert(MaxRegisters <= NumRegistersSIMD);
+          static_assert(MaxRegisters > 0);
+          static_assert(NumRegistersSIMD > 0);
+          static_assert(RegisterSize % LaneSize == 0);
+          static_assert((NumLanes * LaneSize) % RegisterSize == 0);
+
+          const int ideal = (NumLanes * LaneSize) / RegisterSize;
+          if (ideal <= MaxRegisters)
+            return ideal;
+
+          // Look for the largest divisor of the ideal register count that is no larger than MaxRegisters
+          for (int divisor = MaxRegisters; divisor > 1; --divisor)
+            if (ideal % divisor == 0)
+              return divisor;
+
+          return 1;
+      }
+
+      static constexpr int NumRegs     = BestRegisterCount<vec_t, WeightType, TransformedFeatureDimensions, NumRegistersSIMD>();
+      static constexpr int NumPsqtRegs = BestRegisterCount<psqt_vec_t, PSQTWeightType, PSQTBuckets, NumRegistersSIMD>();
+      #if defined(__GNUC__)
+      #pragma GCC diagnostic pop
+      #endif
+  #endif
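
To make the register-count search concrete, here is a standalone restatement of what BestRegisterCount computes, checked against assumed numbers (the real dimensions come from nnue_architecture.h; 1024 lanes and 8 PSQT buckets are illustrative guesses):

    #include <cstddef>

    constexpr int best_register_count(std::size_t registerSize, std::size_t laneSize,
                                      int numLanes, int maxRegisters) {
        const int ideal = static_cast<int>((numLanes * laneSize) / registerSize);
        if (ideal <= maxRegisters)
            return ideal;
        // Largest divisor of the ideal count that is no larger than the cap
        for (int divisor = maxRegisters; divisor > 1; --divisor)
            if (ideal % divisor == 0)
                return divisor;
        return 1;
    }

    // Assumed AVX2 setup: 1024 int16 lanes, 32-byte registers.
    // ideal = 1024 * 2 / 32 = 64 registers; the largest divisor of 64
    // that is <= 16 is 16, so one tile then spans
    // 16 registers * 32 bytes / 2 bytes per lane = 256 int16 values.
    static_assert(best_register_count(32, 2, 1024, 16) == 16, "");

    // Assumed PSQT setup: 8 int32 buckets, 32-byte registers.
    // ideal = 8 * 4 / 32 = 1, already within the cap.
    static_assert(best_register_count(32, 4, 8, 16) == 1, "");
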
 
-#include <cstring> // std::memset()
 
-namespace Eval::NNUE {
 
   // Input feature converter
   class FeatureTransformer {
 
    private:
     // Number of output dimensions for one side
-    static constexpr IndexType kHalfDimensions = kTransformedFeatureDimensions;
+    static constexpr IndexType HalfDimensions = TransformedFeatureDimensions;
+
+    #ifdef VECTOR
+    static constexpr IndexType TileHeight = NumRegs * sizeof(vec_t) / 2;
+    static constexpr IndexType PsqtTileHeight = NumPsqtRegs * sizeof(psqt_vec_t) / 4;
+    static_assert(HalfDimensions % TileHeight == 0, "TileHeight must divide HalfDimensions");
+    static_assert(PSQTBuckets % PsqtTileHeight == 0, "PsqtTileHeight must divide PSQTBuckets");
+    #endif
 
    public:
     // Output type
     using OutputType = TransformedFeatureType;
 
     // Number of input/output dimensions
-    static constexpr IndexType kInputDimensions = RawFeatures::kDimensions;
-    static constexpr IndexType kOutputDimensions = kHalfDimensions * 2;
+    static constexpr IndexType InputDimensions = FeatureSet::Dimensions;
+    static constexpr IndexType OutputDimensions = HalfDimensions;
 
     // Size of forward propagation buffer
-    static constexpr std::size_t kBufferSize =
-        kOutputDimensions * sizeof(OutputType);
+    static constexpr std::size_t BufferSize =
+        OutputDimensions * sizeof(OutputType);
 
     // Hash value embedded in the evaluation file
-    static constexpr std::uint32_t GetHashValue() {
-
-      return RawFeatures::kHashValue ^ kOutputDimensions;
+    static constexpr std::uint32_t get_hash_value() {
+      return FeatureSet::HashValue ^ (OutputDimensions * 2);
     }
 
     // Read network parameters
-    bool ReadParameters(std::istream& stream) {
+    bool read_parameters(std::istream& stream) {
+
+      read_leb_128<BiasType      >(stream, biases     , HalfDimensions                  );
+      read_leb_128<WeightType    >(stream, weights    , HalfDimensions * InputDimensions);
+      read_leb_128<PSQTWeightType>(stream, psqtWeights, PSQTBuckets    * InputDimensions);
 
-      for (std::size_t i = 0; i < kHalfDimensions; ++i)
-        biases_[i] = read_little_endian<BiasType>(stream);
-      for (std::size_t i = 0; i < kHalfDimensions * kInputDimensions; ++i)
-        weights_[i] = read_little_endian<WeightType>(stream);
       return !stream.fail();
     }
 
-    // Proceed with the difference calculation if possible
-    bool UpdateAccumulatorIfPossible(const Position& pos) const {
-
-      const auto now = pos.state();
-      if (now->accumulator.computed_accumulation)
-        return true;
+    // Write network parameters
+    bool write_parameters(std::ostream& stream) const {
 
-      const auto prev = now->previous;
-      if (prev && prev->accumulator.computed_accumulation) {
-        UpdateAccumulator(pos);
-        return true;
-      }
+      write_leb_128<BiasType      >(stream, biases     , HalfDimensions                  );
+      write_leb_128<WeightType    >(stream, weights    , HalfDimensions * InputDimensions);
+      write_leb_128<PSQTWeightType>(stream, psqtWeights, PSQTBuckets    * InputDimensions);
 
-      return false;
+      return !stream.fail();
     }
 
     // Convert input features
-    void Transform(const Position& pos, OutputType* output) const {
-
-      if (!UpdateAccumulatorIfPossible(pos))
-        RefreshAccumulator(pos);
+    std::int32_t transform(const Position& pos, OutputType* output, int bucket) const {
+      update_accumulator<WHITE>(pos);
+      update_accumulator<BLACK>(pos);
 
+      const Color perspectives[2] = {pos.side_to_move(), ~pos.side_to_move()};
       const auto& accumulation = pos.state()->accumulator.accumulation;
+      const auto& psqtAccumulation = pos.state()->accumulator.psqtAccumulation;
 
-  #if defined(USE_AVX2)
-      constexpr IndexType kNumChunks = kHalfDimensions / kSimdWidth;
-      constexpr int kControl = 0b11011000;
-      const __m256i kZero = _mm256_setzero_si256();
+      const auto psqt = (
+            psqtAccumulation[perspectives[0]][bucket]
+          - psqtAccumulation[perspectives[1]][bucket]
+        ) / 2;
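
The two psqtAccumulation entries are two estimates of the same positional-material score, one accumulated from each side's perspective (so with opposite signs); halving their difference averages them into a single side-to-move-relative value. With made-up numbers:

    // Hypothetical bucket values for the side to move and the opponent:
    int stmEstimate = +612;   // psqtAccumulation[perspectives[0]][bucket]
    int oppEstimate = -588;   // psqtAccumulation[perspectives[1]][bucket]
    int psqt = (stmEstimate - oppEstimate) / 2;   // (612 + 588) / 2 == 600
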
 
-  #elif defined(USE_SSE2)
-      constexpr IndexType kNumChunks = kHalfDimensions / kSimdWidth;
 
-  #ifdef USE_SSE41
-      const __m128i kZero = _mm_setzero_si128();
-  #else
-      const __m128i k0x80s = _mm_set1_epi8(-128);
-  #endif
+      for (IndexType p = 0; p < 2; ++p)
+      {
+          const IndexType offset = (HalfDimensions / 2) * p;
 
-  #elif defined(USE_MMX)
-      constexpr IndexType kNumChunks = kHalfDimensions / kSimdWidth;
-      const __m64 k0x80s = _mm_set1_pi8(-128);
+#if defined(VECTOR)
 
-  #elif defined(USE_NEON)
-      constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
-      const int8x8_t kZero = {0};
-  #endif
+          constexpr IndexType OutputChunkSize = MaxChunkSize;
+          static_assert((HalfDimensions / 2) % OutputChunkSize == 0);
+          constexpr IndexType NumOutputChunks = HalfDimensions / 2 / OutputChunkSize;
 
-      const Color perspectives[2] = {pos.side_to_move(), ~pos.side_to_move()};
-      for (IndexType p = 0; p < 2; ++p) {
-        const IndexType offset = kHalfDimensions * p;
-
-  #if defined(USE_AVX2)
-        auto out = reinterpret_cast<__m256i*>(&output[offset]);
-        for (IndexType j = 0; j < kNumChunks; ++j) {
-          __m256i sum0 = _mm256_loadA_si256(
-              &reinterpret_cast<const __m256i*>(accumulation[perspectives[p]][0])[j * 2 + 0]);
-          __m256i sum1 = _mm256_loadA_si256(
-            &reinterpret_cast<const __m256i*>(accumulation[perspectives[p]][0])[j * 2 + 1]);
-          _mm256_storeA_si256(&out[j], _mm256_permute4x64_epi64(_mm256_max_epi8(
-              _mm256_packs_epi16(sum0, sum1), kZero), kControl));
-        }
+          const vec_t Zero = vec_zero();
+          const vec_t One  = vec_set_16(127); // 127 == 1.0 in the quantized activation range
 
-  #elif defined(USE_SSE2)
-        auto out = reinterpret_cast<__m128i*>(&output[offset]);
-        for (IndexType j = 0; j < kNumChunks; ++j) {
-          __m128i sum0 = _mm_load_si128(&reinterpret_cast<const __m128i*>(
-              accumulation[perspectives[p]][0])[j * 2 + 0]);
-          __m128i sum1 = _mm_load_si128(&reinterpret_cast<const __m128i*>(
-              accumulation[perspectives[p]][0])[j * 2 + 1]);
-      const __m128i packedbytes = _mm_packs_epi16(sum0, sum1);
+          const vec_t* in0 = reinterpret_cast<const vec_t*>(&(accumulation[perspectives[p]][0]));
+          const vec_t* in1 = reinterpret_cast<const vec_t*>(&(accumulation[perspectives[p]][HalfDimensions / 2]));
+                vec_t* out = reinterpret_cast<      vec_t*>(output + offset);
 
-          _mm_store_si128(&out[j],
+          for (IndexType j = 0; j < NumOutputChunks; j += 1)
+          {
+              const vec_t sum0a = vec_max_16(vec_min_16(in0[j * 2 + 0], One), Zero);
+              const vec_t sum0b = vec_max_16(vec_min_16(in0[j * 2 + 1], One), Zero);
+              const vec_t sum1a = vec_max_16(vec_min_16(in1[j * 2 + 0], One), Zero);
+              const vec_t sum1b = vec_max_16(vec_min_16(in1[j * 2 + 1], One), Zero);
 
-  #ifdef USE_SSE41
-            _mm_max_epi8(packedbytes, kZero)
-  #else
-            _mm_subs_epi8(_mm_adds_epi8(packedbytes, k0x80s), k0x80s)
+              const vec_t pa = vec_mul_16(sum0a, sum1a);
+              const vec_t pb = vec_mul_16(sum0b, sum1b);
+
+              out[j] = vec_msb_pack_16(pa, pb);
+          }
+
+#else
+
+          for (IndexType j = 0; j < HalfDimensions / 2; ++j) {
+              BiasType sum0 = accumulation[static_cast<int>(perspectives[p])][j + 0];
+              BiasType sum1 = accumulation[static_cast<int>(perspectives[p])][j + HalfDimensions / 2];
+              sum0 = std::clamp<BiasType>(sum0, 0, 127);
+              sum1 = std::clamp<BiasType>(sum1, 0, 127);
+              output[offset + j] = static_cast<OutputType>(unsigned(sum0 * sum1) / 128);
+          }
+
+#endif
+      }
+
+#if defined(vec_cleanup)
+      vec_cleanup();
+#endif
+
+      return psqt;
+    } // end of function transform()
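
Both branches above compute the same activation: clamp each half of the accumulator to [0, 127], multiply the halves pairwise, and divide by 128. Because the product is at most 127 * 127 = 16129 < 2^15, the logical shift inside vec_msb_pack_16 matches the scalar division and the saturating pack never actually clips. A worked scalar instance (arbitrary inputs):

    #include <algorithm>
    #include <cstdint>

    // One output element of transform() in scalar form.
    inline std::uint8_t activation(int x0, int x1) {
        const int s0 = std::clamp(x0, 0, 127);
        const int s1 = std::clamp(x1, 0, 127);
        return static_cast<std::uint8_t>((s0 * s1) / 128);  // == (s0 * s1) >> 7
    }

    // e.g. x0 = 100, x1 = 50: 100 * 50 = 5000, and 5000 / 128 == 5000 >> 7 == 39.
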
+
+    void hint_common_access(const Position& pos) const {
+      hint_common_access_for_perspective<WHITE>(pos);
+      hint_common_access_for_perspective<BLACK>(pos);
+    }
+
+   private:
+    template<Color Perspective>
+    [[nodiscard]] std::pair<StateInfo*, StateInfo*> try_find_computed_accumulator(const Position& pos) const {
+      // Look for a usable accumulator of an earlier position. We keep track
+      // of the estimated gain in terms of features to be added/subtracted.
+      StateInfo *st = pos.state(), *next = nullptr;
+      int gain = FeatureSet::refresh_cost(pos);
+      while (st->previous && !st->accumulator.computed[Perspective])
+      {
+        // This governs when a full feature refresh is needed and how many
+        // updates are better than just one full refresh.
+        if (   FeatureSet::requires_refresh(st, Perspective)
+            || (gain -= FeatureSet::update_cost(st) + 1) < 0)
+          break;
+        next = st;
+        st = st->previous;
+      }
+      return { st, next };
+    }
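
The gain bookkeeping decides between a chain of incremental updates and one full refresh: the budget starts at the cost of refreshing the current position and pays update_cost + 1 per step back, so once it goes negative, walking further would cost more than refreshing from scratch. A sketch with invented costs (the real ones come from the feature set; the walk also stops early at a computed accumulator or a forced refresh):

    #include <cassert>

    int main() {
        int gain = 30;             // assumed FeatureSet::refresh_cost(pos)
        const int updateCost = 3;  // assumed FeatureSet::update_cost(st) per step
        int steps = 0;
        while ((gain -= updateCost + 1) >= 0)
            ++steps;               // one more state still worth updating
        assert(steps == 7);        // after 7 steps a full refresh becomes cheaper
        return 0;
    }
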
+
+    // NOTE: The parameter states_to_update is an array of position states, ending with nullptr.
+    //       All states must be sequential, that is, states_to_update[i] must either be nullptr
+    //       or reachable from states_to_update[i+1] by repeatedly following ->previous.
+    //       computed_st, if not nullptr, must be reachable from states_to_update[0] by
+    //       repeatedly following ->previous.
+    template<Color Perspective, size_t N>
+    void update_accumulator_incremental(const Position& pos, StateInfo* computed_st, StateInfo* states_to_update[N]) const {
+      static_assert(N > 0);
+      assert(states_to_update[N-1] == nullptr);
+
+  #ifdef VECTOR
+      // Gcc-10.2 unnecessarily spills AVX2 registers if this array
+      // is defined in the VECTOR code below, once in each branch
+      vec_t acc[NumRegs];
+      psqt_vec_t psqt[NumPsqtRegs];
   #endif
 
-          );
-        }
+      if (states_to_update[0] == nullptr)
+        return;
 
-  #elif defined(USE_MMX)
-        auto out = reinterpret_cast<__m64*>(&output[offset]);
-        for (IndexType j = 0; j < kNumChunks; ++j) {
-          __m64 sum0 = *(&reinterpret_cast<const __m64*>(
-              accumulation[perspectives[p]][0])[j * 2 + 0]);
-          __m64 sum1 = *(&reinterpret_cast<const __m64*>(
-              accumulation[perspectives[p]][0])[j * 2 + 1]);
-          const __m64 packedbytes = _mm_packs_pi16(sum0, sum1);
-          out[j] = _mm_subs_pi8(_mm_adds_pi8(packedbytes, k0x80s), k0x80s);
-        }
+      // Update incrementally in two steps: first gather the changed features
+      // walking back through states_to_update, then apply them from the oldest
+      // state to the newest.
+
+      // Gather all features to be updated.
+      const Square ksq = pos.square<KING>(Perspective);
+
+      // The size must be enough to contain the largest possible update.
+      // That might depend on the feature set and generally relies on the
+      // feature set's update cost calculation to be correct and never
+      // allow updates with more added/removed features than MaxActiveDimensions.
+      FeatureSet::IndexList removed[N-1], added[N-1];
+
+      {
+        int i = N-2; // last potential state to update. Skip last element because it must be nullptr.
+        while (states_to_update[i] == nullptr)
+          --i;
+
+        StateInfo *st2 = states_to_update[i];
+
+        for (; i >= 0; --i)
+        {
+          states_to_update[i]->accumulator.computed[Perspective] = true;
+
+          StateInfo* end_state = i == 0 ? computed_st : states_to_update[i - 1];
 
-  #elif defined(USE_NEON)
-        const auto out = reinterpret_cast<int8x8_t*>(&output[offset]);
-        for (IndexType j = 0; j < kNumChunks; ++j) {
-          int16x8_t sum = reinterpret_cast<const int16x8_t*>(
-              accumulation[perspectives[p]][0])[j];
-          out[j] = vmax_s8(vqmovn_s16(sum), kZero);
+          for (; st2 != end_state; st2 = st2->previous)
+            FeatureSet::append_changed_indices<Perspective>(
+              ksq, st2->dirtyPiece, removed[i], added[i]);
         }
+      }
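
To see how the gathering loop fills the per-state lists, consider a hypothetical call with N == 3 and states_to_update == { A, B, nullptr }, where the chain is computed_st <- ... <- A <- ... <- B (B newest):

    // The loop walks st2 back from B and fills:
    //   removed[1] / added[1] : features changed after A, up to and including B
    //   removed[0] / added[0] : features changed after computed_st, up to and including A
    // so slot i holds exactly the delta from the previous state in the
    // update chain (or from computed_st) to states_to_update[i].
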
 
-  #else
-        for (IndexType j = 0; j < kHalfDimensions; ++j) {
-          BiasType sum = accumulation[static_cast<int>(perspectives[p])][0][j];
-          output[offset + j] = static_cast<OutputType>(
-              std::max<int>(0, std::min<int>(127, sum)));
+      StateInfo* st = computed_st;
+
+      // Now update the accumulators listed in states_to_update[], where the last element is a sentinel.
+#ifdef VECTOR
+      for (IndexType j = 0; j < HalfDimensions / TileHeight; ++j)
+      {
+        // Load accumulator
+        auto accTile = reinterpret_cast<vec_t*>(
+          &st->accumulator.accumulation[Perspective][j * TileHeight]);
+        for (IndexType k = 0; k < NumRegs; ++k)
+          acc[k] = vec_load(&accTile[k]);
+
+        for (IndexType i = 0; states_to_update[i]; ++i)
+        {
+          // Difference calculation for the deactivated features
+          for (const auto index : removed[i])
+          {
+            const IndexType offset = HalfDimensions * index + j * TileHeight;
+            auto column = reinterpret_cast<const vec_t*>(&weights[offset]);
+            for (IndexType k = 0; k < NumRegs; ++k)
+              acc[k] = vec_sub_16(acc[k], column[k]);
+          }
+
+          // Difference calculation for the activated features
+          for (const auto index : added[i])
+          {
+            const IndexType offset = HalfDimensions * index + j * TileHeight;
+            auto column = reinterpret_cast<const vec_t*>(&weights[offset]);
+            for (IndexType k = 0; k < NumRegs; ++k)
+              acc[k] = vec_add_16(acc[k], column[k]);
+          }
+
+          // Store accumulator
+          accTile = reinterpret_cast<vec_t*>(
+            &states_to_update[i]->accumulator.accumulation[Perspective][j * TileHeight]);
+          for (IndexType k = 0; k < NumRegs; ++k)
+            vec_store(&accTile[k], acc[k]);
         }
-  #endif
+      }
+
+      for (IndexType j = 0; j < PSQTBuckets / PsqtTileHeight; ++j)
+      {
+        // Load accumulator
+        auto accTilePsqt = reinterpret_cast<psqt_vec_t*>(
+          &st->accumulator.psqtAccumulation[Perspective][j * PsqtTileHeight]);
+        for (std::size_t k = 0; k < NumPsqtRegs; ++k)
+          psqt[k] = vec_load_psqt(&accTilePsqt[k]);
+
+        for (IndexType i = 0; states_to_update[i]; ++i)
+        {
+          // Difference calculation for the deactivated features
+          for (const auto index : removed[i])
+          {
+            const IndexType offset = PSQTBuckets * index + j * PsqtTileHeight;
+            auto columnPsqt = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offset]);
+            for (std::size_t k = 0; k < NumPsqtRegs; ++k)
+              psqt[k] = vec_sub_psqt_32(psqt[k], columnPsqt[k]);
+          }
 
+          // Difference calculation for the activated features
+          for (const auto index : added[i])
+          {
+            const IndexType offset = PSQTBuckets * index + j * PsqtTileHeight;
+            auto columnPsqt = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offset]);
+            for (std::size_t k = 0; k < NumPsqtRegs; ++k)
+              psqt[k] = vec_add_psqt_32(psqt[k], columnPsqt[k]);
+          }
+
+          // Store accumulator
+          accTilePsqt = reinterpret_cast<psqt_vec_t*>(
+            &states_to_update[i]->accumulator.psqtAccumulation[Perspective][j * PsqtTileHeight]);
+          for (std::size_t k = 0; k < NumPsqtRegs; ++k)
+            vec_store_psqt(&accTilePsqt[k], psqt[k]);
+        }
       }
-  #if defined(USE_MMX)
-      _mm_empty();
-  #endif
-    }
 
-   private:
-    // Calculate cumulative value without using difference calculation
-    void RefreshAccumulator(const Position& pos) const {
+#else
+      for (IndexType i = 0; states_to_update[i]; ++i)
+      {
+        std::memcpy(states_to_update[i]->accumulator.accumulation[Perspective],
+            st->accumulator.accumulation[Perspective],
+            HalfDimensions * sizeof(BiasType));
 
-      auto& accumulator = pos.state()->accumulator;
-      IndexType i = 0;
-      Features::IndexList active_indices[2];
-      RawFeatures::AppendActiveIndices(pos, kRefreshTriggers[i],
-                                       active_indices);
-      for (Color perspective : { WHITE, BLACK }) {
-        std::memcpy(accumulator.accumulation[perspective][i], biases_,
-                   kHalfDimensions * sizeof(BiasType));
-        for (const auto index : active_indices[perspective]) {
-          const IndexType offset = kHalfDimensions * index;
-  #if defined(USE_AVX512)
-          auto accumulation = reinterpret_cast<__m512i*>(
-              &accumulator.accumulation[perspective][i][0]);
-          auto column = reinterpret_cast<const __m512i*>(&weights_[offset]);
-          constexpr IndexType kNumChunks = kHalfDimensions / kSimdWidth;
-          for (IndexType j = 0; j < kNumChunks; ++j)
-            _mm512_storeA_si512(&accumulation[j], _mm512_add_epi16(_mm512_loadA_si512(&accumulation[j]), column[j]));
-
-  #elif defined(USE_AVX2)
-          auto accumulation = reinterpret_cast<__m256i*>(
-              &accumulator.accumulation[perspective][i][0]);
-          auto column = reinterpret_cast<const __m256i*>(&weights_[offset]);
-          constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
-          for (IndexType j = 0; j < kNumChunks; ++j)
-            _mm256_storeA_si256(&accumulation[j], _mm256_add_epi16(_mm256_loadA_si256(&accumulation[j]), column[j]));
-
-  #elif defined(USE_SSE2)
-          auto accumulation = reinterpret_cast<__m128i*>(
-              &accumulator.accumulation[perspective][i][0]);
-          auto column = reinterpret_cast<const __m128i*>(&weights_[offset]);
-          constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
-          for (IndexType j = 0; j < kNumChunks; ++j)
-            accumulation[j] = _mm_add_epi16(accumulation[j], column[j]);
-
-  #elif defined(USE_MMX)
-          auto accumulation = reinterpret_cast<__m64*>(
-              &accumulator.accumulation[perspective][i][0]);
-          auto column = reinterpret_cast<const __m64*>(&weights_[offset]);
-          constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
-          for (IndexType j = 0; j < kNumChunks; ++j)
-            accumulation[j] = _mm_add_pi16(accumulation[j], column[j]);
-
-  #elif defined(USE_NEON)
-          auto accumulation = reinterpret_cast<int16x8_t*>(
-              &accumulator.accumulation[perspective][i][0]);
-          auto column = reinterpret_cast<const int16x8_t*>(&weights_[offset]);
-          constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
-          for (IndexType j = 0; j < kNumChunks; ++j)
-            accumulation[j] = vaddq_s16(accumulation[j], column[j]);
+        for (std::size_t k = 0; k < PSQTBuckets; ++k)
+          states_to_update[i]->accumulator.psqtAccumulation[Perspective][k] = st->accumulator.psqtAccumulation[Perspective][k];
 
-  #else
-          for (IndexType j = 0; j < kHalfDimensions; ++j)
-            accumulator.accumulation[perspective][i][j] += weights_[offset + j];
-  #endif
+        st = states_to_update[i];
+
+        // Difference calculation for the deactivated features
+        for (const auto index : removed[i])
+        {
+          const IndexType offset = HalfDimensions * index;
+
+          for (IndexType j = 0; j < HalfDimensions; ++j)
+            st->accumulator.accumulation[Perspective][j] -= weights[offset + j];
+
+          for (std::size_t k = 0; k < PSQTBuckets; ++k)
+            st->accumulator.psqtAccumulation[Perspective][k] -= psqtWeights[index * PSQTBuckets + k];
+        }
+
+        // Difference calculation for the activated features
+        for (const auto index : added[i])
+        {
+          const IndexType offset = HalfDimensions * index;
 
+          for (IndexType j = 0; j < HalfDimensions; ++j)
+            st->accumulator.accumulation[Perspective][j] += weights[offset + j];
+
+          for (std::size_t k = 0; k < PSQTBuckets; ++k)
+            st->accumulator.psqtAccumulation[Perspective][k] += psqtWeights[index * PSQTBuckets + k];
         }
       }
+#endif
+
   #if defined(USE_MMX)
       _mm_empty();
   #endif
-
-      accumulator.computed_accumulation = true;
     }
 
-    // Calculate cumulative value using difference calculation
-    void UpdateAccumulator(const Position& pos) const {
+    template<Color Perspective>
+    void update_accumulator_refresh(const Position& pos) const {
+  #ifdef VECTOR
+      // Gcc-10.2 unnecessarily spills AVX2 registers if this array
+      // is defined in the VECTOR code below, once in each branch
+      vec_t acc[NumRegs];
+      psqt_vec_t psqt[NumPsqtRegs];
+  #endif
 
-      const auto prev_accumulator = pos.state()->previous->accumulator;
+      // Refresh the accumulator. This could be extracted to a separate function
+      // because it's done in two places, but it's unclear whether compilers would
+      // still handle register allocation well if it were.
       auto& accumulator = pos.state()->accumulator;
-      IndexType i = 0;
-      Features::IndexList removed_indices[2], added_indices[2];
-      bool reset[2];
-      RawFeatures::AppendChangedIndices(pos, kRefreshTriggers[i],
-                                        removed_indices, added_indices, reset);
-      for (Color perspective : { WHITE, BLACK }) {
-
-  #if defined(USE_AVX2)
-        constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
-        auto accumulation = reinterpret_cast<__m256i*>(
-            &accumulator.accumulation[perspective][i][0]);
-
-  #elif defined(USE_SSE2)
-        constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
-        auto accumulation = reinterpret_cast<__m128i*>(
-            &accumulator.accumulation[perspective][i][0]);
-
-  #elif defined(USE_MMX)
-        constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
-        auto accumulation = reinterpret_cast<__m64*>(
-            &accumulator.accumulation[perspective][i][0]);
-
-  #elif defined(USE_NEON)
-        constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
-        auto accumulation = reinterpret_cast<int16x8_t*>(
-            &accumulator.accumulation[perspective][i][0]);
-  #endif
+      accumulator.computed[Perspective] = true;
+      FeatureSet::IndexList active;
+      FeatureSet::append_active_indices<Perspective>(pos, active);
+
+#ifdef VECTOR
+      for (IndexType j = 0; j < HalfDimensions / TileHeight; ++j)
+      {
+        auto biasesTile = reinterpret_cast<const vec_t*>(
+            &biases[j * TileHeight]);
+        for (IndexType k = 0; k < NumRegs; ++k)
+          acc[k] = biasesTile[k];
+
+        for (const auto index : active)
+        {
+          const IndexType offset = HalfDimensions * index + j * TileHeight;
+          auto column = reinterpret_cast<const vec_t*>(&weights[offset]);
+
+          for (unsigned k = 0; k < NumRegs; ++k)
+            acc[k] = vec_add_16(acc[k], column[k]);
+        }
 
-        if (reset[perspective]) {
-          std::memcpy(accumulator.accumulation[perspective][i], biases_,
-                      kHalfDimensions * sizeof(BiasType));
-        } else {
-          std::memcpy(accumulator.accumulation[perspective][i],
-                      prev_accumulator.accumulation[perspective][i],
-                      kHalfDimensions * sizeof(BiasType));
-          // Difference calculation for the deactivated features
-          for (const auto index : removed_indices[perspective]) {
-            const IndexType offset = kHalfDimensions * index;
+        auto accTile = reinterpret_cast<vec_t*>(
+            &accumulator.accumulation[Perspective][j * TileHeight]);
+        for (unsigned k = 0; k < NumRegs; k++)
+          vec_store(&accTile[k], acc[k]);
+      }
 
-  #if defined(USE_AVX2)
-            auto column = reinterpret_cast<const __m256i*>(&weights_[offset]);
-            for (IndexType j = 0; j < kNumChunks; ++j)
-              accumulation[j] = _mm256_sub_epi16(accumulation[j], column[j]);
+      for (IndexType j = 0; j < PSQTBuckets / PsqtTileHeight; ++j)
+      {
+        for (std::size_t k = 0; k < NumPsqtRegs; ++k)
+          psqt[k] = vec_zero_psqt();
 
-  #elif defined(USE_SSE2)
-            auto column = reinterpret_cast<const __m128i*>(&weights_[offset]);
-            for (IndexType j = 0; j < kNumChunks; ++j)
-              accumulation[j] = _mm_sub_epi16(accumulation[j], column[j]);
+        for (const auto index : active)
+        {
+          const IndexType offset = PSQTBuckets * index + j * PsqtTileHeight;
+          auto columnPsqt = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offset]);
 
-  #elif defined(USE_MMX)
-            auto column = reinterpret_cast<const __m64*>(&weights_[offset]);
-            for (IndexType j = 0; j < kNumChunks; ++j)
-              accumulation[j] = _mm_sub_pi16(accumulation[j], column[j]);
+          for (std::size_t k = 0; k < NumPsqtRegs; ++k)
+            psqt[k] = vec_add_psqt_32(psqt[k], columnPsqt[k]);
+        }
 
-  #elif defined(USE_NEON)
-            auto column = reinterpret_cast<const int16x8_t*>(&weights_[offset]);
-            for (IndexType j = 0; j < kNumChunks; ++j)
-              accumulation[j] = vsubq_s16(accumulation[j], column[j]);
+        auto accTilePsqt = reinterpret_cast<psqt_vec_t*>(
+          &accumulator.psqtAccumulation[Perspective][j * PsqtTileHeight]);
+        for (std::size_t k = 0; k < NumPsqtRegs; ++k)
+          vec_store_psqt(&accTilePsqt[k], psqt[k]);
+      }
 
-  #else
-            for (IndexType j = 0; j < kHalfDimensions; ++j)
-              accumulator.accumulation[perspective][i][j] -= weights_[offset + j];
-  #endif
+#else
+      std::memcpy(accumulator.accumulation[Perspective], biases,
+          HalfDimensions * sizeof(BiasType));
 
-          }
-        }
-        { // Difference calculation for the activated features
-          for (const auto index : added_indices[perspective]) {
-            const IndexType offset = kHalfDimensions * index;
-
-  #if defined(USE_AVX2)
-            auto column = reinterpret_cast<const __m256i*>(&weights_[offset]);
-            for (IndexType j = 0; j < kNumChunks; ++j)
-              accumulation[j] = _mm256_add_epi16(accumulation[j], column[j]);
-
-  #elif defined(USE_SSE2)
-            auto column = reinterpret_cast<const __m128i*>(&weights_[offset]);
-            for (IndexType j = 0; j < kNumChunks; ++j)
-              accumulation[j] = _mm_add_epi16(accumulation[j], column[j]);
-
-  #elif defined(USE_MMX)
-            auto column = reinterpret_cast<const __m64*>(&weights_[offset]);
-            for (IndexType j = 0; j < kNumChunks; ++j)
-              accumulation[j] = _mm_add_pi16(accumulation[j], column[j]);
-
-  #elif defined(USE_NEON)
-            auto column = reinterpret_cast<const int16x8_t*>(&weights_[offset]);
-            for (IndexType j = 0; j < kNumChunks; ++j)
-              accumulation[j] = vaddq_s16(accumulation[j], column[j]);
+      for (std::size_t k = 0; k < PSQTBuckets; ++k)
+        accumulator.psqtAccumulation[Perspective][k] = 0;
 
-  #else
-            for (IndexType j = 0; j < kHalfDimensions; ++j)
-              accumulator.accumulation[perspective][i][j] += weights_[offset + j];
-  #endif
+      for (const auto index : active)
+      {
+        const IndexType offset = HalfDimensions * index;
 
-          }
-        }
+        for (IndexType j = 0; j < HalfDimensions; ++j)
+          accumulator.accumulation[Perspective][j] += weights[offset + j];
+
+        for (std::size_t k = 0; k < PSQTBuckets; ++k)
+          accumulator.psqtAccumulation[Perspective][k] += psqtWeights[index * PSQTBuckets + k];
       }
+#endif
+
   #if defined(USE_MMX)
       _mm_empty();
   #endif
+    }
+
+    template<Color Perspective>
+    void hint_common_access_for_perspective(const Position& pos) const {
+
+      // Works like update_accumulator, but performs less work.
+      // Updates ONLY the accumulator for pos.
+
+      // Fast early exit.
+      if (pos.state()->accumulator.computed[Perspective])
+        return;
 
-      accumulator.computed_accumulation = true;
+      auto [oldest_st, _] = try_find_computed_accumulator<Perspective>(pos);
+
+      if (oldest_st->accumulator.computed[Perspective])
+      {
+        // Only update current position accumulator to minimize work.
+        StateInfo* states_to_update[2] = { pos.state(), nullptr };
+        update_accumulator_incremental<Perspective, 2>(pos, oldest_st, states_to_update);
+      }
+      else
+      {
+        update_accumulator_refresh<Perspective>(pos);
+      }
     }
 
-    using BiasType = std::int16_t;
-    using WeightType = std::int16_t;
+    template<Color Perspective>
+    void update_accumulator(const Position& pos) const {
+
+      auto [oldest_st, next] = try_find_computed_accumulator<Perspective>(pos);
+
+      if (oldest_st->accumulator.computed[Perspective])
+      {
+        if (next == nullptr)
+          return;
+
+        // Now update the accumulators listed in states_to_update[], where the last element is a sentinel.
+        // Currently we update 2 accumulators.
+        //     1. for the current position
+        //     2. the next accumulator after the computed one
+        // The heuristic may change in the future.
+        StateInfo *states_to_update[3] =
+          { next, next == pos.state() ? nullptr : pos.state(), nullptr };
+
+        update_accumulator_incremental<Perspective, 3>(pos, oldest_st, states_to_update);
+      }
+      else
+      {
+        update_accumulator_refresh<Perspective>(pos);
+      }
+    }
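
Concretely (hypothetical chain): if pos.state() -> st1 -> st2 and st2 is the first state with a computed accumulator, then oldest_st == st2 and next == st1, so:

    // states_to_update == { st1, pos.state(), nullptr }:
    // update_accumulator_incremental() first applies the changes recorded
    // between st2 and st1 on top of st2's accumulator and stores the result
    // in st1, then continues from there to pos.state(). If the computed
    // state is pos.state()'s direct predecessor, next == pos.state() and
    // only one accumulator is written.
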
 
-    alignas(kCacheLineSize) BiasType biases_[kHalfDimensions];
-    alignas(kCacheLineSize)
-        WeightType weights_[kHalfDimensions * kInputDimensions];
+    alignas(CacheLineSize) BiasType biases[HalfDimensions];
+    alignas(CacheLineSize) WeightType weights[HalfDimensions * InputDimensions];
+    alignas(CacheLineSize) PSQTWeightType psqtWeights[InputDimensions * PSQTBuckets];
   };
 
-}  // namespace Eval::NNUE
+}  // namespace Stockfish::Eval::NNUE
 
 #endif // #ifndef NNUE_FEATURE_TRANSFORMER_H_INCLUDED