/*
Stockfish, a UCI chess playing engine derived from Glaurung 2.1
- Copyright (C) 2004-2020 The Stockfish developers (see AUTHORS file)
+ Copyright (C) 2004-2023 The Stockfish developers (see AUTHORS file)
Stockfish is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
#ifndef NNUE_FEATURE_TRANSFORMER_H_INCLUDED
#define NNUE_FEATURE_TRANSFORMER_H_INCLUDED
-#include "nnue_common.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+#include <iosfwd>
+#include <utility>
+
+#include "../position.h"
+#include "../types.h"
+#include "nnue_accumulator.h"
#include "nnue_architecture.h"
-#include "features/index_list.h"
+#include "nnue_common.h"
-#include <cstring> // std::memset()
+namespace Stockfish::Eval::NNUE {
-namespace Eval::NNUE {
+ using BiasType = std::int16_t;
+ using WeightType = std::int16_t;
+ using PSQTWeightType = std::int32_t;
// If vector instructions are enabled, we update and refresh the
// accumulator tile by tile such that each tile fits in the CPU's
// vector registers.
- #define TILING
+ #define VECTOR
+
+  static_assert(PSQTBuckets % 8 == 0,
+    "Per-feature PSQT values cannot be processed at a granularity lower than 8 at a time.");
#ifdef USE_AVX512
- typedef __m512i vec_t;
- #define vec_load(a) _mm512_loadA_si512(a)
- #define vec_store(a,b) _mm512_storeA_si512(a,b)
+ using vec_t = __m512i;
+ using psqt_vec_t = __m256i;
+ #define vec_load(a) _mm512_load_si512(a)
+ #define vec_store(a,b) _mm512_store_si512(a,b)
#define vec_add_16(a,b) _mm512_add_epi16(a,b)
#define vec_sub_16(a,b) _mm512_sub_epi16(a,b)
- static constexpr IndexType kNumRegs = 8; // only 8 are needed
+ #define vec_mul_16(a,b) _mm512_mullo_epi16(a,b)
+ #define vec_zero() _mm512_setzero_epi32()
+ #define vec_set_16(a) _mm512_set1_epi16(a)
+ #define vec_max_16(a,b) _mm512_max_epi16(a,b)
+ #define vec_min_16(a,b) _mm512_min_epi16(a,b)
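+  // Pack the two int16 vectors into one int8 vector: each 16-bit lane is
+  // shifted right by 7 and narrowed with signed saturation. The 64-bit
+  // permute restores linear order, since _mm512_packs_epi16 interleaves
+  // its two inputs per 128-bit lane.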
+ inline vec_t vec_msb_pack_16(vec_t a, vec_t b){
+ vec_t compacted = _mm512_packs_epi16(_mm512_srli_epi16(a,7),_mm512_srli_epi16(b,7));
+ return _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 2, 4, 6, 1, 3, 5, 7), compacted);
+ }
+ #define vec_load_psqt(a) _mm256_load_si256(a)
+ #define vec_store_psqt(a,b) _mm256_store_si256(a,b)
+ #define vec_add_psqt_32(a,b) _mm256_add_epi32(a,b)
+ #define vec_sub_psqt_32(a,b) _mm256_sub_epi32(a,b)
+ #define vec_zero_psqt() _mm256_setzero_si256()
+  #define NumRegistersSIMD 32
+ #define MaxChunkSize 64
#elif USE_AVX2
- typedef __m256i vec_t;
- #define vec_load(a) _mm256_loadA_si256(a)
- #define vec_store(a,b) _mm256_storeA_si256(a,b)
+ using vec_t = __m256i;
+ using psqt_vec_t = __m256i;
+ #define vec_load(a) _mm256_load_si256(a)
+ #define vec_store(a,b) _mm256_store_si256(a,b)
#define vec_add_16(a,b) _mm256_add_epi16(a,b)
#define vec_sub_16(a,b) _mm256_sub_epi16(a,b)
- static constexpr IndexType kNumRegs = 16;
+ #define vec_mul_16(a,b) _mm256_mullo_epi16(a,b)
+ #define vec_zero() _mm256_setzero_si256()
+ #define vec_set_16(a) _mm256_set1_epi16(a)
+ #define vec_max_16(a,b) _mm256_max_epi16(a,b)
+ #define vec_min_16(a,b) _mm256_min_epi16(a,b)
+ inline vec_t vec_msb_pack_16(vec_t a, vec_t b){
+ vec_t compacted = _mm256_packs_epi16(_mm256_srli_epi16(a,7), _mm256_srli_epi16(b,7));
+ return _mm256_permute4x64_epi64(compacted, 0b11011000);
+ }
+ #define vec_load_psqt(a) _mm256_load_si256(a)
+ #define vec_store_psqt(a,b) _mm256_store_si256(a,b)
+ #define vec_add_psqt_32(a,b) _mm256_add_epi32(a,b)
+ #define vec_sub_psqt_32(a,b) _mm256_sub_epi32(a,b)
+ #define vec_zero_psqt() _mm256_setzero_si256()
+ #define NumRegistersSIMD 16
+ #define MaxChunkSize 32
#elif USE_SSE2
- typedef __m128i vec_t;
+ using vec_t = __m128i;
+ using psqt_vec_t = __m128i;
#define vec_load(a) (*(a))
#define vec_store(a,b) *(a)=(b)
#define vec_add_16(a,b) _mm_add_epi16(a,b)
#define vec_sub_16(a,b) _mm_sub_epi16(a,b)
- static constexpr IndexType kNumRegs = Is64Bit ? 16 : 8;
-
- #elif USE_MMX
- typedef __m64 vec_t;
- #define vec_load(a) (*(a))
- #define vec_store(a,b) *(a)=(b)
- #define vec_add_16(a,b) _mm_add_pi16(a,b)
- #define vec_sub_16(a,b) _mm_sub_pi16(a,b)
- static constexpr IndexType kNumRegs = 8;
+ #define vec_mul_16(a,b) _mm_mullo_epi16(a,b)
+ #define vec_zero() _mm_setzero_si128()
+ #define vec_set_16(a) _mm_set1_epi16(a)
+ #define vec_max_16(a,b) _mm_max_epi16(a,b)
+ #define vec_min_16(a,b) _mm_min_epi16(a,b)
+ #define vec_msb_pack_16(a,b) _mm_packs_epi16(_mm_srli_epi16(a,7),_mm_srli_epi16(b,7))
+ #define vec_load_psqt(a) (*(a))
+ #define vec_store_psqt(a,b) *(a)=(b)
+ #define vec_add_psqt_32(a,b) _mm_add_epi32(a,b)
+ #define vec_sub_psqt_32(a,b) _mm_sub_epi32(a,b)
+ #define vec_zero_psqt() _mm_setzero_si128()
+ #define NumRegistersSIMD (Is64Bit ? 16 : 8)
+ #define MaxChunkSize 16
#elif USE_NEON
- typedef int16x8_t vec_t;
+ using vec_t = int16x8_t;
+ using psqt_vec_t = int32x4_t;
#define vec_load(a) (*(a))
#define vec_store(a,b) *(a)=(b)
#define vec_add_16(a,b) vaddq_s16(a,b)
#define vec_sub_16(a,b) vsubq_s16(a,b)
- static constexpr IndexType kNumRegs = 16;
+ #define vec_mul_16(a,b) vmulq_s16(a,b)
+ #define vec_zero() vec_t{0}
+ #define vec_set_16(a) vdupq_n_s16(a)
+ #define vec_max_16(a,b) vmaxq_s16(a,b)
+ #define vec_min_16(a,b) vminq_s16(a,b)
+ inline vec_t vec_msb_pack_16(vec_t a, vec_t b){
+ const int8x8_t shifta = vshrn_n_s16(a, 7);
+ const int8x8_t shiftb = vshrn_n_s16(b, 7);
+ const int8x16_t compacted = vcombine_s8(shifta,shiftb);
+ return *reinterpret_cast<const vec_t*> (&compacted);
+ }
+ #define vec_load_psqt(a) (*(a))
+ #define vec_store_psqt(a,b) *(a)=(b)
+ #define vec_add_psqt_32(a,b) vaddq_s32(a,b)
+ #define vec_sub_psqt_32(a,b) vsubq_s32(a,b)
+ #define vec_zero_psqt() psqt_vec_t{0}
+ #define NumRegistersSIMD 16
+ #define MaxChunkSize 16
#else
- #undef TILING
+ #undef VECTOR
#endif
+
+ #ifdef VECTOR
+
+ // Compute optimal SIMD register count for feature transformer accumulation.
+
+ // We use __m* types as template arguments, which causes GCC to emit warnings
+ // about losing some attribute information. This is irrelevant to us as we
+  // only take their size, so the following pragmas are harmless.
+ #if defined(__GNUC__)
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wignored-attributes"
+ #endif
+
+ template <typename SIMDRegisterType,
+ typename LaneType,
+ int NumLanes,
+ int MaxRegisters>
+ static constexpr int BestRegisterCount()
+ {
+ #define RegisterSize sizeof(SIMDRegisterType)
+ #define LaneSize sizeof(LaneType)
+
+ static_assert(RegisterSize >= LaneSize);
+ static_assert(MaxRegisters <= NumRegistersSIMD);
+ static_assert(MaxRegisters > 0);
+ static_assert(NumRegistersSIMD > 0);
+ static_assert(RegisterSize % LaneSize == 0);
+ static_assert((NumLanes * LaneSize) % RegisterSize == 0);
+
+ const int ideal = (NumLanes * LaneSize) / RegisterSize;
+ if (ideal <= MaxRegisters)
+ return ideal;
+
+    // Look for the largest divisor of the ideal register count that does not exceed MaxRegisters
+ for (int divisor = MaxRegisters; divisor > 1; --divisor)
+ if (ideal % divisor == 0)
+ return divisor;
+
+ return 1;
+ }
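+
+  // Worked example (illustrative sizes only, not a statement about the
+  // actual network): with AVX2, vec_t = __m256i is 32 bytes and WeightType
+  // is int16_t, so a hypothetical TransformedFeatureDimensions of 1024
+  // gives ideal = 1024 * 2 / 32 = 64 registers. Only 16 are available, and
+  // the largest divisor of 64 not exceeding 16 is 16, so the accumulation
+  // runs 16 registers at a time in 4 passes over each row.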
+
+ static constexpr int NumRegs = BestRegisterCount<vec_t, WeightType, TransformedFeatureDimensions, NumRegistersSIMD>();
+ static constexpr int NumPsqtRegs = BestRegisterCount<psqt_vec_t, PSQTWeightType, PSQTBuckets, NumRegistersSIMD>();
+ #if defined(__GNUC__)
+ #pragma GCC diagnostic pop
+ #endif
+ #endif
+
// Input feature converter
class FeatureTransformer {
private:
// Number of output dimensions for one side
- static constexpr IndexType kHalfDimensions = kTransformedFeatureDimensions;
+ static constexpr IndexType HalfDimensions = TransformedFeatureDimensions;
- #ifdef TILING
- static constexpr IndexType kTileHeight = kNumRegs * sizeof(vec_t) / 2;
- static_assert(kHalfDimensions % kTileHeight == 0, "kTileHeight must divide kHalfDimensions");
+ #ifdef VECTOR
+ static constexpr IndexType TileHeight = NumRegs * sizeof(vec_t) / 2;
+ static constexpr IndexType PsqtTileHeight = NumPsqtRegs * sizeof(psqt_vec_t) / 4;
+ static_assert(HalfDimensions % TileHeight == 0, "TileHeight must divide HalfDimensions");
+ static_assert(PSQTBuckets % PsqtTileHeight == 0, "PsqtTileHeight must divide PSQTBuckets");
#endif
public:
using OutputType = TransformedFeatureType;
// Number of input/output dimensions
- static constexpr IndexType kInputDimensions = RawFeatures::kDimensions;
- static constexpr IndexType kOutputDimensions = kHalfDimensions * 2;
+ static constexpr IndexType InputDimensions = FeatureSet::Dimensions;
+ static constexpr IndexType OutputDimensions = HalfDimensions;
// Size of forward propagation buffer
- static constexpr std::size_t kBufferSize =
- kOutputDimensions * sizeof(OutputType);
+ static constexpr std::size_t BufferSize =
+ OutputDimensions * sizeof(OutputType);
// Hash value embedded in the evaluation file
- static constexpr std::uint32_t GetHashValue() {
-
- return RawFeatures::kHashValue ^ kOutputDimensions;
+ static constexpr std::uint32_t get_hash_value() {
+ return FeatureSet::HashValue ^ (OutputDimensions * 2);
}
// Read network parameters
- bool ReadParameters(std::istream& stream) {
+ bool read_parameters(std::istream& stream) {
+
+ read_leb_128<BiasType >(stream, biases , HalfDimensions );
+ read_leb_128<WeightType >(stream, weights , HalfDimensions * InputDimensions);
+ read_leb_128<PSQTWeightType>(stream, psqtWeights, PSQTBuckets * InputDimensions);
- for (std::size_t i = 0; i < kHalfDimensions; ++i)
- biases_[i] = read_little_endian<BiasType>(stream);
- for (std::size_t i = 0; i < kHalfDimensions * kInputDimensions; ++i)
- weights_[i] = read_little_endian<WeightType>(stream);
return !stream.fail();
}
- // Proceed with the difference calculation if possible
- bool UpdateAccumulatorIfPossible(const Position& pos) const {
-
- const auto now = pos.state();
- if (now->accumulator.computed_accumulation)
- return true;
-
- const auto prev = now->previous;
- if (prev) {
- if (prev->accumulator.computed_accumulation) {
- UpdateAccumulator(pos);
- return true;
- } else if (prev->previous && prev->previous->accumulator.computed_accumulation) {
- UpdateAccumulator(pos);
- return true;
- }
- }
+ // Write network parameters
+ bool write_parameters(std::ostream& stream) const {
- return false;
+ write_leb_128<BiasType >(stream, biases , HalfDimensions );
+ write_leb_128<WeightType >(stream, weights , HalfDimensions * InputDimensions);
+ write_leb_128<PSQTWeightType>(stream, psqtWeights, PSQTBuckets * InputDimensions);
+
+ return !stream.fail();
}
// Convert input features
- void Transform(const Position& pos, OutputType* output) const {
-
- if (!UpdateAccumulatorIfPossible(pos))
- RefreshAccumulator(pos);
+ std::int32_t transform(const Position& pos, OutputType* output, int bucket) const {
+ update_accumulator<WHITE>(pos);
+ update_accumulator<BLACK>(pos);
+ const Color perspectives[2] = {pos.side_to_move(), ~pos.side_to_move()};
const auto& accumulation = pos.state()->accumulator.accumulation;
+ const auto& psqtAccumulation = pos.state()->accumulator.psqtAccumulation;
- #if defined(USE_AVX2)
- constexpr IndexType kNumChunks = kHalfDimensions / kSimdWidth;
- constexpr int kControl = 0b11011000;
- const __m256i kZero = _mm256_setzero_si256();
-
- #elif defined(USE_SSE2)
- constexpr IndexType kNumChunks = kHalfDimensions / kSimdWidth;
+ const auto psqt = (
+ psqtAccumulation[perspectives[0]][bucket]
+ - psqtAccumulation[perspectives[1]][bucket]
+ ) / 2;
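+    // Each perspective accumulates its own estimate of the bucketed
+    // positional score; half their difference is the average of the two
+    // estimates, expressed from the side to move's point of view.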
- #ifdef USE_SSE41
- const __m128i kZero = _mm_setzero_si128();
- #else
- const __m128i k0x80s = _mm_set1_epi8(-128);
- #endif
- #elif defined(USE_MMX)
- constexpr IndexType kNumChunks = kHalfDimensions / kSimdWidth;
- const __m64 k0x80s = _mm_set1_pi8(-128);
+ for (IndexType p = 0; p < 2; ++p)
+ {
+ const IndexType offset = (HalfDimensions / 2) * p;
- #elif defined(USE_NEON)
- constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
- const int8x8_t kZero = {0};
- #endif
+#if defined(VECTOR)
- const Color perspectives[2] = {pos.side_to_move(), ~pos.side_to_move()};
- for (IndexType p = 0; p < 2; ++p) {
- const IndexType offset = kHalfDimensions * p;
-
- #if defined(USE_AVX2)
- auto out = reinterpret_cast<__m256i*>(&output[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- __m256i sum0 = _mm256_loadA_si256(
- &reinterpret_cast<const __m256i*>(accumulation[perspectives[p]][0])[j * 2 + 0]);
- __m256i sum1 = _mm256_loadA_si256(
- &reinterpret_cast<const __m256i*>(accumulation[perspectives[p]][0])[j * 2 + 1]);
- _mm256_storeA_si256(&out[j], _mm256_permute4x64_epi64(_mm256_max_epi8(
- _mm256_packs_epi16(sum0, sum1), kZero), kControl));
- }
+ constexpr IndexType OutputChunkSize = MaxChunkSize;
+ static_assert((HalfDimensions / 2) % OutputChunkSize == 0);
+ constexpr IndexType NumOutputChunks = HalfDimensions / 2 / OutputChunkSize;
- #elif defined(USE_SSE2)
- auto out = reinterpret_cast<__m128i*>(&output[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- __m128i sum0 = _mm_load_si128(&reinterpret_cast<const __m128i*>(
- accumulation[perspectives[p]][0])[j * 2 + 0]);
- __m128i sum1 = _mm_load_si128(&reinterpret_cast<const __m128i*>(
- accumulation[perspectives[p]][0])[j * 2 + 1]);
- const __m128i packedbytes = _mm_packs_epi16(sum0, sum1);
+              const vec_t Zero = vec_zero();
+              const vec_t One  = vec_set_16(127);
- _mm_store_si128(&out[j],
+ const vec_t* in0 = reinterpret_cast<const vec_t*>(&(accumulation[perspectives[p]][0]));
+ const vec_t* in1 = reinterpret_cast<const vec_t*>(&(accumulation[perspectives[p]][HalfDimensions / 2]));
+ vec_t* out = reinterpret_cast< vec_t*>(output + offset);
- #ifdef USE_SSE41
- _mm_max_epi8(packedbytes, kZero)
- #else
- _mm_subs_epi8(_mm_adds_epi8(packedbytes, k0x80s), k0x80s)
- #endif
+ for (IndexType j = 0; j < NumOutputChunks; j += 1)
+ {
+ const vec_t sum0a = vec_max_16(vec_min_16(in0[j * 2 + 0], One), Zero);
+ const vec_t sum0b = vec_max_16(vec_min_16(in0[j * 2 + 1], One), Zero);
+ const vec_t sum1a = vec_max_16(vec_min_16(in1[j * 2 + 0], One), Zero);
+ const vec_t sum1b = vec_max_16(vec_min_16(in1[j * 2 + 1], One), Zero);
- );
- }
+ const vec_t pa = vec_mul_16(sum0a, sum1a);
+ const vec_t pb = vec_mul_16(sum0b, sum1b);
- #elif defined(USE_MMX)
- auto out = reinterpret_cast<__m64*>(&output[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- __m64 sum0 = *(&reinterpret_cast<const __m64*>(
- accumulation[perspectives[p]][0])[j * 2 + 0]);
- __m64 sum1 = *(&reinterpret_cast<const __m64*>(
- accumulation[perspectives[p]][0])[j * 2 + 1]);
- const __m64 packedbytes = _mm_packs_pi16(sum0, sum1);
- out[j] = _mm_subs_pi8(_mm_adds_pi8(packedbytes, k0x80s), k0x80s);
- }
+ out[j] = vec_msb_pack_16(pa, pb);
+ }
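+
+              // Each product above lies in [0, 127 * 127]; vec_msb_pack_16
+              // shifts the 16-bit lanes right by 7 and saturates to int8,
+              // matching the scalar fallback's (sum0 * sum1) / 128 below.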
- #elif defined(USE_NEON)
- const auto out = reinterpret_cast<int8x8_t*>(&output[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- int16x8_t sum = reinterpret_cast<const int16x8_t*>(
- accumulation[perspectives[p]][0])[j];
- out[j] = vmax_s8(vqmovn_s16(sum), kZero);
- }
+#else
- #else
- for (IndexType j = 0; j < kHalfDimensions; ++j) {
- BiasType sum = accumulation[static_cast<int>(perspectives[p])][0][j];
- output[offset + j] = static_cast<OutputType>(
- std::max<int>(0, std::min<int>(127, sum)));
- }
- #endif
+ for (IndexType j = 0; j < HalfDimensions / 2; ++j) {
+ BiasType sum0 = accumulation[static_cast<int>(perspectives[p])][j + 0];
+ BiasType sum1 = accumulation[static_cast<int>(perspectives[p])][j + HalfDimensions / 2];
+ sum0 = std::clamp<BiasType>(sum0, 0, 127);
+ sum1 = std::clamp<BiasType>(sum1, 0, 127);
+ output[offset + j] = static_cast<OutputType>(unsigned(sum0 * sum1) / 128);
+ }
+#endif
}
- #if defined(USE_MMX)
- _mm_empty();
- #endif
+
+ return psqt;
+ } // end of function transform()
+
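+    // Bring both perspectives' accumulators for pos up to date if that can
+    // be done cheaply; meant as a hint that this position is about to be
+    // evaluated.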
+ void hint_common_access(const Position& pos) const {
+ hint_common_access_for_perspective<WHITE>(pos);
+ hint_common_access_for_perspective<BLACK>(pos);
}
private:
- // Calculate cumulative value without using difference calculation
- void RefreshAccumulator(const Position& pos) const {
+ template<Color Perspective>
+ [[nodiscard]] std::pair<StateInfo*, StateInfo*> try_find_computed_accumulator(const Position& pos) const {
+ // Look for a usable accumulator of an earlier position. We keep track
+ // of the estimated gain in terms of features to be added/subtracted.
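+    // The gain starts at the cost of a full refresh for the current
+    // position and decreases by the cost of each incremental update on the
+    // way back; once it drops below zero, refreshing from scratch is
+    // cheaper than continuing backwards.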
+ StateInfo *st = pos.state(), *next = nullptr;
+ int gain = FeatureSet::refresh_cost(pos);
+ while (st->previous && !st->accumulator.computed[Perspective])
+ {
+ // This governs when a full feature refresh is needed and how many
+ // updates are better than just one full refresh.
+ if ( FeatureSet::requires_refresh(st, Perspective)
+ || (gain -= FeatureSet::update_cost(st) + 1) < 0)
+ break;
+ next = st;
+ st = st->previous;
+ }
+ return { st, next };
+ }
- auto& accumulator = pos.state()->accumulator;
- IndexType i = 0;
- Features::IndexList active_indices[2];
- RawFeatures::AppendActiveIndices(pos, kRefreshTriggers[i],
- active_indices);
- for (Color perspective : { WHITE, BLACK }) {
- #ifdef TILING
- for (unsigned j = 0; j < kHalfDimensions / kTileHeight; ++j) {
- auto biasesTile = reinterpret_cast<const vec_t*>(
- &biases_[j * kTileHeight]);
- auto accTile = reinterpret_cast<vec_t*>(
- &accumulator.accumulation[perspective][i][j * kTileHeight]);
- vec_t acc[kNumRegs];
-
- for (unsigned k = 0; k < kNumRegs; ++k)
- acc[k] = biasesTile[k];
-
- for (const auto index : active_indices[perspective]) {
- const IndexType offset = kHalfDimensions * index + j * kTileHeight;
- auto column = reinterpret_cast<const vec_t*>(&weights_[offset]);
-
- for (unsigned k = 0; k < kNumRegs; ++k)
- acc[k] = vec_add_16(acc[k], column[k]);
- }
+    // NOTE: The parameter states_to_update is an array of position states, ending with nullptr.
+    //       All states must be sequential, that is, states_to_update[i] must either be reachable
+    //       by repeatedly applying ->previous from states_to_update[i+1], or be nullptr.
+    //       computed_st must be reachable by repeatedly applying ->previous on states_to_update[0],
+    //       if it is not nullptr.
+ template<Color Perspective, size_t N>
+ void update_accumulator_incremental(const Position& pos, StateInfo* computed_st, StateInfo* states_to_update[N]) const {
+ static_assert(N > 0);
+ assert(states_to_update[N-1] == nullptr);
+
+ #ifdef VECTOR
+ // Gcc-10.2 unnecessarily spills AVX2 registers if this array
+ // is defined in the VECTOR code below, once in each branch
+ vec_t acc[NumRegs];
+ psqt_vec_t psqt[NumPsqtRegs];
+ #endif
- for (unsigned k = 0; k < kNumRegs; k++)
- vec_store(&accTile[k], acc[k]);
- }
- #else
- std::memcpy(accumulator.accumulation[perspective][i], biases_,
- kHalfDimensions * sizeof(BiasType));
+ if (states_to_update[0] == nullptr)
+ return;
+
+ // Update incrementally going back through states_to_update.
+
+ // Gather all features to be updated.
+ const Square ksq = pos.square<KING>(Perspective);
+
+ // The size must be enough to contain the largest possible update.
+      // That might depend on the feature set and generally relies on the
+      // feature set's update cost calculation being correct, never allowing
+      // updates with more added/removed features than MaxActiveDimensions.
+ FeatureSet::IndexList removed[N-1], added[N-1];
+
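+      // For each states_to_update[i], removed[i]/added[i] collect the full
+      // feature delta between its predecessor in the list (computed_st for
+      // i == 0) and the state itself.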
+ {
+ int i = N-2; // last potential state to update. Skip last element because it must be nullptr.
+ while (states_to_update[i] == nullptr)
+ --i;
+
+ StateInfo* st2 = states_to_update[i];
+
+ for (; i >= 0; --i)
+ {
+ states_to_update[i]->accumulator.computed[Perspective] = true;
- for (const auto index : active_indices[perspective]) {
- const IndexType offset = kHalfDimensions * index;
+ const StateInfo* end_state = i == 0 ? computed_st : states_to_update[i - 1];
- for (IndexType j = 0; j < kHalfDimensions; ++j)
- accumulator.accumulation[perspective][i][j] += weights_[offset + j];
+ for (; st2 != end_state; st2 = st2->previous)
+ FeatureSet::append_changed_indices<Perspective>(
+ ksq, st2->dirtyPiece, removed[i], added[i]);
}
- #endif
}
- #if defined(USE_MMX)
- _mm_empty();
- #endif
+ StateInfo* st = computed_st;
- accumulator.computed_accumulation = true;
- }
+ // Now update the accumulators listed in states_to_update[], where the last element is a sentinel.
+#ifdef VECTOR
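+      // Fast path for the most common case: a single state to update, with
+      // one added feature and at most two removed ones (a quiet move or a
+      // capture), fused into a single pass over the accumulator.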
- // Calculate cumulative value using difference calculation
- void UpdateAccumulator(const Position& pos) const {
+ if ( states_to_update[1] == nullptr
+ && (removed[0].size() == 1 || removed[0].size() == 2)
+ && added[0].size() == 1)
+ {
+ assert(states_to_update[0]);
- Accumulator* prev_accumulator;
- assert(pos.state()->previous);
- if (pos.state()->previous->accumulator.computed_accumulation) {
- prev_accumulator = &pos.state()->previous->accumulator;
- }
- else {
- assert(pos.state()->previous->previous);
- assert(pos.state()->previous->previous->accumulator.computed_accumulation);
- prev_accumulator = &pos.state()->previous->previous->accumulator;
- }
+ auto accTileIn = reinterpret_cast<const vec_t*>(
+ &st->accumulator.accumulation[Perspective][0]);
+ auto accTileOut = reinterpret_cast<vec_t*>(
+ &states_to_update[0]->accumulator.accumulation[Perspective][0]);
- auto& accumulator = pos.state()->accumulator;
- IndexType i = 0;
- Features::IndexList removed_indices[2], added_indices[2];
- bool reset[2] = { false, false };
- RawFeatures::AppendChangedIndices(pos, kRefreshTriggers[i],
- removed_indices, added_indices, reset);
-
- #ifdef TILING
- for (IndexType j = 0; j < kHalfDimensions / kTileHeight; ++j) {
- for (Color perspective : { WHITE, BLACK }) {
- auto accTile = reinterpret_cast<vec_t*>(
- &accumulator.accumulation[perspective][i][j * kTileHeight]);
- vec_t acc[kNumRegs];
-
- if (reset[perspective]) {
- auto biasesTile = reinterpret_cast<const vec_t*>(
- &biases_[j * kTileHeight]);
- for (unsigned k = 0; k < kNumRegs; ++k)
- acc[k] = biasesTile[k];
- } else {
- auto prevAccTile = reinterpret_cast<const vec_t*>(
- &prev_accumulator->accumulation[perspective][i][j * kTileHeight]);
- for (IndexType k = 0; k < kNumRegs; ++k)
- acc[k] = vec_load(&prevAccTile[k]);
-
- // Difference calculation for the deactivated features
- for (const auto index : removed_indices[perspective]) {
- const IndexType offset = kHalfDimensions * index + j * kTileHeight;
- auto column = reinterpret_cast<const vec_t*>(&weights_[offset]);
-
- for (IndexType k = 0; k < kNumRegs; ++k)
- acc[k] = vec_sub_16(acc[k], column[k]);
+ const IndexType offsetR0 = HalfDimensions * removed[0][0];
+ auto columnR0 = reinterpret_cast<const vec_t*>(&weights[offsetR0]);
+ const IndexType offsetA = HalfDimensions * added[0][0];
+ auto columnA = reinterpret_cast<const vec_t*>(&weights[offsetA]);
+
+ if (removed[0].size() == 1)
+ {
+ for (IndexType k = 0; k < HalfDimensions * sizeof(std::int16_t) / sizeof(vec_t); ++k)
+ accTileOut[k] = vec_add_16(vec_sub_16(accTileIn[k], columnR0[k]), columnA[k]);
+ }
+ else
+ {
+ const IndexType offsetR1 = HalfDimensions * removed[0][1];
+ auto columnR1 = reinterpret_cast<const vec_t*>(&weights[offsetR1]);
+
+ for (IndexType k = 0; k < HalfDimensions * sizeof(std::int16_t) / sizeof(vec_t); ++k)
+ accTileOut[k] = vec_sub_16(
+ vec_add_16(accTileIn[k], columnA[k]),
+ vec_add_16(columnR0[k], columnR1[k]));
+ }
+
+ auto accTilePsqtIn = reinterpret_cast<const psqt_vec_t*>(
+ &st->accumulator.psqtAccumulation[Perspective][0]);
+ auto accTilePsqtOut = reinterpret_cast<psqt_vec_t*>(
+ &states_to_update[0]->accumulator.psqtAccumulation[Perspective][0]);
+
+ const IndexType offsetPsqtR0 = PSQTBuckets * removed[0][0];
+ auto columnPsqtR0 = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offsetPsqtR0]);
+ const IndexType offsetPsqtA = PSQTBuckets * added[0][0];
+ auto columnPsqtA = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offsetPsqtA]);
+
+ if (removed[0].size() == 1)
+ {
+ for (std::size_t k = 0; k < PSQTBuckets * sizeof(std::int32_t) / sizeof(psqt_vec_t); ++k)
+ accTilePsqtOut[k] = vec_add_psqt_32(vec_sub_psqt_32(
+ accTilePsqtIn[k], columnPsqtR0[k]), columnPsqtA[k]);
+ }
+ else
+ {
+ const IndexType offsetPsqtR1 = PSQTBuckets * removed[0][1];
+ auto columnPsqtR1 = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offsetPsqtR1]);
+
+ for (std::size_t k = 0; k < PSQTBuckets * sizeof(std::int32_t) / sizeof(psqt_vec_t); ++k)
+ accTilePsqtOut[k] = vec_sub_psqt_32(
+ vec_add_psqt_32(accTilePsqtIn[k], columnPsqtA[k]),
+ vec_add_psqt_32(columnPsqtR0[k], columnPsqtR1[k]));
+ }
+ }
+ else
+ {
+ for (IndexType j = 0; j < HalfDimensions / TileHeight; ++j)
+ {
+ // Load accumulator
+ auto accTileIn = reinterpret_cast<const vec_t*>(
+ &st->accumulator.accumulation[Perspective][j * TileHeight]);
+ for (IndexType k = 0; k < NumRegs; ++k)
+ acc[k] = vec_load(&accTileIn[k]);
+
+ for (IndexType i = 0; states_to_update[i]; ++i)
+ {
+ // Difference calculation for the deactivated features
+ for (const auto index : removed[i])
+ {
+ const IndexType offset = HalfDimensions * index + j * TileHeight;
+ auto column = reinterpret_cast<const vec_t*>(&weights[offset]);
+ for (IndexType k = 0; k < NumRegs; ++k)
+ acc[k] = vec_sub_16(acc[k], column[k]);
+ }
+
+ // Difference calculation for the activated features
+ for (const auto index : added[i])
+ {
+ const IndexType offset = HalfDimensions * index + j * TileHeight;
+ auto column = reinterpret_cast<const vec_t*>(&weights[offset]);
+ for (IndexType k = 0; k < NumRegs; ++k)
+ acc[k] = vec_add_16(acc[k], column[k]);
+ }
+
+ // Store accumulator
+ auto accTileOut = reinterpret_cast<vec_t*>(
+ &states_to_update[i]->accumulator.accumulation[Perspective][j * TileHeight]);
+ for (IndexType k = 0; k < NumRegs; ++k)
+ vec_store(&accTileOut[k], acc[k]);
}
}
- { // Difference calculation for the activated features
- for (const auto index : added_indices[perspective]) {
- const IndexType offset = kHalfDimensions * index + j * kTileHeight;
- auto column = reinterpret_cast<const vec_t*>(&weights_[offset]);
- for (IndexType k = 0; k < kNumRegs; ++k)
- acc[k] = vec_add_16(acc[k], column[k]);
+ for (IndexType j = 0; j < PSQTBuckets / PsqtTileHeight; ++j)
+ {
+ // Load accumulator
+ auto accTilePsqtIn = reinterpret_cast<const psqt_vec_t*>(
+ &st->accumulator.psqtAccumulation[Perspective][j * PsqtTileHeight]);
+ for (std::size_t k = 0; k < NumPsqtRegs; ++k)
+ psqt[k] = vec_load_psqt(&accTilePsqtIn[k]);
+
+ for (IndexType i = 0; states_to_update[i]; ++i)
+ {
+ // Difference calculation for the deactivated features
+ for (const auto index : removed[i])
+ {
+ const IndexType offset = PSQTBuckets * index + j * PsqtTileHeight;
+ auto columnPsqt = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offset]);
+ for (std::size_t k = 0; k < NumPsqtRegs; ++k)
+ psqt[k] = vec_sub_psqt_32(psqt[k], columnPsqt[k]);
+ }
+
+ // Difference calculation for the activated features
+ for (const auto index : added[i])
+ {
+ const IndexType offset = PSQTBuckets * index + j * PsqtTileHeight;
+ auto columnPsqt = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offset]);
+ for (std::size_t k = 0; k < NumPsqtRegs; ++k)
+ psqt[k] = vec_add_psqt_32(psqt[k], columnPsqt[k]);
+ }
+
+ // Store accumulator
+ auto accTilePsqtOut = reinterpret_cast<psqt_vec_t*>(
+ &states_to_update[i]->accumulator.psqtAccumulation[Perspective][j * PsqtTileHeight]);
+ for (std::size_t k = 0; k < NumPsqtRegs; ++k)
+ vec_store_psqt(&accTilePsqtOut[k], psqt[k]);
}
}
+ }
+#else
+ for (IndexType i = 0; states_to_update[i]; ++i)
+ {
+ std::memcpy(states_to_update[i]->accumulator.accumulation[Perspective],
+ st->accumulator.accumulation[Perspective],
+ HalfDimensions * sizeof(BiasType));
- for (IndexType k = 0; k < kNumRegs; ++k)
- vec_store(&accTile[k], acc[k]);
+ for (std::size_t k = 0; k < PSQTBuckets; ++k)
+ states_to_update[i]->accumulator.psqtAccumulation[Perspective][k] = st->accumulator.psqtAccumulation[Perspective][k];
+
+ st = states_to_update[i];
+
+ // Difference calculation for the deactivated features
+ for (const auto index : removed[i])
+ {
+ const IndexType offset = HalfDimensions * index;
+
+ for (IndexType j = 0; j < HalfDimensions; ++j)
+ st->accumulator.accumulation[Perspective][j] -= weights[offset + j];
+
+ for (std::size_t k = 0; k < PSQTBuckets; ++k)
+ st->accumulator.psqtAccumulation[Perspective][k] -= psqtWeights[index * PSQTBuckets + k];
+ }
+
+ // Difference calculation for the activated features
+ for (const auto index : added[i])
+ {
+ const IndexType offset = HalfDimensions * index;
+
+ for (IndexType j = 0; j < HalfDimensions; ++j)
+ st->accumulator.accumulation[Perspective][j] += weights[offset + j];
+
+ for (std::size_t k = 0; k < PSQTBuckets; ++k)
+ st->accumulator.psqtAccumulation[Perspective][k] += psqtWeights[index * PSQTBuckets + k];
}
}
- #if defined(USE_MMX)
- _mm_empty();
+#endif
+ }
+
+ template<Color Perspective>
+ void update_accumulator_refresh(const Position& pos) const {
+ #ifdef VECTOR
+ // Gcc-10.2 unnecessarily spills AVX2 registers if this array
+ // is defined in the VECTOR code below, once in each branch
+ vec_t acc[NumRegs];
+ psqt_vec_t psqt[NumPsqtRegs];
#endif
- #else
- for (Color perspective : { WHITE, BLACK }) {
-
- if (reset[perspective]) {
- std::memcpy(accumulator.accumulation[perspective][i], biases_,
- kHalfDimensions * sizeof(BiasType));
- } else {
- std::memcpy(accumulator.accumulation[perspective][i],
- prev_accumulator->accumulation[perspective][i],
- kHalfDimensions * sizeof(BiasType));
- // Difference calculation for the deactivated features
- for (const auto index : removed_indices[perspective]) {
- const IndexType offset = kHalfDimensions * index;
-
- for (IndexType j = 0; j < kHalfDimensions; ++j)
- accumulator.accumulation[perspective][i][j] -= weights_[offset + j];
- }
+ // Refresh the accumulator
+ // Could be extracted to a separate function because it's done in 2 places,
+ // but it's unclear if compilers would correctly handle register allocation.
+ auto& accumulator = pos.state()->accumulator;
+ accumulator.computed[Perspective] = true;
+ FeatureSet::IndexList active;
+ FeatureSet::append_active_indices<Perspective>(pos, active);
+
+#ifdef VECTOR
+ for (IndexType j = 0; j < HalfDimensions / TileHeight; ++j)
+ {
+ auto biasesTile = reinterpret_cast<const vec_t*>(
+ &biases[j * TileHeight]);
+ for (IndexType k = 0; k < NumRegs; ++k)
+ acc[k] = biasesTile[k];
+
+ for (const auto index : active)
+ {
+ const IndexType offset = HalfDimensions * index + j * TileHeight;
+ auto column = reinterpret_cast<const vec_t*>(&weights[offset]);
+
+ for (unsigned k = 0; k < NumRegs; ++k)
+ acc[k] = vec_add_16(acc[k], column[k]);
}
- { // Difference calculation for the activated features
- for (const auto index : added_indices[perspective]) {
- const IndexType offset = kHalfDimensions * index;
- for (IndexType j = 0; j < kHalfDimensions; ++j)
- accumulator.accumulation[perspective][i][j] += weights_[offset + j];
- }
+ auto accTile = reinterpret_cast<vec_t*>(
+ &accumulator.accumulation[Perspective][j * TileHeight]);
+ for (unsigned k = 0; k < NumRegs; k++)
+ vec_store(&accTile[k], acc[k]);
+ }
+
+ for (IndexType j = 0; j < PSQTBuckets / PsqtTileHeight; ++j)
+ {
+ for (std::size_t k = 0; k < NumPsqtRegs; ++k)
+ psqt[k] = vec_zero_psqt();
+
+ for (const auto index : active)
+ {
+ const IndexType offset = PSQTBuckets * index + j * PsqtTileHeight;
+ auto columnPsqt = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offset]);
+
+ for (std::size_t k = 0; k < NumPsqtRegs; ++k)
+ psqt[k] = vec_add_psqt_32(psqt[k], columnPsqt[k]);
}
+
+ auto accTilePsqt = reinterpret_cast<psqt_vec_t*>(
+ &accumulator.psqtAccumulation[Perspective][j * PsqtTileHeight]);
+ for (std::size_t k = 0; k < NumPsqtRegs; ++k)
+ vec_store_psqt(&accTilePsqt[k], psqt[k]);
}
- #endif
- accumulator.computed_accumulation = true;
+#else
+ std::memcpy(accumulator.accumulation[Perspective], biases,
+ HalfDimensions * sizeof(BiasType));
+
+ for (std::size_t k = 0; k < PSQTBuckets; ++k)
+ accumulator.psqtAccumulation[Perspective][k] = 0;
+
+ for (const auto index : active)
+ {
+ const IndexType offset = HalfDimensions * index;
+
+ for (IndexType j = 0; j < HalfDimensions; ++j)
+ accumulator.accumulation[Perspective][j] += weights[offset + j];
+
+ for (std::size_t k = 0; k < PSQTBuckets; ++k)
+ accumulator.psqtAccumulation[Perspective][k] += psqtWeights[index * PSQTBuckets + k];
+ }
+#endif
}
- using BiasType = std::int16_t;
- using WeightType = std::int16_t;
+ template<Color Perspective>
+ void hint_common_access_for_perspective(const Position& pos) const {
+
+ // Works like update_accumulator, but performs less work.
+ // Updates ONLY the accumulator for pos.
+
+      // Fast early exit.
+ if (pos.state()->accumulator.computed[Perspective])
+ return;
+
+ auto [oldest_st, _] = try_find_computed_accumulator<Perspective>(pos);
+
+ if (oldest_st->accumulator.computed[Perspective])
+ {
+ // Only update current position accumulator to minimize work.
+ StateInfo* states_to_update[2] = { pos.state(), nullptr };
+ update_accumulator_incremental<Perspective, 2>(pos, oldest_st, states_to_update);
+ }
+ else
+ {
+ update_accumulator_refresh<Perspective>(pos);
+ }
+ }
+
+ template<Color Perspective>
+ void update_accumulator(const Position& pos) const {
+
+ auto [oldest_st, next] = try_find_computed_accumulator<Perspective>(pos);
+
+ if (oldest_st->accumulator.computed[Perspective])
+ {
+ if (next == nullptr)
+ return;
+
+ // Now update the accumulators listed in states_to_update[], where the last element is a sentinel.
+ // Currently we update 2 accumulators.
+ // 1. for the current position
+ // 2. the next accumulator after the computed one
+ // The heuristic may change in the future.
+ StateInfo *states_to_update[3] =
+ { next, next == pos.state() ? nullptr : pos.state(), nullptr };
+
+ update_accumulator_incremental<Perspective, 3>(pos, oldest_st, states_to_update);
+ }
+ else
+ {
+ update_accumulator_refresh<Perspective>(pos);
+ }
+ }
- alignas(kCacheLineSize) BiasType biases_[kHalfDimensions];
- alignas(kCacheLineSize)
- WeightType weights_[kHalfDimensions * kInputDimensions];
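+    // Weights are stored one column per input feature: the column for
+    // feature `index` begins at weights[HalfDimensions * index], and its
+    // PSQT part at psqtWeights[PSQTBuckets * index].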
+ alignas(CacheLineSize) BiasType biases[HalfDimensions];
+ alignas(CacheLineSize) WeightType weights[HalfDimensions * InputDimensions];
+ alignas(CacheLineSize) PSQTWeightType psqtWeights[InputDimensions * PSQTBuckets];
};
-} // namespace Eval::NNUE
+} // namespace Stockfish::Eval::NNUE
#endif // #ifndef NNUE_FEATURE_TRANSFORMER_H_INCLUDED