diff --git a/src/nnue/nnue_feature_transformer.h b/src/nnue/nnue_feature_transformer.h
index 741d97cf..f4024dce 100644
--- a/src/nnue/nnue_feature_transformer.h
+++ b/src/nnue/nnue_feature_transformer.h
@@ -1,6 +1,6 @@
 /*
   Stockfish, a UCI chess playing engine derived from Glaurung 2.1
-  Copyright (C) 2004-2021 The Stockfish developers (see AUTHORS file)
+  Copyright (C) 2004-2022 The Stockfish developers (see AUTHORS file)
 
   Stockfish is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
@@ -24,18 +24,21 @@
 #include "nnue_common.h"
 #include "nnue_architecture.h"
 
-#include "../misc.h"
-
+#include <cstring> // std::memset()
 
 namespace Stockfish::Eval::NNUE {
 
+  using BiasType       = std::int16_t;
+  using WeightType     = std::int16_t;
+  using PSQTWeightType = std::int32_t;
+
   // If vector instructions are enabled, we update and refresh the
   // accumulator tile by tile such that each tile fits in the CPU's
   // vector registers.
   #define VECTOR
 
-  static_assert(PSQTBuckets == 8, "Assumed by the current choice of constants.");
+  static_assert(PSQTBuckets % 8 == 0,
+    "Per feature PSQT values cannot be processed at granularity lower than 8 at a time.");
 
   #ifdef USE_AVX512
   typedef __m512i vec_t;
@@ -49,8 +52,7 @@
   #define vec_add_psqt_32(a,b) _mm256_add_epi32(a,b)
   #define vec_sub_psqt_32(a,b) _mm256_sub_epi32(a,b)
   #define vec_zero_psqt() _mm256_setzero_si256()
-  static constexpr IndexType NumRegs = 8; // only 8 are needed
-  static constexpr IndexType NumPsqtRegs = 1;
+  #define NumRegistersSIMD 32
 
   #elif USE_AVX2
   typedef __m256i vec_t;
@@ -64,8 +66,7 @@
   #define vec_add_psqt_32(a,b) _mm256_add_epi32(a,b)
   #define vec_sub_psqt_32(a,b) _mm256_sub_epi32(a,b)
   #define vec_zero_psqt() _mm256_setzero_si256()
-  static constexpr IndexType NumRegs = 16;
-  static constexpr IndexType NumPsqtRegs = 1;
+  #define NumRegistersSIMD 16
 
   #elif USE_SSE2
   typedef __m128i vec_t;
@@ -79,8 +80,7 @@
   #define vec_add_psqt_32(a,b) _mm_add_epi32(a,b)
   #define vec_sub_psqt_32(a,b) _mm_sub_epi32(a,b)
   #define vec_zero_psqt() _mm_setzero_si128()
-  static constexpr IndexType NumRegs = Is64Bit ? 16 : 8;
-  static constexpr IndexType NumPsqtRegs = 2;
+  #define NumRegistersSIMD (Is64Bit ? 16 : 8)
 
   #elif USE_MMX
   typedef __m64 vec_t;
@@ -94,8 +94,7 @@
   #define vec_add_psqt_32(a,b) _mm_add_pi32(a,b)
   #define vec_sub_psqt_32(a,b) _mm_sub_pi32(a,b)
   #define vec_zero_psqt() _mm_setzero_si64()
-  static constexpr IndexType NumRegs = 8;
-  static constexpr IndexType NumPsqtRegs = 4;
+  #define NumRegistersSIMD 8
 
   #elif USE_NEON
   typedef int16x8_t vec_t;
@@ -109,14 +108,61 @@
   #define vec_add_psqt_32(a,b) vaddq_s32(a,b)
   #define vec_sub_psqt_32(a,b) vsubq_s32(a,b)
   #define vec_zero_psqt() psqt_vec_t{0}
-  static constexpr IndexType NumRegs = 16;
-  static constexpr IndexType NumPsqtRegs = 2;
+  #define NumRegistersSIMD 16
 
   #else
   #undef VECTOR
 
   #endif
 
+
+  #ifdef VECTOR
+
+      // Compute optimal SIMD register count for feature transformer accumulation.
+
+      // We use __m* types as template arguments, which causes GCC to emit warnings
+      // about losing some attribute information. This is irrelevant to us as we
+      // only take their size, so the following pragmas are harmless.
+      #pragma GCC diagnostic push
+      #pragma GCC diagnostic ignored "-Wignored-attributes"
+
+      template <typename SIMDRegisterType,
+                typename LaneType,
+                int      NumLanes,
+                int      MaxRegisters>
+      static constexpr int BestRegisterCount()
+      {
+          #define RegisterSize  sizeof(SIMDRegisterType)
+          #define LaneSize      sizeof(LaneType)
+
+          static_assert(RegisterSize >= LaneSize);
+          static_assert(MaxRegisters <= NumRegistersSIMD);
+          static_assert(MaxRegisters > 0);
+          static_assert(NumRegistersSIMD > 0);
+          static_assert(RegisterSize % LaneSize == 0);
+          static_assert((NumLanes * LaneSize) % RegisterSize == 0);
+
+          const int ideal = (NumLanes * LaneSize) / RegisterSize;
+          if (ideal <= MaxRegisters)
+              return ideal;
+
+          // Look for the largest divisor of the ideal register count that is smaller than MaxRegisters
+          for (int divisor = MaxRegisters; divisor > 1; --divisor)
+              if (ideal % divisor == 0)
+                  return divisor;
+
+          return 1;
+      }
+
+      static constexpr int NumRegs     = BestRegisterCount<vec_t, WeightType, TransformedFeatureDimensions, NumRegistersSIMD>();
+      static constexpr int NumPsqtRegs = BestRegisterCount<psqt_vec_t, PSQTWeightType, PSQTBuckets, NumRegistersSIMD>();
+
+      #pragma GCC diagnostic pop
+
+  #endif
+
+
+
   // Input feature converter
   class FeatureTransformer {
@@ -150,21 +196,21 @@
     // Read network parameters
     bool read_parameters(std::istream& stream) {
 
-      for (std::size_t i = 0; i < HalfDimensions; ++i)
-        biases[i] = read_little_endian<BiasType>(stream);
-      for (std::size_t i = 0; i < HalfDimensions * InputDimensions; ++i)
-        weights[i] = read_little_endian<WeightType>(stream);
-      for (std::size_t i = 0; i < PSQTBuckets * InputDimensions; ++i)
-        psqtWeights[i] = read_little_endian<PSQTWeightType>(stream);
+
+      read_little_endian<BiasType      >(stream, biases     , HalfDimensions                  );
+      read_little_endian<WeightType    >(stream, weights    , HalfDimensions * InputDimensions);
+      read_little_endian<PSQTWeightType>(stream, psqtWeights, PSQTBuckets    * InputDimensions);
+
       return !stream.fail();
     }
 
     // Write network parameters
     bool write_parameters(std::ostream& stream) const {
 
-      for (std::size_t i = 0; i < HalfDimensions; ++i)
-        write_little_endian<BiasType>(stream, biases[i]);
-      for (std::size_t i = 0; i < HalfDimensions * InputDimensions; ++i)
-        write_little_endian<WeightType>(stream, weights[i]);
+
+      write_little_endian<BiasType      >(stream, biases     , HalfDimensions                  );
+      write_little_endian<WeightType    >(stream, weights    , HalfDimensions * InputDimensions);
+      write_little_endian<PSQTWeightType>(stream, psqtWeights, PSQTBuckets    * InputDimensions);
+
       return !stream.fail();
     }
 
@@ -178,118 +224,151 @@
       const auto& psqtAccumulation = pos.state()->accumulator.psqtAccumulation;
 
       const auto psqt = (
-            psqtAccumulation[static_cast<int>(perspectives[0])][bucket]
-          - psqtAccumulation[static_cast<int>(perspectives[1])][bucket]
+            psqtAccumulation[perspectives[0]][bucket]
+          - psqtAccumulation[perspectives[1]][bucket]
       ) / 2;
+
 
   #if defined(USE_AVX512)
+
       constexpr IndexType NumChunks = HalfDimensions / (SimdWidth * 2);
       static_assert(HalfDimensions % (SimdWidth * 2) == 0);
       const __m512i Control = _mm512_setr_epi64(0, 2, 4, 6, 1, 3, 5, 7);
       const __m512i Zero = _mm512_setzero_si512();
 
+      for (IndexType p = 0; p < 2; ++p)
+      {
+          const IndexType offset = HalfDimensions * p;
+          auto out = reinterpret_cast<__m512i*>(&output[offset]);
+          for (IndexType j = 0; j < NumChunks; ++j)
+          {
+              __m512i sum0 = _mm512_load_si512(&reinterpret_cast<const __m512i*>
+                                              (accumulation[perspectives[p]])[j * 2 + 0]);
+              __m512i sum1 = _mm512_load_si512(&reinterpret_cast<const __m512i*>
+                                              (accumulation[perspectives[p]])[j * 2 + 1]);
+
+              _mm512_store_si512(&out[j], _mm512_permutexvar_epi64(Control,
+                                 _mm512_max_epi8(_mm512_packs_epi16(sum0, sum1), Zero)));
+          }
+      }
+      return psqt;
+
   #elif defined(USE_AVX2)
+
       constexpr IndexType NumChunks = HalfDimensions / SimdWidth;
       constexpr int Control = 0b11011000;
       const __m256i Zero = _mm256_setzero_si256();
 
+      for (IndexType p = 0; p < 2; ++p)
+      {
+          const IndexType offset = HalfDimensions * p;
+          auto out = reinterpret_cast<__m256i*>(&output[offset]);
+          for (IndexType j = 0; j < NumChunks; ++j)
+          {
+              __m256i sum0 = _mm256_load_si256(&reinterpret_cast<const __m256i*>
+                                              (accumulation[perspectives[p]])[j * 2 + 0]);
+              __m256i sum1 = _mm256_load_si256(&reinterpret_cast<const __m256i*>
+                                              (accumulation[perspectives[p]])[j * 2 + 1]);
+
+              _mm256_store_si256(&out[j], _mm256_permute4x64_epi64(
+                                 _mm256_max_epi8(_mm256_packs_epi16(sum0, sum1), Zero), Control));
+          }
+      }
+      return psqt;
 
   #elif defined(USE_SSE2)
-      constexpr IndexType NumChunks = HalfDimensions / SimdWidth;
 
-  #ifdef USE_SSE41
+      #ifdef USE_SSE41
+      constexpr IndexType NumChunks = HalfDimensions / SimdWidth;
       const __m128i Zero = _mm_setzero_si128();
-  #else
+      #else
+      constexpr IndexType NumChunks = HalfDimensions / SimdWidth;
       const __m128i k0x80s = _mm_set1_epi8(-128);
-  #endif
+      #endif
+
+      for (IndexType p = 0; p < 2; ++p)
+      {
+          const IndexType offset = HalfDimensions * p;
+          auto out = reinterpret_cast<__m128i*>(&output[offset]);
+          for (IndexType j = 0; j < NumChunks; ++j)
+          {
+              __m128i sum0 = _mm_load_si128(&reinterpret_cast<const __m128i*>
+                                           (accumulation[perspectives[p]])[j * 2 + 0]);
+              __m128i sum1 = _mm_load_si128(&reinterpret_cast<const __m128i*>
+                                           (accumulation[perspectives[p]])[j * 2 + 1]);
+              const __m128i packedbytes = _mm_packs_epi16(sum0, sum1);
+
+              #ifdef USE_SSE41
+              _mm_store_si128(&out[j], _mm_max_epi8(packedbytes, Zero));
+              #else
+              _mm_store_si128(&out[j], _mm_subs_epi8(_mm_adds_epi8(packedbytes, k0x80s), k0x80s));
+              #endif
+          }
+      }
+      return psqt;
 
   #elif defined(USE_MMX)
+
       constexpr IndexType NumChunks = HalfDimensions / SimdWidth;
       const __m64 k0x80s = _mm_set1_pi8(-128);
 
+      for (IndexType p = 0; p < 2; ++p)
+      {
+          const IndexType offset = HalfDimensions * p;
+          auto out = reinterpret_cast<__m64*>(&output[offset]);
+          for (IndexType j = 0; j < NumChunks; ++j)
+          {
+              __m64 sum0 = *(&reinterpret_cast<const __m64*>(accumulation[perspectives[p]])[j * 2 + 0]);
+              __m64 sum1 = *(&reinterpret_cast<const __m64*>(accumulation[perspectives[p]])[j * 2 + 1]);
+              const __m64 packedbytes = _mm_packs_pi16(sum0, sum1);
+              out[j] = _mm_subs_pi8(_mm_adds_pi8(packedbytes, k0x80s), k0x80s);
+          }
+      }
+      _mm_empty();
+      return psqt;
 
   #elif defined(USE_NEON)
+
       constexpr IndexType NumChunks = HalfDimensions / (SimdWidth / 2);
       const int8x8_t Zero = {0};
-  #endif
 
-      for (IndexType p = 0; p < 2; ++p) {
-        const IndexType offset = HalfDimensions * p;
-
-  #if defined(USE_AVX512)
-        auto out = reinterpret_cast<__m512i*>(&output[offset]);
-        for (IndexType j = 0; j < NumChunks; ++j) {
-          __m512i sum0 = _mm512_load_si512(
-              &reinterpret_cast<const __m512i*>(accumulation[perspectives[p]])[j * 2 + 0]);
-          __m512i sum1 = _mm512_load_si512(
-              &reinterpret_cast<const __m512i*>(accumulation[perspectives[p]])[j * 2 + 1]);
-          _mm512_store_si512(&out[j], _mm512_permutexvar_epi64(Control,
-              _mm512_max_epi8(_mm512_packs_epi16(sum0, sum1), Zero)));
-        }
-
-  #elif defined(USE_AVX2)
-        auto out = reinterpret_cast<__m256i*>(&output[offset]);
-        for (IndexType j = 0; j < NumChunks; ++j) {
-          __m256i sum0 = _mm256_load_si256(
-              &reinterpret_cast<const __m256i*>(accumulation[perspectives[p]])[j * 2 + 0]);
-          __m256i sum1 = _mm256_load_si256(
-              &reinterpret_cast<const __m256i*>(accumulation[perspectives[p]])[j * 2 + 1]);
-          _mm256_store_si256(&out[j], _mm256_permute4x64_epi64(_mm256_max_epi8(
-              _mm256_packs_epi16(sum0, sum1), Zero), Control));
-        }
-
-  #elif defined(USE_SSE2)
-        auto out = reinterpret_cast<__m128i*>(&output[offset]);
-        for (IndexType j = 0; j < NumChunks; ++j) {
-          __m128i sum0 = _mm_load_si128(&reinterpret_cast<const __m128i*>(
-              accumulation[perspectives[p]])[j * 2 + 0]);
-          __m128i sum1 = _mm_load_si128(&reinterpret_cast<const __m128i*>(
-              accumulation[perspectives[p]])[j * 2 + 1]);
-          const __m128i packedbytes = _mm_packs_epi16(sum0, sum1);
-
-          _mm_store_si128(&out[j],
-
-  #ifdef USE_SSE41
-              _mm_max_epi8(packedbytes, Zero)
-  #else
-              _mm_subs_epi8(_mm_adds_epi8(packedbytes, k0x80s), k0x80s)
-  #endif
-          );
-        }
-
-  #elif defined(USE_MMX)
-        auto out = reinterpret_cast<__m64*>(&output[offset]);
-        for (IndexType j = 0; j < NumChunks; ++j) {
-          __m64 sum0 = *(&reinterpret_cast<const __m64*>(
-              accumulation[perspectives[p]])[j * 2 + 0]);
-          __m64 sum1 = *(&reinterpret_cast<const __m64*>(
-              accumulation[perspectives[p]])[j * 2 + 1]);
-          const __m64 packedbytes = _mm_packs_pi16(sum0, sum1);
-          out[j] = _mm_subs_pi8(_mm_adds_pi8(packedbytes, k0x80s), k0x80s);
-        }
-
-  #elif defined(USE_NEON)
-        const auto out = reinterpret_cast<int8x8_t*>(&output[offset]);
-        for (IndexType j = 0; j < NumChunks; ++j) {
-          int16x8_t sum = reinterpret_cast<const int16x8_t*>(
-              accumulation[perspectives[p]])[j];
-          out[j] = vmax_s8(vqmovn_s16(sum), Zero);
-        }
+      for (IndexType p = 0; p < 2; ++p)
+      {
+          const IndexType offset = HalfDimensions * p;
+          const auto out = reinterpret_cast<int8x8_t*>(&output[offset]);
+
+          constexpr IndexType UnrollFactor = 16;
+          static_assert(NumChunks % UnrollFactor == 0);
+          for (IndexType j = 0; j < NumChunks; j += UnrollFactor)
+          {
+              int16x8_t sums[UnrollFactor];
+              for (IndexType i = 0; i < UnrollFactor; ++i)
+                  sums[i] = reinterpret_cast<const int16x8_t*>(accumulation[perspectives[p]])[j+i];
+
+              for (IndexType i = 0; i < UnrollFactor; ++i)
+                  out[j+i] = vmax_s8(vqmovn_s16(sums[i]), Zero);
+          }
+      }
+      return psqt;
 
   #else
 
-      for (IndexType j = 0; j < HalfDimensions; ++j) {
-        BiasType sum = accumulation[static_cast<int>(perspectives[p])][j];
-        output[offset + j] = static_cast<OutputType>(
-            std::max<int>(0, std::min<int>(127, sum)));
-      }
+      for (IndexType p = 0; p < 2; ++p)
+      {
+          const IndexType offset = HalfDimensions * p;
+          for (IndexType j = 0; j < HalfDimensions; ++j)
+          {
+              BiasType sum = accumulation[perspectives[p]][j];
+              output[offset + j] = static_cast<OutputType>(std::max<int>(0, std::min<int>(127, sum)));
+          }
+      }
+      return psqt;
 
   #endif
 
-      }
-
-  #if defined(USE_MMX)
-      _mm_empty();
-  #endif
-
-      return psqt;
-    }
+  } // end of function transform()
+
 
    private:
    void update_accumulator(const Position& pos, const Color perspective) const {
 
@@ -298,7 +377,6 @@
       // That might depend on the feature set and generally relies on the
       // feature set's update cost calculation to be correct and never
       // allow updates with more added/removed features than MaxActiveDimensions.
-      using IndexList = ValueList<IndexType, FeatureSet::MaxActiveDimensions>;
 
   #ifdef VECTOR
       // Gcc-10.2 unnecessarily spills AVX2 registers if this array
@@ -311,7 +389,7 @@
       // of the estimated gain in terms of features to be added/subtracted.
       StateInfo *st = pos.state(), *next = nullptr;
       int gain = FeatureSet::refresh_cost(pos);
-      while (st->accumulator.state[perspective] == EMPTY)
+      while (st->previous && !st->accumulator.computed[perspective])
       {
         // This governs when a full feature refresh is needed and how many
         // updates are better than just one full refresh.
@@ -322,7 +400,7 @@
         st = st->previous;
       }
 
-      if (st->accumulator.state[perspective] == COMPUTED)
+      if (st->accumulator.computed[perspective])
       {
         if (next == nullptr)
           return;
@@ -332,16 +410,16 @@
 
         // Gather all features to be updated.
         const Square ksq = pos.square<KING>(perspective);
-        IndexList removed[2], added[2];
+        FeatureSet::IndexList removed[2], added[2];
         FeatureSet::append_changed_indices(
-          ksq, next, perspective, removed[0], added[0]);
+          ksq, next->dirtyPiece, perspective, removed[0], added[0]);
         for (StateInfo *st2 = pos.state(); st2 != next; st2 = st2->previous)
           FeatureSet::append_changed_indices(
-            ksq, st2, perspective, removed[1], added[1]);
+            ksq, st2->dirtyPiece, perspective, removed[1], added[1]);
 
         // Mark the accumulators as computed.
-        next->accumulator.state[perspective] = COMPUTED;
-        pos.state()->accumulator.state[perspective] = COMPUTED;
+        next->accumulator.computed[perspective] = true;
+        pos.state()->accumulator.computed[perspective] = true;
 
         // Now update the accumulators listed in states_to_update[], where the last element is a sentinel.
         StateInfo *states_to_update[3] =
@@ -461,8 +539,8 @@
       {
         // Refresh the accumulator
         auto& accumulator = pos.state()->accumulator;
-        accumulator.state[perspective] = COMPUTED;
-        IndexList active;
+        accumulator.computed[perspective] = true;
+        FeatureSet::IndexList active;
         FeatureSet::append_active_indices(pos, perspective, active);
 
   #ifdef VECTOR
@@ -533,10 +611,6 @@
   #endif
       }
 
-      using BiasType = std::int16_t;
-      using WeightType = std::int16_t;
-      using PSQTWeightType = std::int32_t;
-
       alignas(CacheLineSize) BiasType biases[HalfDimensions];
       alignas(CacheLineSize) WeightType weights[HalfDimensions * InputDimensions];
       alignas(CacheLineSize) PSQTWeightType psqtWeights[InputDimensions * PSQTBuckets];
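
The BestRegisterCount() helper added by this diff picks the number of SIMD registers used per accumulation pass: ideally one register per chunk of the whole row, otherwise the largest divisor of that ideal count that fits the architecture's register budget, so the row still splits into equally sized register-resident tiles. Below is a minimal standalone sketch of the same heuristic; the fake register type and the sample dimensions (1024 int16 lanes, 32-byte registers, a 16-register budget) are illustrative assumptions, not values taken from this header.

#include <cstdio>

// Same divisor search as BestRegisterCount() in the diff, with the
// RegisterSize/LaneSize macros turned into constexpr locals.
template <typename SIMDRegisterType, typename LaneType, int NumLanes, int MaxRegisters>
constexpr int best_register_count()
{
    constexpr int RegisterSize = sizeof(SIMDRegisterType);
    constexpr int LaneSize     = sizeof(LaneType);
    static_assert(RegisterSize % LaneSize == 0, "register must hold whole lanes");
    static_assert((NumLanes * LaneSize) % RegisterSize == 0, "row must fill whole registers");

    // Ideal case: the whole row lives in registers at once.
    constexpr int ideal = (NumLanes * LaneSize) / RegisterSize;
    if (ideal <= MaxRegisters)
        return ideal;

    // Otherwise take the largest divisor of the ideal count within the
    // budget, so each pass processes an equally sized tile of the row.
    for (int divisor = MaxRegisters; divisor > 1; --divisor)
        if (ideal % divisor == 0)
            return divisor;
    return 1;
}

struct alignas(32) FakeYmm { char bytes[32]; }; // stand-in for __m256i

int main()
{
    // 1024 int16 lanes in 32-byte registers with a 16-register budget:
    // ideal = 1024 * 2 / 32 = 64 -> largest divisor <= 16 is 16,
    // so the row is processed in 64 / 16 = 4 register-sized tiles.
    constexpr int n = best_register_count<FakeYmm, short, 1024, 16>();
    static_assert(n == 16, "expected a 16-register tiling");
    std::printf("registers per pass: %d\n", n);
}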
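
read_parameters() and write_parameters() switch from one read_little_endian<T>(stream) call per element to a bulk overload taking the destination array and an element count. The real helper lives in nnue_common.h; the sketch below is only one plausible shape for such an overload, assuming a portable byte-by-byte little-endian decode (an implementation may instead issue a single stream.read over the whole array when the host is already little-endian, which is presumably the point of the bulk form).

#include <cstddef>
#include <cstdint>
#include <istream>
#include <type_traits>

// Hypothetical bulk little-endian reader: fills out[0..count) from the stream,
// reconstructing each value byte by byte so it works on any host endianness.
template <typename IntType>
void read_little_endian(std::istream& stream, IntType* out, std::size_t count)
{
    using Unsigned = typename std::make_unsigned<IntType>::type;
    for (std::size_t i = 0; i < count; ++i)
    {
        unsigned char bytes[sizeof(IntType)];
        stream.read(reinterpret_cast<char*>(bytes), sizeof(bytes));
        Unsigned v = 0;
        for (std::size_t b = 0; b < sizeof(IntType); ++b)
            v |= static_cast<Unsigned>(bytes[b]) << (8 * b); // byte b holds bits 8*b and up
        out[i] = static_cast<IntType>(v);
    }
}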
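
Every SIMD branch of the rewritten transform() computes the same per-lane mapping: clamp each int16 accumulator value to [0, 127] and narrow it to int8 (a clipped ReLU). The saturating pack instructions interleave 64-bit lanes, which is the only reason for the Control permutes in the AVX2/AVX-512 paths, and the pre-SSE4.1 path obtains the max-with-zero through the adds/subs trick with 0x80 because _mm_max_epi8 does not exist there. A scalar reference of the operation, for comparison (the function name here is an assumption, not part of the file):

#include <algorithm>
#include <cstddef>
#include <cstdint>

// One output row of transform(): clipped ReLU from int16 accumulator to int8.
void clipped_relu_reference(const std::int16_t* acc, std::int8_t* out, std::size_t n)
{
    for (std::size_t j = 0; j < n; ++j)
        out[j] = static_cast<std::int8_t>(std::clamp<int>(acc[j], 0, 127));
}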
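
update_accumulator() walks the StateInfo::previous links looking for the nearest position whose accumulator is already computed for the given perspective, abandoning the incremental path once the accumulated update cost outweighs a full refresh; the new while condition also stops at the root (st->previous == nullptr) instead of relying on the removed EMPTY state marker. A simplified skeleton of that walk, with hypothetical types standing in for StateInfo and the FeatureSet cost functions (the real code additionally records up to two intermediate states to update and handles king-move refresh conditions):

// Hypothetical, reduced model of the search in update_accumulator().
struct State {
    State* previous;    // parent position, nullptr at the root
    bool   computed;    // accumulator already valid for this perspective?
    int    updateCost;  // estimated cost of updating across this move
};

// Returns the state to start incremental updates from, or nullptr when a
// full refresh is the better (or only) option.
State* find_update_root(State* st, int refreshCost)
{
    int gain = refreshCost;
    while (st->previous && !st->computed)
    {
        gain -= st->updateCost;
        if (gain < 0)
            return nullptr;   // updating would cost more than refreshing
        st = st->previous;
    }
    return st->computed ? st : nullptr;
}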