/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2023 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

// Definition of layer AffineTransformSparseInput of NNUE evaluation function

#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED

#include <algorithm>
#include <array>
#include <cstdint>
#include <iostream>

#include "../../bitboard.h"
#include "../nnue_common.h"
#include "affine_transform.h"
#include "simd.h"

/*
  This file contains the definition for a fully connected layer (aka affine transform) with
  block sparse input.
*/
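
// The input of this layer (typically the clipped uint8 output of the previous
// layer) is mostly zeros, so instead of touching every weight column,
// propagate() first gathers the indices of the nonzero 32-bit input blocks
// (4 uint8 values each) and accumulates weight columns only for those blocks.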

namespace Stockfish::Eval::NNUE::Layers {

#if (USE_SSSE3 | (USE_NEON >= 8))
alignas(CacheLineSize) static inline const
  std::array<std::array<std::uint16_t, 8>, 256> lookup_indices = []() {
      std::array<std::array<std::uint16_t, 8>, 256> v{};
      for (unsigned i = 0; i < 256; ++i)
      {
          std::uint64_t j = i, k = 0;
          while (j)
              v[i][k++] = pop_lsb(j);
      }
      return v;
  }();
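// E.g. lookup_indices[0b10010110] = {1, 2, 4, 7, 0, 0, 0, 0}: the positions of
// the set bits of the byte in ascending order, zero-padded to 8 entries.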

// Find indices of nonzero numbers in an int32_t array
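// Example (conceptual): for input = {0, 7, 0, 0, 3, 1, 0, 0} and
// InputDimensions = 8, find_nnz writes out = {1, 4, 5} and sets count_out = 3.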
template<const IndexType InputDimensions>
void find_nnz(const std::int32_t* input, std::uint16_t* out, IndexType& count_out) {
#if defined(USE_SSSE3)
    #if defined(USE_AVX512)
    using vec_t = __m512i;
        #define vec_nnz(a) _mm512_cmpgt_epi32_mask(a, _mm512_setzero_si512())
    #elif defined(USE_AVX2)
    using vec_t = __m256i;
        #if defined(USE_VNNI) && !defined(USE_AVXVNNI)
            #define vec_nnz(a) _mm256_cmpgt_epi32_mask(a, _mm256_setzero_si256())
        #else
            #define vec_nnz(a) \
                _mm256_movemask_ps( \
                  _mm256_castsi256_ps(_mm256_cmpgt_epi32(a, _mm256_setzero_si256())))
        #endif
    #elif defined(USE_SSSE3)
    using vec_t = __m128i;
        #define vec_nnz(a) \
            _mm_movemask_ps(_mm_castsi128_ps(_mm_cmpgt_epi32(a, _mm_setzero_si128())))
    #endif
    using vec128_t = __m128i;
    #define vec128_zero _mm_setzero_si128()
    #define vec128_set_16(a) _mm_set1_epi16(a)
    #define vec128_load(a) _mm_load_si128(a)
    #define vec128_storeu(a, b) _mm_storeu_si128(a, b)
    #define vec128_add(a, b) _mm_add_epi16(a, b)
#elif defined(USE_NEON)
    using vec_t = uint32x4_t;
    static const std::uint32_t Mask[4] = {1, 2, 4, 8};
    #define vec_nnz(a) vaddvq_u32(vandq_u32(vtstq_u32(a, a), vld1q_u32(Mask)))
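    // vtstq_u32(a, a) sets all bits of each nonzero lane; masking with {1, 2, 4, 8}
    // and horizontally adding yields the same 4-bit lane mask that movemask
    // produces on x86.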
    using vec128_t = uint16x8_t;
    #define vec128_zero vdupq_n_u16(0)
    #define vec128_set_16(a) vdupq_n_u16(a)
    #define vec128_load(a) vld1q_u16(reinterpret_cast<const std::uint16_t*>(a))
    #define vec128_storeu(a, b) vst1q_u16(reinterpret_cast<std::uint16_t*>(a), b)
    #define vec128_add(a, b) vaddq_u16(a, b)
#endif
    constexpr IndexType InputSimdWidth = sizeof(vec_t) / sizeof(std::int32_t);
    // Inputs are processed InputSimdWidth at a time and outputs are processed 8 at a
    // time, so we process in chunks of max(InputSimdWidth, 8).
    constexpr IndexType ChunkSize       = std::max<IndexType>(InputSimdWidth, 8);
    constexpr IndexType NumChunks       = InputDimensions / ChunkSize;
    constexpr IndexType InputsPerChunk  = ChunkSize / InputSimdWidth;
    constexpr IndexType OutputsPerChunk = ChunkSize / 8;
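    // E.g. with AVX2 (vec_t = __m256i): InputSimdWidth = 8, so ChunkSize = 8 and
    // InputsPerChunk = OutputsPerChunk = 1. With AVX-512: InputSimdWidth = 16,
    // ChunkSize = 16, InputsPerChunk = 1, OutputsPerChunk = 2.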

    const auto     inputVector = reinterpret_cast<const vec_t*>(input);
    IndexType      count       = 0;
    vec128_t       base        = vec128_zero;
    const vec128_t increment   = vec128_set_16(8);
    for (IndexType i = 0; i < NumChunks; ++i)
    {
        // bitmask of nonzero values in this chunk
        unsigned nnz = 0;
        for (IndexType j = 0; j < InputsPerChunk; ++j)
        {
            const vec_t inputChunk = inputVector[i * InputsPerChunk + j];
            nnz |= unsigned(vec_nnz(inputChunk)) << (j * InputSimdWidth);
        }
        for (IndexType j = 0; j < OutputsPerChunk; ++j)
        {
            const auto lookup = (nnz >> (j * 8)) & 0xFF;
            const auto offsets =
              vec128_load(reinterpret_cast<const vec128_t*>(&lookup_indices[lookup]));
            vec128_storeu(reinterpret_cast<vec128_t*>(out + count), vec128_add(base, offsets));
            count += popcount(lookup);
            base = vec128_add(base, increment);
        }
    }
    count_out = count;
}
    #undef vec_nnz
    #undef vec128_zero
    #undef vec128_set_16
    #undef vec128_load
    #undef vec128_storeu
    #undef vec128_add
#endif

// Sparse input implementation
template<IndexType InDims, IndexType OutDims>
class AffineTransformSparseInput {
   public:
    // Input/output type
    using InputType  = std::uint8_t;
    using OutputType = std::int32_t;

    // Number of input/output dimensions
    static constexpr IndexType InputDimensions  = InDims;
    static constexpr IndexType OutputDimensions = OutDims;

    static_assert(OutputDimensions % 16 == 0,
                  "Only implemented for OutputDimensions divisible by 16.");

    static constexpr IndexType PaddedInputDimensions =
      ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
    static constexpr IndexType PaddedOutputDimensions =
      ceil_to_multiple<IndexType>(OutputDimensions, MaxSimdWidth);
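
    // ChunkSize is the number of input columns handled as one block: with SIMD,
    // four uint8 inputs form the single int32 word that find_nnz tests for
    // nonzero-ness, so weights are grouped in blocks of 4 input columns.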
#if (USE_SSSE3 | (USE_NEON >= 8))
    static constexpr IndexType ChunkSize = 4;
#else
    static constexpr IndexType ChunkSize = 1;
#endif

    using OutputBuffer = OutputType[PaddedOutputDimensions];

    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
        std::uint32_t hashValue = 0xCC03DAE4u;
        hashValue += OutputDimensions;
        hashValue ^= prevHash >> 1;
        hashValue ^= prevHash << 31;
        return hashValue;
    }

    static constexpr IndexType get_weight_index_scrambled(IndexType i) {
        return (i / ChunkSize) % (PaddedInputDimensions / ChunkSize) * OutputDimensions * ChunkSize
             + i / PaddedInputDimensions * ChunkSize + i % ChunkSize;
    }
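    // The scrambled layout stores, for each input block b = in / ChunkSize, the
    // ChunkSize weights of every output row contiguously. Worked example with
    // hypothetical sizes ChunkSize = 4, OutputDimensions = 16,
    // PaddedInputDimensions = 32: the dense index i = o * 32 + in with o = 2,
    // in = 5 (i = 69) maps to 1 * (16 * 4) + 2 * 4 + 1 = 73.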

    static constexpr IndexType get_weight_index(IndexType i) {
#if (USE_SSSE3 | (USE_NEON >= 8))
        return get_weight_index_scrambled(i);
#else
        return i;
#endif
    }

    // Read network parameters
    bool read_parameters(std::istream& stream) {
        read_little_endian<BiasType>(stream, biases, OutputDimensions);
        for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
            weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);

        return !stream.fail();
    }

    // Write network parameters
    bool write_parameters(std::ostream& stream) const {
        write_little_endian<BiasType>(stream, biases, OutputDimensions);

        for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
            write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);

        return !stream.fail();
    }

    // Forward propagation
    void propagate(const InputType* input, OutputType* output) const {

#if (USE_SSSE3 | (USE_NEON >= 8))
    #if defined(USE_AVX512)
        using invec_t  = __m512i;
        using outvec_t = __m512i;
        #define vec_set_32 _mm512_set1_epi32
        #define vec_add_dpbusd_32 Simd::m512_add_dpbusd_epi32
    #elif defined(USE_AVX2)
        using invec_t  = __m256i;
        using outvec_t = __m256i;
        #define vec_set_32 _mm256_set1_epi32
        #define vec_add_dpbusd_32 Simd::m256_add_dpbusd_epi32
    #elif defined(USE_SSSE3)
        using invec_t  = __m128i;
        using outvec_t = __m128i;
        #define vec_set_32 _mm_set1_epi32
        #define vec_add_dpbusd_32 Simd::m128_add_dpbusd_epi32
    #elif defined(USE_NEON_DOTPROD)
        using invec_t  = int8x16_t;
        using outvec_t = int32x4_t;
        #define vec_set_32(a) vreinterpretq_s8_u32(vdupq_n_u32(a))
        #define vec_add_dpbusd_32 Simd::dotprod_m128_add_dpbusd_epi32
    #elif defined(USE_NEON)
        using invec_t  = int8x16_t;
        using outvec_t = int32x4_t;
        #define vec_set_32(a) vreinterpretq_s8_u32(vdupq_n_u32(a))
        #define vec_add_dpbusd_32 Simd::neon_m128_add_dpbusd_epi32
    #endif
        static constexpr IndexType OutputSimdWidth = sizeof(outvec_t) / sizeof(OutputType);

        constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 8) / ChunkSize;
        constexpr IndexType NumRegs   = OutputDimensions / OutputSimdWidth;
        std::uint16_t       nnz[NumChunks];
        IndexType           count;

        const auto input32 = reinterpret_cast<const std::int32_t*>(input);

        // Find indices of nonzero 32bit blocks
        find_nnz<NumChunks>(input32, nnz, count);

        const outvec_t* biasvec = reinterpret_cast<const outvec_t*>(biases);
        outvec_t        acc[NumRegs];
        for (IndexType k = 0; k < NumRegs; ++k)
            acc[k] = biasvec[k];
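
        // Accumulate only the columns of nonzero input blocks: broadcast the four
        // uint8 values of block i across all SIMD lanes and add their dot product
        // with the block's weight column via dpbusd (or its NEON equivalent).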
        for (IndexType j = 0; j < count; ++j)
        {
            const auto    i  = nnz[j];
            const invec_t in = vec_set_32(input32[i]);
            const auto    col =
              reinterpret_cast<const invec_t*>(&weights[i * OutputDimensions * ChunkSize]);
            for (IndexType k = 0; k < NumRegs; ++k)
                vec_add_dpbusd_32(acc[k], in, col[k]);
        }

        outvec_t* outptr = reinterpret_cast<outvec_t*>(output);
        for (IndexType k = 0; k < NumRegs; ++k)
            outptr[k] = acc[k];
    #undef vec_set_32
    #undef vec_add_dpbusd_32
#else
        // Use dense implementation for the other architectures.
        affine_transform_non_ssse3<InputDimensions, PaddedInputDimensions, OutputDimensions>(
          output, weights, biases, input);
#endif
    }

   private:
    using BiasType   = OutputType;
    using WeightType = std::int8_t;

    alignas(CacheLineSize) BiasType biases[OutputDimensions];
    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
};
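
// Usage sketch (hypothetical dimensions, not taken from this file): a network
// definition would instantiate e.g. AffineTransformSparseInput<1024, 16> and
// call propagate() on the previous layer's mostly-zero uint8 output buffer.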

}  // namespace Stockfish::Eval::NNUE::Layers

#endif  // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED