/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2023 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// Definition of layer AffineTransform of NNUE evaluation function

#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED

#include <cstdint>
#include <cstring>
#include <iostream>

#include "../nnue_common.h"
#include "simd.h"
/*
  This file contains the definition for a fully connected layer (aka affine transform).

    - The expected use-case is small layers, with PaddedInputDimensions == 32
      and InputDimensions <= 32.
      - That is why an AVX512 path is hard to implement: a 64-byte AVX512
        register is already wider than the whole 32-byte padded input.
    - Inputs are processed in chunks of 4; the weights are transposed accordingly.
    - Accumulation happens directly into int32s.
*/
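
// As a small worked sketch (sizes here are illustrative, not taken from a
// real network): a layer declared as AffineTransform<32, 32> computes, for
// each output index j,
//
//   output[j] = biases[j] + sum over i < 32 of
//               weights[j * PaddedInputDimensions + i] * input[i]
//
// with uint8 inputs, int8 weights, and int32 biases/outputs (this is the
// unscrambled weight order used by the non-SSSE3 fallback below).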

namespace Stockfish::Eval::NNUE::Layers {
// Fallback implementation for older/other architectures.
// Requires the input to be padded to at least 16 values.
#if !defined(USE_SSSE3)
  template <IndexType InputDimensions, IndexType PaddedInputDimensions, IndexType OutputDimensions>
  static void affine_transform_non_ssse3(std::int32_t* output, const std::int8_t* weights,
                                         const std::int32_t* biases, const std::uint8_t* input)
  {
# if defined(USE_SSE2) || defined(USE_MMX) || defined(USE_NEON_DOTPROD) || defined(USE_NEON)
# if defined(USE_SSE2)
    // At least a multiple of 16, with SSE2.
    constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 16) / 16;
    const __m128i Zeros = _mm_setzero_si128();
    const auto inputVector = reinterpret_cast<const __m128i*>(input);

# elif defined(USE_MMX)
    constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 8) / 8;
    const __m64 Zeros = _mm_setzero_si64();
    const auto inputVector = reinterpret_cast<const __m64*>(input);

# elif defined(USE_NEON_DOTPROD)
    constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 16) / 16;
    const auto inputVector = reinterpret_cast<const int8x16_t*>(input);

# elif defined(USE_NEON)
    constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 16) / 16;
    const auto inputVector = reinterpret_cast<const int8x8_t*>(input);
# endif

    for (IndexType i = 0; i < OutputDimensions; ++i) {
      const IndexType offset = i * PaddedInputDimensions;

# if defined(USE_SSE2)
      __m128i sumLo = _mm_cvtsi32_si128(biases[i]);
      __m128i sumHi = Zeros;
      const auto row = reinterpret_cast<const __m128i*>(&weights[offset]);
      for (IndexType j = 0; j < NumChunks; ++j) {
        __m128i row_j = _mm_load_si128(&row[j]);
        __m128i input_j = _mm_load_si128(&inputVector[j]);
        // Sign-extend the int8 weights to int16 (duplicate bytes, then
        // arithmetic-shift right), zero-extend the uint8 inputs, and let
        // madd produce int32 partial sums of adjacent int16 products.
        __m128i extendedRowLo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
        __m128i extendedRowHi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
        __m128i extendedInputLo = _mm_unpacklo_epi8(input_j, Zeros);
        __m128i extendedInputHi = _mm_unpackhi_epi8(input_j, Zeros);
        __m128i productLo = _mm_madd_epi16(extendedRowLo, extendedInputLo);
        __m128i productHi = _mm_madd_epi16(extendedRowHi, extendedInputHi);
        sumLo = _mm_add_epi32(sumLo, productLo);
        sumHi = _mm_add_epi32(sumHi, productHi);
      }
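      // Horizontal reduction of the four int32 lanes: swap and add the two
      // 64-bit halves, then swap and add the two 32-bit halves of the low
      // quadword, leaving the grand total in lane 0.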
      __m128i sum = _mm_add_epi32(sumLo, sumHi);
      __m128i sumHigh_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
      sum = _mm_add_epi32(sum, sumHigh_64);
      __m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
      sum = _mm_add_epi32(sum, sum_second_32);
      output[i] = _mm_cvtsi128_si32(sum);

# elif defined(USE_MMX)
      // Same widening trick as the SSE2 path, using 64-bit MMX registers.
      __m64 sumLo = _mm_cvtsi32_si64(biases[i]);
      __m64 sumHi = Zeros;
      const auto row = reinterpret_cast<const __m64*>(&weights[offset]);
      for (IndexType j = 0; j < NumChunks; ++j) {
        __m64 row_j = row[j];
        __m64 input_j = inputVector[j];
        __m64 extendedRowLo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
        __m64 extendedRowHi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
        __m64 extendedInputLo = _mm_unpacklo_pi8(input_j, Zeros);
        __m64 extendedInputHi = _mm_unpackhi_pi8(input_j, Zeros);
        __m64 productLo = _mm_madd_pi16(extendedRowLo, extendedInputLo);
        __m64 productHi = _mm_madd_pi16(extendedRowHi, extendedInputHi);
        sumLo = _mm_add_pi32(sumLo, productLo);
        sumHi = _mm_add_pi32(sumHi, productHi);
      }
      __m64 sum = _mm_add_pi32(sumLo, sumHi);
      sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
      output[i] = _mm_cvtsi64_si32(sum);

# elif defined(USE_NEON_DOTPROD)
      int32x4_t sum = {biases[i]};
      const auto row = reinterpret_cast<const int8x16_t*>(&weights[offset]);
      for (IndexType j = 0; j < NumChunks; ++j) {
        sum = vdotq_s32(sum, inputVector[j], row[j]);
      }
      output[i] = vaddvq_s32(sum);
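
      // Without the dot-product extension, the plain-NEON branch below widens
      // the int8 products to int16 with vmull_s8/vmlal_s8 and then
      // pairwise-accumulates them into the int32 lanes with vpadalq_s16.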
# elif defined(USE_NEON)
      int32x4_t sum = {biases[i]};
      const auto row = reinterpret_cast<const int8x8_t*>(&weights[offset]);
      for (IndexType j = 0; j < NumChunks; ++j) {
        int16x8_t product = vmull_s8(inputVector[j * 2], row[j * 2]);
        product = vmlal_s8(product, inputVector[j * 2 + 1], row[j * 2 + 1]);
        sum = vpadalq_s16(sum, product);
      }
      output[i] = sum[0] + sum[1] + sum[2] + sum[3];
# endif
    }

# if defined(USE_MMX)
    _mm_empty();
# endif

# else
    std::memcpy(output, biases, sizeof(std::int32_t) * OutputDimensions);

    // Traverse weights in transpose order to take advantage of input sparsity:
    // only columns whose input byte is non-zero contribute to the outputs.
    for (IndexType i = 0; i < InputDimensions; ++i)
      if (input[i]) {
        const std::int8_t* w = &weights[i];
        const int in = input[i];
        for (IndexType j = 0; j < OutputDimensions; ++j)
          output[j] += w[j * PaddedInputDimensions] * in;
      }
# endif
  }
#endif

  template <IndexType InDims, IndexType OutDims>
  class AffineTransform {
   public:
    // Input/output types
    using InputType = std::uint8_t;
    using OutputType = std::int32_t;
    // Number of input/output dimensions
    static constexpr IndexType InputDimensions = InDims;
    static constexpr IndexType OutputDimensions = OutDims;

    static constexpr IndexType PaddedInputDimensions =
      ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
    static constexpr IndexType PaddedOutputDimensions =
      ceil_to_multiple<IndexType>(OutputDimensions, MaxSimdWidth);

    using OutputBuffer = OutputType[PaddedOutputDimensions];

    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
      std::uint32_t hashValue = 0xCC03DAE4u;
      hashValue += OutputDimensions;
      hashValue ^= prevHash >> 1;
      hashValue ^= prevHash << 31;
      return hashValue;
    }
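
    // With SSSE3, weights are stored "scrambled": the flat row-major index i
    // over [OutputDimensions][PaddedInputDimensions] is regrouped so that each
    // 4-byte chunk of input columns becomes contiguous across all outputs,
    // matching how propagate() broadcasts 4 input bytes at a time. A worked
    // example (illustrative sizes, PaddedInputDimensions = 8 and
    // OutputDimensions = 2): i = 9, i.e. row 1, column 1, lands at
    //   (9 / 4) % (8 / 4) * 2 * 4  +  9 / 8 * 4  +  9 % 4  =  0 + 4 + 1  =  5.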
    static constexpr IndexType get_weight_index_scrambled(IndexType i)
    {
      return
        (i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
        i / PaddedInputDimensions * 4 +
        i % 4;
    }

    static constexpr IndexType get_weight_index(IndexType i)
    {
#if defined (USE_SSSE3)
      return get_weight_index_scrambled(i);
#else
      return i;
#endif
    }

    // Read network parameters. The file stores weights in plain row-major
    // order; get_weight_index() redirects each one into the layout used by
    // the current build at load time.
    bool read_parameters(std::istream& stream) {
      read_little_endian<BiasType>(stream, biases, OutputDimensions);
      for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
        weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);

      return !stream.fail();
    }

    // Write network parameters
    bool write_parameters(std::ostream& stream) const {
      write_little_endian<BiasType>(stream, biases, OutputDimensions);

      for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
        write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);

      return !stream.fail();
    }

    // Forward propagation
    const OutputType* propagate(
        const InputType* input, OutputType* output) const {

#if defined (USE_SSSE3)
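
      // On SSSE3 and newer the whole output stays in SIMD registers: 4
      // consecutive input bytes are broadcast as one int32 and multiplied
      // against the matching scrambled weight columns, using the
      // dpbusd-style multiply-add helpers from simd.h.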
      if constexpr (OutputDimensions > 1)
      {

#if defined (USE_AVX512)
        using vec_t = __m512i;
#define vec_setzero _mm512_setzero_si512
#define vec_set_32 _mm512_set1_epi32
#define vec_add_dpbusd_32 Simd::m512_add_dpbusd_epi32
#define vec_add_dpbusd_32x2 Simd::m512_add_dpbusd_epi32x2
#define vec_hadd Simd::m512_hadd
#elif defined (USE_AVX2)
        using vec_t = __m256i;
#define vec_setzero _mm256_setzero_si256
#define vec_set_32 _mm256_set1_epi32
#define vec_add_dpbusd_32 Simd::m256_add_dpbusd_epi32
#define vec_add_dpbusd_32x2 Simd::m256_add_dpbusd_epi32x2
#define vec_hadd Simd::m256_hadd
#elif defined (USE_SSSE3)
        using vec_t = __m128i;
#define vec_setzero _mm_setzero_si128
#define vec_set_32 _mm_set1_epi32
#define vec_add_dpbusd_32 Simd::m128_add_dpbusd_epi32
#define vec_add_dpbusd_32x2 Simd::m128_add_dpbusd_epi32x2
#define vec_hadd Simd::m128_hadd
#endif

        static constexpr IndexType OutputSimdWidth = sizeof(vec_t) / sizeof(OutputType);

        static_assert(OutputDimensions % OutputSimdWidth == 0);

        constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 8) / 4;
        constexpr IndexType NumRegs = OutputDimensions / OutputSimdWidth;
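
        // NumChunks counts the input in 4-byte groups, padded to a multiple
        // of 8 bytes so the chunk loop below can always consume the groups
        // two at a time; NumRegs is the number of SIMD registers needed to
        // hold all the outputs.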
        const auto input32 = reinterpret_cast<const std::int32_t*>(input);
        const vec_t* biasvec = reinterpret_cast<const vec_t*>(biases);
        vec_t acc[NumRegs];
        for (IndexType k = 0; k < NumRegs; ++k)
          acc[k] = biasvec[k];

        for (IndexType i = 0; i < NumChunks; i += 2)
        {
          const vec_t in0 = vec_set_32(input32[i + 0]);
          const vec_t in1 = vec_set_32(input32[i + 1]);
          const auto col0 = reinterpret_cast<const vec_t*>(&weights[(i + 0) * OutputDimensions * 4]);
          const auto col1 = reinterpret_cast<const vec_t*>(&weights[(i + 1) * OutputDimensions * 4]);
          for (IndexType k = 0; k < NumRegs; ++k)
            vec_add_dpbusd_32x2(acc[k], in0, col0[k], in1, col1[k]);
        }

        vec_t* outptr = reinterpret_cast<vec_t*>(output);
        for (IndexType k = 0; k < NumRegs; ++k)
          outptr[k] = acc[k];

# undef vec_setzero
# undef vec_set_32
# undef vec_add_dpbusd_32
# undef vec_add_dpbusd_32x2
# undef vec_hadd
      }
      else if constexpr (OutputDimensions == 1)
      {

        // We cannot use AVX512 for the last layer because there are only 32
        // inputs and the buffer is not padded to 64 elements.
#if defined (USE_AVX2)
        using vec_t = __m256i;
#define vec_setzero _mm256_setzero_si256
#define vec_set_32 _mm256_set1_epi32
#define vec_add_dpbusd_32 Simd::m256_add_dpbusd_epi32
#define vec_add_dpbusd_32x2 Simd::m256_add_dpbusd_epi32x2
#define vec_hadd Simd::m256_hadd
#elif defined (USE_SSSE3)
        using vec_t = __m128i;
#define vec_setzero _mm_setzero_si128
#define vec_set_32 _mm_set1_epi32
#define vec_add_dpbusd_32 Simd::m128_add_dpbusd_epi32
#define vec_add_dpbusd_32x2 Simd::m128_add_dpbusd_epi32x2
#define vec_hadd Simd::m128_hadd
#endif

        const auto inputVector = reinterpret_cast<const vec_t*>(input);

        static constexpr IndexType InputSimdWidth = sizeof(vec_t) / sizeof(InputType);

        static_assert(PaddedInputDimensions % InputSimdWidth == 0);

        constexpr IndexType NumChunks = PaddedInputDimensions / InputSimdWidth;
        vec_t sum0 = vec_setzero();
        const auto row0 = reinterpret_cast<const vec_t*>(&weights[0]);

        for (int j = 0; j < int(NumChunks); ++j)
        {
          const vec_t in = inputVector[j];
          vec_add_dpbusd_32(sum0, in, row0[j]);
        }
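        // vec_hadd reduces the int32 lanes of the accumulator to a single
        // scalar, adding the bias in the process.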
        output[0] = vec_hadd(sum0, biases[0]);

# undef vec_setzero
# undef vec_set_32
# undef vec_add_dpbusd_32
# undef vec_add_dpbusd_32x2
# undef vec_hadd
      }
#else
      // Use the old implementation for the other architectures.
      affine_transform_non_ssse3<
        InputDimensions,
        PaddedInputDimensions,
        OutputDimensions>(output, weights, biases, input);
#endif

      return output;
    }

   private:
    using BiasType = OutputType;
    using WeightType = std::int8_t;

    alignas(CacheLineSize) BiasType biases[OutputDimensions];
    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
  };

}  // namespace Stockfish::Eval::NNUE::Layers
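
// A minimal usage sketch (hypothetical sizes and buffer names; real layer
// stacks are declared in the network-architecture headers):
//
//   using FC = Stockfish::Eval::NNUE::Layers::AffineTransform<32, 32>;
//   alignas(CacheLineSize) std::uint8_t in[FC::PaddedInputDimensions];
//   alignas(CacheLineSize) FC::OutputBuffer out;
//   FC layer;
//   // ... layer.read_parameters(stream) ...
//   layer.propagate(in, out);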

#endif // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED