/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2023 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// Definition of layer AffineTransform of NNUE evaluation function

#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
#include <iostream>
#include <algorithm>
#include <type_traits>

#include "../nnue_common.h"
#include "simd.h"
/*
  This file contains the definition for a fully connected layer (aka affine transform).
  Two approaches are employed, depending on the sizes of the transform.

  Approach 1 (a specialization for large inputs):
    - used when PaddedInputDimensions >= 128
    - uses AVX512 if possible
    - processes inputs in batches of 2*InputSimdWidth
      - so in batches of 128 for AVX512
    - the weight blocks of size InputSimdWidth are transposed such that
      access is sequential
    - N columns of the weight matrix are processed at a time, where N
      depends on the architecture (the number of available registers)
    - accumulate + hadd is used

  Approach 2 (a specialization for small inputs):
    - used when PaddedInputDimensions < 128
    - expected use case is PaddedInputDimensions == 32 and InputDimensions <= 32
      - that's why AVX512 is hard to implement
    - expected use case is small layers
    - not as well optimized as approach 1
    - inputs are processed in chunks of 4, and the weights are transposed accordingly
    - accumulation happens directly to int32s
*/
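/*
  For orientation: both specializations compute the same plain affine transform
  over quantized values. A minimal scalar sketch of what propagate() produces
  (mirroring the generic fallback below; the names match this file):

    // output = weights * input + biases, with int8 weights, uint8 inputs
    // and int32 accumulation; rows are padded to PaddedInputDimensions.
    for (IndexType i = 0; i < OutputDimensions; ++i) {
      std::int32_t sum = biases[i];
      for (IndexType j = 0; j < InputDimensions; ++j)
        sum += weights[i * PaddedInputDimensions + j] * input[j];
      output[i] = sum;
    }

  The SIMD paths below change only how this sum is evaluated and how the
  weights are laid out in memory, not the result.
*/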
namespace Stockfish::Eval::NNUE::Layers {

  // Fallback implementation for older/other architectures.
  // Identical for both approaches. Requires the input to be padded to at least 16 values.
#if !defined(USE_SSSE3)
  template <IndexType InputDimensions, IndexType PaddedInputDimensions, IndexType OutputDimensions>
  static void affine_transform_non_ssse3(std::int32_t* output, const std::int8_t* weights, const std::int32_t* biases, const std::uint8_t* input)
  {
# if defined(USE_SSE2)
    // At least a multiple of 16, with SSE2.
    constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 16) / 16;
    const __m128i Zeros = _mm_setzero_si128();
    const auto inputVector = reinterpret_cast<const __m128i*>(input);

# elif defined(USE_MMX)
    constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 8) / 8;
    const __m64 Zeros = _mm_setzero_si64();
    const auto inputVector = reinterpret_cast<const __m64*>(input);

# elif defined(USE_NEON_DOTPROD)
    constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 16) / 16;
    const auto inputVector = reinterpret_cast<const int8x16_t*>(input);

# elif defined(USE_NEON)
    constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 16) / 16;
    const auto inputVector = reinterpret_cast<const int8x8_t*>(input);
# endif
    for (IndexType i = 0; i < OutputDimensions; ++i) {
      const IndexType offset = i * PaddedInputDimensions;

# if defined(USE_SSE2)
      __m128i sumLo = _mm_cvtsi32_si128(biases[i]);
      __m128i sumHi = Zeros;
      const auto row = reinterpret_cast<const __m128i*>(&weights[offset]);
      for (IndexType j = 0; j < NumChunks; ++j) {
        __m128i row_j = _mm_load_si128(&row[j]);
        __m128i input_j = _mm_load_si128(&inputVector[j]);
        __m128i extendedRowLo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
        __m128i extendedRowHi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
        __m128i extendedInputLo = _mm_unpacklo_epi8(input_j, Zeros);
        __m128i extendedInputHi = _mm_unpackhi_epi8(input_j, Zeros);
        __m128i productLo = _mm_madd_epi16(extendedRowLo, extendedInputLo);
        __m128i productHi = _mm_madd_epi16(extendedRowHi, extendedInputHi);
        sumLo = _mm_add_epi32(sumLo, productLo);
        sumHi = _mm_add_epi32(sumHi, productHi);
      }
      __m128i sum = _mm_add_epi32(sumLo, sumHi);
      __m128i sumHigh_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
      sum = _mm_add_epi32(sum, sumHigh_64);
      __m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
      sum = _mm_add_epi32(sum, sum_second_32);
      output[i] = _mm_cvtsi128_si32(sum);
# elif defined(USE_MMX)
      __m64 sumLo = _mm_cvtsi32_si64(biases[i]);
      __m64 sumHi = Zeros;
      const auto row = reinterpret_cast<const __m64*>(&weights[offset]);
      for (IndexType j = 0; j < NumChunks; ++j) {
        __m64 row_j = row[j];
        __m64 input_j = inputVector[j];
        __m64 extendedRowLo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
        __m64 extendedRowHi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
        __m64 extendedInputLo = _mm_unpacklo_pi8(input_j, Zeros);
        __m64 extendedInputHi = _mm_unpackhi_pi8(input_j, Zeros);
        __m64 productLo = _mm_madd_pi16(extendedRowLo, extendedInputLo);
        __m64 productHi = _mm_madd_pi16(extendedRowHi, extendedInputHi);
        sumLo = _mm_add_pi32(sumLo, productLo);
        sumHi = _mm_add_pi32(sumHi, productHi);
      }
      __m64 sum = _mm_add_pi32(sumLo, sumHi);
      sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
      output[i] = _mm_cvtsi64_si32(sum);
# elif defined(USE_NEON_DOTPROD)
      int32x4_t sum = {biases[i]};
      const auto row = reinterpret_cast<const int8x16_t*>(&weights[offset]);
      for (IndexType j = 0; j < NumChunks; ++j) {
        sum = vdotq_s32(sum, inputVector[j], row[j]);
      }
      output[i] = vaddvq_s32(sum);

# elif defined(USE_NEON)
      int32x4_t sum = {biases[i]};
      const auto row = reinterpret_cast<const int8x8_t*>(&weights[offset]);
      for (IndexType j = 0; j < NumChunks; ++j) {
        int16x8_t product = vmull_s8(inputVector[j * 2], row[j * 2]);
        product = vmlal_s8(product, inputVector[j * 2 + 1], row[j * 2 + 1]);
        sum = vpadalq_s16(sum, product);
      }
      output[i] = sum[0] + sum[1] + sum[2] + sum[3];

# else
      std::int32_t sum = biases[i];
      for (IndexType j = 0; j < InputDimensions; ++j) {
        sum += weights[offset + j] * input[j];
      }
      output[i] = sum;
# endif
    }

# if defined(USE_MMX)
    _mm_empty();
# endif
  }
#endif
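/*
  Illustrative only (hypothetical sizes): a call for a 32->32 layer on a
  non-SSSE3 build. All buffers must be SIMD-aligned (alignas(CacheLineSize)
  is what the classes below use) and the input padded as noted above:

    alignas(CacheLineSize) std::uint8_t in[32];
    alignas(CacheLineSize) std::int8_t  w[32 * 32];
    alignas(CacheLineSize) std::int32_t b[32], out[32];
    // <InputDimensions, PaddedInputDimensions, OutputDimensions>
    affine_transform_non_ssse3<32, 32, 32>(out, w, b, in);
*/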
  template <IndexType InDims, IndexType OutDims, typename Enabled = void>
  class AffineTransform;

#if defined (USE_AVX512)
  constexpr IndexType LargeInputSize = 2 * 64;
#else
  constexpr IndexType LargeInputSize = std::numeric_limits<IndexType>::max();
#endif
  // A specialization for large inputs
  template <IndexType InDims, IndexType OutDims>
  class AffineTransform<InDims, OutDims, std::enable_if_t<(ceil_to_multiple<IndexType>(InDims, MaxSimdWidth) >= LargeInputSize)>> {
   public:
    using InputType = std::uint8_t;
    using OutputType = std::int32_t;

    // Number of input/output dimensions
    static constexpr IndexType InputDimensions = InDims;
    static constexpr IndexType OutputDimensions = OutDims;

    static constexpr IndexType PaddedInputDimensions =
      ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
    static constexpr IndexType PaddedOutputDimensions =
      ceil_to_multiple<IndexType>(OutputDimensions, MaxSimdWidth);

    using OutputBuffer = OutputType[PaddedOutputDimensions];

    static_assert(PaddedInputDimensions >= LargeInputSize, "Something went wrong. This specialization (for large inputs) should not have been chosen.");
#if defined (USE_AVX512)
    static constexpr IndexType InputSimdWidth = 64;
    static constexpr IndexType MaxNumOutputRegs = 16;
#elif defined (USE_AVX2)
    static constexpr IndexType InputSimdWidth = 32;
    static constexpr IndexType MaxNumOutputRegs = 8;
#elif defined (USE_SSSE3)
    static constexpr IndexType InputSimdWidth = 16;
    static constexpr IndexType MaxNumOutputRegs = 8;
#elif defined (USE_NEON_DOTPROD)
    static constexpr IndexType InputSimdWidth = 16;
    static constexpr IndexType MaxNumOutputRegs = 8;
#elif defined (USE_NEON)
    static constexpr IndexType InputSimdWidth = 8;
    static constexpr IndexType MaxNumOutputRegs = 8;
#else
    // The fallback implementation will not have permuted weights.
    // We define these to avoid a lot of ifdefs later.
    static constexpr IndexType InputSimdWidth = 1;
    static constexpr IndexType MaxNumOutputRegs = 1;
#endif
    // A big block is a region in the weight matrix of the size [PaddedInputDimensions, NumOutputRegs].
    // A small block is a region of size [InputSimdWidth, 1].

    static constexpr IndexType NumOutputRegs = std::min(MaxNumOutputRegs, OutputDimensions);
    static constexpr IndexType SmallBlockSize = InputSimdWidth;
    static constexpr IndexType BigBlockSize = NumOutputRegs * PaddedInputDimensions;
    static constexpr IndexType NumSmallBlocksInBigBlock = BigBlockSize / SmallBlockSize;
    static constexpr IndexType NumSmallBlocksPerOutput = PaddedInputDimensions / SmallBlockSize;
    static constexpr IndexType NumBigBlocks = OutputDimensions / NumOutputRegs;

    static_assert(OutputDimensions % NumOutputRegs == 0);
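    /*
      Worked example of the block geometry, assuming AVX2
      (InputSimdWidth = 32, MaxNumOutputRegs = 8) and a hypothetical
      512-input, 32-output layer (MaxSimdWidth = 32, so no extra padding):
        PaddedInputDimensions    = 512
        NumOutputRegs            = min(8, 32) = 8
        SmallBlockSize           = 32
        BigBlockSize             = 8 * 512   = 4096
        NumSmallBlocksInBigBlock = 4096 / 32 = 128
        NumSmallBlocksPerOutput  = 512 / 32  = 16
        NumBigBlocks             = 32 / 8    = 4
    */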
    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
      std::uint32_t hashValue = 0xCC03DAE4u;
      hashValue += OutputDimensions;
      hashValue ^= prevHash >> 1;
      hashValue ^= prevHash << 31;
      return hashValue;
    }
    /*
      Transposes the small blocks within a big block.
      Effectively means that weights can be traversed sequentially during inference.
    */
    static IndexType get_weight_index(IndexType i)
    {
      const IndexType smallBlock = (i / SmallBlockSize) % NumSmallBlocksInBigBlock;
      const IndexType smallBlockCol = smallBlock / NumSmallBlocksPerOutput;
      const IndexType smallBlockRow = smallBlock % NumSmallBlocksPerOutput;
      const IndexType bigBlock = i / BigBlockSize;
      const IndexType rest = i % SmallBlockSize;

      const IndexType idx =
          bigBlock * BigBlockSize
        + smallBlockRow * SmallBlockSize * NumOutputRegs
        + smallBlockCol * SmallBlockSize
        + rest;

      return idx;
    }
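    /*
      Worked example, continuing the hypothetical AVX2 512x32 layer above:
      take i = 512, i.e. the row-major index of (output 1, input 0).
        smallBlock    = (512 / 32) % 128 = 16
        smallBlockCol = 16 / 16          = 1
        smallBlockRow = 16 % 16          = 0
        bigBlock      = 512 / 4096       = 0
        rest          = 512 % 32         = 0
        idx           = 0 + 0 + 1 * 32 + 0 = 32
      So the first small block of output 1 is stored right after the first
      small block of output 0: the NumOutputRegs columns of a big block are
      interleaved at small-block granularity, which is exactly the order
      propagate() streams them in.
    */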
    // Read network parameters
    bool read_parameters(std::istream& stream) {
      read_little_endian<BiasType>(stream, biases, OutputDimensions);

      for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
        weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);

      return !stream.fail();
    }
    // Write network parameters
    bool write_parameters(std::ostream& stream) const {
      write_little_endian<BiasType>(stream, biases, OutputDimensions);

      for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
        write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);

      return !stream.fail();
    }
    // Forward propagation
    const OutputType* propagate(
        const InputType* input, OutputType* output) const {
#if defined (USE_AVX512)
      using acc_vec_t = __m512i;
      using bias_vec_t = __m128i;
      using weight_vec_t = __m512i;
      using in_vec_t = __m512i;
      #define vec_zero _mm512_setzero_si512()
      #define vec_add_dpbusd_32x2 Simd::m512_add_dpbusd_epi32x2
      #define vec_hadd Simd::m512_hadd
      #define vec_haddx4 Simd::m512_haddx4
#elif defined (USE_AVX2)
      using acc_vec_t = __m256i;
      using bias_vec_t = __m128i;
      using weight_vec_t = __m256i;
      using in_vec_t = __m256i;
      #define vec_zero _mm256_setzero_si256()
      #define vec_add_dpbusd_32x2 Simd::m256_add_dpbusd_epi32x2
      #define vec_hadd Simd::m256_hadd
      #define vec_haddx4 Simd::m256_haddx4
#elif defined (USE_SSSE3)
      using acc_vec_t = __m128i;
      using bias_vec_t = __m128i;
      using weight_vec_t = __m128i;
      using in_vec_t = __m128i;
      #define vec_zero _mm_setzero_si128()
      #define vec_add_dpbusd_32x2 Simd::m128_add_dpbusd_epi32x2
      #define vec_hadd Simd::m128_hadd
      #define vec_haddx4 Simd::m128_haddx4
#elif defined (USE_NEON_DOTPROD)
      using acc_vec_t = int32x4_t;
      using bias_vec_t = int32x4_t;
      using weight_vec_t = int8x16_t;
      using in_vec_t = int8x16_t;
      #define vec_zero {0}
      #define vec_add_dpbusd_32x2 Simd::dotprod_m128_add_dpbusd_epi32x2
      #define vec_hadd Simd::neon_m128_hadd
      #define vec_haddx4 Simd::neon_m128_haddx4
#elif defined (USE_NEON)
      using acc_vec_t = int32x4_t;
      using bias_vec_t = int32x4_t;
      using weight_vec_t = int8x8_t;
      using in_vec_t = int8x8_t;
      #define vec_zero {0}
      #define vec_add_dpbusd_32x2 Simd::neon_m128_add_dpbusd_epi32x2
      #define vec_hadd Simd::neon_m128_hadd
      #define vec_haddx4 Simd::neon_m128_haddx4
#endif
#if defined (USE_SSSE3) || defined (USE_NEON)
      const in_vec_t* invec = reinterpret_cast<const in_vec_t*>(input);

      // Perform accumulation to registers for each big block
      for (IndexType bigBlock = 0; bigBlock < NumBigBlocks; ++bigBlock)
      {
        acc_vec_t acc[NumOutputRegs] = { vec_zero };
        // Each big block has NumOutputRegs small blocks in each "row", one per register.
        // We process two small blocks at a time to save on one addition without VNNI.
        for (IndexType smallBlock = 0; smallBlock < NumSmallBlocksPerOutput; smallBlock += 2)
        {
          const weight_vec_t* weightvec =
            reinterpret_cast<const weight_vec_t*>(
                weights
              + bigBlock * BigBlockSize
              + smallBlock * SmallBlockSize * NumOutputRegs);

          const in_vec_t in0 = invec[smallBlock + 0];
          const in_vec_t in1 = invec[smallBlock + 1];

          for (IndexType k = 0; k < NumOutputRegs; ++k)
            vec_add_dpbusd_32x2(acc[k], in0, weightvec[k], in1, weightvec[k + NumOutputRegs]);
        }
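        // Note on the kernel above: vec_add_dpbusd_32x2(acc, in0, w0, in1, w1)
        // multiplies unsigned input bytes with signed weight bytes, sums each
        // group of four adjacent products and accumulates the result into the
        // int32 lanes of acc (a dpbusd/VNNI-style operation, see simd.h), so
        // each call consumes two small blocks per output register.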
        // Horizontally add all accumulators.
        if constexpr (NumOutputRegs % 4 == 0)
        {
          bias_vec_t* outputvec = reinterpret_cast<bias_vec_t*>(output);
          const bias_vec_t* biasvec = reinterpret_cast<const bias_vec_t*>(biases);

          for (IndexType k = 0; k < NumOutputRegs; k += 4)
          {
            const IndexType idx = (bigBlock * NumOutputRegs + k) / 4;
            outputvec[idx] = vec_haddx4(acc[k+0], acc[k+1], acc[k+2], acc[k+3], biasvec[idx]);
          }
        }
        else
        {
          for (IndexType k = 0; k < NumOutputRegs; ++k)
          {
            const IndexType idx = (bigBlock * NumOutputRegs + k);
            output[idx] = vec_hadd(acc[k], biases[idx]);
          }
        }
      }

# undef vec_zero
# undef vec_add_dpbusd_32x2
# undef vec_hadd
# undef vec_haddx4
#else
      // Use the old implementation for the other architectures.
      affine_transform_non_ssse3<
        InputDimensions,
        PaddedInputDimensions,
        OutputDimensions>(output, weights, biases, input);
#endif

      return output;
    }
   private:
    using BiasType = OutputType;
    using WeightType = std::int8_t;

    alignas(CacheLineSize) BiasType biases[OutputDimensions];
    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
  };
  // A specialization for small inputs
  template <IndexType InDims, IndexType OutDims>
  class AffineTransform<InDims, OutDims, std::enable_if_t<(ceil_to_multiple<IndexType>(InDims, MaxSimdWidth) < LargeInputSize)>> {
   public:
    using InputType = std::uint8_t;
    using OutputType = std::int32_t;

    // Number of input/output dimensions
    static constexpr IndexType InputDimensions = InDims;
    static constexpr IndexType OutputDimensions = OutDims;

    static constexpr IndexType PaddedInputDimensions =
      ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
    static constexpr IndexType PaddedOutputDimensions =
      ceil_to_multiple<IndexType>(OutputDimensions, MaxSimdWidth);

    using OutputBuffer = OutputType[PaddedOutputDimensions];

    static_assert(PaddedInputDimensions < LargeInputSize, "Something went wrong. This specialization (for small inputs) should not have been chosen.");
    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
      std::uint32_t hashValue = 0xCC03DAE4u;
      hashValue += OutputDimensions;
      hashValue ^= prevHash >> 1;
      hashValue ^= prevHash << 31;
      return hashValue;
    }
    static IndexType get_weight_index_scrambled(IndexType i)
    {
      return
        (i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
        i / PaddedInputDimensions * 4 +
        i % 4;
    }
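    /*
      Worked example, assuming a hypothetical 32x32 layer
      (PaddedInputDimensions = 32): for i = 32, the row-major index of
      (output 1, input 0),
        (32 / 4) % (32 / 4) * 32 * 4 = 0
        32 / 32 * 4                  = 4
        32 % 4                       = 0
      so the scrambled index is 4: weights are grouped four inputs at a time,
      and those groups are interleaved across all outputs. This matches
      propagate() below, which broadcasts four input bytes at once.
    */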
    static IndexType get_weight_index(IndexType i)
    {
#if defined (USE_SSSE3)
      return get_weight_index_scrambled(i);
#else
      return i;
#endif
    }
    // Read network parameters
    bool read_parameters(std::istream& stream) {
      read_little_endian<BiasType>(stream, biases, OutputDimensions);

      for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
        weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);

      return !stream.fail();
    }
    // Write network parameters
    bool write_parameters(std::ostream& stream) const {
      write_little_endian<BiasType>(stream, biases, OutputDimensions);

      for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
        write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);

      return !stream.fail();
    }
    // Forward propagation
    const OutputType* propagate(
        const InputType* input, OutputType* output) const {

#if defined (USE_AVX512)
      using vec_t = __m512i;
      #define vec_setzero _mm512_setzero_si512
      #define vec_set_32 _mm512_set1_epi32
      #define vec_add_dpbusd_32 Simd::m512_add_dpbusd_epi32
      #define vec_add_dpbusd_32x2 Simd::m512_add_dpbusd_epi32x2
      #define vec_hadd Simd::m512_hadd
#elif defined (USE_AVX2)
      using vec_t = __m256i;
      #define vec_setzero _mm256_setzero_si256
      #define vec_set_32 _mm256_set1_epi32
      #define vec_add_dpbusd_32 Simd::m256_add_dpbusd_epi32
      #define vec_add_dpbusd_32x2 Simd::m256_add_dpbusd_epi32x2
      #define vec_hadd Simd::m256_hadd
#elif defined (USE_SSSE3)
      using vec_t = __m128i;
      #define vec_setzero _mm_setzero_si128
      #define vec_set_32 _mm_set1_epi32
      #define vec_add_dpbusd_32 Simd::m128_add_dpbusd_epi32
      #define vec_add_dpbusd_32x2 Simd::m128_add_dpbusd_epi32x2
      #define vec_hadd Simd::m128_hadd
#endif
#if defined (USE_SSSE3)
      const auto inputVector = reinterpret_cast<const vec_t*>(input);

      static constexpr IndexType OutputSimdWidth = sizeof(vec_t) / sizeof(OutputType);

      static_assert(OutputDimensions % OutputSimdWidth == 0 || OutputDimensions == 1);

      if constexpr (OutputDimensions % OutputSimdWidth == 0)
      {
        constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 8) / 4;
        constexpr IndexType NumRegs = OutputDimensions / OutputSimdWidth;

        const auto input32 = reinterpret_cast<const std::int32_t*>(input);
        const vec_t* biasvec = reinterpret_cast<const vec_t*>(biases);
        vec_t acc[NumRegs];
        for (IndexType k = 0; k < NumRegs; ++k)
          acc[k] = biasvec[k];
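        // Each 32-bit element of input32 packs four consecutive uint8 inputs,
        // so vec_set_32 broadcasts one group of four inputs to every lane.
        // Thanks to the scrambled weight order, col0[k]/col1[k] then hold the
        // weights of those four inputs for all outputs in register k, and one
        // dpbusd-style call accumulates four products per output.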
        for (IndexType i = 0; i < NumChunks; i += 2)
        {
          const vec_t in0 = vec_set_32(input32[i + 0]);
          const vec_t in1 = vec_set_32(input32[i + 1]);
          const auto col0 = reinterpret_cast<const vec_t*>(&weights[(i + 0) * OutputDimensions * 4]);
          const auto col1 = reinterpret_cast<const vec_t*>(&weights[(i + 1) * OutputDimensions * 4]);
          for (IndexType k = 0; k < NumRegs; ++k)
            vec_add_dpbusd_32x2(acc[k], in0, col0[k], in1, col1[k]);
        }

        vec_t* outptr = reinterpret_cast<vec_t*>(output);
        for (IndexType k = 0; k < NumRegs; ++k)
          outptr[k] = acc[k];
      }
      else if constexpr (OutputDimensions == 1)
      {
        constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
        vec_t sum0 = vec_setzero();
        const auto row0 = reinterpret_cast<const vec_t*>(&weights[0]);

        for (int j = 0; j < (int)NumChunks; ++j)
        {
          const vec_t in = inputVector[j];
          vec_add_dpbusd_32(sum0, in, row0[j]);
        }
        output[0] = vec_hadd(sum0, biases[0]);
      }

# undef vec_setzero
# undef vec_set_32
# undef vec_add_dpbusd_32
# undef vec_add_dpbusd_32x2
# undef vec_hadd
#else
      // Use the old implementation for the other architectures.
      affine_transform_non_ssse3<
        InputDimensions,
        PaddedInputDimensions,
        OutputDimensions>(output, weights, biases, input);
#endif

      return output;
    }
   private:
    using BiasType = OutputType;
    using WeightType = std::int8_t;

    alignas(CacheLineSize) BiasType biases[OutputDimensions];
    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
  };

}  // namespace Stockfish::Eval::NNUE::Layers

#endif // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED