/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2022 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

// Definition of layer AffineTransform of NNUE evaluation function

#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED

#include <iostream>
#include <algorithm>
#include <type_traits>
#include "../nnue_common.h"
#include "../../simd.h"

/*
  This file contains the definition for a fully connected layer (aka affine transform).
  Two approaches are employed, depending on the size of the transform.

  Approach 1:
    - used when the PaddedInputDimensions >= 128
    - uses AVX512 if possible
    - processes inputs in batches of 2*InputSimdWidth
      - so in batches of 128 for AVX512
    - the weight blocks of size InputSimdWidth are transposed such that
      access is sequential
    - N columns of the weight matrix are processed at a time, where N
      depends on the architecture (the number of registers)
    - accumulate + hadd is used

  Approach 2:
    - used when the PaddedInputDimensions < 128
    - expected use-case is when PaddedInputDimensions == 32 and InputDimensions <= 32
      - that's why AVX512 is hard to implement
    - expected use-case is small layers
      - not optimized as well as approach 1
    - inputs are processed in chunks of 4, weights are respectively transposed
    - accumulation happens directly to int32s
*/
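
/*
  For reference, both approaches compute the same plain affine transform that
  the scalar fallback below spells out (a sketch using this file's names):

    for (IndexType i = 0; i < OutputDimensions; ++i) {
        std::int32_t sum = biases[i];
        for (IndexType j = 0; j < InputDimensions; ++j)
            sum += weights[i * PaddedInputDimensions + j] * input[j];
        output[i] = sum;
    }

  Everything else in this file is about doing this multiply-accumulate with
  SIMD, using weight layouts that keep the memory accesses sequential.
*/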

namespace Stockfish::Eval::NNUE::Layers {

// Fallback implementation for older/other architectures.
// Identical for both approaches. Requires the input to be padded to at least 16 values.
#if !defined(USE_SSSE3)
  template <IndexType InputDimensions, IndexType PaddedInputDimensions, IndexType OutputDimensions>
  static void affine_transform_non_ssse3(std::int32_t* output, const std::int8_t* weights, const std::int32_t* biases, const std::uint8_t* input)
  {
# if defined(USE_SSE2)
    // At least a multiple of 16, with SSE2.
    static_assert(PaddedInputDimensions % 16 == 0);
    constexpr IndexType NumChunks = PaddedInputDimensions / 16;
    const __m128i Zeros = _mm_setzero_si128();
    const auto inputVector = reinterpret_cast<const __m128i*>(input);

# elif defined(USE_MMX)
    static_assert(InputDimensions % 8 == 0);
    constexpr IndexType NumChunks = InputDimensions / 8;
    const __m64 Zeros = _mm_setzero_si64();
    const auto inputVector = reinterpret_cast<const __m64*>(input);

# elif defined(USE_NEON)
    constexpr IndexType NumChunks = (InputDimensions + 15) / 16;
    const auto inputVector = reinterpret_cast<const int8x8_t*>(input);
# endif
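
    // Each iteration of this loop computes one output value: the dot product
    // of one (padded) row of the weight matrix with the input, plus the bias.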
    for (IndexType i = 0; i < OutputDimensions; ++i) {
      const IndexType offset = i * PaddedInputDimensions;

# if defined(USE_SSE2)
      __m128i sumLo = _mm_cvtsi32_si128(biases[i]);
      __m128i sumHi = Zeros;
      const auto row = reinterpret_cast<const __m128i*>(&weights[offset]);
      for (IndexType j = 0; j < NumChunks; ++j) {
        __m128i row_j = _mm_load_si128(&row[j]);
        __m128i input_j = _mm_load_si128(&inputVector[j]);
        __m128i extendedRowLo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
        __m128i extendedRowHi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
        __m128i extendedInputLo = _mm_unpacklo_epi8(input_j, Zeros);
        __m128i extendedInputHi = _mm_unpackhi_epi8(input_j, Zeros);
        __m128i productLo = _mm_madd_epi16(extendedRowLo, extendedInputLo);
        __m128i productHi = _mm_madd_epi16(extendedRowHi, extendedInputHi);
        sumLo = _mm_add_epi32(sumLo, productLo);
        sumHi = _mm_add_epi32(sumHi, productHi);
      }
      __m128i sum = _mm_add_epi32(sumLo, sumHi);
      __m128i sumHigh_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
      sum = _mm_add_epi32(sum, sumHigh_64);
      __m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
      sum = _mm_add_epi32(sum, sum_second_32);
      output[i] = _mm_cvtsi128_si32(sum);

# elif defined(USE_MMX)
      __m64 sumLo = _mm_cvtsi32_si64(biases[i]);
      __m64 sumHi = Zeros;
      const auto row = reinterpret_cast<const __m64*>(&weights[offset]);
      for (IndexType j = 0; j < NumChunks; ++j) {
        __m64 row_j = row[j];
        __m64 input_j = inputVector[j];
        __m64 extendedRowLo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
        __m64 extendedRowHi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
        __m64 extendedInputLo = _mm_unpacklo_pi8(input_j, Zeros);
        __m64 extendedInputHi = _mm_unpackhi_pi8(input_j, Zeros);
        __m64 productLo = _mm_madd_pi16(extendedRowLo, extendedInputLo);
        __m64 productHi = _mm_madd_pi16(extendedRowHi, extendedInputHi);
        sumLo = _mm_add_pi32(sumLo, productLo);
        sumHi = _mm_add_pi32(sumHi, productHi);
      }
      __m64 sum = _mm_add_pi32(sumLo, sumHi);
      sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
      output[i] = _mm_cvtsi64_si32(sum);

# elif defined(USE_NEON)
      int32x4_t sum = {biases[i]};
      const auto row = reinterpret_cast<const int8x8_t*>(&weights[offset]);
      for (IndexType j = 0; j < NumChunks; ++j) {
        int16x8_t product = vmull_s8(inputVector[j * 2], row[j * 2]);
        product = vmlal_s8(product, inputVector[j * 2 + 1], row[j * 2 + 1]);
        sum = vpadalq_s16(sum, product);
      }
      output[i] = sum[0] + sum[1] + sum[2] + sum[3];

# else
      std::int32_t sum = biases[i];
      for (IndexType j = 0; j < InputDimensions; ++j) {
        sum += weights[offset + j] * input[j];
      }
      output[i] = sum;
# endif
    }

# if defined(USE_MMX)
    _mm_empty();
# endif
  }
#endif

  template <typename PreviousLayer, IndexType OutDims, typename Enabled = void>
  class AffineTransform;

  // A specialization for large inputs.
  template <typename PreviousLayer, IndexType OutDims>
  class AffineTransform<PreviousLayer, OutDims, std::enable_if_t<(PreviousLayer::OutputDimensions >= 2*64-1)>> {
   public:
    // Input/output type
    using InputType = typename PreviousLayer::OutputType;
    using OutputType = std::int32_t;
    static_assert(std::is_same<InputType, std::uint8_t>::value, "");

    // Number of input/output dimensions
    static constexpr IndexType InputDimensions = PreviousLayer::OutputDimensions;
    static constexpr IndexType OutputDimensions = OutDims;

    static constexpr IndexType PaddedInputDimensions =
      ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);

    static_assert(PaddedInputDimensions >= 128, "Something went wrong. This specialization should not have been chosen.");

#if defined (USE_AVX512)
    static constexpr const IndexType InputSimdWidth = 64;
    static constexpr const IndexType MaxNumOutputRegs = 16;
#elif defined (USE_AVX2)
    static constexpr const IndexType InputSimdWidth = 32;
    static constexpr const IndexType MaxNumOutputRegs = 8;
#elif defined (USE_SSSE3)
    static constexpr const IndexType InputSimdWidth = 16;
    static constexpr const IndexType MaxNumOutputRegs = 8;
#elif defined (USE_NEON)
    static constexpr const IndexType InputSimdWidth = 8;
    static constexpr const IndexType MaxNumOutputRegs = 8;
#else
    // The fallback implementation will not have permuted weights.
    // We define these to avoid a lot of ifdefs later.
    static constexpr const IndexType InputSimdWidth = 1;
    static constexpr const IndexType MaxNumOutputRegs = 1;
#endif

    // A big block is a region in the weight matrix of the size [PaddedInputDimensions, NumOutputRegs].
    // A small block is a region of size [InputSimdWidth, 1]

    static constexpr const IndexType NumOutputRegs = std::min(MaxNumOutputRegs, OutputDimensions);
    static constexpr const IndexType SmallBlockSize = InputSimdWidth;
    static constexpr const IndexType BigBlockSize = NumOutputRegs * PaddedInputDimensions;
    static constexpr const IndexType NumSmallBlocksInBigBlock = BigBlockSize / SmallBlockSize;
    static constexpr const IndexType NumSmallBlocksPerOutput = PaddedInputDimensions / SmallBlockSize;
    static constexpr const IndexType NumBigBlocks = OutputDimensions / NumOutputRegs;

    static_assert(OutputDimensions % NumOutputRegs == 0);

    // Size of forward propagation buffer used in this layer
    static constexpr std::size_t SelfBufferSize =
      ceil_to_multiple(OutputDimensions * sizeof(OutputType), CacheLineSize);

    // Size of the forward propagation buffer used from the input layer to this layer
    static constexpr std::size_t BufferSize =
      PreviousLayer::BufferSize + SelfBufferSize;

    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t get_hash_value() {
      std::uint32_t hashValue = 0xCC03DAE4u;
      hashValue += OutputDimensions;
      hashValue ^= PreviousLayer::get_hash_value() >> 1;
      hashValue ^= PreviousLayer::get_hash_value() << 31;
      return hashValue;
    }

    /*
      Transposes the small blocks within a big block.
      Effectively means that weights can be traversed sequentially during inference.
    */
    static IndexType get_weight_index(IndexType i)
    {
      const IndexType smallBlock = (i / SmallBlockSize) % NumSmallBlocksInBigBlock;
      const IndexType smallBlockCol = smallBlock / NumSmallBlocksPerOutput;
      const IndexType smallBlockRow = smallBlock % NumSmallBlocksPerOutput;
      const IndexType bigBlock = i / BigBlockSize;
      const IndexType rest = i % SmallBlockSize;

      const IndexType idx =
          bigBlock * BigBlockSize
        + smallBlockRow * SmallBlockSize * NumOutputRegs
        + smallBlockCol * SmallBlockSize
        + rest;

      return idx;
    }
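
    // Worked example (illustrative numbers, not taken from the code): with
    // AVX2, SmallBlockSize == 32; assume PaddedInputDimensions == 1024 and
    // NumOutputRegs == 8. Then for the row-major weight index i:
    //   i == 0    (output 0, inputs  0..31) -> idx == 0
    //   i == 1024 (output 1, inputs  0..31) -> idx == 32
    //   i == 32   (output 0, inputs 32..63) -> idx == 256
    // The small blocks of all 8 outputs for a given input chunk become
    // adjacent, so propagate() can read the weights strictly sequentially.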

    // Read network parameters
    bool read_parameters(std::istream& stream) {
      if (!previousLayer.read_parameters(stream)) return false;
      for (std::size_t i = 0; i < OutputDimensions; ++i)
        biases[i] = read_little_endian<BiasType>(stream);

      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
        weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);

      return !stream.fail();
    }

    // Write network parameters
    bool write_parameters(std::ostream& stream) const {
      if (!previousLayer.write_parameters(stream)) return false;
      for (std::size_t i = 0; i < OutputDimensions; ++i)
        write_little_endian<BiasType>(stream, biases[i]);

      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
        write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);

      return !stream.fail();
    }

    // Forward propagation
    const OutputType* propagate(
        const TransformedFeatureType* transformedFeatures, char* buffer) const {
      const auto input = previousLayer.propagate(
        transformedFeatures, buffer + SelfBufferSize);
      OutputType* output = reinterpret_cast<OutputType*>(buffer);

#if defined (USE_AVX512)
      using acc_vec_t = __m512i;
      using bias_vec_t = __m128i;
      using weight_vec_t = __m512i;
      using in_vec_t = __m512i;
      #define vec_zero _mm512_setzero_si512()
      #define vec_add_dpbusd_32x2 Simd::m512_add_dpbusd_epi32x2
      #define vec_hadd Simd::m512_hadd
      #define vec_haddx4 Simd::m512_haddx4
#elif defined (USE_AVX2)
      using acc_vec_t = __m256i;
      using bias_vec_t = __m128i;
      using weight_vec_t = __m256i;
      using in_vec_t = __m256i;
      #define vec_zero _mm256_setzero_si256()
      #define vec_add_dpbusd_32x2 Simd::m256_add_dpbusd_epi32x2
      #define vec_hadd Simd::m256_hadd
      #define vec_haddx4 Simd::m256_haddx4
#elif defined (USE_SSSE3)
      using acc_vec_t = __m128i;
      using bias_vec_t = __m128i;
      using weight_vec_t = __m128i;
      using in_vec_t = __m128i;
      #define vec_zero _mm_setzero_si128()
      #define vec_add_dpbusd_32x2 Simd::m128_add_dpbusd_epi32x2
      #define vec_hadd Simd::m128_hadd
      #define vec_haddx4 Simd::m128_haddx4
#elif defined (USE_NEON)
      using acc_vec_t = int32x4_t;
      using bias_vec_t = int32x4_t;
      using weight_vec_t = int8x8_t;
      using in_vec_t = int8x8_t;
      #define vec_zero {0}
      #define vec_add_dpbusd_32x2 Simd::neon_m128_add_dpbusd_epi32x2
      #define vec_hadd Simd::neon_m128_hadd
      #define vec_haddx4 Simd::neon_m128_haddx4
#endif

#if defined (USE_SSSE3) || defined (USE_NEON)
      const in_vec_t* invec = reinterpret_cast<const in_vec_t*>(input);

      // Perform accumulation to registers for each big block
      for (IndexType bigBlock = 0; bigBlock < NumBigBlocks; ++bigBlock)
      {
        acc_vec_t acc[NumOutputRegs] = { vec_zero };

        // Each big block has NumOutputRegs small blocks in each "row", one per register.
        // We process two small blocks at a time to save on one addition without VNNI.
        for (IndexType smallBlock = 0; smallBlock < NumSmallBlocksPerOutput; smallBlock += 2)
        {
          const weight_vec_t* weightvec =
            reinterpret_cast<const weight_vec_t*>(
                weights
              + bigBlock * BigBlockSize
              + smallBlock * SmallBlockSize * NumOutputRegs);

          const in_vec_t in0 = invec[smallBlock + 0];
          const in_vec_t in1 = invec[smallBlock + 1];
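
          // vec_add_dpbusd_32x2 is expected to do, for each int32 lane of acc[k]:
          //   acc[k] += dot(in0, weightvec[k]) + dot(in1, weightvec[k + NumOutputRegs])
          // where dot() multiplies 4 unsigned input bytes with 4 signed weight
          // bytes and sums the products (VNNI dpbusd, or a pmaddubsw fallback).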
          for (IndexType k = 0; k < NumOutputRegs; ++k)
            vec_add_dpbusd_32x2(acc[k], in0, weightvec[k], in1, weightvec[k + NumOutputRegs]);
        }

        // Horizontally add all accumulators.
        if constexpr (NumOutputRegs % 4 == 0)
        {
          bias_vec_t* outputvec = reinterpret_cast<bias_vec_t*>(output);
          const bias_vec_t* biasvec = reinterpret_cast<const bias_vec_t*>(biases);

          for (IndexType k = 0; k < NumOutputRegs; k += 4)
          {
            const IndexType idx = (bigBlock * NumOutputRegs + k) / 4;
            outputvec[idx] = vec_haddx4(acc[k+0], acc[k+1], acc[k+2], acc[k+3], biasvec[idx]);
          }
        }
        else
        {
          for (IndexType k = 0; k < NumOutputRegs; ++k)
          {
            const IndexType idx = (bigBlock * NumOutputRegs + k);
            output[idx] = vec_hadd(acc[k], biases[idx]);
          }
        }
      }

# undef vec_zero
# undef vec_add_dpbusd_32x2
# undef vec_hadd
# undef vec_haddx4
#else
      // Use old implementation for the other architectures.
      affine_transform_non_ssse3<
        InputDimensions,
        PaddedInputDimensions,
        OutputDimensions>(output, weights, biases, input);
#endif

      return output;
    }

   private:
    using BiasType = OutputType;
    using WeightType = std::int8_t;

    PreviousLayer previousLayer;

    alignas(CacheLineSize) BiasType biases[OutputDimensions];
    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
  };

  // A specialization for small inputs.
  template <typename PreviousLayer, IndexType OutDims>
  class AffineTransform<PreviousLayer, OutDims, std::enable_if_t<(PreviousLayer::OutputDimensions < 2*64-1)>> {
   public:
    // Input/output type
    using InputType = typename PreviousLayer::OutputType;
    using OutputType = std::int32_t;
    static_assert(std::is_same<InputType, std::uint8_t>::value, "");

    // Number of input/output dimensions
    static constexpr IndexType InputDimensions =
        PreviousLayer::OutputDimensions;
    static constexpr IndexType OutputDimensions = OutDims;
    static constexpr IndexType PaddedInputDimensions =
        ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);

    static_assert(PaddedInputDimensions < 128, "Something went wrong. This specialization should not have been chosen.");

#if defined (USE_SSSE3)
    static constexpr const IndexType OutputSimdWidth = SimdWidth / 4;
    static constexpr const IndexType InputSimdWidth = SimdWidth;
#endif

    // Size of forward propagation buffer used in this layer
    static constexpr std::size_t SelfBufferSize =
      ceil_to_multiple(OutputDimensions * sizeof(OutputType), CacheLineSize);

    // Size of the forward propagation buffer used from the input layer to this layer
    static constexpr std::size_t BufferSize =
      PreviousLayer::BufferSize + SelfBufferSize;

    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t get_hash_value() {
      std::uint32_t hashValue = 0xCC03DAE4u;
      hashValue += OutputDimensions;
      hashValue ^= PreviousLayer::get_hash_value() >> 1;
      hashValue ^= PreviousLayer::get_hash_value() << 31;
      return hashValue;
    }

    static IndexType get_weight_index_scrambled(IndexType i)
    {
      return
        (i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
        i / PaddedInputDimensions * 4 +
        i % 4;
    }
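
    // Worked example (illustrative numbers, not taken from the code): with
    // PaddedInputDimensions == 32 and OutputDimensions == 32,
    //   i == 0  (output 0, inputs 0..3) -> 0
    //   i == 32 (output 1, inputs 0..3) -> 4
    //   i == 4  (output 0, inputs 4..7) -> 128
    // i.e. the 4-byte weight chunks of all outputs for one input group are
    // stored contiguously, matching the chunks-of-4 loop in propagate().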

    static IndexType get_weight_index(IndexType i)
    {
#if defined (USE_SSSE3)
      return get_weight_index_scrambled(i);
#else
      return i;
#endif
    }

    // Read network parameters
    bool read_parameters(std::istream& stream) {
      if (!previousLayer.read_parameters(stream)) return false;
      for (std::size_t i = 0; i < OutputDimensions; ++i)
        biases[i] = read_little_endian<BiasType>(stream);
      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
        weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);

      return !stream.fail();
    }

    // Write network parameters
    bool write_parameters(std::ostream& stream) const {
      if (!previousLayer.write_parameters(stream)) return false;
      for (std::size_t i = 0; i < OutputDimensions; ++i)
        write_little_endian<BiasType>(stream, biases[i]);

      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
        write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);

      return !stream.fail();
    }

    // Forward propagation
    const OutputType* propagate(
        const TransformedFeatureType* transformedFeatures, char* buffer) const {
      const auto input = previousLayer.propagate(
        transformedFeatures, buffer + SelfBufferSize);
      const auto output = reinterpret_cast<OutputType*>(buffer);

#if defined (USE_AVX2)
      using vec_t = __m256i;
      #define vec_setzero _mm256_setzero_si256
      #define vec_set_32 _mm256_set1_epi32
      #define vec_add_dpbusd_32 Simd::m256_add_dpbusd_epi32
      #define vec_add_dpbusd_32x2 Simd::m256_add_dpbusd_epi32x2
      #define vec_add_dpbusd_32x4 Simd::m256_add_dpbusd_epi32x4
      #define vec_hadd Simd::m256_hadd
      #define vec_haddx4 Simd::m256_haddx4
#elif defined (USE_SSSE3)
      using vec_t = __m128i;
      #define vec_setzero _mm_setzero_si128
      #define vec_set_32 _mm_set1_epi32
      #define vec_add_dpbusd_32 Simd::m128_add_dpbusd_epi32
      #define vec_add_dpbusd_32x2 Simd::m128_add_dpbusd_epi32x2
      #define vec_add_dpbusd_32x4 Simd::m128_add_dpbusd_epi32x4
      #define vec_hadd Simd::m128_hadd
      #define vec_haddx4 Simd::m128_haddx4
#endif

#if defined (USE_SSSE3)
      const auto inputVector = reinterpret_cast<const vec_t*>(input);

      static_assert(InputDimensions % 8 == 0);
      static_assert(OutputDimensions % OutputSimdWidth == 0 || OutputDimensions == 1);

      if constexpr (OutputDimensions % OutputSimdWidth == 0)
      {
        constexpr IndexType NumChunks = InputDimensions / 4;
        constexpr IndexType NumRegs = OutputDimensions / OutputSimdWidth;

        const auto input32 = reinterpret_cast<const std::int32_t*>(input);
        const vec_t* biasvec = reinterpret_cast<const vec_t*>(biases);
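
        // The idea (a sketch of the data flow, see also the file-level
        // comment): each 32-bit element of input32 packs 4 uint8 inputs; it is
        // broadcast to every SIMD lane and multiplied against the transposed
        // 4-byte weight chunks of all outputs, so the products accumulate
        // directly into int32 output lanes.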
        vec_t acc[NumRegs];
        for (IndexType k = 0; k < NumRegs; ++k)
          acc[k] = biasvec[k];

        for (IndexType i = 0; i < NumChunks; i += 2)
        {
          const vec_t in0 = vec_set_32(input32[i + 0]);
          const vec_t in1 = vec_set_32(input32[i + 1]);
          const auto col0 = reinterpret_cast<const vec_t*>(&weights[(i + 0) * OutputDimensions * 4]);
          const auto col1 = reinterpret_cast<const vec_t*>(&weights[(i + 1) * OutputDimensions * 4]);
          for (IndexType k = 0; k < NumRegs; ++k)
            vec_add_dpbusd_32x2(acc[k], in0, col0[k], in1, col1[k]);
        }

        vec_t* outptr = reinterpret_cast<vec_t*>(output);
        for (IndexType k = 0; k < NumRegs; ++k)
          outptr[k] = acc[k];
      }
      else if constexpr (OutputDimensions == 1)
      {
        constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
        vec_t sum0 = vec_setzero();
        const auto row0 = reinterpret_cast<const vec_t*>(&weights[0]);

        for (int j = 0; j < (int)NumChunks; ++j)
        {
          const vec_t in = inputVector[j];
          vec_add_dpbusd_32(sum0, in, row0[j]);
        }
        output[0] = vec_hadd(sum0, biases[0]);
      }

# undef vec_setzero
# undef vec_set_32
# undef vec_add_dpbusd_32
# undef vec_add_dpbusd_32x2
# undef vec_add_dpbusd_32x4
# undef vec_hadd
# undef vec_haddx4
#else
      // Use old implementation for the other architectures.
      affine_transform_non_ssse3<
        InputDimensions,
        PaddedInputDimensions,
        OutputDimensions>(output, weights, biases, input);
#endif

      return output;
    }

   private:
    using BiasType = OutputType;
    using WeightType = std::int8_t;

    PreviousLayer previousLayer;

    alignas(CacheLineSize) BiasType biases[OutputDimensions];
    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
  };

}  // namespace Stockfish::Eval::NNUE::Layers

#endif // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED