/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2021 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

// Definition of layer AffineTransform of NNUE evaluation function

#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED

#include <algorithm>
#include <cstring>
#include <iostream>

#include "../nnue_common.h"

namespace Stockfish::Eval::NNUE::Layers {

  // Affine transformation layer
  template <typename PreviousLayer, IndexType OutputDimensions>
  class AffineTransform {
   public:
    // Input/output type
    using InputType = typename PreviousLayer::OutputType;
    using OutputType = std::int32_t;
    static_assert(std::is_same<InputType, std::uint8_t>::value, "");

    // Number of input/output dimensions
    static constexpr IndexType kInputDimensions =
        PreviousLayer::kOutputDimensions;
    static constexpr IndexType kOutputDimensions = OutputDimensions;
    static constexpr IndexType kPaddedInputDimensions =
        CeilToMultiple<IndexType>(kInputDimensions, kMaxSimdWidth);

#if defined (USE_AVX512)
    static constexpr const IndexType kOutputSimdWidth = kSimdWidth / 2;
#elif defined (USE_SSSE3)
    static constexpr const IndexType kOutputSimdWidth = kSimdWidth / 4;
#endif

    // Size of forward propagation buffer used in this layer
    static constexpr std::size_t kSelfBufferSize =
        CeilToMultiple(kOutputDimensions * sizeof(OutputType), kCacheLineSize);

    // Size of the forward propagation buffer used from the input layer to this layer
    static constexpr std::size_t kBufferSize =
        PreviousLayer::kBufferSize + kSelfBufferSize;
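
    // Buffer layout note: in Propagate() this layer writes its own output at
    // the front of the buffer it receives, while the previous layer propagates
    // into buffer + kSelfBufferSize, so kBufferSize accumulates one cache-line
    // aligned slice per layer in the chain.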

    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t GetHashValue() {
      std::uint32_t hash_value = 0xCC03DAE4u;
      hash_value += kOutputDimensions;
      hash_value ^= PreviousLayer::GetHashValue() >> 1;
      hash_value ^= PreviousLayer::GetHashValue() << 31;
      return hash_value;
    }
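
    // Note: the two XOR-shifts above together XOR in the previous layer's
    // 32-bit hash rotated right by one bit, so the whole layer stack
    // contributes to the final value in an order-dependent way.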

    // Read network parameters
    bool ReadParameters(std::istream& stream) {
      if (!previous_layer_.ReadParameters(stream)) return false;
      for (std::size_t i = 0; i < kOutputDimensions; ++i)
        biases_[i] = read_little_endian<BiasType>(stream);
      for (std::size_t i = 0; i < kOutputDimensions * kPaddedInputDimensions; ++i)
#if !defined (USE_SSSE3)
        weights_[i] = read_little_endian<WeightType>(stream);
#else
        weights_[
          (i / 4) % (kPaddedInputDimensions / 4) * kOutputDimensions * 4 +
          i / kPaddedInputDimensions * 4 +
          i % 4
        ] = read_little_endian<WeightType>(stream);
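
      // The scramble above regroups the row-major serialized weights: with
      // in = i % kPaddedInputDimensions and out = i / kPaddedInputDimensions,
      // the destination index works out to
      //   (in / 4) * kOutputDimensions * 4 + out * 4 + in % 4,
      // so the four weights that each SIMD kernel below multiplies against one
      // broadcast 32-bit input chunk end up contiguous for every output.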

      // Determine if groups of eight weight/input products can be summed using
      // 16 bits without saturation. We assume worst case combinations of 0 and
      // 127 for all inputs.
      if (kOutputDimensions > 1 && !stream.fail())
      {
          canSaturate16.count = 0;
#if !defined(USE_VNNI)
          for (IndexType i = 0; i < kPaddedInputDimensions; i += 16)
              for (IndexType j = 0; j < kOutputDimensions; ++j)
                  for (int x = 0; x < 2; ++x)
                  {
                      WeightType* w = &weights_[i * kOutputDimensions + j * 4 + x * 2];
                      int sum[2] = {0, 0};
                      for (int k = 0; k < 8; ++k)
                      {
                          IndexType idx = k / 2 * kOutputDimensions * 4 + k % 2;
                          sum[w[idx] < 0] += w[idx];
                      }
                      // Trim the largest-magnitude weights until the positive
                      // and negative partial sums are each at most 258, so the
                      // worst case 127 * 258 = 32766 still fits in a signed
                      // 16-bit lane. Trimmed weights are recorded for a scalar
                      // fixup after the SIMD pass.
                      for (int sign : { -1, 1 })
                          while (sign * sum[sign == -1] > 258)
                          {
                              int maxK = 0, maxW = 0;
                              for (int k = 0; k < 8; ++k)
                              {
                                  IndexType idx = k / 2 * kOutputDimensions * 4 + k % 2;
                                  if (maxW < sign * w[idx])
                                      maxK = k, maxW = sign * w[idx];
                              }

                              IndexType idx = maxK / 2 * kOutputDimensions * 4 + maxK % 2;
                              sum[sign == -1] -= w[idx];
                              canSaturate16.add(j, i + maxK / 2 * 4 + maxK % 2 + x * 2, w[idx]);
                              w[idx] = 0;
                          }
                  }

          // Non-functional optimization: sort the fixups for faster, more linear access
          std::sort(canSaturate16.ids, canSaturate16.ids + canSaturate16.count,
                    [](const typename CanSaturate::Entry& e1, const typename CanSaturate::Entry& e2)
                    { return e1.in == e2.in ? e1.out < e2.out : e1.in < e2.in; });
#endif
      }
#endif

      return !stream.fail();
    }

    // Forward propagation
    const OutputType* Propagate(
        const TransformedFeatureType* transformed_features, char* buffer) const {
      const auto input = previous_layer_.Propagate(
          transformed_features, buffer + kSelfBufferSize);

#if defined (USE_AVX512)

      [[maybe_unused]] const __m512i kOnes512 = _mm512_set1_epi16(1);

      [[maybe_unused]] auto m512_hadd = [](__m512i sum, int bias) -> int {
        return _mm512_reduce_add_epi32(sum) + bias;
      };
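
      // The helpers below compute an unsigned-int8 x signed-int8 dot product
      // accumulated into 32-bit lanes. With VNNI this is a single dpbusd
      // instruction; otherwise it is emulated with maddubs (which sums byte
      // pairs into saturating 16-bit lanes, hence the canSaturate16
      // preprocessing above) followed by madd against ones to widen to 32 bits.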
      [[maybe_unused]] auto m512_add_dpbusd_epi32 = [=](__m512i& acc, __m512i a, __m512i b) {
#if defined (USE_VNNI)
        acc = _mm512_dpbusd_epi32(acc, a, b);
#else
        __m512i product0 = _mm512_maddubs_epi16(a, b);
        product0 = _mm512_madd_epi16(product0, kOnes512);
        acc = _mm512_add_epi32(acc, product0);
#endif
      };

      [[maybe_unused]] auto m512_add_dpbusd_epi32x4 = [=](__m512i& acc, __m512i a0, __m512i b0, __m512i a1, __m512i b1,
                                                                        __m512i a2, __m512i b2, __m512i a3, __m512i b3) {
#if defined (USE_VNNI)
        acc = _mm512_dpbusd_epi32(acc, a0, b0);
        acc = _mm512_dpbusd_epi32(acc, a1, b1);
        acc = _mm512_dpbusd_epi32(acc, a2, b2);
        acc = _mm512_dpbusd_epi32(acc, a3, b3);
#else
        __m512i product0 = _mm512_maddubs_epi16(a0, b0);
        __m512i product1 = _mm512_maddubs_epi16(a1, b1);
        __m512i product2 = _mm512_maddubs_epi16(a2, b2);
        __m512i product3 = _mm512_maddubs_epi16(a3, b3);
        product0 = _mm512_add_epi16(product0, product1);
        product2 = _mm512_add_epi16(product2, product3);
        product0 = _mm512_add_epi16(product0, product2);
        product0 = _mm512_madd_epi16(product0, kOnes512);
        acc = _mm512_add_epi32(acc, product0);
#endif
      };

#endif

#if defined (USE_AVX2)

      [[maybe_unused]] const __m256i kOnes256 = _mm256_set1_epi16(1);

      [[maybe_unused]] auto m256_hadd = [](__m256i sum, int bias) -> int {
        __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extracti128_si256(sum, 1));
        sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_BADC));
        sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_CDAB));
        return _mm_cvtsi128_si32(sum128) + bias;
      };

      [[maybe_unused]] auto m256_add_dpbusd_epi32 = [=](__m256i& acc, __m256i a, __m256i b) {
#if defined (USE_VNNI)
        acc = _mm256_dpbusd_epi32(acc, a, b);
#else
        __m256i product0 = _mm256_maddubs_epi16(a, b);
        product0 = _mm256_madd_epi16(product0, kOnes256);
        acc = _mm256_add_epi32(acc, product0);
#endif
      };

      [[maybe_unused]] auto m256_add_dpbusd_epi32x4 = [=](__m256i& acc, __m256i a0, __m256i b0, __m256i a1, __m256i b1,
                                                                        __m256i a2, __m256i b2, __m256i a3, __m256i b3) {
#if defined (USE_VNNI)
        acc = _mm256_dpbusd_epi32(acc, a0, b0);
        acc = _mm256_dpbusd_epi32(acc, a1, b1);
        acc = _mm256_dpbusd_epi32(acc, a2, b2);
        acc = _mm256_dpbusd_epi32(acc, a3, b3);
#else
        __m256i product0 = _mm256_maddubs_epi16(a0, b0);
        __m256i product1 = _mm256_maddubs_epi16(a1, b1);
        __m256i product2 = _mm256_maddubs_epi16(a2, b2);
        __m256i product3 = _mm256_maddubs_epi16(a3, b3);
        product0 = _mm256_add_epi16(product0, product1);
        product2 = _mm256_add_epi16(product2, product3);
        product0 = _mm256_add_epi16(product0, product2);
        product0 = _mm256_madd_epi16(product0, kOnes256);
        acc = _mm256_add_epi32(acc, product0);
#endif
      };

#endif

#if defined (USE_SSSE3)

      [[maybe_unused]] const __m128i kOnes128 = _mm_set1_epi16(1);

      [[maybe_unused]] auto m128_hadd = [](__m128i sum, int bias) -> int {
        sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0x4E)); //_MM_PERM_BADC
        sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0xB1)); //_MM_PERM_CDAB
        return _mm_cvtsi128_si32(sum) + bias;
      };

      [[maybe_unused]] auto m128_add_dpbusd_epi32 = [=](__m128i& acc, __m128i a, __m128i b) {
        __m128i product0 = _mm_maddubs_epi16(a, b);
        product0 = _mm_madd_epi16(product0, kOnes128);
        acc = _mm_add_epi32(acc, product0);
      };

      [[maybe_unused]] auto m128_add_dpbusd_epi32x4 = [=](__m128i& acc, __m128i a0, __m128i b0, __m128i a1, __m128i b1,
                                                                        __m128i a2, __m128i b2, __m128i a3, __m128i b3) {
        __m128i product0 = _mm_maddubs_epi16(a0, b0);
        __m128i product1 = _mm_maddubs_epi16(a1, b1);
        __m128i product2 = _mm_maddubs_epi16(a2, b2);
        __m128i product3 = _mm_maddubs_epi16(a3, b3);
        product0 = _mm_add_epi16(product0, product1);
        product2 = _mm_add_epi16(product2, product3);
        product0 = _mm_add_epi16(product0, product2);
        product0 = _mm_madd_epi16(product0, kOnes128);
        acc = _mm_add_epi32(acc, product0);
      };

#endif
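
      // Bind the widest available implementation to generic names so the
      // kernels below are written once, independent of the target ISA.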
#if defined (USE_AVX512)
      using vec_t = __m512i;
      #define vec_setzero _mm512_setzero_si512
      #define vec_set_32 _mm512_set1_epi32
      auto& vec_add_dpbusd_32 = m512_add_dpbusd_epi32;
      auto& vec_add_dpbusd_32x4 = m512_add_dpbusd_epi32x4;
      auto& vec_hadd = m512_hadd;
#elif defined (USE_AVX2)
      using vec_t = __m256i;
      #define vec_setzero _mm256_setzero_si256
      #define vec_set_32 _mm256_set1_epi32
      auto& vec_add_dpbusd_32 = m256_add_dpbusd_epi32;
      auto& vec_add_dpbusd_32x4 = m256_add_dpbusd_epi32x4;
      auto& vec_hadd = m256_hadd;
#elif defined (USE_SSSE3)
      using vec_t = __m128i;
      #define vec_setzero _mm_setzero_si128
      #define vec_set_32 _mm_set1_epi32
      auto& vec_add_dpbusd_32 = m128_add_dpbusd_epi32;
      auto& vec_add_dpbusd_32x4 = m128_add_dpbusd_epi32x4;
      auto& vec_hadd = m128_hadd;
#endif

#if defined (USE_SSSE3)

      const auto output = reinterpret_cast<OutputType*>(buffer);
      const auto input_vector = reinterpret_cast<const vec_t*>(input);

      static_assert(kOutputDimensions % kOutputSimdWidth == 0 || kOutputDimensions == 1);

      // kOutputDimensions is either 1 or a multiple of kSimdWidth,
      // because it then also serves as an input dimension.
      if constexpr (kOutputDimensions % kOutputSimdWidth == 0)
      {
          constexpr IndexType kNumChunks = kPaddedInputDimensions / 4;

          const auto input32 = reinterpret_cast<const std::int32_t*>(input);
          vec_t* outptr = reinterpret_cast<vec_t*>(output);
          std::memcpy(output, biases_, kOutputDimensions * sizeof(OutputType));
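
          // Main kernel: each vec_set_32 broadcasts one group of four input
          // bytes to every lane; the permuted weight layout produced in
          // ReadParameters makes the matching four weights for all outputs
          // contiguous, so a single dpbusd per column accumulates four
          // products into each 32-bit output lane.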
          for (int i = 0; i < (int)kNumChunks - 3; i += 4)
          {
              const vec_t in0 = vec_set_32(input32[i + 0]);
              const vec_t in1 = vec_set_32(input32[i + 1]);
              const vec_t in2 = vec_set_32(input32[i + 2]);
              const vec_t in3 = vec_set_32(input32[i + 3]);
              const auto col0 = reinterpret_cast<const vec_t*>(&weights_[(i + 0) * kOutputDimensions * 4]);
              const auto col1 = reinterpret_cast<const vec_t*>(&weights_[(i + 1) * kOutputDimensions * 4]);
              const auto col2 = reinterpret_cast<const vec_t*>(&weights_[(i + 2) * kOutputDimensions * 4]);
              const auto col3 = reinterpret_cast<const vec_t*>(&weights_[(i + 3) * kOutputDimensions * 4]);
              for (int j = 0; j * kOutputSimdWidth < kOutputDimensions; ++j)
                  vec_add_dpbusd_32x4(outptr[j], in0, col0[j], in1, col1[j], in2, col2[j], in3, col3[j]);
          }

          // Scalar fixup for the weights zeroed out in ReadParameters to
          // prevent 16-bit saturation in the emulated dpbusd path.
          for (int i = 0; i < canSaturate16.count; ++i)
              output[canSaturate16.ids[i].out] += input[canSaturate16.ids[i].in] * canSaturate16.ids[i].w;
      }
      else if constexpr (kOutputDimensions == 1)
      {
#if defined (USE_AVX512)
          // With AVX-512 the input may be padded to 32 bytes but not 64, in
          // which case the single dot product falls back to 256-bit chunks.
          if constexpr (kPaddedInputDimensions % (kSimdWidth * 2) != 0)
          {
              constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
              const auto input_vector256 = reinterpret_cast<const __m256i*>(input);

              __m256i sum0 = _mm256_setzero_si256();
              const auto row0 = reinterpret_cast<const __m256i*>(&weights_[0]);

              for (int j = 0; j < (int)kNumChunks; ++j)
              {
                  const __m256i in = input_vector256[j];
                  m256_add_dpbusd_epi32(sum0, in, row0[j]);
              }
              output[0] = m256_hadd(sum0, biases_[0]);
          }
          else
#endif
          {
#if defined (USE_AVX512)
              constexpr IndexType kNumChunks = kPaddedInputDimensions / (kSimdWidth * 2);
#else
              constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
#endif
              vec_t sum0 = vec_setzero();
              const auto row0 = reinterpret_cast<const vec_t*>(&weights_[0]);

              for (int j = 0; j < (int)kNumChunks; ++j)
              {
                  const vec_t in = input_vector[j];
                  vec_add_dpbusd_32(sum0, in, row0[j]);
              }
              output[0] = vec_hadd(sum0, biases_[0]);
          }
      }

#else

      // Use old implementation for the other architectures.

      auto output = reinterpret_cast<OutputType*>(buffer);

#if defined(USE_SSE2)
      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
      const __m128i kZeros = _mm_setzero_si128();
      const auto input_vector = reinterpret_cast<const __m128i*>(input);

#elif defined(USE_MMX)
      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
      const __m64 kZeros = _mm_setzero_si64();
      const auto input_vector = reinterpret_cast<const __m64*>(input);

#elif defined(USE_NEON)
      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
      const auto input_vector = reinterpret_cast<const int8x8_t*>(input);
#endif

      for (IndexType i = 0; i < kOutputDimensions; ++i) {
        const IndexType offset = i * kPaddedInputDimensions;

#if defined(USE_SSE2)
        __m128i sum_lo = _mm_cvtsi32_si128(biases_[i]);
        __m128i sum_hi = kZeros;
        const auto row = reinterpret_cast<const __m128i*>(&weights_[offset]);
        for (IndexType j = 0; j < kNumChunks; ++j) {
          __m128i row_j = _mm_load_si128(&row[j]);
          __m128i input_j = _mm_load_si128(&input_vector[j]);
          // Sign-extend the int8 weights to int16 (duplicate each byte, then
          // arithmetic-shift right by 8) and zero-extend the uint8 inputs.
          __m128i extended_row_lo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
          __m128i extended_row_hi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
          __m128i extended_input_lo = _mm_unpacklo_epi8(input_j, kZeros);
          __m128i extended_input_hi = _mm_unpackhi_epi8(input_j, kZeros);
          __m128i product_lo = _mm_madd_epi16(extended_row_lo, extended_input_lo);
          __m128i product_hi = _mm_madd_epi16(extended_row_hi, extended_input_hi);
          sum_lo = _mm_add_epi32(sum_lo, product_lo);
          sum_hi = _mm_add_epi32(sum_hi, product_hi);
        }
        __m128i sum = _mm_add_epi32(sum_lo, sum_hi);
        __m128i sum_high_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
        sum = _mm_add_epi32(sum, sum_high_64);
        __m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
        sum = _mm_add_epi32(sum, sum_second_32);
        output[i] = _mm_cvtsi128_si32(sum);

#elif defined(USE_MMX)
        __m64 sum_lo = _mm_cvtsi32_si64(biases_[i]);
        __m64 sum_hi = kZeros;
        const auto row = reinterpret_cast<const __m64*>(&weights_[offset]);
        for (IndexType j = 0; j < kNumChunks; ++j) {
          __m64 row_j = row[j];
          __m64 input_j = input_vector[j];
          __m64 extended_row_lo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
          __m64 extended_row_hi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
          __m64 extended_input_lo = _mm_unpacklo_pi8(input_j, kZeros);
          __m64 extended_input_hi = _mm_unpackhi_pi8(input_j, kZeros);
          __m64 product_lo = _mm_madd_pi16(extended_row_lo, extended_input_lo);
          __m64 product_hi = _mm_madd_pi16(extended_row_hi, extended_input_hi);
          sum_lo = _mm_add_pi32(sum_lo, product_lo);
          sum_hi = _mm_add_pi32(sum_hi, product_hi);
        }
        __m64 sum = _mm_add_pi32(sum_lo, sum_hi);
        sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
        output[i] = _mm_cvtsi64_si32(sum);

#elif defined(USE_NEON)
        int32x4_t sum = {biases_[i]};
        const auto row = reinterpret_cast<const int8x8_t*>(&weights_[offset]);
        for (IndexType j = 0; j < kNumChunks; ++j) {
          int16x8_t product = vmull_s8(input_vector[j * 2], row[j * 2]);
          product = vmlal_s8(product, input_vector[j * 2 + 1], row[j * 2 + 1]);
          sum = vpadalq_s16(sum, product);
        }
        output[i] = sum[0] + sum[1] + sum[2] + sum[3];

#else
        // Generic fallback: plain scalar dot product.
        OutputType sum = biases_[i];
        for (IndexType j = 0; j < kInputDimensions; ++j) {
          sum += weights_[offset + j] * input[j];
        }
        output[i] = sum;
#endif

      }
#if defined(USE_MMX)
      _mm_empty();
#endif
#endif

      return output;
    }

   private:
    using BiasType = OutputType;
    using WeightType = std::int8_t;

    PreviousLayer previous_layer_;

    alignas(kCacheLineSize) BiasType biases_[kOutputDimensions];
    alignas(kCacheLineSize) WeightType weights_[kOutputDimensions * kPaddedInputDimensions];
#if defined (USE_SSSE3)
    // Bookkeeping for the weights zeroed out in ReadParameters: each entry
    // records an (output, input) pair and the original weight, applied as a
    // scalar correction after the SIMD pass in Propagate.
    struct CanSaturate {
        int count;
        struct Entry {
            uint16_t out;
            uint16_t in;
            int8_t w;
        } ids[kPaddedInputDimensions * kOutputDimensions * 3 / 4];

        void add(int i, int j, int8_t w) {
            ids[count].out = i;
            ids[count].in = j;
            ids[count].w = w;
            ++count;
        }
    } canSaturate16;
#endif
  };

}  // namespace Stockfish::Eval::NNUE::Layers

#endif // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED