/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2021 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// Definition of layer AffineTransform of NNUE evaluation function

#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED

#include <algorithm>
#include <cstring>
#include <iostream>
#include <memory>
#include <type_traits>

#include "../nnue_common.h"

namespace Stockfish::Eval::NNUE::Layers {
  // Affine transformation layer
  template <typename PreviousLayer, IndexType OutDims>
  class AffineTransform {
   public:
    // Input/output type
    using InputType = typename PreviousLayer::OutputType;
    using OutputType = std::int32_t;
    static_assert(std::is_same<InputType, std::uint8_t>::value, "");

    // Number of input/output dimensions
    static constexpr IndexType InputDimensions =
        PreviousLayer::OutputDimensions;
    static constexpr IndexType OutputDimensions = OutDims;
    static constexpr IndexType PaddedInputDimensions =
        ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);

#if defined (USE_AVX512)
    static constexpr const IndexType OutputSimdWidth = SimdWidth / 2;
#elif defined (USE_SSSE3)
    static constexpr const IndexType OutputSimdWidth = SimdWidth / 4;
#endif
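    // For example, with hypothetical values InputDimensions == 30 and
    // MaxSimdWidth == 32, PaddedInputDimensions == 32: the input is rounded
    // up so whole SIMD registers can be processed without a scalar tail loop.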
    // Size of forward propagation buffer used in this layer
    static constexpr std::size_t SelfBufferSize =
        ceil_to_multiple(OutputDimensions * sizeof(OutputType), CacheLineSize);

    // Size of the forward propagation buffer used from the input layer to this layer
    static constexpr std::size_t BufferSize =
        PreviousLayer::BufferSize + SelfBufferSize;
    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t get_hash_value() {
      std::uint32_t hashValue = 0xCC03DAE4u;
      hashValue += OutputDimensions;
      hashValue ^= PreviousLayer::get_hash_value() >> 1;
      hashValue ^= PreviousLayer::get_hash_value() << 31;
      return hashValue;
    }
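    // Note: the previous layer's hash is folded in rotated right by one bit,
    // so a network file whose layer stack does not match the compiled
    // architecture shows up as a hash mismatch when the file is loaded.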
    // Read network parameters
    bool read_parameters(std::istream& stream) {
      if (!previousLayer.read_parameters(stream)) return false;
      for (std::size_t i = 0; i < OutputDimensions; ++i)
        biases[i] = read_little_endian<BiasType>(stream);
#if !defined (USE_SSSE3)
      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
        weights[i] = read_little_endian<WeightType>(stream);
#else
      std::unique_ptr<uint32_t[]> indexMap = std::make_unique<uint32_t[]>(OutputDimensions * PaddedInputDimensions);
      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i) {
        const uint32_t scrambledIdx =
          (i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
          i / PaddedInputDimensions * 4 +
          i % 4;
        weights[scrambledIdx] = read_little_endian<WeightType>(stream);
        indexMap[scrambledIdx] = i;
      }
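      // The scrambling regroups the row-major file layout: for each block of
      // 4 consecutive inputs, the weights of all outputs are stored
      // contiguously, so propagate() can broadcast 4 input bytes as one
      // 32-bit value and multiply them against a contiguous run of weights.
      // indexMap remembers the original position of each weight so that
      // write_parameters() can undo the transformation.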
      // Determine if groups of eight weight/input products can be summed using
      // 16 bits without saturation. We assume worst case combinations of 0 and
      // 127 for all inputs. With inputs at most 127, a group is safe as long
      // as its positive (and, symmetrically, negative) weights sum to at most
      // 258, since 127 * 258 = 32766 still fits in a signed 16-bit integer.
      // Weights that would overflow are zeroed here and their products are
      // reapplied in 32 bits during propagate() via canSaturate16.
      if (OutputDimensions > 1 && !stream.fail())
      {
          canSaturate16.count = 0;
#if !defined(USE_VNNI)
          for (IndexType i = 0; i < PaddedInputDimensions; i += 16)
              for (IndexType j = 0; j < OutputDimensions; ++j)
                  for (int x = 0; x < 2; ++x)
                  {
                      WeightType* w = &weights[i * OutputDimensions + j * 4 + x * 2];
                      int sum[2] = {0, 0};
                      for (int k = 0; k < 8; ++k)
                      {
                          IndexType idx = k / 2 * OutputDimensions * 4 + k % 2;
                          sum[w[idx] < 0] += w[idx];
                      }
                      for (int sign : { -1, 1 })
                          while (sign * sum[sign == -1] > 258)
                          {
                              int maxK = 0, maxW = 0;
                              for (int k = 0; k < 8; ++k)
                              {
                                  IndexType idx = k / 2 * OutputDimensions * 4 + k % 2;
                                  if (maxW < sign * w[idx])
                                      maxK = k, maxW = sign * w[idx];
                              }

                              IndexType idx = maxK / 2 * OutputDimensions * 4 + maxK % 2;
                              sum[sign == -1] -= w[idx];
                              const uint32_t scrambledIdx = idx + i * OutputDimensions + j * 4 + x * 2;
                              canSaturate16.add(j, i + maxK / 2 * 4 + maxK % 2 + x * 2, w[idx], indexMap[scrambledIdx]);
                              w[idx] = 0;
                          }
                  }

          // Non-functional optimization for faster, more linear access
          std::sort(canSaturate16.ids, canSaturate16.ids + canSaturate16.count,
                    [](const typename CanSaturate::Entry& e1, const typename CanSaturate::Entry& e2)
                    { return e1.in == e2.in ? e1.out < e2.out : e1.in < e2.in; });
#endif
      }
#endif

      return !stream.fail();
    }
    // Write network parameters
    bool write_parameters(std::ostream& stream) const {
      if (!previousLayer.write_parameters(stream)) return false;
      for (std::size_t i = 0; i < OutputDimensions; ++i)
        write_little_endian<BiasType>(stream, biases[i]);
#if !defined (USE_SSSE3)
      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
        write_little_endian<WeightType>(stream, weights[i]);
#else
      // Undo the scrambling and the saturation-avoiding zeroing done in
      // read_parameters() so that the file round-trips bit-exactly.
      std::unique_ptr<WeightType[]> unscrambledWeights = std::make_unique<WeightType[]>(OutputDimensions * PaddedInputDimensions);
      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i) {
          unscrambledWeights[i] =
              weights[
                (i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
                i / PaddedInputDimensions * 4 +
                i % 4
              ];
      }
      for (int i = 0; i < canSaturate16.count; ++i)
          unscrambledWeights[canSaturate16.ids[i].wIdx] = canSaturate16.ids[i].w;

      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
          write_little_endian<WeightType>(stream, unscrambledWeights[i]);
#endif

      return !stream.fail();
    }
    // Forward propagation
    const OutputType* propagate(
        const TransformedFeatureType* transformedFeatures, char* buffer) const {
      const auto input = previousLayer.propagate(
          transformedFeatures, buffer + SelfBufferSize);
#if defined (USE_AVX512)

      [[maybe_unused]] const __m512i Ones512 = _mm512_set1_epi16(1);

      [[maybe_unused]] auto m512_hadd = [](__m512i sum, int bias) -> int {
        return _mm512_reduce_add_epi32(sum) + bias;
      };

      [[maybe_unused]] auto m512_add_dpbusd_epi32 = [=](__m512i& acc, __m512i a, __m512i b) {
#if defined (USE_VNNI)
        acc = _mm512_dpbusd_epi32(acc, a, b);
#else
        __m512i product0 = _mm512_maddubs_epi16(a, b);
        product0 = _mm512_madd_epi16(product0, Ones512);
        acc = _mm512_add_epi32(acc, product0);
#endif
      };

      [[maybe_unused]] auto m512_add_dpbusd_epi32x4 = [=](__m512i& acc, __m512i a0, __m512i b0, __m512i a1, __m512i b1,
                                                          __m512i a2, __m512i b2, __m512i a3, __m512i b3) {
#if defined (USE_VNNI)
        acc = _mm512_dpbusd_epi32(acc, a0, b0);
        acc = _mm512_dpbusd_epi32(acc, a1, b1);
        acc = _mm512_dpbusd_epi32(acc, a2, b2);
        acc = _mm512_dpbusd_epi32(acc, a3, b3);
#else
        __m512i product0 = _mm512_maddubs_epi16(a0, b0);
        __m512i product1 = _mm512_maddubs_epi16(a1, b1);
        __m512i product2 = _mm512_maddubs_epi16(a2, b2);
        __m512i product3 = _mm512_maddubs_epi16(a3, b3);
        product0 = _mm512_add_epi16(product0, product1);
        product2 = _mm512_add_epi16(product2, product3);
        product0 = _mm512_add_epi16(product0, product2);
        product0 = _mm512_madd_epi16(product0, Ones512);
        acc = _mm512_add_epi32(acc, product0);
#endif
      };

#endif
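      // Without VNNI, dpbusd is emulated by maddubs (u8 x s8 products added
      // in pairs with signed 16-bit saturation) followed by madd against ones
      // to widen to 32 bits. That 16-bit saturation risk is exactly what
      // canSaturate16 compensates for. The same pattern is repeated below for
      // the AVX2 and SSSE3 register widths.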
#if defined (USE_AVX2)

      [[maybe_unused]] const __m256i Ones256 = _mm256_set1_epi16(1);

      [[maybe_unused]] auto m256_hadd = [](__m256i sum, int bias) -> int {
        __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extracti128_si256(sum, 1));
        sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_BADC));
        sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_CDAB));
        return _mm_cvtsi128_si32(sum128) + bias;
      };

      [[maybe_unused]] auto m256_add_dpbusd_epi32 = [=](__m256i& acc, __m256i a, __m256i b) {
#if defined (USE_VNNI)
        acc = _mm256_dpbusd_epi32(acc, a, b);
#else
        __m256i product0 = _mm256_maddubs_epi16(a, b);
        product0 = _mm256_madd_epi16(product0, Ones256);
        acc = _mm256_add_epi32(acc, product0);
#endif
      };

      [[maybe_unused]] auto m256_add_dpbusd_epi32x4 = [=](__m256i& acc, __m256i a0, __m256i b0, __m256i a1, __m256i b1,
                                                          __m256i a2, __m256i b2, __m256i a3, __m256i b3) {
#if defined (USE_VNNI)
        acc = _mm256_dpbusd_epi32(acc, a0, b0);
        acc = _mm256_dpbusd_epi32(acc, a1, b1);
        acc = _mm256_dpbusd_epi32(acc, a2, b2);
        acc = _mm256_dpbusd_epi32(acc, a3, b3);
#else
        __m256i product0 = _mm256_maddubs_epi16(a0, b0);
        __m256i product1 = _mm256_maddubs_epi16(a1, b1);
        __m256i product2 = _mm256_maddubs_epi16(a2, b2);
        __m256i product3 = _mm256_maddubs_epi16(a3, b3);
        product0 = _mm256_add_epi16(product0, product1);
        product2 = _mm256_add_epi16(product2, product3);
        product0 = _mm256_add_epi16(product0, product2);
        product0 = _mm256_madd_epi16(product0, Ones256);
        acc = _mm256_add_epi32(acc, product0);
#endif
      };

#endif
#if defined (USE_SSSE3)

      [[maybe_unused]] const __m128i Ones128 = _mm_set1_epi16(1);

      [[maybe_unused]] auto m128_hadd = [](__m128i sum, int bias) -> int {
        sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0x4E)); //_MM_PERM_BADC
        sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0xB1)); //_MM_PERM_CDAB
        return _mm_cvtsi128_si32(sum) + bias;
      };

      [[maybe_unused]] auto m128_add_dpbusd_epi32 = [=](__m128i& acc, __m128i a, __m128i b) {
        __m128i product0 = _mm_maddubs_epi16(a, b);
        product0 = _mm_madd_epi16(product0, Ones128);
        acc = _mm_add_epi32(acc, product0);
      };

      [[maybe_unused]] auto m128_add_dpbusd_epi32x4 = [=](__m128i& acc, __m128i a0, __m128i b0, __m128i a1, __m128i b1,
                                                          __m128i a2, __m128i b2, __m128i a3, __m128i b3) {
        __m128i product0 = _mm_maddubs_epi16(a0, b0);
        __m128i product1 = _mm_maddubs_epi16(a1, b1);
        __m128i product2 = _mm_maddubs_epi16(a2, b2);
        __m128i product3 = _mm_maddubs_epi16(a3, b3);
        product0 = _mm_add_epi16(product0, product1);
        product2 = _mm_add_epi16(product2, product3);
        product0 = _mm_add_epi16(product0, product2);
        product0 = _mm_madd_epi16(product0, Ones128);
        acc = _mm_add_epi32(acc, product0);
      };

#endif
#if defined (USE_AVX512)
      using vec_t = __m512i;
      #define vec_setzero _mm512_setzero_si512
      #define vec_set_32 _mm512_set1_epi32
      auto& vec_add_dpbusd_32 = m512_add_dpbusd_epi32;
      auto& vec_add_dpbusd_32x4 = m512_add_dpbusd_epi32x4;
      auto& vec_hadd = m512_hadd;
#elif defined (USE_AVX2)
      using vec_t = __m256i;
      #define vec_setzero _mm256_setzero_si256
      #define vec_set_32 _mm256_set1_epi32
      auto& vec_add_dpbusd_32 = m256_add_dpbusd_epi32;
      auto& vec_add_dpbusd_32x4 = m256_add_dpbusd_epi32x4;
      auto& vec_hadd = m256_hadd;
#elif defined (USE_SSSE3)
      using vec_t = __m128i;
      #define vec_setzero _mm_setzero_si128
      #define vec_set_32 _mm_set1_epi32
      auto& vec_add_dpbusd_32 = m128_add_dpbusd_epi32;
      auto& vec_add_dpbusd_32x4 = m128_add_dpbusd_epi32x4;
      auto& vec_hadd = m128_hadd;
#endif
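      // From here on, the SSSE3/AVX2/AVX-512 paths share one width-generic
      // implementation written in terms of vec_t and the aliases above.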
#if defined (USE_SSSE3)

      const auto output = reinterpret_cast<OutputType*>(buffer);
      const auto inputVector = reinterpret_cast<const vec_t*>(input);

      static_assert(OutputDimensions % OutputSimdWidth == 0 || OutputDimensions == 1);

      // OutputDimensions is either 1 or a multiple of SimdWidth
      // because then it is also an input dimension.
      if constexpr (OutputDimensions % OutputSimdWidth == 0)
      {
          constexpr IndexType NumChunks = PaddedInputDimensions / 4;

          const auto input32 = reinterpret_cast<const std::int32_t*>(input);
          vec_t* outptr = reinterpret_cast<vec_t*>(output);
          std::memcpy(output, biases, OutputDimensions * sizeof(OutputType));

          for (int i = 0; i < (int)NumChunks - 3; i += 4)
          {
              const vec_t in0 = vec_set_32(input32[i + 0]);
              const vec_t in1 = vec_set_32(input32[i + 1]);
              const vec_t in2 = vec_set_32(input32[i + 2]);
              const vec_t in3 = vec_set_32(input32[i + 3]);
              const auto col0 = reinterpret_cast<const vec_t*>(&weights[(i + 0) * OutputDimensions * 4]);
              const auto col1 = reinterpret_cast<const vec_t*>(&weights[(i + 1) * OutputDimensions * 4]);
              const auto col2 = reinterpret_cast<const vec_t*>(&weights[(i + 2) * OutputDimensions * 4]);
              const auto col3 = reinterpret_cast<const vec_t*>(&weights[(i + 3) * OutputDimensions * 4]);
              for (int j = 0; j * OutputSimdWidth < OutputDimensions; ++j)
                  vec_add_dpbusd_32x4(outptr[j], in0, col0[j], in1, col1[j], in2, col2[j], in3, col3[j]);
          }
          // Reapply, in full 32-bit precision, the few products whose weights
          // were zeroed in read_parameters() to avoid 16-bit saturation.
          for (int i = 0; i < canSaturate16.count; ++i)
              output[canSaturate16.ids[i].out] += input[canSaturate16.ids[i].in] * canSaturate16.ids[i].w;
      }
      else if constexpr (OutputDimensions == 1)
      {
          // A single output is one dot product over the row (for a single
          // output the scrambled layout reduces to the identity mapping).
#if defined (USE_AVX512)
          if constexpr (PaddedInputDimensions % (SimdWidth * 2) != 0)
          {
              constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
              const auto inputVector256 = reinterpret_cast<const __m256i*>(input);

              __m256i sum0 = _mm256_setzero_si256();
              const auto row0 = reinterpret_cast<const __m256i*>(&weights[0]);

              for (int j = 0; j < (int)NumChunks; ++j)
              {
                  const __m256i in = inputVector256[j];
                  m256_add_dpbusd_epi32(sum0, in, row0[j]);
              }
              output[0] = m256_hadd(sum0, biases[0]);
          }
          else
#endif
          {
#if defined (USE_AVX512)
              constexpr IndexType NumChunks = PaddedInputDimensions / (SimdWidth * 2);
#else
              constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
#endif
              vec_t sum0 = vec_setzero();
              const auto row0 = reinterpret_cast<const vec_t*>(&weights[0]);

              for (int j = 0; j < (int)NumChunks; ++j)
              {
                  const vec_t in = inputVector[j];
                  vec_add_dpbusd_32(sum0, in, row0[j]);
              }
              output[0] = vec_hadd(sum0, biases[0]);
          }
      }
      return output;
#else

      // Use old implementation for the other architectures.

      auto output = reinterpret_cast<OutputType*>(buffer);

#if defined(USE_SSE2)
      constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
      const __m128i Zeros = _mm_setzero_si128();
      const auto inputVector = reinterpret_cast<const __m128i*>(input);

#elif defined(USE_MMX)
      constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
      const __m64 Zeros = _mm_setzero_si64();
      const auto inputVector = reinterpret_cast<const __m64*>(input);

#elif defined(USE_NEON)
      constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
      const auto inputVector = reinterpret_cast<const int8x8_t*>(input);
#endif
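      // Unlike the SSSE3 fast path, these fallbacks widen weights to 16 bits
      // before multiplying, so no saturation handling is needed; each output
      // is computed as an independent row dot product.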
      for (IndexType i = 0; i < OutputDimensions; ++i) {
        const IndexType offset = i * PaddedInputDimensions;

#if defined(USE_SSE2)
        __m128i sumLo = _mm_cvtsi32_si128(biases[i]);
        __m128i sumHi = Zeros;
        const auto row = reinterpret_cast<const __m128i*>(&weights[offset]);
        for (IndexType j = 0; j < NumChunks; ++j) {
          __m128i row_j = _mm_load_si128(&row[j]);
          __m128i input_j = _mm_load_si128(&inputVector[j]);
          // Sign-extend the 8-bit weights and zero-extend the 8-bit inputs
          // to 16 bits, then multiply-accumulate into 32-bit lanes.
          __m128i extendedRowLo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
          __m128i extendedRowHi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
          __m128i extendedInputLo = _mm_unpacklo_epi8(input_j, Zeros);
          __m128i extendedInputHi = _mm_unpackhi_epi8(input_j, Zeros);
          __m128i productLo = _mm_madd_epi16(extendedRowLo, extendedInputLo);
          __m128i productHi = _mm_madd_epi16(extendedRowHi, extendedInputHi);
          sumLo = _mm_add_epi32(sumLo, productLo);
          sumHi = _mm_add_epi32(sumHi, productHi);
        }
        __m128i sum = _mm_add_epi32(sumLo, sumHi);
        __m128i sumHigh_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
        sum = _mm_add_epi32(sum, sumHigh_64);
        __m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
        sum = _mm_add_epi32(sum, sum_second_32);
        output[i] = _mm_cvtsi128_si32(sum);

#elif defined(USE_MMX)
        __m64 sumLo = _mm_cvtsi32_si64(biases[i]);
        __m64 sumHi = Zeros;
        const auto row = reinterpret_cast<const __m64*>(&weights[offset]);
        for (IndexType j = 0; j < NumChunks; ++j) {
          __m64 row_j = row[j];
          __m64 input_j = inputVector[j];
          __m64 extendedRowLo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
          __m64 extendedRowHi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
          __m64 extendedInputLo = _mm_unpacklo_pi8(input_j, Zeros);
          __m64 extendedInputHi = _mm_unpackhi_pi8(input_j, Zeros);
          __m64 productLo = _mm_madd_pi16(extendedRowLo, extendedInputLo);
          __m64 productHi = _mm_madd_pi16(extendedRowHi, extendedInputHi);
          sumLo = _mm_add_pi32(sumLo, productLo);
          sumHi = _mm_add_pi32(sumHi, productHi);
        }
        __m64 sum = _mm_add_pi32(sumLo, sumHi);
        sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
        output[i] = _mm_cvtsi64_si32(sum);

#elif defined(USE_NEON)
        int32x4_t sum = {biases[i]};
        const auto row = reinterpret_cast<const int8x8_t*>(&weights[offset]);
        for (IndexType j = 0; j < NumChunks; ++j) {
          int16x8_t product = vmull_s8(inputVector[j * 2], row[j * 2]);
          product = vmlal_s8(product, inputVector[j * 2 + 1], row[j * 2 + 1]);
          sum = vpadalq_s16(sum, product);
        }
        output[i] = sum[0] + sum[1] + sum[2] + sum[3];

#else
        OutputType sum = biases[i];
        for (IndexType j = 0; j < InputDimensions; ++j) {
          sum += weights[offset + j] * input[j];
        }
        output[i] = sum;
#endif

      }
#if defined(USE_MMX)
      _mm_empty();
#endif
      return output;
#endif
    }
   private:
    using BiasType = OutputType;
    using WeightType = std::int8_t;

    PreviousLayer previousLayer;

    alignas(CacheLineSize) BiasType biases[OutputDimensions];
    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
#if defined (USE_SSSE3)
    // List of saturation-prone products detected in read_parameters(): for
    // each, the affected output index, input index, original weight, and the
    // weight's position in the (unscrambled) weight file.
    struct CanSaturate {
        int count;
        struct Entry {
            uint16_t out;
            uint16_t in;
            int8_t w;
            uint32_t wIdx;
        } ids[PaddedInputDimensions * OutputDimensions * 3 / 4];

        void add(int i, int j, int8_t w, uint32_t wIdx) {
            ids[count].wIdx = wIdx;
            ids[count].out = i;
            ids[count].in = j;
            ids[count].w = w;
            ++count;
        }
    } canSaturate16;
#endif
  };
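  // A minimal usage sketch (hypothetical layer stack; the companion layer
  // templates InputSlice and ClippedReLU live in this same directory):
  //
  //   using InputLayer = InputSlice<1024>;
  //   using Hidden1 = ClippedReLU<AffineTransform<InputLayer, 16>>;
  //   using Hidden2 = ClippedReLU<AffineTransform<Hidden1, 32>>;
  //   using OutputLayer = AffineTransform<Hidden2, 1>;
  //
  // Each layer exposes OutputType, OutputDimensions and BufferSize, which the
  // next AffineTransform consumes through its PreviousLayer parameter.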
}  // namespace Stockfish::Eval::NNUE::Layers

#endif // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED