/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2020 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
// Definition of layer AffineTransform of NNUE evaluation function

#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED

#include <iostream>

#include "../nnue_common.h"

namespace Eval::NNUE::Layers {

  // Affine transformation layer
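  // Computes output[i] = biases_[i] + sum over j of
  // weights_[i * kPaddedInputDimensions + j] * input[j], where the input is the
  // uint8 output of the previous layer, the weights are int8 and the
  // accumulation is done in int32.
  //
  // For orientation, the architecture headers compose it roughly like this
  // (a sketch; see e.g. architectures/halfkp_256x2-32-32.h for the real names):
  //
  //   using HiddenLayer1 = ClippedReLU<AffineTransform<InputLayer, 32>>;
  //   using HiddenLayer2 = ClippedReLU<AffineTransform<HiddenLayer1, 32>>;
  //   using OutputLayer  = AffineTransform<HiddenLayer2, 1>;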
  template <typename PreviousLayer, IndexType OutputDimensions>
  class AffineTransform {
   public:
    // Input/output type
    using InputType = typename PreviousLayer::OutputType;
    using OutputType = std::int32_t;
    static_assert(std::is_same<InputType, std::uint8_t>::value, "");

    // Number of input/output dimensions
    static constexpr IndexType kInputDimensions =
        PreviousLayer::kOutputDimensions;
    static constexpr IndexType kOutputDimensions = OutputDimensions;
    static constexpr IndexType kPaddedInputDimensions =
        CeilToMultiple<IndexType>(kInputDimensions, kMaxSimdWidth);
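    // Note: each weight row is padded up to a multiple of kMaxSimdWidth so it
    // can be processed in whole SIMD registers; the padding columns are part of
    // the serialized weights (see ReadParameters below).
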
    // Size of forward propagation buffer used in this layer
    static constexpr std::size_t kSelfBufferSize =
        CeilToMultiple(kOutputDimensions * sizeof(OutputType), kCacheLineSize);

    // Size of the forward propagation buffer used from the input layer to this layer
    static constexpr std::size_t kBufferSize =
        PreviousLayer::kBufferSize + kSelfBufferSize;

    // Hash value embedded in the evaluation file
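    // (mixed with the previous layer's hash, so that a network file produced
    // for a different architecture is rejected when the evaluation file is loaded)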
    static constexpr std::uint32_t GetHashValue() {
      std::uint32_t hash_value = 0xCC03DAE4u;
      hash_value += kOutputDimensions;
      hash_value ^= PreviousLayer::GetHashValue() >> 1;
      hash_value ^= PreviousLayer::GetHashValue() << 31;
      return hash_value;
    }

    // Read network parameters
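    // File layout: the previous layer's parameters come first, then
    // kOutputDimensions biases, then the row-major weight matrix including the
    // padding columns.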
    bool ReadParameters(std::istream& stream) {
      if (!previous_layer_.ReadParameters(stream)) return false;
      stream.read(reinterpret_cast<char*>(biases_),
                  kOutputDimensions * sizeof(BiasType));
      stream.read(reinterpret_cast<char*>(weights_),
                  kOutputDimensions * kPaddedInputDimensions *
                      sizeof(WeightType));
      return !stream.fail();
    }

    // Forward propagation
    const OutputType* Propagate(
        const TransformedFeatureType* transformed_features, char* buffer) const {
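      // The shared buffer is split: the previous layer propagates into the part
      // past the first kSelfBufferSize bytes, and this layer's int32 outputs are
      // written at the front of the buffer.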
      const auto input = previous_layer_.Propagate(
          transformed_features, buffer + kSelfBufferSize);
      const auto output = reinterpret_cast<OutputType*>(buffer);

  #if defined(USE_AVX512)
      constexpr IndexType kNumChunks = kPaddedInputDimensions / (kSimdWidth * 2);
      const __m512i kOnes = _mm512_set1_epi16(1);
      const auto input_vector = reinterpret_cast<const __m512i*>(input);

  #elif defined(USE_AVX2)
      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
      const __m256i kOnes = _mm256_set1_epi16(1);
      const auto input_vector = reinterpret_cast<const __m256i*>(input);

  #elif defined(USE_SSSE3)
      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
      const __m128i kOnes = _mm_set1_epi16(1);
      const auto input_vector = reinterpret_cast<const __m128i*>(input);

  #elif defined(USE_NEON)
      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
      const auto input_vector = reinterpret_cast<const int8x8_t*>(input);
  #endif

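      // Every vectorized path below follows the same scheme for each output row:
      // multiply the uint8 inputs by the int8 weights (maddubs on x86, vmull_s8
      // on NEON), widen the partial sums to int32, reduce them horizontally and
      // combine with the bias (seeded into the accumulator on SSSE3/NEON, added
      // after the reduction on AVX2/AVX-512).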
      for (IndexType i = 0; i < kOutputDimensions; ++i) {
        const IndexType offset = i * kPaddedInputDimensions;

  #if defined(USE_AVX512)
        __m512i sum = _mm512_setzero_si512();
        const auto row = reinterpret_cast<const __m512i*>(&weights_[offset]);
        for (IndexType j = 0; j < kNumChunks; ++j) {
            __m512i product = _mm512_maddubs_epi16(_mm512_loadA_si512(&input_vector[j]), _mm512_load_si512(&row[j]));
            product = _mm512_madd_epi16(product, kOnes);
            sum = _mm512_add_epi32(sum, product);
        }
        output[i] = _mm512_reduce_add_epi32(sum) + biases_[i];

        // Note: Changing kMaxSimdWidth from 32 to 64 breaks loading existing networks.
        // As a result kPaddedInputDimensions may not be an even multiple of 64 (512 bits)
        // and we have to do one more 256-bit chunk.
        if (kPaddedInputDimensions != kNumChunks * kSimdWidth * 2)
        {
            const auto iv_256  = reinterpret_cast<const __m256i*>(input);
            const auto row_256 = reinterpret_cast<const __m256i*>(&weights_[offset]);
            int j = kNumChunks * 2;

            __m256i sum256 = _mm256_maddubs_epi16(_mm256_loadA_si256(&iv_256[j]), _mm256_load_si256(&row_256[j]));
            sum256 = _mm256_madd_epi16(sum256, _mm256_set1_epi16(1));
            sum256 = _mm256_hadd_epi32(sum256, sum256);
            sum256 = _mm256_hadd_epi32(sum256, sum256);
            const __m128i lo = _mm256_extracti128_si256(sum256, 0);
            const __m128i hi = _mm256_extracti128_si256(sum256, 1);
            output[i] += _mm_cvtsi128_si32(lo) + _mm_cvtsi128_si32(hi);
        }

  #elif defined(USE_AVX2)
        __m256i sum = _mm256_setzero_si256();
        const auto row = reinterpret_cast<const __m256i*>(&weights_[offset]);
        for (IndexType j = 0; j < kNumChunks; ++j) {
            __m256i product = _mm256_maddubs_epi16(_mm256_loadA_si256(&input_vector[j]), _mm256_load_si256(&row[j]));
            product = _mm256_madd_epi16(product, kOnes);
            sum = _mm256_add_epi32(sum, product);
        }
        sum = _mm256_hadd_epi32(sum, sum);
        sum = _mm256_hadd_epi32(sum, sum);
        const __m128i lo = _mm256_extracti128_si256(sum, 0);
        const __m128i hi = _mm256_extracti128_si256(sum, 1);
        output[i] = _mm_cvtsi128_si32(lo) + _mm_cvtsi128_si32(hi) + biases_[i];

  #elif defined(USE_SSSE3)
        __m128i sum = _mm_cvtsi32_si128(biases_[i]);
        const auto row = reinterpret_cast<const __m128i*>(&weights_[offset]);
        for (IndexType j = 0; j < kNumChunks; ++j) {
            __m128i product = _mm_maddubs_epi16(_mm_load_si128(&input_vector[j]), _mm_load_si128(&row[j]));
            product = _mm_madd_epi16(product, kOnes);
            sum = _mm_add_epi32(sum, product);
        }
        sum = _mm_hadd_epi32(sum, sum);
        sum = _mm_hadd_epi32(sum, sum);
        output[i] = _mm_cvtsi128_si32(sum);

  #elif defined(USE_NEON)
        int32x4_t sum = {biases_[i]};
        const auto row = reinterpret_cast<const int8x8_t*>(&weights_[offset]);
        for (IndexType j = 0; j < kNumChunks; ++j) {
            int16x8_t product = vmull_s8(input_vector[j * 2], row[j * 2]);
            product = vmlal_s8(product, input_vector[j * 2 + 1], row[j * 2 + 1]);
            sum = vpadalq_s16(sum, product);
        }
        output[i] = sum[0] + sum[1] + sum[2] + sum[3];

  #else
        OutputType sum = biases_[i];
        for (IndexType j = 0; j < kInputDimensions; ++j) {
            sum += weights_[offset + j] * input[j];
        }
        output[i] = sum;
  #endif
      }

      return output;
    }

   private:
    using BiasType = OutputType;
    using WeightType = std::int8_t;

    PreviousLayer previous_layer_;

    alignas(kCacheLineSize) BiasType biases_[kOutputDimensions];
    alignas(kCacheLineSize)
        WeightType weights_[kOutputDimensions * kPaddedInputDimensions];
  };

}  // namespace Eval::NNUE::Layers

#endif // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED