/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2020 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

// Definition of layer AffineTransform of NNUE evaluation function

#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED

#include <iostream>

#include "../nnue_common.h"

namespace Eval::NNUE::Layers {

  // Affine transformation layer
  template <typename PreviousLayer, IndexType OutputDimensions>
  class AffineTransform {
   public:
    // Input/output type
    using InputType = typename PreviousLayer::OutputType;
    using OutputType = std::int32_t;
    static_assert(std::is_same<InputType, std::uint8_t>::value, "");

    // Number of input/output dimensions
    static constexpr IndexType kInputDimensions =
        PreviousLayer::kOutputDimensions;
    static constexpr IndexType kOutputDimensions = OutputDimensions;
    static constexpr IndexType kPaddedInputDimensions =
        CeilToMultiple<IndexType>(kInputDimensions, kMaxSimdWidth);

    // Size of forward propagation buffer used in this layer
    static constexpr std::size_t kSelfBufferSize =
        CeilToMultiple(kOutputDimensions * sizeof(OutputType), kCacheLineSize);

    // Size of the forward propagation buffer used from the input layer to this layer
    static constexpr std::size_t kBufferSize =
        PreviousLayer::kBufferSize + kSelfBufferSize;
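
    // Propagate() writes this layer's output at the front of the shared
    // buffer and hands buffer + kSelfBufferSize down to the previous layer,
    // so kBufferSize accumulates the space needed by the whole layer stack.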

    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t GetHashValue() {
      std::uint32_t hash_value = 0xCC03DAE4u;
      hash_value += kOutputDimensions;
      hash_value ^= PreviousLayer::GetHashValue() >> 1;
      hash_value ^= PreviousLayer::GetHashValue() << 31;
      return hash_value;
    }
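
    // Because the hash chains through PreviousLayer, a network file is only
    // accepted when the reader was built with a matching layer stack.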

    // Read network parameters
    bool ReadParameters(std::istream& stream) {
      if (!previous_layer_.ReadParameters(stream)) return false;
      stream.read(reinterpret_cast<char*>(biases_),
                  kOutputDimensions * sizeof(BiasType));
      stream.read(reinterpret_cast<char*>(weights_),
                  kOutputDimensions * kPaddedInputDimensions *
                      sizeof(WeightType));
      return !stream.fail();
    }

    // Forward propagation
    const OutputType* Propagate(
        const TransformedFeatureType* transformed_features, char* buffer) const {
      const auto input = previous_layer_.Propagate(
          transformed_features, buffer + kSelfBufferSize);
      const auto output = reinterpret_cast<OutputType*>(buffer);
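
      // Every SIMD branch below computes the same result: for each output i,
      // output[i] = biases_[i] + dot(row i of weights_, input), multiplying
      // uint8 inputs by int8 weights and accumulating into int32. Only the
      // chunk width and the reduction strategy differ per instruction set.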
#if defined(USE_AVX512)
      constexpr IndexType kNumChunks = kPaddedInputDimensions / (kSimdWidth * 2);
      const __m512i kOnes = _mm512_set1_epi16(1);
      const auto input_vector = reinterpret_cast<const __m512i*>(input);

#elif defined(USE_AVX2)
      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
      const __m256i kOnes = _mm256_set1_epi16(1);
      const auto input_vector = reinterpret_cast<const __m256i*>(input);

#elif defined(USE_SSE2)
      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
#ifndef USE_SSSE3
      const __m128i kZeros = _mm_setzero_si128();
#else
      const __m128i kOnes = _mm_set1_epi16(1);
#endif
      const auto input_vector = reinterpret_cast<const __m128i*>(input);

#elif defined(USE_MMX)
      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
      const __m64 kZeros = _mm_setzero_si64();
      const auto input_vector = reinterpret_cast<const __m64*>(input);

#elif defined(USE_NEON)
      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
      const auto input_vector = reinterpret_cast<const int8x8_t*>(input);
#endif

      for (IndexType i = 0; i < kOutputDimensions; ++i) {
        const IndexType offset = i * kPaddedInputDimensions;

#if defined(USE_AVX512)
        __m512i sum = _mm512_setzero_si512();
        const auto row = reinterpret_cast<const __m512i*>(&weights_[offset]);
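        // _mm512_maddubs_epi16 multiplies u8 inputs by i8 weights and adds
        // adjacent products into 16-bit lanes; madd against kOnes then
        // widens those pairwise into 32-bit partial sums.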
        for (IndexType j = 0; j < kNumChunks; ++j) {
          __m512i product = _mm512_maddubs_epi16(_mm512_loadA_si512(&input_vector[j]), _mm512_load_si512(&row[j]));
          product = _mm512_madd_epi16(product, kOnes);
          sum = _mm512_add_epi32(sum, product);
        }

        // Note: Changing kMaxSimdWidth from 32 to 64 breaks loading existing networks.
        // As a result kPaddedInputDimensions may not be an even multiple of 64 (512 bits)
        // and we have to do one more 256-bit chunk.
        if (kPaddedInputDimensions != kNumChunks * kSimdWidth * 2) {
          const auto iv256  = reinterpret_cast<const __m256i*>(&input_vector[kNumChunks]);
          const auto row256 = reinterpret_cast<const __m256i*>(&row[kNumChunks]);
          __m256i product256 = _mm256_maddubs_epi16(_mm256_loadA_si256(&iv256[0]), _mm256_load_si256(&row256[0]));
          sum = _mm512_add_epi32(sum, _mm512_cvtepi16_epi32(product256));
        }
        output[i] = _mm512_reduce_add_epi32(sum) + biases_[i];

#elif defined(USE_AVX2)
        __m256i sum = _mm256_setzero_si256();
        const auto row = reinterpret_cast<const __m256i*>(&weights_[offset]);
        for (IndexType j = 0; j < kNumChunks; ++j) {
          __m256i product = _mm256_maddubs_epi16(_mm256_loadA_si256(&input_vector[j]), _mm256_load_si256(&row[j]));
          product = _mm256_madd_epi16(product, kOnes);
          sum = _mm256_add_epi32(sum, product);
        }
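        // Horizontal reduction: fold the upper 128-bit half onto the lower
        // one, then shuffle-add twice to collapse the four remaining 32-bit
        // lanes into lane 0.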
        __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extracti128_si256(sum, 1));
        sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_BADC));
        sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_CDAB));
        output[i] = _mm_cvtsi128_si32(sum128) + biases_[i];

#elif defined(USE_SSSE3)
        __m128i sum = _mm_setzero_si128();
        const auto row = reinterpret_cast<const __m128i*>(&weights_[offset]);
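        // The chunk loop is unrolled by two to expose more instruction-level
        // parallelism; an odd trailing chunk is handled after the loop.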
        for (int j = 0; j < (int)kNumChunks - 1; j += 2) {
          __m128i product0 = _mm_maddubs_epi16(_mm_load_si128(&input_vector[j]), _mm_load_si128(&row[j]));
          product0 = _mm_madd_epi16(product0, kOnes);
          sum = _mm_add_epi32(sum, product0);
          __m128i product1 = _mm_maddubs_epi16(_mm_load_si128(&input_vector[j + 1]), _mm_load_si128(&row[j + 1]));
          product1 = _mm_madd_epi16(product1, kOnes);
          sum = _mm_add_epi32(sum, product1);
        }
        if (kNumChunks & 0x1) {
          __m128i product = _mm_maddubs_epi16(_mm_load_si128(&input_vector[kNumChunks - 1]), _mm_load_si128(&row[kNumChunks - 1]));
          product = _mm_madd_epi16(product, kOnes);
          sum = _mm_add_epi32(sum, product);
        }
        sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0x4E)); // _MM_PERM_BADC
        sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0xB1)); // _MM_PERM_CDAB
        output[i] = _mm_cvtsi128_si32(sum) + biases_[i];

#elif defined(USE_SSE2)
        __m128i sum_lo = _mm_cvtsi32_si128(biases_[i]);
        __m128i sum_hi = kZeros;
        const auto row = reinterpret_cast<const __m128i*>(&weights_[offset]);
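        // SSE2 lacks maddubs, so both operands are widened to 16 bits by
        // hand: comparing the weights against zero yields their sign bytes
        // for sign-extension via unpack, while the uint8 inputs are
        // zero-extended by unpacking against kZeros.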
        for (IndexType j = 0; j < kNumChunks; ++j) {
          __m128i row_j = _mm_load_si128(&row[j]);
          __m128i input_j = _mm_load_si128(&input_vector[j]);
          __m128i row_signs = _mm_cmpgt_epi8(kZeros, row_j);
          __m128i extended_row_lo = _mm_unpacklo_epi8(row_j, row_signs);
          __m128i extended_row_hi = _mm_unpackhi_epi8(row_j, row_signs);
          __m128i extended_input_lo = _mm_unpacklo_epi8(input_j, kZeros);
          __m128i extended_input_hi = _mm_unpackhi_epi8(input_j, kZeros);
          __m128i product_lo = _mm_madd_epi16(extended_row_lo, extended_input_lo);
          __m128i product_hi = _mm_madd_epi16(extended_row_hi, extended_input_hi);
          sum_lo = _mm_add_epi32(sum_lo, product_lo);
          sum_hi = _mm_add_epi32(sum_hi, product_hi);
        }
        __m128i sum = _mm_add_epi32(sum_lo, sum_hi);
        __m128i sum_high_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
        sum = _mm_add_epi32(sum, sum_high_64);
        __m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
        sum = _mm_add_epi32(sum, sum_second_32);
        output[i] = _mm_cvtsi128_si32(sum);

#elif defined(USE_MMX)
        __m64 sum_lo = _mm_cvtsi32_si64(biases_[i]);
        __m64 sum_hi = kZeros;
        const auto row = reinterpret_cast<const __m64*>(&weights_[offset]);
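        // Same sign/zero-extension trick as the SSE2 path above, applied to
        // 64-bit MMX registers.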
        for (IndexType j = 0; j < kNumChunks; ++j) {
          __m64 row_j = row[j];
          __m64 input_j = input_vector[j];
          __m64 row_signs = _mm_cmpgt_pi8(kZeros, row_j);
          __m64 extended_row_lo = _mm_unpacklo_pi8(row_j, row_signs);
          __m64 extended_row_hi = _mm_unpackhi_pi8(row_j, row_signs);
          __m64 extended_input_lo = _mm_unpacklo_pi8(input_j, kZeros);
          __m64 extended_input_hi = _mm_unpackhi_pi8(input_j, kZeros);
          __m64 product_lo = _mm_madd_pi16(extended_row_lo, extended_input_lo);
          __m64 product_hi = _mm_madd_pi16(extended_row_hi, extended_input_hi);
          sum_lo = _mm_add_pi32(sum_lo, product_lo);
          sum_hi = _mm_add_pi32(sum_hi, product_hi);
        }
        __m64 sum = _mm_add_pi32(sum_lo, sum_hi);
        sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
        output[i] = _mm_cvtsi64_si32(sum);

#elif defined(USE_NEON)
        int32x4_t sum = {biases_[i]};
        const auto row = reinterpret_cast<const int8x8_t*>(&weights_[offset]);
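        // vmull_s8/vmlal_s8 widen the 8-bit products to 16 bits; vpadalq_s16
        // then pairwise-adds and accumulates them into the four 32-bit lanes
        // of sum.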
        for (IndexType j = 0; j < kNumChunks; ++j) {
          int16x8_t product = vmull_s8(input_vector[j * 2], row[j * 2]);
          product = vmlal_s8(product, input_vector[j * 2 + 1], row[j * 2 + 1]);
          sum = vpadalq_s16(sum, product);
        }
        output[i] = sum[0] + sum[1] + sum[2] + sum[3];

#else
        OutputType sum = biases_[i];
        for (IndexType j = 0; j < kInputDimensions; ++j) {
          sum += weights_[offset + j] * input[j];
        }
        output[i] = sum;
#endif

      }
#if defined(USE_MMX)
      _mm_empty();
#endif
      return output;
    }

   private:
    using BiasType = OutputType;
    using WeightType = std::int8_t;

    PreviousLayer previous_layer_;

    alignas(kCacheLineSize) BiasType biases_[kOutputDimensions];
    alignas(kCacheLineSize)
        WeightType weights_[kOutputDimensions * kPaddedInputDimensions];
  };
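
  // Usage sketch (illustrative only; the real layer stacks are declared in
  // the architecture headers, e.g. architectures/halfkp_256x2-32-32.h).
  // Assuming a hypothetical preceding layer alias `PrevLayer` whose
  // OutputType is std::uint8_t, a 32-output affine layer reads:
  //
  //   using Hidden = AffineTransform<PrevLayer, 32>;
  //   static_assert(Hidden::kOutputDimensions == 32, "");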

}  // namespace Eval::NNUE::Layers

#endif // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED