/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2020 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

// Definition of layer AffineTransform of NNUE evaluation function

#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED

#include <cassert>
#include <iostream>

#include "../nnue_common.h"

namespace Eval::NNUE::Layers {
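
  // A note on usage (illustrative only; the exact layer names and sizes live in the
  // architecture headers, not here): this template is chained with the other NNUE
  // layers, each layer's OutputType feeding the next layer's InputType, e.g. roughly
  //
  //   using Hidden1 = ClippedReLU<AffineTransform<InputLayer, 32>>;
  //   using Hidden2 = ClippedReLU<AffineTransform<Hidden1, 32>>;
  //   using Output  = AffineTransform<Hidden2, 1>;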

  // Affine transformation layer
  template <typename PreviousLayer, IndexType OutputDimensions>
  class AffineTransform {
   public:
    // Input/output type
    using InputType = typename PreviousLayer::OutputType;
    using OutputType = std::int32_t;
    static_assert(std::is_same<InputType, std::uint8_t>::value, "");

    // Number of input/output dimensions
    static constexpr IndexType kInputDimensions =
        PreviousLayer::kOutputDimensions;
    static constexpr IndexType kOutputDimensions = OutputDimensions;
    static constexpr IndexType kPaddedInputDimensions =
        CeilToMultiple<IndexType>(kInputDimensions, kMaxSimdWidth);
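    // Illustrative example, assuming kMaxSimdWidth == 32 (see nnue_common.h):
    // kInputDimensions == 32 is left unchanged, while kInputDimensions == 40
    // would be padded up to 64, so that every weight row occupies a whole
    // number of SIMD registers.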

    // Size of forward propagation buffer used in this layer
    static constexpr std::size_t kSelfBufferSize =
        CeilToMultiple(kOutputDimensions * sizeof(OutputType), kCacheLineSize);

    // Size of the forward propagation buffer used from the input layer to this layer
    static constexpr std::size_t kBufferSize =
        PreviousLayer::kBufferSize + kSelfBufferSize;
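    // How the buffer is shared (see Propagate below): this layer writes its own
    // output at the start of `buffer` and hands `buffer + kSelfBufferSize` to the
    // previous layer, so the per-layer buffers are laid out back to back and
    // kBufferSize is simply their sum.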

    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t GetHashValue() {
      std::uint32_t hash_value = 0xCC03DAE4u;
      hash_value += kOutputDimensions;
      hash_value ^= PreviousLayer::GetHashValue() >> 1;
      hash_value ^= PreviousLayer::GetHashValue() << 31;
      return hash_value;
    }

    // Read network parameters
    bool ReadParameters(std::istream& stream) {
      if (!previous_layer_.ReadParameters(stream)) return false;
      for (std::size_t i = 0; i < kOutputDimensions; ++i)
        biases_[i] = read_little_endian<BiasType>(stream);
      for (std::size_t i = 0; i < kOutputDimensions * kPaddedInputDimensions; ++i)
        weights_[i] = read_little_endian<WeightType>(stream);
      return !stream.fail();
    }
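
    // On-disk layout implied by the loops above: after the previous layer's
    // parameters come kOutputDimensions little-endian 32-bit biases, followed by
    // kOutputDimensions * kPaddedInputDimensions 8-bit weights stored row-major,
    // one padded input row per output neuron.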

    // Forward propagation
    const OutputType* Propagate(
        const TransformedFeatureType* transformed_features, char* buffer) const {
      const auto input = previous_layer_.Propagate(
          transformed_features, buffer + kSelfBufferSize);
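
      // Every code path below computes the same thing: for each output index i,
      //   output[i] = biases_[i] + dot(row i of weights_, input)
      // with 8-bit inputs/weights and 32-bit accumulation. The branches only differ
      // in which SIMD instruction set is used; the plain C++ fallback at the end of
      // this function is the reference formulation.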

#if defined (USE_AVX512)

      [[maybe_unused]] const __m512i kOnes512 = _mm512_set1_epi16(1);

      [[maybe_unused]] auto m512_hadd = [](__m512i sum, int bias) -> int {
        return _mm512_reduce_add_epi32(sum) + bias;
      };

      // This function takes
      //   sum0 = [xmm0a, xmm0b, xmm0c, xmm0d]
      //   sum1 = [xmm1a, xmm1b, xmm1c, xmm1d]
      //   sum2 = [xmm2a, xmm2b, xmm2c, xmm2d]
      //   sum3 = [xmm3a, xmm3b, xmm3c, xmm3d]
      // and returns
      //   ret = [
      //     reduce_add_epi32(xmm0a), reduce_add_epi32(xmm1a), reduce_add_epi32(xmm2a), reduce_add_epi32(xmm3a),
      //     reduce_add_epi32(xmm0b), reduce_add_epi32(xmm1b), reduce_add_epi32(xmm2b), reduce_add_epi32(xmm3b),
      //     reduce_add_epi32(xmm0c), reduce_add_epi32(xmm1c), reduce_add_epi32(xmm2c), reduce_add_epi32(xmm3c),
      //     reduce_add_epi32(xmm0d), reduce_add_epi32(xmm1d), reduce_add_epi32(xmm2d), reduce_add_epi32(xmm3d)
      //   ]
      [[maybe_unused]] auto m512_hadd128x16_interleave = [](
          __m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3) -> __m512i {

        __m512i sum01a = _mm512_unpacklo_epi32(sum0, sum1);
        __m512i sum01b = _mm512_unpackhi_epi32(sum0, sum1);

        __m512i sum23a = _mm512_unpacklo_epi32(sum2, sum3);
        __m512i sum23b = _mm512_unpackhi_epi32(sum2, sum3);

        __m512i sum01 = _mm512_add_epi32(sum01a, sum01b);
        __m512i sum23 = _mm512_add_epi32(sum23a, sum23b);

        __m512i sum0123a = _mm512_unpacklo_epi64(sum01, sum23);
        __m512i sum0123b = _mm512_unpackhi_epi64(sum01, sum23);

        return _mm512_add_epi32(sum0123a, sum0123b);
      };

      [[maybe_unused]] auto m512_haddx4 = [m512_hadd128x16_interleave](
          __m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3, __m128i bias) -> __m128i {

        __m512i sum = m512_hadd128x16_interleave(sum0, sum1, sum2, sum3);

        __m256i sum256lo = _mm512_castsi512_si256(sum);
        __m256i sum256hi = _mm512_extracti64x4_epi64(sum, 1);

        sum256lo = _mm256_add_epi32(sum256lo, sum256hi);

        __m128i sum128lo = _mm256_castsi256_si128(sum256lo);
        __m128i sum128hi = _mm256_extracti128_si256(sum256lo, 1);

        return _mm_add_epi32(_mm_add_epi32(sum128lo, sum128hi), bias);
      };

      [[maybe_unused]] auto m512_haddx8 = [m512_hadd128x16_interleave](
          __m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3,
          __m512i sum4, __m512i sum5, __m512i sum6, __m512i sum7, __m256i bias) -> __m256i {

        __m512i suma = m512_hadd128x16_interleave(sum0, sum1, sum2, sum3);
        __m512i sumb = m512_hadd128x16_interleave(sum4, sum5, sum6, sum7);

        __m512i indices0 = _mm512_setr_epi64(0, 1, 8, 9, 4, 5, 12, 13);
        __m512i indices1 = _mm512_setr_epi64(2, 3, 10, 11, 6, 7, 14, 15);
        __m512i x = _mm512_add_epi32(
            _mm512_permutex2var_epi64(suma, indices0, sumb),
            _mm512_permutex2var_epi64(suma, indices1, sumb));

        __m256i sum256lo = _mm512_castsi512_si256(x);
        __m256i sum256hi = _mm512_extracti64x4_epi64(x, 1);

        return _mm256_add_epi32(_mm256_add_epi32(sum256lo, sum256hi), bias);
      };

      [[maybe_unused]] auto m512_hadd256x8 = [m512_hadd128x16_interleave](
          __m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3, __m256i bias) -> __m256i {

        __m512i sum = m512_hadd128x16_interleave(sum0, sum1, sum2, sum3);

        __m512i indices = _mm512_setr_epi32(
            0, 4, 8, 12, 2, 6, 10, 14,
            1, 5, 9, 13, 3, 7, 11, 15);
        sum = _mm512_permutexvar_epi32(indices, sum);

        __m256i sum256lo = _mm512_castsi512_si256(sum);
        __m256i sum256hi = _mm512_extracti64x4_epi64(sum, 1);

        return _mm256_add_epi32(_mm256_hadd_epi32(sum256lo, sum256hi), bias);
      };

      [[maybe_unused]] auto m512_hadd256x16 = [m512_hadd128x16_interleave](
          __m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3,
          __m512i sum4, __m512i sum5, __m512i sum6, __m512i sum7, __m512i bias) -> __m512i {

        __m512i suma = m512_hadd128x16_interleave(sum0, sum1, sum2, sum3);
        __m512i sumb = m512_hadd128x16_interleave(sum4, sum5, sum6, sum7);

        __m512i indices0 = _mm512_setr_epi64(0, 1, 8, 9, 4, 5, 12, 13);
        __m512i indices1 = _mm512_setr_epi64(2, 3, 10, 11, 6, 7, 14, 15);
        __m512i x = _mm512_add_epi32(
            _mm512_permutex2var_epi64(suma, indices0, sumb),
            _mm512_permutex2var_epi64(suma, indices1, sumb));

        __m512i indices = _mm512_setr_epi32(0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15);
        return _mm512_add_epi32(_mm512_permutexvar_epi32(indices, x), bias);
      };

#if defined (USE_VNNI)
      [[maybe_unused]] auto m512_add_dpbusd_epi32 = [=](__m512i& acc, __m512i a, __m512i b) {
        acc = _mm512_dpbusd_epi32(acc, a, b);
      };
#else
      [[maybe_unused]] auto m512_dpbusd_epi32 = [=](__m512i a, __m512i b) -> __m512i {
        __m512i product0 = _mm512_maddubs_epi16(a, b);
        return _mm512_madd_epi16(product0, kOnes512);
      };
#endif

#endif
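
      // A note on the two variants above: with VNNI, _mm512_dpbusd_epi32 multiplies
      // unsigned 8-bit lanes of `a` with signed 8-bit lanes of `b` and accumulates
      // each group of four products into the corresponding 32-bit lane in a single
      // instruction. Without VNNI the same result (up to intermediate 16-bit
      // saturation) is obtained in two steps: maddubs produces 16-bit pair sums and
      // madd against a vector of ones widens and adds them to 32 bits. The 256-bit
      // and 128-bit code paths below use the same emulation.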

#if defined (USE_AVX2)

      [[maybe_unused]] const __m256i kOnes256 = _mm256_set1_epi16(1);

      [[maybe_unused]] auto m256_hadd = [](__m256i sum, int bias) -> int {
        __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extracti128_si256(sum, 1));
        sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_BADC));
        sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_CDAB));
        return _mm_cvtsi128_si32(sum128) + bias;
      };

      [[maybe_unused]] auto m256_haddx4 = [](__m256i sum0, __m256i sum1, __m256i sum2, __m256i sum3, __m128i bias) -> __m128i {
        sum0 = _mm256_hadd_epi32(sum0, sum1);
        sum2 = _mm256_hadd_epi32(sum2, sum3);

        sum0 = _mm256_hadd_epi32(sum0, sum2);

        __m128i sum128lo = _mm256_castsi256_si128(sum0);
        __m128i sum128hi = _mm256_extracti128_si256(sum0, 1);

        return _mm_add_epi32(_mm_add_epi32(sum128lo, sum128hi), bias);
      };
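
      // In other words: the two hadd rounds interleave the partial sums of the four
      // accumulators, the final 128-bit add folds the two halves together, and the
      // result is a single __m128i holding the four finished dot products plus bias.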

#if defined (USE_VNNI)
      [[maybe_unused]] auto m256_add_dpbusd_epi32 = [=](__m256i& acc, __m256i a, __m256i b) {
        acc = _mm256_dpbusd_epi32(acc, a, b);
      };
#else
      [[maybe_unused]] auto m256_dpbusd_epi32 = [=](__m256i a, __m256i b) -> __m256i {
        __m256i product0 = _mm256_maddubs_epi16(a, b);
        return _mm256_madd_epi16(product0, kOnes256);
      };
#endif

#endif

#if defined (USE_SSSE3)

      [[maybe_unused]] const __m128i kOnes128 = _mm_set1_epi16(1);

      [[maybe_unused]] auto m128_hadd = [](__m128i sum, int bias) -> int {
        sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0x4E)); //_MM_PERM_BADC
        sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0xB1)); //_MM_PERM_CDAB
        return _mm_cvtsi128_si32(sum) + bias;
      };

      [[maybe_unused]] auto m128_haddx4 = [](__m128i sum0, __m128i sum1, __m128i sum2, __m128i sum3, __m128i bias) -> __m128i {
        sum0 = _mm_hadd_epi32(sum0, sum1);
        sum2 = _mm_hadd_epi32(sum2, sum3);

        sum0 = _mm_hadd_epi32(sum0, sum2);

        return _mm_add_epi32(sum0, bias);
      };

      [[maybe_unused]] auto m128_dpbusd_epi32 = [=](__m128i a, __m128i b) -> __m128i {
        __m128i product0 = _mm_maddubs_epi16(a, b);
        return _mm_madd_epi16(product0, kOnes128);
      };

#endif

#if defined (USE_AVX512)

      constexpr IndexType kNumChunks512 = kPaddedInputDimensions / (kSimdWidth * 2);
      constexpr IndexType kNumChunks256 = kPaddedInputDimensions / kSimdWidth;

      const auto output = reinterpret_cast<OutputType*>(buffer);

      // Since it takes 64 bytes to fill a zmm register, we cannot use AVX512 for
      // the smaller affine transforms. Instead we fall back to an AVX2
      // implementation if kInputDimensions isn't a multiple of 64.
      // Note that this means that, for example, for kInputDimensions of 96 we
      // fall back to AVX2 even though the first 64 elements could be processed
      // with AVX512. Handling that case would require mixing __m256 and __m512
      // variables and handling more cases statically in order not to lose
      // performance. This should be revisited if such input dimensions are ever
      // used.
      [[maybe_unused]] const auto input_vector512 = reinterpret_cast<const __m512i*>(input);
      [[maybe_unused]] const auto input_vector256 = reinterpret_cast<const __m256i*>(input);

      // kOutputDimensions is either 1 or a multiple of kSimdWidth
      // because then it is also an input dimension.
      if constexpr (kOutputDimensions % 16 == 0 && kNumChunks256 == 1)
      {
        for (IndexType i = 0; i < kOutputDimensions; i += 16)
        {
          const IndexType offset01a = (i + 0) * kPaddedInputDimensions;
          const IndexType offset23a = (i + 2) * kPaddedInputDimensions;
          const IndexType offset45a = (i + 4) * kPaddedInputDimensions;
          const IndexType offset67a = (i + 6) * kPaddedInputDimensions;
          const IndexType offset01b = (i + 8) * kPaddedInputDimensions;
          const IndexType offset23b = (i + 10) * kPaddedInputDimensions;
          const IndexType offset45b = (i + 12) * kPaddedInputDimensions;
          const IndexType offset67b = (i + 14) * kPaddedInputDimensions;

          const __m512i bias = *reinterpret_cast<const __m512i*>(&biases_[i]);
          __m512i* outptr = reinterpret_cast<__m512i*>(&output[i]);

          const auto row01a = *reinterpret_cast<const __m512i*>(&weights_[offset01a]);
          const auto row23a = *reinterpret_cast<const __m512i*>(&weights_[offset23a]);
          const auto row45a = *reinterpret_cast<const __m512i*>(&weights_[offset45a]);
          const auto row67a = *reinterpret_cast<const __m512i*>(&weights_[offset67a]);
          const auto row01b = *reinterpret_cast<const __m512i*>(&weights_[offset01b]);
          const auto row23b = *reinterpret_cast<const __m512i*>(&weights_[offset23b]);
          const auto row45b = *reinterpret_cast<const __m512i*>(&weights_[offset45b]);
          const auto row67b = *reinterpret_cast<const __m512i*>(&weights_[offset67b]);

          const __m256i in256 = input_vector256[0];
          const __m512i in = _mm512_inserti64x4(_mm512_castsi256_si512(in256), in256, 1);
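
          // Note on the broadcast above: in this branch the whole (padded) input fits
          // in one 256-bit chunk, so it is duplicated into both halves of a zmm
          // register. Each 512-bit "row" load then covers two consecutive padded
          // weight rows, letting a single dpbusd step accumulate two outputs at once;
          // m512_hadd256x16 untangles the 16 partial sums afterwards.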

#if defined (USE_VNNI)
          __m512i sum01a = _mm512_setzero_si512();
          __m512i sum23a = _mm512_setzero_si512();
          __m512i sum45a = _mm512_setzero_si512();
          __m512i sum67a = _mm512_setzero_si512();
          __m512i sum01b = _mm512_setzero_si512();
          __m512i sum23b = _mm512_setzero_si512();
          __m512i sum45b = _mm512_setzero_si512();
          __m512i sum67b = _mm512_setzero_si512();

          m512_add_dpbusd_epi32(sum01a, in, row01a);
          m512_add_dpbusd_epi32(sum23a, in, row23a);
          m512_add_dpbusd_epi32(sum45a, in, row45a);
          m512_add_dpbusd_epi32(sum67a, in, row67a);
          m512_add_dpbusd_epi32(sum01b, in, row01b);
          m512_add_dpbusd_epi32(sum23b, in, row23b);
          m512_add_dpbusd_epi32(sum45b, in, row45b);
          m512_add_dpbusd_epi32(sum67b, in, row67b);
#else
          __m512i sum01a = m512_dpbusd_epi32(in, row01a);
          __m512i sum23a = m512_dpbusd_epi32(in, row23a);
          __m512i sum45a = m512_dpbusd_epi32(in, row45a);
          __m512i sum67a = m512_dpbusd_epi32(in, row67a);
          __m512i sum01b = m512_dpbusd_epi32(in, row01b);
          __m512i sum23b = m512_dpbusd_epi32(in, row23b);
          __m512i sum45b = m512_dpbusd_epi32(in, row45b);
          __m512i sum67b = m512_dpbusd_epi32(in, row67b);
#endif

          *outptr = m512_hadd256x16(
              sum01a, sum23a, sum45a, sum67a,
              sum01b, sum23b, sum45b, sum67b, bias);
        }
      }
      else if constexpr (kOutputDimensions % 4 == 0)
      {
        for (IndexType i = 0; i < kOutputDimensions; i += 4)
        {
          const IndexType offset0 = (i + 0) * kPaddedInputDimensions;
          const IndexType offset1 = (i + 1) * kPaddedInputDimensions;
          const IndexType offset2 = (i + 2) * kPaddedInputDimensions;
          const IndexType offset3 = (i + 3) * kPaddedInputDimensions;

          const __m128i bias = *reinterpret_cast<const __m128i*>(&biases_[i]);
          __m128i* outptr = reinterpret_cast<__m128i*>(&output[i]);

          if constexpr (kPaddedInputDimensions % (kSimdWidth * 2) == 0)
          {
            const auto row0 = reinterpret_cast<const __m512i*>(&weights_[offset0]);
            const auto row1 = reinterpret_cast<const __m512i*>(&weights_[offset1]);
            const auto row2 = reinterpret_cast<const __m512i*>(&weights_[offset2]);
            const auto row3 = reinterpret_cast<const __m512i*>(&weights_[offset3]);

#if defined (USE_VNNI)
            __m512i sum0 = _mm512_setzero_si512();
            __m512i sum1 = _mm512_setzero_si512();
            __m512i sum2 = _mm512_setzero_si512();
            __m512i sum3 = _mm512_setzero_si512();
            const IndexType kStart = 0;
#else
            __m512i sum0 = m512_dpbusd_epi32(input_vector512[0], row0[0]);
            __m512i sum1 = m512_dpbusd_epi32(input_vector512[0], row1[0]);
            __m512i sum2 = m512_dpbusd_epi32(input_vector512[0], row2[0]);
            __m512i sum3 = m512_dpbusd_epi32(input_vector512[0], row3[0]);
            const IndexType kStart = 1;
#endif

            for (IndexType j = kStart; j < kNumChunks512; ++j)
            {
              const __m512i in = input_vector512[j];

#if defined (USE_VNNI)
              m512_add_dpbusd_epi32(sum0, in, row0[j]);
              m512_add_dpbusd_epi32(sum1, in, row1[j]);
              m512_add_dpbusd_epi32(sum2, in, row2[j]);
              m512_add_dpbusd_epi32(sum3, in, row3[j]);
#else
              sum0 = _mm512_add_epi32(sum0, m512_dpbusd_epi32(in, row0[j]));
              sum1 = _mm512_add_epi32(sum1, m512_dpbusd_epi32(in, row1[j]));
              sum2 = _mm512_add_epi32(sum2, m512_dpbusd_epi32(in, row2[j]));
              sum3 = _mm512_add_epi32(sum3, m512_dpbusd_epi32(in, row3[j]));
#endif
            }

            *outptr = m512_haddx4(sum0, sum1, sum2, sum3, bias);
          }
          else
          {
            const auto row0 = reinterpret_cast<const __m256i*>(&weights_[offset0]);
            const auto row1 = reinterpret_cast<const __m256i*>(&weights_[offset1]);
            const auto row2 = reinterpret_cast<const __m256i*>(&weights_[offset2]);
            const auto row3 = reinterpret_cast<const __m256i*>(&weights_[offset3]);

#if defined (USE_VNNI)
            __m256i sum0 = _mm256_setzero_si256();
            __m256i sum1 = _mm256_setzero_si256();
            __m256i sum2 = _mm256_setzero_si256();
            __m256i sum3 = _mm256_setzero_si256();
            const IndexType kStart = 0;
#else
            __m256i sum0 = m256_dpbusd_epi32(input_vector256[0], row0[0]);
            __m256i sum1 = m256_dpbusd_epi32(input_vector256[0], row1[0]);
            __m256i sum2 = m256_dpbusd_epi32(input_vector256[0], row2[0]);
            __m256i sum3 = m256_dpbusd_epi32(input_vector256[0], row3[0]);
            const IndexType kStart = 1;
#endif

            for (IndexType j = kStart; j < kNumChunks256; ++j)
            {
              const __m256i in = input_vector256[j];

#if defined (USE_VNNI)
              m256_add_dpbusd_epi32(sum0, in, row0[j]);
              m256_add_dpbusd_epi32(sum1, in, row1[j]);
              m256_add_dpbusd_epi32(sum2, in, row2[j]);
              m256_add_dpbusd_epi32(sum3, in, row3[j]);
#else
              sum0 = _mm256_add_epi32(sum0, m256_dpbusd_epi32(in, row0[j]));
              sum1 = _mm256_add_epi32(sum1, m256_dpbusd_epi32(in, row1[j]));
              sum2 = _mm256_add_epi32(sum2, m256_dpbusd_epi32(in, row2[j]));
              sum3 = _mm256_add_epi32(sum3, m256_dpbusd_epi32(in, row3[j]));
#endif
            }

            *outptr = m256_haddx4(sum0, sum1, sum2, sum3, bias);
          }
        }
      }
      else if constexpr (kOutputDimensions == 1)
      {
        if constexpr (kPaddedInputDimensions % (kSimdWidth * 2) == 0)
        {
          const auto row0 = reinterpret_cast<const __m512i*>(&weights_[0]);

#if defined (USE_VNNI)
          __m512i sum0 = _mm512_setzero_si512();
          const IndexType kStart = 0;
#else
          __m512i sum0 = m512_dpbusd_epi32(input_vector512[0], row0[0]);
          const IndexType kStart = 1;
#endif

          for (IndexType j = kStart; j < kNumChunks512; ++j)
          {
            const __m512i in = input_vector512[j];

#if defined (USE_VNNI)
            m512_add_dpbusd_epi32(sum0, in, row0[j]);
#else
            sum0 = _mm512_add_epi32(sum0, m512_dpbusd_epi32(in, row0[j]));
#endif
          }

          output[0] = m512_hadd(sum0, biases_[0]);
        }
        else
        {
          const auto row0 = reinterpret_cast<const __m256i*>(&weights_[0]);

#if defined (USE_VNNI)
          __m256i sum0 = _mm256_setzero_si256();
          const IndexType kStart = 0;
#else
          __m256i sum0 = m256_dpbusd_epi32(input_vector256[0], row0[0]);
          const IndexType kStart = 1;
#endif

          for (IndexType j = kStart; j < kNumChunks256; ++j)
          {
            const __m256i in = input_vector256[j];

#if defined (USE_VNNI)
            m256_add_dpbusd_epi32(sum0, in, row0[j]);
#else
            sum0 = _mm256_add_epi32(sum0, m256_dpbusd_epi32(in, row0[j]));
#endif
          }

          output[0] = m256_hadd(sum0, biases_[0]);
        }
      }
      else
      {
        // This case can never happen because kOutputDimensions
        // is always 1 or a multiple of kSimdWidth.
        assert(false);
      }

#elif defined (USE_AVX2)

      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;

      const auto output = reinterpret_cast<OutputType*>(buffer);
      const auto input_vector = reinterpret_cast<const __m256i*>(input);

      // kOutputDimensions is either 1 or a multiple of kSimdWidth
      // because then it is also an input dimension.
      if constexpr (kOutputDimensions % 4 == 0)
      {
        for (IndexType i = 0; i < kOutputDimensions; i += 4)
        {
          const IndexType offset0 = (i + 0) * kPaddedInputDimensions;
          const IndexType offset1 = (i + 1) * kPaddedInputDimensions;
          const IndexType offset2 = (i + 2) * kPaddedInputDimensions;
          const IndexType offset3 = (i + 3) * kPaddedInputDimensions;

          const __m128i bias = *reinterpret_cast<const __m128i*>(&biases_[i]);
          __m128i* outptr = reinterpret_cast<__m128i*>(&output[i]);

          const auto row0 = reinterpret_cast<const __m256i*>(&weights_[offset0]);
          const auto row1 = reinterpret_cast<const __m256i*>(&weights_[offset1]);
          const auto row2 = reinterpret_cast<const __m256i*>(&weights_[offset2]);
          const auto row3 = reinterpret_cast<const __m256i*>(&weights_[offset3]);

#if defined (USE_VNNI)
          __m256i sum0 = _mm256_setzero_si256();
          __m256i sum1 = _mm256_setzero_si256();
          __m256i sum2 = _mm256_setzero_si256();
          __m256i sum3 = _mm256_setzero_si256();
          const IndexType kStart = 0;
#else
          __m256i sum0 = m256_dpbusd_epi32(input_vector[0], row0[0]);
          __m256i sum1 = m256_dpbusd_epi32(input_vector[0], row1[0]);
          __m256i sum2 = m256_dpbusd_epi32(input_vector[0], row2[0]);
          __m256i sum3 = m256_dpbusd_epi32(input_vector[0], row3[0]);
          const IndexType kStart = 1;
#endif

          for (IndexType j = kStart; j < kNumChunks; ++j)
          {
            const __m256i in = input_vector[j];

#if defined (USE_VNNI)
            m256_add_dpbusd_epi32(sum0, in, row0[j]);
            m256_add_dpbusd_epi32(sum1, in, row1[j]);
            m256_add_dpbusd_epi32(sum2, in, row2[j]);
            m256_add_dpbusd_epi32(sum3, in, row3[j]);
#else
            sum0 = _mm256_add_epi32(sum0, m256_dpbusd_epi32(in, row0[j]));
            sum1 = _mm256_add_epi32(sum1, m256_dpbusd_epi32(in, row1[j]));
            sum2 = _mm256_add_epi32(sum2, m256_dpbusd_epi32(in, row2[j]));
            sum3 = _mm256_add_epi32(sum3, m256_dpbusd_epi32(in, row3[j]));
#endif
          }

          *outptr = m256_haddx4(sum0, sum1, sum2, sum3, bias);
        }
      }
      else if constexpr (kOutputDimensions == 1)
      {
        const auto row0 = reinterpret_cast<const __m256i*>(&weights_[0]);

#if defined (USE_VNNI)
        __m256i sum0 = _mm256_setzero_si256();
        const IndexType kStart = 0;
#else
        __m256i sum0 = m256_dpbusd_epi32(input_vector[0], row0[0]);
        const IndexType kStart = 1;
#endif

        for (IndexType j = kStart; j < kNumChunks; ++j)
        {
          const __m256i in = input_vector[j];

#if defined (USE_VNNI)
          m256_add_dpbusd_epi32(sum0, in, row0[j]);
#else
          sum0 = _mm256_add_epi32(sum0, m256_dpbusd_epi32(in, row0[j]));
#endif
        }

        output[0] = m256_hadd(sum0, biases_[0]);
      }
      else
      {
        // This case can never happen because kOutputDimensions
        // is always 1 or a multiple of kSimdWidth.
        assert(false);
      }

#elif defined (USE_SSSE3)

      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;

      auto output = reinterpret_cast<OutputType*>(buffer);
      const auto input_vector = reinterpret_cast<const __m128i*>(input);

      // kOutputDimensions is either 1 or a multiple of kSimdWidth
      // because then it is also an input dimension.
      if constexpr (kOutputDimensions % 4 == 0)
      {
        for (IndexType i = 0; i < kOutputDimensions; i += 4)
        {
          const IndexType offset0 = (i + 0) * kPaddedInputDimensions;
          const IndexType offset1 = (i + 1) * kPaddedInputDimensions;
          const IndexType offset2 = (i + 2) * kPaddedInputDimensions;
          const IndexType offset3 = (i + 3) * kPaddedInputDimensions;

          const __m128i bias = *reinterpret_cast<const __m128i*>(&biases_[i]);
          __m128i* outptr = reinterpret_cast<__m128i*>(&output[i]);

          const auto row0 = reinterpret_cast<const __m128i*>(&weights_[offset0]);
          const auto row1 = reinterpret_cast<const __m128i*>(&weights_[offset1]);
          const auto row2 = reinterpret_cast<const __m128i*>(&weights_[offset2]);
          const auto row3 = reinterpret_cast<const __m128i*>(&weights_[offset3]);

          __m128i sum0 = m128_dpbusd_epi32(input_vector[0], row0[0]);
          __m128i sum1 = m128_dpbusd_epi32(input_vector[0], row1[0]);
          __m128i sum2 = m128_dpbusd_epi32(input_vector[0], row2[0]);
          __m128i sum3 = m128_dpbusd_epi32(input_vector[0], row3[0]);

          for (int j = 1; j < (int)kNumChunks; ++j)
          {
            const __m128i in = input_vector[j];

            sum0 = _mm_add_epi32(sum0, m128_dpbusd_epi32(in, row0[j]));
            sum1 = _mm_add_epi32(sum1, m128_dpbusd_epi32(in, row1[j]));
            sum2 = _mm_add_epi32(sum2, m128_dpbusd_epi32(in, row2[j]));
            sum3 = _mm_add_epi32(sum3, m128_dpbusd_epi32(in, row3[j]));
          }

          *outptr = m128_haddx4(sum0, sum1, sum2, sum3, bias);
        }
      }
      else if constexpr (kOutputDimensions == 1)
      {
        const auto row0 = reinterpret_cast<const __m128i*>(&weights_[0]);

        __m128i sum0 = m128_dpbusd_epi32(input_vector[0], row0[0]);

        for (int j = 1; j < (int)kNumChunks; ++j)
          sum0 = _mm_add_epi32(sum0, m128_dpbusd_epi32(input_vector[j], row0[j]));

        output[0] = m128_hadd(sum0, biases_[0]);
      }
      else
      {
        // This case can never happen because kOutputDimensions
        // is always 1 or a multiple of kSimdWidth.
        assert(false);
      }

#else

      // Use old implementation for the other architectures.

      auto output = reinterpret_cast<OutputType*>(buffer);

#if defined(USE_SSE2)
      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
#ifndef USE_SSSE3
      const __m128i kZeros = _mm_setzero_si128();
#else
      const __m128i kOnes = _mm_set1_epi16(1);
#endif
      const auto input_vector = reinterpret_cast<const __m128i*>(input);

#elif defined(USE_MMX)
      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
      const __m64 kZeros = _mm_setzero_si64();
      const auto input_vector = reinterpret_cast<const __m64*>(input);

#elif defined(USE_NEON)
      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
      const auto input_vector = reinterpret_cast<const int8x8_t*>(input);
#endif

      for (IndexType i = 0; i < kOutputDimensions; ++i) {
        const IndexType offset = i * kPaddedInputDimensions;

#if defined(USE_SSE2)
        __m128i sum_lo = _mm_cvtsi32_si128(biases_[i]);
        __m128i sum_hi = kZeros;
        const auto row = reinterpret_cast<const __m128i*>(&weights_[offset]);
        for (IndexType j = 0; j < kNumChunks; ++j) {
          __m128i row_j = _mm_load_si128(&row[j]);
          __m128i input_j = _mm_load_si128(&input_vector[j]);
          __m128i row_signs = _mm_cmpgt_epi8(kZeros, row_j);
          __m128i extended_row_lo = _mm_unpacklo_epi8(row_j, row_signs);
          __m128i extended_row_hi = _mm_unpackhi_epi8(row_j, row_signs);
          __m128i extended_input_lo = _mm_unpacklo_epi8(input_j, kZeros);
          __m128i extended_input_hi = _mm_unpackhi_epi8(input_j, kZeros);
          __m128i product_lo = _mm_madd_epi16(extended_row_lo, extended_input_lo);
          __m128i product_hi = _mm_madd_epi16(extended_row_hi, extended_input_hi);
          sum_lo = _mm_add_epi32(sum_lo, product_lo);
          sum_hi = _mm_add_epi32(sum_hi, product_hi);
        }
        __m128i sum = _mm_add_epi32(sum_lo, sum_hi);
        __m128i sum_high_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
        sum = _mm_add_epi32(sum, sum_high_64);
        __m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
        sum = _mm_add_epi32(sum, sum_second_32);
        output[i] = _mm_cvtsi128_si32(sum);
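        // The two shuffles above fold the four 32-bit lanes of `sum` down into
        // lane 0: the epi32 shuffle adds the upper 64 bits onto the lower 64, and
        // the 16-bit "lo" shuffle then swaps the two remaining 32-bit words so
        // their sum lands in the lowest lane before it is extracted.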

#elif defined(USE_MMX)
        __m64 sum_lo = _mm_cvtsi32_si64(biases_[i]);
        __m64 sum_hi = kZeros;
        const auto row = reinterpret_cast<const __m64*>(&weights_[offset]);
        for (IndexType j = 0; j < kNumChunks; ++j) {
          __m64 row_j = row[j];
          __m64 input_j = input_vector[j];
          __m64 row_signs = _mm_cmpgt_pi8(kZeros, row_j);
          __m64 extended_row_lo = _mm_unpacklo_pi8(row_j, row_signs);
          __m64 extended_row_hi = _mm_unpackhi_pi8(row_j, row_signs);
          __m64 extended_input_lo = _mm_unpacklo_pi8(input_j, kZeros);
          __m64 extended_input_hi = _mm_unpackhi_pi8(input_j, kZeros);
          __m64 product_lo = _mm_madd_pi16(extended_row_lo, extended_input_lo);
          __m64 product_hi = _mm_madd_pi16(extended_row_hi, extended_input_hi);
          sum_lo = _mm_add_pi32(sum_lo, product_lo);
          sum_hi = _mm_add_pi32(sum_hi, product_hi);
        }
        __m64 sum = _mm_add_pi32(sum_lo, sum_hi);
        sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
        output[i] = _mm_cvtsi64_si32(sum);

#elif defined(USE_NEON)
        int32x4_t sum = {biases_[i]};
        const auto row = reinterpret_cast<const int8x8_t*>(&weights_[offset]);
        for (IndexType j = 0; j < kNumChunks; ++j) {
          int16x8_t product = vmull_s8(input_vector[j * 2], row[j * 2]);
          product = vmlal_s8(product, input_vector[j * 2 + 1], row[j * 2 + 1]);
          sum = vpadalq_s16(sum, product);
        }
        output[i] = sum[0] + sum[1] + sum[2] + sum[3];
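        // Note: on this path the uint8 inputs are handled through int8x8_t, which
        // is only safe because the activations from the previous layer are expected
        // to stay in 0..127 (the clipped ReLU range), so the values are identical
        // when reinterpreted as signed bytes.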

#else
        OutputType sum = biases_[i];
        for (IndexType j = 0; j < kInputDimensions; ++j) {
          sum += weights_[offset + j] * input[j];
        }
        output[i] = sum;
#endif

      }
#if defined(USE_MMX)
      _mm_empty();
#endif

#endif

      return output;
    }

   private:
    using BiasType = OutputType;
    using WeightType = std::int8_t;

    PreviousLayer previous_layer_;

    alignas(kCacheLineSize) BiasType biases_[kOutputDimensions];
    alignas(kCacheLineSize)
        WeightType weights_[kOutputDimensions * kPaddedInputDimensions];
  };

}  // namespace Eval::NNUE::Layers

#endif // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED