/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2021 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// Definition of layer AffineTransform of NNUE evaluation function

#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
#include <cstring>
#include <iostream>
#include <memory>

#include "../nnue_common.h"

namespace Stockfish::Eval::NNUE::Layers {
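  // This layer computes an affine transform output = weights * input + biases
  // on uint8 inputs with int8 weights and int32 biases/outputs. The SIMD
  // variants below differ only in how they vectorize this matrix-vector product.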
  // Affine transformation layer
  template <typename PreviousLayer, IndexType OutDims>
  class AffineTransform {
   public:
    // Input/output type
    using InputType = typename PreviousLayer::OutputType;
    using OutputType = std::int32_t;
    static_assert(std::is_same<InputType, std::uint8_t>::value, "");
    // Number of input/output dimensions
    static constexpr IndexType InputDimensions =
        PreviousLayer::OutputDimensions;
    static constexpr IndexType OutputDimensions = OutDims;
    static constexpr IndexType PaddedInputDimensions =
        ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
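    // The input is padded to a multiple of MaxSimdWidth so the vectorized
    // kernels in propagate() can always operate on whole SIMD registers.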
#if defined (USE_AVX512)
    static constexpr const IndexType OutputSimdWidth = SimdWidth / 2;
#elif defined (USE_SSSE3)
    static constexpr const IndexType OutputSimdWidth = SimdWidth / 4;
#endif
#if defined (USE_AVX512)
    static constexpr const IndexType InputSimdWidth = SimdWidth * 2;
#elif defined (USE_SSSE3)
    static constexpr const IndexType InputSimdWidth = SimdWidth;
#endif
    // Size of forward propagation buffer used in this layer
    static constexpr std::size_t SelfBufferSize =
        ceil_to_multiple(OutputDimensions * sizeof(OutputType), CacheLineSize);

    // Size of the forward propagation buffer used from the input layer to this layer
    static constexpr std::size_t BufferSize =
        PreviousLayer::BufferSize + SelfBufferSize;
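    // Each layer reserves its own slice of a single shared buffer and hands
    // the remainder to the previous layer, so BufferSize accumulates over the
    // whole chain of layers.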
    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t get_hash_value() {
      std::uint32_t hashValue = 0xCC03DAE4u;
      hashValue += OutputDimensions;
      hashValue ^= PreviousLayer::get_hash_value() >> 1;
      hashValue ^= PreviousLayer::get_hash_value() << 31;
      return hashValue;
    }
    // Read network parameters
    bool read_parameters(std::istream& stream) {
      if (!previousLayer.read_parameters(stream)) return false;
      for (std::size_t i = 0; i < OutputDimensions; ++i)
        biases[i] = read_little_endian<BiasType>(stream);
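      // With SSSE3 and newer, weights are stored in a permuted ("scrambled")
      // order: for each 4-byte chunk of the input, the four weights of every
      // output row are laid out consecutively, matching the broadcast-and-dot
      // access pattern of the kernels in propagate().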
      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
#if !defined (USE_SSSE3)
        weights[i] = read_little_endian<WeightType>(stream);
#elif defined (USE_VNNI) || defined (USE_AVX512)
        if constexpr (OutputDimensions <= 8 && OutputDimensions != 1)
          weights[i] = read_little_endian<WeightType>(stream);
        else
          weights[
            (i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
            i / PaddedInputDimensions * 4 +
            i % 4
          ] = read_little_endian<WeightType>(stream);
#else
        weights[
          (i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
          i / PaddedInputDimensions * 4 +
          i % 4
        ] = read_little_endian<WeightType>(stream);
#endif

      return !stream.fail();
    }
    // Write network parameters
    bool write_parameters(std::ostream& stream) const {
      if (!previousLayer.write_parameters(stream)) return false;
      for (std::size_t i = 0; i < OutputDimensions; ++i)
        write_little_endian<BiasType>(stream, biases[i]);
#if !defined (USE_SSSE3)
      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
        write_little_endian<WeightType>(stream, weights[i]);
#else
      std::unique_ptr<WeightType[]> unscrambledWeights = std::make_unique<WeightType[]>(OutputDimensions * PaddedInputDimensions);
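      // Undo the read-time permutation so the file is written in canonical
      // row-major order.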
      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i) {
        unscrambledWeights[i] =
          weights[
            (i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
            i / PaddedInputDimensions * 4 +
            i % 4
          ];
      }

      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
        write_little_endian<WeightType>(stream, unscrambledWeights[i]);
#endif

      return !stream.fail();
    }
    // Forward propagation
    const OutputType* propagate(
        const TransformedFeatureType* transformedFeatures, char* buffer) const {
      const auto input = previousLayer.propagate(
          transformedFeatures, buffer + SelfBufferSize);
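      // The previous layer writes its output beyond this layer's slice of the
      // shared buffer; 'input' points at that freshly computed activation.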
#if defined (USE_AVX512)

      [[maybe_unused]] const __m512i Ones512 = _mm512_set1_epi16(1);

      [[maybe_unused]] auto m512_hadd = [](__m512i sum, int bias) -> int {
        return _mm512_reduce_add_epi32(sum) + bias;
      };

      [[maybe_unused]] auto m512_hadd128x16_interleave = [](
        __m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3) -> __m512i {

        __m512i sum01a = _mm512_unpacklo_epi32(sum0, sum1);
        __m512i sum01b = _mm512_unpackhi_epi32(sum0, sum1);

        __m512i sum23a = _mm512_unpacklo_epi32(sum2, sum3);
        __m512i sum23b = _mm512_unpackhi_epi32(sum2, sum3);

        __m512i sum01 = _mm512_add_epi32(sum01a, sum01b);
        __m512i sum23 = _mm512_add_epi32(sum23a, sum23b);

        __m512i sum0123a = _mm512_unpacklo_epi64(sum01, sum23);
        __m512i sum0123b = _mm512_unpackhi_epi64(sum01, sum23);

        return _mm512_add_epi32(sum0123a, sum0123b);
      };

      [[maybe_unused]] auto m512_haddx4 = [m512_hadd128x16_interleave](
        __m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3, __m128i bias) -> __m128i {

        __m512i sum = m512_hadd128x16_interleave(sum0, sum1, sum2, sum3);

        __m256i sum256lo = _mm512_castsi512_si256(sum);
        __m256i sum256hi = _mm512_extracti64x4_epi64(sum, 1);

        sum256lo = _mm256_add_epi32(sum256lo, sum256hi);

        __m128i sum128lo = _mm256_castsi256_si128(sum256lo);
        __m128i sum128hi = _mm256_extracti128_si256(sum256lo, 1);

        return _mm_add_epi32(_mm_add_epi32(sum128lo, sum128hi), bias);
      };
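      // dpbusd accumulates, per 32-bit lane, the dot product of four unsigned
      // bytes of a with four signed bytes of b. Without VNNI it is emulated
      // with maddubs (u8*s8 -> i16 pairs) followed by madd against ones.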
      [[maybe_unused]] auto m512_add_dpbusd_epi32 = [=](__m512i& acc, __m512i a, __m512i b) {
#if defined (USE_VNNI)
        acc = _mm512_dpbusd_epi32(acc, a, b);
#else
        __m512i product0 = _mm512_maddubs_epi16(a, b);
        product0 = _mm512_madd_epi16(product0, Ones512);
        acc = _mm512_add_epi32(acc, product0);
#endif
      };

      [[maybe_unused]] auto m512_add_dpbusd_epi32x2 = [=](__m512i& acc, __m512i a0, __m512i b0, __m512i a1, __m512i b1) {
#if defined (USE_VNNI)
        acc = _mm512_dpbusd_epi32(acc, a0, b0);
        acc = _mm512_dpbusd_epi32(acc, a1, b1);
#else
        __m512i product0 = _mm512_maddubs_epi16(a0, b0);
        __m512i product1 = _mm512_maddubs_epi16(a1, b1);
        product0 = _mm512_adds_epi16(product0, product1);
        product0 = _mm512_madd_epi16(product0, Ones512);
        acc = _mm512_add_epi32(acc, product0);
#endif
      };

      [[maybe_unused]] auto m512_add_dpbusd_epi32x4 = [=](__m512i& acc, __m512i a0, __m512i b0, __m512i a1, __m512i b1,
                                                                        __m512i a2, __m512i b2, __m512i a3, __m512i b3) {
#if defined (USE_VNNI)
        acc = _mm512_dpbusd_epi32(acc, a0, b0);
        acc = _mm512_dpbusd_epi32(acc, a1, b1);
        acc = _mm512_dpbusd_epi32(acc, a2, b2);
        acc = _mm512_dpbusd_epi32(acc, a3, b3);
#else
        __m512i product0 = _mm512_maddubs_epi16(a0, b0);
        __m512i product1 = _mm512_maddubs_epi16(a1, b1);
        __m512i product2 = _mm512_maddubs_epi16(a2, b2);
        __m512i product3 = _mm512_maddubs_epi16(a3, b3);
        product0 = _mm512_adds_epi16(product0, product1);
        product0 = _mm512_madd_epi16(product0, Ones512);
        product2 = _mm512_adds_epi16(product2, product3);
        product2 = _mm512_madd_epi16(product2, Ones512);
        acc = _mm512_add_epi32(acc, _mm512_add_epi32(product0, product2));
#endif
      };

#endif
#if defined (USE_AVX2)

      [[maybe_unused]] const __m256i Ones256 = _mm256_set1_epi16(1);

      [[maybe_unused]] auto m256_hadd = [](__m256i sum, int bias) -> int {
        __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extracti128_si256(sum, 1));
        sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_BADC));
        sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_CDAB));
        return _mm_cvtsi128_si32(sum128) + bias;
      };

      [[maybe_unused]] auto m256_haddx4 = [](__m256i sum0, __m256i sum1, __m256i sum2, __m256i sum3, __m128i bias) -> __m128i {
        sum0 = _mm256_hadd_epi32(sum0, sum1);
        sum2 = _mm256_hadd_epi32(sum2, sum3);

        sum0 = _mm256_hadd_epi32(sum0, sum2);

        __m128i sum128lo = _mm256_castsi256_si128(sum0);
        __m128i sum128hi = _mm256_extracti128_si256(sum0, 1);

        return _mm_add_epi32(_mm_add_epi32(sum128lo, sum128hi), bias);
      };

      [[maybe_unused]] auto m256_add_dpbusd_epi32 = [=](__m256i& acc, __m256i a, __m256i b) {
#if defined (USE_VNNI)
        acc = _mm256_dpbusd_epi32(acc, a, b);
#else
        __m256i product0 = _mm256_maddubs_epi16(a, b);
        product0 = _mm256_madd_epi16(product0, Ones256);
        acc = _mm256_add_epi32(acc, product0);
#endif
      };

      [[maybe_unused]] auto m256_add_dpbusd_epi32x2 = [=](__m256i& acc, __m256i a0, __m256i b0, __m256i a1, __m256i b1) {
#if defined (USE_VNNI)
        acc = _mm256_dpbusd_epi32(acc, a0, b0);
        acc = _mm256_dpbusd_epi32(acc, a1, b1);
#else
        __m256i product0 = _mm256_maddubs_epi16(a0, b0);
        __m256i product1 = _mm256_maddubs_epi16(a1, b1);
        product0 = _mm256_adds_epi16(product0, product1);
        product0 = _mm256_madd_epi16(product0, Ones256);
        acc = _mm256_add_epi32(acc, product0);
#endif
      };

      [[maybe_unused]] auto m256_add_dpbusd_epi32x4 = [=](__m256i& acc, __m256i a0, __m256i b0, __m256i a1, __m256i b1,
                                                                        __m256i a2, __m256i b2, __m256i a3, __m256i b3) {
#if defined (USE_VNNI)
        acc = _mm256_dpbusd_epi32(acc, a0, b0);
        acc = _mm256_dpbusd_epi32(acc, a1, b1);
        acc = _mm256_dpbusd_epi32(acc, a2, b2);
        acc = _mm256_dpbusd_epi32(acc, a3, b3);
#else
        __m256i product0 = _mm256_maddubs_epi16(a0, b0);
        __m256i product1 = _mm256_maddubs_epi16(a1, b1);
        __m256i product2 = _mm256_maddubs_epi16(a2, b2);
        __m256i product3 = _mm256_maddubs_epi16(a3, b3);
        product0 = _mm256_adds_epi16(product0, product1);
        product0 = _mm256_madd_epi16(product0, Ones256);
        product2 = _mm256_adds_epi16(product2, product3);
        product2 = _mm256_madd_epi16(product2, Ones256);
        acc = _mm256_add_epi32(acc, _mm256_add_epi32(product0, product2));
#endif
      };

#endif
#if defined (USE_SSSE3)

      [[maybe_unused]] const __m128i Ones128 = _mm_set1_epi16(1);

      [[maybe_unused]] auto m128_hadd = [](__m128i sum, int bias) -> int {
        sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0x4E)); //_MM_PERM_BADC
        sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0xB1)); //_MM_PERM_CDAB
        return _mm_cvtsi128_si32(sum) + bias;
      };

      [[maybe_unused]] auto m128_haddx4 = [](__m128i sum0, __m128i sum1, __m128i sum2, __m128i sum3, __m128i bias) -> __m128i {
        sum0 = _mm_hadd_epi32(sum0, sum1);
        sum2 = _mm_hadd_epi32(sum2, sum3);
        sum0 = _mm_hadd_epi32(sum0, sum2);
        return _mm_add_epi32(sum0, bias);
      };

      [[maybe_unused]] auto m128_add_dpbusd_epi32 = [=](__m128i& acc, __m128i a, __m128i b) {
        __m128i product0 = _mm_maddubs_epi16(a, b);
        product0 = _mm_madd_epi16(product0, Ones128);
        acc = _mm_add_epi32(acc, product0);
      };

      [[maybe_unused]] auto m128_add_dpbusd_epi32x2 = [=](__m128i& acc, __m128i a0, __m128i b0, __m128i a1, __m128i b1) {
        __m128i product0 = _mm_maddubs_epi16(a0, b0);
        __m128i product1 = _mm_maddubs_epi16(a1, b1);
        product0 = _mm_adds_epi16(product0, product1);
        product0 = _mm_madd_epi16(product0, Ones128);
        acc = _mm_add_epi32(acc, product0);
      };

      [[maybe_unused]] auto m128_add_dpbusd_epi32x4 = [=](__m128i& acc, __m128i a0, __m128i b0, __m128i a1, __m128i b1,
                                                                        __m128i a2, __m128i b2, __m128i a3, __m128i b3) {
        __m128i product0 = _mm_maddubs_epi16(a0, b0);
        __m128i product1 = _mm_maddubs_epi16(a1, b1);
        __m128i product2 = _mm_maddubs_epi16(a2, b2);
        __m128i product3 = _mm_maddubs_epi16(a3, b3);
        product0 = _mm_adds_epi16(product0, product1);
        product0 = _mm_madd_epi16(product0, Ones128);
        product2 = _mm_adds_epi16(product2, product3);
        product2 = _mm_madd_epi16(product2, Ones128);
        acc = _mm_add_epi32(acc, _mm_add_epi32(product0, product2));
      };

#endif
#if defined (USE_AVX512)
      using vec_t = __m512i;
      #define vec_setzero _mm512_setzero_si512
      #define vec_set_32 _mm512_set1_epi32
      [[maybe_unused]] auto& vec_add_dpbusd_32 = m512_add_dpbusd_epi32;
      [[maybe_unused]] auto& vec_add_dpbusd_32x2 = m512_add_dpbusd_epi32x2;
      [[maybe_unused]] auto& vec_add_dpbusd_32x4 = m512_add_dpbusd_epi32x4;
      [[maybe_unused]] auto& vec_hadd = m512_hadd;
      [[maybe_unused]] auto& vec_haddx4 = m512_haddx4;
#elif defined (USE_AVX2)
      using vec_t = __m256i;
      #define vec_setzero _mm256_setzero_si256
      #define vec_set_32 _mm256_set1_epi32
      [[maybe_unused]] auto& vec_add_dpbusd_32 = m256_add_dpbusd_epi32;
      [[maybe_unused]] auto& vec_add_dpbusd_32x2 = m256_add_dpbusd_epi32x2;
      [[maybe_unused]] auto& vec_add_dpbusd_32x4 = m256_add_dpbusd_epi32x4;
      [[maybe_unused]] auto& vec_hadd = m256_hadd;
      [[maybe_unused]] auto& vec_haddx4 = m256_haddx4;
#elif defined (USE_SSSE3)
      using vec_t = __m128i;
      #define vec_setzero _mm_setzero_si128
      #define vec_set_32 _mm_set1_epi32
      [[maybe_unused]] auto& vec_add_dpbusd_32 = m128_add_dpbusd_epi32;
      [[maybe_unused]] auto& vec_add_dpbusd_32x2 = m128_add_dpbusd_epi32x2;
      [[maybe_unused]] auto& vec_add_dpbusd_32x4 = m128_add_dpbusd_epi32x4;
      [[maybe_unused]] auto& vec_hadd = m128_hadd;
      [[maybe_unused]] auto& vec_haddx4 = m128_haddx4;
#endif
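      // From here on, the kernels refer to the widest available vector type
      // and helpers through the vec_* aliases selected above.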
#if defined (USE_SSSE3)
      const auto output = reinterpret_cast<OutputType*>(buffer);
      const auto inputVector = reinterpret_cast<const vec_t*>(input);

#if defined (USE_VNNI) || defined (USE_AVX512)

      static_assert(OutputDimensions == 1 || OutputDimensions % 4 == 0);

      // OutputDimensions is either 1 or a multiple of SimdWidth
      // because then it is also an input dimension.
      if constexpr (OutputDimensions <= 8 && OutputDimensions != 1)
      {
          constexpr IndexType NumChunks = PaddedInputDimensions / InputSimdWidth;

          static_assert(NumChunks % 2 == 0);

          const auto input_vec = reinterpret_cast<const vec_t*>(input);
          const auto bias_vec = reinterpret_cast<const __m128i*>(biases);
          auto out_vec = reinterpret_cast<__m128i*>(output);

          vec_t regs[OutputDimensions];
          for (IndexType k = 0; k < OutputDimensions; ++k)
              regs[k] = vec_setzero();

          for (IndexType i = 0; i < NumChunks / 2; ++i)
          {
              const vec_t in0 = input_vec[i * 2 + 0];
              const vec_t in1 = input_vec[i * 2 + 1];
              for (IndexType k = 0; k < OutputDimensions; ++k)
              {
                  const vec_t w0 = reinterpret_cast<const vec_t*>(&weights[k * PaddedInputDimensions])[i * 2 + 0];
                  const vec_t w1 = reinterpret_cast<const vec_t*>(&weights[k * PaddedInputDimensions])[i * 2 + 1];
                  vec_add_dpbusd_32(regs[k], in0, w0);
                  vec_add_dpbusd_32(regs[k], in1, w1);
              }
          }

          for (IndexType k = 0; k < OutputDimensions / 4; ++k)
          {
              out_vec[k] = vec_haddx4(
                  regs[k * 4 + 0], regs[k * 4 + 1], regs[k * 4 + 2], regs[k * 4 + 3], bias_vec[k]);
          }
      }
      else if constexpr (InputDimensions == 8)
      {
          const auto input32 = reinterpret_cast<const std::int32_t*>(input);
          __m256i* outptr = reinterpret_cast<__m256i*>(output);
          std::memcpy(output, biases, OutputDimensions * sizeof(OutputType));

          const __m256i in0 = _mm256_set1_epi32(input32[0]);
          const __m256i in1 = _mm256_set1_epi32(input32[1]);
          const auto col0 = reinterpret_cast<const __m256i*>(&weights[0]);
          const auto col1 = reinterpret_cast<const __m256i*>(&weights[OutputDimensions * 4]);
          for (IndexType j = 0; j * 8 < OutputDimensions; ++j)
              m256_add_dpbusd_epi32x2(outptr[j], in0, col0[j], in1, col1[j]);
      }
      else

#elif defined (USE_SSSE3)
      if constexpr (OutputDimensions % OutputSimdWidth == 0 && InputDimensions == 8)
      {
          const auto input32 = reinterpret_cast<const std::int32_t*>(input);
          vec_t* outptr = reinterpret_cast<vec_t*>(output);
          std::memcpy(output, biases, OutputDimensions * sizeof(OutputType));

          const vec_t in0 = vec_set_32(input32[0]);
          const vec_t in1 = vec_set_32(input32[1]);
          const auto col0 = reinterpret_cast<const vec_t*>(&weights[0]);
          const auto col1 = reinterpret_cast<const vec_t*>(&weights[OutputDimensions * 4]);
          for (IndexType j = 0; j * OutputSimdWidth < OutputDimensions; ++j)
              vec_add_dpbusd_32x2(outptr[j], in0, col0[j], in1, col1[j]);
      }
      else

#endif
#if defined (USE_SSSE3)

      if constexpr (OutputDimensions % OutputSimdWidth == 0)
      {
          static_assert(InputDimensions % 16 == 0);

          constexpr IndexType NumChunks = InputDimensions / 4;
          constexpr IndexType NumRegs = OutputDimensions / OutputSimdWidth;

          const auto input32 = reinterpret_cast<const std::int32_t*>(input);
          const vec_t* biasvec = reinterpret_cast<const vec_t*>(biases);
          vec_t outs[NumRegs];
          for (IndexType k = 0; k < NumRegs; ++k)
              outs[k] = biasvec[k];

          for (IndexType i = 0; i < NumChunks; i += 4)
          {
              const vec_t in0 = vec_set_32(input32[i + 0]);
              const vec_t in1 = vec_set_32(input32[i + 1]);
              const vec_t in2 = vec_set_32(input32[i + 2]);
              const vec_t in3 = vec_set_32(input32[i + 3]);
              const auto col0 = reinterpret_cast<const vec_t*>(&weights[(i + 0) * OutputDimensions * 4]);
              const auto col1 = reinterpret_cast<const vec_t*>(&weights[(i + 1) * OutputDimensions * 4]);
              const auto col2 = reinterpret_cast<const vec_t*>(&weights[(i + 2) * OutputDimensions * 4]);
              const auto col3 = reinterpret_cast<const vec_t*>(&weights[(i + 3) * OutputDimensions * 4]);
              for (IndexType k = 0; k < NumRegs; ++k)
                  vec_add_dpbusd_32x4(outs[k], in0, col0[k], in1, col1[k], in2, col2[k], in3, col3[k]);
          }

          vec_t* outptr = reinterpret_cast<vec_t*>(output);
          for (IndexType k = 0; k < NumRegs; ++k)
              outptr[k] = outs[k];
      }
      else if constexpr (OutputDimensions == 1)
      {
          static_assert(InputDimensions % 4 == 0);

#if defined (USE_AVX512)
          if constexpr (PaddedInputDimensions % (SimdWidth * 2) != 0)
          {
              constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
              const auto inputVector256 = reinterpret_cast<const __m256i*>(input);

              __m256i sum0 = _mm256_setzero_si256();
              const auto row0 = reinterpret_cast<const __m256i*>(&weights[0]);

              for (int j = 0; j < (int)NumChunks; ++j)
              {
                  const __m256i in = inputVector256[j];
                  m256_add_dpbusd_epi32(sum0, in, row0[j]);
              }
              output[0] = m256_hadd(sum0, biases[0]);
          }
          else
#endif
          {
#if defined (USE_AVX512)
              constexpr IndexType NumChunks = PaddedInputDimensions / (SimdWidth * 2);
#else
              constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
#endif
              vec_t sum0 = vec_setzero();
              const auto row0 = reinterpret_cast<const vec_t*>(&weights[0]);

              for (int j = 0; j < (int)NumChunks; ++j)
              {
                  const vec_t in = inputVector[j];
                  vec_add_dpbusd_32(sum0, in, row0[j]);
              }
              output[0] = vec_hadd(sum0, biases[0]);
          }
      }

#else
// Use old implementation for the other architectures.

      auto output = reinterpret_cast<OutputType*>(buffer);

#if defined(USE_SSE2)
      // At least a multiple of 16, with SSE2.
      static_assert(PaddedInputDimensions % SimdWidth == 0);
      constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
      const __m128i Zeros = _mm_setzero_si128();
      const auto inputVector = reinterpret_cast<const __m128i*>(input);

#elif defined(USE_MMX)
      static_assert(InputDimensions % SimdWidth == 0);
      constexpr IndexType NumChunks = InputDimensions / SimdWidth;
      const __m64 Zeros = _mm_setzero_si64();
      const auto inputVector = reinterpret_cast<const __m64*>(input);

#elif defined(USE_NEON)
      static_assert(PaddedInputDimensions % SimdWidth == 0);
      constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
      const auto inputVector = reinterpret_cast<const int8x8_t*>(input);
#endif
      for (IndexType i = 0; i < OutputDimensions; ++i) {
        const IndexType offset = i * PaddedInputDimensions;

#if defined(USE_SSE2)
        __m128i sumLo = _mm_cvtsi32_si128(biases[i]);
        __m128i sumHi = Zeros;
        const auto row = reinterpret_cast<const __m128i*>(&weights[offset]);
        for (IndexType j = 0; j < NumChunks; ++j) {
          __m128i row_j = _mm_load_si128(&row[j]);
          __m128i input_j = _mm_load_si128(&inputVector[j]);
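          // Unpacking a byte with itself and arithmetic-shifting right by 8
          // sign-extends each int8 weight lane to int16.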
          __m128i extendedRowLo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
          __m128i extendedRowHi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
          __m128i extendedInputLo = _mm_unpacklo_epi8(input_j, Zeros);
          __m128i extendedInputHi = _mm_unpackhi_epi8(input_j, Zeros);
          __m128i productLo = _mm_madd_epi16(extendedRowLo, extendedInputLo);
          __m128i productHi = _mm_madd_epi16(extendedRowHi, extendedInputHi);
          sumLo = _mm_add_epi32(sumLo, productLo);
          sumHi = _mm_add_epi32(sumHi, productHi);
        }
        __m128i sum = _mm_add_epi32(sumLo, sumHi);
        __m128i sumHigh_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
        sum = _mm_add_epi32(sum, sumHigh_64);
        __m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
        sum = _mm_add_epi32(sum, sum_second_32);
        output[i] = _mm_cvtsi128_si32(sum);
#elif defined(USE_MMX)
        __m64 sumLo = _mm_cvtsi32_si64(biases[i]);
        __m64 sumHi = Zeros;
        const auto row = reinterpret_cast<const __m64*>(&weights[offset]);
        for (IndexType j = 0; j < NumChunks; ++j) {
          __m64 row_j = row[j];
          __m64 input_j = inputVector[j];
          __m64 extendedRowLo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
          __m64 extendedRowHi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
          __m64 extendedInputLo = _mm_unpacklo_pi8(input_j, Zeros);
          __m64 extendedInputHi = _mm_unpackhi_pi8(input_j, Zeros);
          __m64 productLo = _mm_madd_pi16(extendedRowLo, extendedInputLo);
          __m64 productHi = _mm_madd_pi16(extendedRowHi, extendedInputHi);
          sumLo = _mm_add_pi32(sumLo, productLo);
          sumHi = _mm_add_pi32(sumHi, productHi);
        }
        __m64 sum = _mm_add_pi32(sumLo, sumHi);
        sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
        output[i] = _mm_cvtsi64_si32(sum);
#elif defined(USE_NEON)
        int32x4_t sum = {biases[i]};
        const auto row = reinterpret_cast<const int8x8_t*>(&weights[offset]);
        for (IndexType j = 0; j < NumChunks; ++j) {
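          // Widening multiply of int8x8 halves; vpadalq_s16 then pairwise-adds
          // the int16 products into the int32 accumulator lanes.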
          int16x8_t product = vmull_s8(inputVector[j * 2], row[j * 2]);
          product = vmlal_s8(product, inputVector[j * 2 + 1], row[j * 2 + 1]);
          sum = vpadalq_s16(sum, product);
        }
        output[i] = sum[0] + sum[1] + sum[2] + sum[3];
#else
        OutputType sum = biases[i];
        for (IndexType j = 0; j < InputDimensions; ++j) {
          sum += weights[offset + j] * input[j];
        }
        output[i] = sum;
#endif

      }
#if defined(USE_MMX)
      _mm_empty();
#endif

#endif

#if (!defined (USE_SSSE3) && defined (USE_SSE2)) || defined (USE_NEON)
      static_assert(SimdWidth <= 16, "Otherwise we run outside of the padding for the output.");
      if constexpr (SimdWidth > OutputDimensions && OutputDimensions != 1)
          for (IndexType i = OutputDimensions; i < SimdWidth; ++i)
              output[i] = 0;
#endif

      return output;
    }

   private:
    using BiasType = OutputType;
    using WeightType = std::int8_t;

    PreviousLayer previousLayer;

    alignas(CacheLineSize) BiasType biases[OutputDimensions];
    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
  };
}  // namespace Stockfish::Eval::NNUE::Layers

#endif // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED