+/*
+ This file contains the definition for a fully connected layer (aka affine transform).
+ Two approaches are employed, depending on the sizes of the transform.
+
+ Approach 1:
+   - used when the PaddedInputDimensions >= 128
+   - uses AVX512 if possible
+   - processes inputs in batches of 2*InputSimdWidth
+     - so in batches of 128 for AVX512
+   - the weight blocks of size InputSimdWidth are transposed such that
+     access is sequential
+   - N columns of the weight matrix are processed at a time, where N
+     depends on the architecture (the number of registers)
+   - accumulate + hadd is used
+
+ Approach 2:
+   - used when the PaddedInputDimensions < 128
+   - does not use AVX512
+     - the expected use case is PaddedInputDimensions == 32 and InputDimensions <= 32,
+       which is why AVX512 is hard to apply here
+   - expected use case is small layers
+     - not as well optimized as approach 1
+   - inputs are processed in chunks of 4, weights are transposed accordingly
+   - accumulation happens directly into int32s (see the sketch after this comment)
+*/
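+
+// Illustrative sketch only (not used by this file): the heart of approach 2 is a
+// dpbusd-style step in which each group of 4 uint8 inputs is multiplied with 4 int8
+// weights and the four products are summed straight into one int32 lane of the
+// accumulator. A minimal SSSE3 version of that step might look like the helper
+// below; the name add_dot_u8i8_to_i32 is hypothetical, and unlike VNNI's dpbusd
+// the intermediate int16 sums can saturate.
+#if defined(USE_SSSE3)
+static inline void add_dot_u8i8_to_i32(__m128i& acc, __m128i in_u8, __m128i w_i8) {
+    // u8 * i8 products, adjacent pairs summed into int16 (with signed saturation)
+    __m128i prod16 = _mm_maddubs_epi16(in_u8, w_i8);
+    // adjacent int16 pairs summed into int32 -> each lane holds the sum of 4 byte products
+    __m128i prod32 = _mm_madd_epi16(prod16, _mm_set1_epi16(1));
+    acc = _mm_add_epi32(acc, prod32);
+}
+#endif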
+
+namespace Stockfish::Eval::NNUE::Layers {
+
+// Fallback implementation for older/other architectures.
+// Identical for both approaches. Requires the input to be padded to at least 16 values.
+#if !defined(USE_SSSE3)
+ template <IndexType InputDimensions, IndexType PaddedInputDimensions, IndexType OutputDimensions>
+ static void affine_transform_non_ssse3(std::int32_t* output, const std::int8_t* weights, const std::int32_t* biases, const std::uint8_t* input)
+ {
+# if defined(USE_SSE2)
+ // At least a multiple of 16, with SSE2.
+ static_assert(PaddedInputDimensions % 16 == 0);
+ constexpr IndexType NumChunks = PaddedInputDimensions / 16;
+ const __m128i Zeros = _mm_setzero_si128();
+ const auto inputVector = reinterpret_cast<const __m128i*>(input);
+
+# elif defined(USE_MMX)
+ static_assert(InputDimensions % 8 == 0);
+ constexpr IndexType NumChunks = InputDimensions / 8;
+ const __m64 Zeros = _mm_setzero_si64();
+ const auto inputVector = reinterpret_cast<const __m64*>(input);
+
+# elif defined(USE_NEON)
+ constexpr IndexType NumChunks = (InputDimensions + 15) / 16;
+ const auto inputVector = reinterpret_cast<const int8x8_t*>(input);
+# endif
+
+ for (IndexType i = 0; i < OutputDimensions; ++i) {
+ const IndexType offset = i * PaddedInputDimensions;
+
+# if defined(USE_SSE2)
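+ // Two 4-lane int32 accumulators; the bias is seeded into lane 0 of sumLo.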
+ __m128i sumLo = _mm_cvtsi32_si128(biases[i]);
+ __m128i sumHi = Zeros;
+ const auto row = reinterpret_cast<const __m128i*>(&weights[offset]);
+ for (IndexType j = 0; j < NumChunks; ++j) {
+ __m128i row_j = _mm_load_si128(&row[j]);
+ __m128i input_j = _mm_load_si128(&inputVector[j]);
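+ // Sign-extend the int8 weights to int16 by duplicating each byte and shifting right
+ // arithmetically; the uint8 inputs are zero-extended by unpacking with zeros.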
+ __m128i extendedRowLo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
+ __m128i extendedRowHi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
+ __m128i extendedInputLo = _mm_unpacklo_epi8(input_j, Zeros);
+ __m128i extendedInputHi = _mm_unpackhi_epi8(input_j, Zeros);
+ __m128i productLo = _mm_madd_epi16(extendedRowLo, extendedInputLo);
+ __m128i productHi = _mm_madd_epi16(extendedRowHi, extendedInputHi);
+ sumLo = _mm_add_epi32(sumLo, productLo);
+ sumHi = _mm_add_epi32(sumHi, productHi);
+ }
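+ // Combine both accumulators and horizontally reduce the four int32 lanes to a single scalar sum.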
+ __m128i sum = _mm_add_epi32(sumLo, sumHi);
+ __m128i sumHigh_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
+ sum = _mm_add_epi32(sum, sumHigh_64);
+ __m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
+ sum = _mm_add_epi32(sum, sum_second_32);
+ output[i] = _mm_cvtsi128_si32(sum);
+
+# elif defined(USE_MMX)
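+ // Same scheme as the SSE2 path above, using 64-bit MMX vectors.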
+ __m64 sumLo = _mm_cvtsi32_si64(biases[i]);
+ __m64 sumHi = Zeros;
+ const auto row = reinterpret_cast<const __m64*>(&weights[offset]);
+ for (IndexType j = 0; j < NumChunks; ++j) {
+ __m64 row_j = row[j];
+ __m64 input_j = inputVector[j];
+ __m64 extendedRowLo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
+ __m64 extendedRowHi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
+ __m64 extendedInputLo = _mm_unpacklo_pi8(input_j, Zeros);
+ __m64 extendedInputHi = _mm_unpackhi_pi8(input_j, Zeros);
+ __m64 productLo = _mm_madd_pi16(extendedRowLo, extendedInputLo);
+ __m64 productHi = _mm_madd_pi16(extendedRowHi, extendedInputHi);
+ sumLo = _mm_add_pi32(sumLo, productLo);
+ sumHi = _mm_add_pi32(sumHi, productHi);
+ }
+ __m64 sum = _mm_add_pi32(sumLo, sumHi);
+ sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
+ output[i] = _mm_cvtsi64_si32(sum);
+
+# elif defined(USE_NEON)
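+ // Each 16-byte chunk is two 8-lane int8 multiplies widened to int16,
+ // then pairwise-accumulated into the int32 lanes of sum.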
+ int32x4_t sum = {biases[i]};
+ const auto row = reinterpret_cast<const int8x8_t*>(&weights[offset]);
+ for (IndexType j = 0; j < NumChunks; ++j) {
+ int16x8_t product = vmull_s8(inputVector[j * 2], row[j * 2]);
+ product = vmlal_s8(product, inputVector[j * 2 + 1], row[j * 2 + 1]);
+ sum = vpadalq_s16(sum, product);
+ }
+ output[i] = sum[0] + sum[1] + sum[2] + sum[3];
+
+# else
+ std::int32_t sum = biases[i];
+ for (IndexType j = 0; j < InputDimensions; ++j) {
+ sum += weights[offset + j] * input[j];
+ }
+ output[i] = sum;
+# endif
+ }
+
+# if defined(USE_MMX)
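+ // Clear the MMX state (EMMS) so that any following x87 floating-point code behaves correctly.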
+ _mm_empty();
+# endif
+ }
+#endif
+
+ template <typename PreviousLayer, IndexType OutDims, typename Enabled = void>
+ class AffineTransform;