/*
Stockfish, a UCI chess playing engine derived from Glaurung 2.1
- Copyright (C) 2004-2020 The Stockfish developers (see AUTHORS file)
+ Copyright (C) 2004-2023 The Stockfish developers (see AUTHORS file)
Stockfish is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
+#include <cstdint>
+#include <cstring>
#include <iostream>
+
#include "../nnue_common.h"
+#include "simd.h"
+
+/*
+ This file contains the definition for a fully connected layer (aka affine transform).
+
+ - the expected use case is small layers, with PaddedInputDimensions == 32 and InputDimensions <= 32
+   - that is why AVX512 is hard to implement here
+ - inputs are processed in chunks of 4, and the weights are transposed accordingly
+ - accumulation happens directly into int32s
+*/
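+
+// A scalar reference of what propagate() below computes (a sketch for orientation;
+// padding and SIMD grouping aside, and with the weights in the row-major order in
+// which they are stored in the network file):
+//
+//     for (IndexType o = 0; o < OutputDimensions; ++o) {
+//         std::int32_t sum = biases[o];
+//         for (IndexType i = 0; i < InputDimensions; ++i)
+//             sum += weights[o * PaddedInputDimensions + i] * input[i];
+//         output[o] = sum;
+//     }
+//
+// The SIMD paths only change how these products are grouped and accumulated.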
+
-namespace Eval::NNUE::Layers {
+namespace Stockfish::Eval::NNUE::Layers {
+
+// Fallback implementation for older/other architectures.
+// Requires the input to be padded to at least 16 values.
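+// It provides SSE2, NEON-dotprod and plain NEON code paths, and otherwise falls back
+// to a scalar loop that skips zero inputs (exploiting input sparsity).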
+#if !defined(USE_SSSE3)
+template<IndexType InputDimensions, IndexType PaddedInputDimensions, IndexType OutputDimensions>
+static void affine_transform_non_ssse3(std::int32_t* output,
+ const std::int8_t* weights,
+ const std::int32_t* biases,
+ const std::uint8_t* input) {
+ #if defined(USE_SSE2) || defined(USE_NEON_DOTPROD) || defined(USE_NEON)
+ #if defined(USE_SSE2)
+ // Process the input in 16-byte chunks: InputDimensions rounded up to a multiple of 16 with SSE2.
+ constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 16) / 16;
+ const __m128i Zeros = _mm_setzero_si128();
+ const auto inputVector = reinterpret_cast<const __m128i*>(input);
+
+ #elif defined(USE_NEON_DOTPROD)
+ constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 16) / 16;
+ const auto inputVector = reinterpret_cast<const int8x16_t*>(input);
+
+ #elif defined(USE_NEON)
+ constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 16) / 16;
+ const auto inputVector = reinterpret_cast<const int8x8_t*>(input);
+ #endif
+
+ for (IndexType i = 0; i < OutputDimensions; ++i)
+ {
+ const IndexType offset = i * PaddedInputDimensions;
+
+ #if defined(USE_SSE2)
+ __m128i sumLo = _mm_cvtsi32_si128(biases[i]);
+ __m128i sumHi = Zeros;
+ const auto row = reinterpret_cast<const __m128i*>(&weights[offset]);
+ for (IndexType j = 0; j < NumChunks; ++j)
+ {
+ __m128i row_j = _mm_load_si128(&row[j]);
+ __m128i input_j = _mm_load_si128(&inputVector[j]);
+ __m128i extendedRowLo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
+ __m128i extendedRowHi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
+ __m128i extendedInputLo = _mm_unpacklo_epi8(input_j, Zeros);
+ __m128i extendedInputHi = _mm_unpackhi_epi8(input_j, Zeros);
+ __m128i productLo = _mm_madd_epi16(extendedRowLo, extendedInputLo);
+ __m128i productHi = _mm_madd_epi16(extendedRowHi, extendedInputHi);
+ sumLo = _mm_add_epi32(sumLo, productLo);
+ sumHi = _mm_add_epi32(sumHi, productHi);
+ }
+ __m128i sum = _mm_add_epi32(sumLo, sumHi);
+ __m128i sumHigh_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
+ sum = _mm_add_epi32(sum, sumHigh_64);
+ __m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
+ sum = _mm_add_epi32(sum, sum_second_32);
+ output[i] = _mm_cvtsi128_si32(sum);
+
+ #elif defined(USE_NEON_DOTPROD)
+ int32x4_t sum = {biases[i]};
+ const auto row = reinterpret_cast<const int8x16_t*>(&weights[offset]);
+ for (IndexType j = 0; j < NumChunks; ++j)
+ {
+ sum = vdotq_s32(sum, inputVector[j], row[j]);
+ }
+ output[i] = vaddvq_s32(sum);
+
+ #elif defined(USE_NEON)
+ int32x4_t sum = {biases[i]};
+ const auto row = reinterpret_cast<const int8x8_t*>(&weights[offset]);
+ for (IndexType j = 0; j < NumChunks; ++j)
+ {
+ int16x8_t product = vmull_s8(inputVector[j * 2], row[j * 2]);
+ product = vmlal_s8(product, inputVector[j * 2 + 1], row[j * 2 + 1]);
+ sum = vpadalq_s16(sum, product);
+ }
+ output[i] = sum[0] + sum[1] + sum[2] + sum[3];
+ #endif
+ }
+ #else
+ std::memcpy(output, biases, sizeof(std::int32_t) * OutputDimensions);
+
+ // Traverse the weights in transposed order to take advantage of input sparsity
+ for (IndexType i = 0; i < InputDimensions; ++i)
+ if (input[i])
+ {
+ const std::int8_t* w = &weights[i];
+ const int in = input[i];
+ for (IndexType j = 0; j < OutputDimensions; ++j)
+ output[j] += w[j * PaddedInputDimensions] * in;
+ }
+ #endif
+}
+#endif
- // Affine transformation layer
- template <typename PreviousLayer, IndexType OutputDimensions>
- class AffineTransform {
+template<IndexType InDims, IndexType OutDims>
+class AffineTransform {
public:
// Input/output type
- using InputType = typename PreviousLayer::OutputType;
+ using InputType = std::uint8_t;
using OutputType = std::int32_t;
- static_assert(std::is_same<InputType, std::uint8_t>::value, "");
// Number of input/output dimensions
- static constexpr IndexType kInputDimensions =
- PreviousLayer::kOutputDimensions;
- static constexpr IndexType kOutputDimensions = OutputDimensions;
- static constexpr IndexType kPaddedInputDimensions =
- CeilToMultiple<IndexType>(kInputDimensions, kMaxSimdWidth);
+ static constexpr IndexType InputDimensions = InDims;
+ static constexpr IndexType OutputDimensions = OutDims;
- // Size of forward propagation buffer used in this layer
- static constexpr std::size_t kSelfBufferSize =
- CeilToMultiple(kOutputDimensions * sizeof(OutputType), kCacheLineSize);
+ static constexpr IndexType PaddedInputDimensions =
+ ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
+ static constexpr IndexType PaddedOutputDimensions =
+ ceil_to_multiple<IndexType>(OutputDimensions, MaxSimdWidth);
- // Size of the forward propagation buffer used from the input layer to this layer
- static constexpr std::size_t kBufferSize =
- PreviousLayer::kBufferSize + kSelfBufferSize;
+ using OutputBuffer = OutputType[PaddedOutputDimensions];
// Hash value embedded in the evaluation file
- static constexpr std::uint32_t GetHashValue() {
- std::uint32_t hash_value = 0xCC03DAE4u;
- hash_value += kOutputDimensions;
- hash_value ^= PreviousLayer::GetHashValue() >> 1;
- hash_value ^= PreviousLayer::GetHashValue() << 31;
- return hash_value;
+ static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
+ std::uint32_t hashValue = 0xCC03DAE4u;
+ hashValue += OutputDimensions;
+ hashValue ^= prevHash >> 1;
+ hashValue ^= prevHash << 31;
+ return hashValue;
}
- // Read network parameters
- bool ReadParameters(std::istream& stream) {
- if (!previous_layer_.ReadParameters(stream)) return false;
- stream.read(reinterpret_cast<char*>(biases_),
- kOutputDimensions * sizeof(BiasType));
- stream.read(reinterpret_cast<char*>(weights_),
- kOutputDimensions * kPaddedInputDimensions *
- sizeof(WeightType));
- return !stream.fail();
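+ // With SSSE3 and above the weights are permuted when loaded: for each group of 4
+ // consecutive input indices, the 4 int8 weights of every output row are stored
+ // contiguously, so propagate() can broadcast 4 inputs as one 32-bit word and
+ // multiply them against a whole vector of weights at once. This maps a row-major
+ // weight index to that permuted layout.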
+ static constexpr IndexType get_weight_index_scrambled(IndexType i) {
+ return (i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4
+ + i / PaddedInputDimensions * 4 + i % 4;
}
+ static constexpr IndexType get_weight_index(IndexType i) {
+#if defined(USE_SSSE3)
+ return get_weight_index_scrambled(i);
+#else
+ return i;
+#endif
+ }
+
+ // Read network parameters
+ bool read_parameters(std::istream& stream) {
+ read_little_endian<BiasType>(stream, biases, OutputDimensions);
+ for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+ weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);
+
+ return !stream.fail();
+ }
+
+ // Write network parameters
+ bool write_parameters(std::ostream& stream) const {
+ write_little_endian<BiasType>(stream, biases, OutputDimensions);
+
+ for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+ write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);
+
+ return !stream.fail();
+ }
// Forward propagation
- const OutputType* Propagate(
- const TransformedFeatureType* transformed_features, char* buffer) const {
- const auto input = previous_layer_.Propagate(
- transformed_features, buffer + kSelfBufferSize);
- const auto output = reinterpret_cast<OutputType*>(buffer);
-
- #if defined(USE_AVX512)
- constexpr IndexType kNumChunks = kPaddedInputDimensions / (kSimdWidth * 2);
- const __m512i kOnes = _mm512_set1_epi16(1);
- const auto input_vector = reinterpret_cast<const __m512i*>(input);
-
- #elif defined(USE_AVX2)
- constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
- const __m256i kOnes = _mm256_set1_epi16(1);
- const auto input_vector = reinterpret_cast<const __m256i*>(input);
-
- #elif defined(USE_SSSE3)
- constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
- const __m128i kOnes = _mm_set1_epi16(1);
- const auto input_vector = reinterpret_cast<const __m128i*>(input);
-
- #elif defined(USE_NEON)
- constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
- const auto input_vector = reinterpret_cast<const int8x8_t*>(input);
- #endif
-
- for (IndexType i = 0; i < kOutputDimensions; ++i) {
- const IndexType offset = i * kPaddedInputDimensions;
-
- #if defined(USE_AVX512)
- __m512i sum = _mm512_setzero_si512();
- const auto row = reinterpret_cast<const __m512i*>(&weights_[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
-
- #if defined(__MINGW32__) || defined(__MINGW64__)
- __m512i product = _mm512_maddubs_epi16(_mm512_loadu_si512(&input_vector[j]), _mm512_load_si512(&row[j]));
- #else
- __m512i product = _mm512_maddubs_epi16(_mm512_load_si512(&input_vector[j]), _mm512_load_si512(&row[j]));
- #endif
-
- product = _mm512_madd_epi16(product, kOnes);
- sum = _mm512_add_epi32(sum, product);
- }
- output[i] = _mm512_reduce_add_epi32(sum) + biases_[i];
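+ // input points at the (padded) uint8 activations of the previous layer; output
+ // receives OutputDimensions int32 sums, i.e. output = biases + weights * input.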
+ void propagate(const InputType* input, OutputType* output) const {
- // Note: Changing kMaxSimdWidth from 32 to 64 breaks loading existing networks.
- // As a result kPaddedInputDimensions may not be an even multiple of 64(512bit)
- // and we have to do one more 256bit chunk.
- if (kPaddedInputDimensions != kNumChunks * kSimdWidth * 2)
+#if defined(USE_SSSE3)
+
+ if constexpr (OutputDimensions > 1)
{
- const auto iv_256 = reinterpret_cast<const __m256i*>(input);
- const auto row_256 = reinterpret_cast<const __m256i*>(&weights_[offset]);
- int j = kNumChunks * 2;
-
- #if defined(__MINGW32__) || defined(__MINGW64__) // See HACK comment below in AVX2.
- __m256i sum256 = _mm256_maddubs_epi16(_mm256_loadu_si256(&iv_256[j]), _mm256_load_si256(&row_256[j]));
- #else
- __m256i sum256 = _mm256_maddubs_epi16(_mm256_load_si256(&iv_256[j]), _mm256_load_si256(&row_256[j]));
- #endif
-
- sum256 = _mm256_madd_epi16(sum256, _mm256_set1_epi16(1));
- sum256 = _mm256_hadd_epi32(sum256, sum256);
- sum256 = _mm256_hadd_epi32(sum256, sum256);
- const __m128i lo = _mm256_extracti128_si256(sum256, 0);
- const __m128i hi = _mm256_extracti128_si256(sum256, 1);
- output[i] += _mm_cvtsi128_si32(lo) + _mm_cvtsi128_si32(hi);
- }
- #elif defined(USE_AVX2)
- __m256i sum = _mm256_setzero_si256();
- const auto row = reinterpret_cast<const __m256i*>(&weights_[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- __m256i product = _mm256_maddubs_epi16(
-
- #if defined(__MINGW32__) || defined(__MINGW64__)
- // HACK: Use _mm256_loadu_si256() instead of _mm256_load_si256. Because the binary
- // compiled with g++ in MSYS2 crashes here because the output memory is not aligned
- // even though alignas is specified.
- _mm256_loadu_si256
- #else
- _mm256_load_si256
- #endif
-
- (&input_vector[j]), _mm256_load_si256(&row[j]));
- product = _mm256_madd_epi16(product, kOnes);
- sum = _mm256_add_epi32(sum, product);
- }
- sum = _mm256_hadd_epi32(sum, sum);
- sum = _mm256_hadd_epi32(sum, sum);
- const __m128i lo = _mm256_extracti128_si256(sum, 0);
- const __m128i hi = _mm256_extracti128_si256(sum, 1);
- output[i] = _mm_cvtsi128_si32(lo) + _mm_cvtsi128_si32(hi) + biases_[i];
-
- #elif defined(USE_SSSE3)
- __m128i sum = _mm_cvtsi32_si128(biases_[i]);
- const auto row = reinterpret_cast<const __m128i*>(&weights_[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- __m128i product = _mm_maddubs_epi16(
- _mm_load_si128(&input_vector[j]), _mm_load_si128(&row[j]));
- product = _mm_madd_epi16(product, kOnes);
- sum = _mm_add_epi32(sum, product);
- }
- sum = _mm_hadd_epi32(sum, sum);
- sum = _mm_hadd_epi32(sum, sum);
- output[i] = _mm_cvtsi128_si32(sum);
-
- #elif defined(USE_NEON)
- int32x4_t sum = {biases_[i]};
- const auto row = reinterpret_cast<const int8x8_t*>(&weights_[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- int16x8_t product = vmull_s8(input_vector[j * 2], row[j * 2]);
- product = vmlal_s8(product, input_vector[j * 2 + 1], row[j * 2 + 1]);
- sum = vpadalq_s16(sum, product);
+ #if defined(USE_AVX512)
+ using vec_t = __m512i;
+ #define vec_setzero _mm512_setzero_si512
+ #define vec_set_32 _mm512_set1_epi32
+ #define vec_add_dpbusd_32 Simd::m512_add_dpbusd_epi32
+ #define vec_add_dpbusd_32x2 Simd::m512_add_dpbusd_epi32x2
+ #define vec_hadd Simd::m512_hadd
+ #elif defined(USE_AVX2)
+ using vec_t = __m256i;
+ #define vec_setzero _mm256_setzero_si256
+ #define vec_set_32 _mm256_set1_epi32
+ #define vec_add_dpbusd_32 Simd::m256_add_dpbusd_epi32
+ #define vec_add_dpbusd_32x2 Simd::m256_add_dpbusd_epi32x2
+ #define vec_hadd Simd::m256_hadd
+ #elif defined(USE_SSSE3)
+ using vec_t = __m128i;
+ #define vec_setzero _mm_setzero_si128
+ #define vec_set_32 _mm_set1_epi32
+ #define vec_add_dpbusd_32 Simd::m128_add_dpbusd_epi32
+ #define vec_add_dpbusd_32x2 Simd::m128_add_dpbusd_epi32x2
+ #define vec_hadd Simd::m128_hadd
+ #endif
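+ // The Simd::*_add_dpbusd_epi32x2 helpers used below accumulate, per 32-bit lane,
+ // the dot product of 4 uint8 inputs with 4 int8 weights from each of the two
+ // input/weight pairs (VNNI where available, maddubs/madd otherwise).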
+
+ static constexpr IndexType OutputSimdWidth = sizeof(vec_t) / sizeof(OutputType);
+
+ static_assert(OutputDimensions % OutputSimdWidth == 0);
+
+ constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 8) / 4;
+ constexpr IndexType NumRegs = OutputDimensions / OutputSimdWidth;
+
+ const auto input32 = reinterpret_cast<const std::int32_t*>(input);
+ const vec_t* biasvec = reinterpret_cast<const vec_t*>(biases);
+ vec_t acc[NumRegs];
+ for (IndexType k = 0; k < NumRegs; ++k)
+ acc[k] = biasvec[k];
+
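+ // Each 32-bit word of the input packs 4 uint8 activations. Broadcast it across a
+ // register and multiply it (dpbusd) against the matching block of permuted
+ // weights, two 4-byte chunks per iteration, accumulating into the NumRegs int32
+ // accumulator registers.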
+ for (IndexType i = 0; i < NumChunks; i += 2)
+ {
+ const vec_t in0 = vec_set_32(input32[i + 0]);
+ const vec_t in1 = vec_set_32(input32[i + 1]);
+ const auto col0 =
+ reinterpret_cast<const vec_t*>(&weights[(i + 0) * OutputDimensions * 4]);
+ const auto col1 =
+ reinterpret_cast<const vec_t*>(&weights[(i + 1) * OutputDimensions * 4]);
+ for (IndexType k = 0; k < NumRegs; ++k)
+ vec_add_dpbusd_32x2(acc[k], in0, col0[k], in1, col1[k]);
+ }
+
+ vec_t* outptr = reinterpret_cast<vec_t*>(output);
+ for (IndexType k = 0; k < NumRegs; ++k)
+ outptr[k] = acc[k];
+
+ #undef vec_setzero
+ #undef vec_set_32
+ #undef vec_add_dpbusd_32
+ #undef vec_add_dpbusd_32x2
+ #undef vec_hadd
}
- output[i] = sum[0] + sum[1] + sum[2] + sum[3];
+ else if constexpr (OutputDimensions == 1)
+ {
- #else
- OutputType sum = biases_[i];
- for (IndexType j = 0; j < kInputDimensions; ++j) {
- sum += weights_[offset + j] * input[j];
+ // We cannot use AVX512 for the last layer because there are only 32 inputs
+ // and the buffer is not padded to 64 elements.
+ #if defined(USE_AVX2)
+ using vec_t = __m256i;
+ #define vec_setzero _mm256_setzero_si256
+ #define vec_set_32 _mm256_set1_epi32
+ #define vec_add_dpbusd_32 Simd::m256_add_dpbusd_epi32
+ #define vec_add_dpbusd_32x2 Simd::m256_add_dpbusd_epi32x2
+ #define vec_hadd Simd::m256_hadd
+ #elif defined(USE_SSSE3)
+ using vec_t = __m128i;
+ #define vec_setzero _mm_setzero_si128
+ #define vec_set_32 _mm_set1_epi32
+ #define vec_add_dpbusd_32 Simd::m128_add_dpbusd_epi32
+ #define vec_add_dpbusd_32x2 Simd::m128_add_dpbusd_epi32x2
+ #define vec_hadd Simd::m128_hadd
+ #endif
+
+ const auto inputVector = reinterpret_cast<const vec_t*>(input);
+
+ static constexpr IndexType InputSimdWidth = sizeof(vec_t) / sizeof(InputType);
+
+ static_assert(PaddedInputDimensions % InputSimdWidth == 0);
+
+ constexpr IndexType NumChunks = PaddedInputDimensions / InputSimdWidth;
+ vec_t sum0 = vec_setzero();
+ const auto row0 = reinterpret_cast<const vec_t*>(&weights[0]);
+
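+ // Single output: one running vector accumulator over the whole padded input row,
+ // horizontally summed and combined with the bias at the end.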
+ for (int j = 0; j < int(NumChunks); ++j)
+ {
+ const vec_t in = inputVector[j];
+ vec_add_dpbusd_32(sum0, in, row0[j]);
+ }
+ output[0] = vec_hadd(sum0, biases[0]);
+
+ #undef vec_setzero
+ #undef vec_set_32
+ #undef vec_add_dpbusd_32
+ #undef vec_add_dpbusd_32x2
+ #undef vec_hadd
}
- output[i] = sum;
- #endif
-
- }
- return output;
+#else
+ // Use the old implementation for other architectures.
+ affine_transform_non_ssse3<InputDimensions, PaddedInputDimensions, OutputDimensions>(
+ output, weights, biases, input);
+#endif
}
private:
- using BiasType = OutputType;
+ using BiasType = OutputType;
using WeightType = std::int8_t;
- PreviousLayer previous_layer_;
-
- alignas(kCacheLineSize) BiasType biases_[kOutputDimensions];
- alignas(kCacheLineSize)
- WeightType weights_[kOutputDimensions * kPaddedInputDimensions];
- };
+ alignas(CacheLineSize) BiasType biases[OutputDimensions];
+ alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
+};
-} // namespace Eval::NNUE::Layers
+} // namespace Stockfish::Eval::NNUE::Layers
-#endif // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
+#endif // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED