diff --git a/src/nnue/layers/affine_transform.h b/src/nnue/layers/affine_transform.h
index b585bc87..9a3b778e 100644
--- a/src/nnue/layers/affine_transform.h
+++ b/src/nnue/layers/affine_transform.h
@@ -1,6 +1,6 @@
 /*
   Stockfish, a UCI chess playing engine derived from Glaurung 2.1
-  Copyright (C) 2004-2020 The Stockfish developers (see AUTHORS file)
+  Copyright (C) 2004-2021 The Stockfish developers (see AUTHORS file)
 
   Stockfish is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
@@ -24,10 +24,10 @@
 #include <iostream>
 #include "../nnue_common.h"
 
-namespace Eval::NNUE::Layers {
+namespace Stockfish::Eval::NNUE::Layers {
 
   // Affine transformation layer
-  template <typename PreviousLayer, IndexType OutputDimensions>
+  template <typename PreviousLayer, IndexType OutDims>
   class AffineTransform {
    public:
     // Input/output type
@@ -36,166 +36,387 @@ namespace Eval::NNUE::Layers {
     static_assert(std::is_same<InputType, std::uint8_t>::value, "");
 
     // Number of input/output dimensions
-    static constexpr IndexType kInputDimensions =
-        PreviousLayer::kOutputDimensions;
-    static constexpr IndexType kOutputDimensions = OutputDimensions;
-    static constexpr IndexType kPaddedInputDimensions =
-        CeilToMultiple<IndexType>(kInputDimensions, kMaxSimdWidth);
+    static constexpr IndexType InputDimensions =
+        PreviousLayer::OutputDimensions;
+    static constexpr IndexType OutputDimensions = OutDims;
+    static constexpr IndexType PaddedInputDimensions =
+        ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
+#if defined (USE_AVX512)
+    static constexpr const IndexType OutputSimdWidth = SimdWidth / 2;
+#elif defined (USE_SSSE3)
+    static constexpr const IndexType OutputSimdWidth = SimdWidth / 4;
+#endif
 
     // Size of forward propagation buffer used in this layer
-    static constexpr std::size_t kSelfBufferSize =
-        CeilToMultiple(kOutputDimensions * sizeof(OutputType), kCacheLineSize);
+    static constexpr std::size_t SelfBufferSize =
+        ceil_to_multiple(OutputDimensions * sizeof(OutputType), CacheLineSize);
 
     // Size of the forward propagation buffer used from the input layer to this layer
-    static constexpr std::size_t kBufferSize =
-        PreviousLayer::kBufferSize + kSelfBufferSize;
+    static constexpr std::size_t BufferSize =
+        PreviousLayer::BufferSize + SelfBufferSize;
 
     // Hash value embedded in the evaluation file
-    static constexpr std::uint32_t GetHashValue() {
-      std::uint32_t hash_value = 0xCC03DAE4u;
-      hash_value += kOutputDimensions;
-      hash_value ^= PreviousLayer::GetHashValue() >> 1;
-      hash_value ^= PreviousLayer::GetHashValue() << 31;
-      return hash_value;
+    static constexpr std::uint32_t get_hash_value() {
+      std::uint32_t hashValue = 0xCC03DAE4u;
+      hashValue += OutputDimensions;
+      hashValue ^= PreviousLayer::get_hash_value() >> 1;
+      hashValue ^= PreviousLayer::get_hash_value() << 31;
+      return hashValue;
     }
 
-    // Read network parameters
-    bool ReadParameters(std::istream& stream) {
-      if (!previous_layer_.ReadParameters(stream)) return false;
-      stream.read(reinterpret_cast<char*>(biases_),
-                  kOutputDimensions * sizeof(BiasType));
-      stream.read(reinterpret_cast<char*>(weights_),
-                  kOutputDimensions * kPaddedInputDimensions *
-                  sizeof(WeightType));
+    // Read network parameters
+    bool read_parameters(std::istream& stream) {
+      if (!previousLayer.read_parameters(stream)) return false;
+      for (std::size_t i = 0; i < OutputDimensions; ++i)
+        biases[i] = read_little_endian<BiasType>(stream);
+      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+#if !defined (USE_SSSE3)
+        weights[i] = read_little_endian<WeightType>(stream);
+#else
+        weights[
+          (i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
+          i / PaddedInputDimensions * 4 +
+          i % 4
+        ] = read_little_endian<WeightType>(stream);
+#endif
+
+      return !stream.fail();
+    }
+
+    // Write network parameters
+    bool write_parameters(std::ostream& stream) const {
+      if (!previousLayer.write_parameters(stream)) return false;
+      for (std::size_t i = 0; i < OutputDimensions; ++i)
+        write_little_endian<BiasType>(stream, biases[i]);
+#if !defined (USE_SSSE3)
+      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+        write_little_endian<WeightType>(stream, weights[i]);
+#else
+      std::unique_ptr<WeightType[]> unscrambledWeights = std::make_unique<WeightType[]>(OutputDimensions * PaddedInputDimensions);
+      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i) {
+        unscrambledWeights[i] =
+          weights[
+            (i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
+            i / PaddedInputDimensions * 4 +
+            i % 4
+          ];
+      }
+
+      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+        write_little_endian<WeightType>(stream, unscrambledWeights[i]);
+#endif
       return !stream.fail();
     }
 
     // Forward propagation
-    const OutputType* Propagate(
-        const TransformedFeatureType* transformed_features, char* buffer) const {
-      const auto input = previous_layer_.Propagate(
-          transformed_features, buffer + kSelfBufferSize);
-      const auto output = reinterpret_cast<OutputType*>(buffer);
+    const OutputType* propagate(
+        const TransformedFeatureType* transformedFeatures, char* buffer) const {
+      const auto input = previousLayer.propagate(
+          transformedFeatures, buffer + SelfBufferSize);
+
+#if defined (USE_AVX512)
+
+      [[maybe_unused]] const __m512i Ones512 = _mm512_set1_epi16(1);
+
+      [[maybe_unused]] auto m512_hadd = [](__m512i sum, int bias) -> int {
+        return _mm512_reduce_add_epi32(sum) + bias;
+      };
+
+      [[maybe_unused]] auto m512_add_dpbusd_epi32 = [=](__m512i& acc, __m512i a, __m512i b) {
+#if defined (USE_VNNI)
+        acc = _mm512_dpbusd_epi32(acc, a, b);
+#else
+        __m512i product0 = _mm512_maddubs_epi16(a, b);
+        product0 = _mm512_madd_epi16(product0, Ones512);
+        acc = _mm512_add_epi32(acc, product0);
+#endif
+      };
+
+      [[maybe_unused]] auto m512_add_dpbusd_epi32x4 = [=](__m512i& acc, __m512i a0, __m512i b0, __m512i a1, __m512i b1,
+                                                          __m512i a2, __m512i b2, __m512i a3, __m512i b3) {
+#if defined (USE_VNNI)
+        acc = _mm512_dpbusd_epi32(acc, a0, b0);
+        acc = _mm512_dpbusd_epi32(acc, a1, b1);
+        acc = _mm512_dpbusd_epi32(acc, a2, b2);
+        acc = _mm512_dpbusd_epi32(acc, a3, b3);
+#else
+        __m512i product0 = _mm512_maddubs_epi16(a0, b0);
+        __m512i product1 = _mm512_maddubs_epi16(a1, b1);
+        __m512i product2 = _mm512_maddubs_epi16(a2, b2);
+        __m512i product3 = _mm512_maddubs_epi16(a3, b3);
+        product0 = _mm512_adds_epi16(product0, product1);
+        product0 = _mm512_madd_epi16(product0, Ones512);
+        product2 = _mm512_adds_epi16(product2, product3);
+        product2 = _mm512_madd_epi16(product2, Ones512);
+        acc = _mm512_add_epi32(acc, _mm512_add_epi32(product0, product2));
+#endif
+      };
+
+#endif
+#if defined (USE_AVX2)
+
+      [[maybe_unused]] const __m256i Ones256 = _mm256_set1_epi16(1);
+
+      [[maybe_unused]] auto m256_hadd = [](__m256i sum, int bias) -> int {
+        __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extracti128_si256(sum, 1));
+        sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_BADC));
+        sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_CDAB));
+        return _mm_cvtsi128_si32(sum128) + bias;
+      };
+
+      [[maybe_unused]] auto m256_add_dpbusd_epi32 = [=](__m256i& acc, __m256i a, __m256i b) {
+#if defined (USE_VNNI)
+        acc = _mm256_dpbusd_epi32(acc, a, b);
+#else
+        __m256i product0 = _mm256_maddubs_epi16(a, b);
+        product0 = _mm256_madd_epi16(product0, Ones256);
+        acc = _mm256_add_epi32(acc, product0);
+#endif
+      };
+
+      [[maybe_unused]] auto m256_add_dpbusd_epi32x4 = [=](__m256i& acc, __m256i a0, __m256i b0, __m256i a1, __m256i b1,
+                                                          __m256i a2, __m256i b2, __m256i a3, __m256i b3) {
+#if defined (USE_VNNI)
+        acc = _mm256_dpbusd_epi32(acc, a0, b0);
+        acc = _mm256_dpbusd_epi32(acc, a1, b1);
+        acc = _mm256_dpbusd_epi32(acc, a2, b2);
+        acc = _mm256_dpbusd_epi32(acc, a3, b3);
+#else
+        __m256i product0 = _mm256_maddubs_epi16(a0, b0);
+        __m256i product1 = _mm256_maddubs_epi16(a1, b1);
+        __m256i product2 = _mm256_maddubs_epi16(a2, b2);
+        __m256i product3 = _mm256_maddubs_epi16(a3, b3);
+        product0 = _mm256_adds_epi16(product0, product1);
+        product0 = _mm256_madd_epi16(product0, Ones256);
+        product2 = _mm256_adds_epi16(product2, product3);
+        product2 = _mm256_madd_epi16(product2, Ones256);
+        acc = _mm256_add_epi32(acc, _mm256_add_epi32(product0, product2));
+#endif
+      };
+
+#endif
+#if defined (USE_SSSE3)
+
+      [[maybe_unused]] const __m128i Ones128 = _mm_set1_epi16(1);
+
+      [[maybe_unused]] auto m128_hadd = [](__m128i sum, int bias) -> int {
+        sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0x4E)); //_MM_PERM_BADC
+        sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0xB1)); //_MM_PERM_CDAB
+        return _mm_cvtsi128_si32(sum) + bias;
+      };
+
+      [[maybe_unused]] auto m128_add_dpbusd_epi32 = [=](__m128i& acc, __m128i a, __m128i b) {
+        __m128i product0 = _mm_maddubs_epi16(a, b);
+        product0 = _mm_madd_epi16(product0, Ones128);
+        acc = _mm_add_epi32(acc, product0);
+      };
+
+      [[maybe_unused]] auto m128_add_dpbusd_epi32x4 = [=](__m128i& acc, __m128i a0, __m128i b0, __m128i a1, __m128i b1,
+                                                          __m128i a2, __m128i b2, __m128i a3, __m128i b3) {
+        __m128i product0 = _mm_maddubs_epi16(a0, b0);
+        __m128i product1 = _mm_maddubs_epi16(a1, b1);
+        __m128i product2 = _mm_maddubs_epi16(a2, b2);
+        __m128i product3 = _mm_maddubs_epi16(a3, b3);
+        product0 = _mm_adds_epi16(product0, product1);
+        product0 = _mm_madd_epi16(product0, Ones128);
+        product2 = _mm_adds_epi16(product2, product3);
+        product2 = _mm_madd_epi16(product2, Ones128);
+        acc = _mm_add_epi32(acc, _mm_add_epi32(product0, product2));
+      };
+
+#endif
+
+#if defined (USE_AVX512)
+      using vec_t = __m512i;
+      #define vec_setzero _mm512_setzero_si512
+      #define vec_set_32 _mm512_set1_epi32
+      auto& vec_add_dpbusd_32 = m512_add_dpbusd_epi32;
+      auto& vec_add_dpbusd_32x4 = m512_add_dpbusd_epi32x4;
+      auto& vec_hadd = m512_hadd;
+#elif defined (USE_AVX2)
+      using vec_t = __m256i;
+      #define vec_setzero _mm256_setzero_si256
+      #define vec_set_32 _mm256_set1_epi32
+      auto& vec_add_dpbusd_32 = m256_add_dpbusd_epi32;
+      auto& vec_add_dpbusd_32x4 = m256_add_dpbusd_epi32x4;
+      auto& vec_hadd = m256_hadd;
+#elif defined (USE_SSSE3)
+      using vec_t = __m128i;
+      #define vec_setzero _mm_setzero_si128
+      #define vec_set_32 _mm_set1_epi32
+      auto& vec_add_dpbusd_32 = m128_add_dpbusd_epi32;
+      auto& vec_add_dpbusd_32x4 = m128_add_dpbusd_epi32x4;
+      auto& vec_hadd = m128_hadd;
+#endif
+
+#if defined (USE_SSSE3)
+      // Different layout, we process 4 inputs at a time, always.
+      static_assert(InputDimensions % 4 == 0);
 
-  #if defined(USE_AVX512)
-      constexpr IndexType kNumChunks = kPaddedInputDimensions / (kSimdWidth * 2);
-      const __m512i kOnes = _mm512_set1_epi16(1);
-      const auto input_vector = reinterpret_cast<const __m512i*>(input);
-
-  #elif defined(USE_AVX2)
-      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
-      const __m256i kOnes = _mm256_set1_epi16(1);
-      const auto input_vector = reinterpret_cast<const __m256i*>(input);
-
-  #elif defined(USE_SSSE3)
-      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
-      const __m128i kOnes = _mm_set1_epi16(1);
-      const auto input_vector = reinterpret_cast<const __m128i*>(input);
-
-  #elif defined(USE_NEON)
-      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
-      const auto input_vector = reinterpret_cast<const int8x8_t*>(input);
-  #endif
-
-      for (IndexType i = 0; i < kOutputDimensions; ++i) {
-        const IndexType offset = i * kPaddedInputDimensions;
-
-  #if defined(USE_AVX512)
-        __m512i sum = _mm512_setzero_si512();
-        const auto row = reinterpret_cast<const __m512i*>(&weights_[offset]);
-        for (IndexType j = 0; j < kNumChunks; ++j) {
-
-  #if defined(__MINGW32__) || defined(__MINGW64__)
-          __m512i product = _mm512_maddubs_epi16(_mm512_loadu_si512(&input_vector[j]), _mm512_load_si512(&row[j]));
-  #else
-          __m512i product = _mm512_maddubs_epi16(_mm512_load_si512(&input_vector[j]), _mm512_load_si512(&row[j]));
-  #endif
-
-          product = _mm512_madd_epi16(product, kOnes);
-          sum = _mm512_add_epi32(sum, product);
-        }
-        output[i] = _mm512_reduce_add_epi32(sum) + biases_[i];
-
-        // Note: Changing kMaxSimdWidth from 32 to 64 breaks loading existing networks.
-        // As a result kPaddedInputDimensions may not be an even multiple of 64(512bit)
-        // and we have to do one more 256bit chunk.
-        if (kPaddedInputDimensions != kNumChunks * kSimdWidth * 2)
-        {
-          const auto iv_256  = reinterpret_cast<const __m256i*>(input);
-          const auto row_256 = reinterpret_cast<const __m256i*>(&weights_[offset]);
-          int j = kNumChunks * 2;
-
-  #if defined(__MINGW32__) || defined(__MINGW64__)  // See HACK comment below in AVX2.
-          __m256i sum256 = _mm256_maddubs_epi16(_mm256_loadu_si256(&iv_256[j]), _mm256_load_si256(&row_256[j]));
-  #else
-          __m256i sum256 = _mm256_maddubs_epi16(_mm256_load_si256(&iv_256[j]), _mm256_load_si256(&row_256[j]));
-  #endif
-
-          sum256 = _mm256_madd_epi16(sum256, _mm256_set1_epi16(1));
-          sum256 = _mm256_hadd_epi32(sum256, sum256);
-          sum256 = _mm256_hadd_epi32(sum256, sum256);
-          const __m128i lo = _mm256_extracti128_si256(sum256, 0);
-          const __m128i hi = _mm256_extracti128_si256(sum256, 1);
-          output[i] += _mm_cvtsi128_si32(lo) + _mm_cvtsi128_si32(hi);
-        }
+      const auto output = reinterpret_cast<OutputType*>(buffer);
+      const auto inputVector = reinterpret_cast<const vec_t*>(input);
+
+      static_assert(OutputDimensions % OutputSimdWidth == 0 || OutputDimensions == 1);
+
+      // OutputDimensions is either 1 or a multiple of SimdWidth
+      // because then it is also an input dimension.
+      if constexpr (OutputDimensions % OutputSimdWidth == 0)
+      {
+          constexpr IndexType NumChunks = InputDimensions / 4;
+
+          const auto input32 = reinterpret_cast<const std::int32_t*>(input);
+          vec_t* outptr = reinterpret_cast<vec_t*>(output);
+          std::memcpy(output, biases, OutputDimensions * sizeof(OutputType));
+
+          for (int i = 0; i < (int)NumChunks - 3; i += 4)
+          {
+              const vec_t in0 = vec_set_32(input32[i + 0]);
+              const vec_t in1 = vec_set_32(input32[i + 1]);
+              const vec_t in2 = vec_set_32(input32[i + 2]);
+              const vec_t in3 = vec_set_32(input32[i + 3]);
+              const auto col0 = reinterpret_cast<const vec_t*>(&weights[(i + 0) * OutputDimensions * 4]);
+              const auto col1 = reinterpret_cast<const vec_t*>(&weights[(i + 1) * OutputDimensions * 4]);
+              const auto col2 = reinterpret_cast<const vec_t*>(&weights[(i + 2) * OutputDimensions * 4]);
+              const auto col3 = reinterpret_cast<const vec_t*>(&weights[(i + 3) * OutputDimensions * 4]);
+              for (int j = 0; j * OutputSimdWidth < OutputDimensions; ++j)
+                  vec_add_dpbusd_32x4(outptr[j], in0, col0[j], in1, col1[j], in2, col2[j], in3, col3[j]);
+          }
+      }
+      else if constexpr (OutputDimensions == 1)
+      {
+#if defined (USE_AVX512)
+          if constexpr (PaddedInputDimensions % (SimdWidth * 2) != 0)
+          {
+              constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
+              const auto inputVector256 = reinterpret_cast<const __m256i*>(input);
+
+              __m256i sum0 = _mm256_setzero_si256();
+              const auto row0 = reinterpret_cast<const __m256i*>(&weights[0]);
+
+              for (int j = 0; j < (int)NumChunks; ++j)
+              {
+                  const __m256i in = inputVector256[j];
+                  m256_add_dpbusd_epi32(sum0, in, row0[j]);
+              }
+              output[0] = m256_hadd(sum0, biases[0]);
+          }
+          else
+#endif
+          {
+#if defined (USE_AVX512)
+              constexpr IndexType NumChunks = PaddedInputDimensions / (SimdWidth * 2);
+#else
+              constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
+#endif
+              vec_t sum0 = vec_setzero();
+              const auto row0 = reinterpret_cast<const vec_t*>(&weights[0]);
+
+              for (int j = 0; j < (int)NumChunks; ++j)
+              {
+                  const vec_t in = inputVector[j];
+                  vec_add_dpbusd_32(sum0, in, row0[j]);
+              }
+              output[0] = vec_hadd(sum0, biases[0]);
+          }
+      }
 
-  #elif defined(USE_AVX2)
-        __m256i sum = _mm256_setzero_si256();
-        const auto row = reinterpret_cast<const __m256i*>(&weights_[offset]);
-        for (IndexType j = 0; j < kNumChunks; ++j) {
-          __m256i product = _mm256_maddubs_epi16(
-
-  #if defined(__MINGW32__) || defined(__MINGW64__)
-            // HACK: Use _mm256_loadu_si256() instead of _mm256_load_si256. Because the binary
-            //       compiled with g++ in MSYS2 crashes here because the output memory is not aligned
-            //       even though alignas is specified.
-            _mm256_loadu_si256
-  #else
-            _mm256_load_si256
-  #endif
-
-            (&input_vector[j]), _mm256_load_si256(&row[j]));
-          product = _mm256_madd_epi16(product, kOnes);
-          sum = _mm256_add_epi32(sum, product);
+#else
+
+// Use old implementation for the other architectures.
+
+      auto output = reinterpret_cast<OutputType*>(buffer);
+
+#if defined(USE_SSE2)
+      // At least a multiple of 16, with SSE2.
+      static_assert(InputDimensions % SimdWidth == 0);
+      constexpr IndexType NumChunks = InputDimensions / SimdWidth;
+      const __m128i Zeros = _mm_setzero_si128();
+      const auto inputVector = reinterpret_cast<const __m128i*>(input);
+
+#elif defined(USE_MMX)
+      static_assert(InputDimensions % SimdWidth == 0);
+      constexpr IndexType NumChunks = InputDimensions / SimdWidth;
+      const __m64 Zeros = _mm_setzero_si64();
+      const auto inputVector = reinterpret_cast<const __m64*>(input);
+
+#elif defined(USE_NEON)
+      static_assert(InputDimensions % SimdWidth == 0);
+      constexpr IndexType NumChunks = InputDimensions / SimdWidth;
+      const auto inputVector = reinterpret_cast<const int8x8_t*>(input);
+#endif
+
+      for (IndexType i = 0; i < OutputDimensions; ++i) {
+        const IndexType offset = i * PaddedInputDimensions;
+
+#if defined(USE_SSE2)
+        __m128i sumLo = _mm_cvtsi32_si128(biases[i]);
+        __m128i sumHi = Zeros;
+        const auto row = reinterpret_cast<const __m128i*>(&weights[offset]);
+        for (IndexType j = 0; j < NumChunks; ++j) {
+          __m128i row_j = _mm_load_si128(&row[j]);
+          __m128i input_j = _mm_load_si128(&inputVector[j]);
+          __m128i extendedRowLo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
+          __m128i extendedRowHi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
+          __m128i extendedInputLo = _mm_unpacklo_epi8(input_j, Zeros);
+          __m128i extendedInputHi = _mm_unpackhi_epi8(input_j, Zeros);
+          __m128i productLo = _mm_madd_epi16(extendedRowLo, extendedInputLo);
+          __m128i productHi = _mm_madd_epi16(extendedRowHi, extendedInputHi);
+          sumLo = _mm_add_epi32(sumLo, productLo);
+          sumHi = _mm_add_epi32(sumHi, productHi);
         }
-        sum = _mm256_hadd_epi32(sum, sum);
-        sum = _mm256_hadd_epi32(sum, sum);
-        const __m128i lo = _mm256_extracti128_si256(sum, 0);
-        const __m128i hi = _mm256_extracti128_si256(sum, 1);
-        output[i] = _mm_cvtsi128_si32(lo) + _mm_cvtsi128_si32(hi) + biases_[i];
-
-  #elif defined(USE_SSSE3)
-        __m128i sum = _mm_cvtsi32_si128(biases_[i]);
-        const auto row = reinterpret_cast<const __m128i*>(&weights_[offset]);
-        for (IndexType j = 0; j < kNumChunks; ++j) {
-          __m128i product = _mm_maddubs_epi16(
-              _mm_load_si128(&input_vector[j]), _mm_load_si128(&row[j]));
-          product = _mm_madd_epi16(product, kOnes);
-          sum = _mm_add_epi32(sum, product);
-        }
-        sum = _mm_hadd_epi32(sum, sum);
-        sum = _mm_hadd_epi32(sum, sum);
+        __m128i sum = _mm_add_epi32(sumLo, sumHi);
+        __m128i sumHigh_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
+        sum = _mm_add_epi32(sum, sumHigh_64);
+        __m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
+        sum = _mm_add_epi32(sum, sum_second_32);
         output[i] = _mm_cvtsi128_si32(sum);
 
-  #elif defined(USE_NEON)
-        int32x4_t sum = {biases_[i]};
-        const auto row = reinterpret_cast<const int8x8_t*>(&weights_[offset]);
-        for (IndexType j = 0; j < kNumChunks; ++j) {
-          int16x8_t product = vmull_s8(input_vector[j * 2], row[j * 2]);
-          product = vmlal_s8(product, input_vector[j * 2 + 1], row[j * 2 + 1]);
+#elif defined(USE_MMX)
+        __m64 sumLo = _mm_cvtsi32_si64(biases[i]);
+        __m64 sumHi = Zeros;
+        const auto row = reinterpret_cast<const __m64*>(&weights[offset]);
+        for (IndexType j = 0; j < NumChunks; ++j) {
+          __m64 row_j = row[j];
+          __m64 input_j = inputVector[j];
+          __m64 extendedRowLo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
+          __m64 extendedRowHi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
+          __m64 extendedInputLo = _mm_unpacklo_pi8(input_j, Zeros);
+          __m64 extendedInputHi = _mm_unpackhi_pi8(input_j, Zeros);
+          __m64 productLo = _mm_madd_pi16(extendedRowLo, extendedInputLo);
+          __m64 productHi = _mm_madd_pi16(extendedRowHi, extendedInputHi);
+          sumLo = _mm_add_pi32(sumLo, productLo);
+          sumHi = _mm_add_pi32(sumHi, productHi);
+        }
+        __m64 sum = _mm_add_pi32(sumLo, sumHi);
+        sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
+        output[i] = _mm_cvtsi64_si32(sum);
+
+#elif defined(USE_NEON)
+        int32x4_t sum = {biases[i]};
+        const auto row = reinterpret_cast<const int8x8_t*>(&weights[offset]);
+        for (IndexType j = 0; j < NumChunks; ++j) {
+          int16x8_t product = vmull_s8(inputVector[j * 2], row[j * 2]);
+          product = vmlal_s8(product, inputVector[j * 2 + 1], row[j * 2 + 1]);
           sum = vpadalq_s16(sum, product);
         }
         output[i] = sum[0] + sum[1] + sum[2] + sum[3];
 
-  #else
-        OutputType sum = biases_[i];
-        for (IndexType j = 0; j < kInputDimensions; ++j) {
-          sum += weights_[offset + j] * input[j];
+#else
+        OutputType sum = biases[i];
+        for (IndexType j = 0; j < InputDimensions; ++j) {
+          sum += weights[offset + j] * input[j];
         }
         output[i] = sum;
 
-  #endif
+#endif
 
       }
+#if defined(USE_MMX)
+      _mm_empty();
+#endif
+
+#endif
+
       return output;
     }
 
@@ -203,13 +424,12 @@ namespace Eval::NNUE::Layers {
     using BiasType = OutputType;
     using WeightType = std::int8_t;
 
-    PreviousLayer previous_layer_;
+    PreviousLayer previousLayer;
 
-    alignas(kCacheLineSize) BiasType biases_[kOutputDimensions];
-    alignas(kCacheLineSize)
-        WeightType weights_[kOutputDimensions * kPaddedInputDimensions];
+    alignas(CacheLineSize) BiasType biases[OutputDimensions];
+    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
   };
 
-}  // namespace Eval::NNUE::Layers
+}  // namespace Stockfish::Eval::NNUE::Layers
 
 #endif // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
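
The scrambled weight layout written by read_parameters in this diff exists so that the SSSE3+ propagate path can broadcast one 32-bit group of four input bytes and read the matching weights for every output row contiguously. A minimal standalone sketch of that index mapping, assuming a toy 8x32 layer and a hypothetical helper name (not part of the patch), which verifies the mapping is a permutation:

#include <cassert>
#include <cstddef>
#include <vector>

// Maps a linear (row-major) weight index i to its scrambled slot, mirroring
// the expression in read_parameters: with row = i / paddedInDims and
// col = i % paddedInDims, the destination is ordered by (col / 4, row, col % 4).
std::size_t scrambled_index(std::size_t i, std::size_t outDims, std::size_t paddedInDims) {
    return (i / 4) % (paddedInDims / 4) * outDims * 4
         + i / paddedInDims * 4
         + i % 4;
}

int main() {
    constexpr std::size_t OutDims = 8, PaddedInDims = 32;  // toy dimensions
    std::vector<int> hits(OutDims * PaddedInDims, 0);
    for (std::size_t i = 0; i < OutDims * PaddedInDims; ++i)
        ++hits[scrambled_index(i, OutDims, PaddedInDims)];
    for (int h : hits)
        assert(h == 1);  // every destination slot is hit exactly once
    return 0;
}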
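
The m128/m256/m512_add_dpbusd_epi32 lambdas emulate the VNNI dpbusd instruction on hardware without it: each 32-bit lane accumulates the dot product of four unsigned input bytes with four signed weight bytes. A scalar model of a single lane on the non-VNNI path (function name and layout are illustrative, not from the patch):

#include <algorithm>
#include <cstdint>

std::int32_t add_dpbusd_lane(std::int32_t acc,
                             const std::uint8_t a[4], const std::int8_t b[4]) {
    // maddubs step: adjacent u8*i8 products are summed pairwise with
    // saturation to 16 bits. Real dpbusd accumulates without this saturation,
    // so the two paths can differ in principle when a pair sum overflows int16.
    auto sat16 = [](int v) { return std::int16_t(std::clamp(v, -32768, 32767)); };
    std::int16_t lo = sat16(a[0] * b[0] + a[1] * b[1]);
    std::int16_t hi = sat16(a[2] * b[2] + a[3] * b[3]);
    // madd-with-ones step: widen both 16-bit sums to 32 bits and accumulate.
    return acc + lo + hi;
}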
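
The SSE2 and MMX fallbacks have no unsigned-by-signed multiply-add, so they widen the signed 8-bit weights to 16 bits first, using unpack-with-self plus an arithmetic right shift rather than a dedicated sign-extension instruction (none exists before SSE4.1). A scalar model of the trick (illustrative, not from the patch; assumes the compiler implements right shift of a negative value arithmetically, which mainstream compilers do and C++20 guarantees):

#include <cassert>
#include <cstdint>

std::int16_t widen_like_sse2(std::int8_t w) {
    std::uint8_t b = std::uint8_t(w);
    // _mm_unpacklo_epi8(row, row): each byte lands in both halves of a 16-bit lane.
    std::int16_t lane = std::int16_t(std::uint16_t(std::uint16_t(b) << 8 | b));
    // _mm_srai_epi16(x, 8): arithmetic shift leaves the sign-extended weight.
    return std::int16_t(lane >> 8);
}

int main() {
    for (int v = -128; v <= 127; ++v)
        assert(widen_like_sse2(std::int8_t(v)) == std::int16_t(v));
    return 0;
}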