X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=src%2Fnnue%2Flayers%2Faffine_transform.h;h=22451915ba1eb2f547c823c52c95c703d5ebdba1;hb=cb9c2594fcedc881ae8f8bfbfdf130cf89840e4c;hp=0e0515f932a0773cc82f72c1620bc0a1afe5e5eb;hpb=c7f0a768cb9d5972861baae0f215d69f9e86a626;p=stockfish

diff --git a/src/nnue/layers/affine_transform.h b/src/nnue/layers/affine_transform.h
index 0e0515f9..22451915 100644
--- a/src/nnue/layers/affine_transform.h
+++ b/src/nnue/layers/affine_transform.h
@@ -1,6 +1,6 @@
 /*
   Stockfish, a UCI chess playing engine derived from Glaurung 2.1
-  Copyright (C) 2004-2020 The Stockfish developers (see AUTHORS file)
+  Copyright (C) 2004-2022 The Stockfish developers (see AUTHORS file)
 
   Stockfish is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
@@ -22,723 +22,505 @@
 #define NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
 
 #include <iostream>
+#include <algorithm>
+#include <type_traits>
 #include "../nnue_common.h"
+#include "../../simd.h"
 
-namespace Eval::NNUE::Layers {
-
-  // Affine transformation layer
-  template <typename PreviousLayer, IndexType OutputDimensions>
-  class AffineTransform {
-   public:
-    // Input/output type
-    using InputType = typename PreviousLayer::OutputType;
-    using OutputType = std::int32_t;
-    static_assert(std::is_same<InputType, std::uint8_t>::value, "");
-
-    // Number of input/output dimensions
-    static constexpr IndexType kInputDimensions =
-        PreviousLayer::kOutputDimensions;
-    static constexpr IndexType kOutputDimensions = OutputDimensions;
-    static constexpr IndexType kPaddedInputDimensions =
-        CeilToMultiple<IndexType>(kInputDimensions, kMaxSimdWidth);
-
-    // Size of forward propagation buffer used in this layer
-    static constexpr std::size_t kSelfBufferSize =
-        CeilToMultiple(kOutputDimensions * sizeof(OutputType), kCacheLineSize);
-
-    // Size of the forward propagation buffer used from the input layer to this layer
-    static constexpr std::size_t kBufferSize =
-        PreviousLayer::kBufferSize + kSelfBufferSize;
+/*
+  This file contains the definition of a fully connected layer (aka affine transform).
+  Two approaches are employed, depending on the size of the transform.
+
+  Approach 1:
+    - used when PaddedInputDimensions >= 128
+    - uses AVX512 if possible
+    - processes inputs in batches of 2 * InputSimdWidth
+      - so in batches of 128 for AVX512
+    - the weight blocks of size InputSimdWidth are transposed so that
+      access is sequential
+    - N columns of the weight matrix are processed at a time, where N
+      depends on the architecture (the number of available registers)
+    - accumulate + hadd is used
+
+  Approach 2:
+    - used when PaddedInputDimensions < 128
+    - does not use AVX512
+    - the expected use case is PaddedInputDimensions == 32 and InputDimensions <= 32
+      - which is why AVX512 is hard to implement for it
+    - the expected use case is small layers
+    - not as well optimized as approach 1
+    - inputs are processed in chunks of 4, with the weights transposed to match
+    - accumulation happens directly into int32s
+*/
 
-    // Hash value embedded in the evaluation file
-    static constexpr std::uint32_t GetHashValue() {
-      std::uint32_t hash_value = 0xCC03DAE4u;
-      hash_value += kOutputDimensions;
-      hash_value ^= PreviousLayer::GetHashValue() >> 1;
-      hash_value ^= PreviousLayer::GetHashValue() << 31;
-      return hash_value;
-    }
+namespace Stockfish::Eval::NNUE::Layers {
+
+// Fallback implementation for older/other architectures.
+// Identical for both approaches. Requires the input to be padded to at least 16 values.
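+//
+// As a reference point, the transform computed here is simply
+//   output[i] = biases[i] + sum_j weights[i * PaddedInputDimensions + j] * input[j]
+// over int8 weights and uint8 inputs with int32 accumulation. A minimal scalar
+// sketch (it matches the generic branch at the bottom of this function) is:
+//
+//   for (IndexType i = 0; i < OutputDimensions; ++i) {
+//       std::int32_t sum = biases[i];
+//       for (IndexType j = 0; j < InputDimensions; ++j)
+//           sum += weights[i * PaddedInputDimensions + j] * input[j];
+//       output[i] = sum;
+//   }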
+#if !defined(USE_SSSE3)
+  template <IndexType InputDimensions, IndexType PaddedInputDimensions, IndexType OutputDimensions>
+  static void affine_transform_non_ssse3(std::int32_t* output, const std::int8_t* weights, const std::int32_t* biases, const std::uint8_t* input)
+  {
+# if defined(USE_SSE2)
+    // At least a multiple of 16, with SSE2.
+    constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 16) / 16;
+    const __m128i Zeros = _mm_setzero_si128();
+    const auto inputVector = reinterpret_cast<const __m128i*>(input);
+
+# elif defined(USE_MMX)
+    constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 8) / 8;
+    const __m64 Zeros = _mm_setzero_si64();
+    const auto inputVector = reinterpret_cast<const __m64*>(input);
+
+# elif defined(USE_NEON)
+    constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 16) / 16;
+    const auto inputVector = reinterpret_cast<const int8x8_t*>(input);
+# endif
+
+    for (IndexType i = 0; i < OutputDimensions; ++i) {
+      const IndexType offset = i * PaddedInputDimensions;
+
+# if defined(USE_SSE2)
+      __m128i sumLo = _mm_cvtsi32_si128(biases[i]);
+      __m128i sumHi = Zeros;
+      const auto row = reinterpret_cast<const __m128i*>(&weights[offset]);
+      for (IndexType j = 0; j < NumChunks; ++j) {
+        __m128i row_j = _mm_load_si128(&row[j]);
+        __m128i input_j = _mm_load_si128(&inputVector[j]);
+        __m128i extendedRowLo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
+        __m128i extendedRowHi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
+        __m128i extendedInputLo = _mm_unpacklo_epi8(input_j, Zeros);
+        __m128i extendedInputHi = _mm_unpackhi_epi8(input_j, Zeros);
+        __m128i productLo = _mm_madd_epi16(extendedRowLo, extendedInputLo);
+        __m128i productHi = _mm_madd_epi16(extendedRowHi, extendedInputHi);
+        sumLo = _mm_add_epi32(sumLo, productLo);
+        sumHi = _mm_add_epi32(sumHi, productHi);
+      }
+      __m128i sum = _mm_add_epi32(sumLo, sumHi);
+      __m128i sumHigh_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
+      sum = _mm_add_epi32(sum, sumHigh_64);
+      __m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
+      sum = _mm_add_epi32(sum, sum_second_32);
+      output[i] = _mm_cvtsi128_si32(sum);
+
+# elif defined(USE_MMX)
+      __m64 sumLo = _mm_cvtsi32_si64(biases[i]);
+      __m64 sumHi = Zeros;
+      const auto row = reinterpret_cast<const __m64*>(&weights[offset]);
+      for (IndexType j = 0; j < NumChunks; ++j) {
+        __m64 row_j = row[j];
+        __m64 input_j = inputVector[j];
+        __m64 extendedRowLo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
+        __m64 extendedRowHi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
+        __m64 extendedInputLo = _mm_unpacklo_pi8(input_j, Zeros);
+        __m64 extendedInputHi = _mm_unpackhi_pi8(input_j, Zeros);
+        __m64 productLo = _mm_madd_pi16(extendedRowLo, extendedInputLo);
+        __m64 productHi = _mm_madd_pi16(extendedRowHi, extendedInputHi);
+        sumLo = _mm_add_pi32(sumLo, productLo);
+        sumHi = _mm_add_pi32(sumHi, productHi);
+      }
+      __m64 sum = _mm_add_pi32(sumLo, sumHi);
+      sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
+      output[i] = _mm_cvtsi64_si32(sum);
+
+# elif defined(USE_NEON)
+      int32x4_t sum = {biases[i]};
+      const auto row = reinterpret_cast<const int8x8_t*>(&weights[offset]);
+      for (IndexType j = 0; j < NumChunks; ++j) {
+        int16x8_t product = vmull_s8(inputVector[j * 2], row[j * 2]);
+        product = vmlal_s8(product, inputVector[j * 2 + 1], row[j * 2 + 1]);
+        sum = vpadalq_s16(sum, product);
+      }
+      output[i] = sum[0] + sum[1] + sum[2] + sum[3];
 
-    // Read network parameters
-    bool ReadParameters(std::istream& stream) {
-      if (!previous_layer_.ReadParameters(stream)) return false;
-      for (std::size_t i = 0; i < kOutputDimensions; ++i)
-        biases_[i] = read_little_endian<BiasType>(stream);
-      for (std::size_t i = 0; i < kOutputDimensions * kPaddedInputDimensions; ++i)
-        weights_[i] = read_little_endian<WeightType>(stream);
-      return !stream.fail();
+# else
+      std::int32_t sum = biases[i];
+      for (IndexType j = 0; j < InputDimensions; ++j) {
+        sum += weights[offset + j] * input[j];
+      }
+      output[i] = sum;
+# endif
     }
 
-    // Forward propagation
-    const OutputType* Propagate(
-        const TransformedFeatureType* transformed_features, char* buffer) const {
-      const auto input = previous_layer_.Propagate(
-          transformed_features, buffer + kSelfBufferSize);
-
-#if defined (USE_AVX512)
-
-      [[maybe_unused]] const __m512i kOnes512 = _mm512_set1_epi16(1);
-
-      [[maybe_unused]] auto m512_hadd = [](__m512i sum, int bias) -> int {
-        return _mm512_reduce_add_epi32(sum) + bias;
-      };
-
-      // This function takes
-      //   sum0 = [xmm0a, xmm0b, xmm0c, xmm0d]
-      //   sum1 = [xmm1a, xmm1b, xmm1c, xmm1d]
-      //   sum2 = [xmm2a, xmm2b, xmm2c, xmm2d]
-      //   sum3 = [xmm3a, xmm3b, xmm3c, xmm3d]
-      // and returns
-      //   ret = [
-      //     reduce_add_epi32(xmm0a), reduce_add_epi32(xmm1a), reduce_add_epi32(xmm2a), reduce_add_epi32(xmm3a),
-      //     reduce_add_epi32(xmm0b), reduce_add_epi32(xmm1b), reduce_add_epi32(xmm2b), reduce_add_epi32(xmm3b),
-      //     reduce_add_epi32(xmm0c), reduce_add_epi32(xmm1c), reduce_add_epi32(xmm2c), reduce_add_epi32(xmm3c),
-      //     reduce_add_epi32(xmm0d), reduce_add_epi32(xmm1d), reduce_add_epi32(xmm2d), reduce_add_epi32(xmm3d)
-      //   ]
-      [[maybe_unused]] auto m512_hadd128x16_interleave = [](
-          __m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3) -> __m512i {
-
-        __m512i sum01a = _mm512_unpacklo_epi32(sum0, sum1);
-        __m512i sum01b = _mm512_unpackhi_epi32(sum0, sum1);
-
-        __m512i sum23a = _mm512_unpacklo_epi32(sum2, sum3);
-        __m512i sum23b = _mm512_unpackhi_epi32(sum2, sum3);
-
-        __m512i sum01 = _mm512_add_epi32(sum01a, sum01b);
-        __m512i sum23 = _mm512_add_epi32(sum23a, sum23b);
-
-        __m512i sum0123a = _mm512_unpacklo_epi64(sum01, sum23);
-        __m512i sum0123b = _mm512_unpackhi_epi64(sum01, sum23);
-
-        return _mm512_add_epi32(sum0123a, sum0123b);
-      };
-
-      [[maybe_unused]] auto m512_haddx4 = [m512_hadd128x16_interleave](
-          __m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3, __m128i bias) -> __m128i {
-
-        __m512i sum = m512_hadd128x16_interleave(sum0, sum1, sum2, sum3);
-
-        __m256i sum256lo = _mm512_castsi512_si256(sum);
-        __m256i sum256hi = _mm512_extracti64x4_epi64(sum, 1);
-
-        sum256lo = _mm256_add_epi32(sum256lo, sum256hi);
-
-        __m128i sum128lo = _mm256_castsi256_si128(sum256lo);
-        __m128i sum128hi = _mm256_extracti128_si256(sum256lo, 1);
-
-        return _mm_add_epi32(_mm_add_epi32(sum128lo, sum128hi), bias);
-      };
-
-      [[maybe_unused]] auto m512_haddx8 = [m512_hadd128x16_interleave](
-          __m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3,
-          __m512i sum4, __m512i sum5, __m512i sum6, __m512i sum7, __m256i bias) -> __m256i {
-
-        __m512i suma = m512_hadd128x16_interleave(sum0, sum1, sum2, sum3);
-        __m512i sumb = m512_hadd128x16_interleave(sum4, sum5, sum6, sum7);
-
-        __m512i indices0 = _mm512_setr_epi64(0, 1, 8, 9, 4, 5, 12, 13);
-        __m512i indices1 = _mm512_setr_epi64(2, 3, 10, 11, 6, 7, 14, 15);
-        __m512i x = _mm512_add_epi32(
-            _mm512_permutex2var_epi64(suma, indices0, sumb),
-            _mm512_permutex2var_epi64(suma, indices1, sumb));
-
-        __m256i sum256lo = _mm512_castsi512_si256(x);
-        __m256i sum256hi = _mm512_extracti64x4_epi64(x, 1);
-
-        return _mm256_add_epi32(_mm256_add_epi32(sum256lo, sum256hi), bias);
-      };
-
-      [[maybe_unused]] auto m512_hadd256x8 = [m512_hadd128x16_interleave](
-          __m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3, __m256i bias) -> __m256i {
-
-        __m512i sum = m512_hadd128x16_interleave(sum0, sum1, sum2, sum3);
-
-        __m512i indices = _mm512_setr_epi32(
-            0, 4,  8, 12, 2, 6, 10, 14,
-            1, 5,  9, 13, 3, 7, 11, 15);
-        sum = _mm512_permutexvar_epi32(indices, sum);
+# if defined(USE_MMX)
+    _mm_empty();
+# endif
+  }
+#endif
 
-        __m256i sum256lo = _mm512_castsi512_si256(sum);
-        __m256i sum256hi = _mm512_extracti64x4_epi64(sum, 1);
+  template <IndexType InDims, IndexType OutDims, typename Enabled = void>
+  class AffineTransform;
 
-        return _mm256_add_epi32(_mm256_hadd_epi32(sum256lo, sum256hi), bias);
-      };
+  // A specialization for large inputs.
+  template <IndexType InDims, IndexType OutDims>
+  class AffineTransform<InDims, OutDims, std::enable_if_t<(ceil_to_multiple<IndexType>(InDims, MaxSimdWidth) >= 2*64)>> {
+   public:
+    // Input/output type
+    using InputType = std::uint8_t;
+    using OutputType = std::int32_t;
 
-      [[maybe_unused]] auto m512_hadd256x16 = [m512_hadd128x16_interleave](
-          __m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3,
-          __m512i sum4, __m512i sum5, __m512i sum6, __m512i sum7, __m512i bias) -> __m512i {
+    // Number of input/output dimensions
+    static constexpr IndexType InputDimensions = InDims;
+    static constexpr IndexType OutputDimensions = OutDims;
 
-        __m512i suma = m512_hadd128x16_interleave(sum0, sum1, sum2, sum3);
-        __m512i sumb = m512_hadd128x16_interleave(sum4, sum5, sum6, sum7);
+    static constexpr IndexType PaddedInputDimensions =
+        ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
+    static constexpr IndexType PaddedOutputDimensions =
+        ceil_to_multiple<IndexType>(OutputDimensions, MaxSimdWidth);
 
-        __m512i indices0 = _mm512_setr_epi64(0, 1, 8, 9, 4, 5, 12, 13);
-        __m512i indices1 = _mm512_setr_epi64(2, 3, 10, 11, 6, 7, 14, 15);
-        __m512i x = _mm512_add_epi32(
-            _mm512_permutex2var_epi64(suma, indices0, sumb),
-            _mm512_permutex2var_epi64(suma, indices1, sumb));
+    using OutputBuffer = OutputType[PaddedOutputDimensions];
 
-        __m512i indices = _mm512_setr_epi32(0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15);
-        return _mm512_add_epi32(_mm512_permutexvar_epi32(indices, x), bias);
-      };
+    static_assert(PaddedInputDimensions >= 128, "Something went wrong. This specialization should not have been chosen.");
 
-#if defined (USE_VNNI)
-      [[maybe_unused]] auto m512_add_dpbusd_epi32 = [=](__m512i& acc, __m512i a, __m512i b) {
-        acc = _mm512_dpbusd_epi32(acc, a, b);
+#if defined (USE_AVX512)
+    static constexpr const IndexType InputSimdWidth = 64;
+    static constexpr const IndexType MaxNumOutputRegs = 16;
+#elif defined (USE_AVX2)
+    static constexpr const IndexType InputSimdWidth = 32;
+    static constexpr const IndexType MaxNumOutputRegs = 8;
+#elif defined (USE_SSSE3)
+    static constexpr const IndexType InputSimdWidth = 16;
+    static constexpr const IndexType MaxNumOutputRegs = 8;
+#elif defined (USE_NEON)
+    static constexpr const IndexType InputSimdWidth = 8;
+    static constexpr const IndexType MaxNumOutputRegs = 8;
 #else
-      [[maybe_unused]] auto m512_dpbusd_epi32 = [=](__m512i a, __m512i b) -> __m512i {
-        __m512i product0 = _mm512_maddubs_epi16(a, b);
-        return _mm512_madd_epi16(product0, kOnes512);
-#endif
-      };
-
+    // The fallback implementation will not have permuted weights.
+    // We define these to avoid a lot of ifdefs later.
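+    // (With SmallBlockSize == 1 and NumOutputRegs == 1 the block bookkeeping
+    // below degenerates and get_weight_index(i) == i, i.e. the fallback keeps
+    // the weights in plain row-major order.)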
+    static constexpr const IndexType InputSimdWidth = 1;
+    static constexpr const IndexType MaxNumOutputRegs = 1;
 #endif
 
-#if defined (USE_AVX2)
-
-      [[maybe_unused]] const __m256i kOnes256 = _mm256_set1_epi16(1);
-
-      [[maybe_unused]] auto m256_hadd = [](__m256i sum, int bias) -> int {
-        __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extracti128_si256(sum, 1));
-        sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_BADC));
-        sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_CDAB));
-        return _mm_cvtsi128_si32(sum128) + bias;
-      };
-
-      [[maybe_unused]] auto m256_haddx4 = [](__m256i sum0, __m256i sum1, __m256i sum2, __m256i sum3, __m128i bias) -> __m128i {
-        sum0 = _mm256_hadd_epi32(sum0, sum1);
-        sum2 = _mm256_hadd_epi32(sum2, sum3);
+    // A big block is a region in the weight matrix of size [PaddedInputDimensions, NumOutputRegs].
+    // A small block is a region of size [InputSimdWidth, 1].
 
-        sum0 = _mm256_hadd_epi32(sum0, sum2);
+    static constexpr const IndexType NumOutputRegs = std::min(MaxNumOutputRegs, OutputDimensions);
+    static constexpr const IndexType SmallBlockSize = InputSimdWidth;
+    static constexpr const IndexType BigBlockSize = NumOutputRegs * PaddedInputDimensions;
+    static constexpr const IndexType NumSmallBlocksInBigBlock = BigBlockSize / SmallBlockSize;
+    static constexpr const IndexType NumSmallBlocksPerOutput = PaddedInputDimensions / SmallBlockSize;
+    static constexpr const IndexType NumBigBlocks = OutputDimensions / NumOutputRegs;
 
-        __m128i sum128lo = _mm256_castsi256_si128(sum0);
-        __m128i sum128hi = _mm256_extracti128_si256(sum0, 1);
+    static_assert(OutputDimensions % NumOutputRegs == 0);
 
-        return _mm_add_epi32(_mm_add_epi32(sum128lo, sum128hi), bias);
-      };
-#if defined (USE_VNNI)
-      [[maybe_unused]] auto m256_add_dpbusd_epi32 = [=](__m256i& acc, __m256i a, __m256i b) {
-        acc = _mm256_dpbusd_epi32(acc, a, b);
-#else
-      [[maybe_unused]] auto m256_dpbusd_epi32 = [=](__m256i a, __m256i b) -> __m256i {
-        __m256i product0 = _mm256_maddubs_epi16(a, b);
-        return _mm256_madd_epi16(product0, kOnes256);
-#endif
-      };
 
+    // Hash value embedded in the evaluation file
+    static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
+      std::uint32_t hashValue = 0xCC03DAE4u;
+      hashValue += OutputDimensions;
+      hashValue ^= prevHash >> 1;
+      hashValue ^= prevHash << 31;
+      return hashValue;
+    }
 
-#endif
+    /*
+      Transposes the small blocks within a block.
+      Effectively means that weights can be traversed sequentially during inference.
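+
+      For example (an illustrative layout only, not tied to a particular
+      architecture): with InputSimdWidth == 4, NumOutputRegs == 2 and
+      PaddedInputDimensions == 8, the row-major small blocks
+          row 0: [A0][A1]
+          row 1: [B0][B1]
+      are stored as [A0][B0][A1][B1], so the j-th chunk of every row in the
+      register group is loaded from consecutive memory.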
+    */
+    static IndexType get_weight_index(IndexType i)
+    {
+      const IndexType smallBlock = (i / SmallBlockSize) % NumSmallBlocksInBigBlock;
+      const IndexType smallBlockCol = smallBlock / NumSmallBlocksPerOutput;
+      const IndexType smallBlockRow = smallBlock % NumSmallBlocksPerOutput;
+      const IndexType bigBlock   = i / BigBlockSize;
+      const IndexType rest       = i % SmallBlockSize;
+
+      const IndexType idx =
+          bigBlock * BigBlockSize
+        + smallBlockRow * SmallBlockSize * NumOutputRegs
+        + smallBlockCol * SmallBlockSize
+        + rest;
+
+      return idx;
+    }
 
-#if defined (USE_SSSE3)
+    // Read network parameters
+    bool read_parameters(std::istream& stream) {
+      for (std::size_t i = 0; i < OutputDimensions; ++i)
+        biases[i] = read_little_endian<BiasType>(stream);
 
-      [[maybe_unused]] const __m128i kOnes128 = _mm_set1_epi16(1);
+      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+        weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);
 
-      [[maybe_unused]] auto m128_hadd = [](__m128i sum, int bias) -> int {
-        sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0x4E)); //_MM_PERM_BADC
-        sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0xB1)); //_MM_PERM_CDAB
-        return _mm_cvtsi128_si32(sum) + bias;
-      };
+      return !stream.fail();
+    }
 
-      [[maybe_unused]] auto m128_haddx4 = [](__m128i sum0, __m128i sum1, __m128i sum2, __m128i sum3, __m128i bias) -> __m128i {
-        sum0 = _mm_hadd_epi32(sum0, sum1);
-        sum2 = _mm_hadd_epi32(sum2, sum3);
+    // Write network parameters
+    bool write_parameters(std::ostream& stream) const {
+      for (std::size_t i = 0; i < OutputDimensions; ++i)
+        write_little_endian<BiasType>(stream, biases[i]);
 
-        sum0 = _mm_hadd_epi32(sum0, sum2);
+      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+        write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);
 
-        return _mm_add_epi32(sum0, bias);
-      };
+      return !stream.fail();
+    }
 
-      [[maybe_unused]] auto m128_dpbusd_epi32 = [=](__m128i a, __m128i b) -> __m128i {
-        __m128i product0 = _mm_maddubs_epi16(a, b);
-        return _mm_madd_epi16(product0, kOnes128);
-      };
+    // Forward propagation
+    const OutputType* propagate(
+        const InputType* input, OutputType* output) const {
+#if defined (USE_AVX512)
+      using acc_vec_t = __m512i;
+      using bias_vec_t = __m128i;
+      using weight_vec_t = __m512i;
+      using in_vec_t = __m512i;
+      #define vec_zero _mm512_setzero_si512()
+      #define vec_add_dpbusd_32x2 Simd::m512_add_dpbusd_epi32x2
+      #define vec_hadd Simd::m512_hadd
+      #define vec_haddx4 Simd::m512_haddx4
+#elif defined (USE_AVX2)
+      using acc_vec_t = __m256i;
+      using bias_vec_t = __m128i;
+      using weight_vec_t = __m256i;
+      using in_vec_t = __m256i;
+      #define vec_zero _mm256_setzero_si256()
+      #define vec_add_dpbusd_32x2 Simd::m256_add_dpbusd_epi32x2
+      #define vec_hadd Simd::m256_hadd
+      #define vec_haddx4 Simd::m256_haddx4
+#elif defined (USE_SSSE3)
+      using acc_vec_t = __m128i;
+      using bias_vec_t = __m128i;
+      using weight_vec_t = __m128i;
+      using in_vec_t = __m128i;
+      #define vec_zero _mm_setzero_si128()
+      #define vec_add_dpbusd_32x2 Simd::m128_add_dpbusd_epi32x2
+      #define vec_hadd Simd::m128_hadd
+      #define vec_haddx4 Simd::m128_haddx4
+#elif defined (USE_NEON)
+      using acc_vec_t = int32x4_t;
+      using bias_vec_t = int32x4_t;
+      using weight_vec_t = int8x8_t;
+      using in_vec_t = int8x8_t;
+      #define vec_zero {0}
+      #define vec_add_dpbusd_32x2 Simd::neon_m128_add_dpbusd_epi32x2
+      #define vec_hadd Simd::neon_m128_hadd
+      #define vec_haddx4 Simd::neon_m128_haddx4
 #endif
 
-#if defined (USE_AVX512)
+#if defined (USE_SSSE3) || defined (USE_NEON)
+      const in_vec_t* invec = reinterpret_cast<const in_vec_t*>(input);
 
-      constexpr IndexType kNumChunks512 = kPaddedInputDimensions / (kSimdWidth * 2);
-      constexpr IndexType kNumChunks256 = kPaddedInputDimensions / kSimdWidth;
-
-      const auto output = reinterpret_cast<OutputType*>(buffer);
-
-      // Since it takes 64 bytes to saturate a zmm register, we
-      // cannot use AVX512 for the smaller affine transforms.
-      // Instead we fall back to an AVX2 implementation if
-      // kInputDimensions isn't a multiple of 64.
-      // Note that this means that for example for
-      // kInputDimensions of 96 we fall back to AVX2 even though
-      // the first 64 elements could be processed with AVX512.
-      // This is caused by mixing the __m256 and __m512 variables
-      // required to better handle that case and it would
-      // require handling more cases statically not to lose performance.
-      // This should be revisited if such input dimensions are to be considered.
-      [[maybe_unused]] const auto input_vector512 = reinterpret_cast<const __m512i*>(input);
-      [[maybe_unused]] const auto input_vector256 = reinterpret_cast<const __m256i*>(input);
-
-      // kOutputDimensions is either 1 or a multiple of kSimdWidth
-      // because then it is also an input dimension.
-      if constexpr (kOutputDimensions % 16 == 0 && kNumChunks256 == 1)
+      // Perform accumulation to registers for each big block
+      for (IndexType bigBlock = 0; bigBlock < NumBigBlocks; ++bigBlock)
       {
-        for (IndexType i = 0; i < kOutputDimensions; i += 16)
-        {
-          const IndexType offset01a = (i + 0) * kPaddedInputDimensions;
-          const IndexType offset23a = (i + 2) * kPaddedInputDimensions;
-          const IndexType offset45a = (i + 4) * kPaddedInputDimensions;
-          const IndexType offset67a = (i + 6) * kPaddedInputDimensions;
-          const IndexType offset01b = (i + 8) * kPaddedInputDimensions;
-          const IndexType offset23b = (i + 10) * kPaddedInputDimensions;
-          const IndexType offset45b = (i + 12) * kPaddedInputDimensions;
-          const IndexType offset67b = (i + 14) * kPaddedInputDimensions;
-
-          const __m512i bias = *reinterpret_cast<const __m512i*>(&biases_[i]);
-          __m512i* outptr = reinterpret_cast<__m512i*>(&output[i]);
-
-          const auto row01a = *reinterpret_cast<const __m512i*>(&weights_[offset01a]);
-          const auto row23a = *reinterpret_cast<const __m512i*>(&weights_[offset23a]);
-          const auto row45a = *reinterpret_cast<const __m512i*>(&weights_[offset45a]);
-          const auto row67a = *reinterpret_cast<const __m512i*>(&weights_[offset67a]);
-          const auto row01b = *reinterpret_cast<const __m512i*>(&weights_[offset01b]);
-          const auto row23b = *reinterpret_cast<const __m512i*>(&weights_[offset23b]);
-          const auto row45b = *reinterpret_cast<const __m512i*>(&weights_[offset45b]);
-          const auto row67b = *reinterpret_cast<const __m512i*>(&weights_[offset67b]);
-
-          const __m256i in256 = input_vector256[0];
-          const __m512i in = _mm512_inserti64x4(_mm512_castsi256_si512(in256), in256, 1);
-
-#if defined (USE_VNNI)
-          __m512i sum01a = _mm512_setzero_si512();
-          __m512i sum23a = _mm512_setzero_si512();
-          __m512i sum45a = _mm512_setzero_si512();
-          __m512i sum67a = _mm512_setzero_si512();
-          __m512i sum01b = _mm512_setzero_si512();
-          __m512i sum23b = _mm512_setzero_si512();
-          __m512i sum45b = _mm512_setzero_si512();
-          __m512i sum67b = _mm512_setzero_si512();
-
-          m512_add_dpbusd_epi32(sum01a, in, row01a);
-          m512_add_dpbusd_epi32(sum23a, in, row23a);
-          m512_add_dpbusd_epi32(sum45a, in, row45a);
-          m512_add_dpbusd_epi32(sum67a, in, row67a);
-          m512_add_dpbusd_epi32(sum01b, in, row01b);
-          m512_add_dpbusd_epi32(sum23b, in, row23b);
-          m512_add_dpbusd_epi32(sum45b, in, row45b);
-          m512_add_dpbusd_epi32(sum67b, in, row67b);
-#else
-          __m512i sum01a = m512_dpbusd_epi32(in, row01a);
-          __m512i sum23a = m512_dpbusd_epi32(in, row23a);
-          __m512i sum45a = m512_dpbusd_epi32(in, row45a);
-          __m512i sum67a = m512_dpbusd_epi32(in, row67a);
-          __m512i sum01b = m512_dpbusd_epi32(in, row01b);
-          __m512i sum23b = m512_dpbusd_epi32(in, row23b);
-          __m512i sum45b = m512_dpbusd_epi32(in, row45b);
-          __m512i sum67b = m512_dpbusd_epi32(in, row67b);
-#endif
+        acc_vec_t acc[NumOutputRegs] = { vec_zero };
 
-          *outptr = m512_hadd256x16(
-              sum01a, sum23a, sum45a, sum67a,
-              sum01b, sum23b, sum45b, sum67b, bias);
-        }
-      }
-      else if constexpr (kOutputDimensions % 4 == 0)
-      {
-        for (IndexType i = 0; i < kOutputDimensions; i += 4)
+        // Each big block has NumOutputRegs small blocks in each "row", one per register.
+        // We process two small blocks at a time to save on one addition without VNNI.
+        for (IndexType smallBlock = 0; smallBlock < NumSmallBlocksPerOutput; smallBlock += 2)
         {
-          const IndexType offset0 = (i + 0) * kPaddedInputDimensions;
-          const IndexType offset1 = (i + 1) * kPaddedInputDimensions;
-          const IndexType offset2 = (i + 2) * kPaddedInputDimensions;
-          const IndexType offset3 = (i + 3) * kPaddedInputDimensions;
-
-          const __m128i bias = *reinterpret_cast<const __m128i*>(&biases_[i]);
-          __m128i* outptr = reinterpret_cast<__m128i*>(&output[i]);
+          const weight_vec_t* weightvec =
+              reinterpret_cast<const weight_vec_t*>(
+                  weights
+                + bigBlock * BigBlockSize
+                + smallBlock * SmallBlockSize * NumOutputRegs);
 
-          if constexpr (kPaddedInputDimensions % (kSimdWidth * 2) == 0)
-          {
-            const auto row0 = reinterpret_cast<const __m512i*>(&weights_[offset0]);
-            const auto row1 = reinterpret_cast<const __m512i*>(&weights_[offset1]);
-            const auto row2 = reinterpret_cast<const __m512i*>(&weights_[offset2]);
-            const auto row3 = reinterpret_cast<const __m512i*>(&weights_[offset3]);
-
-#if defined (USE_VNNI)
-            __m512i sum0 = _mm512_setzero_si512();
-            __m512i sum1 = _mm512_setzero_si512();
-            __m512i sum2 = _mm512_setzero_si512();
-            __m512i sum3 = _mm512_setzero_si512();
-            const IndexType kStart = 0;
-#else
-            __m512i sum0 = m512_dpbusd_epi32(input_vector512[0], row0[0]);
-            __m512i sum1 = m512_dpbusd_epi32(input_vector512[0], row1[0]);
-            __m512i sum2 = m512_dpbusd_epi32(input_vector512[0], row2[0]);
-            __m512i sum3 = m512_dpbusd_epi32(input_vector512[0], row3[0]);
-            const IndexType kStart = 1;
-#endif
+          const in_vec_t in0 = invec[smallBlock + 0];
+          const in_vec_t in1 = invec[smallBlock + 1];
 
-            for (IndexType j = kStart; j < kNumChunks512; ++j)
-            {
-              const __m512i in = input_vector512[j];
+          for (IndexType k = 0; k < NumOutputRegs; ++k)
+            vec_add_dpbusd_32x2(acc[k], in0, weightvec[k], in1, weightvec[k + NumOutputRegs]);
+        }
 
-#if defined (USE_VNNI)
-              m512_add_dpbusd_epi32(sum0, in, row0[j]);
-              m512_add_dpbusd_epi32(sum1, in, row1[j]);
-              m512_add_dpbusd_epi32(sum2, in, row2[j]);
-              m512_add_dpbusd_epi32(sum3, in, row3[j]);
-#else
-              sum0 = _mm512_add_epi32(sum0, m512_dpbusd_epi32(in, row0[j]));
-              sum1 = _mm512_add_epi32(sum1, m512_dpbusd_epi32(in, row1[j]));
-              sum2 = _mm512_add_epi32(sum2, m512_dpbusd_epi32(in, row2[j]));
-              sum3 = _mm512_add_epi32(sum3, m512_dpbusd_epi32(in, row3[j]));
-#endif
-            }
+        // Horizontally add all accumulators.
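+        // Each acc[k] holds the partial sums of one output spread across its
+        // SIMD lanes. vec_haddx4 reduces four such registers into one vector of
+        // four outputs and adds the biases in a single step; the generic path
+        // below instead reduces one register per output with vec_hadd.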
+        if constexpr (NumOutputRegs % 4 == 0)
+        {
+          bias_vec_t* outputvec = reinterpret_cast<bias_vec_t*>(output);
+          const bias_vec_t* biasvec = reinterpret_cast<const bias_vec_t*>(biases);
 
-            *outptr = m512_haddx4(sum0, sum1, sum2, sum3, bias);
-          }
-          else
+          for (IndexType k = 0; k < NumOutputRegs; k += 4)
           {
-            const auto row0 = reinterpret_cast<const __m256i*>(&weights_[offset0]);
-            const auto row1 = reinterpret_cast<const __m256i*>(&weights_[offset1]);
-            const auto row2 = reinterpret_cast<const __m256i*>(&weights_[offset2]);
-            const auto row3 = reinterpret_cast<const __m256i*>(&weights_[offset3]);
-
-#if defined (USE_VNNI)
-            __m256i sum0 = _mm256_setzero_si256();
-            __m256i sum1 = _mm256_setzero_si256();
-            __m256i sum2 = _mm256_setzero_si256();
-            __m256i sum3 = _mm256_setzero_si256();
-            const IndexType kStart = 0;
-#else
-            __m256i sum0 = m256_dpbusd_epi32(input_vector256[0], row0[0]);
-            __m256i sum1 = m256_dpbusd_epi32(input_vector256[0], row1[0]);
-            __m256i sum2 = m256_dpbusd_epi32(input_vector256[0], row2[0]);
-            __m256i sum3 = m256_dpbusd_epi32(input_vector256[0], row3[0]);
-            const IndexType kStart = 1;
-#endif
-
-            for (IndexType j = kStart; j < kNumChunks256; ++j)
-            {
-              const __m256i in = input_vector256[j];
-
-#if defined (USE_VNNI)
-              m256_add_dpbusd_epi32(sum0, in, row0[j]);
-              m256_add_dpbusd_epi32(sum1, in, row1[j]);
-              m256_add_dpbusd_epi32(sum2, in, row2[j]);
-              m256_add_dpbusd_epi32(sum3, in, row3[j]);
-#else
-              sum0 = _mm256_add_epi32(sum0, m256_dpbusd_epi32(in, row0[j]));
-              sum1 = _mm256_add_epi32(sum1, m256_dpbusd_epi32(in, row1[j]));
-              sum2 = _mm256_add_epi32(sum2, m256_dpbusd_epi32(in, row2[j]));
-              sum3 = _mm256_add_epi32(sum3, m256_dpbusd_epi32(in, row3[j]));
-#endif
-            }
-
-            *outptr = m256_haddx4(sum0, sum1, sum2, sum3, bias);
+            const IndexType idx = (bigBlock * NumOutputRegs + k) / 4;
+            outputvec[idx] = vec_haddx4(acc[k+0], acc[k+1], acc[k+2], acc[k+3], biasvec[idx]);
           }
         }
-      }
-      else if constexpr (kOutputDimensions == 1)
-      {
-        if constexpr (kPaddedInputDimensions % (kSimdWidth * 2) == 0)
+        else
         {
-          const auto row0 = reinterpret_cast<const __m512i*>(&weights_[0]);
-
-#if defined (USE_VNNI)
-          __m512i sum0 = _mm512_setzero_si512();
-          const IndexType kStart = 0;
-#else
-          __m512i sum0 = m512_dpbusd_epi32(input_vector512[0], row0[0]);
-          const IndexType kStart = 1;
-#endif
-
-          for (IndexType j = kStart; j < kNumChunks512; ++j)
+          for (IndexType k = 0; k < NumOutputRegs; ++k)
           {
-            const __m512i in = input_vector512[j];
-
-#if defined (USE_VNNI)
-            m512_add_dpbusd_epi32(sum0, in, row0[j]);
-#else
-            sum0 = _mm512_add_epi32(sum0, m512_dpbusd_epi32(in, row0[j]));
-#endif
+            const IndexType idx = (bigBlock * NumOutputRegs + k);
+            output[idx] = vec_hadd(acc[k], biases[idx]);
           }
-
-          output[0] = m512_hadd(sum0, biases_[0]);
         }
-        else
-        {
-          const auto row0 = reinterpret_cast<const __m256i*>(&weights_[0]);
+      }
 
-#if defined (USE_VNNI)
-          __m256i sum0 = _mm256_setzero_si256();
-          const IndexType kStart = 0;
+# undef vec_zero
+# undef vec_add_dpbusd_32x2
+# undef vec_hadd
+# undef vec_haddx4
 #else
-          __m256i sum0 = m256_dpbusd_epi32(input_vector256[0], row0[0]);
-          const IndexType kStart = 1;
-#endif
-
-          for (IndexType j = kStart; j < kNumChunks256; ++j)
-          {
-            const __m256i in = input_vector256[j];
+      // Use old implementation for the other architectures.
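+      // (affine_transform_non_ssse3 requires the input to be padded to at
+      // least 16 values; PaddedInputDimensions >= 128 guarantees that here.)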
+      affine_transform_non_ssse3<
+        InputDimensions,
+        PaddedInputDimensions,
+        OutputDimensions>(output, weights, biases, input);
 
-#if defined (USE_VNNI)
-            m256_add_dpbusd_epi32(sum0, in, row0[j]);
-#else
-            sum0 = _mm256_add_epi32(sum0, m256_dpbusd_epi32(in, row0[j]));
 #endif
-          }
 
-          output[0] = m256_hadd(sum0, biases_[0]);
-        }
-      }
-      else
-      {
-        // This case can never happen because kOutputDimensions
-        // is always 1 or a multiple of kSimdWidth.
-        assert(false);
-      }
+      return output;
+    }
 
-#elif defined (USE_AVX2)
+   private:
+    using BiasType = OutputType;
+    using WeightType = std::int8_t;
 
-      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
+    alignas(CacheLineSize) BiasType biases[OutputDimensions];
+    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
+  };
 
-      const auto output = reinterpret_cast<OutputType*>(buffer);
-      const auto input_vector = reinterpret_cast<const __m256i*>(input);
+  template <IndexType InDims, IndexType OutDims>
+  class AffineTransform<InDims, OutDims, std::enable_if_t<(ceil_to_multiple<IndexType>(InDims, MaxSimdWidth) < 2*64)>> {
+   public:
+    // Input/output type
+    using InputType = std::uint8_t;
+    using OutputType = std::int32_t;
 
-      // kOutputDimensions is either 1 or a multiple of kSimdWidth
-      // because then it is also an input dimension.
-      if constexpr (kOutputDimensions % 4 == 0)
-      {
-        for (IndexType i = 0; i < kOutputDimensions; i += 4)
-        {
-          const IndexType offset0 = (i + 0) * kPaddedInputDimensions;
-          const IndexType offset1 = (i + 1) * kPaddedInputDimensions;
-          const IndexType offset2 = (i + 2) * kPaddedInputDimensions;
-          const IndexType offset3 = (i + 3) * kPaddedInputDimensions;
-
-          const __m128i bias = *reinterpret_cast<const __m128i*>(&biases_[i]);
-          __m128i* outptr = reinterpret_cast<__m128i*>(&output[i]);
-
-          const auto row0 = reinterpret_cast<const __m256i*>(&weights_[offset0]);
-          const auto row1 = reinterpret_cast<const __m256i*>(&weights_[offset1]);
-          const auto row2 = reinterpret_cast<const __m256i*>(&weights_[offset2]);
-          const auto row3 = reinterpret_cast<const __m256i*>(&weights_[offset3]);
+    // Number of input/output dimensions
+    static constexpr IndexType InputDimensions = InDims;
+    static constexpr IndexType OutputDimensions = OutDims;
 
-#if defined (USE_VNNI)
-          __m256i sum0 = _mm256_setzero_si256();
-          __m256i sum1 = _mm256_setzero_si256();
-          __m256i sum2 = _mm256_setzero_si256();
-          __m256i sum3 = _mm256_setzero_si256();
-          const IndexType kStart = 0;
-#else
-          __m256i sum0 = m256_dpbusd_epi32(input_vector[0], row0[0]);
-          __m256i sum1 = m256_dpbusd_epi32(input_vector[0], row1[0]);
-          __m256i sum2 = m256_dpbusd_epi32(input_vector[0], row2[0]);
-          __m256i sum3 = m256_dpbusd_epi32(input_vector[0], row3[0]);
-          const IndexType kStart = 1;
-#endif
+    static constexpr IndexType PaddedInputDimensions =
+        ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
+    static constexpr IndexType PaddedOutputDimensions =
+        ceil_to_multiple<IndexType>(OutputDimensions, MaxSimdWidth);
 
-          for (IndexType j = kStart; j < kNumChunks; ++j)
-          {
-            const __m256i in = input_vector[j];
+    using OutputBuffer = OutputType[PaddedOutputDimensions];
 
-#if defined (USE_VNNI)
-            m256_add_dpbusd_epi32(sum0, in, row0[j]);
-            m256_add_dpbusd_epi32(sum1, in, row1[j]);
-            m256_add_dpbusd_epi32(sum2, in, row2[j]);
-            m256_add_dpbusd_epi32(sum3, in, row3[j]);
-#else
-            sum0 = _mm256_add_epi32(sum0, m256_dpbusd_epi32(in, row0[j]));
-            sum1 = _mm256_add_epi32(sum1, m256_dpbusd_epi32(in, row1[j]));
-            sum2 = _mm256_add_epi32(sum2, m256_dpbusd_epi32(in, row2[j]));
-            sum3 = _mm256_add_epi32(sum3, m256_dpbusd_epi32(in, row3[j]));
-#endif
-          }
+    static_assert(PaddedInputDimensions < 128, "Something went wrong. This specialization should not have been chosen.");
 
-          *outptr = m256_haddx4(sum0, sum1, sum2, sum3, bias);
-        }
-      }
-      else if constexpr (kOutputDimensions == 1)
-      {
-        const auto row0 = reinterpret_cast<const __m256i*>(&weights_[0]);
-
-#if defined (USE_VNNI)
-        __m256i sum0 = _mm256_setzero_si256();
-        const IndexType kStart = 0;
-#else
-        __m256i sum0 = m256_dpbusd_epi32(input_vector[0], row0[0]);
-        const IndexType kStart = 1;
+#if defined (USE_SSSE3)
+    static constexpr const IndexType OutputSimdWidth = SimdWidth / 4;
+    static constexpr const IndexType InputSimdWidth = SimdWidth;
 #endif
 
-        for (IndexType j = kStart; j < kNumChunks; ++j)
-        {
-          const __m256i in = input_vector[j];
+    // Hash value embedded in the evaluation file
+    static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
+      std::uint32_t hashValue = 0xCC03DAE4u;
+      hashValue += OutputDimensions;
+      hashValue ^= prevHash >> 1;
+      hashValue ^= prevHash << 31;
+      return hashValue;
+    }
+
+    static IndexType get_weight_index_scrambled(IndexType i)
+    {
+      return
+        (i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
+        i / PaddedInputDimensions * 4 +
+        i % 4;
+    }
 
-#if defined (USE_VNNI)
-          m256_add_dpbusd_epi32(sum0, in, row0[j]);
+    static IndexType get_weight_index(IndexType i)
+    {
+#if defined (USE_SSSE3)
+      return get_weight_index_scrambled(i);
 #else
-          sum0 = _mm256_add_epi32(sum0, m256_dpbusd_epi32(in, row0[j]));
+      return i;
 #endif
-        }
-
-        output[0] = m256_hadd(sum0, biases_[0]);
-      }
-      else
-      {
-        // This case can never happen because kOutputDimensions
-        // is always 1 or a multiple of kSimdWidth.
-        assert(false);
-      }
-
-#elif defined (USE_SSSE3)
+    }
 
-      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
+    // Read network parameters
+    bool read_parameters(std::istream& stream) {
+      for (std::size_t i = 0; i < OutputDimensions; ++i)
+        biases[i] = read_little_endian<BiasType>(stream);
+      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+        weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);
 
-      auto output = reinterpret_cast<OutputType*>(buffer);
-      const auto input_vector = reinterpret_cast<const __m128i*>(input);
+      return !stream.fail();
+    }
 
-      // kOutputDimensions is either 1 or a multiple of kSimdWidth
-      // because then it is also an input dimension.
-      if constexpr (kOutputDimensions % 4 == 0)
-      {
-        for (IndexType i = 0; i < kOutputDimensions; i += 4)
-        {
-          const IndexType offset0 = (i + 0) * kPaddedInputDimensions;
-          const IndexType offset1 = (i + 1) * kPaddedInputDimensions;
-          const IndexType offset2 = (i + 2) * kPaddedInputDimensions;
-          const IndexType offset3 = (i + 3) * kPaddedInputDimensions;
+    // Write network parameters
+    bool write_parameters(std::ostream& stream) const {
+      for (std::size_t i = 0; i < OutputDimensions; ++i)
+        write_little_endian<BiasType>(stream, biases[i]);
 
-          const __m128i bias = *reinterpret_cast<const __m128i*>(&biases_[i]);
-          __m128i* outptr = reinterpret_cast<__m128i*>(&output[i]);
+      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+        write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);
 
-          const auto row0 = reinterpret_cast<const __m128i*>(&weights_[offset0]);
-          const auto row1 = reinterpret_cast<const __m128i*>(&weights_[offset1]);
-          const auto row2 = reinterpret_cast<const __m128i*>(&weights_[offset2]);
-          const auto row3 = reinterpret_cast<const __m128i*>(&weights_[offset3]);
+      return !stream.fail();
+    }
 
+    // Forward propagation
+    const OutputType* propagate(
+        const InputType* input, OutputType* output) const {
 
-          __m128i sum0 = m128_dpbusd_epi32(input_vector[0], row0[0]);
-          __m128i sum1 = m128_dpbusd_epi32(input_vector[0], row1[0]);
-          __m128i sum2 = m128_dpbusd_epi32(input_vector[0], row2[0]);
-          __m128i sum3 = m128_dpbusd_epi32(input_vector[0], row3[0]);
+#if defined (USE_AVX2)
+      using vec_t = __m256i;
+      #define vec_setzero _mm256_setzero_si256
+      #define vec_set_32 _mm256_set1_epi32
+      #define vec_add_dpbusd_32 Simd::m256_add_dpbusd_epi32
+      #define vec_add_dpbusd_32x2 Simd::m256_add_dpbusd_epi32x2
+      #define vec_add_dpbusd_32x4 Simd::m256_add_dpbusd_epi32x4
+      #define vec_hadd Simd::m256_hadd
+      #define vec_haddx4 Simd::m256_haddx4
+#elif defined (USE_SSSE3)
+      using vec_t = __m128i;
+      #define vec_setzero _mm_setzero_si128
+      #define vec_set_32 _mm_set1_epi32
+      #define vec_add_dpbusd_32 Simd::m128_add_dpbusd_epi32
+      #define vec_add_dpbusd_32x2 Simd::m128_add_dpbusd_epi32x2
+      #define vec_add_dpbusd_32x4 Simd::m128_add_dpbusd_epi32x4
+      #define vec_hadd Simd::m128_hadd
+      #define vec_haddx4 Simd::m128_haddx4
+#endif
 
-          for (int j = 1; j < (int)kNumChunks; ++j)
-          {
-            const __m128i in = input_vector[j];
+#if defined (USE_SSSE3)
+      const auto inputVector = reinterpret_cast<const vec_t*>(input);
 
-            sum0 = _mm_add_epi32(sum0, m128_dpbusd_epi32(in, row0[j]));
-            sum1 = _mm_add_epi32(sum1, m128_dpbusd_epi32(in, row1[j]));
-            sum2 = _mm_add_epi32(sum2, m128_dpbusd_epi32(in, row2[j]));
-            sum3 = _mm_add_epi32(sum3, m128_dpbusd_epi32(in, row3[j]));
-          }
+      static_assert(OutputDimensions % OutputSimdWidth == 0 || OutputDimensions == 1);
 
-          *outptr = m128_haddx4(sum0, sum1, sum2, sum3, bias);
-        }
-      }
-      else if constexpr (kOutputDimensions == 1)
+      if constexpr (OutputDimensions % OutputSimdWidth == 0)
       {
-        const auto row0 = reinterpret_cast<const __m128i*>(&weights_[0]);
+        constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 8) / 4;
+        constexpr IndexType NumRegs = OutputDimensions / OutputSimdWidth;
 
-        __m128i sum0 = m128_dpbusd_epi32(input_vector[0], row0[0]);
+        const auto input32 = reinterpret_cast<const std::int32_t*>(input);
+        const vec_t* biasvec = reinterpret_cast<const vec_t*>(biases);
+        vec_t acc[NumRegs];
+        for (IndexType k = 0; k < NumRegs; ++k)
+          acc[k] = biasvec[k];
 
-        for (int j = 1; j < (int)kNumChunks; ++j)
-          sum0 = _mm_add_epi32(sum0, m128_dpbusd_epi32(input_vector[j], row0[j]));
+        for (IndexType i = 0; i < NumChunks; i += 2)
+        {
+          const vec_t in0 = vec_set_32(input32[i + 0]);
+          const vec_t in1 = vec_set_32(input32[i + 1]);
+          const auto col0 = reinterpret_cast<const vec_t*>(&weights[(i + 0) * OutputDimensions * 4]);
+          const auto col1 = reinterpret_cast<const vec_t*>(&weights[(i + 1) * OutputDimensions * 4]);
+          for (IndexType k = 0; k < NumRegs; ++k)
+            vec_add_dpbusd_32x2(acc[k], in0, col0[k], in1, col1[k]);
+        }
 
-        output[0] = m128_hadd(sum0, biases_[0]);
+        vec_t* outptr = reinterpret_cast<vec_t*>(output);
+        for (IndexType k = 0; k < NumRegs; ++k)
+          outptr[k] = acc[k];
       }
-      else
+      else if constexpr (OutputDimensions == 1)
       {
-        // This case can never happen because kOutputDimensions
-        // is always 1 or a multiple of kSimdWidth.
-        assert(false);
-      }
-
-#else
-
-// Use old implementation for the other architectures.
-
-      auto output = reinterpret_cast<OutputType*>(buffer);
-
-#if defined(USE_SSE2)
-      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
-#ifndef USE_SSSE3
-      const __m128i kZeros = _mm_setzero_si128();
-#else
-      const __m128i kOnes = _mm_set1_epi16(1);
-#endif
-      const auto input_vector = reinterpret_cast<const __m128i*>(input);
-
-#elif defined(USE_MMX)
-      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
-      const __m64 kZeros = _mm_setzero_si64();
-      const auto input_vector = reinterpret_cast<const __m64*>(input);
-
-#elif defined(USE_NEON)
-      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
-      const auto input_vector = reinterpret_cast<const int8x8_t*>(input);
-#endif
-
-      for (IndexType i = 0; i < kOutputDimensions; ++i) {
-        const IndexType offset = i * kPaddedInputDimensions;
-
-#if defined(USE_SSE2)
-        __m128i sum_lo = _mm_cvtsi32_si128(biases_[i]);
-        __m128i sum_hi = kZeros;
-        const auto row = reinterpret_cast<const __m128i*>(&weights_[offset]);
-        for (IndexType j = 0; j < kNumChunks; ++j) {
-          __m128i row_j = _mm_load_si128(&row[j]);
-          __m128i input_j = _mm_load_si128(&input_vector[j]);
-          __m128i extended_row_lo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
-          __m128i extended_row_hi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
-          __m128i extended_input_lo = _mm_unpacklo_epi8(input_j, kZeros);
-          __m128i extended_input_hi = _mm_unpackhi_epi8(input_j, kZeros);
-          __m128i product_lo = _mm_madd_epi16(extended_row_lo, extended_input_lo);
-          __m128i product_hi = _mm_madd_epi16(extended_row_hi, extended_input_hi);
-          sum_lo = _mm_add_epi32(sum_lo, product_lo);
-          sum_hi = _mm_add_epi32(sum_hi, product_hi);
-        }
-        __m128i sum = _mm_add_epi32(sum_lo, sum_hi);
-        __m128i sum_high_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
-        sum = _mm_add_epi32(sum, sum_high_64);
-        __m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
-        sum = _mm_add_epi32(sum, sum_second_32);
-        output[i] = _mm_cvtsi128_si32(sum);
-
-#elif defined(USE_MMX)
-        __m64 sum_lo = _mm_cvtsi32_si64(biases_[i]);
-        __m64 sum_hi = kZeros;
-        const auto row = reinterpret_cast<const __m64*>(&weights_[offset]);
-        for (IndexType j = 0; j < kNumChunks; ++j) {
-          __m64 row_j = row[j];
-          __m64 input_j = input_vector[j];
-          __m64 extended_row_lo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
-          __m64 extended_row_hi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
-          __m64 extended_input_lo = _mm_unpacklo_pi8(input_j, kZeros);
-          __m64 extended_input_hi = _mm_unpackhi_pi8(input_j, kZeros);
-          __m64 product_lo = _mm_madd_pi16(extended_row_lo, extended_input_lo);
-          __m64 product_hi = _mm_madd_pi16(extended_row_hi, extended_input_hi);
-          sum_lo = _mm_add_pi32(sum_lo, product_lo);
-          sum_hi = _mm_add_pi32(sum_hi, product_hi);
-        }
-        __m64 sum = _mm_add_pi32(sum_lo, sum_hi);
-        sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
-        output[i] = _mm_cvtsi64_si32(sum);
+        constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
+        vec_t sum0 = vec_setzero();
+        const auto row0 = reinterpret_cast<const vec_t*>(&weights[0]);
 
-#elif defined(USE_NEON)
+        for (int j = 0; j < (int)NumChunks; ++j)
+        {
+          const vec_t in = inputVector[j];
+          vec_add_dpbusd_32(sum0, in, row0[j]);
+        }
+        output[0] = vec_hadd(sum0, biases[0]);
+      }
-        int32x4_t sum = {biases_[i]};
-        const auto row = reinterpret_cast<const int8x8_t*>(&weights_[offset]);
-        for (IndexType j = 0; j < kNumChunks; ++j) {
-          int16x8_t product = vmull_s8(input_vector[j * 2], row[j * 2]);
-          product = vmlal_s8(product, input_vector[j * 2 + 1], row[j * 2 + 1]);
-          sum = vpadalq_s16(sum, product);
-        }
-        output[i] = sum[0] + sum[1] + sum[2] + sum[3];
 
-#else
-        OutputType sum = biases_[i];
-        for (IndexType j = 0; j < kInputDimensions; ++j) {
-          sum += weights_[offset + j] * input[j];
-        }
-        output[i] = sum;
-#endif
-
-      }
-#if defined(USE_MMX)
-      _mm_empty();
-#endif
+# undef vec_setzero
+# undef vec_set_32
+# undef vec_add_dpbusd_32
+# undef vec_add_dpbusd_32x2
+# undef vec_add_dpbusd_32x4
+# undef vec_hadd
+# undef vec_haddx4
+#else
+      // Use old implementation for the other architectures.
+      affine_transform_non_ssse3<
+        InputDimensions,
+        PaddedInputDimensions,
+        OutputDimensions>(output, weights, biases, input);
 #endif
 
       return output;
     }
@@ -748,13 +530,10 @@ namespace Eval::NNUE::Layers {
     using BiasType = OutputType;
    using WeightType = std::int8_t;
 
-    PreviousLayer previous_layer_;
-
-    alignas(kCacheLineSize) BiasType biases_[kOutputDimensions];
-    alignas(kCacheLineSize)
-        WeightType weights_[kOutputDimensions * kPaddedInputDimensions];
+    alignas(CacheLineSize) BiasType biases[OutputDimensions];
+    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
   };
 
-}  // namespace Eval::NNUE::Layers
+}  // namespace Stockfish::Eval::NNUE::Layers
 
 #endif // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED