From 38e61663d836e062af0bc002814ad5149c4b7729 Mon Sep 17 00:00:00 2001
From: AndrovT <31534597+AndrovT@users.noreply.github.com>
Date: Sun, 11 Jun 2023 03:24:04 +0200
Subject: [PATCH] Use block sparse input for the first layer.

Use block sparse input for the first fully connected layer on architectures
with at least SSSE3. Depending on the CPU architecture, this yields a speedup
of up to 10%, e.g.

```
Result of 100 runs of 'bench 16 1 13 default depth NNUE'

base (...ockfish-base) = 959345 +/- 7477
test (...ckfish-patch) = 1054340 +/- 9640
diff = +94995 +/- 3999

speedup = +0.0990
P(speedup > 0) = 1.0000

CPU: 8 x AMD Ryzen 7 5700U with Radeon Graphics
Hyperthreading: on
```

Passed STC:
https://tests.stockfishchess.org/tests/view/6485aa0965ffe077ca12409c
LLR: 2.93 (-2.94,2.94) <0.00,2.00>
Total: 8864 W: 2479 L: 2223 D: 4162
Ptnml(0-2): 13, 829, 2504, 1061, 25

This commit includes a net with reordered weights to increase the likelihood
of block sparse inputs; it is otherwise equivalent to the previous master net
(nn-ea57bea57e32.nnue).

Activation data was collected with
https://github.com/AndrovT/Stockfish/tree/log-activations, running
bench 16 1 13 varied_1000.epd depth NNUE. Net parameters were permuted with
https://gist.github.com/AndrovT/9e3fbaebb7082734dc84d27e02094cb3.

closes https://github.com/official-stockfish/Stockfish/pull/4612

No functional change
---
 AUTHORS                                         |   1 +
 src/evaluate.h                                  |   2 +-
 .../layers/affine_transform_sparse_input.h      | 286 ++++++++++++++++++
 src/nnue/nnue_architecture.h                    |   3 +-
 4 files changed, 290 insertions(+), 2 deletions(-)
 create mode 100644 src/nnue/layers/affine_transform_sparse_input.h

diff --git a/AUTHORS b/AUTHORS
index d01d23cd..63b862ce 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -159,6 +159,7 @@ Norman Schmidt (FireFather)
 notruck
 Ofek Shochat (OfekShochat, ghostway)
 Ondrej Mosnáček (WOnder93)
+Ondřej Mišina (AndrovT)
 Oskar Werkelin Ahlin
 Pablo Vazquez
 Panthee
diff --git a/src/evaluate.h b/src/evaluate.h
index fc852c8d..94cd42cc 100644
--- a/src/evaluate.h
+++ b/src/evaluate.h
@@ -39,7 +39,7 @@ namespace Eval {
 // The default net name MUST follow the format nn-[SHA256 first 12 digits].nnue
 // for the build process (profile-build and fishtest) to work. Do not change the
 // name of the macro, as it is used in the Makefile.
-  #define EvalFileDefaultName "nn-ea57bea57e32.nnue"
+  #define EvalFileDefaultName "nn-fdc1d0fe6455.nnue"
 
 namespace NNUE {
diff --git a/src/nnue/layers/affine_transform_sparse_input.h b/src/nnue/layers/affine_transform_sparse_input.h
new file mode 100644
index 00000000..00b17c19
--- /dev/null
+++ b/src/nnue/layers/affine_transform_sparse_input.h
@@ -0,0 +1,286 @@
+/*
+  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
+  Copyright (C) 2004-2023 The Stockfish developers (see AUTHORS file)
+
+  Stockfish is free software: you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation, either version 3 of the License, or
+  (at your option) any later version.
+
+  Stockfish is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+// Definition of layer AffineTransformSparseInput of NNUE evaluation function
+
+#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED
+#define NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED
+
+#include <algorithm>
+#include <array>
+#include <cstdint>
+#include "../nnue_common.h"
+#include "affine_transform.h"
+#include "simd.h"
+
+/*
+  This file contains the definition for a fully connected layer (aka affine
+  transform) with block sparse input.
+*/
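+
+/*
+  Why this helps: the input to this layer is the feature transformer's output
+  after the clipped, quantized (uint8) activation, and in practice most of
+  those activations are zero. The input is therefore viewed as 4-byte blocks;
+  the indices of the nonzero blocks are gathered first, and only the matching
+  weight tiles are accumulated. As a simplified scalar sketch (quantization
+  details omitted; W_tile and in_block are pseudocode names, not identifiers
+  used below):
+
+      for each output r:          out[r]  = bias[r]
+      for each nonzero block b:
+          for each output r:      out[r] += dot(W_tile(b, r), in_block(b))
+
+  At load time the weights are stored in a permuted (block-major) order so
+  that each W_tile(b, r) is 4 contiguous int8 values; see
+  get_weight_index_scrambled().
+*/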
+
+namespace Stockfish::Eval::NNUE::Layers {
+
+#if defined(__GNUC__)  // GCC, Clang, ICC
+
+  static inline IndexType lsb_(std::uint32_t b) {
+    assert(b);
+    return IndexType(__builtin_ctzl(b));
+  }
+
+#elif defined(_MSC_VER)  // MSVC
+
+  static inline IndexType lsb_(std::uint32_t b) {
+    assert(b);
+    unsigned long idx;
+    _BitScanForward(&idx, b);
+    return (IndexType) idx;
+  }
+
+#else  // Compiler is neither GCC nor MSVC compatible
+
+#error "Compiler not supported."
+
+#endif
+
+
+#if defined(USE_SSSE3)
+  alignas(CacheLineSize) static inline const std::array<std::array<std::uint16_t, 8>, 256> lookup_indices = [](){
+    std::array<std::array<std::uint16_t, 8>, 256> v{};
+    for (int i = 0; i < 256; ++i)
+    {
+      int j = i;
+      int k = 0;
+      while(j)
+      {
+        const IndexType lsbIndex = lsb_(std::uint32_t(j));
+        j &= j - 1;
+        v[i][k] = lsbIndex;
+        ++k;
+      }
+    }
+    return v;
+  }();
+  alignas(CacheLineSize) static inline const std::array<std::uint8_t, 256> lookup_count = [](){
+    std::array<std::uint8_t, 256> v;
+    for (int i = 0; i < 256; ++i)
+    {
+      int j = i;
+      int k = 0;
+      while(j)
+      {
+        j &= j - 1;
+        ++k;
+      }
+      v[i] = k;
+    }
+    return v;
+  }();
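+
+  // How find_nnz() below uses these tables: vec_nnz() compresses a SIMD
+  // register of int32 inputs into a bitmask of which ones are nonzero, and
+  // the mask is then consumed 8 bits at a time. For a mask byte m,
+  // lookup_indices[m] holds the positions of its set bits and lookup_count[m]
+  // its popcount. Worked example: m = 0b00010011 (bits 0, 1 and 4 set) gives
+  // lookup_indices[m] = {0, 1, 4, 0, 0, 0, 0, 0} and lookup_count[m] = 3, so
+  // a single unaligned 128-bit store publishes all eight candidate indices
+  // while count advances only past the three valid ones.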
+
+  // Find indices of nonzero numbers in an int32_t array
+  template<IndexType InputDimensions>
+  void find_nnz(const std::int32_t* input, std::uint16_t* out, IndexType& count_out) {
+#if defined (USE_AVX512)
+    using vec_t = __m512i;
+    #define vec_nnz(a) _mm512_cmpgt_epi32_mask(a, _mm512_setzero_si512())
+#elif defined (USE_AVX2)
+    using vec_t = __m256i;
+    #define vec_nnz(a) _mm256_movemask_ps((__m256)_mm256_cmpgt_epi32(a, _mm256_setzero_si256()))
+#elif defined (USE_SSSE3)
+    using vec_t = __m128i;
+    #define vec_nnz(a) _mm_movemask_ps((__m128)_mm_cmpgt_epi32(a, _mm_setzero_si128()))
+#endif
+    constexpr IndexType InputSimdWidth = sizeof(vec_t) / sizeof(std::int32_t);
+    // Inputs are processed InputSimdWidth at a time and outputs are processed
+    // 8 at a time so we process in chunks of max(InputSimdWidth, 8)
+    constexpr IndexType ChunkSize = std::max<IndexType>(InputSimdWidth, 8);
+    constexpr IndexType NumChunks = InputDimensions / ChunkSize;
+    constexpr IndexType InputsPerChunk = ChunkSize / InputSimdWidth;
+    constexpr IndexType OutputsPerChunk = ChunkSize / 8;
+
+    const auto inputVector = reinterpret_cast<const vec_t*>(input);
+    IndexType count = 0;
+    __m128i base = _mm_set1_epi16(0);
+    __m128i increment = _mm_set1_epi16(8);
+    for (IndexType i = 0; i < NumChunks; ++i)
+    {
+      // bitmask of nonzero values in this chunk
+      unsigned nnz = 0;
+      for (IndexType j = 0; j < InputsPerChunk; ++j)
+      {
+        const vec_t inputChunk = inputVector[i * InputsPerChunk + j];
+        nnz |= (unsigned)vec_nnz(inputChunk) << (j * InputSimdWidth);
+      }
+      for (IndexType j = 0; j < OutputsPerChunk; ++j)
+      {
+        const auto lookup = (nnz >> (j * 8)) & 0xFF;
+        const auto offsets = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&lookup_indices[lookup]));
+        _mm_storeu_si128(reinterpret_cast<__m128i*>(out + count), _mm_add_epi16(base, offsets));
+        count += lookup_count[lookup];
+        base = _mm_add_epi16(base, increment);
+      }
+    }
+    count_out = count;
+  }
+# undef vec_nnz
+#endif
+
+  // Sparse input implementation
+  template <IndexType InDims, IndexType OutDims>
+  class AffineTransformSparseInput {
+   public:
+    // Input/output type
+    using InputType = std::uint8_t;
+    using OutputType = std::int32_t;
+
+    // Number of input/output dimensions
+    static constexpr IndexType InputDimensions = InDims;
+    static constexpr IndexType OutputDimensions = OutDims;
+
+    static_assert(OutputDimensions % 16 == 0, "Only implemented for OutputDimensions divisible by 16.");
+
+    static constexpr IndexType PaddedInputDimensions =
+      ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
+    static constexpr IndexType PaddedOutputDimensions =
+      ceil_to_multiple<IndexType>(OutputDimensions, MaxSimdWidth);
+
+#if defined (USE_SSSE3)
+    static constexpr IndexType ChunkSize = 4;
+#else
+    static constexpr IndexType ChunkSize = 1;
+#endif
+
+    using OutputBuffer = OutputType[PaddedOutputDimensions];
+
+    // Hash value embedded in the evaluation file
+    static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
+      std::uint32_t hashValue = 0xCC03DAE4u;
+      hashValue += OutputDimensions;
+      hashValue ^= prevHash >> 1;
+      hashValue ^= prevHash << 31;
+      return hashValue;
+    }
+
+    static IndexType get_weight_index_scrambled(IndexType i)
+    {
+      return
+        (i / ChunkSize) % (PaddedInputDimensions / ChunkSize) * OutputDimensions * ChunkSize +
+        i / PaddedInputDimensions * ChunkSize +
+        i % ChunkSize;
+    }
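+
+    // Layout produced by the scramble (SSSE3+ path, where ChunkSize == 4):
+    // writing a flat row-major index as i = r * PaddedInputDimensions + c,
+    // with r the output and c the input, the weight lands at
+    //     (c / 4 * OutputDimensions + r) * 4 + c % 4,
+    // i.e. each 4-byte input block owns a contiguous OutputDimensions x 4
+    // tile of int8 weights, and propagate() fetches exactly one such tile
+    // per nonzero input block.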
+
+    static IndexType get_weight_index(IndexType i)
+    {
+#if defined (USE_SSSE3)
+      return get_weight_index_scrambled(i);
+#else
+      return i;
+#endif
+    }
+
+    // Read network parameters
+    bool read_parameters(std::istream& stream) {
+      read_little_endian<BiasType>(stream, biases, OutputDimensions);
+      for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+        weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);
+
+      return !stream.fail();
+    }
+
+    // Write network parameters
+    bool write_parameters(std::ostream& stream) const {
+      write_little_endian<BiasType>(stream, biases, OutputDimensions);
+
+      for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+        write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);
+
+      return !stream.fail();
+    }
+
+    // Forward propagation
+    const OutputType* propagate(
+        const InputType* input, OutputType* output) const {
+
+#if defined (USE_SSSE3)
+#if defined (USE_AVX512)
+      using vec_t = __m512i;
+      #define vec_setzero _mm512_setzero_si512
+      #define vec_set_32 _mm512_set1_epi32
+      #define vec_add_dpbusd_32 Simd::m512_add_dpbusd_epi32
+#elif defined (USE_AVX2)
+      using vec_t = __m256i;
+      #define vec_setzero _mm256_setzero_si256
+      #define vec_set_32 _mm256_set1_epi32
+      #define vec_add_dpbusd_32 Simd::m256_add_dpbusd_epi32
+#elif defined (USE_SSSE3)
+      using vec_t = __m128i;
+      #define vec_setzero _mm_setzero_si128
+      #define vec_set_32 _mm_set1_epi32
+      #define vec_add_dpbusd_32 Simd::m128_add_dpbusd_epi32
+#endif
+      static constexpr IndexType OutputSimdWidth = sizeof(vec_t) / sizeof(OutputType);
+
+      constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 8) / ChunkSize;
+      constexpr IndexType NumRegs = OutputDimensions / OutputSimdWidth;
+      std::uint16_t nnz[NumChunks];
+      IndexType count;
+
+      const auto input32 = reinterpret_cast<const std::int32_t*>(input);
+
+      // Find indices of nonzero 32-bit blocks
+      find_nnz<NumChunks>(input32, nnz, count);
+
+      const vec_t* biasvec = reinterpret_cast<const vec_t*>(biases);
+      vec_t acc[NumRegs];
+      for (IndexType k = 0; k < NumRegs; ++k)
+        acc[k] = biasvec[k];
+
+      for (IndexType j = 0; j < count; ++j)
+      {
+        const auto i = nnz[j];
+        // Broadcast the nonzero 4-byte input block and accumulate its weight tile
+        const vec_t in = vec_set_32(input32[i]);
+        const auto col = reinterpret_cast<const vec_t*>(&weights[i * OutputDimensions * ChunkSize]);
+        for (IndexType k = 0; k < NumRegs; ++k)
+          vec_add_dpbusd_32(acc[k], in, col[k]);
+      }
+
+      vec_t* outptr = reinterpret_cast<vec_t*>(output);
+      for (IndexType k = 0; k < NumRegs; ++k)
+        outptr[k] = acc[k];
+# undef vec_setzero
+# undef vec_set_32
+# undef vec_add_dpbusd_32
+#else
+      // Use dense implementation for the other architectures.
+      affine_transform_non_ssse3<
+        InputDimensions,
+        PaddedInputDimensions,
+        OutputDimensions>(output, weights, biases, input);
+#endif
+
+      return output;
+    }
+
+   private:
+    using BiasType = OutputType;
+    using WeightType = std::int8_t;
+
+    alignas(CacheLineSize) BiasType biases[OutputDimensions];
+    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
+  };
+
+}  // namespace Stockfish::Eval::NNUE::Layers
+
+#endif // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED
diff --git a/src/nnue/nnue_architecture.h b/src/nnue/nnue_architecture.h
index d10434f3..413dbb3d 100644
--- a/src/nnue/nnue_architecture.h
+++ b/src/nnue/nnue_architecture.h
@@ -27,6 +27,7 @@
 
 #include "features/half_ka_v2_hm.h"
 
+#include "layers/affine_transform_sparse_input.h"
 #include "layers/affine_transform.h"
 #include "layers/clipped_relu.h"
 #include "layers/sqr_clipped_relu.h"
@@ -48,7 +49,7 @@ struct Network
   static constexpr int FC_0_OUTPUTS = 15;
   static constexpr int FC_1_OUTPUTS = 32;
 
-  Layers::AffineTransform<TransformedFeatureDimensions, FC_0_OUTPUTS + 1> fc_0;
+  Layers::AffineTransformSparseInput<TransformedFeatureDimensions, FC_0_OUTPUTS + 1> fc_0;
   Layers::SqrClippedReLU<FC_0_OUTPUTS + 1> ac_sqr_0;
   Layers::ClippedReLU<FC_0_OUTPUTS> ac_0;
   Layers::AffineTransform<FC_0_OUTPUTS, FC_1_OUTPUTS> fc_1;
-- 
2.39.2