X-Git-Url: https://git.sesse.net/?p=stockfish;a=blobdiff_plain;f=src%2Fnnue%2Flayers%2Faffine_transform.h;fp=src%2Fnnue%2Flayers%2Faffine_transform.h;h=424fad5650f164b5c013ad775b3867b3109eb579;hp=1faa180d4dd082496b041552764c2682975ccfd2;hb=fbbd4adc3c01460faa3cc8f91771ab9b0ef718ca;hpb=a7ab92ec25c91e8413630c52cfc2db6b4ecacf0c

diff --git a/src/nnue/layers/affine_transform.h b/src/nnue/layers/affine_transform.h
index 1faa180d..424fad56 100644
--- a/src/nnue/layers/affine_transform.h
+++ b/src/nnue/layers/affine_transform.h
@@ -27,7 +27,7 @@ namespace Stockfish::Eval::NNUE::Layers {
 
   // Affine transformation layer
-  template <typename PreviousLayer, IndexType OutputDimensions>
+  template <typename PreviousLayer, IndexType OutDims>
   class AffineTransform {
    public:
     // Input/output type
@@ -36,64 +36,64 @@ namespace Stockfish::Eval::NNUE::Layers {
     static_assert(std::is_same<InputType, std::uint8_t>::value, "");
 
     // Number of input/output dimensions
-    static constexpr IndexType kInputDimensions =
-        PreviousLayer::kOutputDimensions;
-    static constexpr IndexType kOutputDimensions = OutputDimensions;
-    static constexpr IndexType kPaddedInputDimensions =
-        CeilToMultiple<IndexType>(kInputDimensions, kMaxSimdWidth);
+    static constexpr IndexType InputDimensions =
+        PreviousLayer::OutputDimensions;
+    static constexpr IndexType OutputDimensions = OutDims;
+    static constexpr IndexType PaddedInputDimensions =
+        ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
 #if defined (USE_AVX512)
-    static constexpr const IndexType kOutputSimdWidth = kSimdWidth / 2;
+    static constexpr const IndexType OutputSimdWidth = SimdWidth / 2;
 #elif defined (USE_SSSE3)
-    static constexpr const IndexType kOutputSimdWidth = kSimdWidth / 4;
+    static constexpr const IndexType OutputSimdWidth = SimdWidth / 4;
 #endif
 
     // Size of forward propagation buffer used in this layer
-    static constexpr std::size_t kSelfBufferSize =
-        CeilToMultiple(kOutputDimensions * sizeof(OutputType), kCacheLineSize);
+    static constexpr std::size_t SelfBufferSize =
+        ceil_to_multiple(OutputDimensions * sizeof(OutputType), CacheLineSize);
 
     // Size of the forward propagation buffer used from the input layer to this layer
-    static constexpr std::size_t kBufferSize =
-        PreviousLayer::kBufferSize + kSelfBufferSize;
+    static constexpr std::size_t BufferSize =
+        PreviousLayer::BufferSize + SelfBufferSize;
 
     // Hash value embedded in the evaluation file
-    static constexpr std::uint32_t GetHashValue() {
-      std::uint32_t hash_value = 0xCC03DAE4u;
-      hash_value += kOutputDimensions;
-      hash_value ^= PreviousLayer::GetHashValue() >> 1;
-      hash_value ^= PreviousLayer::GetHashValue() << 31;
-      return hash_value;
+    static constexpr std::uint32_t get_hash_value() {
+      std::uint32_t hashValue = 0xCC03DAE4u;
+      hashValue += OutputDimensions;
+      hashValue ^= PreviousLayer::get_hash_value() >> 1;
+      hashValue ^= PreviousLayer::get_hash_value() << 31;
+      return hashValue;
     }
 
-    // Read network parameters
-    bool ReadParameters(std::istream& stream) {
-      if (!previous_layer_.ReadParameters(stream)) return false;
-      for (std::size_t i = 0; i < kOutputDimensions; ++i)
-        biases_[i] = read_little_endian<BiasType>(stream);
-      for (std::size_t i = 0; i < kOutputDimensions * kPaddedInputDimensions; ++i)
+    // Read network parameters
+    bool read_parameters(std::istream& stream) {
+      if (!previousLayer.read_parameters(stream)) return false;
+      for (std::size_t i = 0; i < OutputDimensions; ++i)
+        biases[i] = read_little_endian<BiasType>(stream);
+      for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
 #if !defined (USE_SSSE3)
-        weights_[i] = read_little_endian<WeightType>(stream);
+        weights[i] = read_little_endian<WeightType>(stream);
 #else
-        weights_[
-          (i / 4) % (kPaddedInputDimensions / 4) * kOutputDimensions * 4 +
-          i / kPaddedInputDimensions * 4 +
-          i % 4
-        ] = read_little_endian<WeightType>(stream);
+        weights[
+          (i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
+          i / PaddedInputDimensions * 4 +
+          i % 4
+        ] = read_little_endian<WeightType>(stream);
 
       // Determine if eights of weight and input products can be summed using 16bits
       // without saturation. We assume worst case combinations of 0 and 127 for all inputs.
-      if (kOutputDimensions > 1 && !stream.fail())
+      if (OutputDimensions > 1 && !stream.fail())
       {
           canSaturate16.count = 0;
 #if !defined(USE_VNNI)
-          for (IndexType i = 0; i < kPaddedInputDimensions; i += 16)
-              for (IndexType j = 0; j < kOutputDimensions; ++j)
+          for (IndexType i = 0; i < PaddedInputDimensions; i += 16)
+              for (IndexType j = 0; j < OutputDimensions; ++j)
                   for (int x = 0; x < 2; ++x)
                   {
-                      WeightType* w = &weights_[i * kOutputDimensions + j * 4 + x * 2];
+                      WeightType* w = &weights[i * OutputDimensions + j * 4 + x * 2];
                       int sum[2] = {0, 0};
                       for (int k = 0; k < 8; ++k)
                       {
-                          IndexType idx = k / 2 * kOutputDimensions * 4 + k % 2;
+                          IndexType idx = k / 2 * OutputDimensions * 4 + k % 2;
                           sum[w[idx] < 0] += w[idx];
                       }
                       for (int sign : { -1, 1 })
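Note (sketch, not part of the patch): under USE_SSSE3 the loader above permutes the row-major weights so that, for every four consecutive input bytes, the four weights of each output row sit together. That is what lets propagate broadcast a 32-bit chunk of the input and multiply it against whole blocks of outputs. A minimal scalar sketch of the index mapping, with hypothetical names:

    // Sketch: where a row-major weight index i lands in the permuted layout
    // used by the SSSE3/AVX2 path (on disk, i = out * paddedInDims + in).
    #include <cstddef>

    constexpr std::size_t permuted_index(std::size_t i,
                                         std::size_t outDims,       // OutputDimensions
                                         std::size_t paddedInDims)  // PaddedInputDimensions
    {
        return (i / 4) % (paddedInDims / 4) * outDims * 4   // which 4-byte input chunk
             + i / paddedInDims * 4                         // which output row in the chunk
             + i % 4;                                       // byte within the 4-byte group
    }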
@@ -102,12 +102,12 @@ namespace Stockfish::Eval::NNUE::Layers {
                           int maxK = 0, maxW = 0;
                           for (int k = 0; k < 8; ++k)
                           {
-                              IndexType idx = k / 2 * kOutputDimensions * 4 + k % 2;
+                              IndexType idx = k / 2 * OutputDimensions * 4 + k % 2;
                               if (maxW < sign * w[idx])
                                   maxK = k, maxW = sign * w[idx];
                           }
 
-                          IndexType idx = maxK / 2 * kOutputDimensions * 4 + maxK % 2;
+                          IndexType idx = maxK / 2 * OutputDimensions * 4 + maxK % 2;
                           sum[sign == -1] -= w[idx];
                           canSaturate16.add(j, i + maxK / 2 * 4 + maxK % 2 + x * 2, w[idx]);
                           w[idx] = 0;
@@ -126,14 +126,14 @@ namespace Stockfish::Eval::NNUE::Layers {
     }
 
     // Forward propagation
-    const OutputType* Propagate(
-        const TransformedFeatureType* transformed_features, char* buffer) const {
-      const auto input = previous_layer_.Propagate(
-          transformed_features, buffer + kSelfBufferSize);
+    const OutputType* propagate(
+        const TransformedFeatureType* transformedFeatures, char* buffer) const {
+      const auto input = previousLayer.propagate(
+          transformedFeatures, buffer + SelfBufferSize);
 
 #if defined (USE_AVX512)
-      [[maybe_unused]] const __m512i kOnes512 = _mm512_set1_epi16(1);
+      [[maybe_unused]] const __m512i Ones512 = _mm512_set1_epi16(1);
 
       [[maybe_unused]] auto m512_hadd = [](__m512i sum, int bias) -> int {
         return _mm512_reduce_add_epi32(sum) + bias;
       };
@@ -144,7 +144,7 @@ namespace Stockfish::Eval::NNUE::Layers {
         acc = _mm512_dpbusd_epi32(acc, a, b);
 #else
         __m512i product0 = _mm512_maddubs_epi16(a, b);
-        product0 = _mm512_madd_epi16(product0, kOnes512);
+        product0 = _mm512_madd_epi16(product0, Ones512);
         acc = _mm512_add_epi32(acc, product0);
 #endif
       };
@@ -164,7 +164,7 @@ namespace Stockfish::Eval::NNUE::Layers {
         product0 = _mm512_add_epi16(product0, product1);
         product2 = _mm512_add_epi16(product2, product3);
         product0 = _mm512_add_epi16(product0, product2);
-        product0 = _mm512_madd_epi16(product0, kOnes512);
+        product0 = _mm512_madd_epi16(product0, Ones512);
         acc = _mm512_add_epi32(acc, product0);
 #endif
       };
@@ -172,7 +172,7 @@ namespace Stockfish::Eval::NNUE::Layers {
 #endif
 
 #if defined (USE_AVX2)
-      [[maybe_unused]] const __m256i kOnes256 = _mm256_set1_epi16(1);
+      [[maybe_unused]] const __m256i Ones256 = _mm256_set1_epi16(1);
 
       [[maybe_unused]] auto m256_hadd = [](__m256i sum, int bias) -> int {
         __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extracti128_si256(sum, 1));
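Note (sketch, not part of the patch): without VNNI, dpbusd is emulated by maddubs followed by madd against a vector of ones. maddubs sums adjacent u8*i8 product pairs with signed 16-bit saturation, and the unrolled 4x variants add four such results in 16 bits first, so eight products share one lane. With inputs capped at 127, a group of eight weights is exact only if its positive (and negative) sums stay within floor(32767 / 127) = 258 — that is what the read_parameters scan above enforces, zeroing outlier weights and recording them in canSaturate16. A scalar model of one 32-bit lane, under those assumptions:

    // Sketch: scalar model of m256_add_dpbusd_epi32's non-VNNI fallback.
    #include <algorithm>
    #include <cstdint>

    inline std::int32_t add_dpbusd_lane(std::int32_t acc,
                                        const std::uint8_t a[4],
                                        const std::int8_t b[4])
    {
        // _mm256_maddubs_epi16: adjacent u8*i8 pairs, summed with s16 saturation
        const int lo = std::clamp(a[0] * b[0] + a[1] * b[1], -32768, 32767);
        const int hi = std::clamp(a[2] * b[2] + a[3] * b[3], -32768, 32767);
        // _mm256_madd_epi16 against Ones256: widen the two s16 lanes and add
        return acc + lo + hi;
    }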
@@ -186,7 +186,7 @@ namespace Stockfish::Eval::NNUE::Layers {
         acc = _mm256_dpbusd_epi32(acc, a, b);
 #else
         __m256i product0 = _mm256_maddubs_epi16(a, b);
-        product0 = _mm256_madd_epi16(product0, kOnes256);
+        product0 = _mm256_madd_epi16(product0, Ones256);
         acc = _mm256_add_epi32(acc, product0);
 #endif
       };
@@ -206,7 +206,7 @@ namespace Stockfish::Eval::NNUE::Layers {
         product0 = _mm256_add_epi16(product0, product1);
         product2 = _mm256_add_epi16(product2, product3);
         product0 = _mm256_add_epi16(product0, product2);
-        product0 = _mm256_madd_epi16(product0, kOnes256);
+        product0 = _mm256_madd_epi16(product0, Ones256);
         acc = _mm256_add_epi32(acc, product0);
 #endif
       };
@@ -214,7 +214,7 @@ namespace Stockfish::Eval::NNUE::Layers {
 #endif
 
 #if defined (USE_SSSE3)
-      [[maybe_unused]] const __m128i kOnes128 = _mm_set1_epi16(1);
+      [[maybe_unused]] const __m128i Ones128 = _mm_set1_epi16(1);
 
       [[maybe_unused]] auto m128_hadd = [](__m128i sum, int bias) -> int {
         sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0x4E)); //_MM_PERM_BADC
@@ -224,7 +224,7 @@ namespace Stockfish::Eval::NNUE::Layers {
       [[maybe_unused]] auto m128_add_dpbusd_epi32 = [=](__m128i& acc, __m128i a, __m128i b) {
         __m128i product0 = _mm_maddubs_epi16(a, b);
-        product0 = _mm_madd_epi16(product0, kOnes128);
+        product0 = _mm_madd_epi16(product0, Ones128);
         acc = _mm_add_epi32(acc, product0);
       };
@@ -237,7 +237,7 @@ namespace Stockfish::Eval::NNUE::Layers {
         product0 = _mm_add_epi16(product0, product1);
         product2 = _mm_add_epi16(product2, product3);
         product0 = _mm_add_epi16(product0, product2);
-        product0 = _mm_madd_epi16(product0, kOnes128);
+        product0 = _mm_madd_epi16(product0, Ones128);
         acc = _mm_add_epi32(acc, product0);
       };
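Note (sketch, not part of the patch): the m*_hadd helpers reduce the 32-bit partial sums of one row to a single scalar and fold in the bias; the 0x4E shuffle visible above swaps the two 64-bit halves, and a second pairwise swap finishes the reduction. In scalar form, assuming four lanes:

    // Sketch: what m128_hadd computes, lane arithmetic only.
    #include <cstdint>

    inline std::int32_t hadd4(const std::int32_t lane[4], int bias)
    {
        const std::int32_t ac = lane[0] + lane[2];  // shuffle 0x4E + add
        const std::int32_t bd = lane[1] + lane[3];
        return ac + bd + bias;                      // second swap + add, lane 0
    }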
@@ -269,71 +269,71 @@ namespace Stockfish::Eval::NNUE::Layers {
 
 #if defined (USE_SSSE3)
       const auto output = reinterpret_cast<OutputType*>(buffer);
-      const auto input_vector = reinterpret_cast<const vec_t*>(input);
+      const auto inputVector = reinterpret_cast<const vec_t*>(input);
 
-      static_assert(kOutputDimensions % kOutputSimdWidth == 0 || kOutputDimensions == 1);
+      static_assert(OutputDimensions % OutputSimdWidth == 0 || OutputDimensions == 1);
 
-      // kOutputDimensions is either 1 or a multiple of kSimdWidth
+      // OutputDimensions is either 1 or a multiple of SimdWidth
       // because then it is also an input dimension.
-      if constexpr (kOutputDimensions % kOutputSimdWidth == 0)
+      if constexpr (OutputDimensions % OutputSimdWidth == 0)
       {
-          constexpr IndexType kNumChunks = kPaddedInputDimensions / 4;
+          constexpr IndexType NumChunks = PaddedInputDimensions / 4;
 
           const auto input32 = reinterpret_cast<const std::int32_t*>(input);
           vec_t* outptr = reinterpret_cast<vec_t*>(output);
-          std::memcpy(output, biases_, kOutputDimensions * sizeof(OutputType));
+          std::memcpy(output, biases, OutputDimensions * sizeof(OutputType));
 
-          for (int i = 0; i < (int)kNumChunks - 3; i += 4)
+          for (int i = 0; i < (int)NumChunks - 3; i += 4)
           {
               const vec_t in0 = vec_set_32(input32[i + 0]);
              const vec_t in1 = vec_set_32(input32[i + 1]);
              const vec_t in2 = vec_set_32(input32[i + 2]);
              const vec_t in3 = vec_set_32(input32[i + 3]);
-              const auto col0 = reinterpret_cast<const vec_t*>(&weights_[(i + 0) * kOutputDimensions * 4]);
-              const auto col1 = reinterpret_cast<const vec_t*>(&weights_[(i + 1) * kOutputDimensions * 4]);
-              const auto col2 = reinterpret_cast<const vec_t*>(&weights_[(i + 2) * kOutputDimensions * 4]);
-              const auto col3 = reinterpret_cast<const vec_t*>(&weights_[(i + 3) * kOutputDimensions * 4]);
-              for (int j = 0; j * kOutputSimdWidth < kOutputDimensions; ++j)
+              const auto col0 = reinterpret_cast<const vec_t*>(&weights[(i + 0) * OutputDimensions * 4]);
+              const auto col1 = reinterpret_cast<const vec_t*>(&weights[(i + 1) * OutputDimensions * 4]);
+              const auto col2 = reinterpret_cast<const vec_t*>(&weights[(i + 2) * OutputDimensions * 4]);
+              const auto col3 = reinterpret_cast<const vec_t*>(&weights[(i + 3) * OutputDimensions * 4]);
+              for (int j = 0; j * OutputSimdWidth < OutputDimensions; ++j)
                   vec_add_dpbusd_32x4(outptr[j], in0, col0[j], in1, col1[j], in2, col2[j], in3, col3[j]);
           }
 
           for (int i = 0; i < canSaturate16.count; ++i)
              output[canSaturate16.ids[i].out] += input[canSaturate16.ids[i].in] * canSaturate16.ids[i].w;
       }
-      else if constexpr (kOutputDimensions == 1)
+      else if constexpr (OutputDimensions == 1)
       {
 #if defined (USE_AVX512)
-          if constexpr (kPaddedInputDimensions % (kSimdWidth * 2) != 0)
+          if constexpr (PaddedInputDimensions % (SimdWidth * 2) != 0)
           {
-              constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
-              const auto input_vector256 = reinterpret_cast<const __m256i*>(input);
+              constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
+              const auto inputVector256 = reinterpret_cast<const __m256i*>(input);
 
               __m256i sum0 = _mm256_setzero_si256();
-              const auto row0 = reinterpret_cast<const __m256i*>(&weights_[0]);
+              const auto row0 = reinterpret_cast<const __m256i*>(&weights[0]);
 
-              for (int j = 0; j < (int)kNumChunks; ++j)
+              for (int j = 0; j < (int)NumChunks; ++j)
               {
-                  const __m256i in = input_vector256[j];
+                  const __m256i in = inputVector256[j];
                   m256_add_dpbusd_epi32(sum0, in, row0[j]);
               }
-              output[0] = m256_hadd(sum0, biases_[0]);
+              output[0] = m256_hadd(sum0, biases[0]);
           }
           else
 #endif
           {
 #if defined (USE_AVX512)
-              constexpr IndexType kNumChunks = kPaddedInputDimensions / (kSimdWidth * 2);
+              constexpr IndexType NumChunks = PaddedInputDimensions / (SimdWidth * 2);
 #else
-              constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
+              constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
 #endif
               vec_t sum0 = vec_setzero();
-              const auto row0 = reinterpret_cast<const vec_t*>(&weights_[0]);
+              const auto row0 = reinterpret_cast<const vec_t*>(&weights[0]);
 
-              for (int j = 0; j < (int)kNumChunks; ++j)
+              for (int j = 0; j < (int)NumChunks; ++j)
               {
-                  const vec_t in = input_vector[j];
+                  const vec_t in = inputVector[j];
                   vec_add_dpbusd_32(sum0, in, row0[j]);
               }
-              output[0] = vec_hadd(sum0, biases_[0]);
+              output[0] = vec_hadd(sum0, biases[0]);
           }
       }
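Note (sketch, not part of the patch): in the multi-output branch above, each iteration broadcasts four consecutive input bytes and multiplies them against the interleaved weight block produced at load time, accumulating OutputSimdWidth outputs per vector; the trailing loop then re-applies, in scalar code, the few weights that were zeroed to protect the 16-bit accumulators. A scalar model of one chunk, with hypothetical names:

    // Sketch: one 4-byte input chunk against its interleaved weight block.
    #include <cstdint>

    void accumulate_chunk(std::int32_t* out, int outDims,
                          const std::uint8_t in4[4],  // four input bytes (one chunk)
                          const std::int8_t* block)   // outDims * 4 permuted weights
    {
        for (int o = 0; o < outDims; ++o)       // every output touched by this chunk
            for (int b = 0; b < 4; ++b)
                out[o] += in4[b] * block[o * 4 + b];
    }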
@@ -344,80 +344,80 @@ namespace Stockfish::Eval::NNUE::Layers {
       auto output = reinterpret_cast<OutputType*>(buffer);
 
 #if defined(USE_SSE2)
-      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
-      const __m128i kZeros = _mm_setzero_si128();
-      const auto input_vector = reinterpret_cast<const __m128i*>(input);
+      constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
+      const __m128i Zeros = _mm_setzero_si128();
+      const auto inputVector = reinterpret_cast<const __m128i*>(input);
 
 #elif defined(USE_MMX)
-      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
-      const __m64 kZeros = _mm_setzero_si64();
-      const auto input_vector = reinterpret_cast<const __m64*>(input);
+      constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
+      const __m64 Zeros = _mm_setzero_si64();
+      const auto inputVector = reinterpret_cast<const __m64*>(input);
 
 #elif defined(USE_NEON)
-      constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
-      const auto input_vector = reinterpret_cast<const int8x8_t*>(input);
+      constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
+      const auto inputVector = reinterpret_cast<const int8x8_t*>(input);
 #endif
 
-      for (IndexType i = 0; i < kOutputDimensions; ++i) {
-        const IndexType offset = i * kPaddedInputDimensions;
+      for (IndexType i = 0; i < OutputDimensions; ++i) {
+        const IndexType offset = i * PaddedInputDimensions;
 
 #if defined(USE_SSE2)
-        __m128i sum_lo = _mm_cvtsi32_si128(biases_[i]);
-        __m128i sum_hi = kZeros;
-        const auto row = reinterpret_cast<const __m128i*>(&weights_[offset]);
-        for (IndexType j = 0; j < kNumChunks; ++j) {
+        __m128i sumLo = _mm_cvtsi32_si128(biases[i]);
+        __m128i sumHi = Zeros;
+        const auto row = reinterpret_cast<const __m128i*>(&weights[offset]);
+        for (IndexType j = 0; j < NumChunks; ++j) {
           __m128i row_j = _mm_load_si128(&row[j]);
-          __m128i input_j = _mm_load_si128(&input_vector[j]);
-          __m128i extended_row_lo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
-          __m128i extended_row_hi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
-          __m128i extended_input_lo = _mm_unpacklo_epi8(input_j, kZeros);
-          __m128i extended_input_hi = _mm_unpackhi_epi8(input_j, kZeros);
-          __m128i product_lo = _mm_madd_epi16(extended_row_lo, extended_input_lo);
-          __m128i product_hi = _mm_madd_epi16(extended_row_hi, extended_input_hi);
-          sum_lo = _mm_add_epi32(sum_lo, product_lo);
-          sum_hi = _mm_add_epi32(sum_hi, product_hi);
+          __m128i input_j = _mm_load_si128(&inputVector[j]);
+          __m128i extendedRowLo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
+          __m128i extendedRowHi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
+          __m128i extendedInputLo = _mm_unpacklo_epi8(input_j, Zeros);
+          __m128i extendedInputHi = _mm_unpackhi_epi8(input_j, Zeros);
+          __m128i productLo = _mm_madd_epi16(extendedRowLo, extendedInputLo);
+          __m128i productHi = _mm_madd_epi16(extendedRowHi, extendedInputHi);
+          sumLo = _mm_add_epi32(sumLo, productLo);
+          sumHi = _mm_add_epi32(sumHi, productHi);
         }
-        __m128i sum = _mm_add_epi32(sum_lo, sum_hi);
-        __m128i sum_high_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
-        sum = _mm_add_epi32(sum, sum_high_64);
+        __m128i sum = _mm_add_epi32(sumLo, sumHi);
+        __m128i sumHigh_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
+        sum = _mm_add_epi32(sum, sumHigh_64);
         __m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
         sum = _mm_add_epi32(sum, sum_second_32);
         output[i] = _mm_cvtsi128_si32(sum);
 
 #elif defined(USE_MMX)
-        __m64 sum_lo = _mm_cvtsi32_si64(biases_[i]);
-        __m64 sum_hi = kZeros;
-        const auto row = reinterpret_cast<const __m64*>(&weights_[offset]);
-        for (IndexType j = 0; j < kNumChunks; ++j) {
+        __m64 sumLo = _mm_cvtsi32_si64(biases[i]);
+        __m64 sumHi = Zeros;
+        const auto row = reinterpret_cast<const __m64*>(&weights[offset]);
+        for (IndexType j = 0; j < NumChunks; ++j) {
          __m64 row_j = row[j];
-          __m64 input_j = input_vector[j];
-          __m64 extended_row_lo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
-          __m64 extended_row_hi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
-          __m64 extended_input_lo = _mm_unpacklo_pi8(input_j, kZeros);
-          __m64 extended_input_hi = _mm_unpackhi_pi8(input_j, kZeros);
-          __m64 product_lo = _mm_madd_pi16(extended_row_lo, extended_input_lo);
-          __m64 product_hi = _mm_madd_pi16(extended_row_hi, extended_input_hi);
-          sum_lo = _mm_add_pi32(sum_lo, product_lo);
-          sum_hi = _mm_add_pi32(sum_hi, product_hi);
+          __m64 input_j = inputVector[j];
+          __m64 extendedRowLo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
+          __m64 extendedRowHi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
+          __m64 extendedInputLo = _mm_unpacklo_pi8(input_j, Zeros);
+          __m64 extendedInputHi = _mm_unpackhi_pi8(input_j, Zeros);
+          __m64 productLo = _mm_madd_pi16(extendedRowLo, extendedInputLo);
+          __m64 productHi = _mm_madd_pi16(extendedRowHi, extendedInputHi);
+          sumLo = _mm_add_pi32(sumLo, productLo);
+          sumHi = _mm_add_pi32(sumHi, productHi);
         }
-        __m64 sum = _mm_add_pi32(sum_lo, sum_hi);
+        __m64 sum = _mm_add_pi32(sumLo, sumHi);
         sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
         output[i] = _mm_cvtsi64_si32(sum);
 
 #elif defined(USE_NEON)
-        int32x4_t sum = {biases_[i]};
-        const auto row = reinterpret_cast<const int8x8_t*>(&weights_[offset]);
-        for (IndexType j = 0; j < kNumChunks; ++j) {
-          int16x8_t product = vmull_s8(input_vector[j * 2], row[j * 2]);
-          product = vmlal_s8(product, input_vector[j * 2 + 1], row[j * 2 + 1]);
+        int32x4_t sum = {biases[i]};
+        const auto row = reinterpret_cast<const int8x8_t*>(&weights[offset]);
+        for (IndexType j = 0; j < NumChunks; ++j) {
+          int16x8_t product = vmull_s8(inputVector[j * 2], row[j * 2]);
+          product = vmlal_s8(product, inputVector[j * 2 + 1], row[j * 2 + 1]);
           sum = vpadalq_s16(sum, product);
         }
         output[i] = sum[0] + sum[1] + sum[2] + sum[3];
 
 #else
-        OutputType sum = biases_[i];
-        for (IndexType j = 0; j < kInputDimensions; ++j) {
-          sum += weights_[offset + j] * input[j];
+        OutputType sum = biases[i];
+        for (IndexType j = 0; j < InputDimensions; ++j) {
+          sum += weights[offset + j] * input[j];
         }
         output[i] = sum;
 #endif
@@ -436,10 +436,10 @@ namespace Stockfish::Eval::NNUE::Layers {
     using BiasType = OutputType;
     using WeightType = std::int8_t;
 
-    PreviousLayer previous_layer_;
+    PreviousLayer previousLayer;
 
-    alignas(kCacheLineSize) BiasType biases_[kOutputDimensions];
-    alignas(kCacheLineSize) WeightType weights_[kOutputDimensions * kPaddedInputDimensions];
+    alignas(CacheLineSize) BiasType biases[OutputDimensions];
+    alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
 #if defined (USE_SSSE3)
     struct CanSaturate {
         int count;
@@ -447,7 +447,7 @@ namespace Stockfish::Eval::NNUE::Layers {
             uint16_t out;
             uint16_t in;
             int8_t w;
-        } ids[kPaddedInputDimensions * kOutputDimensions * 3 / 4];
+        } ids[PaddedInputDimensions * OutputDimensions * 3 / 4];
         void add(int i, int j, int8_t w) {
             ids[count].out = i;
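Note (sketch, not part of the patch): every SIMD branch in propagate computes the same affine map as the generic fallback shown above; only the iteration order and intermediate widths differ. For reference, the arithmetic in plain C++ with the patch's renamed identifiers (free-standing, hypothetical signature):

    // Sketch: reference semantics of AffineTransform::propagate.
    #include <cstdint>

    void affine_reference(std::int32_t* output, const std::uint8_t* input,
                          const std::int8_t* weights,   // row-major, rows padded
                          const std::int32_t* biases,
                          int outputDims, int inputDims, int paddedInputDims)
    {
        for (int i = 0; i < outputDims; ++i)
        {
            std::int32_t sum = biases[i];
            for (int j = 0; j < inputDims; ++j)
                sum += weights[i * paddedInputDims + j] * input[j];
            output[i] = sum;
        }
    }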