git.sesse.net Git - stockfish/commitdiff
Sparse impl of affine_transform_non_ssse3()
author mstembera <MissingEmail@email>
Tue, 12 Sep 2023 19:23:24 +0000 (12:23 -0700)
committer Joost VandeVondele <Joost.VandeVondele@gmail.com>
Fri, 22 Sep 2023 17:03:47 +0000 (19:03 +0200)
Deal with the general case.

About an 8.6% speedup (for the general arch)

Results for 200 tests for each version:

            Base      Test      Diff
    Mean    141741    153998    -12257
    StDev   2990      3042      3742

p-value: 0.999
speedup: 0.086

closes https://github.com/official-stockfish/Stockfish/pull/4786

No functional change

src/nnue/layers/affine_transform.h
src/nnue/layers/clipped_relu.h
src/nnue/layers/sqr_clipped_relu.h
src/nnue/nnue_feature_transformer.h

src/nnue/layers/affine_transform.h
index 61cdb7818661286cde3c2be0c55f87035746fcad..af85c817c2aa0765e54aa14364c14ed7fb4a3e2d 100644 (file)
@@ -45,6 +45,7 @@ namespace Stockfish::Eval::NNUE::Layers {
   template <IndexType InputDimensions, IndexType PaddedInputDimensions, IndexType OutputDimensions>
   static void affine_transform_non_ssse3(std::int32_t* output, const std::int8_t* weights, const std::int32_t* biases, const std::uint8_t* input)
   {
+# if defined(USE_SSE2) || defined(USE_MMX) || defined(USE_NEON_DOTPROD) || defined(USE_NEON)
 # if defined(USE_SSE2)
     // At least a multiple of 16, with SSE2.
     constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 16) / 16;
@@ -129,18 +130,25 @@ namespace Stockfish::Eval::NNUE::Layers {
       }
       output[i] = sum[0] + sum[1] + sum[2] + sum[3];
 
-# else
-      std::int32_t sum = biases[i];
-      for (IndexType j = 0; j < InputDimensions; ++j) {
-        sum += weights[offset + j] * input[j];
-      }
-      output[i] = sum;
 # endif
     }
 
 # if defined(USE_MMX)
     _mm_empty();
 # endif
+
+# else
+  std::memcpy(output, biases, sizeof(std::int32_t) * OutputDimensions);
+
+  // Traverse weights in transpose order to take advantage of input sparsity
+  for (IndexType i = 0; i < InputDimensions; ++i)
+      if (input[i]) {
+          const std::int8_t* w = &weights[i];
+          const int in = input[i];
+          for (IndexType j = 0; j < OutputDimensions; ++j)
+              output[j] += w[j * PaddedInputDimensions] * in;
+      }
+# endif
   }
 #endif
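
The transposed traversal pays off because the inputs to this layer are activations after a clipped ReLU, which are mostly zero, so whole columns of multiply-adds are skipped. A minimal standalone sketch of the same access pattern (kIn, kPaddedIn, kOut are made-up illustrative sizes, not the Stockfish template parameters):

    // Weights are stored row-major as weights[out][in], so for a fixed input i,
    // w = &weights[i] and w[j * kPaddedIn] walks that input's weight in every row.
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    constexpr int kIn = 8, kPaddedIn = 8, kOut = 4;

    void affine_sparse(std::int32_t* output, const std::int8_t* weights,
                       const std::int32_t* biases, const std::uint8_t* input) {
        std::memcpy(output, biases, sizeof(std::int32_t) * kOut);
        for (int i = 0; i < kIn; ++i)
            if (input[i]) {  // zero inputs contribute nothing: skip the whole column
                const std::int8_t* w = &weights[i];
                const int in = input[i];
                for (int j = 0; j < kOut; ++j)
                    output[j] += w[j * kPaddedIn] * in;
            }
    }

    int main() {
        std::int8_t  weights[kOut * kPaddedIn] = {};
        std::int32_t biases[kOut] = {1, 2, 3, 4};
        std::uint8_t input[kIn]   = {0, 5, 0, 0, 0, 0, 0, 9};  // mostly zero, as after ReLU
        weights[0 * kPaddedIn + 1] = 3;   // row 0, input 1
        weights[2 * kPaddedIn + 7] = -2;  // row 2, input 7

        std::int32_t output[kOut];
        affine_sparse(output, weights, biases, input);
        for (int j = 0; j < kOut; ++j)
            std::cout << output[j] << ' ';  // prints: 16 2 -15 4
        std::cout << '\n';
    }

Only the two nonzero inputs trigger any work here; the dense loop would have touched all kIn * kOut weights.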
 
src/nnue/layers/clipped_relu.h
index 2856bfb0a635164c486d6e474e5a3691dff24df3..aab824b357266079c4d7bce15fe4c86b8d684c9b 100644 (file)
@@ -172,7 +172,7 @@ namespace Stockfish::Eval::NNUE::Layers {
 
       for (IndexType i = Start; i < InputDimensions; ++i) {
         output[i] = static_cast<OutputType>(
-            std::max(0, std::min(127, input[i] >> WeightScaleBits)));
+            std::clamp(input[i] >> WeightScaleBits, 0, 127));
       }
     }
   };
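
The std::clamp form is a drop-in equivalent of the nested max/min for these bounds; a quick standalone check (not Stockfish code):

    #include <algorithm>
    #include <cassert>

    int main() {
        // std::clamp(x, lo, hi) == std::max(lo, std::min(hi, x)) whenever lo <= hi
        for (int x = -300; x <= 300; ++x)
            assert(std::max(0, std::min(127, x)) == std::clamp(x, 0, 127));
    }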
src/nnue/layers/sqr_clipped_relu.h
index 503b283b25e53b7ef1590b526c50d21b0582a224..a3d2059b4de1fc0d0086b48352a41e5731bf3ab9 100644 (file)
@@ -96,9 +96,9 @@ namespace Stockfish::Eval::NNUE::Layers {
 
       for (IndexType i = Start; i < InputDimensions; ++i) {
         output[i] = static_cast<OutputType>(
-            // really should be /127 but we need to make it fast
-            // needs to be accounted for in the trainer
-            std::min(127ll, (((long long)input[i] * input[i]) >> (2 * WeightScaleBits)) / 128));
+            // Really should be /127 but we need to make it fast so we right shift
+            // by an extra 7 bits instead. Needs to be accounted for in the trainer.
+            std::min(127ll, ((long long)input[i] * input[i]) >> (2 * WeightScaleBits + 7)));
       }
     }
   };
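
Folding the /128 into the shift is exact, not an approximation: for non-negative v, floor(floor(v / 2^s) / 2^7) == floor(v / 2^(s + 7)), and input[i] * input[i] is always non-negative. A standalone check (s = 12 here, i.e. 2 * WeightScaleBits with WeightScaleBits = 6; treat the constant as illustrative):

    #include <cassert>

    int main() {
        // Shifting then dividing by 128 equals one combined shift for v >= 0.
        for (long long x = -2048; x <= 2048; ++x) {
            const long long v = x * x;  // squaring keeps the value non-negative
            assert((v >> 12) / 128 == v >> (12 + 7));
        }
    }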
src/nnue/nnue_feature_transformer.h
index 0af0ed96cc5a1446a98150f1d9d6b1ba9f3b4c65..56442bac9b17818ba74610c1213d21e853c512d8 100644 (file)
@@ -327,9 +327,9 @@ namespace Stockfish::Eval::NNUE {
           for (IndexType j = 0; j < HalfDimensions / 2; ++j) {
               BiasType sum0 = accumulation[static_cast<int>(perspectives[p])][j + 0];
               BiasType sum1 = accumulation[static_cast<int>(perspectives[p])][j + HalfDimensions / 2];
-              sum0 = std::max<int>(0, std::min<int>(127, sum0));
-              sum1 = std::max<int>(0, std::min<int>(127, sum1));
-              output[offset + j] = static_cast<OutputType>(sum0 * sum1 / 128);
+              sum0 = std::clamp<BiasType>(sum0, 0, 127);
+              sum1 = std::clamp<BiasType>(sum1, 0, 127);
+              output[offset + j] = static_cast<OutputType>(unsigned(sum0 * sum1) / 128);
           }
 
 #endif
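
The unsigned cast in the last hunk is value-preserving: after clamping, sum0 and sum1 lie in [0, 127], so their product lies in [0, 16129]. The point of the cast is codegen: an unsigned division by 128 is a plain logical shift, while a signed division by 128 needs extra instructions to round negative results toward zero. A standalone check of the equivalence (not Stockfish code):

    #include <cassert>

    int main() {
        // For products in [0, 16129] the signed and unsigned divisions agree;
        // the cast only changes the instructions the compiler may emit.
        for (int sum0 = 0; sum0 <= 127; ++sum0)
            for (int sum1 = 0; sum1 <= 127; ++sum1)
                assert(sum0 * sum1 / 128 == int(unsigned(sum0 * sum1) / 128));
    }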