Update NNUE architecture to SFNNv5. Update network to nn-3c0aa92af1da.nnue.
author Tomasz Sobczyk <tomasz.sobczyk1997@gmail.com>
Fri, 13 May 2022 15:26:50 +0000 (17:26 +0200)
committer Joost VandeVondele <Joost.VandeVondele@gmail.com>
Sat, 14 May 2022 10:47:22 +0000 (12:47 +0200)
Architecture changes:

    Duplicated the activation after the 1024->15 layer, applying a squared crelu to the copy (so 15->15*2), as proposed by vondele; a scalar sketch follows below.

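In scalar terms, the duplicated activation computes roughly the following (a minimal sketch with illustrative names x and y, assuming WeightScaleBits == 6 as in the diff below; x holds the 15 int32 outputs of fc_0, y the 30 uint8 inputs handed to fc_1):

    // squared crelu occupies the first 15 slots, plain crelu the next 15
    for (int i = 0; i < 15; ++i) {
        y[i]      = std::clamp(((long long)x[i] * x[i] >> 12) / 128, 0LL, 127LL);
        y[15 + i] = std::clamp(x[i] >> 6, 0, 127);
    }
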
Trainer changes:

    Added the previously missing bias to the L1 factorization (no measurable improvement, but at least neutral in principle).
    For retraining, linearly reduce the lambda parameter from 1.0 at epoch 0 to 0.75 at epoch 800; a sketch of the schedule follows below.
    Reduced max_skipping_rate from 15 to 10 (compared to vondele's outstanding PR).

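The retraining lambda schedule is plain linear interpolation between --start-lambda and --end-lambda (a minimal sketch, not the trainer's actual code; the values are taken from the second invocation below):

    // lambda is 1.0 at epoch 0 and falls linearly to 0.75 at epoch 800
    double lambda_at(int epoch) {
        return 1.0 + (0.75 - 1.0) * std::min(epoch, 800) / 800.0;
    }
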
Note: This network was trained with a ~0.8% error in the quantization of the newly added
      activation function (plausibly the 127/128 scale factor from the fast /128 division
      visible in the new code below). This will be fixed in the released trainer version.
      Expect a trainer PR tomorrow.

Note: The inference implementation cuts a corner to merge the results of the two activation
      functions; see the buffer-layout sketch below. This could be resolved more cleanly in
      the future. A dedicated AVX2 implementation is likely unnecessary, but NEON is missing.

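Concretely, the corner being cut is a buffer merge (a layout sketch read off the diff below, not additional engine code):

    // fc_0 produces 16 outputs (15 + 1 skip connection).
    // ac_sqr_0 squares-and-clips all 16 into ac_sqr_0_out[0..15];
    // a memcpy then copies the 15 plain crelu outputs into
    // ac_sqr_0_out[15..29], overwriting the (unneeded) squared skip
    // value, so fc_1 can read one contiguous 30-wide input.
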
First training session invocation:

python3 train.py \
    ../nnue-pytorch-training/data/nodes5000pv2_UHO.binpack \
    ../nnue-pytorch-training/data/nodes5000pv2_UHO.binpack \
    --gpus "$3," \
    --threads 4 \
    --num-workers 8 \
    --batch-size 16384 \
    --progress_bar_refresh_rate 20 \
    --random-fen-skipping 3 \
    --features=HalfKAv2_hm^ \
    --lambda=1.0 \
    --max_epochs=400 \
    --default_root_dir ../nnue-pytorch-training/experiment_$1/run_$2

Second training session invocation:

python3 train.py \
    ../nnue-pytorch-training/data/T60T70wIsRightFarseerT60T74T75T76.binpack \
    ../nnue-pytorch-training/data/T60T70wIsRightFarseerT60T74T75T76.binpack \
    --gpus "$3," \
    --threads 4 \
    --num-workers 8 \
    --batch-size 16384 \
    --progress_bar_refresh_rate 20 \
    --random-fen-skipping 3 \
    --features=HalfKAv2_hm^ \
    --start-lambda=1.0 \
    --end-lambda=0.75 \
    --gamma=0.995 \
    --lr=4.375e-4 \
    --max_epochs=800 \
    --resume-from-model /data/sopel/nnue/nnue-pytorch-training/data/exp367/nn-exp367-run3-epoch399.pt \
    --default_root_dir ../nnue-pytorch-training/experiment_$1/run_$2

Passed STC:
LLR: 2.95 (-2.94,2.94) <0.00,2.50>
Total: 27288 W: 7445 L: 7178 D: 12665
Ptnml(0-2): 159, 3002, 7054, 3271, 158
https://tests.stockfishchess.org/tests/view/627e8c001919125939623644

Passed LTC:
LLR: 2.95 (-2.94,2.94) <0.50,3.00>
Total: 21792 W: 5969 L: 5727 D: 10096
Ptnml(0-2): 25, 2152, 6294, 2406, 19
https://tests.stockfishchess.org/tests/view/627f2a855734b18b2e2ece47

closes https://github.com/official-stockfish/Stockfish/pull/4020

Bench: 6481017

src/evaluate.h
src/nnue/layers/sqr_clipped_relu.h [new file with mode: 0644]
src/nnue/nnue_architecture.h

diff --git a/src/evaluate.h b/src/evaluate.h
index e857b7992d886127d3bec7af71a6f3c733414923..f67961a90e50b257f8f87e428bb93ecbaa2e03bb 100644 (file)
@@ -39,7 +39,7 @@ namespace Eval {
   // The default net name MUST follow the format nn-[SHA256 first 12 digits].nnue
   // for the build process (profile-build and fishtest) to work. Do not change the
   // name of the macro, as it is used in the Makefile.
-  #define EvalFileDefaultName   "nn-d0b74ce1e5eb.nnue"
+  #define EvalFileDefaultName   "nn-3c0aa92af1da.nnue"
 
   namespace NNUE {
 
diff --git a/src/nnue/layers/sqr_clipped_relu.h b/src/nnue/layers/sqr_clipped_relu.h
new file mode 100644 (file)
index 0000000..b603a27
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
+  Copyright (C) 2004-2022 The Stockfish developers (see AUTHORS file)
+
+  Stockfish is free software: you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation, either version 3 of the License, or
+  (at your option) any later version.
+
+  Stockfish is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+*/
+
+// Definition of layer SqrClippedReLU of NNUE evaluation function
+
+#ifndef NNUE_LAYERS_SQR_CLIPPED_RELU_H_INCLUDED
+#define NNUE_LAYERS_SQR_CLIPPED_RELU_H_INCLUDED
+
+#include "../nnue_common.h"
+
+namespace Stockfish::Eval::NNUE::Layers {
+
+  // Squared Clipped ReLU
+  template <IndexType InDims>
+  class SqrClippedReLU {
+   public:
+    // Input/output type
+    using InputType = std::int32_t;
+    using OutputType = std::uint8_t;
+
+    // Number of input/output dimensions
+    static constexpr IndexType InputDimensions = InDims;
+    static constexpr IndexType OutputDimensions = InputDimensions;
+    static constexpr IndexType PaddedOutputDimensions =
+        ceil_to_multiple<IndexType>(OutputDimensions, 32);
+
+    using OutputBuffer = OutputType[PaddedOutputDimensions];
+
+    // Hash value embedded in the evaluation file
+    static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
+      std::uint32_t hashValue = 0x538D24C7u;
+      hashValue += prevHash;
+      return hashValue;
+    }
+
+    // Read network parameters
+    bool read_parameters(std::istream&) {
+      return true;
+    }
+
+    // Write network parameters
+    bool write_parameters(std::ostream&) const {
+      return true;
+    }
+
+    // Forward propagation
+    const OutputType* propagate(
+        const InputType* input, OutputType* output) const {
+
+  #if defined(USE_SSE2)
+      constexpr IndexType NumChunks = InputDimensions / 16;
+
+  #ifdef USE_SSE41
+      const __m128i Zero = _mm_setzero_si128();
+  #else
+      const __m128i k0x80s = _mm_set1_epi8(-128);
+  #endif
+
+      static_assert(WeightScaleBits == 6);
+      const auto in = reinterpret_cast<const __m128i*>(input);
+      const auto out = reinterpret_cast<__m128i*>(output);
+      for (IndexType i = 0; i < NumChunks; ++i) {
+        __m128i words0 = _mm_packs_epi32(
+            _mm_load_si128(&in[i * 4 + 0]),
+            _mm_load_si128(&in[i * 4 + 1]));
+        __m128i words1 = _mm_packs_epi32(
+            _mm_load_si128(&in[i * 4 + 2]),
+            _mm_load_si128(&in[i * 4 + 3]));
+
+        // mulhi keeps only the high 16 bits of the 32-bit product (a shift
+        // right by 16); the extra shift by 3 gives >> 19 in total, matching
+        // the scalar (x * x >> 2 * WeightScaleBits) / 128 below.
+        words0 = _mm_srli_epi16(_mm_mulhi_epi16(words0, words0), 3);
+        words1 = _mm_srli_epi16(_mm_mulhi_epi16(words1, words1), 3);
+
+        const __m128i packedbytes = _mm_packs_epi16(words0, words1);
+
+        _mm_store_si128(&out[i],
+
+  #ifdef USE_SSE41
+          _mm_max_epi8(packedbytes, Zero)
+  #else
+          _mm_subs_epi8(_mm_adds_epi8(packedbytes, k0x80s), k0x80s)
+  #endif
+
+        );
+      }
+      constexpr IndexType Start = NumChunks * 16;
+
+  #else
+      constexpr IndexType Start = 0;
+  #endif
+
+      for (IndexType i = Start; i < InputDimensions; ++i) {
+        output[i] = static_cast<OutputType>(
+            // really should be /127, but /128 (a plain shift) is faster;
+            // this ~0.8% difference needs to be accounted for in the trainer
+            std::max(0ll, std::min(127ll, (((long long)input[i] * input[i]) >> (2 * WeightScaleBits)) / 128)));
+      }
+
+      return output;
+    }
+  };
+
+}  // namespace Stockfish::Eval::NNUE::Layers
+
+#endif // NNUE_LAYERS_SQR_CLIPPED_RELU_H_INCLUDED
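
As a standalone sanity check of the scalar fallback above (an illustrative sketch; sqr_crelu_ref is not an engine function): an int32 activation x encodes a real value r as x ~= 127 * 64 * r, so for r = 0.5, x = 4064 and the function below returns 31, while exact /127 scaling would give 127 * r^2 = 31.75, i.e. the ~0.8% shrink from the note above.

    #include <algorithm>
    #include <cstdint>

    // Reference scalar squared clipped ReLU, mirroring the generic loop above.
    std::uint8_t sqr_crelu_ref(std::int32_t x) {
        const long long v = (((long long)x * x) >> 12) / 128; // /128 for speed, exact would be /127
        return (std::uint8_t)std::clamp(v, 0LL, 127LL);
    }
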
diff --git a/src/nnue/nnue_architecture.h b/src/nnue/nnue_architecture.h
index 4f9596ae949e032a8e1d45d42f4c2312ad7f30c1..cac8373075e368b37e3c72493490c7e23920afcc 100644 (file)
@@ -29,6 +29,7 @@
 
 #include "layers/affine_transform.h"
 #include "layers/clipped_relu.h"
+#include "layers/sqr_clipped_relu.h"
 
 #include "../misc.h"
 
@@ -48,8 +49,9 @@ struct Network
   static constexpr int FC_1_OUTPUTS = 32;
 
   Layers::AffineTransform<TransformedFeatureDimensions, FC_0_OUTPUTS + 1> fc_0;
+  Layers::SqrClippedReLU<FC_0_OUTPUTS + 1> ac_sqr_0;
   Layers::ClippedReLU<FC_0_OUTPUTS + 1> ac_0;
-  Layers::AffineTransform<FC_0_OUTPUTS, FC_1_OUTPUTS> fc_1;
+  Layers::AffineTransform<FC_0_OUTPUTS * 2, FC_1_OUTPUTS> fc_1;
   Layers::ClippedReLU<FC_1_OUTPUTS> ac_1;
   Layers::AffineTransform<FC_1_OUTPUTS, 1> fc_2;
 
@@ -93,6 +95,7 @@ struct Network
     struct alignas(CacheLineSize) Buffer
     {
       alignas(CacheLineSize) decltype(fc_0)::OutputBuffer fc_0_out;
+      alignas(CacheLineSize) decltype(ac_sqr_0)::OutputType ac_sqr_0_out[ceil_to_multiple<IndexType>(FC_0_OUTPUTS * 2, 32)];
       alignas(CacheLineSize) decltype(ac_0)::OutputBuffer ac_0_out;
       alignas(CacheLineSize) decltype(fc_1)::OutputBuffer fc_1_out;
       alignas(CacheLineSize) decltype(ac_1)::OutputBuffer ac_1_out;
@@ -114,8 +117,10 @@ struct Network
 #endif
 
     fc_0.propagate(transformedFeatures, buffer.fc_0_out);
+    ac_sqr_0.propagate(buffer.fc_0_out, buffer.ac_sqr_0_out);
     ac_0.propagate(buffer.fc_0_out, buffer.ac_0_out);
-    fc_1.propagate(buffer.ac_0_out, buffer.fc_1_out);
+    std::memcpy(buffer.ac_sqr_0_out + FC_0_OUTPUTS, buffer.ac_0_out, FC_0_OUTPUTS * sizeof(decltype(ac_0)::OutputType));
+    fc_1.propagate(buffer.ac_sqr_0_out, buffer.fc_1_out);
     ac_1.propagate(buffer.fc_1_out, buffer.ac_1_out);
     fc_2.propagate(buffer.ac_1_out, buffer.fc_2_out);
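
Putting the diff together, the resulting SFNNv5 layer stack is (a summary sketch; the extra +1 output of fc_0 is, as in SFNNv4, a skip connection fed past these layers into the final evaluation):

    fc_0:              1024 -> 15 (+1 skip)
    ac_sqr_0 ++ ac_0:    15 -> 30   (squared crelu concatenated with crelu)
    fc_1:                30 -> 32
    ac_1:                crelu
    fc_2:                32 -> 1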