/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2023 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

// Definition of layer ClippedReLU of NNUE evaluation function

#ifndef NNUE_LAYERS_CLIPPED_RELU_H_INCLUDED
#define NNUE_LAYERS_CLIPPED_RELU_H_INCLUDED

#include <algorithm>
#include <cstdint>
#include <iosfwd>

#include "../nnue_common.h"

namespace Stockfish::Eval::NNUE::Layers {

// Clipped ReLU
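// Maps each int32 input x to clamp(x >> WeightScaleBits, 0, 127), stored as uint8.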
template<IndexType InDims>
class ClippedReLU {
   public:
    // Input/output type
    using InputType  = std::int32_t;
    using OutputType = std::uint8_t;

    // Number of input/output dimensions
    static constexpr IndexType InputDimensions  = InDims;
    static constexpr IndexType OutputDimensions = InputDimensions;
    static constexpr IndexType PaddedOutputDimensions =
      ceil_to_multiple<IndexType>(OutputDimensions, 32);

    using OutputBuffer = OutputType[PaddedOutputDimensions];

    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
        std::uint32_t hashValue = 0x538D24C7u;
        hashValue += prevHash;
        return hashValue;
    }

    // Read network parameters
    bool read_parameters(std::istream&) { return true; }

    // Write network parameters
    bool write_parameters(std::ostream&) const { return true; }

    // Forward propagation
    void propagate(const InputType* input, OutputType* output) const {

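        // Every SIMD path below shifts the 32-bit inputs right by WeightScaleBits,
        // saturates them to 8 bits and clamps negative values to zero; Start records
        // how many elements were handled, and the scalar loop at the end does the rest.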
#if defined(USE_AVX2)
        if constexpr (InputDimensions % SimdWidth == 0)
        {
            constexpr IndexType NumChunks = InputDimensions / SimdWidth;
            const __m256i       Zero      = _mm256_setzero_si256();
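            // The two in-lane packs steps below interleave the 128-bit lanes, so this
            // permutation restores the original element order after packing.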
            const __m256i       Offsets   = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
            const auto          in        = reinterpret_cast<const __m256i*>(input);
            const auto          out       = reinterpret_cast<__m256i*>(output);
            for (IndexType i = 0; i < NumChunks; ++i)
            {
                const __m256i words0 =
                  _mm256_srai_epi16(_mm256_packs_epi32(_mm256_load_si256(&in[i * 4 + 0]),
                                                       _mm256_load_si256(&in[i * 4 + 1])),
                                    WeightScaleBits);
                const __m256i words1 =
                  _mm256_srai_epi16(_mm256_packs_epi32(_mm256_load_si256(&in[i * 4 + 2]),
                                                       _mm256_load_si256(&in[i * 4 + 3])),
                                    WeightScaleBits);
                _mm256_store_si256(
                  &out[i], _mm256_permutevar8x32_epi32(
                             _mm256_max_epi8(_mm256_packs_epi16(words0, words1), Zero), Offsets));
            }
        }
        else
        {
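            // 128-bit fallback used when InputDimensions is not a multiple of SimdWidth;
            // leftover elements are picked up by the scalar loop below.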
            constexpr IndexType NumChunks = InputDimensions / (SimdWidth / 2);
            const __m128i       Zero      = _mm_setzero_si128();
            const auto          in        = reinterpret_cast<const __m128i*>(input);
            const auto          out       = reinterpret_cast<__m128i*>(output);
            for (IndexType i = 0; i < NumChunks; ++i)
            {
                const __m128i words0 = _mm_srai_epi16(
                  _mm_packs_epi32(_mm_load_si128(&in[i * 4 + 0]), _mm_load_si128(&in[i * 4 + 1])),
                  WeightScaleBits);
                const __m128i words1 = _mm_srai_epi16(
                  _mm_packs_epi32(_mm_load_si128(&in[i * 4 + 2]), _mm_load_si128(&in[i * 4 + 3])),
                  WeightScaleBits);
                const __m128i packedbytes = _mm_packs_epi16(words0, words1);
                _mm_store_si128(&out[i], _mm_max_epi8(packedbytes, Zero));
            }
        }
        constexpr IndexType Start = InputDimensions % SimdWidth == 0
                                    ? InputDimensions / SimdWidth * SimdWidth
                                    : InputDimensions / (SimdWidth / 2) * (SimdWidth / 2);

#elif defined(USE_SSE2)
        constexpr IndexType NumChunks = InputDimensions / SimdWidth;

    #ifdef USE_SSE41
        const __m128i Zero = _mm_setzero_si128();
    #else
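        // SSE2 lacks _mm_max_epi8, so clamping at zero is emulated below with a
        // saturating add and subtract of -128.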
        const __m128i k0x80s = _mm_set1_epi8(-128);
    #endif

        const auto in  = reinterpret_cast<const __m128i*>(input);
        const auto out = reinterpret_cast<__m128i*>(output);
        for (IndexType i = 0; i < NumChunks; ++i)
        {
            const __m128i words0 = _mm_srai_epi16(
              _mm_packs_epi32(_mm_load_si128(&in[i * 4 + 0]), _mm_load_si128(&in[i * 4 + 1])),
              WeightScaleBits);
            const __m128i words1 = _mm_srai_epi16(
              _mm_packs_epi32(_mm_load_si128(&in[i * 4 + 2]), _mm_load_si128(&in[i * 4 + 3])),
              WeightScaleBits);
            const __m128i packedbytes = _mm_packs_epi16(words0, words1);
            _mm_store_si128(&out[i],

    #ifdef USE_SSE41
                            _mm_max_epi8(packedbytes, Zero)
    #else
                            _mm_subs_epi8(_mm_adds_epi8(packedbytes, k0x80s), k0x80s)
    #endif

            );
        }
        constexpr IndexType Start = NumChunks * SimdWidth;

#elif defined(USE_NEON)
        constexpr IndexType NumChunks = InputDimensions / (SimdWidth / 2);
        const int8x8_t      Zero      = {0};
        const auto          in        = reinterpret_cast<const int32x4_t*>(input);
        const auto          out       = reinterpret_cast<int8x8_t*>(output);
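        // vqshrn_n_s32 shifts right by WeightScaleBits and narrows to int16 with
        // saturation; vqmovn_s16 then saturates to int8 and vmax_s8 clamps at zero.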
        for (IndexType i = 0; i < NumChunks; ++i)
        {
            int16x8_t  shifted;
            const auto pack = reinterpret_cast<int16x4_t*>(&shifted);
            pack[0]         = vqshrn_n_s32(in[i * 2 + 0], WeightScaleBits);
            pack[1]         = vqshrn_n_s32(in[i * 2 + 1], WeightScaleBits);
            out[i]          = vmax_s8(vqmovn_s16(shifted), Zero);
        }
        constexpr IndexType Start = NumChunks * (SimdWidth / 2);
#else
        constexpr IndexType Start = 0;
#endif

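        // Scalar tail: process any elements the SIMD code above did not cover.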
        for (IndexType i = Start; i < InputDimensions; ++i)
        {
            output[i] = static_cast<OutputType>(std::clamp(input[i] >> WeightScaleBits, 0, 127));
        }
    }
};

}  // namespace Stockfish::Eval::NNUE::Layers

#endif  // NNUE_LAYERS_CLIPPED_RELU_H_INCLUDED