/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2020 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

// A class that converts the input features of the NNUE evaluation function

#ifndef NNUE_FEATURE_TRANSFORMER_H_INCLUDED
#define NNUE_FEATURE_TRANSFORMER_H_INCLUDED

#include "nnue_common.h"
#include "nnue_architecture.h"
#include "features/index_list.h"

#include <cstring> // std::memcpy()

namespace Eval::NNUE {

  // Input feature converter
  class FeatureTransformer {

   private:
    // Number of output dimensions for one side
    static constexpr IndexType kHalfDimensions = kTransformedFeatureDimensions;

   public:
    // Output type
    using OutputType = TransformedFeatureType;

    // Number of input/output dimensions
    static constexpr IndexType kInputDimensions = RawFeatures::kDimensions;
    static constexpr IndexType kOutputDimensions = kHalfDimensions * 2;

    // Size of forward propagation buffer
    static constexpr std::size_t kBufferSize =
        kOutputDimensions * sizeof(OutputType);

    // Hash value embedded in the evaluation file
    static constexpr std::uint32_t GetHashValue() {
      return RawFeatures::kHashValue ^ kOutputDimensions;
    }

    // Read network parameters
    bool ReadParameters(std::istream& stream) {
      stream.read(reinterpret_cast<char*>(biases_),
                  kHalfDimensions * sizeof(BiasType));
      stream.read(reinterpret_cast<char*>(weights_),
                  kHalfDimensions * kInputDimensions * sizeof(WeightType));
      return !stream.fail();
    }

    // Update the accumulator incrementally from the previous position, if possible
    bool UpdateAccumulatorIfPossible(const Position& pos) const {
      const auto now = pos.state();
      if (now->accumulator.computed_accumulation) {
        return true;
      }
      const auto prev = now->previous;
      if (prev && prev->accumulator.computed_accumulation) {
        UpdateAccumulator(pos);
        return true;
      }
      return false;
    }

    // Convert input features
    void Transform(const Position& pos, OutputType* output, bool refresh) const {
      if (refresh || !UpdateAccumulatorIfPossible(pos)) {
        RefreshAccumulator(pos);
      }
      const auto& accumulation = pos.state()->accumulator.accumulation;

  #if defined(USE_AVX2)
      constexpr IndexType kNumChunks = kHalfDimensions / kSimdWidth;
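      // _mm256_packs_epi16() packs within each 128-bit lane, so the packed
      // bytes come out interleaved; _mm256_permute4x64_epi64() with
      // kControl = 0b11011000 puts the four 64-bit quarters back in order.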
      constexpr int kControl = 0b11011000;
      const __m256i kZero = _mm256_setzero_si256();

  #elif defined(USE_SSSE3)
      constexpr IndexType kNumChunks = kHalfDimensions / kSimdWidth;

  #ifdef USE_SSE41
      const __m128i kZero = _mm_setzero_si128();
  #else
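      // Without SSE4.1 there is no _mm_max_epi8(), so max(x, 0) is emulated
      // with saturating arithmetic: adding -128 pushes every negative value to
      // -128, and subtracting -128 then maps those to 0 while restoring the
      // non-negative values.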
      const __m128i k0x80s = _mm_set1_epi8(-128);
  #endif

  #elif defined(USE_NEON)
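      // Each NEON iteration narrows 8 int16 accumulator values into 8 int8
      // outputs (vqmovn_s16), hence chunks of kSimdWidth / 2 elements.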
      constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
      const int8x8_t kZero = {0};
  #endif

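      // The output is the concatenation of both perspectives' transformed
      // halves, side to move first. Each 16-bit accumulator value is clamped
      // to [0, 127] (clipped ReLU) and narrowed to 8 bits.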
      const Color perspectives[2] = {pos.side_to_move(), ~pos.side_to_move()};
      for (IndexType p = 0; p < 2; ++p) {
        const IndexType offset = kHalfDimensions * p;

  #if defined(USE_AVX2)
        auto out = reinterpret_cast<__m256i*>(&output[offset]);
        for (IndexType j = 0; j < kNumChunks; ++j) {
          __m256i sum0 = _mm256_loadA_si256(
              &reinterpret_cast<const __m256i*>(accumulation[perspectives[p]][0])[j * 2 + 0]);
          __m256i sum1 = _mm256_loadA_si256(
              &reinterpret_cast<const __m256i*>(accumulation[perspectives[p]][0])[j * 2 + 1]);
          _mm256_storeA_si256(&out[j], _mm256_permute4x64_epi64(_mm256_max_epi8(
              _mm256_packs_epi16(sum0, sum1), kZero), kControl));
        }

  #elif defined(USE_SSSE3)
        auto out = reinterpret_cast<__m128i*>(&output[offset]);
        for (IndexType j = 0; j < kNumChunks; ++j) {
          __m128i sum0 = _mm_load_si128(&reinterpret_cast<const __m128i*>(
              accumulation[perspectives[p]][0])[j * 2 + 0]);
          __m128i sum1 = _mm_load_si128(&reinterpret_cast<const __m128i*>(
              accumulation[perspectives[p]][0])[j * 2 + 1]);
          const __m128i packedbytes = _mm_packs_epi16(sum0, sum1);

          _mm_store_si128(&out[j],

  #ifdef USE_SSE41
            _mm_max_epi8(packedbytes, kZero)
  #else
            _mm_subs_epi8(_mm_adds_epi8(packedbytes, k0x80s), k0x80s)
  #endif

          );
        }

  #elif defined(USE_NEON)
        const auto out = reinterpret_cast<int8x8_t*>(&output[offset]);
        for (IndexType j = 0; j < kNumChunks; ++j) {
          int16x8_t sum = reinterpret_cast<const int16x8_t*>(
              accumulation[perspectives[p]][0])[j];
          out[j] = vmax_s8(vqmovn_s16(sum), kZero);
        }

  #else
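        // Generic fallback: clamp each 16-bit accumulator value to [0, 127]
        // (clipped ReLU) and narrow it to the 8-bit output type.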
        for (IndexType j = 0; j < kHalfDimensions; ++j) {
          BiasType sum = accumulation[static_cast<int>(perspectives[p])][0][j];
          output[offset + j] = static_cast<OutputType>(
              std::max<int>(0, std::min<int>(127, sum)));
        }
  #endif

      }
    }

   private:
    // Recompute the accumulator from scratch (no incremental update)
    void RefreshAccumulator(const Position& pos) const {
      auto& accumulator = pos.state()->accumulator;
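      // i indexes the refresh trigger in kRefreshTriggers; the current feature
      // set defines a single trigger, so only index 0 is used here.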
      IndexType i = 0;
      Features::IndexList active_indices[2];
      RawFeatures::AppendActiveIndices(pos, kRefreshTriggers[i],
                                       active_indices);
      for (Color perspective : { WHITE, BLACK }) {
        std::memcpy(accumulator.accumulation[perspective][i], biases_,
                    kHalfDimensions * sizeof(BiasType));
        for (const auto index : active_indices[perspective]) {
          const IndexType offset = kHalfDimensions * index;
  #if defined(USE_AVX512)
          auto accumulation = reinterpret_cast<__m512i*>(
              &accumulator.accumulation[perspective][i][0]);
          auto column = reinterpret_cast<const __m512i*>(&weights_[offset]);
          constexpr IndexType kNumChunks = kHalfDimensions / kSimdWidth;
          for (IndexType j = 0; j < kNumChunks; ++j)
            _mm512_storeA_si512(&accumulation[j], _mm512_add_epi16(_mm512_loadA_si512(&accumulation[j]), column[j]));

  #elif defined(USE_AVX2)
          auto accumulation = reinterpret_cast<__m256i*>(
              &accumulator.accumulation[perspective][i][0]);
          auto column = reinterpret_cast<const __m256i*>(&weights_[offset]);
          constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
          for (IndexType j = 0; j < kNumChunks; ++j)
            _mm256_storeA_si256(&accumulation[j], _mm256_add_epi16(_mm256_loadA_si256(&accumulation[j]), column[j]));

  #elif defined(USE_SSE2)
          auto accumulation = reinterpret_cast<__m128i*>(
              &accumulator.accumulation[perspective][i][0]);
          auto column = reinterpret_cast<const __m128i*>(&weights_[offset]);
          constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
          for (IndexType j = 0; j < kNumChunks; ++j)
            accumulation[j] = _mm_add_epi16(accumulation[j], column[j]);

  #elif defined(USE_NEON)
          auto accumulation = reinterpret_cast<int16x8_t*>(
              &accumulator.accumulation[perspective][i][0]);
          auto column = reinterpret_cast<const int16x8_t*>(&weights_[offset]);
          constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
          for (IndexType j = 0; j < kNumChunks; ++j)
            accumulation[j] = vaddq_s16(accumulation[j], column[j]);

  #else
          for (IndexType j = 0; j < kHalfDimensions; ++j)
            accumulator.accumulation[perspective][i][j] += weights_[offset + j];
  #endif

        }
      }

      accumulator.computed_accumulation = true;
      accumulator.computed_score = false;
    }

    // Update the accumulator incrementally from the previous position's accumulator
    void UpdateAccumulator(const Position& pos) const {
      const auto prev_accumulator = pos.state()->previous->accumulator;
      auto& accumulator = pos.state()->accumulator;
      IndexType i = 0;
      Features::IndexList removed_indices[2], added_indices[2];
      bool reset[2];
      RawFeatures::AppendChangedIndices(pos, kRefreshTriggers[i],
                                        removed_indices, added_indices, reset);
      for (Color perspective : { WHITE, BLACK }) {

  #if defined(USE_AVX2)
        constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
        auto accumulation = reinterpret_cast<__m256i*>(
            &accumulator.accumulation[perspective][i][0]);

  #elif defined(USE_SSE2)
        constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
        auto accumulation = reinterpret_cast<__m128i*>(
            &accumulator.accumulation[perspective][i][0]);

  #elif defined(USE_NEON)
        constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
        auto accumulation = reinterpret_cast<int16x8_t*>(
            &accumulator.accumulation[perspective][i][0]);
  #endif

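        // reset[perspective] is set when this perspective's refresh trigger
        // fired (for HalfKP, when its king moved): start again from the biases
        // instead of copying and adjusting the previous accumulator.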
        if (reset[perspective]) {
          std::memcpy(accumulator.accumulation[perspective][i], biases_,
                      kHalfDimensions * sizeof(BiasType));
        } else {
          std::memcpy(accumulator.accumulation[perspective][i],
                      prev_accumulator.accumulation[perspective][i],
                      kHalfDimensions * sizeof(BiasType));
          // Subtract the weight columns of the features that became inactive
          for (const auto index : removed_indices[perspective]) {
            const IndexType offset = kHalfDimensions * index;

  #if defined(USE_AVX2)
            auto column = reinterpret_cast<const __m256i*>(&weights_[offset]);
            for (IndexType j = 0; j < kNumChunks; ++j) {
              accumulation[j] = _mm256_sub_epi16(accumulation[j], column[j]);
            }

  #elif defined(USE_SSE2)
            auto column = reinterpret_cast<const __m128i*>(&weights_[offset]);
            for (IndexType j = 0; j < kNumChunks; ++j) {
              accumulation[j] = _mm_sub_epi16(accumulation[j], column[j]);
            }

  #elif defined(USE_NEON)
            auto column = reinterpret_cast<const int16x8_t*>(&weights_[offset]);
            for (IndexType j = 0; j < kNumChunks; ++j) {
              accumulation[j] = vsubq_s16(accumulation[j], column[j]);
            }

  #else
            for (IndexType j = 0; j < kHalfDimensions; ++j) {
              accumulator.accumulation[perspective][i][j] -=
                  weights_[offset + j];
            }
  #endif

          }
        }
        { // Add the weight columns of the features that became active
          for (const auto index : added_indices[perspective]) {
            const IndexType offset = kHalfDimensions * index;

  #if defined(USE_AVX2)
            auto column = reinterpret_cast<const __m256i*>(&weights_[offset]);
            for (IndexType j = 0; j < kNumChunks; ++j) {
              accumulation[j] = _mm256_add_epi16(accumulation[j], column[j]);
            }

  #elif defined(USE_SSE2)
            auto column = reinterpret_cast<const __m128i*>(&weights_[offset]);
            for (IndexType j = 0; j < kNumChunks; ++j) {
              accumulation[j] = _mm_add_epi16(accumulation[j], column[j]);
            }

  #elif defined(USE_NEON)
            auto column = reinterpret_cast<const int16x8_t*>(&weights_[offset]);
            for (IndexType j = 0; j < kNumChunks; ++j) {
              accumulation[j] = vaddq_s16(accumulation[j], column[j]);
            }

  #else
            for (IndexType j = 0; j < kHalfDimensions; ++j) {
              accumulator.accumulation[perspective][i][j] +=
                  weights_[offset + j];
            }
  #endif

          }
        }
      }

      accumulator.computed_accumulation = true;
      accumulator.computed_score = false;
    }

    using BiasType = std::int16_t;
    using WeightType = std::int16_t;

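    // weights_ is stored one feature column at a time:
    // weights_[kHalfDimensions * f + j] is the contribution of input feature f
    // to accumulator element j.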
    alignas(kCacheLineSize) BiasType biases_[kHalfDimensions];
    alignas(kCacheLineSize)
        WeightType weights_[kHalfDimensions * kInputDimensions];
  };

}  // namespace Eval::NNUE

#endif // #ifndef NNUE_FEATURE_TRANSFORMER_H_INCLUDED