/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2023 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

// A class that converts the input features of the NNUE evaluation function

#ifndef NNUE_FEATURE_TRANSFORMER_H_INCLUDED
#define NNUE_FEATURE_TRANSFORMER_H_INCLUDED

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iosfwd>
#include <utility>

#include "../position.h"
#include "nnue_accumulator.h"
#include "nnue_architecture.h"
#include "nnue_common.h"

namespace Stockfish::Eval::NNUE {

using BiasType       = std::int16_t;
using WeightType     = std::int16_t;
using PSQTWeightType = std::int32_t;

// If vector instructions are enabled, we update and refresh the
// accumulator tile by tile such that each tile fits in the CPU's
// vector registers.
#define VECTOR

static_assert(PSQTBuckets % 8 == 0,
              "Per feature PSQT values cannot be processed at granularity lower than 8 at a time.");

#ifdef USE_AVX512
using vec_t      = __m512i;
using psqt_vec_t = __m256i;
    #define vec_load(a) _mm512_load_si512(a)
    #define vec_store(a, b) _mm512_store_si512(a, b)
    #define vec_add_16(a, b) _mm512_add_epi16(a, b)
    #define vec_sub_16(a, b) _mm512_sub_epi16(a, b)
    #define vec_mul_16(a, b) _mm512_mullo_epi16(a, b)
    #define vec_zero() _mm512_setzero_epi32()
    #define vec_set_16(a) _mm512_set1_epi16(a)
    #define vec_max_16(a, b) _mm512_max_epi16(a, b)
    #define vec_min_16(a, b) _mm512_min_epi16(a, b)
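// vec_msb_pack_16(a, b) shifts each 16-bit lane of both inputs right by 7
// and packs the results into a single vector of saturated 8-bit lanes. The
// AVX-512 and AVX2 variants need a final permute because the hardware pack
// instructions interleave the 128-bit sublanes of their operands rather
// than concatenating them.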
inline vec_t vec_msb_pack_16(vec_t a, vec_t b) {
    vec_t compacted = _mm512_packs_epi16(_mm512_srli_epi16(a, 7), _mm512_srli_epi16(b, 7));
    return _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 2, 4, 6, 1, 3, 5, 7), compacted);
}
    #define vec_load_psqt(a) _mm256_load_si256(a)
    #define vec_store_psqt(a, b) _mm256_store_si256(a, b)
    #define vec_add_psqt_32(a, b) _mm256_add_epi32(a, b)
    #define vec_sub_psqt_32(a, b) _mm256_sub_epi32(a, b)
    #define vec_zero_psqt() _mm256_setzero_si256()
    #define NumRegistersSIMD 32
    #define MaxChunkSize 64

#elif USE_AVX2
using vec_t      = __m256i;
using psqt_vec_t = __m256i;
    #define vec_load(a) _mm256_load_si256(a)
    #define vec_store(a, b) _mm256_store_si256(a, b)
    #define vec_add_16(a, b) _mm256_add_epi16(a, b)
    #define vec_sub_16(a, b) _mm256_sub_epi16(a, b)
    #define vec_mul_16(a, b) _mm256_mullo_epi16(a, b)
    #define vec_zero() _mm256_setzero_si256()
    #define vec_set_16(a) _mm256_set1_epi16(a)
    #define vec_max_16(a, b) _mm256_max_epi16(a, b)
    #define vec_min_16(a, b) _mm256_min_epi16(a, b)
inline vec_t vec_msb_pack_16(vec_t a, vec_t b) {
    vec_t compacted = _mm256_packs_epi16(_mm256_srli_epi16(a, 7), _mm256_srli_epi16(b, 7));
    return _mm256_permute4x64_epi64(compacted, 0b11011000);
}
    #define vec_load_psqt(a) _mm256_load_si256(a)
    #define vec_store_psqt(a, b) _mm256_store_si256(a, b)
    #define vec_add_psqt_32(a, b) _mm256_add_epi32(a, b)
    #define vec_sub_psqt_32(a, b) _mm256_sub_epi32(a, b)
    #define vec_zero_psqt() _mm256_setzero_si256()
    #define NumRegistersSIMD 16
    #define MaxChunkSize 32

#elif USE_SSE2
using vec_t      = __m128i;
using psqt_vec_t = __m128i;
    #define vec_load(a) (*(a))
    #define vec_store(a, b) *(a) = (b)
    #define vec_add_16(a, b) _mm_add_epi16(a, b)
    #define vec_sub_16(a, b) _mm_sub_epi16(a, b)
    #define vec_mul_16(a, b) _mm_mullo_epi16(a, b)
    #define vec_zero() _mm_setzero_si128()
    #define vec_set_16(a) _mm_set1_epi16(a)
    #define vec_max_16(a, b) _mm_max_epi16(a, b)
    #define vec_min_16(a, b) _mm_min_epi16(a, b)
    #define vec_msb_pack_16(a, b) _mm_packs_epi16(_mm_srli_epi16(a, 7), _mm_srli_epi16(b, 7))
    #define vec_load_psqt(a) (*(a))
    #define vec_store_psqt(a, b) *(a) = (b)
    #define vec_add_psqt_32(a, b) _mm_add_epi32(a, b)
    #define vec_sub_psqt_32(a, b) _mm_sub_epi32(a, b)
    #define vec_zero_psqt() _mm_setzero_si128()
    #define NumRegistersSIMD (Is64Bit ? 16 : 8)
    #define MaxChunkSize 16

#elif USE_NEON
using vec_t      = int16x8_t;
using psqt_vec_t = int32x4_t;
    #define vec_load(a) (*(a))
    #define vec_store(a, b) *(a) = (b)
    #define vec_add_16(a, b) vaddq_s16(a, b)
    #define vec_sub_16(a, b) vsubq_s16(a, b)
    #define vec_mul_16(a, b) vmulq_s16(a, b)
    #define vec_zero() \
        vec_t { 0 }
    #define vec_set_16(a) vdupq_n_s16(a)
    #define vec_max_16(a, b) vmaxq_s16(a, b)
    #define vec_min_16(a, b) vminq_s16(a, b)
inline vec_t vec_msb_pack_16(vec_t a, vec_t b) {
    const int8x8_t  shifta    = vshrn_n_s16(a, 7);
    const int8x8_t  shiftb    = vshrn_n_s16(b, 7);
    const int8x16_t compacted = vcombine_s8(shifta, shiftb);
    return *reinterpret_cast<const vec_t*>(&compacted);
}
    #define vec_load_psqt(a) (*(a))
    #define vec_store_psqt(a, b) *(a) = (b)
    #define vec_add_psqt_32(a, b) vaddq_s32(a, b)
    #define vec_sub_psqt_32(a, b) vsubq_s32(a, b)
    #define vec_zero_psqt() \
        psqt_vec_t { 0 }
    #define NumRegistersSIMD 16
    #define MaxChunkSize 16

#else
    #undef VECTOR

#endif
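
// The macros above give the accumulator code below a uniform vector API
// (aligned load/store, 16-bit lane arithmetic for the feature accumulator,
// 32-bit lane arithmetic for the PSQT accumulator), so the update and
// refresh loops are written once, independently of the target ISA. When no
// supported SIMD extension is available, VECTOR is undefined and the scalar
// fallback branches are compiled instead.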

#ifdef VECTOR

// Compute optimal SIMD register count for feature transformer accumulation.

// We use __m* types as template arguments, which causes GCC to emit warnings
// about losing some attribute information. This is irrelevant to us as we
// only take their size, so the following pragmas are harmless.
    #if defined(__GNUC__)
        #pragma GCC diagnostic push
        #pragma GCC diagnostic ignored "-Wignored-attributes"
    #endif

template<typename SIMDRegisterType, typename LaneType, int NumLanes, int MaxRegisters>
static constexpr int BestRegisterCount() {
    #define RegisterSize sizeof(SIMDRegisterType)
    #define LaneSize sizeof(LaneType)

    static_assert(RegisterSize >= LaneSize);
    static_assert(MaxRegisters <= NumRegistersSIMD);
    static_assert(MaxRegisters > 0);
    static_assert(NumRegistersSIMD > 0);
    static_assert(RegisterSize % LaneSize == 0);
    static_assert((NumLanes * LaneSize) % RegisterSize == 0);

    const int ideal = (NumLanes * LaneSize) / RegisterSize;
    if (ideal <= MaxRegisters)
        return ideal;

    // Look for the largest divisor of the ideal register count that is smaller than MaxRegisters
    for (int divisor = MaxRegisters; divisor > 1; --divisor)
        if (ideal % divisor == 0)
            return divisor;

    return 1;
}
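
// For example, if TransformedFeatureDimensions were 1024, then with AVX2
// (32-byte __m256i registers, 2-byte int16_t lanes) the ideal count would be
// 1024 * 2 / 32 = 64 registers. That exceeds the 16 available, so the loop
// settles on the largest divisor of 64 not exceeding 16, i.e. 16 registers,
// and each accumulator row is then processed in 64 / 16 = 4 tile passes.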

static constexpr int NumRegs =
  BestRegisterCount<vec_t, WeightType, TransformedFeatureDimensions, NumRegistersSIMD>();
static constexpr int NumPsqtRegs =
  BestRegisterCount<psqt_vec_t, PSQTWeightType, PSQTBuckets, NumRegistersSIMD>();
    #if defined(__GNUC__)
        #pragma GCC diagnostic pop
    #endif
#endif

// Input feature converter
class FeatureTransformer {

   private:
    // Number of output dimensions for one side
    static constexpr IndexType HalfDimensions = TransformedFeatureDimensions;

#ifdef VECTOR
    static constexpr IndexType TileHeight     = NumRegs * sizeof(vec_t) / 2;
    static constexpr IndexType PsqtTileHeight = NumPsqtRegs * sizeof(psqt_vec_t) / 4;
    static_assert(HalfDimensions % TileHeight == 0, "TileHeight must divide HalfDimensions");
    static_assert(PSQTBuckets % PsqtTileHeight == 0, "PsqtTileHeight must divide PSQTBuckets");
#endif

   public:
    // Output type
    using OutputType = TransformedFeatureType;
218 // Number of input/output dimensions
219 static constexpr IndexType InputDimensions = FeatureSet::Dimensions;
220 static constexpr IndexType OutputDimensions = HalfDimensions;
222 // Size of forward propagation buffer
223 static constexpr std::size_t BufferSize = OutputDimensions * sizeof(OutputType);
225 // Hash value embedded in the evaluation file
226 static constexpr std::uint32_t get_hash_value() {
227 return FeatureSet::HashValue ^ (OutputDimensions * 2);
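
    // Folding OutputDimensions into the hash means an evaluation file trained
    // for a different transformer width is rejected when it is loaded.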

    // Read network parameters
    bool read_parameters(std::istream& stream) {

        read_leb_128<BiasType>(stream, biases, HalfDimensions);
        read_leb_128<WeightType>(stream, weights, HalfDimensions * InputDimensions);
        read_leb_128<PSQTWeightType>(stream, psqtWeights, PSQTBuckets * InputDimensions);

        return !stream.fail();
    }

    // Write network parameters
    bool write_parameters(std::ostream& stream) const {

        write_leb_128<BiasType>(stream, biases, HalfDimensions);
        write_leb_128<WeightType>(stream, weights, HalfDimensions * InputDimensions);
        write_leb_128<PSQTWeightType>(stream, psqtWeights, PSQTBuckets * InputDimensions);

        return !stream.fail();
    }
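
    // Biases and weights go through read_leb_128 / write_leb_128, a
    // LEB128-style variable-length encoding that keeps the many
    // small-magnitude parameters compact on disk.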

    // Convert input features
    std::int32_t transform(const Position& pos, OutputType* output, int bucket) const {
        update_accumulator<WHITE>(pos);
        update_accumulator<BLACK>(pos);

        const Color perspectives[2]  = {pos.side_to_move(), ~pos.side_to_move()};
        const auto& accumulation     = pos.state()->accumulator.accumulation;
        const auto& psqtAccumulation = pos.state()->accumulator.psqtAccumulation;

        const auto psqt =
          (psqtAccumulation[perspectives[0]][bucket] - psqtAccumulation[perspectives[1]][bucket])
          / 2;
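
        // Each perspective holds its own estimate of the bucketed PSQT value
        // for the side to move (with opposite sign for the opponent), so the
        // halved difference averages the two estimates into one score.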

        for (IndexType p = 0; p < 2; ++p)
        {
            const IndexType offset = (HalfDimensions / 2) * p;

#if defined(VECTOR)

            constexpr IndexType OutputChunkSize = MaxChunkSize;
            static_assert((HalfDimensions / 2) % OutputChunkSize == 0);
            constexpr IndexType NumOutputChunks = HalfDimensions / 2 / OutputChunkSize;

            vec_t Zero = vec_zero();
            vec_t One  = vec_set_16(127);

            const vec_t* in0 = reinterpret_cast<const vec_t*>(&(accumulation[perspectives[p]][0]));
            const vec_t* in1 =
              reinterpret_cast<const vec_t*>(&(accumulation[perspectives[p]][HalfDimensions / 2]));
            vec_t* out = reinterpret_cast<vec_t*>(output + offset);
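
            // Pairwise squared clipped ReLU: every output byte is
            //   clamp(x0, 0, 127) * clamp(x1, 0, 127) / 128
            // where x0 and x1 come from the two halves of the accumulator.
            // The division by 128 is the >> 7 inside vec_msb_pack_16, which
            // also narrows the 16-bit products to 8 bits.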

            for (IndexType j = 0; j < NumOutputChunks; j += 1)
            {
                const vec_t sum0a = vec_max_16(vec_min_16(in0[j * 2 + 0], One), Zero);
                const vec_t sum0b = vec_max_16(vec_min_16(in0[j * 2 + 1], One), Zero);
                const vec_t sum1a = vec_max_16(vec_min_16(in1[j * 2 + 0], One), Zero);
                const vec_t sum1b = vec_max_16(vec_min_16(in1[j * 2 + 1], One), Zero);

                const vec_t pa = vec_mul_16(sum0a, sum1a);
                const vec_t pb = vec_mul_16(sum0b, sum1b);

                out[j] = vec_msb_pack_16(pa, pb);
            }

#else

            for (IndexType j = 0; j < HalfDimensions / 2; ++j)
            {
                BiasType sum0 = accumulation[static_cast<int>(perspectives[p])][j + 0];
                BiasType sum1 =
                  accumulation[static_cast<int>(perspectives[p])][j + HalfDimensions / 2];
                sum0               = std::clamp<BiasType>(sum0, 0, 127);
                sum1               = std::clamp<BiasType>(sum1, 0, 127);
                output[offset + j] = static_cast<OutputType>(unsigned(sum0 * sum1) / 128);
            }

#endif
        }

        return psqt;
    }  // end of function transform()

    void hint_common_access(const Position& pos) const {
        hint_common_access_for_perspective<WHITE>(pos);
        hint_common_access_for_perspective<BLACK>(pos);
    }

   private:
    template<Color Perspective>
    [[nodiscard]] std::pair<StateInfo*, StateInfo*>
    try_find_computed_accumulator(const Position& pos) const {
        // Look for a usable accumulator of an earlier position. We keep track
        // of the estimated gain in terms of features to be added/subtracted.
        StateInfo *st = pos.state(), *next = nullptr;
        int        gain = FeatureSet::refresh_cost(pos);
        while (st->previous && !st->accumulator.computed[Perspective])
        {
            // This governs when a full feature refresh is needed and how many
            // updates are better than just one full refresh.
            if (FeatureSet::requires_refresh(st, Perspective)
                || (gain -= FeatureSet::update_cost(st) + 1) < 0)
                break;
            next = st;
            st   = st->previous;
        }
        return {st, next};
    }
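
    // The pair returned above is (oldest examined state, its successor on
    // the path towards pos, or nullptr if the walk never advanced). Callers
    // test computed[] on the first element to choose between an incremental
    // update from that state and a full refresh.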

    // NOTE: The parameter states_to_update is an array of position states, ending with nullptr.
    // All states must be sequential, that is, states_to_update[i] must either be reachable
    // by repeatedly applying ->previous from states_to_update[i+1], or be nullptr.
    // computed_st must be reachable by repeatedly applying ->previous on states_to_update[0], if not nullptr.
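    // For example, update_accumulator() below passes
    // states_to_update = {next, pos.state(), nullptr} with N == 3, where
    // next is an ancestor of pos.state() and computed_st an ancestor of next.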
    template<Color Perspective, size_t N>
    void update_accumulator_incremental(const Position& pos,
                                        StateInfo*      computed_st,
                                        StateInfo*      states_to_update[N]) const {
        static_assert(N > 0);
        assert(states_to_update[N - 1] == nullptr);

#ifdef VECTOR
        // Gcc-10.2 unnecessarily spills AVX2 registers if this array
        // is defined in the VECTOR code below, once in each branch
        vec_t      acc[NumRegs];
        psqt_vec_t psqt[NumPsqtRegs];
#endif

        if (states_to_update[0] == nullptr)
            return;

        // Update incrementally going back through states_to_update.

        // Gather all features to be updated.
        const Square ksq = pos.square<KING>(Perspective);

        // The size must be enough to contain the largest possible update.
        // That might depend on the feature set and generally relies on the
        // feature set's update cost calculation to be correct and never
        // allow updates with more added/removed features than MaxActiveDimensions.
        FeatureSet::IndexList removed[N - 1], added[N - 1];

        {
            int i =
              N - 2;  // last potential state to update. Skip last element because it must be nullptr.
            while (states_to_update[i] == nullptr)
                --i;

            StateInfo* st2 = states_to_update[i];

            for (; i >= 0; --i)
            {
                states_to_update[i]->accumulator.computed[Perspective] = true;

                const StateInfo* end_state = i == 0 ? computed_st : states_to_update[i - 1];

                for (; st2 != end_state; st2 = st2->previous)
                    FeatureSet::append_changed_indices<Perspective>(ksq, st2->dirtyPiece,
                                                                    removed[i], added[i]);
            }
        }

        StateInfo* st = computed_st;

        // Now update the accumulators listed in states_to_update[], where the last element is a sentinel.
#ifdef VECTOR

        if (states_to_update[1] == nullptr && (removed[0].size() == 1 || removed[0].size() == 2)
            && added[0].size() == 1)
        {
            assert(states_to_update[0]);
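
            // Fast path for the most common case: a single state to update,
            // with exactly one feature added and one or two removed, i.e. a
            // quiet move or a capture. The whole update is fused into one
            // pass over the accumulator.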
            auto accIn =
              reinterpret_cast<const vec_t*>(&st->accumulator.accumulation[Perspective][0]);
            auto accOut = reinterpret_cast<vec_t*>(
              &states_to_update[0]->accumulator.accumulation[Perspective][0]);

            const IndexType offsetR0 = HalfDimensions * removed[0][0];
            auto            columnR0 = reinterpret_cast<const vec_t*>(&weights[offsetR0]);
            const IndexType offsetA  = HalfDimensions * added[0][0];
            auto            columnA  = reinterpret_cast<const vec_t*>(&weights[offsetA]);

            if (removed[0].size() == 1)
            {
                for (IndexType k = 0; k < HalfDimensions * sizeof(std::int16_t) / sizeof(vec_t);
                     ++k)
                    accOut[k] = vec_add_16(vec_sub_16(accIn[k], columnR0[k]), columnA[k]);
            }
            else
            {
                const IndexType offsetR1 = HalfDimensions * removed[0][1];
                auto            columnR1 = reinterpret_cast<const vec_t*>(&weights[offsetR1]);

                for (IndexType k = 0; k < HalfDimensions * sizeof(std::int16_t) / sizeof(vec_t);
                     ++k)
                    accOut[k] = vec_sub_16(vec_add_16(accIn[k], columnA[k]),
                                           vec_add_16(columnR0[k], columnR1[k]));
            }

            auto accPsqtIn = reinterpret_cast<const psqt_vec_t*>(
              &st->accumulator.psqtAccumulation[Perspective][0]);
            auto accPsqtOut = reinterpret_cast<psqt_vec_t*>(
              &states_to_update[0]->accumulator.psqtAccumulation[Perspective][0]);

            const IndexType offsetPsqtR0 = PSQTBuckets * removed[0][0];
            auto columnPsqtR0 = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offsetPsqtR0]);
            const IndexType offsetPsqtA = PSQTBuckets * added[0][0];
            auto columnPsqtA = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offsetPsqtA]);

            if (removed[0].size() == 1)
            {
                for (std::size_t k = 0;
                     k < PSQTBuckets * sizeof(std::int32_t) / sizeof(psqt_vec_t); ++k)
                    accPsqtOut[k] = vec_add_psqt_32(vec_sub_psqt_32(accPsqtIn[k], columnPsqtR0[k]),
                                                    columnPsqtA[k]);
            }
            else
            {
                const IndexType offsetPsqtR1 = PSQTBuckets * removed[0][1];
                auto columnPsqtR1 = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offsetPsqtR1]);

                for (std::size_t k = 0;
                     k < PSQTBuckets * sizeof(std::int32_t) / sizeof(psqt_vec_t); ++k)
                    accPsqtOut[k] =
                      vec_sub_psqt_32(vec_add_psqt_32(accPsqtIn[k], columnPsqtA[k]),
                                      vec_add_psqt_32(columnPsqtR0[k], columnPsqtR1[k]));
            }
        }
        else
        {
            for (IndexType j = 0; j < HalfDimensions / TileHeight; ++j)
            {
                // Load accumulator
                auto accTileIn = reinterpret_cast<const vec_t*>(
                  &st->accumulator.accumulation[Perspective][j * TileHeight]);
                for (IndexType k = 0; k < NumRegs; ++k)
                    acc[k] = vec_load(&accTileIn[k]);

                for (IndexType i = 0; states_to_update[i]; ++i)
                {
                    // Difference calculation for the deactivated features
                    for (const auto index : removed[i])
                    {
                        const IndexType offset = HalfDimensions * index + j * TileHeight;
                        auto column = reinterpret_cast<const vec_t*>(&weights[offset]);
                        for (IndexType k = 0; k < NumRegs; ++k)
                            acc[k] = vec_sub_16(acc[k], column[k]);
                    }

                    // Difference calculation for the activated features
                    for (const auto index : added[i])
                    {
                        const IndexType offset = HalfDimensions * index + j * TileHeight;
                        auto column = reinterpret_cast<const vec_t*>(&weights[offset]);
                        for (IndexType k = 0; k < NumRegs; ++k)
                            acc[k] = vec_add_16(acc[k], column[k]);
                    }

                    // Store accumulator
                    auto accTileOut = reinterpret_cast<vec_t*>(
                      &states_to_update[i]->accumulator.accumulation[Perspective][j * TileHeight]);
                    for (IndexType k = 0; k < NumRegs; ++k)
                        vec_store(&accTileOut[k], acc[k]);
                }
            }

            for (IndexType j = 0; j < PSQTBuckets / PsqtTileHeight; ++j)
            {
                // Load accumulator
                auto accTilePsqtIn = reinterpret_cast<const psqt_vec_t*>(
                  &st->accumulator.psqtAccumulation[Perspective][j * PsqtTileHeight]);
                for (std::size_t k = 0; k < NumPsqtRegs; ++k)
                    psqt[k] = vec_load_psqt(&accTilePsqtIn[k]);

                for (IndexType i = 0; states_to_update[i]; ++i)
                {
                    // Difference calculation for the deactivated features
                    for (const auto index : removed[i])
                    {
                        const IndexType offset = PSQTBuckets * index + j * PsqtTileHeight;
                        auto columnPsqt = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offset]);
                        for (std::size_t k = 0; k < NumPsqtRegs; ++k)
                            psqt[k] = vec_sub_psqt_32(psqt[k], columnPsqt[k]);
                    }

                    // Difference calculation for the activated features
                    for (const auto index : added[i])
                    {
                        const IndexType offset = PSQTBuckets * index + j * PsqtTileHeight;
                        auto columnPsqt = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offset]);
                        for (std::size_t k = 0; k < NumPsqtRegs; ++k)
                            psqt[k] = vec_add_psqt_32(psqt[k], columnPsqt[k]);
                    }

                    // Store accumulator
                    auto accTilePsqtOut = reinterpret_cast<psqt_vec_t*>(
                      &states_to_update[i]
                         ->accumulator.psqtAccumulation[Perspective][j * PsqtTileHeight]);
                    for (std::size_t k = 0; k < NumPsqtRegs; ++k)
                        vec_store_psqt(&accTilePsqtOut[k], psqt[k]);
                }
            }
        }
#else
        for (IndexType i = 0; states_to_update[i]; ++i)
        {
            std::memcpy(states_to_update[i]->accumulator.accumulation[Perspective],
                        st->accumulator.accumulation[Perspective],
                        HalfDimensions * sizeof(BiasType));

            for (std::size_t k = 0; k < PSQTBuckets; ++k)
                states_to_update[i]->accumulator.psqtAccumulation[Perspective][k] =
                  st->accumulator.psqtAccumulation[Perspective][k];

            st = states_to_update[i];

            // Difference calculation for the deactivated features
            for (const auto index : removed[i])
            {
                const IndexType offset = HalfDimensions * index;

                for (IndexType j = 0; j < HalfDimensions; ++j)
                    st->accumulator.accumulation[Perspective][j] -= weights[offset + j];

                for (std::size_t k = 0; k < PSQTBuckets; ++k)
                    st->accumulator.psqtAccumulation[Perspective][k] -=
                      psqtWeights[index * PSQTBuckets + k];
            }

            // Difference calculation for the activated features
            for (const auto index : added[i])
            {
                const IndexType offset = HalfDimensions * index;

                for (IndexType j = 0; j < HalfDimensions; ++j)
                    st->accumulator.accumulation[Perspective][j] += weights[offset + j];

                for (std::size_t k = 0; k < PSQTBuckets; ++k)
                    st->accumulator.psqtAccumulation[Perspective][k] +=
                      psqtWeights[index * PSQTBuckets + k];
            }
        }
#endif
    }

    template<Color Perspective>
    void update_accumulator_refresh(const Position& pos) const {
        // Gcc-10.2 unnecessarily spills AVX2 registers if this array
        // is defined in the VECTOR code below, once in each branch
#ifdef VECTOR
        vec_t      acc[NumRegs];
        psqt_vec_t psqt[NumPsqtRegs];
#endif

        // Refresh the accumulator
        // Could be extracted to a separate function because it's done in 2 places,
        // but it's unclear if compilers would correctly handle register allocation.
        auto& accumulator                 = pos.state()->accumulator;
        accumulator.computed[Perspective] = true;
        FeatureSet::IndexList active;
        FeatureSet::append_active_indices<Perspective>(pos, active);

#ifdef VECTOR
        for (IndexType j = 0; j < HalfDimensions / TileHeight; ++j)
        {
            auto biasesTile = reinterpret_cast<const vec_t*>(&biases[j * TileHeight]);
            for (IndexType k = 0; k < NumRegs; ++k)
                acc[k] = biasesTile[k];

            for (const auto index : active)
            {
                const IndexType offset = HalfDimensions * index + j * TileHeight;
                auto column = reinterpret_cast<const vec_t*>(&weights[offset]);

                for (unsigned k = 0; k < NumRegs; ++k)
                    acc[k] = vec_add_16(acc[k], column[k]);
            }

            auto accTile =
              reinterpret_cast<vec_t*>(&accumulator.accumulation[Perspective][j * TileHeight]);
            for (unsigned k = 0; k < NumRegs; k++)
                vec_store(&accTile[k], acc[k]);
        }

        for (IndexType j = 0; j < PSQTBuckets / PsqtTileHeight; ++j)
        {
            for (std::size_t k = 0; k < NumPsqtRegs; ++k)
                psqt[k] = vec_zero_psqt();

            for (const auto index : active)
            {
                const IndexType offset = PSQTBuckets * index + j * PsqtTileHeight;
                auto columnPsqt = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offset]);

                for (std::size_t k = 0; k < NumPsqtRegs; ++k)
                    psqt[k] = vec_add_psqt_32(psqt[k], columnPsqt[k]);
            }

            auto accTilePsqt = reinterpret_cast<psqt_vec_t*>(
              &accumulator.psqtAccumulation[Perspective][j * PsqtTileHeight]);
            for (std::size_t k = 0; k < NumPsqtRegs; ++k)
                vec_store_psqt(&accTilePsqt[k], psqt[k]);
        }

#else
        std::memcpy(accumulator.accumulation[Perspective], biases,
                    HalfDimensions * sizeof(BiasType));

        for (std::size_t k = 0; k < PSQTBuckets; ++k)
            accumulator.psqtAccumulation[Perspective][k] = 0;

        for (const auto index : active)
        {
            const IndexType offset = HalfDimensions * index;

            for (IndexType j = 0; j < HalfDimensions; ++j)
                accumulator.accumulation[Perspective][j] += weights[offset + j];

            for (std::size_t k = 0; k < PSQTBuckets; ++k)
                accumulator.psqtAccumulation[Perspective][k] +=
                  psqtWeights[index * PSQTBuckets + k];
        }
#endif
    }

    template<Color Perspective>
    void hint_common_access_for_perspective(const Position& pos) const {

        // Works like update_accumulator, but performs less work.
        // Updates ONLY the accumulator for pos.

        // Look for a usable accumulator of an earlier position. We keep track
        // of the estimated gain in terms of features to be added/subtracted.
        if (pos.state()->accumulator.computed[Perspective])
            return;

        auto [oldest_st, _] = try_find_computed_accumulator<Perspective>(pos);

        if (oldest_st->accumulator.computed[Perspective])
        {
            // Only update current position accumulator to minimize work.
            StateInfo* states_to_update[2] = {pos.state(), nullptr};
            update_accumulator_incremental<Perspective, 2>(pos, oldest_st, states_to_update);
        }
        else
            update_accumulator_refresh<Perspective>(pos);
    }

    template<Color Perspective>
    void update_accumulator(const Position& pos) const {

        auto [oldest_st, next] = try_find_computed_accumulator<Perspective>(pos);

        if (oldest_st->accumulator.computed[Perspective])
        {
            if (next == nullptr)
                return;

            // Now update the accumulators listed in states_to_update[], where the last element is a sentinel.
            // Currently we update two accumulators:
            //     1. for the current position
            //     2. the next accumulator after the computed one
            // The heuristic may change in the future.
            StateInfo* states_to_update[3] = {next, next == pos.state() ? nullptr : pos.state(),
                                              nullptr};

            update_accumulator_incremental<Perspective, 3>(pos, oldest_st, states_to_update);
        }
        else
            update_accumulator_refresh<Perspective>(pos);
    }

    alignas(CacheLineSize) BiasType       biases[HalfDimensions];
    alignas(CacheLineSize) WeightType     weights[HalfDimensions * InputDimensions];
    alignas(CacheLineSize) PSQTWeightType psqtWeights[InputDimensions * PSQTBuckets];
};

}  // namespace Stockfish::Eval::NNUE

#endif  // #ifndef NNUE_FEATURE_TRANSFORMER_H_INCLUDED