namespace Stockfish::Eval::NNUE::Layers {
// Affine transformation layer
- template <typename PreviousLayer, IndexType OutputDimensions>
+ template <typename PreviousLayer, IndexType OutDims>
class AffineTransform {
public:
// Input/output type
static_assert(std::is_same<InputType, std::uint8_t>::value, "");
// Number of input/output dimensions
- static constexpr IndexType kInputDimensions =
- PreviousLayer::kOutputDimensions;
- static constexpr IndexType kOutputDimensions = OutputDimensions;
- static constexpr IndexType kPaddedInputDimensions =
- CeilToMultiple<IndexType>(kInputDimensions, kMaxSimdWidth);
+ static constexpr IndexType InputDimensions =
+ PreviousLayer::OutputDimensions;
+ static constexpr IndexType OutputDimensions = OutDims;
+ static constexpr IndexType PaddedInputDimensions =
+ ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
#if defined (USE_AVX512)
- static constexpr const IndexType kOutputSimdWidth = kSimdWidth / 2;
+ static constexpr const IndexType OutputSimdWidth = SimdWidth / 2;
#elif defined (USE_SSSE3)
- static constexpr const IndexType kOutputSimdWidth = kSimdWidth / 4;
+ static constexpr const IndexType OutputSimdWidth = SimdWidth / 4;
#endif
// Size of forward propagation buffer used in this layer
- static constexpr std::size_t kSelfBufferSize =
- CeilToMultiple(kOutputDimensions * sizeof(OutputType), kCacheLineSize);
+ static constexpr std::size_t SelfBufferSize =
+ ceil_to_multiple(OutputDimensions * sizeof(OutputType), CacheLineSize);
// Size of the forward propagation buffer used from the input layer to this layer
- static constexpr std::size_t kBufferSize =
- PreviousLayer::kBufferSize + kSelfBufferSize;
+ static constexpr std::size_t BufferSize =
+ PreviousLayer::BufferSize + SelfBufferSize;
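+  // Buffer layout implied by propagate() below: this layer writes its output
+  // at buffer[0] and hands buffer + SelfBufferSize to the previous layer, so
+  // the per-layer regions never overlap:
+  //   [ this layer's output | previous layer's output | ... | first layer ]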
// Hash value embedded in the evaluation file
- static constexpr std::uint32_t GetHashValue() {
- std::uint32_t hash_value = 0xCC03DAE4u;
- hash_value += kOutputDimensions;
- hash_value ^= PreviousLayer::GetHashValue() >> 1;
- hash_value ^= PreviousLayer::GetHashValue() << 31;
- return hash_value;
+ static constexpr std::uint32_t get_hash_value() {
+ std::uint32_t hashValue = 0xCC03DAE4u;
+ hashValue += OutputDimensions;
+ hashValue ^= PreviousLayer::get_hash_value() >> 1;
+ hashValue ^= PreviousLayer::get_hash_value() << 31;
+ return hashValue;
}
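+  // The two shift-XORs fold in a 32-bit rotate-right of the previous layer's
+  // hash by one bit: hashValue ^= (prev >> 1) ^ (prev << 31).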
- // Read network parameters
- bool ReadParameters(std::istream& stream) {
- if (!previous_layer_.ReadParameters(stream)) return false;
- for (std::size_t i = 0; i < kOutputDimensions; ++i)
- biases_[i] = read_little_endian<BiasType>(stream);
- for (std::size_t i = 0; i < kOutputDimensions * kPaddedInputDimensions; ++i)
+ // Read network parameters
+ bool read_parameters(std::istream& stream) {
+ if (!previousLayer.read_parameters(stream)) return false;
+ for (std::size_t i = 0; i < OutputDimensions; ++i)
+ biases[i] = read_little_endian<BiasType>(stream);
#if !defined (USE_SSSE3)
- weights_[i] = read_little_endian<WeightType>(stream);
+ for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+ weights[i] = read_little_endian<WeightType>(stream);
#else
- weights_[
- (i / 4) % (kPaddedInputDimensions / 4) * kOutputDimensions * 4 +
- i / kPaddedInputDimensions * 4 +
- i % 4
- ] = read_little_endian<WeightType>(stream);
+ std::unique_ptr<uint32_t[]> indexMap = std::make_unique<uint32_t[]>(OutputDimensions * PaddedInputDimensions);
+ for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i) {
+ const uint32_t scrambledIdx =
+ (i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
+ i / PaddedInputDimensions * 4 +
+ i % 4;
+ weights[scrambledIdx] = read_little_endian<WeightType>(stream);
+ indexMap[scrambledIdx] = i;
+ }
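+    // The scramble interleaves input columns in groups of 4: for a row-major
+    // index i = r * PaddedInputDimensions + c, the weight lands at
+    //   (c / 4) * OutputDimensions * 4 + r * 4 + c % 4,
+    // so the 4 weights each output needs for one 32-bit input chunk sit
+    // contiguously (see the col0..col3 loads in propagate). indexMap records
+    // the original index of every slot so write_parameters can undo this.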
// Determine if groups of eight weight/input products can be summed using 16 bits
// without saturation. We assume the worst case: every input is either 0 or 127,
// so a group is safe while each signed partial sum stays within 32767 / 127 = 258.
- if (kOutputDimensions > 1 && !stream.fail())
+ if (OutputDimensions > 1 && !stream.fail())
{
canSaturate16.count = 0;
#if !defined(USE_VNNI)
- for (IndexType i = 0; i < kPaddedInputDimensions; i += 16)
- for (IndexType j = 0; j < kOutputDimensions; ++j)
+ for (IndexType i = 0; i < PaddedInputDimensions; i += 16)
+ for (IndexType j = 0; j < OutputDimensions; ++j)
for (int x = 0; x < 2; ++x)
{
- WeightType* w = &weights_[i * kOutputDimensions + j * 4 + x * 2];
+ WeightType* w = &weights[i * OutputDimensions + j * 4 + x * 2];
int sum[2] = {0, 0};
for (int k = 0; k < 8; ++k)
{
- IndexType idx = k / 2 * kOutputDimensions * 4 + k % 2;
+ IndexType idx = k / 2 * OutputDimensions * 4 + k % 2;
sum[w[idx] < 0] += w[idx];
}
for (int sign : { -1, 1 })
while (sign * sum[sign == -1] > 258)
{
int maxK = 0, maxW = 0;
for (int k = 0; k < 8; ++k)
{
- IndexType idx = k / 2 * kOutputDimensions * 4 + k % 2;
+ IndexType idx = k / 2 * OutputDimensions * 4 + k % 2;
if (maxW < sign * w[idx])
maxK = k, maxW = sign * w[idx];
}
- IndexType idx = maxK / 2 * kOutputDimensions * 4 + maxK % 2;
+ IndexType idx = maxK / 2 * OutputDimensions * 4 + maxK % 2;
sum[sign == -1] -= w[idx];
- canSaturate16.add(j, i + maxK / 2 * 4 + maxK % 2 + x * 2, w[idx]);
+ const uint32_t scrambledIdx = idx + i * OutputDimensions + j * 4 + x * 2;
+ canSaturate16.add(j, i + maxK / 2 * 4 + maxK % 2 + x * 2, w[idx], indexMap[scrambledIdx]);
w[idx] = 0;
}
}
#endif
}
#endif
return !stream.fail();
}
+ // Write network parameters
+ bool write_parameters(std::ostream& stream) const {
+ if (!previousLayer.write_parameters(stream)) return false;
+ for (std::size_t i = 0; i < OutputDimensions; ++i)
+ write_little_endian<BiasType>(stream, biases[i]);
+#if !defined (USE_SSSE3)
+ for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+ write_little_endian<WeightType>(stream, weights[i]);
+#else
+ std::unique_ptr<WeightType[]> unscrambledWeights = std::make_unique<WeightType[]>(OutputDimensions * PaddedInputDimensions);
+ for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i) {
+ unscrambledWeights[i] =
+ weights[
+ (i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
+ i / PaddedInputDimensions * 4 +
+ i % 4
+ ];
+ }
+ for (int i = 0; i < canSaturate16.count; ++i)
+ unscrambledWeights[canSaturate16.ids[i].wIdx] = canSaturate16.ids[i].w;
+
+ for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+ write_little_endian<WeightType>(stream, unscrambledWeights[i]);
+#endif
+
+ return !stream.fail();
+ }
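+  // Round-trip sketch (stream names are illustrative only):
+  //   layer.read_parameters(in);   // scrambles, and may zero some weights
+  //   layer.write_parameters(out); // should reproduce the input byte-for-byte
+  // because the loop above inverts the scramble and the wIdx entries restore
+  // the weights that the saturation pass zeroed.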
+
// Forward propagation
- const OutputType* Propagate(
- const TransformedFeatureType* transformed_features, char* buffer) const {
- const auto input = previous_layer_.Propagate(
- transformed_features, buffer + kSelfBufferSize);
+ const OutputType* propagate(
+ const TransformedFeatureType* transformedFeatures, char* buffer) const {
+ const auto input = previousLayer.propagate(
+ transformedFeatures, buffer + SelfBufferSize);
#if defined (USE_AVX512)
- [[maybe_unused]] const __m512i kOnes512 = _mm512_set1_epi16(1);
+ [[maybe_unused]] const __m512i Ones512 = _mm512_set1_epi16(1);
[[maybe_unused]] auto m512_hadd = [](__m512i sum, int bias) -> int {
return _mm512_reduce_add_epi32(sum) + bias;
acc = _mm512_dpbusd_epi32(acc, a, b);
#else
__m512i product0 = _mm512_maddubs_epi16(a, b);
- product0 = _mm512_madd_epi16(product0, kOnes512);
+ product0 = _mm512_madd_epi16(product0, Ones512);
acc = _mm512_add_epi32(acc, product0);
#endif
};
product0 = _mm512_add_epi16(product0, product1);
product2 = _mm512_add_epi16(product2, product3);
product0 = _mm512_add_epi16(product0, product2);
- product0 = _mm512_madd_epi16(product0, kOnes512);
+ product0 = _mm512_madd_epi16(product0, Ones512);
acc = _mm512_add_epi32(acc, product0);
#endif
};
#endif
#if defined (USE_AVX2)
- [[maybe_unused]] const __m256i kOnes256 = _mm256_set1_epi16(1);
+ [[maybe_unused]] const __m256i Ones256 = _mm256_set1_epi16(1);
[[maybe_unused]] auto m256_hadd = [](__m256i sum, int bias) -> int {
__m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extracti128_si256(sum, 1));
acc = _mm256_dpbusd_epi32(acc, a, b);
#else
__m256i product0 = _mm256_maddubs_epi16(a, b);
- product0 = _mm256_madd_epi16(product0, kOnes256);
+ product0 = _mm256_madd_epi16(product0, Ones256);
acc = _mm256_add_epi32(acc, product0);
#endif
};
product0 = _mm256_add_epi16(product0, product1);
product2 = _mm256_add_epi16(product2, product3);
product0 = _mm256_add_epi16(product0, product2);
- product0 = _mm256_madd_epi16(product0, kOnes256);
+ product0 = _mm256_madd_epi16(product0, Ones256);
acc = _mm256_add_epi32(acc, product0);
#endif
};
#endif
#if defined (USE_SSSE3)
- [[maybe_unused]] const __m128i kOnes128 = _mm_set1_epi16(1);
+ [[maybe_unused]] const __m128i Ones128 = _mm_set1_epi16(1);
[[maybe_unused]] auto m128_hadd = [](__m128i sum, int bias) -> int {
sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0x4E)); //_MM_PERM_BADC
[[maybe_unused]] auto m128_add_dpbusd_epi32 = [=](__m128i& acc, __m128i a, __m128i b) {
__m128i product0 = _mm_maddubs_epi16(a, b);
- product0 = _mm_madd_epi16(product0, kOnes128);
+ product0 = _mm_madd_epi16(product0, Ones128);
acc = _mm_add_epi32(acc, product0);
};
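+  // Scalar view of the emulation above: for each 32-bit lane k,
+  //   acc[k] += a[4k]*b[4k] + a[4k+1]*b[4k+1] + a[4k+2]*b[4k+2] + a[4k+3]*b[4k+3]
+  // maddubs multiplies uint8 inputs by int8 weights and adds adjacent pairs
+  // with signed 16-bit saturation (hence the canSaturate16 bookkeeping), and
+  // madd against Ones128 widens the pair sums to 32 bits.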
product0 = _mm_add_epi16(product0, product1);
product2 = _mm_add_epi16(product2, product3);
product0 = _mm_add_epi16(product0, product2);
- product0 = _mm_madd_epi16(product0, kOnes128);
+ product0 = _mm_madd_epi16(product0, Ones128);
acc = _mm_add_epi32(acc, product0);
};
#if defined (USE_SSSE3)
const auto output = reinterpret_cast<OutputType*>(buffer);
- const auto input_vector = reinterpret_cast<const vec_t*>(input);
+ const auto inputVector = reinterpret_cast<const vec_t*>(input);
- static_assert(kOutputDimensions % kOutputSimdWidth == 0 || kOutputDimensions == 1);
+ static_assert(OutputDimensions % OutputSimdWidth == 0 || OutputDimensions == 1);
- // kOutputDimensions is either 1 or a multiple of kSimdWidth
+ // OutputDimensions is either 1 or a multiple of SimdWidth
// because then it is also the next layer's input dimension, which is padded
// to a multiple of SimdWidth.
- if constexpr (kOutputDimensions % kOutputSimdWidth == 0)
+ if constexpr (OutputDimensions % OutputSimdWidth == 0)
{
- constexpr IndexType kNumChunks = kPaddedInputDimensions / 4;
+ constexpr IndexType NumChunks = PaddedInputDimensions / 4;
const auto input32 = reinterpret_cast<const std::int32_t*>(input);
vec_t* outptr = reinterpret_cast<vec_t*>(output);
- std::memcpy(output, biases_, kOutputDimensions * sizeof(OutputType));
+ std::memcpy(output, biases, OutputDimensions * sizeof(OutputType));
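+      // Each vec_set_32 broadcasts one 32-bit input chunk (4 uint8 values)
+      // to all lanes; thanks to the scrambled layout, colN[j] then holds the
+      // matching 4 weights for OutputSimdWidth consecutive outputs.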
- for (int i = 0; i < (int)kNumChunks - 3; i += 4)
+ for (int i = 0; i < (int)NumChunks - 3; i += 4)
{
const vec_t in0 = vec_set_32(input32[i + 0]);
const vec_t in1 = vec_set_32(input32[i + 1]);
const vec_t in2 = vec_set_32(input32[i + 2]);
const vec_t in3 = vec_set_32(input32[i + 3]);
- const auto col0 = reinterpret_cast<const vec_t*>(&weights_[(i + 0) * kOutputDimensions * 4]);
- const auto col1 = reinterpret_cast<const vec_t*>(&weights_[(i + 1) * kOutputDimensions * 4]);
- const auto col2 = reinterpret_cast<const vec_t*>(&weights_[(i + 2) * kOutputDimensions * 4]);
- const auto col3 = reinterpret_cast<const vec_t*>(&weights_[(i + 3) * kOutputDimensions * 4]);
- for (int j = 0; j * kOutputSimdWidth < kOutputDimensions; ++j)
+ const auto col0 = reinterpret_cast<const vec_t*>(&weights[(i + 0) * OutputDimensions * 4]);
+ const auto col1 = reinterpret_cast<const vec_t*>(&weights[(i + 1) * OutputDimensions * 4]);
+ const auto col2 = reinterpret_cast<const vec_t*>(&weights[(i + 2) * OutputDimensions * 4]);
+ const auto col3 = reinterpret_cast<const vec_t*>(&weights[(i + 3) * OutputDimensions * 4]);
+ for (int j = 0; j * OutputSimdWidth < OutputDimensions; ++j)
vec_add_dpbusd_32x4(outptr[j], in0, col0[j], in1, col1[j], in2, col2[j], in3, col3[j]);
}
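+      // Add back the exact contribution of the weights that read_parameters
+      // zeroed to keep the 16-bit intermediate sums from saturating.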
for (int i = 0; i < canSaturate16.count; ++i)
output[canSaturate16.ids[i].out] += input[canSaturate16.ids[i].in] * canSaturate16.ids[i].w;
}
- else if constexpr (kOutputDimensions == 1)
+ else if constexpr (OutputDimensions == 1)
{
#if defined (USE_AVX512)
- if constexpr (kPaddedInputDimensions % (kSimdWidth * 2) != 0)
+ if constexpr (PaddedInputDimensions % (SimdWidth * 2) != 0)
{
- constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
- const auto input_vector256 = reinterpret_cast<const __m256i*>(input);
+ constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
+ const auto inputVector256 = reinterpret_cast<const __m256i*>(input);
__m256i sum0 = _mm256_setzero_si256();
- const auto row0 = reinterpret_cast<const __m256i*>(&weights_[0]);
+ const auto row0 = reinterpret_cast<const __m256i*>(&weights[0]);
- for (int j = 0; j < (int)kNumChunks; ++j)
+ for (int j = 0; j < (int)NumChunks; ++j)
{
- const __m256i in = input_vector256[j];
+ const __m256i in = inputVector256[j];
m256_add_dpbusd_epi32(sum0, in, row0[j]);
}
- output[0] = m256_hadd(sum0, biases_[0]);
+ output[0] = m256_hadd(sum0, biases[0]);
}
else
#endif
{
#if defined (USE_AVX512)
- constexpr IndexType kNumChunks = kPaddedInputDimensions / (kSimdWidth * 2);
+ constexpr IndexType NumChunks = PaddedInputDimensions / (SimdWidth * 2);
#else
- constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
+ constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
#endif
vec_t sum0 = vec_setzero();
- const auto row0 = reinterpret_cast<const vec_t*>(&weights_[0]);
+ const auto row0 = reinterpret_cast<const vec_t*>(&weights[0]);
- for (int j = 0; j < (int)kNumChunks; ++j)
+ for (int j = 0; j < (int)NumChunks; ++j)
{
- const vec_t in = input_vector[j];
+ const vec_t in = inputVector[j];
vec_add_dpbusd_32(sum0, in, row0[j]);
}
- output[0] = vec_hadd(sum0, biases_[0]);
+ output[0] = vec_hadd(sum0, biases[0]);
}
}
auto output = reinterpret_cast<OutputType*>(buffer);
#if defined(USE_SSE2)
- constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
- const __m128i kZeros = _mm_setzero_si128();
- const auto input_vector = reinterpret_cast<const __m128i*>(input);
+ constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
+ const __m128i Zeros = _mm_setzero_si128();
+ const auto inputVector = reinterpret_cast<const __m128i*>(input);
#elif defined(USE_MMX)
- constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
- const __m64 kZeros = _mm_setzero_si64();
- const auto input_vector = reinterpret_cast<const __m64*>(input);
+ constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
+ const __m64 Zeros = _mm_setzero_si64();
+ const auto inputVector = reinterpret_cast<const __m64*>(input);
#elif defined(USE_NEON)
- constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
- const auto input_vector = reinterpret_cast<const int8x8_t*>(input);
+ constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
+ const auto inputVector = reinterpret_cast<const int8x8_t*>(input);
#endif
- for (IndexType i = 0; i < kOutputDimensions; ++i) {
- const IndexType offset = i * kPaddedInputDimensions;
+ for (IndexType i = 0; i < OutputDimensions; ++i) {
+ const IndexType offset = i * PaddedInputDimensions;
#if defined(USE_SSE2)
- __m128i sum_lo = _mm_cvtsi32_si128(biases_[i]);
- __m128i sum_hi = kZeros;
- const auto row = reinterpret_cast<const __m128i*>(&weights_[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
+ __m128i sumLo = _mm_cvtsi32_si128(biases[i]);
+ __m128i sumHi = Zeros;
+ const auto row = reinterpret_cast<const __m128i*>(&weights[offset]);
+ for (IndexType j = 0; j < NumChunks; ++j) {
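+        // Widen to 16 bits: unpacking row_j with itself and shifting right
+        // arithmetically by 8 sign-extends the int8 weights; the uint8 inputs
+        // are zero-extended by unpacking against Zeros instead.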
__m128i row_j = _mm_load_si128(&row[j]);
- __m128i input_j = _mm_load_si128(&input_vector[j]);
- __m128i extended_row_lo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
- __m128i extended_row_hi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
- __m128i extended_input_lo = _mm_unpacklo_epi8(input_j, kZeros);
- __m128i extended_input_hi = _mm_unpackhi_epi8(input_j, kZeros);
- __m128i product_lo = _mm_madd_epi16(extended_row_lo, extended_input_lo);
- __m128i product_hi = _mm_madd_epi16(extended_row_hi, extended_input_hi);
- sum_lo = _mm_add_epi32(sum_lo, product_lo);
- sum_hi = _mm_add_epi32(sum_hi, product_hi);
+ __m128i input_j = _mm_load_si128(&inputVector[j]);
+ __m128i extendedRowLo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
+ __m128i extendedRowHi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
+ __m128i extendedInputLo = _mm_unpacklo_epi8(input_j, Zeros);
+ __m128i extendedInputHi = _mm_unpackhi_epi8(input_j, Zeros);
+ __m128i productLo = _mm_madd_epi16(extendedRowLo, extendedInputLo);
+ __m128i productHi = _mm_madd_epi16(extendedRowHi, extendedInputHi);
+ sumLo = _mm_add_epi32(sumLo, productLo);
+ sumHi = _mm_add_epi32(sumHi, productHi);
}
- __m128i sum = _mm_add_epi32(sum_lo, sum_hi);
- __m128i sum_high_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
- sum = _mm_add_epi32(sum, sum_high_64);
+ __m128i sum = _mm_add_epi32(sumLo, sumHi);
+ __m128i sumHigh_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
+ sum = _mm_add_epi32(sum, sumHigh_64);
__m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
sum = _mm_add_epi32(sum, sum_second_32);
output[i] = _mm_cvtsi128_si32(sum);
#elif defined(USE_MMX)
- __m64 sum_lo = _mm_cvtsi32_si64(biases_[i]);
- __m64 sum_hi = kZeros;
- const auto row = reinterpret_cast<const __m64*>(&weights_[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
+ __m64 sumLo = _mm_cvtsi32_si64(biases[i]);
+ __m64 sumHi = Zeros;
+ const auto row = reinterpret_cast<const __m64*>(&weights[offset]);
+ for (IndexType j = 0; j < NumChunks; ++j) {
__m64 row_j = row[j];
- __m64 input_j = input_vector[j];
- __m64 extended_row_lo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
- __m64 extended_row_hi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
- __m64 extended_input_lo = _mm_unpacklo_pi8(input_j, kZeros);
- __m64 extended_input_hi = _mm_unpackhi_pi8(input_j, kZeros);
- __m64 product_lo = _mm_madd_pi16(extended_row_lo, extended_input_lo);
- __m64 product_hi = _mm_madd_pi16(extended_row_hi, extended_input_hi);
- sum_lo = _mm_add_pi32(sum_lo, product_lo);
- sum_hi = _mm_add_pi32(sum_hi, product_hi);
+ __m64 input_j = inputVector[j];
+ __m64 extendedRowLo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
+ __m64 extendedRowHi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
+ __m64 extendedInputLo = _mm_unpacklo_pi8(input_j, Zeros);
+ __m64 extendedInputHi = _mm_unpackhi_pi8(input_j, Zeros);
+ __m64 productLo = _mm_madd_pi16(extendedRowLo, extendedInputLo);
+ __m64 productHi = _mm_madd_pi16(extendedRowHi, extendedInputHi);
+ sumLo = _mm_add_pi32(sumLo, productLo);
+ sumHi = _mm_add_pi32(sumHi, productHi);
}
- __m64 sum = _mm_add_pi32(sum_lo, sum_hi);
+ __m64 sum = _mm_add_pi32(sumLo, sumHi);
sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
output[i] = _mm_cvtsi64_si32(sum);
#elif defined(USE_NEON)
- int32x4_t sum = {biases_[i]};
- const auto row = reinterpret_cast<const int8x8_t*>(&weights_[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- int16x8_t product = vmull_s8(input_vector[j * 2], row[j * 2]);
- product = vmlal_s8(product, input_vector[j * 2 + 1], row[j * 2 + 1]);
+ int32x4_t sum = {biases[i]};
+ const auto row = reinterpret_cast<const int8x8_t*>(&weights[offset]);
+ for (IndexType j = 0; j < NumChunks; ++j) {
+ int16x8_t product = vmull_s8(inputVector[j * 2], row[j * 2]);
+ product = vmlal_s8(product, inputVector[j * 2 + 1], row[j * 2 + 1]);
sum = vpadalq_s16(sum, product);
}
output[i] = sum[0] + sum[1] + sum[2] + sum[3];
#else
- OutputType sum = biases_[i];
- for (IndexType j = 0; j < kInputDimensions; ++j) {
- sum += weights_[offset + j] * input[j];
+ OutputType sum = biases[i];
+ for (IndexType j = 0; j < InputDimensions; ++j) {
+ sum += weights[offset + j] * input[j];
}
output[i] = sum;
#endif
using BiasType = OutputType;
using WeightType = std::int8_t;
- PreviousLayer previous_layer_;
+ PreviousLayer previousLayer;
- alignas(kCacheLineSize) BiasType biases_[kOutputDimensions];
- alignas(kCacheLineSize) WeightType weights_[kOutputDimensions * kPaddedInputDimensions];
+ alignas(CacheLineSize) BiasType biases[OutputDimensions];
+ alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
#if defined (USE_SSSE3)
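+  // Records weights zeroed at load time to avoid 16-bit saturation: propagate
+  // adds w * input[in] back into output[out] exactly, and wIdx (the
+  // unscrambled weight index) lets write_parameters restore w on export.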
struct CanSaturate {
int count;
struct Entry {
+ uint32_t wIdx;
uint16_t out;
uint16_t in;
int8_t w;
- } ids[kPaddedInputDimensions * kOutputDimensions * 3 / 4];
+ } ids[PaddedInputDimensions * OutputDimensions * 3 / 4];
- void add(int i, int j, int8_t w) {
+ void add(int i, int j, int8_t w, uint32_t wIdx) {
+ ids[count].wIdx = wIdx;
ids[count].out = i;
ids[count].in = j;
ids[count].w = w;