// Read network parameters
bool ReadParameters(std::istream& stream) {
- stream.read(reinterpret_cast<char*>(biases_),
- kHalfDimensions * sizeof(BiasType));
- stream.read(reinterpret_cast<char*>(weights_),
- kHalfDimensions * kInputDimensions * sizeof(WeightType));
+ // Read values one at a time through read_le<> instead of a raw bulk
+ // stream.read: the bulk read assumed the host byte order matched the
+ // file, whereas read_le presumably decodes the on-disk little-endian
+ // layout portably (NOTE(review): confirm read_le's contract at its
+ // definition — it is not visible in this hunk).
+ for (std::size_t i = 0; i < kHalfDimensions; ++i)
+ biases_[i] = read_le<BiasType>(stream);
+ for (std::size_t i = 0; i < kHalfDimensions * kInputDimensions; ++i)
+ weights_[i] = read_le<WeightType>(stream);
+ // Any failed extraction above sets the stream's failbit, so this single
+ // check covers every read.
return !stream.fail();
}
constexpr int kControl = 0b11011000;
const __m256i kZero = _mm256_setzero_si256();
- #elif defined(USE_SSSE3)
+ #elif defined(USE_SSE2)
constexpr IndexType kNumChunks = kHalfDimensions / kSimdWidth;
#ifdef USE_SSE41
const __m128i k0x80s = _mm_set1_epi8(-128);
#endif
+ #elif defined(USE_MMX)
+ constexpr IndexType kNumChunks = kHalfDimensions / kSimdWidth;
+ const __m64 k0x80s = _mm_set1_pi8(-128);
+
#elif defined(USE_NEON)
constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
const int8x8_t kZero = {0};
_mm256_packs_epi16(sum0, sum1), kZero), kControl));
}
- #elif defined(USE_SSSE3)
+ #elif defined(USE_SSE2)
auto out = reinterpret_cast<__m128i*>(&output[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
__m128i sum0 = _mm_load_si128(&reinterpret_cast<const __m128i*>(
);
}
+ #elif defined(USE_MMX)
+ auto out = reinterpret_cast<__m64*>(&output[offset]);
+ for (IndexType j = 0; j < kNumChunks; ++j) {
+ __m64 sum0 = *(&reinterpret_cast<const __m64*>(
+ accumulation[perspectives[p]][0])[j * 2 + 0]);
+ __m64 sum1 = *(&reinterpret_cast<const __m64*>(
+ accumulation[perspectives[p]][0])[j * 2 + 1]);
+ const __m64 packedbytes = _mm_packs_pi16(sum0, sum1);
+ out[j] = _mm_subs_pi8(_mm_adds_pi8(packedbytes, k0x80s), k0x80s);
+ }
+
#elif defined(USE_NEON)
const auto out = reinterpret_cast<int8x8_t*>(&output[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
#endif
}
+ #if defined(USE_MMX)
+ _mm_empty();
+ #endif
}
private:
for (IndexType j = 0; j < kNumChunks; ++j)
accumulation[j] = _mm_add_epi16(accumulation[j], column[j]);
+ #elif defined(USE_MMX)
+ auto accumulation = reinterpret_cast<__m64*>(
+ &accumulator.accumulation[perspective][i][0]);
+ auto column = reinterpret_cast<const __m64*>(&weights_[offset]);
+ constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
+ for (IndexType j = 0; j < kNumChunks; ++j) {
+ accumulation[j] = _mm_add_pi16(accumulation[j], column[j]);
+ }
+
#elif defined(USE_NEON)
auto accumulation = reinterpret_cast<int16x8_t*>(
&accumulator.accumulation[perspective][i][0]);
}
}
+ #if defined(USE_MMX)
+ _mm_empty();
+ #endif
accumulator.computed_accumulation = true;
accumulator.computed_score = false;
auto accumulation = reinterpret_cast<__m128i*>(
&accumulator.accumulation[perspective][i][0]);
+ #elif defined(USE_MMX)
+ constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
+ auto accumulation = reinterpret_cast<__m64*>(
+ &accumulator.accumulation[perspective][i][0]);
+
#elif defined(USE_NEON)
constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
auto accumulation = reinterpret_cast<int16x8_t*>(
accumulation[j] = _mm_sub_epi16(accumulation[j], column[j]);
}
+ #elif defined(USE_MMX)
+ auto column = reinterpret_cast<const __m64*>(&weights_[offset]);
+ for (IndexType j = 0; j < kNumChunks; ++j) {
+ accumulation[j] = _mm_sub_pi16(accumulation[j], column[j]);
+ }
+
#elif defined(USE_NEON)
auto column = reinterpret_cast<const int16x8_t*>(&weights_[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
accumulation[j] = _mm_add_epi16(accumulation[j], column[j]);
}
+ #elif defined(USE_MMX)
+ auto column = reinterpret_cast<const __m64*>(&weights_[offset]);
+ for (IndexType j = 0; j < kNumChunks; ++j) {
+ accumulation[j] = _mm_add_pi16(accumulation[j], column[j]);
+ }
+
#elif defined(USE_NEON)
auto column = reinterpret_cast<const int16x8_t*>(&weights_[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
}
}
}
+ #if defined(USE_MMX)
+ _mm_empty();
+ #endif
accumulator.computed_accumulation = true;
accumulator.computed_score = false;