// NOTE(review): this is a unified-diff fragment (hunk headers stripped), not
// compilable C++. It patches the SIMD branches of an int32 -> int8
// clipped-ReLU conversion loop (looks like Stockfish NNUE clipped_relu.h
// propagate() -- TODO confirm against the full file). Annotations below
// describe each hunk; do not apply this file as-is without restoring @@ headers.

// --- AVX2 branch (context + changes) ---------------------------------------
// Each iteration consumes 4x __m256i of int32 (32 values) and emits one
// __m256i of int8: packs_epi32 narrows int32->int16 with saturation,
// srai_epi16 divides by 2^kWeightScaleBits, packs_epi16 narrows to int8.
const auto out = reinterpret_cast<__m256i*>(output);
for (IndexType i = 0; i < kNumChunks; ++i) {
const __m256i words0 = _mm256_srai_epi16(_mm256_packs_epi32(
// Hunk: replace plain aligned load/store intrinsics with loadA/storeA
// wrappers. Presumably these select aligned vs. unaligned access depending
// on a build flag -- their definition is not visible here; TODO confirm.
- _mm256_load_si256(&in[i * 4 + 0]),
- _mm256_load_si256(&in[i * 4 + 1])), kWeightScaleBits);
+ _mm256_loadA_si256(&in[i * 4 + 0]),
+ _mm256_loadA_si256(&in[i * 4 + 1])), kWeightScaleBits);
const __m256i words1 = _mm256_srai_epi16(_mm256_packs_epi32(
- _mm256_load_si256(&in[i * 4 + 2]),
- _mm256_load_si256(&in[i * 4 + 3])), kWeightScaleBits);
- _mm256_store_si256(
- &out[i], _mm256_permutevar8x32_epi32(_mm256_max_epi8(
+ _mm256_loadA_si256(&in[i * 4 + 2]),
+ _mm256_loadA_si256(&in[i * 4 + 3])), kWeightScaleBits);
// max_epi8 with kZero clamps negatives to 0 (the ReLU floor; the upper
// clamp at 127 is inherent in packs_epi16 saturation). permutevar8x32
// undoes the 128-bit-lane interleaving that the AVX2 pack instructions
// introduce -- kOffsets is defined outside this fragment.
+ _mm256_storeA_si256(&out[i], _mm256_permutevar8x32_epi32(_mm256_max_epi8(
_mm256_packs_epi16(words0, words1), kZero), kOffsets));
}
constexpr IndexType kStart = kNumChunks * kSimdWidth;
// Hunk: widen the fallback guard from SSSE3 to SSE2. This assumes the body
// of that branch uses only SSE2-level intrinsics -- the branch body is not
// fully visible here, so verify no SSSE3-only instructions remain in it.
- #elif defined(USE_SSSE3)
+ #elif defined(USE_SSE2)
constexpr IndexType kNumChunks = kInputDimensions / kSimdWidth;
#ifdef USE_SSE41
// NOTE(review): the lone '}' below is context from a separate hunk (the @@
// headers were stripped when these hunks were concatenated).
}
constexpr IndexType kStart = kNumChunks * kSimdWidth;
// --- New MMX branch (all added lines) --------------------------------------
// Adds a 64-bit MMX fallback. Layout mirrors the AVX2 path: 4x __m64 of
// int32 (8 values) -> shift -> saturating pack -> one __m64 of int8.
+ #elif defined(USE_MMX)
+ constexpr IndexType kNumChunks = kInputDimensions / kSimdWidth;
+ const __m64 k0x80s = _mm_set1_pi8(-128);
+ const auto in = reinterpret_cast<const __m64*>(input);
+ const auto out = reinterpret_cast<__m64*>(output);
+ for (IndexType i = 0; i < kNumChunks; ++i) {
+ const __m64 words0 = _mm_srai_pi16(
+ _mm_packs_pi32(in[i * 4 + 0], in[i * 4 + 1]),
+ kWeightScaleBits);
+ const __m64 words1 = _mm_srai_pi16(
+ _mm_packs_pi32(in[i * 4 + 2], in[i * 4 + 3]),
+ kWeightScaleBits);
+ const __m64 packedbytes = _mm_packs_pi16(words0, words1);
// MMX has no signed-byte max, so max(x, 0) is emulated: adds_pi8(x, -128)
// saturates every negative x to -128 (and maps x >= 0 to x - 128 exactly);
// subs_pi8(., -128) then restores non-negatives and yields 0 for the
// saturated negatives. Net effect: negatives -> 0, [0,127] unchanged.
+ out[i] = _mm_subs_pi8(_mm_adds_pi8(packedbytes, k0x80s), k0x80s);
+ }
// _mm_empty() clears the x87/MMX state; required before any subsequent
// floating-point code on this path.
+ _mm_empty();
// kStart marks where the scalar remainder loop (outside this fragment,
// presumably shared by all branches) picks up -- TODO confirm.
+ constexpr IndexType kStart = kNumChunks * kSimdWidth;
+
// --- NEON branch (context only; body continues past this fragment) ---------
#elif defined(USE_NEON)
constexpr IndexType kNumChunks = kInputDimensions / (kSimdWidth / 2);
const int8x8_t kZero = {0};