From: mstembera
Date: Sun, 24 Sep 2023 22:15:50 +0000 (-0700)
Subject: Remove handcrafted MMX code
X-Git-Url: https://git.sesse.net/?p=stockfish;a=commitdiff_plain;h=8a912951de6d4bff78d3ff5258213a0c7e6f494e

Remove handcrafted MMX code

too small a benefit to maintain this old target

closes https://github.com/official-stockfish/Stockfish/pull/4804

No functional change
---

diff --git a/src/Makefile b/src/Makefile
index a59303ac..5b43c35f 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -674,7 +674,6 @@ ifeq ($(sse2),yes)
 endif
 
 ifeq ($(mmx),yes)
-	CXXFLAGS += -DUSE_MMX
 	ifeq ($(comp),$(filter $(comp),gcc clang mingw icx))
 		CXXFLAGS += -mmmx
 	endif
@@ -794,11 +793,11 @@ help:
 	@echo "x86-64-sse41-popcnt     > x86 64-bit with sse41 and popcnt support"
 	@echo "x86-64-modern           > deprecated, currently x86-64-sse41-popcnt"
 	@echo "x86-64-ssse3            > x86 64-bit with ssse3 support"
-	@echo "x86-64-sse3-popcnt      > x86 64-bit with sse3 and popcnt support"
+	@echo "x86-64-sse3-popcnt      > x86 64-bit with sse3 compile and popcnt support"
 	@echo "x86-64                  > x86 64-bit generic (with sse2 support)"
 	@echo "x86-32-sse41-popcnt     > x86 32-bit with sse41 and popcnt support"
 	@echo "x86-32-sse2             > x86 32-bit with sse2 support"
-	@echo "x86-32                  > x86 32-bit generic (with mmx and sse support)"
+	@echo "x86-32                  > x86 32-bit generic (with mmx compile support)"
 	@echo "ppc-64                  > PPC 64-bit"
 	@echo "ppc-32                  > PPC 32-bit"
 	@echo "armv7                   > ARMv7 32-bit"
diff --git a/src/misc.cpp b/src/misc.cpp
index 98e346a6..2f6ffd28 100644
--- a/src/misc.cpp
+++ b/src/misc.cpp
@@ -282,9 +282,6 @@ std::string compiler_info() {
     compiler += " SSE2";
   #endif
   compiler += (HasPopCnt ? " POPCNT" : "");
-  #if defined(USE_MMX)
-    compiler += " MMX";
-  #endif
   #if defined(USE_NEON_DOTPROD)
     compiler += " NEON_DOTPROD";
   #elif defined(USE_NEON)
diff --git a/src/nnue/layers/affine_transform.h b/src/nnue/layers/affine_transform.h
index 42839bb5..fc65c343 100644
--- a/src/nnue/layers/affine_transform.h
+++ b/src/nnue/layers/affine_transform.h
@@ -45,18 +45,13 @@ namespace Stockfish::Eval::NNUE::Layers {
   template <IndexType InputDimensions, IndexType PaddedInputDimensions, IndexType OutputDimensions>
   static void affine_transform_non_ssse3(std::int32_t* output, const std::int8_t* weights, const std::int32_t* biases, const std::uint8_t* input)
   {
-# if defined(USE_SSE2) || defined(USE_MMX) || defined(USE_NEON_DOTPROD) || defined(USE_NEON)
+# if defined(USE_SSE2) || defined(USE_NEON_DOTPROD) || defined(USE_NEON)
 # if defined(USE_SSE2)
     // At least a multiple of 16, with SSE2.
     constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 16) / 16;
     const __m128i Zeros = _mm_setzero_si128();
     const auto inputVector = reinterpret_cast<const __m128i*>(input);
 
-# elif defined(USE_MMX)
-    constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 8) / 8;
-    const __m64 Zeros = _mm_setzero_si64();
-    const auto inputVector = reinterpret_cast<const __m64*>(input);
-
 # elif defined(USE_NEON_DOTPROD)
     constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 16) / 16;
     const auto inputVector = reinterpret_cast<const int8x16_t*>(input);
@@ -92,26 +87,6 @@ namespace Stockfish::Eval::NNUE::Layers {
       sum = _mm_add_epi32(sum, sum_second_32);
       output[i] = _mm_cvtsi128_si32(sum);
 
-# elif defined(USE_MMX)
-      __m64 sumLo = _mm_cvtsi32_si64(biases[i]);
-      __m64 sumHi = Zeros;
-      const auto row = reinterpret_cast<const __m64*>(&weights[offset]);
-      for (IndexType j = 0; j < NumChunks; ++j) {
-        __m64 row_j = row[j];
-        __m64 input_j = inputVector[j];
-        __m64 extendedRowLo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
-        __m64 extendedRowHi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
-        __m64 extendedInputLo = _mm_unpacklo_pi8(input_j, Zeros);
-        __m64 extendedInputHi = _mm_unpackhi_pi8(input_j, Zeros);
-        __m64 productLo = _mm_madd_pi16(extendedRowLo, extendedInputLo);
-        __m64 productHi = _mm_madd_pi16(extendedRowHi, extendedInputHi);
-        sumLo = _mm_add_pi32(sumLo, productLo);
-        sumHi = _mm_add_pi32(sumHi, productHi);
-      }
-      __m64 sum = _mm_add_pi32(sumLo, sumHi);
-      sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
-      output[i] = _mm_cvtsi64_si32(sum);
-
 # elif defined(USE_NEON_DOTPROD)
       int32x4_t sum = {biases[i]};
       const auto row = reinterpret_cast<const int8x16_t*>(&weights[offset]);
@@ -132,11 +107,6 @@ namespace Stockfish::Eval::NNUE::Layers {
 
 # endif
     }
-
-# if defined(USE_MMX)
-    _mm_empty();
-# endif
-
 # else
   std::memcpy(output, biases, sizeof(std::int32_t) * OutputDimensions);
 
diff --git a/src/nnue/layers/clipped_relu.h b/src/nnue/layers/clipped_relu.h
index aab824b3..48cd6c69 100644
--- a/src/nnue/layers/clipped_relu.h
+++ b/src/nnue/layers/clipped_relu.h
@@ -135,24 +135,6 @@ namespace Stockfish::Eval::NNUE::Layers {
       }
       constexpr IndexType Start = NumChunks * SimdWidth;
 
-  #elif defined(USE_MMX)
-      constexpr IndexType NumChunks = InputDimensions / SimdWidth;
-      const __m64 k0x80s = _mm_set1_pi8(-128);
-      const auto in = reinterpret_cast<const __m64*>(input);
-      const auto out = reinterpret_cast<__m64*>(output);
-      for (IndexType i = 0; i < NumChunks; ++i) {
-        const __m64 words0 = _mm_srai_pi16(
-            _mm_packs_pi32(in[i * 4 + 0], in[i * 4 + 1]),
-            WeightScaleBits);
-        const __m64 words1 = _mm_srai_pi16(
-            _mm_packs_pi32(in[i * 4 + 2], in[i * 4 + 3]),
-            WeightScaleBits);
-        const __m64 packedbytes = _mm_packs_pi16(words0, words1);
-        out[i] = _mm_subs_pi8(_mm_adds_pi8(packedbytes, k0x80s), k0x80s);
-      }
-      _mm_empty();
-      constexpr IndexType Start = NumChunks * SimdWidth;
-
   #elif defined(USE_NEON)
       constexpr IndexType NumChunks = InputDimensions / (SimdWidth / 2);
       const int8x8_t Zero = {0};
diff --git a/src/nnue/layers/simd.h b/src/nnue/layers/simd.h
index f478cd78..349217ed 100644
--- a/src/nnue/layers/simd.h
+++ b/src/nnue/layers/simd.h
@@ -31,9 +31,6 @@
 #elif defined(USE_SSE2)
 # include <emmintrin.h>
 
-#elif defined(USE_MMX)
-# include <mmintrin.h>
-
 #elif defined(USE_NEON)
 # include <arm_neon.h>
 #endif
diff --git a/src/nnue/nnue_common.h b/src/nnue/nnue_common.h
index e159c5dc..779f4e75 100644
--- a/src/nnue/nnue_common.h
+++ b/src/nnue/nnue_common.h
@@ -42,9 +42,6 @@
 #elif defined(USE_SSE2)
 #include <emmintrin.h>
 
-#elif defined(USE_MMX)
-#include <mmintrin.h>
-
 #elif defined(USE_NEON)
 #include <arm_neon.h>
 #endif
@@ -71,9 +68,6 @@ namespace Stockfish::Eval::NNUE {
   #elif defined(USE_SSE2)
   constexpr std::size_t SimdWidth = 16;
 
-  #elif defined(USE_MMX)
-  constexpr std::size_t SimdWidth = 8;
-
   #elif defined(USE_NEON)
   constexpr std::size_t SimdWidth = 16;
   #endif
diff --git a/src/nnue/nnue_feature_transformer.h b/src/nnue/nnue_feature_transformer.h
index 902918b2..77a175f5 100644
--- a/src/nnue/nnue_feature_transformer.h
+++ b/src/nnue/nnue_feature_transformer.h
@@ -117,34 +117,6 @@ namespace Stockfish::Eval::NNUE {
   #define NumRegistersSIMD (Is64Bit ? 16 : 8)
   #define MaxChunkSize 16
 
-  #elif USE_MMX
-  using vec_t = __m64;
-  using psqt_vec_t = __m64;
-  #define vec_load(a) (*(a))
-  #define vec_store(a,b) *(a)=(b)
-  #define vec_add_16(a,b) _mm_add_pi16(a,b)
-  #define vec_sub_16(a,b) _mm_sub_pi16(a,b)
-  #define vec_mul_16(a,b) _mm_mullo_pi16(a,b)
-  #define vec_zero() _mm_setzero_si64()
-  #define vec_set_16(a) _mm_set1_pi16(a)
-  inline vec_t vec_max_16(vec_t a,vec_t b){
-    vec_t comparison = _mm_cmpgt_pi16(a,b);
-    return _mm_or_si64(_mm_and_si64(comparison, a), _mm_andnot_si64(comparison, b));
-  }
-  inline vec_t vec_min_16(vec_t a,vec_t b){
-    vec_t comparison = _mm_cmpgt_pi16(a,b);
-    return _mm_or_si64(_mm_and_si64(comparison, b), _mm_andnot_si64(comparison, a));
-  }
-  #define vec_msb_pack_16(a,b) _mm_packs_pi16(_mm_srli_pi16(a,7),_mm_srli_pi16(b,7))
-  #define vec_load_psqt(a) (*(a))
-  #define vec_store_psqt(a,b) *(a)=(b)
-  #define vec_add_psqt_32(a,b) _mm_add_pi32(a,b)
-  #define vec_sub_psqt_32(a,b) _mm_sub_pi32(a,b)
-  #define vec_zero_psqt() _mm_setzero_si64()
-  #define vec_cleanup() _mm_empty()
-  #define NumRegistersSIMD 8
-  #define MaxChunkSize 8
-
   #elif USE_NEON
   using vec_t = int16x8_t;
   using psqt_vec_t = int32x4_t;
@@ -335,10 +307,6 @@ namespace Stockfish::Eval::NNUE {
 #endif
       }
 
-#if defined(vec_cleanup)
-      vec_cleanup();
-#endif
-
       return psqt;
     } // end of function transform()
 
@@ -529,10 +497,6 @@ namespace Stockfish::Eval::NNUE {
         }
       }
 #endif
-
-  #if defined(USE_MMX)
-      _mm_empty();
-  #endif
     }
 
     template<Color Perspective>
@@ -613,10 +577,6 @@ namespace Stockfish::Eval::NNUE {
           accumulator.psqtAccumulation[Perspective][k] += psqtWeights[index * PSQTBuckets + k];
       }
 #endif
-
-  #if defined(USE_MMX)
-      _mm_empty();
-  #endif
     }
 
     template<Color Perspective>