1 #if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__) && !defined(__clang__)
2 #define HAS_MULTIVERSIONING 1
8 #if HAS_MULTIVERSIONING
// Scalar fallback: splits the byte-interleaved stream at src into two
// separate output streams.  Given the i += 2 stepping and the two write
// cursors, presumably src[i] goes through dptr1 and src[i+1] through dptr2
// — the loop body is elided in this excerpt; confirm against the full file.
14 // TODO: Support stride.
15 void memcpy_interleaved_slow(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, size_t n)
// Independent write cursors for the two de-interleaved destinations.
18 uint8_t *dptr1 = dest1;
19 uint8_t *dptr2 = dest2;
// Consumes two source bytes (one per destination) each iteration.
21 for (size_t i = 0; i < n; i += 2) {
27 #if HAS_MULTIVERSIONING
// GCC function multiversioning: two declarations of the same core kernel,
// one per target ISA.  The runtime resolver picks the best variant the CPU
// supports (SSE2 is the x86-64 baseline; AVX2 is preferred when available).
// The enclosing #if (see top of file) restricts this to GCC on x86.
29 __attribute__((target("sse2")))
30 size_t memcpy_interleaved_fastpath_core(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, const uint8_t *limit);
32 __attribute__((target("avx2")))
33 size_t memcpy_interleaved_fastpath_core(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, const uint8_t *limit);
// SSE2 variant of the core kernel: de-interleaves [src, limit) in 32-byte
// chunks (two 16-byte vectors per iteration), even bytes to dest1 and odd
// bytes to dest2.
35 __attribute__((target("sse2")))
36 size_t memcpy_interleaved_fastpath_core(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, const uint8_t *limit)
// __restrict: the compiler may assume the three streams do not alias.
// Aligned loads (_mm_load_si128) are safe because the caller 32-byte-aligns
// the source range before calling the core.
39 const __m128i * __restrict in = (const __m128i *)src;
40 __m128i * __restrict out1 = (__m128i *)dest1;
41 __m128i * __restrict out2 = (__m128i *)dest2;
// 0x00ff in every 16-bit lane: selects the even (low) byte of each pair.
43 __m128i mask_lower_byte = _mm_set1_epi16(0x00ff);
44 while (in < (const __m128i *)limit) {
45 __m128i data1 = _mm_load_si128(in);
46 __m128i data2 = _mm_load_si128(in + 1);
// Even-indexed bytes, zero-extended to 16 bits in place...
47 __m128i data1_lo = _mm_and_si128(data1, mask_lower_byte);
48 __m128i data2_lo = _mm_and_si128(data2, mask_lower_byte);
// ...and odd-indexed bytes, shifted down into the low byte of each pair.
49 __m128i data1_hi = _mm_srli_epi16(data1, 8);
50 __m128i data2_hi = _mm_srli_epi16(data2, 8);
// packus narrows the 16-bit lanes back to bytes, concatenating both input
// vectors; all values are already in [0, 255], so saturation never kicks in.
51 __m128i lo = _mm_packus_epi16(data1_lo, data2_lo);
52 _mm_storeu_si128(out1, lo);
53 __m128i hi = _mm_packus_epi16(data1_hi, data2_hi);
54 _mm_storeu_si128(out2, hi);
// NOTE(review): pointer increments and the returned byte count are elided
// in this excerpt.
// AVX2 variant of the core kernel: de-interleaves [src, limit) in 64-byte
// chunks (two 32-byte vectors per iteration), even bytes to dest1 and odd
// bytes to dest2.
65 __attribute__((target("avx2")))
66 size_t memcpy_interleaved_fastpath_core(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, const uint8_t *limit)
69 const __m256i *__restrict in = (const __m256i *)src;
70 __m256i *__restrict out1 = (__m256i *)dest1;
71 __m256i *__restrict out2 = (__m256i *)dest2;
// Per-lane byte shuffle control: within each 128-bit lane, gather the
// even-indexed bytes into the lower 8 bytes and the odd-indexed bytes into
// the upper 8 (_mm256_shuffle_epi8 operates on each lane independently).
73 __m256i shuffle_cw = _mm256_set_epi8(
74 15, 13, 11, 9, 7, 5, 3, 1, 14, 12, 10, 8, 6, 4, 2, 0,
75 15, 13, 11, 9, 7, 5, 3, 1, 14, 12, 10, 8, 6, 4, 2, 0);
76 while (in < (const __m256i *)limit) {
77 // Note: For brevity, comments show lanes as if they were 2x64-bit (they're actually 2x128).
// Non-temporal (streaming) aligned loads; the caller guarantees 32-byte
// alignment of the source range.
78 __m256i data1 = _mm256_stream_load_si256(in); // AaBbCcDd EeFfGgHh
79 __m256i data2 = _mm256_stream_load_si256(in + 1); // IiJjKkLl MmNnOoPp
81 data1 = _mm256_shuffle_epi8(data1, shuffle_cw); // ABCDabcd EFGHefgh
82 data2 = _mm256_shuffle_epi8(data2, shuffle_cw); // IJKLijkl MNOPmnop
// Qword permutation 0b11011000 = (3,1,2,0): moves all even bytes to the
// low 128-bit half and all odd bytes to the high half.
84 data1 = _mm256_permute4x64_epi64(data1, 0b11011000); // ABCDEFGH abcdefgh
85 data2 = _mm256_permute4x64_epi64(data2, 0b11011000); // IJKLMNOP ijklmnop
// 0b00100000: low halves of both vectors -> all even bytes (for dest1);
// 0b00110001: high halves of both vectors -> all odd bytes (for dest2).
87 __m256i lo = _mm256_permute2x128_si256(data1, data2, 0b00100000);
88 __m256i hi = _mm256_permute2x128_si256(data1, data2, 0b00110001);
90 _mm256_storeu_si256(out1, lo);
91 _mm256_storeu_si256(out2, hi);
// NOTE(review): pointer increments and the returned byte count are elided
// in this excerpt.
// Prologue/dispatcher for the vectorized path: trims the range so the core
// kernel sees a 32-byte-aligned start and a length that is a multiple of 64,
// feeding the unaligned head through the scalar path first.  The tail that
// does not fit is left for the caller to finish.
102 // Returns the number of bytes consumed.
103 __attribute__((target("sse2", "avx2")))
104 size_t memcpy_interleaved_fastpath(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, size_t n)
106 const uint8_t *limit = src + n;
109 // Align end to 32 bytes.
// Rounds limit DOWN, so up to 31 trailing bytes are excluded here and
// handled by the caller's scalar cleanup.
110 limit = (const uint8_t *)(intptr_t(limit) & ~31);
116 // Process [0,31] bytes, such that start gets aligned to 32 bytes.
117 const uint8_t *aligned_src = (const uint8_t *)(intptr_t(src + 31) & ~31);
118 if (aligned_src != src) {
119 size_t n2 = aligned_src - src;
// De-interleave the unaligned head bytewise via the slow path.
// NOTE(review): the subsequent src/dest advancement and the accumulation
// into `consumed` (declared on an elided line) are not visible here.
120 memcpy_interleaved_slow(dest1, dest2, src, n2);
// Shrink the range until the core's 64-bytes-per-iteration invariant holds
// (adjustment body elided in this excerpt).
130 // Make the length a multiple of 64.
131 if (((limit - src) % 64) != 0) {
134 assert(((limit - src) % 64) == 0);
// The multiversioning resolver picks the SSE2 or AVX2 core at runtime.
136 return consumed + memcpy_interleaved_fastpath_core(dest1, dest2, src, limit);
139 #endif // defined(HAS_MULTIVERSIONING)
// Public entry point: de-interleaves n bytes from src, alternating bytes
// into dest1 and dest2 (each destination receives n/2 bytes).
141 void memcpy_interleaved(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, size_t n)
143 #if HAS_MULTIVERSIONING
// The fast path returns how many source bytes it handled; since the bytes
// alternate, each destination advanced by exactly half of that.
144 size_t consumed = memcpy_interleaved_fastpath(dest1, dest2, src, n);
146 dest1 += consumed / 2;
147 dest2 += consumed / 2;
// NOTE(review): the matching src += consumed / n -= consumed adjustments
// are elided in this excerpt — confirm against the full file.
// Scalar cleanup for whatever remains (the whole range when
// multiversioning is unavailable on this compiler/arch).
155 memcpy_interleaved_slow(dest1, dest2, src, n);