1 #if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
2 #define HAS_MULTIVERSIONING 1
8 #if HAS_MULTIVERSIONING
14 // TODO: Support stride.
// Scalar reference path: de-interleaves <n> bytes from <src> into <dest1>
// and <dest2>, consuming two source bytes per loop iteration. The loop
// body and the function's close are not visible in this chunk --
// presumably *dptr1++ = src[i]; *dptr2++ = src[i + 1]; confirm against
// the full file.
15 void memcpy_interleaved_slow(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, size_t n)
18 uint8_t *dptr1 = dest1;
19 uint8_t *dptr2 = dest2;
21 for (size_t i = 0; i < n; i += 2) {
27 #if HAS_MULTIVERSIONING
// Forward declarations for the multiversioned core. With
// __attribute__((target(...))), GCC/Clang emit one variant per listed ISA
// plus a resolver that picks the best variant the running CPU supports.
// All variants share one contract: de-interleave the bytes in
// [src, limit) into dest1/dest2 and return a count of source bytes
// consumed (see memcpy_interleaved_fastpath below).
29 __attribute__((target("default")))
30 size_t memcpy_interleaved_fastpath_core(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, const uint8_t *limit);
32 __attribute__((target("sse2")))
33 size_t memcpy_interleaved_fastpath_core(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, const uint8_t *limit);
35 __attribute__((target("avx2")))
36 size_t memcpy_interleaved_fastpath_core(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, const uint8_t *limit);
38 __attribute__((target("default")))
// Baseline ("default") variant, chosen when the CPU has neither SSE2 nor
// AVX2: no vectorized fast path is possible, so this variant does no
// work. (Its body is elided from this chunk; presumably it reports 0
// bytes consumed -- confirm against the full file.)
39 size_t memcpy_interleaved_fastpath_core(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, const uint8_t *limit)
41 // No fast path possible unless we have SSE2 or higher.
45 __attribute__((target("sse2")))
// SSE2 variant of the core: each iteration reads 32 source bytes as two
// 16-byte vectors (aligned loads, so <src> must be 16-byte aligned) and
// writes the even-indexed bytes to dest1 and the odd-indexed bytes to
// dest2 via unaligned stores. The pointer advances inside the loop and
// the return statement are not visible in this chunk.
46 size_t memcpy_interleaved_fastpath_core(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, const uint8_t *limit)
49 const __m128i * __restrict in = (const __m128i *)src;
50 __m128i * __restrict out1 = (__m128i *)dest1;
51 __m128i * __restrict out2 = (__m128i *)dest2;
// 0x00ff in every 16-bit lane: selects the low (even-offset) byte of
// each source byte pair.
53 __m128i mask_lower_byte = _mm_set1_epi16(0x00ff);
54 while (in < (const __m128i *)limit) {
55 __m128i data1 = _mm_load_si128(in);
56 __m128i data2 = _mm_load_si128(in + 1);
// Even bytes: mask away the high byte of each 16-bit pair.
57 __m128i data1_lo = _mm_and_si128(data1, mask_lower_byte);
58 __m128i data2_lo = _mm_and_si128(data2, mask_lower_byte);
// Odd bytes: shift them down into the low byte of each pair.
59 __m128i data1_hi = _mm_srli_epi16(data1, 8);
60 __m128i data2_hi = _mm_srli_epi16(data2, 8);
// packus narrows the 16-bit lanes of both inputs into one 16-byte
// vector; every lane is already <= 0xff, so saturation never triggers.
61 __m128i lo = _mm_packus_epi16(data1_lo, data2_lo);
62 _mm_storeu_si128(out1, lo);
63 __m128i hi = _mm_packus_epi16(data1_hi, data2_hi);
64 _mm_storeu_si128(out2, hi);
75 __attribute__((target("avx2")))
// AVX2 variant of the core: each iteration reads 64 source bytes as two
// 32-byte vectors using non-temporal loads (_mm256_stream_load_si256,
// which requires 32-byte-aligned <src>), and writes even-indexed bytes
// to dest1 and odd-indexed bytes to dest2 with unaligned stores. The
// pointer advances inside the loop and the return statement are not
// visible in this chunk.
76 size_t memcpy_interleaved_fastpath_core(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, const uint8_t *limit)
79 const __m256i *__restrict in = (const __m256i *)src;
80 __m256i *__restrict out1 = (__m256i *)dest1;
81 __m256i *__restrict out2 = (__m256i *)dest2;
// Per-128-bit-lane byte shuffle: even source bytes go to the lane's low
// half, odd source bytes to its high half.
83 __m256i shuffle_cw = _mm256_set_epi8(
84 15, 13, 11, 9, 7, 5, 3, 1, 14, 12, 10, 8, 6, 4, 2, 0,
85 15, 13, 11, 9, 7, 5, 3, 1, 14, 12, 10, 8, 6, 4, 2, 0);
86 while (in < (const __m256i *)limit) {
87 // Note: For brevity, comments show lanes as if they were 2x64-bit (they're actually 2x128).
88 __m256i data1 = _mm256_stream_load_si256(in); // AaBbCcDd EeFfGgHh
89 __m256i data2 = _mm256_stream_load_si256(in + 1); // IiJjKkLl MmNnOoPp
91 data1 = _mm256_shuffle_epi8(data1, shuffle_cw); // ABCDabcd EFGHefgh
92 data2 = _mm256_shuffle_epi8(data2, shuffle_cw); // IJKLijkl MNOPmnop
// Regroup so each vector holds all its even bytes, then all its odd
// bytes (0b11011000 = lanes 0,2,1,3).
94 data1 = _mm256_permute4x64_epi64(data1, 0b11011000); // ABCDEFGH abcdefgh
95 data2 = _mm256_permute4x64_epi64(data2, 0b11011000); // IJKLMNOP ijklmnop
// Combine the even halves of both vectors (lo) and the odd halves (hi).
97 __m256i lo = _mm256_permute2x128_si256(data1, data2, 0b00100000);
98 __m256i hi = _mm256_permute2x128_si256(data1, data2, 0b00110001);
100 _mm256_storeu_si256(out1, lo);
101 _mm256_storeu_si256(out2, hi);
112 // Returns the number of bytes consumed.
// Prologue/dispatcher for the SIMD core: rounds the end of the range
// down to a 32-byte boundary, copies the ragged head with
// memcpy_interleaved_slow so that <src> becomes 32-byte aligned, and
// forces the remaining length to a multiple of 64 before handing the
// aligned middle to memcpy_interleaved_fastpath_core. NOTE(review):
// several lines are elided from this chunk (any early bail-outs, the
// dest/src advancement after the head copy, the tail trim, and where
// <consumed> is accumulated) -- consult the full file before modifying.
113 size_t memcpy_interleaved_fastpath(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, size_t n)
115 const uint8_t *limit = src + n;
118 // Align end to 32 bytes.
119 limit = (const uint8_t *)(intptr_t(limit) & ~31);
125 // Process [0,31] bytes, such that start gets aligned to 32 bytes.
126 const uint8_t *aligned_src = (const uint8_t *)(intptr_t(src + 31) & ~31);
127 if (aligned_src != src) {
128 size_t n2 = aligned_src - src;
129 memcpy_interleaved_slow(dest1, dest2, src, n2);
139 // Make the length a multiple of 64.
140 if (((limit - src) % 64) != 0) {
143 assert(((limit - src) % 64) == 0);
145 return consumed + memcpy_interleaved_fastpath_core(dest1, dest2, src, limit);
148 #endif // defined(HAS_MULTIVERSIONING)
// Public entry point: de-interleaves <n> bytes from <src>, sending
// even-indexed bytes to <dest1> and odd-indexed bytes to <dest2>.
150 void memcpy_interleaved(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, size_t n)
152 #if HAS_MULTIVERSIONING
// Let the SIMD fast path consume as much of the range as it can; every
// two consumed source bytes yield one byte in each destination, hence
// the /2 advances. (The matching src/n adjustments are elided from this
// chunk.)
153 size_t consumed = memcpy_interleaved_fastpath(dest1, dest2, src, n);
155 dest1 += consumed / 2;
156 dest2 += consumed / 2;
// Scalar cleanup for whatever the fast path did not consume (or for the
// whole range when multiversioning is unavailable).
164 memcpy_interleaved_slow(dest1, dest2, src, n);