1 #if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
2 #define HAS_MULTIVERSIONING 1
8 #if HAS_MULTIVERSIONING
// TODO: Support stride.
// Scalar fallback: de-interleave <n> bytes from <src> into two streams,
// alternating bytes between dest1 and dest2 (src[0]->dest1, src[1]->dest2, ...),
// so each destination receives n/2 bytes. Used for unaligned heads/tails and
// on CPUs without AVX2.
// NOTE(review): the loop body is not visible in this chunk — presumably
// *dptr1++ = src[i]; *dptr2++ = src[i + 1]; — confirm against the full file.
void memcpy_interleaved_slow(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, size_t n)
uint8_t *dptr1 = dest1;
uint8_t *dptr2 = dest2;
// Step by 2: each iteration consumes one byte for each destination.
for (size_t i = 0; i < n; i += 2) {
27 #if HAS_MULTIVERSIONING
// GCC/Clang function multiversioning: two implementations share one name, and
// the runtime resolver dispatches to the "avx2" version when the CPU supports
// it, otherwise to "default". Returns the number of source bytes consumed.
// Preconditions (established by memcpy_interleaved_fastpath): src is 32-byte
// aligned and limit - src is a multiple of 64.
__attribute__((target("default")))
size_t memcpy_interleaved_fastpath_core(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, const uint8_t *limit);

__attribute__((target("avx2")))
size_t memcpy_interleaved_fastpath_core(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, const uint8_t *limit);
// Default (non-AVX2) variant of the fast-path core: there is no vectorized
// path, so it consumes nothing and leaves all work to the caller's slow path.
// NOTE(review): the body is elided in this chunk — presumably just `return 0;`
// — confirm against the full file.
__attribute__((target("default")))
size_t memcpy_interleaved_fastpath_core(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, const uint8_t *limit)
// No fast path supported unless we have AVX2.
// AVX2 variant: de-interleaves 64 source bytes per iteration into one 32-byte
// store for each destination. The caller guarantees 32-byte alignment of src
// (required by the non-temporal _mm256_stream_load_si256) and that
// limit - src is a multiple of 64.
__attribute__((target("avx2")))
size_t memcpy_interleaved_fastpath_core(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, const uint8_t *limit)
const __m256i *__restrict in = (const __m256i *)src;
__m256i *__restrict out1 = (__m256i *)dest1;
__m256i *__restrict out2 = (__m256i *)dest2;
// Byte shuffle control: within each 128-bit lane, gather the even bytes into
// the low 8 bytes and the odd bytes into the high 8 bytes.
__m256i shuffle_cw = _mm256_set_epi8(
15, 13, 11, 9, 7, 5, 3, 1, 14, 12, 10, 8, 6, 4, 2, 0,
15, 13, 11, 9, 7, 5, 3, 1, 14, 12, 10, 8, 6, 4, 2, 0);
while (in < (const __m256i *)limit) {
// Note: For brevity, comments show lanes as if they were 2x64-bit (they're actually 2x128).
__m256i data1 = _mm256_stream_load_si256(in);      // AaBbCcDd EeFfGgHh
__m256i data2 = _mm256_stream_load_si256(in + 1);  // IiJjKkLl MmNnOoPp
// Separate even bytes (uppercase) from odd bytes (lowercase) per lane.
data1 = _mm256_shuffle_epi8(data1, shuffle_cw);  // ABCDabcd EFGHefgh
data2 = _mm256_shuffle_epi8(data2, shuffle_cw);  // IJKLijkl MNOPmnop
// Regroup 64-bit chunks so all even bytes land in the low half.
data1 = _mm256_permute4x64_epi64(data1, 0b11011000);  // ABCDEFGH abcdefgh
data2 = _mm256_permute4x64_epi64(data2, 0b11011000);  // IJKLMNOP ijklmnop
// Combine low halves (even bytes -> dest1) and high halves (odd bytes -> dest2).
__m256i lo = _mm256_permute2x128_si256(data1, data2, 0b00100000);
__m256i hi = _mm256_permute2x128_si256(data1, data2, 0b00110001);
// Destinations are only guaranteed 16-byte-ish alignment, so use unaligned stores.
_mm256_storeu_si256(out1, lo);
_mm256_storeu_si256(out2, hi);
// NOTE(review): loop increments (in += 2; ++out1; ++out2), the closing brace,
// and the final `return` (bytes consumed, i.e. limit - src) are not visible in
// this chunk — confirm against the full file.
// Returns the number of bytes consumed.
// Fast-path driver: trims the range so the core sees a 32-byte-aligned start
// and a length that is a multiple of 64, handing the trimmed head (and,
// per the elided lines, presumably the trimmed tail accounting) to the
// slow scalar path. Total bytes consumed = head + core.
size_t memcpy_interleaved_fastpath(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, size_t n)
const uint8_t *limit = src + n;
// Align end to 32 bytes.
limit = (const uint8_t *)(intptr_t(limit) & ~31);
// Process [0,31] bytes, such that start gets aligned to 32 bytes.
const uint8_t *aligned_src = (const uint8_t *)(intptr_t(src + 31) & ~31);
if (aligned_src != src) {
size_t n2 = aligned_src - src;
memcpy_interleaved_slow(dest1, dest2, src, n2);
// NOTE(review): lines that advance src/dest1/dest2 and accumulate `consumed`
// after the head copy are elided in this chunk — confirm against the full file.
// Make the length a multiple of 64.
if (((limit - src) % 64) != 0) {
// NOTE(review): the limit adjustment inside this branch is elided here.
assert(((limit - src) % 64) == 0);
return consumed + memcpy_interleaved_fastpath_core(dest1, dest2, src, limit);
#endif  // HAS_MULTIVERSIONING
// Public entry point: copy <n> interleaved bytes from <src>, splitting them
// alternately into dest1 and dest2. Uses the SIMD fast path for the bulk when
// multiversioning is available, then finishes any remainder with the scalar
// slow path. `consumed` counts source bytes, so each destination advances by
// half of it.
void memcpy_interleaved(uint8_t *dest1, uint8_t *dest2, const uint8_t *src, size_t n)
#if HAS_MULTIVERSIONING
size_t consumed = memcpy_interleaved_fastpath(dest1, dest2, src, n);
dest1 += consumed / 2;
dest2 += consumed / 2;
// NOTE(review): the matching `src += consumed; n -= consumed;` and the #endif
// before the slow-path call are elided in this chunk — confirm against the
// full file; otherwise the slow path below would re-copy already-handled bytes.
memcpy_interleaved_slow(dest1, dest2, src, n);