// Copyright 2022 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef ABSL_CRC_INTERNAL_NON_TEMPORAL_MEMCPY_H_
#define ABSL_CRC_INTERNAL_NON_TEMPORAL_MEMCPY_H_

#ifdef _MSC_VER
#include <intrin.h>
#endif

#if defined(__SSE__) || defined(__AVX__)
// Pulls in both SSE and AVX intrinsics.
#include <immintrin.h>
#endif

#ifdef __aarch64__
#include "absl/crc/internal/non_temporal_arm_intrinsics.h"
#endif

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>

#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/optimization.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace crc_internal {

// This non-temporal memcpy does a regular load and a non-temporal store for
// the memory copy. It is compatible with both 16-byte aligned and unaligned
// addresses. If the data at the destination is not accessed immediately,
// using a non-temporal memcpy can save one DRAM load of the destination
// cacheline.
constexpr size_t kCacheLineSize = ABSL_CACHELINE_SIZE;

// If the objects overlap, the behavior is undefined. Uses regular memcpy
// instead of non-temporal memcpy if the required CPU intrinsics are
// unavailable at compile time.
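//
// Example (an illustrative sketch only; `src_buf` and `dst_buf` are
// hypothetical buffers whose contents will not be read again soon, and a
// <vector> include is assumed):
//
//   std::vector<uint8_t> src_buf(1 << 20, 0xAB);
//   std::vector<uint8_t> dst_buf(src_buf.size());
//   non_temporal_store_memcpy(dst_buf.data(), src_buf.data(), src_buf.size());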
inline void *non_temporal_store_memcpy(void *__restrict dst,
                                       const void *__restrict src,
                                       size_t len) {
#if defined(__SSE3__) || defined(__aarch64__) || \
    (defined(_MSC_VER) && defined(__AVX__))
  // This implementation requires SSE3.
  // MSVC cannot target SSE3 directly, but when MSVC targets AVX,
  // SSE3 support is implied.
  uint8_t *d = reinterpret_cast<uint8_t *>(dst);
  const uint8_t *s = reinterpret_cast<const uint8_t *>(src);

  // memcpy() the misaligned header. At the end of this if block, <d> is
  // aligned to a kCacheLineSize-byte cacheline boundary or <len> == 0.
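  //
  // Worked example (assuming kCacheLineSize == 64): if <d> starts at an
  // address whose low six bits are 0x13 (19), then 64 - 19 == 45 header bytes
  // are copied up front so the streaming loop begins on a cacheline boundary.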
  if (reinterpret_cast<uintptr_t>(d) & (kCacheLineSize - 1)) {
    uintptr_t bytes_before_alignment_boundary =
        kCacheLineSize -
        (reinterpret_cast<uintptr_t>(d) & (kCacheLineSize - 1));
    size_t header_len = (std::min)(bytes_before_alignment_boundary, len);
    assert(bytes_before_alignment_boundary < kCacheLineSize);
    memcpy(d, s, header_len);
    d += header_len;
    s += header_len;
    len -= header_len;
  }

  if (len >= kCacheLineSize) {
    _mm_sfence();
    __m128i *dst_cacheline = reinterpret_cast<__m128i *>(d);
    const __m128i *src_cacheline = reinterpret_cast<const __m128i *>(s);
    constexpr int kOpsPerCacheLine = kCacheLineSize / sizeof(__m128i);
    size_t loops = len / kCacheLineSize;

    while (len >= kCacheLineSize) {
      __m128i temp1, temp2, temp3, temp4;
      temp1 = _mm_lddqu_si128(src_cacheline + 0);
      temp2 = _mm_lddqu_si128(src_cacheline + 1);
      temp3 = _mm_lddqu_si128(src_cacheline + 2);
      temp4 = _mm_lddqu_si128(src_cacheline + 3);
      _mm_stream_si128(dst_cacheline + 0, temp1);
      _mm_stream_si128(dst_cacheline + 1, temp2);
      _mm_stream_si128(dst_cacheline + 2, temp3);
      _mm_stream_si128(dst_cacheline + 3, temp4);
      src_cacheline += kOpsPerCacheLine;
      dst_cacheline += kOpsPerCacheLine;
      len -= kCacheLineSize;
    }
    d += loops * kCacheLineSize;
    s += loops * kCacheLineSize;
    _mm_sfence();
  }

  // memcpy the tail.
  if (len) {
    memcpy(d, s, len);
  }
  return dst;
#else
  // Fallback to regular memcpy.
  return memcpy(dst, src, len);
#endif  // __SSE3__ || __aarch64__ || (_MSC_VER && __AVX__)
}

// If the objects overlap, the behavior is undefined. Uses regular memcpy
// instead of non-temporal memcpy if the required CPU intrinsics are
// unavailable at compile time.
#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::target) && \
    (defined(__x86_64__) || defined(__i386__))
[[gnu::target("avx")]]
#endif
inline void *non_temporal_store_memcpy_avx(void *__restrict dst,
                                           const void *__restrict src,
                                           size_t len) {
  // This function requires AVX. For clang and gcc we compile it with AVX even
  // if the translation unit isn't built with AVX support. This works because
  // we only select this implementation at runtime if the CPU supports AVX.
#if defined(__SSE3__) || (defined(_MSC_VER) && defined(__AVX__))
  uint8_t *d = reinterpret_cast<uint8_t *>(dst);
  const uint8_t *s = reinterpret_cast<const uint8_t *>(src);

  // memcpy() the misaligned header. At the end of this if block, <d> is
  // aligned to a kCacheLineSize-byte cacheline boundary or <len> == 0.
  if (reinterpret_cast<uintptr_t>(d) & (kCacheLineSize - 1)) {
    uintptr_t bytes_before_alignment_boundary =
        kCacheLineSize -
        (reinterpret_cast<uintptr_t>(d) & (kCacheLineSize - 1));
    size_t header_len = (std::min)(bytes_before_alignment_boundary, len);
    assert(bytes_before_alignment_boundary < kCacheLineSize);
    memcpy(d, s, header_len);
    d += header_len;
    s += header_len;
    len -= header_len;
  }

  if (len >= kCacheLineSize) {
    _mm_sfence();
    __m256i *dst_cacheline = reinterpret_cast<__m256i *>(d);
    const __m256i *src_cacheline = reinterpret_cast<const __m256i *>(s);
    constexpr int kOpsPerCacheLine = kCacheLineSize / sizeof(__m256i);
    size_t loops = len / kCacheLineSize;

    while (len >= kCacheLineSize) {
      __m256i temp1, temp2;
      temp1 = _mm256_lddqu_si256(src_cacheline + 0);
      temp2 = _mm256_lddqu_si256(src_cacheline + 1);
      _mm256_stream_si256(dst_cacheline + 0, temp1);
      _mm256_stream_si256(dst_cacheline + 1, temp2);
      src_cacheline += kOpsPerCacheLine;
      dst_cacheline += kOpsPerCacheLine;
      len -= kCacheLineSize;
    }
    d += loops * kCacheLineSize;
    s += loops * kCacheLineSize;
    _mm_sfence();
  }

  // memcpy the tail.
  if (len) {
    memcpy(d, s, len);
  }
  return dst;
#else
  return memcpy(dst, src, len);
#endif  // __SSE3__ || (_MSC_VER && __AVX__)
}
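
// Illustrative sketch of the runtime selection mentioned above. The dispatch
// helper below is hypothetical (it is not part of this header); a real caller
// would probe CPU support once, e.g. via cpuid on x86, and pick the AVX
// variant only when it is available:
//
//   void *CopyNonTemporal(void *dst, const void *src, size_t len) {
//     static const bool has_avx = CpuSupportsAvx();  // hypothetical probe
//     return has_avx ? non_temporal_store_memcpy_avx(dst, src, len)
//                    : non_temporal_store_memcpy(dst, src, len);
//   }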

}  // namespace crc_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_CRC_INTERNAL_NON_TEMPORAL_MEMCPY_H_