crc_x86_arm_combined.cc

// Copyright 2022 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Hardware accelerated CRC32 computation on Intel and ARM architecture.

#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/prefetch.h"
#include "absl/crc/internal/cpu_detect.h"
#include "absl/crc/internal/crc32_x86_arm_combined_simd.h"
#include "absl/crc/internal/crc_internal.h"
#include "absl/memory/memory.h"
#include "absl/numeric/bits.h"

#if defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD) || \
    defined(ABSL_CRC_INTERNAL_HAVE_X86_SIMD)
#define ABSL_INTERNAL_CAN_USE_SIMD_CRC32C
#endif

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace crc_internal {

#if defined(ABSL_INTERNAL_CAN_USE_SIMD_CRC32C)

// Implementation details not exported outside of file
namespace {

// Some machines have CRC acceleration hardware.
// We can do a faster version of Extend() on such machines.
class CRC32AcceleratedX86ARMCombined : public CRC32 {
 public:
  CRC32AcceleratedX86ARMCombined() {}
  ~CRC32AcceleratedX86ARMCombined() override {}
  void ExtendByZeroes(uint32_t* crc, size_t length) const override;
  uint32_t ComputeZeroConstant(size_t length) const;

 private:
  CRC32AcceleratedX86ARMCombined(const CRC32AcceleratedX86ARMCombined&) =
      delete;
  CRC32AcceleratedX86ARMCombined& operator=(
      const CRC32AcceleratedX86ARMCombined&) = delete;
};

// Constants for switching between algorithms.
// Chosen by comparing speed at different powers of 2.
constexpr size_t kSmallCutoff = 256;
constexpr size_t kMediumCutoff = 2048;

#define ABSL_INTERNAL_STEP1(crc) \
  do { \
    crc = CRC32_u8(static_cast<uint32_t>(crc), *p++); \
  } while (0)
#define ABSL_INTERNAL_STEP2(crc) \
  do { \
    crc = \
        CRC32_u16(static_cast<uint32_t>(crc), absl::little_endian::Load16(p)); \
    p += 2; \
  } while (0)
#define ABSL_INTERNAL_STEP4(crc) \
  do { \
    crc = \
        CRC32_u32(static_cast<uint32_t>(crc), absl::little_endian::Load32(p)); \
    p += 4; \
  } while (0)
#define ABSL_INTERNAL_STEP8(crc, data) \
  do { \
    crc = CRC32_u64(static_cast<uint32_t>(crc), \
                    absl::little_endian::Load64(data)); \
    data += 8; \
  } while (0)
#define ABSL_INTERNAL_STEP8BY2(crc0, crc1, p0, p1) \
  do { \
    ABSL_INTERNAL_STEP8(crc0, p0); \
    ABSL_INTERNAL_STEP8(crc1, p1); \
  } while (0)
#define ABSL_INTERNAL_STEP8BY3(crc0, crc1, crc2, p0, p1, p2) \
  do { \
    ABSL_INTERNAL_STEP8(crc0, p0); \
    ABSL_INTERNAL_STEP8(crc1, p1); \
    ABSL_INTERNAL_STEP8(crc2, p2); \
  } while (0)

namespace {

uint32_t multiply(uint32_t a, uint32_t b) {
  V128 shifts = V128_From2x64(0, 1);
  V128 power = V128_From2x64(0, a);
  V128 crc = V128_From2x64(0, b);
  V128 res = V128_PMulLow(power, crc);

  // Combine crc values.
  res = V128_ShiftLeft64(res, shifts);
  return static_cast<uint32_t>(V128_Extract32<1>(res)) ^
         CRC32_u32(0, static_cast<uint32_t>(V128_Low64(res)));
}
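
// Illustrative sketch (not called anywhere): multiply() is the CRC-domain
// product used by ExtendByZeroes() and the stream-combination code below. It
// lets a crc state be shifted "past" a run of zero bytes with a single call
// instead of feeding the zeros through the CRC instruction. For a nonzero
// multiple-of-4 byte count n:
//   uint32_t shifted = multiply(crc_state, ComputeZeroConstant(n));
// gives the same result as extending crc_state by n zero bytes.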

// Powers of crc32c polynomial, for faster ExtendByZeroes.
// Verified against folly:
// folly/hash/detail/Crc32CombineDetail.cpp
constexpr uint32_t kCRC32CPowers[] = {
    0x82f63b78, 0x6ea2d55c, 0x18b8ea18, 0x510ac59a, 0xb82be955, 0xb8fdb1e7,
    0x88e56f72, 0x74c360a4, 0xe4172b16, 0x0d65762a, 0x35d73a62, 0x28461564,
    0xbf455269, 0xe2ea32dc, 0xfe7740e6, 0xf946610b, 0x3c204f8f, 0x538586e3,
    0x59726915, 0x734d5309, 0xbc1ac763, 0x7d0722cc, 0xd289cabe, 0xe94ca9bc,
    0x05b74f3f, 0xa51e1f42, 0x40000000, 0x20000000, 0x08000000, 0x00800000,
    0x00008000, 0x82f63b78, 0x6ea2d55c, 0x18b8ea18, 0x510ac59a, 0xb82be955,
    0xb8fdb1e7, 0x88e56f72, 0x74c360a4, 0xe4172b16, 0x0d65762a, 0x35d73a62,
    0x28461564, 0xbf455269, 0xe2ea32dc, 0xfe7740e6, 0xf946610b, 0x3c204f8f,
    0x538586e3, 0x59726915, 0x734d5309, 0xbc1ac763, 0x7d0722cc, 0xd289cabe,
    0xe94ca9bc, 0x05b74f3f, 0xa51e1f42, 0x40000000, 0x20000000, 0x08000000,
    0x00800000, 0x00008000,
};

}  // namespace

// Compute a magic constant, so that multiplying by it is the same as
// extending crc by length zeros.
uint32_t CRC32AcceleratedX86ARMCombined::ComputeZeroConstant(
    size_t length) const {
  // Lowest 2 bits are handled separately in ExtendByZeroes.
  length >>= 2;

  int index = absl::countr_zero(length);
  uint32_t prev = kCRC32CPowers[index];
  length &= length - 1;

  while (length) {
    // For each bit of length, extend by 2**n zeros.
    index = absl::countr_zero(length);
    prev = multiply(prev, kCRC32CPowers[index]);
    length &= length - 1;
  }
  return prev;
}
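
// Worked example of the decomposition above (illustration only): for
// length == 40 bytes, length >> 2 == 10 == 0b1010, so the constant returned is
//   multiply(kCRC32CPowers[1], kCRC32CPowers[3]);
// i.e. the constants for 8 and 32 zero bytes combined (8 + 32 == 40), since
// kCRC32CPowers[i] corresponds to 2**i 4-byte words of zeros.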

void CRC32AcceleratedX86ARMCombined::ExtendByZeroes(uint32_t* crc,
                                                    size_t length) const {
  uint32_t val = *crc;
  // Don't bother with multiplication for small length.
  switch (length & 3) {
    case 0:
      break;
    case 1:
      val = CRC32_u8(val, 0);
      break;
    case 2:
      val = CRC32_u16(val, 0);
      break;
    case 3:
      val = CRC32_u8(val, 0);
      val = CRC32_u16(val, 0);
      break;
  }
  if (length > 3) {
    val = multiply(val, ComputeZeroConstant(length));
  }
  *crc = val;
}
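
// Usage sketch (illustration only; it mirrors the stream combination in
// Extend() below): two crc states computed over adjacent blocks A and B,
// where B's state was seeded with 0, can be merged into the state for A
// followed by B:
//   uint32_t crc_a = ...;  // crc state after block A
//   uint32_t crc_b = ...;  // crc state of block B, seeded with 0
//   ExtendByZeroes(&crc_a, length_of_b);
//   crc_a ^= crc_b;        // crc state of A || B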

// Taken from Intel paper "Fast CRC Computation for iSCSI Polynomial Using CRC32
// Instruction"
// https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/crc-iscsi-polynomial-crc32-instruction-paper.pdf
// We only need every 4th value, because we unroll the loop by 4.
constexpr uint64_t kClmulConstants[] = {
    0x09e4addf8, 0x0ba4fc28e, 0x00d3b6092, 0x09e4addf8, 0x0ab7aff2a,
    0x102f9b8a2, 0x0b9e02b86, 0x00d3b6092, 0x1bf2e8b8a, 0x18266e456,
    0x0d270f1a2, 0x0ab7aff2a, 0x11eef4f8e, 0x083348832, 0x0dd7e3b0c,
    0x0b9e02b86, 0x0271d9844, 0x1b331e26a, 0x06b749fb2, 0x1bf2e8b8a,
    0x0e6fc4e6a, 0x0ce7f39f4, 0x0d7a4825c, 0x0d270f1a2, 0x026f6a60a,
    0x12ed0daac, 0x068bce87a, 0x11eef4f8e, 0x1329d9f7e, 0x0b3e32c28,
    0x0170076fa, 0x0dd7e3b0c, 0x1fae1cc66, 0x010746f3c, 0x086d8e4d2,
    0x0271d9844, 0x0b3af077a, 0x093a5f730, 0x1d88abd4a, 0x06b749fb2,
    0x0c9c8b782, 0x0cec3662e, 0x1ddffc5d4, 0x0e6fc4e6a, 0x168763fa6,
    0x0b0cd4768, 0x19b1afbc4, 0x0d7a4825c, 0x123888b7a, 0x00167d312,
    0x133d7a042, 0x026f6a60a, 0x000bcf5f6, 0x19d34af3a, 0x1af900c24,
    0x068bce87a, 0x06d390dec, 0x16cba8aca, 0x1f16a3418, 0x1329d9f7e,
    0x19fb2a8b0, 0x02178513a, 0x1a0f717c4, 0x0170076fa,
};

enum class CutoffStrategy {
  // Use 3 CRC streams to fold into 1.
  Fold3,
  // Unroll CRC instructions for 64 bytes.
  Unroll64CRC,
};

// Base class for CRC32AcceleratedX86ARMCombinedMultipleStreams containing the
// methods and data that don't need the template arguments.
class CRC32AcceleratedX86ARMCombinedMultipleStreamsBase
    : public CRC32AcceleratedX86ARMCombined {
 protected:
  // Update partialCRC with the crc of a 64-byte block. Calling
  // FinalizePclmulStream would produce a single crc checksum, but it is
  // expensive. PCLMULQDQ has a high latency, so we run 4 128-bit partial
  // checksums that can be reduced to a single value by FinalizePclmulStream
  // later. Computing crc for arbitrary polynomials with PCLMULQDQ is described
  // in the Intel paper "Fast CRC Computation for Generic Polynomials Using
  // PCLMULQDQ Instruction"
  // https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
  // We are applying it to the CRC32C polynomial.
  ABSL_ATTRIBUTE_ALWAYS_INLINE void Process64BytesPclmul(
      const uint8_t* p, V128* partialCRC) const {
    V128 loopMultiplicands = V128_Load(reinterpret_cast<const V128*>(k1k2));

    V128 partialCRC1 = partialCRC[0];
    V128 partialCRC2 = partialCRC[1];
    V128 partialCRC3 = partialCRC[2];
    V128 partialCRC4 = partialCRC[3];

    V128 tmp1 = V128_PMulHi(partialCRC1, loopMultiplicands);
    V128 tmp2 = V128_PMulHi(partialCRC2, loopMultiplicands);
    V128 tmp3 = V128_PMulHi(partialCRC3, loopMultiplicands);
    V128 tmp4 = V128_PMulHi(partialCRC4, loopMultiplicands);
    V128 data1 = V128_LoadU(reinterpret_cast<const V128*>(p + 16 * 0));
    V128 data2 = V128_LoadU(reinterpret_cast<const V128*>(p + 16 * 1));
    V128 data3 = V128_LoadU(reinterpret_cast<const V128*>(p + 16 * 2));
    V128 data4 = V128_LoadU(reinterpret_cast<const V128*>(p + 16 * 3));
    partialCRC1 = V128_PMulLow(partialCRC1, loopMultiplicands);
    partialCRC2 = V128_PMulLow(partialCRC2, loopMultiplicands);
    partialCRC3 = V128_PMulLow(partialCRC3, loopMultiplicands);
    partialCRC4 = V128_PMulLow(partialCRC4, loopMultiplicands);
    partialCRC1 = V128_Xor(tmp1, partialCRC1);
    partialCRC2 = V128_Xor(tmp2, partialCRC2);
    partialCRC3 = V128_Xor(tmp3, partialCRC3);
    partialCRC4 = V128_Xor(tmp4, partialCRC4);
    partialCRC1 = V128_Xor(partialCRC1, data1);
    partialCRC2 = V128_Xor(partialCRC2, data2);
    partialCRC3 = V128_Xor(partialCRC3, data3);
    partialCRC4 = V128_Xor(partialCRC4, data4);
    partialCRC[0] = partialCRC1;
    partialCRC[1] = partialCRC2;
    partialCRC[2] = partialCRC3;
    partialCRC[3] = partialCRC4;
  }
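
  // Sketch of the folding step above, following the Intel PCLMULQDQ paper
  // referenced in the comment on Process64BytesPclmul: for each of the 4
  // accumulators, the two 64-bit halves are carryless-multiplied by the k1k2
  // constants, the products are xored together, and the next 16 bytes of
  // input are xored in:
  //   partialCRC[i] = PMulHi(partialCRC[i], k1k2) ^
  //                   PMulLow(partialCRC[i], k1k2) ^ data_i;
  // Each accumulator advances in 64-byte strides, so k1k2 corresponds to
  // folding a value forward across 64 bytes of input.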

  // Reduce partialCRC produced by Process64BytesPclmul into a single value
  // that represents the crc checksum of all the processed bytes.
  ABSL_ATTRIBUTE_ALWAYS_INLINE uint64_t
  FinalizePclmulStream(V128* partialCRC) const {
    V128 partialCRC1 = partialCRC[0];
    V128 partialCRC2 = partialCRC[1];
    V128 partialCRC3 = partialCRC[2];
    V128 partialCRC4 = partialCRC[3];

    // Combine 4 vectors of partial crc into a single vector.
    V128 reductionMultiplicands =
        V128_Load(reinterpret_cast<const V128*>(k5k6));

    V128 low = V128_PMulLow(reductionMultiplicands, partialCRC1);
    V128 high = V128_PMulHi(reductionMultiplicands, partialCRC1);

    partialCRC1 = V128_Xor(low, high);
    partialCRC1 = V128_Xor(partialCRC1, partialCRC2);

    low = V128_PMulLow(reductionMultiplicands, partialCRC3);
    high = V128_PMulHi(reductionMultiplicands, partialCRC3);

    partialCRC3 = V128_Xor(low, high);
    partialCRC3 = V128_Xor(partialCRC3, partialCRC4);

    reductionMultiplicands = V128_Load(reinterpret_cast<const V128*>(k3k4));

    low = V128_PMulLow(reductionMultiplicands, partialCRC1);
    high = V128_PMulHi(reductionMultiplicands, partialCRC1);
    V128 fullCRC = V128_Xor(low, high);
    fullCRC = V128_Xor(fullCRC, partialCRC3);

    // Reduce fullCRC into scalar value.
    reductionMultiplicands = V128_Load(reinterpret_cast<const V128*>(k5k6));

    V128 mask = V128_Load(reinterpret_cast<const V128*>(kMask));

    V128 tmp = V128_PMul01(reductionMultiplicands, fullCRC);
    fullCRC = V128_ShiftRight<8>(fullCRC);
    fullCRC = V128_Xor(fullCRC, tmp);

    reductionMultiplicands = V128_Load(reinterpret_cast<const V128*>(k7k0));

    tmp = V128_ShiftRight<4>(fullCRC);
    fullCRC = V128_And(fullCRC, mask);
    fullCRC = V128_PMulLow(reductionMultiplicands, fullCRC);
    fullCRC = V128_Xor(tmp, fullCRC);

    reductionMultiplicands = V128_Load(reinterpret_cast<const V128*>(kPoly));

    tmp = V128_And(fullCRC, mask);
    tmp = V128_PMul01(reductionMultiplicands, tmp);
    tmp = V128_And(tmp, mask);
    tmp = V128_PMulLow(reductionMultiplicands, tmp);

    fullCRC = V128_Xor(tmp, fullCRC);

    return static_cast<uint64_t>(V128_Extract32<1>(fullCRC));
  }
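
  // Reading note (descriptive only): the reduction above has two stages. The
  // four 128-bit accumulators are first folded into one vector using the k5k6
  // and k3k4 constants, and that vector is then reduced to a 32-bit value via
  // the k7k0 and kPoly constants, a Barrett-style reduction as described in
  // the Intel PCLMULQDQ paper referenced above.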

  // Update crc with 64 bytes of data from p.
  ABSL_ATTRIBUTE_ALWAYS_INLINE uint64_t Process64BytesCRC(const uint8_t* p,
                                                          uint64_t crc) const {
    for (int i = 0; i < 8; i++) {
      crc =
          CRC32_u64(static_cast<uint32_t>(crc), absl::little_endian::Load64(p));
      p += 8;
    }
    return crc;
  }

  // Generated by crc32c_x86_test --crc32c_generate_constants=true
  // and verified against constants in linux kernel for S390:
  // https://github.com/torvalds/linux/blob/master/arch/s390/crypto/crc32le-vx.S
  alignas(16) static constexpr uint64_t k1k2[2] = {0x0740eef02, 0x09e4addf8};
  alignas(16) static constexpr uint64_t k3k4[2] = {0x1384aa63a, 0x0ba4fc28e};
  alignas(16) static constexpr uint64_t k5k6[2] = {0x0f20c0dfe, 0x14cd00bd6};
  alignas(16) static constexpr uint64_t k7k0[2] = {0x0dd45aab8, 0x000000000};
  alignas(16) static constexpr uint64_t kPoly[2] = {0x105ec76f0, 0x0dea713f1};
  alignas(16) static constexpr uint32_t kMask[4] = {~0u, 0u, ~0u, 0u};

  // Medium runs of bytes are broken into groups of kGroupsSmall blocks of the
  // same size. Each group is CRCed in parallel then combined at the end of
  // the block.
  static constexpr size_t kGroupsSmall = 3;
  // For large runs we use up to kMaxStreams blocks computed with the CRC
  // instruction, and up to kMaxStreams blocks computed with PCLMULQDQ, which
  // are combined in the end.
  static constexpr size_t kMaxStreams = 3;
};

#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
alignas(16) constexpr uint64_t
    CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::k1k2[2];
alignas(16) constexpr uint64_t
    CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::k3k4[2];
alignas(16) constexpr uint64_t
    CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::k5k6[2];
alignas(16) constexpr uint64_t
    CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::k7k0[2];
alignas(16) constexpr uint64_t
    CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::kPoly[2];
alignas(16) constexpr uint32_t
    CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::kMask[4];
constexpr size_t
    CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::kGroupsSmall;
constexpr size_t CRC32AcceleratedX86ARMCombinedMultipleStreamsBase::kMaxStreams;
#endif  // ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL

template <size_t num_crc_streams, size_t num_pclmul_streams,
          CutoffStrategy strategy>
class CRC32AcceleratedX86ARMCombinedMultipleStreams
    : public CRC32AcceleratedX86ARMCombinedMultipleStreamsBase {
  ABSL_ATTRIBUTE_HOT
  void Extend(uint32_t* crc, const void* bytes, size_t length) const override {
    static_assert(num_crc_streams >= 1 && num_crc_streams <= kMaxStreams,
                  "Invalid number of crc streams");
    static_assert(num_pclmul_streams >= 0 && num_pclmul_streams <= kMaxStreams,
                  "Invalid number of pclmul streams");
    const uint8_t* p = static_cast<const uint8_t*>(bytes);
    const uint8_t* e = p + length;
    uint32_t l = *crc;
    uint64_t l64;

    // We have dedicated instructions for 1, 2, 4 and 8 bytes.
    if (length & 8) {
      ABSL_INTERNAL_STEP8(l, p);
      length &= ~size_t{8};
    }
    if (length & 4) {
      ABSL_INTERNAL_STEP4(l);
      length &= ~size_t{4};
    }
    if (length & 2) {
      ABSL_INTERNAL_STEP2(l);
      length &= ~size_t{2};
    }
    if (length & 1) {
      ABSL_INTERNAL_STEP1(l);
      length &= ~size_t{1};
    }
    if (length == 0) {
      *crc = l;
      return;
    }
    // length is now a multiple of 16.

    // For small blocks just run a simple loop, because the cost of combining
    // multiple streams is significant.
    if (strategy != CutoffStrategy::Unroll64CRC) {
      if (length < kSmallCutoff) {
        while (length >= 16) {
          ABSL_INTERNAL_STEP8(l, p);
          ABSL_INTERNAL_STEP8(l, p);
          length -= 16;
        }
        *crc = l;
        return;
      }
    }

    // For medium blocks we run 3 crc streams and combine them as described in
    // the Intel paper above. Running a 4th stream doesn't help, because the
    // crc instruction has latency 3 and throughput 1.
    if (length < kMediumCutoff) {
      l64 = l;
      if (strategy == CutoffStrategy::Fold3) {
        uint64_t l641 = 0;
        uint64_t l642 = 0;
        const size_t blockSize = 32;
        size_t bs = static_cast<size_t>(e - p) / kGroupsSmall / blockSize;
        const uint8_t* p1 = p + bs * blockSize;
        const uint8_t* p2 = p1 + bs * blockSize;

        for (size_t i = 0; i + 1 < bs; ++i) {
          ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
          ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
          ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
          ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
          PrefetchToLocalCache(
              reinterpret_cast<const char*>(p + kPrefetchHorizonMedium));
          PrefetchToLocalCache(
              reinterpret_cast<const char*>(p1 + kPrefetchHorizonMedium));
          PrefetchToLocalCache(
              reinterpret_cast<const char*>(p2 + kPrefetchHorizonMedium));
        }
        // Don't run crc on last 8 bytes.
        ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
        ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
        ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
        ABSL_INTERNAL_STEP8BY2(l64, l641, p, p1);

        V128 magic = *(reinterpret_cast<const V128*>(kClmulConstants) + bs - 1);

        V128 tmp = V128_From2x64(0, l64);
        V128 res1 = V128_PMulLow(tmp, magic);
        tmp = V128_From2x64(0, l641);
        V128 res2 = V128_PMul10(tmp, magic);
        V128 x = V128_Xor(res1, res2);
        l64 = static_cast<uint64_t>(V128_Low64(x)) ^
              absl::little_endian::Load64(p2);
        l64 = CRC32_u64(static_cast<uint32_t>(l642), l64);

        p = p2 + 8;
      } else if (strategy == CutoffStrategy::Unroll64CRC) {
        while ((e - p) >= 64) {
          l64 = Process64BytesCRC(p, l64);
          p += 64;
        }
      }
    } else {
      // There is a lot of data; we can ignore combine costs and run all
      // requested streams (num_crc_streams + num_pclmul_streams), using
      // prefetch. CRC and PCLMULQDQ use different cpu execution units, so on
      // some cpus it makes sense to execute both of them for different
      // streams.

      // Point x at the first 8-byte aligned byte in the string.
      const uint8_t* x = RoundUp<8>(p);
      // Process bytes until p is 8-byte aligned, if that isn't past the end.
      while (p != x) {
        ABSL_INTERNAL_STEP1(l);
      }

      size_t bs = static_cast<size_t>(e - p) /
                  (num_crc_streams + num_pclmul_streams) / 64;
      const uint8_t* crc_streams[kMaxStreams];
      const uint8_t* pclmul_streams[kMaxStreams];
      // We are guaranteed to have at least one crc stream.
      crc_streams[0] = p;
      for (size_t i = 1; i < num_crc_streams; i++) {
        crc_streams[i] = crc_streams[i - 1] + bs * 64;
      }
      pclmul_streams[0] = crc_streams[num_crc_streams - 1] + bs * 64;
      for (size_t i = 1; i < num_pclmul_streams; i++) {
        pclmul_streams[i] = pclmul_streams[i - 1] + bs * 64;
      }

      // Per-stream crc sums.
      uint64_t l64_crc[kMaxStreams] = {l};
      uint64_t l64_pclmul[kMaxStreams] = {0};

      // Peel the first iteration, because the PCLMULQDQ streams need setup.
      for (size_t i = 0; i < num_crc_streams; i++) {
        l64_crc[i] = Process64BytesCRC(crc_streams[i], l64_crc[i]);
        crc_streams[i] += 16 * 4;
      }

      V128 partialCRC[kMaxStreams][4];
      for (size_t i = 0; i < num_pclmul_streams; i++) {
        partialCRC[i][0] = V128_LoadU(
            reinterpret_cast<const V128*>(pclmul_streams[i] + 16 * 0));
        partialCRC[i][1] = V128_LoadU(
            reinterpret_cast<const V128*>(pclmul_streams[i] + 16 * 1));
        partialCRC[i][2] = V128_LoadU(
            reinterpret_cast<const V128*>(pclmul_streams[i] + 16 * 2));
        partialCRC[i][3] = V128_LoadU(
            reinterpret_cast<const V128*>(pclmul_streams[i] + 16 * 3));
        pclmul_streams[i] += 16 * 4;
      }

      for (size_t i = 1; i < bs; i++) {
        // Prefetch data for next iterations.
        for (size_t j = 0; j < num_crc_streams; j++) {
          PrefetchToLocalCache(
              reinterpret_cast<const char*>(crc_streams[j] + kPrefetchHorizon));
        }
        for (size_t j = 0; j < num_pclmul_streams; j++) {
          PrefetchToLocalCache(reinterpret_cast<const char*>(pclmul_streams[j] +
                                                             kPrefetchHorizon));
        }

        // We process each stream in 64-byte blocks. This can be written as
        // for (int i = 0; i < num_pclmul_streams; i++) {
        //   Process64BytesPclmul(pclmul_streams[i], partialCRC[i]);
        //   pclmul_streams[i] += 16 * 4;
        // }
        // for (int i = 0; i < num_crc_streams; i++) {
        //   l64_crc[i] = Process64BytesCRC(crc_streams[i], l64_crc[i]);
        //   crc_streams[i] += 16 * 4;
        // }
        // But unrolling and interleaving PCLMULQDQ and CRC blocks manually
        // gives ~2% performance boost.
        l64_crc[0] = Process64BytesCRC(crc_streams[0], l64_crc[0]);
        crc_streams[0] += 16 * 4;
        if (num_pclmul_streams > 0) {
          Process64BytesPclmul(pclmul_streams[0], partialCRC[0]);
          pclmul_streams[0] += 16 * 4;
        }
        if (num_crc_streams > 1) {
          l64_crc[1] = Process64BytesCRC(crc_streams[1], l64_crc[1]);
          crc_streams[1] += 16 * 4;
        }
        if (num_pclmul_streams > 1) {
          Process64BytesPclmul(pclmul_streams[1], partialCRC[1]);
          pclmul_streams[1] += 16 * 4;
        }
        if (num_crc_streams > 2) {
          l64_crc[2] = Process64BytesCRC(crc_streams[2], l64_crc[2]);
          crc_streams[2] += 16 * 4;
        }
        if (num_pclmul_streams > 2) {
          Process64BytesPclmul(pclmul_streams[2], partialCRC[2]);
          pclmul_streams[2] += 16 * 4;
        }
      }

      // PCLMULQDQ-based streams require a special final step;
      // CRC-based ones don't.
      for (size_t i = 0; i < num_pclmul_streams; i++) {
        l64_pclmul[i] = FinalizePclmulStream(partialCRC[i]);
      }

      // Combine all streams into a single result.
      uint32_t magic = ComputeZeroConstant(bs * 64);
      l64 = l64_crc[0];
      for (size_t i = 1; i < num_crc_streams; i++) {
        l64 = multiply(static_cast<uint32_t>(l64), magic);
        l64 ^= l64_crc[i];
      }
      for (size_t i = 0; i < num_pclmul_streams; i++) {
        l64 = multiply(static_cast<uint32_t>(l64), magic);
        l64 ^= l64_pclmul[i];
      }

      // Update p.
      if (num_pclmul_streams > 0) {
        p = pclmul_streams[num_pclmul_streams - 1];
      } else {
        p = crc_streams[num_crc_streams - 1];
      }
    }
    l = static_cast<uint32_t>(l64);

    while ((e - p) >= 16) {
      ABSL_INTERNAL_STEP8(l, p);
      ABSL_INTERNAL_STEP8(l, p);
    }
    // Process the last few bytes.
    while (p != e) {
      ABSL_INTERNAL_STEP1(l);
    }

#undef ABSL_INTERNAL_STEP8BY3
#undef ABSL_INTERNAL_STEP8BY2
#undef ABSL_INTERNAL_STEP8
#undef ABSL_INTERNAL_STEP4
#undef ABSL_INTERNAL_STEP2
#undef ABSL_INTERNAL_STEP1

    *crc = l;
  }
};

}  // namespace

// Intel processors with SSE4.2 have an instruction for one particular
// 32-bit CRC polynomial: crc32c
CRCImpl* TryNewCRC32AcceleratedX86ARMCombined() {
  CpuType type = GetCpuType();
  switch (type) {
    case CpuType::kIntelHaswell:
    case CpuType::kAmdRome:
    case CpuType::kAmdNaples:
    case CpuType::kAmdMilan:
      return new CRC32AcceleratedX86ARMCombinedMultipleStreams<
          3, 1, CutoffStrategy::Fold3>();
    // PCLMULQDQ is fast, use combined PCLMULQDQ + CRC implementation.
    case CpuType::kIntelCascadelakeXeon:
    case CpuType::kIntelSkylakeXeon:
    case CpuType::kIntelBroadwell:
    case CpuType::kIntelSkylake:
      return new CRC32AcceleratedX86ARMCombinedMultipleStreams<
          3, 2, CutoffStrategy::Fold3>();
    // PCLMULQDQ is slow, don't use it.
    case CpuType::kIntelIvybridge:
    case CpuType::kIntelSandybridge:
    case CpuType::kIntelWestmere:
      return new CRC32AcceleratedX86ARMCombinedMultipleStreams<
          3, 0, CutoffStrategy::Fold3>();
    case CpuType::kArmNeoverseN1:
    case CpuType::kArmNeoverseN2:
    case CpuType::kArmNeoverseV1:
      return new CRC32AcceleratedX86ARMCombinedMultipleStreams<
          1, 1, CutoffStrategy::Unroll64CRC>();
    case CpuType::kAmpereSiryn:
      return new CRC32AcceleratedX86ARMCombinedMultipleStreams<
          3, 2, CutoffStrategy::Fold3>();
    case CpuType::kArmNeoverseV2:
      return new CRC32AcceleratedX86ARMCombinedMultipleStreams<
          1, 2, CutoffStrategy::Unroll64CRC>();
#if defined(__aarch64__)
    default:
      // Not all ARM processors support the needed instructions, so check here
      // before trying to use an accelerated implementation.
      if (SupportsArmCRC32PMULL()) {
        return new CRC32AcceleratedX86ARMCombinedMultipleStreams<
            1, 1, CutoffStrategy::Unroll64CRC>();
      } else {
        return nullptr;
      }
#else
    default:
      // Something else, play it safe and assume slow PCLMULQDQ.
      return new CRC32AcceleratedX86ARMCombinedMultipleStreams<
          3, 0, CutoffStrategy::Fold3>();
#endif
  }
}
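
// Usage sketch (hypothetical caller, illustration only; the real caller lives
// elsewhere in crc_internal): the factory returns nullptr when no suitable
// acceleration is available, so callers are expected to fall back to a
// portable implementation. The fallback name below is an assumption:
//   CRCImpl* impl = TryNewCRC32AcceleratedX86ARMCombined();
//   if (impl == nullptr) {
//     impl = NewPortableCRCImpl();  // hypothetical portable fallback
//   }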

std::vector<std::unique_ptr<CRCImpl>> NewCRC32AcceleratedX86ARMCombinedAll() {
  auto ret = std::vector<std::unique_ptr<CRCImpl>>();
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    1, 0, CutoffStrategy::Fold3>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    1, 1, CutoffStrategy::Fold3>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    1, 2, CutoffStrategy::Fold3>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    1, 3, CutoffStrategy::Fold3>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    2, 0, CutoffStrategy::Fold3>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    2, 1, CutoffStrategy::Fold3>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    2, 2, CutoffStrategy::Fold3>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    2, 3, CutoffStrategy::Fold3>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    3, 0, CutoffStrategy::Fold3>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    3, 1, CutoffStrategy::Fold3>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    3, 2, CutoffStrategy::Fold3>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    3, 3, CutoffStrategy::Fold3>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    1, 0, CutoffStrategy::Unroll64CRC>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    1, 1, CutoffStrategy::Unroll64CRC>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    1, 2, CutoffStrategy::Unroll64CRC>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    1, 3, CutoffStrategy::Unroll64CRC>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    2, 0, CutoffStrategy::Unroll64CRC>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    2, 1, CutoffStrategy::Unroll64CRC>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    2, 2, CutoffStrategy::Unroll64CRC>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    2, 3, CutoffStrategy::Unroll64CRC>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    3, 0, CutoffStrategy::Unroll64CRC>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    3, 1, CutoffStrategy::Unroll64CRC>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    3, 2, CutoffStrategy::Unroll64CRC>>());
  ret.push_back(absl::make_unique<CRC32AcceleratedX86ARMCombinedMultipleStreams<
                    3, 3, CutoffStrategy::Unroll64CRC>>());

  return ret;
}

#else  // !ABSL_INTERNAL_CAN_USE_SIMD_CRC32C

std::vector<std::unique_ptr<CRCImpl>> NewCRC32AcceleratedX86ARMCombinedAll() {
  return std::vector<std::unique_ptr<CRCImpl>>();
}

// no hardware acceleration available
CRCImpl* TryNewCRC32AcceleratedX86ARMCombined() { return nullptr; }

#endif

}  // namespace crc_internal
ABSL_NAMESPACE_END
}  // namespace absl