t1ha_bits.h

  1. /*
  2. * Copyright (c) 2016-2020 Positive Technologies, https://www.ptsecurity.com,
  3. * Fast Positive Hash.
  4. *
  5. * Portions Copyright (c) 2010-2020 Leonid Yuriev <leo@yuriev.ru>,
  6. * The 1Hippeus project (t1h).
  7. *
  8. * This software is provided 'as-is', without any express or implied
  9. * warranty. In no event will the authors be held liable for any damages
  10. * arising from the use of this software.
  11. *
  12. * Permission is granted to anyone to use this software for any purpose,
  13. * including commercial applications, and to alter it and redistribute it
  14. * freely, subject to the following restrictions:
  15. *
  16. * 1. The origin of this software must not be misrepresented; you must not
  17. * claim that you wrote the original software. If you use this software
  18. * in a product, an acknowledgement in the product documentation would be
  19. * appreciated but is not required.
  20. * 2. Altered source versions must be plainly marked as such, and must not be
  21. * misrepresented as being the original software.
  22. * 3. This notice may not be removed or altered from any source distribution.
  23. */
  24. /*
  25. * t1ha = { Fast Positive Hash, aka "Позитивный Хэш" }
  26. * by [Positive Technologies](https://www.ptsecurity.ru)
  27. *
  28. * Briefly, it is a 64-bit Hash Function:
  29. * 1. Created for 64-bit little-endian platforms, predominantly for x86_64,
  30. * but portable enough to run on any 64-bit CPU without penalty.
  31. * 2. In most cases up to 15% faster than City64, xxHash, mum-hash, metro-hash
  32. * and all other portable hash functions (which do not use specific
  33. * hardware tricks).
  34. * 3. Not suitable for cryptography.
  35. *
  36. * The Future will (be) Positive. Всё будет хорошо (everything will be fine).
  37. *
  38. * ACKNOWLEDGEMENT:
  39. * The t1ha was originally developed by Leonid Yuriev (Леонид Юрьев)
  40. * for The 1Hippeus project - zerocopy messaging in the spirit of Sparta!
  41. */
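/* Illustrative only: this header provides the internal building blocks shared
 * by the t1ha implementations, while the public one-shot entry points live in
 * ../t1ha.h. A minimal usage sketch, assuming the usual
 *   uint64_t t1ha2_atonce(const void *data, size_t length, uint64_t seed);
 * prototype from that header:
 *
 *   const char key[] = "example";
 *   uint64_t hash = t1ha2_atonce(key, sizeof(key) - 1, 42);
 */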
  42. #pragma once
  43. #if defined(_MSC_VER)
  44. #pragma warning(disable : 4201) /* nameless struct/union */
  45. #if _MSC_VER > 1800
  46. #pragma warning(disable : 4464) /* relative include path contains '..' */
  47. #endif /* 1800 */
  48. #endif /* MSVC */
  49. #include "../t1ha.h"
  50. #ifndef T1HA_USE_FAST_ONESHOT_READ
  51. /* Define it to 1 for slightly faster code.
  52. * Unfortunately this may trigger false-positive alarms from Valgrind,
  53. * AddressSanitizer and other similar tools.
  54. * So, define it to 0 for peace of mind if in doubt. */
  55. #define T1HA_USE_FAST_ONESHOT_READ 1
  56. #endif /* T1HA_USE_FAST_ONESHOT_READ */
  57. /*****************************************************************************/
  58. #include <assert.h> /* for assert() */
  59. #include <stdbool.h> /* for bool */
  60. #include <string.h> /* for memcpy() */
  61. #if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ && \
  62. __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__
  63. #error Unsupported byte order.
  64. #endif
  65. #define T1HA_UNALIGNED_ACCESS__UNABLE 0
  66. #define T1HA_UNALIGNED_ACCESS__SLOW 1
  67. #define T1HA_UNALIGNED_ACCESS__EFFICIENT 2
  68. #ifndef T1HA_SYS_UNALIGNED_ACCESS
  69. #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
  70. #define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT
  71. #elif defined(__ia32__)
  72. #define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT
  73. #elif defined(__e2k__)
  74. #define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__SLOW
  75. #elif defined(__ARM_FEATURE_UNALIGNED)
  76. #define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT
  77. #else
  78. #define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__UNABLE
  79. #endif
  80. #endif /* T1HA_SYS_UNALIGNED_ACCESS */
  81. #define ALIGNMENT_16 2
  82. #define ALIGNMENT_32 4
  83. #if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul
  84. #define ALIGNMENT_64 8
  85. #else
  86. #define ALIGNMENT_64 4
  87. #endif
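/* Note: on 32-bit targets 64-bit integers are frequently guaranteed only
 * 4-byte alignment by the ABI (e.g. classic i386), which is presumably why
 * ALIGNMENT_64 falls back to 4 when neither UINTPTR_MAX nor ULONG_MAX
 * indicates a 64-bit platform. */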
  88. #ifndef PAGESIZE
  89. #define PAGESIZE 4096
  90. #endif /* PAGESIZE */
  91. /***************************************************************************/
  92. #ifndef __has_builtin
  93. #define __has_builtin(x) (0)
  94. #endif
  95. #ifndef __has_warning
  96. #define __has_warning(x) (0)
  97. #endif
  98. #ifndef __has_feature
  99. #define __has_feature(x) (0)
  100. #endif
  101. #ifndef __has_extension
  102. #define __has_extension(x) (0)
  103. #endif
  104. #if __has_feature(address_sanitizer)
  105. #define __SANITIZE_ADDRESS__ 1
  106. #endif
  107. #ifndef __optimize
  108. #if defined(__clang__) && !__has_attribute(__optimize__)
  109. #define __optimize(ops)
  110. #elif defined(__GNUC__) || __has_attribute(__optimize__)
  111. #define __optimize(ops) __attribute__((__optimize__(ops)))
  112. #else
  113. #define __optimize(ops)
  114. #endif
  115. #endif /* __optimize */
  116. #ifndef __cold
  117. #if defined(__OPTIMIZE__)
  118. #if defined(__e2k__)
  119. #define __cold __optimize(1) __attribute__((__cold__))
  120. #elif defined(__clang__) && !__has_attribute(__cold__) && \
  121. __has_attribute(__section__)
  122. /* just put infrequently used functions in a separate section */
  123. #define __cold __attribute__((__section__("text.unlikely"))) __optimize("Os")
  124. #elif defined(__GNUC__) || __has_attribute(__cold__)
  125. #define __cold __attribute__((__cold__)) __optimize("Os")
  126. #else
  127. #define __cold __optimize("Os")
  128. #endif
  129. #else
  130. #define __cold
  131. #endif
  132. #endif /* __cold */
  133. #if __GNUC_PREREQ(4, 4) || defined(__clang__)
  134. #if defined(__ia32__) || defined(__e2k__)
  135. #include <x86intrin.h>
  136. #endif
  137. #if defined(__ia32__) && !defined(__cpuid_count)
  138. #include <cpuid.h>
  139. #endif
  140. #if defined(__e2k__)
  141. #include <e2kbuiltin.h>
  142. #endif
  143. #ifndef likely
  144. #define likely(cond) __builtin_expect(!!(cond), 1)
  145. #endif
  146. #ifndef unlikely
  147. #define unlikely(cond) __builtin_expect(!!(cond), 0)
  148. #endif
  149. #if __GNUC_PREREQ(4, 5) || __has_builtin(__builtin_unreachable)
  150. #define unreachable() __builtin_unreachable()
  151. #endif
  152. #define bswap64(v) __builtin_bswap64(v)
  153. #define bswap32(v) __builtin_bswap32(v)
  154. #if __GNUC_PREREQ(4, 8) || __has_builtin(__builtin_bswap16)
  155. #define bswap16(v) __builtin_bswap16(v)
  156. #endif
  157. #if !defined(__maybe_unused) && \
  158. (__GNUC_PREREQ(4, 3) || __has_attribute(__unused__))
  159. #define __maybe_unused __attribute__((__unused__))
  160. #endif
  161. #if !defined(__always_inline) && \
  162. (__GNUC_PREREQ(3, 2) || __has_attribute(__always_inline__))
  163. #define __always_inline __inline __attribute__((__always_inline__))
  164. #endif
  165. #if defined(__e2k__)
  166. #if __iset__ >= 3
  167. #define mul_64x64_high(a, b) __builtin_e2k_umulhd(a, b)
  168. #endif /* __iset__ >= 3 */
  169. #if __iset__ >= 5
  170. static __maybe_unused __always_inline unsigned
  171. e2k_add64carry_first(uint64_t base, uint64_t addend, uint64_t *sum) {
  172. *sum = base + addend;
  173. return (unsigned)__builtin_e2k_addcd_c(base, addend, 0);
  174. }
  175. #define add64carry_first(base, addend, sum) \
  176. e2k_add64carry_first(base, addend, sum)
  177. static __maybe_unused __always_inline unsigned
  178. e2k_add64carry_next(unsigned carry, uint64_t base, uint64_t addend,
  179. uint64_t *sum) {
  180. *sum = __builtin_e2k_addcd(base, addend, carry);
  181. return (unsigned)__builtin_e2k_addcd_c(base, addend, carry);
  182. }
  183. #define add64carry_next(carry, base, addend, sum) \
  184. e2k_add64carry_next(carry, base, addend, sum)
  185. static __maybe_unused __always_inline void e2k_add64carry_last(unsigned carry,
  186. uint64_t base,
  187. uint64_t addend,
  188. uint64_t *sum) {
  189. *sum = __builtin_e2k_addcd(base, addend, carry);
  190. }
  191. #define add64carry_last(carry, base, addend, sum) \
  192. e2k_add64carry_last(carry, base, addend, sum)
  193. #endif /* __iset__ >= 5 */
  194. #define fetch64_be_aligned(ptr) ((uint64_t)__builtin_e2k_ld_64s_be(ptr))
  195. #define fetch32_be_aligned(ptr) ((uint32_t)__builtin_e2k_ld_32u_be(ptr))
  196. #endif /* __e2k__ Elbrus */
  197. #elif defined(_MSC_VER)
  198. #if _MSC_FULL_VER < 190024234 && defined(_M_IX86)
  199. #pragma message( \
  200. "For AES-NI at least \"Microsoft C/C++ Compiler\" version 19.00.24234 (Visual Studio 2015 Update 3) is required.")
  201. #endif
  202. #if _MSC_FULL_VER < 191526730
  203. #pragma message( \
  204. "It is recommended to use \"Microsoft C/C++ Compiler\" version 19.15.26730 (Visual Studio 2017 15.8) or newer.")
  205. #endif
  206. #if _MSC_FULL_VER < 180040629
  207. #error At least "Microsoft C/C++ Compiler" version 18.00.40629 (Visual Studio 2013 Update 5) is required.
  208. #endif
  209. #pragma warning(push, 1)
  210. #include <intrin.h>
  211. #include <stdlib.h>
  212. #define likely(cond) (cond)
  213. #define unlikely(cond) (cond)
  214. #define unreachable() __assume(0)
  215. #define bswap64(v) _byteswap_uint64(v)
  216. #define bswap32(v) _byteswap_ulong(v)
  217. #define bswap16(v) _byteswap_ushort(v)
  218. #define rot64(v, s) _rotr64(v, s)
  219. #define rot32(v, s) _rotr(v, s)
  220. #define __always_inline __forceinline
  221. #if defined(_M_X64) || defined(_M_IA64)
  222. #pragma intrinsic(_umul128)
  223. #define mul_64x64_128(a, b, ph) _umul128(a, b, ph)
  224. #pragma intrinsic(_addcarry_u64)
  225. #define add64carry_first(base, addend, sum) _addcarry_u64(0, base, addend, sum)
  226. #define add64carry_next(carry, base, addend, sum) \
  227. _addcarry_u64(carry, base, addend, sum)
  228. #define add64carry_last(carry, base, addend, sum) \
  229. (void)_addcarry_u64(carry, base, addend, sum)
  230. #endif
  231. #if defined(_M_ARM64) || defined(_M_X64) || defined(_M_IA64)
  232. #pragma intrinsic(__umulh)
  233. #define mul_64x64_high(a, b) __umulh(a, b)
  234. #endif
  235. #if defined(_M_IX86)
  236. #pragma intrinsic(__emulu)
  237. #define mul_32x32_64(a, b) __emulu(a, b)
  238. #if _MSC_VER >= 1915 /* LY: workaround for SSA-optimizer bug */
  239. #pragma intrinsic(_addcarry_u32)
  240. #define add32carry_first(base, addend, sum) _addcarry_u32(0, base, addend, sum)
  241. #define add32carry_next(carry, base, addend, sum) \
  242. _addcarry_u32(carry, base, addend, sum)
  243. #define add32carry_last(carry, base, addend, sum) \
  244. (void)_addcarry_u32(carry, base, addend, sum)
  245. static __forceinline char
  246. msvc32_add64carry_first(uint64_t base, uint64_t addend, uint64_t *sum) {
  247. uint32_t *const sum32 = (uint32_t *)sum;
  248. const uint32_t base_32l = (uint32_t)base;
  249. const uint32_t base_32h = (uint32_t)(base >> 32);
  250. const uint32_t addend_32l = (uint32_t)addend;
  251. const uint32_t addend_32h = (uint32_t)(addend >> 32);
  252. return add32carry_next(add32carry_first(base_32l, addend_32l, sum32),
  253. base_32h, addend_32h, sum32 + 1);
  254. }
  255. #define add64carry_first(base, addend, sum) \
  256. msvc32_add64carry_first(base, addend, sum)
  257. static __forceinline char msvc32_add64carry_next(char carry, uint64_t base,
  258. uint64_t addend,
  259. uint64_t *sum) {
  260. uint32_t *const sum32 = (uint32_t *)sum;
  261. const uint32_t base_32l = (uint32_t)base;
  262. const uint32_t base_32h = (uint32_t)(base >> 32);
  263. const uint32_t addend_32l = (uint32_t)addend;
  264. const uint32_t addend_32h = (uint32_t)(addend >> 32);
  265. return add32carry_next(add32carry_next(carry, base_32l, addend_32l, sum32),
  266. base_32h, addend_32h, sum32 + 1);
  267. }
  268. #define add64carry_next(carry, base, addend, sum) \
  269. msvc32_add64carry_next(carry, base, addend, sum)
  270. static __forceinline void msvc32_add64carry_last(char carry, uint64_t base,
  271. uint64_t addend,
  272. uint64_t *sum) {
  273. uint32_t *const sum32 = (uint32_t *)sum;
  274. const uint32_t base_32l = (uint32_t)base;
  275. const uint32_t base_32h = (uint32_t)(base >> 32);
  276. const uint32_t addend_32l = (uint32_t)addend;
  277. const uint32_t addend_32h = (uint32_t)(addend >> 32);
  278. add32carry_last(add32carry_next(carry, base_32l, addend_32l, sum32), base_32h,
  279. addend_32h, sum32 + 1);
  280. }
  281. #define add64carry_last(carry, base, addend, sum) \
  282. msvc32_add64carry_last(carry, base, addend, sum)
  283. #endif /* _MSC_VER >= 1915 */
  284. #elif defined(_M_ARM)
  285. #define mul_32x32_64(a, b) _arm_umull(a, b)
  286. #endif
  287. #pragma warning(pop)
  288. #pragma warning(disable : 4514) /* 'xyz': unreferenced inline function \
  289. has been removed */
  290. #pragma warning(disable : 4710) /* 'xyz': function not inlined */
  291. #pragma warning(disable : 4711) /* function 'xyz' selected for \
  292. automatic inline expansion */
  293. #pragma warning(disable : 4127) /* conditional expression is constant */
  294. #pragma warning(disable : 4702) /* unreachable code */
  295. #endif /* Compiler */
  296. #ifndef likely
  297. #define likely(cond) (cond)
  298. #endif
  299. #ifndef unlikely
  300. #define unlikely(cond) (cond)
  301. #endif
  302. #ifndef __maybe_unused
  303. #define __maybe_unused
  304. #endif
  305. #ifndef __always_inline
  306. #define __always_inline __inline
  307. #endif
  308. #ifndef unreachable
  309. #define unreachable() \
  310. do { \
  311. } while (1)
  312. #endif
  313. #ifndef bswap64
  314. #if defined(bswap_64)
  315. #define bswap64 bswap_64
  316. #elif defined(__bswap_64)
  317. #define bswap64 __bswap_64
  318. #else
  319. static __always_inline uint64_t bswap64(uint64_t v) {
  320. return v << 56 | v >> 56 | ((v << 40) & UINT64_C(0x00ff000000000000)) |
  321. ((v << 24) & UINT64_C(0x0000ff0000000000)) |
  322. ((v << 8) & UINT64_C(0x000000ff00000000)) |
  323. ((v >> 8) & UINT64_C(0x00000000ff000000)) |
  324. ((v >> 24) & UINT64_C(0x0000000000ff0000)) |
  325. ((v >> 40) & UINT64_C(0x000000000000ff00));
  326. }
  327. #endif
  328. #endif /* bswap64 */
  329. #ifndef bswap32
  330. #if defined(bswap_32)
  331. #define bswap32 bswap_32
  332. #elif defined(__bswap_32)
  333. #define bswap32 __bswap_32
  334. #else
  335. static __always_inline uint32_t bswap32(uint32_t v) {
  336. return v << 24 | v >> 24 | ((v << 8) & UINT32_C(0x00ff0000)) |
  337. ((v >> 8) & UINT32_C(0x0000ff00));
  338. }
  339. #endif
  340. #endif /* bswap32 */
  341. #ifndef bswap16
  342. #if defined(bswap_16)
  343. #define bswap16 bswap_16
  344. #elif defined(__bswap_16)
  345. #define bswap16 __bswap_16
  346. #else
  347. static __always_inline uint16_t bswap16(uint16_t v) { return v << 8 | v >> 8; }
  348. #endif
  349. #endif /* bswap16 */
  350. #if defined(__ia32__) || \
  351. T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT
  352. /* The __builtin_assume_aligned() leads gcc/clang to load values into
  353. * registers, even when an operand could be used directly from memory.
  354. * This can lead to a shortage of registers and a significant slowdown.
  355. * Therefore avoid unnecessary use of __builtin_assume_aligned() for x86. */
  356. #define read_unaligned(ptr, bits) (*(const uint##bits##_t *__restrict)(ptr))
  357. #define read_aligned(ptr, bits) (*(const uint##bits##_t *__restrict)(ptr))
  358. #endif /* __ia32__ */
  359. #ifndef read_unaligned
  360. #if defined(__GNUC__) || __has_attribute(__packed__)
  361. typedef struct {
  362. uint8_t unaligned_8;
  363. uint16_t unaligned_16;
  364. uint32_t unaligned_32;
  365. uint64_t unaligned_64;
  366. } __attribute__((__packed__)) t1ha_unaligned_proxy;
  367. #define read_unaligned(ptr, bits) \
  368. (((const t1ha_unaligned_proxy *)((const uint8_t *)(ptr)-offsetof( \
  369. t1ha_unaligned_proxy, unaligned_##bits))) \
  370. ->unaligned_##bits)
  371. #elif defined(_MSC_VER)
  372. #pragma warning( \
  373. disable : 4235) /* nonstandard extension used: '__unaligned' \
  374. * keyword not supported on this architecture */
  375. #define read_unaligned(ptr, bits) (*(const __unaligned uint##bits##_t *)(ptr))
  376. #else
  377. #pragma pack(push, 1)
  378. typedef struct {
  379. uint8_t unaligned_8;
  380. uint16_t unaligned_16;
  381. uint32_t unaligned_32;
  382. uint64_t unaligned_64;
  383. } t1ha_unaligned_proxy;
  384. #pragma pack(pop)
  385. #define read_unaligned(ptr, bits) \
  386. (((const t1ha_unaligned_proxy *)((const uint8_t *)(ptr)-offsetof( \
  387. t1ha_unaligned_proxy, unaligned_##bits))) \
  388. ->unaligned_##bits)
  389. #endif
  390. #endif /* read_unaligned */
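/* How the proxy trick above works: read_unaligned(ptr, 32) rebases `ptr` by
 * -offsetof(t1ha_unaligned_proxy, unaligned_32) and reads the packed member,
 * so the member's address is exactly `ptr`, while the packed layout tells the
 * compiler the access may be misaligned. For comparison only, a well-known
 * equivalent is a fixed-size memcpy, which modern compilers typically lower
 * to a single load:
 *
 *   uint32_t value;
 *   memcpy(&value, ptr, sizeof(value)); // defined for any alignment
 */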
  391. #ifndef read_aligned
  392. #if __GNUC_PREREQ(4, 8) || __has_builtin(__builtin_assume_aligned)
  393. #define read_aligned(ptr, bits) \
  394. (*(const uint##bits##_t *)__builtin_assume_aligned(ptr, ALIGNMENT_##bits))
  395. #elif (__GNUC_PREREQ(3, 3) || __has_attribute(__aligned__)) && \
  396. !defined(__clang__)
  397. #define read_aligned(ptr, bits) \
  398. (*(const uint##bits##_t \
  399. __attribute__((__aligned__(ALIGNMENT_##bits))) *)(ptr))
  400. #elif __has_attribute(__assume_aligned__)
  401. static __always_inline const
  402. uint16_t *__attribute__((__assume_aligned__(ALIGNMENT_16)))
  403. cast_aligned_16(const void *ptr) {
  404. return (const uint16_t *)ptr;
  405. }
  406. static __always_inline const
  407. uint32_t *__attribute__((__assume_aligned__(ALIGNMENT_32)))
  408. cast_aligned_32(const void *ptr) {
  409. return (const uint32_t *)ptr;
  410. }
  411. static __always_inline const
  412. uint64_t *__attribute__((__assume_aligned__(ALIGNMENT_64)))
  413. cast_aligned_64(const void *ptr) {
  414. return (const uint64_t *)ptr;
  415. }
  416. #define read_aligned(ptr, bits) (*cast_aligned_##bits(ptr))
  417. #elif defined(_MSC_VER)
  418. #define read_aligned(ptr, bits) \
  419. (*(const __declspec(align(ALIGNMENT_##bits)) uint##bits##_t *)(ptr))
  420. #else
  421. #define read_aligned(ptr, bits) (*(const uint##bits##_t *)(ptr))
  422. #endif
  423. #endif /* read_aligned */
  424. #ifndef prefetch
  425. #if (__GNUC_PREREQ(4, 0) || __has_builtin(__builtin_prefetch)) && \
  426. !defined(__ia32__)
  427. #define prefetch(ptr) __builtin_prefetch(ptr)
  428. #elif defined(_M_ARM64) || defined(_M_ARM)
  429. #define prefetch(ptr) __prefetch(ptr)
  430. #else
  431. #define prefetch(ptr) \
  432. do { \
  433. (void)(ptr); \
  434. } while (0)
  435. #endif
  436. #endif /* prefetch */
  437. #if __has_warning("-Wconstant-logical-operand")
  438. #if defined(__clang__)
  439. #pragma clang diagnostic ignored "-Wconstant-logical-operand"
  440. #elif defined(__GNUC__)
  441. #pragma GCC diagnostic ignored "-Wconstant-logical-operand"
  442. #else
  443. #pragma warning disable "constant-logical-operand"
  444. #endif
  445. #endif /* -Wconstant-logical-operand */
  446. #if __has_warning("-Wtautological-pointer-compare")
  447. #if defined(__clang__)
  448. #pragma clang diagnostic ignored "-Wtautological-pointer-compare"
  449. #elif defined(__GNUC__)
  450. #pragma GCC diagnostic ignored "-Wtautological-pointer-compare"
  451. #else
  452. #pragma warning disable "tautological-pointer-compare"
  453. #endif
  454. #endif /* -Wtautological-pointer-compare */
  455. /***************************************************************************/
  456. #if __GNUC_PREREQ(4, 0)
  457. #pragma GCC visibility push(hidden)
  458. #endif /* __GNUC_PREREQ(4,0) */
  459. /*---------------------------------------------------------- Little Endian */
  460. #ifndef fetch16_le_aligned
  461. static __maybe_unused __always_inline uint16_t
  462. fetch16_le_aligned(const void *v) {
  463. assert(((uintptr_t)v) % ALIGNMENT_16 == 0);
  464. #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  465. return read_aligned(v, 16);
  466. #else
  467. return bswap16(read_aligned(v, 16));
  468. #endif
  469. }
  470. #endif /* fetch16_le_aligned */
  471. #ifndef fetch16_le_unaligned
  472. static __maybe_unused __always_inline uint16_t
  473. fetch16_le_unaligned(const void *v) {
  474. #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
  475. const uint8_t *p = (const uint8_t *)v;
  476. return p[0] | (uint16_t)p[1] << 8;
  477. #elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  478. return read_unaligned(v, 16);
  479. #else
  480. return bswap16(read_unaligned(v, 16));
  481. #endif
  482. }
  483. #endif /* fetch16_le_unaligned */
  484. #ifndef fetch32_le_aligned
  485. static __maybe_unused __always_inline uint32_t
  486. fetch32_le_aligned(const void *v) {
  487. assert(((uintptr_t)v) % ALIGNMENT_32 == 0);
  488. #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  489. return read_aligned(v, 32);
  490. #else
  491. return bswap32(read_aligned(v, 32));
  492. #endif
  493. }
  494. #endif /* fetch32_le_aligned */
  495. #ifndef fetch32_le_unaligned
  496. static __maybe_unused __always_inline uint32_t
  497. fetch32_le_unaligned(const void *v) {
  498. #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
  499. return fetch16_le_unaligned(v) |
  500. (uint32_t)fetch16_le_unaligned((const uint8_t *)v + 2) << 16;
  501. #elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  502. return read_unaligned(v, 32);
  503. #else
  504. return bswap32(read_unaligned(v, 32));
  505. #endif
  506. }
  507. #endif /* fetch32_le_unaligned */
  508. #ifndef fetch64_le_aligned
  509. static __maybe_unused __always_inline uint64_t
  510. fetch64_le_aligned(const void *v) {
  511. assert(((uintptr_t)v) % ALIGNMENT_64 == 0);
  512. #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  513. return read_aligned(v, 64);
  514. #else
  515. return bswap64(read_aligned(v, 64));
  516. #endif
  517. }
  518. #endif /* fetch64_le_aligned */
  519. #ifndef fetch64_le_unaligned
  520. static __maybe_unused __always_inline uint64_t
  521. fetch64_le_unaligned(const void *v) {
  522. #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
  523. return fetch32_le_unaligned(v) |
  524. (uint64_t)fetch32_le_unaligned((const uint8_t *)v + 4) << 32;
  525. #elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  526. return read_unaligned(v, 64);
  527. #else
  528. return bswap64(read_unaligned(v, 64));
  529. #endif
  530. }
  531. #endif /* fetch64_le_unaligned */
  532. static __maybe_unused __always_inline uint64_t tail64_le_aligned(const void *v,
  533. size_t tail) {
  534. const uint8_t *const p = (const uint8_t *)v;
  535. #if T1HA_USE_FAST_ONESHOT_READ && !defined(__SANITIZE_ADDRESS__)
  536. /* We can perform a 'oneshot' read, which is a little bit faster. */
  537. const unsigned shift = ((8 - tail) & 7) << 3;
  538. return fetch64_le_aligned(p) & ((~UINT64_C(0)) >> shift);
  539. #else
  540. uint64_t r = 0;
  541. switch (tail & 7) {
  542. default:
  543. unreachable();
  544. /* fall through */
  545. #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  546. /* For most CPUs this code is better when no byte reordering is needed. */
  547. case 0:
  548. return fetch64_le_aligned(p);
  549. case 7:
  550. r = (uint64_t)p[6] << 8;
  551. /* fall through */
  552. case 6:
  553. r += p[5];
  554. r <<= 8;
  555. /* fall through */
  556. case 5:
  557. r += p[4];
  558. r <<= 32;
  559. /* fall through */
  560. case 4:
  561. return r + fetch32_le_aligned(p);
  562. case 3:
  563. r = (uint64_t)p[2] << 16;
  564. /* fall through */
  565. case 2:
  566. return r + fetch16_le_aligned(p);
  567. case 1:
  568. return p[0];
  569. #else
  570. case 0:
  571. r = p[7] << 8;
  572. /* fall through */
  573. case 7:
  574. r += p[6];
  575. r <<= 8;
  576. /* fall through */
  577. case 6:
  578. r += p[5];
  579. r <<= 8;
  580. /* fall through */
  581. case 5:
  582. r += p[4];
  583. r <<= 8;
  584. /* fall through */
  585. case 4:
  586. r += p[3];
  587. r <<= 8;
  588. /* fall through */
  589. case 3:
  590. r += p[2];
  591. r <<= 8;
  592. /* fall through */
  593. case 2:
  594. r += p[1];
  595. r <<= 8;
  596. /* fall through */
  597. case 1:
  598. return r + p[0];
  599. #endif
  600. }
  601. #endif /* T1HA_USE_FAST_ONESHOT_READ */
  602. }
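/* A worked example of the 'oneshot' masking above, for the little-endian
 * aligned case: with tail == 3 the shift is ((8 - 3) & 7) << 3 == 40, and
 * ~UINT64_C(0) >> 40 == 0xFFFFFF, so the full 8-byte load is masked down to
 * the three valid low-order bytes. The load itself may touch bytes past the
 * tail, which is exactly why sanitizers can complain when
 * T1HA_USE_FAST_ONESHOT_READ is enabled. */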
  603. #if T1HA_USE_FAST_ONESHOT_READ && \
  604. T1HA_SYS_UNALIGNED_ACCESS != T1HA_UNALIGNED_ACCESS__UNABLE && \
  605. defined(PAGESIZE) && PAGESIZE > 42 && !defined(__SANITIZE_ADDRESS__)
  606. #define can_read_underside(ptr, size) \
  607. (((PAGESIZE - (size)) & (uintptr_t)(ptr)) != 0)
  608. #endif /* T1HA_USE_FAST_ONESHOT_READ */
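/* What can_read_underside() checks, with size == 8 and a power-of-two
 * PAGESIZE: (PAGESIZE - 8) masks the page-offset bits above the lowest three,
 * so the expression is non-zero exactly when `ptr` lies at least 8 bytes past
 * the start of its page. In that case the 8-byte read may safely start up to
 * 7 bytes below `ptr` without touching the preceding (possibly unmapped)
 * page. */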
  609. static __maybe_unused __always_inline uint64_t
  610. tail64_le_unaligned(const void *v, size_t tail) {
  611. const uint8_t *p = (const uint8_t *)v;
  612. #if defined(can_read_underside) && \
  613. (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul)
  614. /* On some systems (e.g. x86_64) we can perform a 'oneshot' read, which
  615. * is a little bit faster. Thanks to Marcin Żukowski <marcin.zukowski@gmail.com>
  616. * for the reminder. */
  617. const unsigned offset = (8 - tail) & 7;
  618. const unsigned shift = offset << 3;
  619. if (likely(can_read_underside(p, 8))) {
  620. p -= offset;
  621. return fetch64_le_unaligned(p) >> shift;
  622. }
  623. return fetch64_le_unaligned(p) & ((~UINT64_C(0)) >> shift);
  624. #else
  625. uint64_t r = 0;
  626. switch (tail & 7) {
  627. default:
  628. unreachable();
  629. /* fall through */
  630. #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT && \
  631. __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  632. /* For most CPUs this code is better when no copying
  633. * for alignment or byte reordering is needed. */
  634. case 0:
  635. return fetch64_le_unaligned(p);
  636. case 7:
  637. r = (uint64_t)p[6] << 8;
  638. /* fall through */
  639. case 6:
  640. r += p[5];
  641. r <<= 8;
  642. /* fall through */
  643. case 5:
  644. r += p[4];
  645. r <<= 32;
  646. /* fall through */
  647. case 4:
  648. return r + fetch32_le_unaligned(p);
  649. case 3:
  650. r = (uint64_t)p[2] << 16;
  651. /* fall through */
  652. case 2:
  653. return r + fetch16_le_unaligned(p);
  654. case 1:
  655. return p[0];
  656. #else
  657. /* For most CPUs this code is better than
  658. * copying for alignment and/or byte reordering. */
  659. case 0:
  660. r = p[7] << 8;
  661. /* fall through */
  662. case 7:
  663. r += p[6];
  664. r <<= 8;
  665. /* fall through */
  666. case 6:
  667. r += p[5];
  668. r <<= 8;
  669. /* fall through */
  670. case 5:
  671. r += p[4];
  672. r <<= 8;
  673. /* fall through */
  674. case 4:
  675. r += p[3];
  676. r <<= 8;
  677. /* fall through */
  678. case 3:
  679. r += p[2];
  680. r <<= 8;
  681. /* fall through */
  682. case 2:
  683. r += p[1];
  684. r <<= 8;
  685. /* fall through */
  686. case 1:
  687. return r + p[0];
  688. #endif
  689. }
  690. #endif /* can_read_underside */
  691. }
  692. /*------------------------------------------------------------- Big Endian */
  693. #ifndef fetch16_be_aligned
  694. static __maybe_unused __always_inline uint16_t
  695. fetch16_be_aligned(const void *v) {
  696. assert(((uintptr_t)v) % ALIGNMENT_16 == 0);
  697. #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  698. return read_aligned(v, 16);
  699. #else
  700. return bswap16(read_aligned(v, 16));
  701. #endif
  702. }
  703. #endif /* fetch16_be_aligned */
  704. #ifndef fetch16_be_unaligned
  705. static __maybe_unused __always_inline uint16_t
  706. fetch16_be_unaligned(const void *v) {
  707. #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
  708. const uint8_t *p = (const uint8_t *)v;
  709. return (uint16_t)p[0] << 8 | p[1];
  710. #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  711. return read_unaligned(v, 16);
  712. #else
  713. return bswap16(read_unaligned(v, 16));
  714. #endif
  715. }
  716. #endif /* fetch16_be_unaligned */
  717. #ifndef fetch32_be_aligned
  718. static __maybe_unused __always_inline uint32_t
  719. fetch32_be_aligned(const void *v) {
  720. assert(((uintptr_t)v) % ALIGNMENT_32 == 0);
  721. #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  722. return read_aligned(v, 32);
  723. #else
  724. return bswap32(read_aligned(v, 32));
  725. #endif
  726. }
  727. #endif /* fetch32_be_aligned */
  728. #ifndef fetch32_be_unaligned
  729. static __maybe_unused __always_inline uint32_t
  730. fetch32_be_unaligned(const void *v) {
  731. #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
  732. return (uint32_t)fetch16_be_unaligned(v) << 16 |
  733. fetch16_be_unaligned((const uint8_t *)v + 2);
  734. #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  735. return read_unaligned(v, 32);
  736. #else
  737. return bswap32(read_unaligned(v, 32));
  738. #endif
  739. }
  740. #endif /* fetch32_be_unaligned */
  741. #ifndef fetch64_be_aligned
  742. static __maybe_unused __always_inline uint64_t
  743. fetch64_be_aligned(const void *v) {
  744. assert(((uintptr_t)v) % ALIGNMENT_64 == 0);
  745. #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  746. return read_aligned(v, 64);
  747. #else
  748. return bswap64(read_aligned(v, 64));
  749. #endif
  750. }
  751. #endif /* fetch64_be_aligned */
  752. #ifndef fetch64_be_unaligned
  753. static __maybe_unused __always_inline uint64_t
  754. fetch64_be_unaligned(const void *v) {
  755. #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
  756. return (uint64_t)fetch32_be_unaligned(v) << 32 |
  757. fetch32_be_unaligned((const uint8_t *)v + 4);
  758. #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  759. return read_unaligned(v, 64);
  760. #else
  761. return bswap64(read_unaligned(v, 64));
  762. #endif
  763. }
  764. #endif /* fetch64_be_unaligned */
  765. static __maybe_unused __always_inline uint64_t tail64_be_aligned(const void *v,
  766. size_t tail) {
  767. const uint8_t *const p = (const uint8_t *)v;
  768. #if T1HA_USE_FAST_ONESHOT_READ && !defined(__SANITIZE_ADDRESS__)
  769. /* We can perform a 'oneshot' read, which is a little bit faster. */
  770. const unsigned shift = ((8 - tail) & 7) << 3;
  771. return fetch64_be_aligned(p) >> shift;
  772. #else
  773. switch (tail & 7) {
  774. default:
  775. unreachable();
  776. /* fall through */
  777. #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  778. /* For most CPUs this code is better when no byte reordering is needed. */
  779. case 1:
  780. return p[0];
  781. case 2:
  782. return fetch16_be_aligned(p);
  783. case 3:
  784. return (uint32_t)fetch16_be_aligned(p) << 8 | p[2];
  785. case 4:
  786. return fetch32_be_aligned(p);
  787. case 5:
  788. return (uint64_t)fetch32_be_aligned(p) << 8 | p[4];
  789. case 6:
  790. return (uint64_t)fetch32_be_aligned(p) << 16 | fetch16_be_aligned(p + 4);
  791. case 7:
  792. return (uint64_t)fetch32_be_aligned(p) << 24 |
  793. (uint32_t)fetch16_be_aligned(p + 4) << 8 | p[6];
  794. case 0:
  795. return fetch64_be_aligned(p);
  796. #else
  797. case 1:
  798. return p[0];
  799. case 2:
  800. return p[1] | (uint32_t)p[0] << 8;
  801. case 3:
  802. return p[2] | (uint32_t)p[1] << 8 | (uint32_t)p[0] << 16;
  803. case 4:
  804. return p[3] | (uint32_t)p[2] << 8 | (uint32_t)p[1] << 16 |
  805. (uint32_t)p[0] << 24;
  806. case 5:
  807. return p[4] | (uint32_t)p[3] << 8 | (uint32_t)p[2] << 16 |
  808. (uint32_t)p[1] << 24 | (uint64_t)p[0] << 32;
  809. case 6:
  810. return p[5] | (uint32_t)p[4] << 8 | (uint32_t)p[3] << 16 |
  811. (uint32_t)p[2] << 24 | (uint64_t)p[1] << 32 | (uint64_t)p[0] << 40;
  812. case 7:
  813. return p[6] | (uint32_t)p[5] << 8 | (uint32_t)p[4] << 16 |
  814. (uint32_t)p[3] << 24 | (uint64_t)p[2] << 32 | (uint64_t)p[1] << 40 |
  815. (uint64_t)p[0] << 48;
  816. case 0:
  817. return p[7] | (uint32_t)p[6] << 8 | (uint32_t)p[5] << 16 |
  818. (uint32_t)p[4] << 24 | (uint64_t)p[3] << 32 | (uint64_t)p[2] << 40 |
  819. (uint64_t)p[1] << 48 | (uint64_t)p[0] << 56;
  820. #endif
  821. }
  822. #endif /* T1HA_USE_FAST_ONESHOT_READ */
  823. }
  824. static __maybe_unused __always_inline uint64_t
  825. tail64_be_unaligned(const void *v, size_t tail) {
  826. const uint8_t *p = (const uint8_t *)v;
  827. #if defined(can_read_underside) && \
  828. (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul)
  829. /* On some systems (e.g. x86_64) we can perform a 'oneshot' read, which
  830. * is a little bit faster. Thanks to Marcin Żukowski <marcin.zukowski@gmail.com>
  831. * for the reminder. */
  832. const unsigned offset = (8 - tail) & 7;
  833. const unsigned shift = offset << 3;
  834. if (likely(can_read_underside(p, 8))) {
  835. p -= offset;
  836. return fetch64_be_unaligned(p) & ((~UINT64_C(0)) >> shift);
  837. }
  838. return fetch64_be_unaligned(p) >> shift;
  839. #else
  840. switch (tail & 7) {
  841. default:
  842. unreachable();
  843. /* fall through */
  844. #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT && \
  845. __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  846. /* For most CPUs this code is better when no copying
  847. * for alignment or byte reordering is needed. */
  848. case 1:
  849. return p[0];
  850. case 2:
  851. return fetch16_be_unaligned(p);
  852. case 3:
  853. return (uint32_t)fetch16_be_unaligned(p) << 8 | p[2];
  854. case 4:
  855. return fetch32_be_unaligned(p);
  856. case 5:
  857. return (uint64_t)fetch32_be_unaligned(p) << 8 | p[4];
  858. case 6:
  859. return (uint64_t)fetch32_be_unaligned(p) << 16 |
  860. fetch16_be_unaligned(p + 4);
  861. case 7:
  862. return (uint64_t)fetch32_be_unaligned(p) << 24 |
  863. (uint32_t)fetch16_be_unaligned(p + 4) << 8 | p[6];
  864. case 0:
  865. return fetch64_be_unaligned(p);
  866. #else
  867. /* For most CPUs this code is better than
  868. * copying for alignment and/or byte reordering. */
  869. case 1:
  870. return p[0];
  871. case 2:
  872. return p[1] | (uint32_t)p[0] << 8;
  873. case 3:
  874. return p[2] | (uint32_t)p[1] << 8 | (uint32_t)p[0] << 16;
  875. case 4:
  876. return p[3] | (uint32_t)p[2] << 8 | (uint32_t)p[1] << 16 |
  877. (uint32_t)p[0] << 24;
  878. case 5:
  879. return p[4] | (uint32_t)p[3] << 8 | (uint32_t)p[2] << 16 |
  880. (uint32_t)p[1] << 24 | (uint64_t)p[0] << 32;
  881. case 6:
  882. return p[5] | (uint32_t)p[4] << 8 | (uint32_t)p[3] << 16 |
  883. (uint32_t)p[2] << 24 | (uint64_t)p[1] << 32 | (uint64_t)p[0] << 40;
  884. case 7:
  885. return p[6] | (uint32_t)p[5] << 8 | (uint32_t)p[4] << 16 |
  886. (uint32_t)p[3] << 24 | (uint64_t)p[2] << 32 | (uint64_t)p[1] << 40 |
  887. (uint64_t)p[0] << 48;
  888. case 0:
  889. return p[7] | (uint32_t)p[6] << 8 | (uint32_t)p[5] << 16 |
  890. (uint32_t)p[4] << 24 | (uint64_t)p[3] << 32 | (uint64_t)p[2] << 40 |
  891. (uint64_t)p[1] << 48 | (uint64_t)p[0] << 56;
  892. #endif
  893. }
  894. #endif /* can_read_underside */
  895. }
  896. /***************************************************************************/
  897. #ifndef rot64
  898. static __maybe_unused __always_inline uint64_t rot64(uint64_t v, unsigned s) {
  899. return (v >> s) | (v << (64 - s));
  900. }
  901. #endif /* rot64 */
  902. #ifndef mul_32x32_64
  903. static __maybe_unused __always_inline uint64_t mul_32x32_64(uint32_t a,
  904. uint32_t b) {
  905. return a * (uint64_t)b;
  906. }
  907. #endif /* mul_32x32_64 */
  908. #ifndef add64carry_first
  909. static __maybe_unused __always_inline unsigned
  910. add64carry_first(uint64_t base, uint64_t addend, uint64_t *sum) {
  911. #if __has_builtin(__builtin_addcll)
  912. unsigned long long carryout;
  913. *sum = __builtin_addcll(base, addend, 0, &carryout);
  914. return (unsigned)carryout;
  915. #else
  916. *sum = base + addend;
  917. return *sum < addend;
  918. #endif /* __has_builtin(__builtin_addcll) */
  919. }
  920. #endif /* add64carry_first */
  921. #ifndef add64carry_next
  922. static __maybe_unused __always_inline unsigned
  923. add64carry_next(unsigned carry, uint64_t base, uint64_t addend, uint64_t *sum) {
  924. #if __has_builtin(__builtin_addcll)
  925. unsigned long long carryout;
  926. *sum = __builtin_addcll(base, addend, carry, &carryout);
  927. return (unsigned)carryout;
  928. #else
  929. *sum = base + addend + carry;
  930. return *sum < addend || (carry && *sum == addend);
  931. #endif /* __has_builtin(__builtin_addcll) */
  932. }
  933. #endif /* add64carry_next */
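/* In the portable fallbacks above, with sum = base + addend + carry reduced
 * modulo 2^64 (carry is 0 or 1), a carry-out happened exactly when
 * sum < addend + carry as exact integers; the expression
 * `*sum < addend || (carry && *sum == addend)` is that same test, written so
 * that addend + carry itself cannot overflow. */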
  934. #ifndef add64carry_last
  935. static __maybe_unused __always_inline void
  936. add64carry_last(unsigned carry, uint64_t base, uint64_t addend, uint64_t *sum) {
  937. #if __has_builtin(__builtin_addcll)
  938. unsigned long long carryout;
  939. *sum = __builtin_addcll(base, addend, carry, &carryout);
  940. (void)carryout;
  941. #else
  942. *sum = base + addend + carry;
  943. #endif /* __has_builtin(__builtin_addcll) */
  944. }
  945. #endif /* add64carry_last */
  946. #ifndef mul_64x64_128
  947. static __maybe_unused __always_inline uint64_t mul_64x64_128(uint64_t a,
  948. uint64_t b,
  949. uint64_t *h) {
  950. #if (defined(__SIZEOF_INT128__) || \
  951. (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)) && \
  952. (!defined(__LCC__) || __LCC__ != 124)
  953. __uint128_t r = (__uint128_t)a * (__uint128_t)b;
  954. /* modern GCC could nicely optimize this */
  955. *h = (uint64_t)(r >> 64);
  956. return (uint64_t)r;
  957. #elif defined(mul_64x64_high)
  958. *h = mul_64x64_high(a, b);
  959. return a * b;
  960. #else
  961. /* performs 64x64 to 128 bit multiplication */
  962. const uint64_t ll = mul_32x32_64((uint32_t)a, (uint32_t)b);
  963. const uint64_t lh = mul_32x32_64(a >> 32, (uint32_t)b);
  964. const uint64_t hl = mul_32x32_64((uint32_t)a, b >> 32);
  965. const uint64_t hh = mul_32x32_64(a >> 32, b >> 32);
  966. /* A few simplifications are possible here for 32-bit architectures,
  967. * but then we would lose compatibility with the original 64-bit
  968. * version. We think that is a bad idea, because the 32-bit t1ha would
  969. * still be (relatively) slow and, on top of that, incompatible. */
  970. uint64_t l;
  971. add64carry_last(add64carry_first(ll, lh << 32, &l), hh, lh >> 32, h);
  972. add64carry_last(add64carry_first(l, hl << 32, &l), *h, hl >> 32, h);
  973. return l;
  974. #endif
  975. }
  976. #endif /* mul_64x64_128() */
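/* The portable branch above is plain schoolbook multiplication: writing
 * a = a_hi * 2^32 + a_lo and b = b_hi * 2^32 + b_lo gives
 *
 *   a * b = hh * 2^64 + (hl + lh) * 2^32 + ll
 *
 * with ll = a_lo*b_lo, lh = a_hi*b_lo, hl = a_lo*b_hi, hh = a_hi*b_hi;
 * the two add64carry chains fold the shifted cross terms into the low and
 * high halves of the 128-bit result. */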
  977. #ifndef mul_64x64_high
  978. static __maybe_unused __always_inline uint64_t mul_64x64_high(uint64_t a,
  979. uint64_t b) {
  980. uint64_t h;
  981. mul_64x64_128(a, b, &h);
  982. return h;
  983. }
  984. #endif /* mul_64x64_high */
  985. /***************************************************************************/
  986. /* 'magic' primes */
  987. static const uint64_t prime_0 = UINT64_C(0xEC99BF0D8372CAAB);
  988. static const uint64_t prime_1 = UINT64_C(0x82434FE90EDCEF39);
  989. static const uint64_t prime_2 = UINT64_C(0xD4F06DB99D67BE4B);
  990. static const uint64_t prime_3 = UINT64_C(0xBD9CACC22C6E9571);
  991. static const uint64_t prime_4 = UINT64_C(0x9C06FAF4D023E3AB);
  992. static const uint64_t prime_5 = UINT64_C(0xC060724A8424F345);
  993. static const uint64_t prime_6 = UINT64_C(0xCB5AF53AE3AAAC31);
  994. /* xor high and low parts of full 128-bit product */
  995. static __maybe_unused __always_inline uint64_t mux64(uint64_t v,
  996. uint64_t prime) {
  997. uint64_t l, h;
  998. l = mul_64x64_128(v, prime, &h);
  999. return l ^ h;
  1000. }
  1001. static __maybe_unused __always_inline uint64_t final64(uint64_t a, uint64_t b) {
  1002. uint64_t x = (a + rot64(b, 41)) * prime_0;
  1003. uint64_t y = (rot64(a, 23) + b) * prime_6;
  1004. return mux64(x ^ y, prime_5);
  1005. }
  1006. static __maybe_unused __always_inline void mixup64(uint64_t *__restrict a,
  1007. uint64_t *__restrict b,
  1008. uint64_t v, uint64_t prime) {
  1009. uint64_t h;
  1010. *a ^= mul_64x64_128(*b + v, prime, &h);
  1011. *b += h;
  1012. }
  1013. /***************************************************************************/
  1014. typedef union t1ha_uint128 {
  1015. #if defined(__SIZEOF_INT128__) || \
  1016. (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
  1017. __uint128_t v;
  1018. #endif
  1019. struct {
  1020. #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  1021. uint64_t l, h;
  1022. #else
  1023. uint64_t h, l;
  1024. #endif
  1025. };
  1026. } t1ha_uint128_t;
  1027. static __maybe_unused __always_inline t1ha_uint128_t
  1028. not128(const t1ha_uint128_t v) {
  1029. t1ha_uint128_t r;
  1030. #if defined(__SIZEOF_INT128__) || \
  1031. (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
  1032. r.v = ~v.v;
  1033. #else
  1034. r.l = ~v.l;
  1035. r.h = ~v.h;
  1036. #endif
  1037. return r;
  1038. }
  1039. static __maybe_unused __always_inline t1ha_uint128_t
  1040. left128(const t1ha_uint128_t v, unsigned s) {
  1041. t1ha_uint128_t r;
  1042. assert(s < 128);
  1043. #if defined(__SIZEOF_INT128__) || \
  1044. (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
  1045. r.v = v.v << s;
  1046. #else
  1047. r.l = (s < 64) ? v.l << s : 0;
  1048. r.h = (s < 64) ? (v.h << s) | (s ? v.l >> (64 - s) : 0) : v.l << (s - 64);
  1049. #endif
  1050. return r;
  1051. }
  1052. static __maybe_unused __always_inline t1ha_uint128_t
  1053. right128(const t1ha_uint128_t v, unsigned s) {
  1054. t1ha_uint128_t r;
  1055. assert(s < 128);
  1056. #if defined(__SIZEOF_INT128__) || \
  1057. (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
  1058. r.v = v.v >> s;
  1059. #else
  1060. r.l = (s < 64) ? (s ? v.h << (64 - s) : 0) | (v.l >> s) : v.h >> (s - 64);
  1061. r.h = (s < 64) ? v.h >> s : 0;
  1062. #endif
  1063. return r;
  1064. }
  1065. static __maybe_unused __always_inline t1ha_uint128_t or128(t1ha_uint128_t x,
  1066. t1ha_uint128_t y) {
  1067. t1ha_uint128_t r;
  1068. #if defined(__SIZEOF_INT128__) || \
  1069. (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
  1070. r.v = x.v | y.v;
  1071. #else
  1072. r.l = x.l | y.l;
  1073. r.h = x.h | y.h;
  1074. #endif
  1075. return r;
  1076. }
  1077. static __maybe_unused __always_inline t1ha_uint128_t xor128(t1ha_uint128_t x,
  1078. t1ha_uint128_t y) {
  1079. t1ha_uint128_t r;
  1080. #if defined(__SIZEOF_INT128__) || \
  1081. (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
  1082. r.v = x.v ^ y.v;
  1083. #else
  1084. r.l = x.l ^ y.l;
  1085. r.h = x.h ^ y.h;
  1086. #endif
  1087. return r;
  1088. }
  1089. static __maybe_unused __always_inline t1ha_uint128_t rot128(t1ha_uint128_t v,
  1090. unsigned s) {
  1091. s &= 127;
  1092. #if defined(__SIZEOF_INT128__) || \
  1093. (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
  1094. v.v = s ? (v.v << (128 - s)) | (v.v >> s) : v.v; /* s == 0 would be an UB shift by 128 */
  1095. return v;
  1096. #else
  1097. return s ? or128(left128(v, 128 - s), right128(v, s)) : v;
  1098. #endif
  1099. }
  1100. static __maybe_unused __always_inline t1ha_uint128_t add128(t1ha_uint128_t x,
  1101. t1ha_uint128_t y) {
  1102. t1ha_uint128_t r;
  1103. #if defined(__SIZEOF_INT128__) || \
  1104. (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
  1105. r.v = x.v + y.v;
  1106. #else
  1107. add64carry_last(add64carry_first(x.l, y.l, &r.l), x.h, y.h, &r.h);
  1108. #endif
  1109. return r;
  1110. }
  1111. static __maybe_unused __always_inline t1ha_uint128_t mul128(t1ha_uint128_t x,
  1112. t1ha_uint128_t y) {
  1113. t1ha_uint128_t r;
  1114. #if defined(__SIZEOF_INT128__) || \
  1115. (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
  1116. r.v = x.v * y.v;
  1117. #else
  1118. r.l = mul_64x64_128(x.l, y.l, &r.h);
  1119. r.h += x.l * y.h + y.l * x.h;
  1120. #endif
  1121. return r;
  1122. }
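/* For the portable branch of mul128(): only the low 128 bits of the product
 * are kept, and
 *
 *   (x.h * 2^64 + x.l) * (y.h * 2^64 + y.l)
 *     == x.l * y.l + (x.l * y.h + x.h * y.l) * 2^64   (mod 2^128)
 *
 * so the x.h * y.h term can be dropped entirely, since it only contributes
 * to bits 128 and above. */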
  1123. /***************************************************************************/
  1124. #if T1HA0_AESNI_AVAILABLE && defined(__ia32__)
  1125. uint64_t t1ha_ia32cpu_features(void);
  1126. static __maybe_unused __always_inline bool
  1127. t1ha_ia32_AESNI_avail(uint64_t ia32cpu_features) {
  1128. /* check for AES-NI */
  1129. return (ia32cpu_features & UINT32_C(0x02000000)) != 0;
  1130. }
  1131. static __maybe_unused __always_inline bool
  1132. t1ha_ia32_AVX_avail(uint64_t ia32cpu_features) {
  1133. /* check for any AVX */
  1134. return (ia32cpu_features & UINT32_C(0x1A000000)) == UINT32_C(0x1A000000);
  1135. }
  1136. static __maybe_unused __always_inline bool
  1137. t1ha_ia32_AVX2_avail(uint64_t ia32cpu_features) {
  1138. /* check for 'Advanced Vector Extensions 2' */
  1139. return ((ia32cpu_features >> 32) & 32) != 0;
  1140. }
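/* A note on the feature masks above, assuming t1ha_ia32cpu_features() packs
 * CPUID.1:ECX into the low 32 bits and CPUID.7.0:EBX into the high 32 bits:
 * 0x02000000 is ECX bit 25 (AES-NI); 0x1A000000 additionally requires bits 27
 * (OSXSAVE) and 28 (AVX); and (features >> 32) & 32 tests EBX bit 5 (AVX2). */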
  1141. #endif /* T1HA0_AESNI_AVAILABLE && __ia32__ */