// tuklib_integer.h
  1. ///////////////////////////////////////////////////////////////////////////////
  2. //
  3. /// \file tuklib_integer.h
  4. /// \brief Various integer and bit operations
  5. ///
  6. /// This file provides macros or functions to do some basic integer and bit
  7. /// operations.
  8. ///
  9. /// Native endian inline functions (XX = 16, 32, or 64):
  10. /// - Unaligned native endian reads: readXXne(ptr)
  11. /// - Unaligned native endian writes: writeXXne(ptr, num)
  12. /// - Aligned native endian reads: aligned_readXXne(ptr)
  13. /// - Aligned native endian writes: aligned_writeXXne(ptr, num)
  14. ///
  15. /// Endianness-converting integer operations (these can be macros!)
  16. /// (XX = 16, 32, or 64; Y = b or l):
  17. /// - Byte swapping: bswapXX(num)
  18. /// - Byte order conversions to/from native (byteswaps if Y isn't
  19. /// the native endianness): convXXYe(num)
  20. /// - Unaligned reads: readXXYe(ptr)
  21. /// - Unaligned writes: writeXXYe(ptr, num)
  22. /// - Aligned reads: aligned_readXXYe(ptr)
  23. /// - Aligned writes: aligned_writeXXYe(ptr, num)
  24. ///
/// Since the above can be macros, the arguments should have no side effects
/// because they may be evaluated more than once.
  27. ///
  28. /// Bit scan operations for non-zero 32-bit integers (inline functions):
  29. /// - Bit scan reverse (find highest non-zero bit): bsr32(num)
  30. /// - Count leading zeros: clz32(num)
  31. /// - Count trailing zeros: ctz32(num)
  32. /// - Bit scan forward (simply an alias for ctz32()): bsf32(num)
  33. ///
  34. /// The above bit scan operations return 0-31. If num is zero,
  35. /// the result is undefined.
  36. //
  37. // Authors: Lasse Collin
  38. // Joachim Henke
  39. //
  40. // This file has been put into the public domain.
  41. // You can do whatever you want with this file.
  42. //
  43. ///////////////////////////////////////////////////////////////////////////////
#ifndef TUKLIB_INTEGER_H
#define TUKLIB_INTEGER_H

#include "tuklib_common.h"
#include <string.h>

// Newer Intel C compilers require immintrin.h for _bit_scan_reverse()
// and such functions.
#if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1500)
#	include <immintrin.h>
// Only include <intrin.h> when it is needed. GCC and Clang can both
// use __builtin's, so we only need Windows intrinsics when using MSVC.
// GCC and Clang can set _MSC_VER on Windows, so we need to exclude these
// cases explicitly.
#elif defined(_MSC_VER) && !TUKLIB_GNUC_REQ(3, 4) && !defined(__clang__)
#	include <intrin.h>
#endif
  59. ///////////////////
  60. // Byte swapping //
  61. ///////////////////
  62. #if defined(HAVE___BUILTIN_BSWAPXX)
  63. // GCC >= 4.8 and Clang
  64. # define bswap16(n) __builtin_bswap16(n)
  65. # define bswap32(n) __builtin_bswap32(n)
  66. # define bswap64(n) __builtin_bswap64(n)
  67. #elif defined(HAVE_BYTESWAP_H)
  68. // glibc, uClibc, dietlibc
  69. # include <byteswap.h>
  70. # ifdef HAVE_BSWAP_16
  71. # define bswap16(num) bswap_16(num)
  72. # endif
  73. # ifdef HAVE_BSWAP_32
  74. # define bswap32(num) bswap_32(num)
  75. # endif
  76. # ifdef HAVE_BSWAP_64
  77. # define bswap64(num) bswap_64(num)
  78. # endif
  79. #elif defined(HAVE_SYS_ENDIAN_H)
  80. // *BSDs and Darwin
  81. # include <sys/endian.h>
  82. #elif defined(HAVE_SYS_BYTEORDER_H)
  83. // Solaris
  84. # error #include <sys/byteorder.h>
  85. # ifdef BSWAP_16
  86. # define bswap16(num) BSWAP_16(num)
  87. # endif
  88. # ifdef BSWAP_32
  89. # define bswap32(num) BSWAP_32(num)
  90. # endif
  91. # ifdef BSWAP_64
  92. # define bswap64(num) BSWAP_64(num)
  93. # endif
  94. # ifdef BE_16
  95. # define conv16be(num) BE_16(num)
  96. # endif
  97. # ifdef BE_32
  98. # define conv32be(num) BE_32(num)
  99. # endif
  100. # ifdef BE_64
  101. # define conv64be(num) BE_64(num)
  102. # endif
  103. # ifdef LE_16
  104. # define conv16le(num) LE_16(num)
  105. # endif
  106. # ifdef LE_32
  107. # define conv32le(num) LE_32(num)
  108. # endif
  109. # ifdef LE_64
  110. # define conv64le(num) LE_64(num)
  111. # endif
  112. #endif
  113. #ifndef bswap16
  114. # define bswap16(n) (uint16_t)( \
  115. (((n) & 0x00FFU) << 8) \
  116. | (((n) & 0xFF00U) >> 8) \
  117. )
  118. #endif
  119. #ifndef bswap32
  120. # define bswap32(n) (uint32_t)( \
  121. (((n) & UINT32_C(0x000000FF)) << 24) \
  122. | (((n) & UINT32_C(0x0000FF00)) << 8) \
  123. | (((n) & UINT32_C(0x00FF0000)) >> 8) \
  124. | (((n) & UINT32_C(0xFF000000)) >> 24) \
  125. )
  126. #endif
  127. #ifndef bswap64
  128. # define bswap64(n) (uint64_t)( \
  129. (((n) & UINT64_C(0x00000000000000FF)) << 56) \
  130. | (((n) & UINT64_C(0x000000000000FF00)) << 40) \
  131. | (((n) & UINT64_C(0x0000000000FF0000)) << 24) \
  132. | (((n) & UINT64_C(0x00000000FF000000)) << 8) \
  133. | (((n) & UINT64_C(0x000000FF00000000)) >> 8) \
  134. | (((n) & UINT64_C(0x0000FF0000000000)) >> 24) \
  135. | (((n) & UINT64_C(0x00FF000000000000)) >> 40) \
  136. | (((n) & UINT64_C(0xFF00000000000000)) >> 56) \
  137. )
  138. #endif
  139. // Define conversion macros using the basic byte swapping macros.
  140. #ifdef WORDS_BIGENDIAN
  141. # ifndef conv16be
  142. # define conv16be(num) ((uint16_t)(num))
  143. # endif
  144. # ifndef conv32be
  145. # define conv32be(num) ((uint32_t)(num))
  146. # endif
  147. # ifndef conv64be
  148. # define conv64be(num) ((uint64_t)(num))
  149. # endif
  150. # ifndef conv16le
  151. # define conv16le(num) bswap16(num)
  152. # endif
  153. # ifndef conv32le
  154. # define conv32le(num) bswap32(num)
  155. # endif
  156. # ifndef conv64le
  157. # define conv64le(num) bswap64(num)
  158. # endif
  159. #else
  160. # ifndef conv16be
  161. # define conv16be(num) bswap16(num)
  162. # endif
  163. # ifndef conv32be
  164. # define conv32be(num) bswap32(num)
  165. # endif
  166. # ifndef conv64be
  167. # define conv64be(num) bswap64(num)
  168. # endif
  169. # ifndef conv16le
  170. # define conv16le(num) ((uint16_t)(num))
  171. # endif
  172. # ifndef conv32le
  173. # define conv32le(num) ((uint32_t)(num))
  174. # endif
  175. # ifndef conv64le
  176. # define conv64le(num) ((uint64_t)(num))
  177. # endif
  178. #endif
  179. ////////////////////////////////
  180. // Unaligned reads and writes //
  181. ////////////////////////////////
  182. // No-strict-align archs like x86-64
  183. // ---------------------------------
  184. //
  185. // The traditional way of casting e.g. *(const uint16_t *)uint8_pointer
  186. // is bad even if the uint8_pointer is properly aligned because this kind
  187. // of casts break strict aliasing rules and result in undefined behavior.
  188. // With unaligned pointers it's even worse: compilers may emit vector
  189. // instructions that require aligned pointers even if non-vector
  190. // instructions work with unaligned pointers.
  191. //
  192. // Using memcpy() is the standard compliant way to do unaligned access.
  193. // Many modern compilers inline it so there is no function call overhead.
  194. // For those compilers that don't handle the memcpy() method well, the
  195. // old casting method (that violates strict aliasing) can be requested at
  196. // build time. A third method, casting to a packed struct, would also be
  197. // an option but isn't provided to keep things simpler (it's already a mess).
  198. // Hopefully this is flexible enough in practice.
  199. //
  200. // Some compilers on x86-64 like Clang >= 10 and GCC >= 5.1 detect that
  201. //
  202. // buf[0] | (buf[1] << 8)
  203. //
  204. // reads a 16-bit value and can emit a single 16-bit load and produce
// identical code to the memcpy() method. In other cases Clang and GCC
  206. // produce either the same or better code with memcpy(). For example, Clang 9
  207. // on x86-64 can detect 32-bit load but not 16-bit load.
  208. //
  209. // MSVC uses unaligned access with the memcpy() method but emits byte-by-byte
  210. // code for "buf[0] | (buf[1] << 8)".
  211. //
  212. // Conclusion: The memcpy() method is the best choice when unaligned access
  213. // is supported.
  214. //
  215. // Strict-align archs like SPARC
  216. // -----------------------------
  217. //
// GCC versions from around 4.x to at least 13.2.0 produce worse code
  219. // from the memcpy() method than from simple byte-by-byte shift-or code
  220. // when reading a 32-bit integer:
  221. //
// (1) It may be constructed on stack using four 8-bit loads,
  223. // four 8-bit stores to stack, and finally one 32-bit load from stack.
  224. //
  225. // (2) Especially with -Os, an actual memcpy() call may be emitted.
  226. //
// This is true at least on ARM, ARM64, SPARC, SPARC64, MIPS64EL, and
  228. // RISC-V. Of these, ARM, ARM64, and RISC-V support unaligned access in
  229. // some processors but not all so this is relevant only in the case when
  230. // GCC assumes that unaligned is not supported or -mstrict-align or
  231. // -mno-unaligned-access is used.
  232. //
  233. // For Clang it makes little difference. ARM64 with -O2 -mstrict-align
// was one of the very few with a minor difference: the memcpy() version
  235. // was one instruction longer.
  236. //
  237. // Conclusion: At least in case of GCC and Clang, byte-by-byte code is
  238. // the best choice for strict-align archs to do unaligned access.
  239. //
  240. // See also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=111502
  241. //
  242. // Thanks to <https://godbolt.org/> it was easy to test different compilers.
  243. // The following is for little endian targets:
  244. /*
  245. #include <stdint.h>
  246. #include <string.h>
  247. uint32_t bytes16(const uint8_t *b)
  248. {
  249. return (uint32_t)b[0]
  250. | ((uint32_t)b[1] << 8);
  251. }
  252. uint32_t copy16(const uint8_t *b)
  253. {
  254. uint16_t v;
  255. memcpy(&v, b, sizeof(v));
  256. return v;
  257. }
  258. uint32_t bytes32(const uint8_t *b)
  259. {
  260. return (uint32_t)b[0]
  261. | ((uint32_t)b[1] << 8)
  262. | ((uint32_t)b[2] << 16)
  263. | ((uint32_t)b[3] << 24);
  264. }
  265. uint32_t copy32(const uint8_t *b)
  266. {
  267. uint32_t v;
  268. memcpy(&v, b, sizeof(v));
  269. return v;
  270. }
  271. void wbytes16(uint8_t *b, uint16_t v)
  272. {
  273. b[0] = (uint8_t)v;
  274. b[1] = (uint8_t)(v >> 8);
  275. }
  276. void wcopy16(uint8_t *b, uint16_t v)
  277. {
  278. memcpy(b, &v, sizeof(v));
  279. }
  280. void wbytes32(uint8_t *b, uint32_t v)
  281. {
  282. b[0] = (uint8_t)v;
  283. b[1] = (uint8_t)(v >> 8);
  284. b[2] = (uint8_t)(v >> 16);
  285. b[3] = (uint8_t)(v >> 24);
  286. }
  287. void wcopy32(uint8_t *b, uint32_t v)
  288. {
  289. memcpy(b, &v, sizeof(v));
  290. }
  291. */
  292. #ifdef TUKLIB_FAST_UNALIGNED_ACCESS
  293. static inline uint16_t
  294. read16ne(const uint8_t *buf)
  295. {
  296. #ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
  297. return *(const uint16_t *)buf;
  298. #else
  299. uint16_t num;
  300. memcpy(&num, buf, sizeof(num));
  301. return num;
  302. #endif
  303. }
  304. static inline uint32_t
  305. read32ne(const uint8_t *buf)
  306. {
  307. #ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
  308. return *(const uint32_t *)buf;
  309. #else
  310. uint32_t num;
  311. memcpy(&num, buf, sizeof(num));
  312. return num;
  313. #endif
  314. }
  315. static inline uint64_t
  316. read64ne(const uint8_t *buf)
  317. {
  318. #ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
  319. return *(const uint64_t *)buf;
  320. #else
  321. uint64_t num;
  322. memcpy(&num, buf, sizeof(num));
  323. return num;
  324. #endif
  325. }
  326. static inline void
  327. write16ne(uint8_t *buf, uint16_t num)
  328. {
  329. #ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
  330. *(uint16_t *)buf = num;
  331. #else
  332. memcpy(buf, &num, sizeof(num));
  333. #endif
  334. return;
  335. }
  336. static inline void
  337. write32ne(uint8_t *buf, uint32_t num)
  338. {
  339. #ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
  340. *(uint32_t *)buf = num;
  341. #else
  342. memcpy(buf, &num, sizeof(num));
  343. #endif
  344. return;
  345. }
  346. static inline void
  347. write64ne(uint8_t *buf, uint64_t num)
  348. {
  349. #ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
  350. *(uint64_t *)buf = num;
  351. #else
  352. memcpy(buf, &num, sizeof(num));
  353. #endif
  354. return;
  355. }
  356. static inline uint16_t
  357. read16be(const uint8_t *buf)
  358. {
  359. uint16_t num = read16ne(buf);
  360. return conv16be(num);
  361. }
  362. static inline uint16_t
  363. read16le(const uint8_t *buf)
  364. {
  365. uint16_t num = read16ne(buf);
  366. return conv16le(num);
  367. }
  368. static inline uint32_t
  369. read32be(const uint8_t *buf)
  370. {
  371. uint32_t num = read32ne(buf);
  372. return conv32be(num);
  373. }
  374. static inline uint32_t
  375. read32le(const uint8_t *buf)
  376. {
  377. uint32_t num = read32ne(buf);
  378. return conv32le(num);
  379. }
  380. static inline uint64_t
  381. read64be(const uint8_t *buf)
  382. {
  383. uint64_t num = read64ne(buf);
  384. return conv64be(num);
  385. }
  386. static inline uint64_t
  387. read64le(const uint8_t *buf)
  388. {
  389. uint64_t num = read64ne(buf);
  390. return conv64le(num);
  391. }
// NOTE: Possible byte swapping must be done in a macro to allow the compiler
// to optimize byte swapping of constants when using glibc's or *BSD's
// byte swapping macros. The actual write is done in an inline function
// to make type checking of the buf pointer possible.
#define write16be(buf, num) write16ne(buf, conv16be(num))
#define write32be(buf, num) write32ne(buf, conv32be(num))
#define write64be(buf, num) write64ne(buf, conv64be(num))
#define write16le(buf, num) write16ne(buf, conv16le(num))
#define write32le(buf, num) write32ne(buf, conv32le(num))
#define write64le(buf, num) write64ne(buf, conv64le(num))

#else

// Strict-align fallback: the byte-by-byte functions below already read and
// write in the requested byte order, so on this path the native-endian
// names are simply aliases for whichever order matches the target.
#ifdef WORDS_BIGENDIAN
#	define read16ne read16be
#	define read32ne read32be
#	define read64ne read64be
#	define write16ne write16be
#	define write32ne write32be
#	define write64ne write64be
#else
#	define read16ne read16le
#	define read32ne read32le
#	define read64ne read64le
#	define write16ne write16le
#	define write32ne write32le
#	define write64ne write64le
#endif
  418. static inline uint16_t
  419. read16be(const uint8_t *buf)
  420. {
  421. uint16_t num = ((uint16_t)buf[0] << 8) | (uint16_t)buf[1];
  422. return num;
  423. }
  424. static inline uint16_t
  425. read16le(const uint8_t *buf)
  426. {
  427. uint16_t num = ((uint16_t)buf[0]) | ((uint16_t)buf[1] << 8);
  428. return num;
  429. }
  430. static inline uint32_t
  431. read32be(const uint8_t *buf)
  432. {
  433. uint32_t num = (uint32_t)buf[0] << 24;
  434. num |= (uint32_t)buf[1] << 16;
  435. num |= (uint32_t)buf[2] << 8;
  436. num |= (uint32_t)buf[3];
  437. return num;
  438. }
  439. static inline uint32_t
  440. read32le(const uint8_t *buf)
  441. {
  442. uint32_t num = (uint32_t)buf[0];
  443. num |= (uint32_t)buf[1] << 8;
  444. num |= (uint32_t)buf[2] << 16;
  445. num |= (uint32_t)buf[3] << 24;
  446. return num;
  447. }
  448. static inline uint64_t
  449. read64be(const uint8_t *buf)
  450. {
  451. uint64_t num = (uint64_t)buf[0] << 56;
  452. num |= (uint64_t)buf[1] << 48;
  453. num |= (uint64_t)buf[2] << 40;
  454. num |= (uint64_t)buf[3] << 32;
  455. num |= (uint64_t)buf[4] << 24;
  456. num |= (uint64_t)buf[5] << 16;
  457. num |= (uint64_t)buf[6] << 8;
  458. num |= (uint64_t)buf[7];
  459. return num;
  460. }
  461. static inline uint64_t
  462. read64le(const uint8_t *buf)
  463. {
  464. uint64_t num = (uint64_t)buf[0];
  465. num |= (uint64_t)buf[1] << 8;
  466. num |= (uint64_t)buf[2] << 16;
  467. num |= (uint64_t)buf[3] << 24;
  468. num |= (uint64_t)buf[4] << 32;
  469. num |= (uint64_t)buf[5] << 40;
  470. num |= (uint64_t)buf[6] << 48;
  471. num |= (uint64_t)buf[7] << 56;
  472. return num;
  473. }
  474. static inline void
  475. write16be(uint8_t *buf, uint16_t num)
  476. {
  477. buf[0] = (uint8_t)(num >> 8);
  478. buf[1] = (uint8_t)num;
  479. return;
  480. }
  481. static inline void
  482. write16le(uint8_t *buf, uint16_t num)
  483. {
  484. buf[0] = (uint8_t)num;
  485. buf[1] = (uint8_t)(num >> 8);
  486. return;
  487. }
  488. static inline void
  489. write32be(uint8_t *buf, uint32_t num)
  490. {
  491. buf[0] = (uint8_t)(num >> 24);
  492. buf[1] = (uint8_t)(num >> 16);
  493. buf[2] = (uint8_t)(num >> 8);
  494. buf[3] = (uint8_t)num;
  495. return;
  496. }
  497. static inline void
  498. write32le(uint8_t *buf, uint32_t num)
  499. {
  500. buf[0] = (uint8_t)num;
  501. buf[1] = (uint8_t)(num >> 8);
  502. buf[2] = (uint8_t)(num >> 16);
  503. buf[3] = (uint8_t)(num >> 24);
  504. return;
  505. }
  506. static inline void
  507. write64be(uint8_t *buf, uint64_t num)
  508. {
  509. buf[0] = (uint8_t)(num >> 56);
  510. buf[1] = (uint8_t)(num >> 48);
  511. buf[2] = (uint8_t)(num >> 40);
  512. buf[3] = (uint8_t)(num >> 32);
  513. buf[4] = (uint8_t)(num >> 24);
  514. buf[5] = (uint8_t)(num >> 16);
  515. buf[6] = (uint8_t)(num >> 8);
  516. buf[7] = (uint8_t)num;
  517. return;
  518. }
  519. static inline void
  520. write64le(uint8_t *buf, uint64_t num)
  521. {
  522. buf[0] = (uint8_t)num;
  523. buf[1] = (uint8_t)(num >> 8);
  524. buf[2] = (uint8_t)(num >> 16);
  525. buf[3] = (uint8_t)(num >> 24);
  526. buf[4] = (uint8_t)(num >> 32);
  527. buf[5] = (uint8_t)(num >> 40);
  528. buf[6] = (uint8_t)(num >> 48);
  529. buf[7] = (uint8_t)(num >> 56);
  530. return;
  531. }
  532. #endif
  533. //////////////////////////////
  534. // Aligned reads and writes //
  535. //////////////////////////////
  536. // Separate functions for aligned reads and writes are provided since on
  537. // strict-align archs aligned access is much faster than unaligned access.
  538. //
  539. // Just like in the unaligned case, memcpy() is needed to avoid
  540. // strict aliasing violations. However, on archs that don't support
  541. // unaligned access the compiler cannot know that the pointers given
  542. // to memcpy() are aligned which results in slow code. As of C11 there is
  543. // no standard way to tell the compiler that we know that the address is
  544. // aligned but some compilers have language extensions to do that. With
  545. // such language extensions the memcpy() method gives excellent results.
  546. //
// What to do on a strict-align system when no known language extensions
  548. // are available? Falling back to byte-by-byte access would be safe but ruin
  549. // optimizations that have been made specifically with aligned access in mind.
  550. // As a compromise, aligned reads will fall back to non-compliant type punning
  551. // but aligned writes will be byte-by-byte, that is, fast reads are preferred
  552. // over fast writes. This obviously isn't great but hopefully it's a working
  553. // compromise for now.
  554. //
// __builtin_assume_aligned is supported by GCC >= 4.7 and clang >= 3.6.
#ifdef HAVE___BUILTIN_ASSUME_ALIGNED
	// Tell the compiler that src has at least "size" bytes of alignment
	// so the memcpy() can be compiled into a single aligned load/store
	// even on strict-align targets.
#	define tuklib_memcpy_aligned(dest, src, size) \
		memcpy(dest, __builtin_assume_aligned(src, size), size)
#else
#	define tuklib_memcpy_aligned(dest, src, size) \
		memcpy(dest, src, size)
	// Without the builtin the compiler cannot prove alignment, which makes
	// the memcpy() method slow on strict-align archs. As the compromise
	// described above, aligned reads then fall back to non-compliant type
	// punning (fast reads are preferred over fast writes).
#	ifndef TUKLIB_FAST_UNALIGNED_ACCESS
#		define TUKLIB_USE_UNSAFE_ALIGNED_READS 1
#	endif
#endif
  566. static inline uint16_t
  567. aligned_read16ne(const uint8_t *buf)
  568. {
  569. #if defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING) \
  570. || defined(TUKLIB_USE_UNSAFE_ALIGNED_READS)
  571. return *(const uint16_t *)buf;
  572. #else
  573. uint16_t num;
  574. tuklib_memcpy_aligned(&num, buf, sizeof(num));
  575. return num;
  576. #endif
  577. }
  578. static inline uint32_t
  579. aligned_read32ne(const uint8_t *buf)
  580. {
  581. #if defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING) \
  582. || defined(TUKLIB_USE_UNSAFE_ALIGNED_READS)
  583. return *(const uint32_t *)buf;
  584. #else
  585. uint32_t num;
  586. tuklib_memcpy_aligned(&num, buf, sizeof(num));
  587. return num;
  588. #endif
  589. }
  590. static inline uint64_t
  591. aligned_read64ne(const uint8_t *buf)
  592. {
  593. #if defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING) \
  594. || defined(TUKLIB_USE_UNSAFE_ALIGNED_READS)
  595. return *(const uint64_t *)buf;
  596. #else
  597. uint64_t num;
  598. tuklib_memcpy_aligned(&num, buf, sizeof(num));
  599. return num;
  600. #endif
  601. }
  602. static inline void
  603. aligned_write16ne(uint8_t *buf, uint16_t num)
  604. {
  605. #ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
  606. *(uint16_t *)buf = num;
  607. #else
  608. tuklib_memcpy_aligned(buf, &num, sizeof(num));
  609. #endif
  610. return;
  611. }
  612. static inline void
  613. aligned_write32ne(uint8_t *buf, uint32_t num)
  614. {
  615. #ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
  616. *(uint32_t *)buf = num;
  617. #else
  618. tuklib_memcpy_aligned(buf, &num, sizeof(num));
  619. #endif
  620. return;
  621. }
  622. static inline void
  623. aligned_write64ne(uint8_t *buf, uint64_t num)
  624. {
  625. #ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
  626. *(uint64_t *)buf = num;
  627. #else
  628. tuklib_memcpy_aligned(buf, &num, sizeof(num));
  629. #endif
  630. return;
  631. }
  632. static inline uint16_t
  633. aligned_read16be(const uint8_t *buf)
  634. {
  635. uint16_t num = aligned_read16ne(buf);
  636. return conv16be(num);
  637. }
  638. static inline uint16_t
  639. aligned_read16le(const uint8_t *buf)
  640. {
  641. uint16_t num = aligned_read16ne(buf);
  642. return conv16le(num);
  643. }
  644. static inline uint32_t
  645. aligned_read32be(const uint8_t *buf)
  646. {
  647. uint32_t num = aligned_read32ne(buf);
  648. return conv32be(num);
  649. }
  650. static inline uint32_t
  651. aligned_read32le(const uint8_t *buf)
  652. {
  653. uint32_t num = aligned_read32ne(buf);
  654. return conv32le(num);
  655. }
  656. static inline uint64_t
  657. aligned_read64be(const uint8_t *buf)
  658. {
  659. uint64_t num = aligned_read64ne(buf);
  660. return conv64be(num);
  661. }
  662. static inline uint64_t
  663. aligned_read64le(const uint8_t *buf)
  664. {
  665. uint64_t num = aligned_read64ne(buf);
  666. return conv64le(num);
  667. }
// These need to be macros like in the unaligned case: the possible byte
// swap must happen at the macro level so the compiler can fold swaps of
// constants, while the inline function still type checks the buf pointer.
#define aligned_write16be(buf, num) aligned_write16ne((buf), conv16be(num))
#define aligned_write16le(buf, num) aligned_write16ne((buf), conv16le(num))
#define aligned_write32be(buf, num) aligned_write32ne((buf), conv32be(num))
#define aligned_write32le(buf, num) aligned_write32ne((buf), conv32le(num))
#define aligned_write64be(buf, num) aligned_write64ne((buf), conv64be(num))
#define aligned_write64le(buf, num) aligned_write64ne((buf), conv64le(num))
  675. ////////////////////
  676. // Bit operations //
  677. ////////////////////
/// \brief Bit scan reverse: index of the highest set bit
///
/// Returns a value in the range 0-31. The result is undefined if n is zero.
static inline uint32_t
bsr32(uint32_t n)
{
	// Check for ICC first, since it tends to define __GNUC__ too.
#if defined(__INTEL_COMPILER)
	return _bit_scan_reverse(n);

#elif (TUKLIB_GNUC_REQ(3, 4) || defined(__clang__)) && UINT_MAX == UINT32_MAX
	// GCC >= 3.4 has __builtin_clz(), which gives good results on
	// multiple architectures. On x86, __builtin_clz() ^ 31U becomes
	// either plain BSR (so the XOR gets optimized away) or LZCNT and
	// XOR (if -march indicates that SSE4a instructions are supported).
	return (uint32_t)__builtin_clz(n) ^ 31U;

#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
	// Inline assembly fallback for GCC-compatible compilers on x86 when
	// the builtin is unavailable (e.g. old GCC with a wider int).
	uint32_t i;
	__asm__("bsrl %1, %0" : "=r" (i) : "rm" (n));
	return i;

#elif defined(_MSC_VER)
	unsigned long i;
	_BitScanReverse(&i, n);
	return i;

#else
	// Generic fallback: binary search for the highest set bit by
	// shifting the value left and narrowing the candidate index.
	uint32_t i = 31;

	if ((n & 0xFFFF0000) == 0) {
		n <<= 16;
		i = 15;
	}

	if ((n & 0xFF000000) == 0) {
		n <<= 8;
		i -= 8;
	}

	if ((n & 0xF0000000) == 0) {
		n <<= 4;
		i -= 4;
	}

	if ((n & 0xC0000000) == 0) {
		n <<= 2;
		i -= 2;
	}

	if ((n & 0x80000000) == 0)
		--i;

	return i;
#endif
}
/// \brief Count leading zeros of a non-zero 32-bit integer
///
/// Returns a value in the range 0-31. The result is undefined if n is zero.
static inline uint32_t
clz32(uint32_t n)
{
	// ICC: BSR returns the index of the highest set bit;
	// XOR with 31 converts it to the leading-zero count.
#if defined(__INTEL_COMPILER)
	return _bit_scan_reverse(n) ^ 31U;

#elif (TUKLIB_GNUC_REQ(3, 4) || defined(__clang__)) && UINT_MAX == UINT32_MAX
	return (uint32_t)__builtin_clz(n);

#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
	// BSR followed by XOR 31 does the index-to-count conversion in asm.
	uint32_t i;
	__asm__("bsrl %1, %0\n\t"
		"xorl $31, %0"
		: "=r" (i) : "rm" (n));
	return i;

#elif defined(_MSC_VER)
	unsigned long i;
	_BitScanReverse(&i, n);
	return i ^ 31U;

#else
	// Generic fallback: binary search counting how far the value must be
	// shifted left before its top bit is set.
	uint32_t i = 0;

	if ((n & 0xFFFF0000) == 0) {
		n <<= 16;
		i = 16;
	}

	if ((n & 0xFF000000) == 0) {
		n <<= 8;
		i += 8;
	}

	if ((n & 0xF0000000) == 0) {
		n <<= 4;
		i += 4;
	}

	if ((n & 0xC0000000) == 0) {
		n <<= 2;
		i += 2;
	}

	if ((n & 0x80000000) == 0)
		++i;

	return i;
#endif
}
/// \brief Count trailing zeros of a non-zero 32-bit integer
///
/// Returns a value in the range 0-31. The result is undefined if n is zero.
static inline uint32_t
ctz32(uint32_t n)
{
#if defined(__INTEL_COMPILER)
	return _bit_scan_forward(n);

#elif (TUKLIB_GNUC_REQ(3, 4) || defined(__clang__)) && UINT_MAX >= UINT32_MAX
	// NOTE(review): unlike bsr32()/clz32() this uses >= instead of ==,
	// presumably because the trailing-zero count of a nonzero value is
	// unchanged when int is wider than 32 bits — confirm against upstream
	// intent before "fixing" it.
	return (uint32_t)__builtin_ctz(n);

#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
	uint32_t i;
	__asm__("bsfl %1, %0" : "=r" (i) : "rm" (n));
	return i;

#elif defined(_MSC_VER)
	unsigned long i;
	_BitScanForward(&i, n);
	return i;

#else
	// Generic fallback: binary search counting how far the value must be
	// shifted right before its lowest bit is set.
	uint32_t i = 0;

	if ((n & 0x0000FFFF) == 0) {
		n >>= 16;
		i = 16;
	}

	if ((n & 0x000000FF) == 0) {
		n >>= 8;
		i += 8;
	}

	if ((n & 0x0000000F) == 0) {
		n >>= 4;
		i += 4;
	}

	if ((n & 0x00000003) == 0) {
		n >>= 2;
		i += 2;
	}

	if ((n & 0x00000001) == 0)
		++i;

	return i;
#endif
}
// Bit scan forward is simply an alias for the count of trailing zeros.
#define bsf32 ctz32

#endif // TUKLIB_INTEGER_H