snappy-stubs-internal.h
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various stubs for the open-source version of Snappy.

#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdint.h>

#include <cassert>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <string>

#if HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#if HAVE_UNISTD_H
#include <unistd.h>
#endif

#if defined(_MSC_VER)
#include <intrin.h>
#endif  // defined(_MSC_VER)

#ifndef __has_feature
#define __has_feature(x) 0
#endif

#if __has_feature(memory_sanitizer)
#include <sanitizer/msan_interface.h>
#define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
  __msan_unpoison((address), (size))
#else
#define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) /* empty */
#endif  // __has_feature(memory_sanitizer)

#include "snappy-stubs-public.h"

// Used to enable 64-bit optimized versions of some routines.
#if defined(__PPC64__) || defined(__powerpc64__)
#define ARCH_PPC 1
#elif defined(__aarch64__) || defined(_M_ARM64)
#define ARCH_ARM 1
#endif

// Needed by OS X, among others.
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

// The size of an array, if known at compile time.
// Will give unexpected results if used on a pointer.
// We undefine it first, since some compilers already have a definition.
#ifdef ARRAYSIZE
#undef ARRAYSIZE
#endif
#define ARRAYSIZE(a) int{sizeof(a) / sizeof(*(a))}
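
// Illustrative sketch (not part of the original header): ARRAYSIZE only
// works on true arrays, because sizeof applied to a pointer yields the
// pointer size, not the element count.
//
//   int table[16];
//   static_assert(ARRAYSIZE(table) == 16, "counts elements");
//   int* p = table;
//   // ARRAYSIZE(p) would compile but silently yield sizeof(int*)/sizeof(int).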

// Static prediction hints.
#if HAVE_BUILTIN_EXPECT
#define SNAPPY_PREDICT_FALSE(x) (__builtin_expect(x, 0))
#define SNAPPY_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
#else
#define SNAPPY_PREDICT_FALSE(x) x
#define SNAPPY_PREDICT_TRUE(x) x
#endif  // HAVE_BUILTIN_EXPECT
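
// Illustrative sketch (not part of the original header): the hints mark a
// branch as likely or unlikely so the compiler can lay out the expected
// path first.
//
//   if (SNAPPY_PREDICT_FALSE(len > capacity)) {
//     return false;  // Rare error path, moved off the hot path.
//   }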

// Inlining hints.
#if HAVE_ATTRIBUTE_ALWAYS_INLINE
#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
#else
#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE
#endif  // HAVE_ATTRIBUTE_ALWAYS_INLINE

// Stubbed version of ABSL_FLAG.
//
// In the open source version, flags can only be changed at compile time.
#define SNAPPY_FLAG(flag_type, flag_name, default_value, help) \
  flag_type FLAGS_ ## flag_name = default_value

namespace snappy {

// Stubbed version of absl::GetFlag().
template <typename T>
inline T GetFlag(T flag) { return flag; }
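
// Illustrative sketch (not part of the original header): the stub expands to
// a plain global, so a flag is declared and read like this. The flag name is
// hypothetical, for illustration only; the help string is discarded.
//
//   SNAPPY_FLAG(int32_t, start_len, 16, "Initial buffer length.");
//   ...
//   int32_t n = snappy::GetFlag(FLAGS_start_len);  // n == 16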

static const uint32_t kuint32max = std::numeric_limits<uint32_t>::max();
static const int64_t kint64max = std::numeric_limits<int64_t>::max();

// Potentially unaligned loads and stores.

inline uint16_t UNALIGNED_LOAD16(const void *p) {
  // Compiles to a single movzx/ldrh on clang/gcc/msvc.
  uint16_t v;
  std::memcpy(&v, p, sizeof(v));
  return v;
}

inline uint32_t UNALIGNED_LOAD32(const void *p) {
  // Compiles to a single mov/ldr on clang/gcc/msvc.
  uint32_t v;
  std::memcpy(&v, p, sizeof(v));
  return v;
}

inline uint64_t UNALIGNED_LOAD64(const void *p) {
  // Compiles to a single mov/ldr on clang/gcc/msvc.
  uint64_t v;
  std::memcpy(&v, p, sizeof(v));
  return v;
}

inline void UNALIGNED_STORE16(void *p, uint16_t v) {
  // Compiles to a single mov/strh on clang/gcc/msvc.
  std::memcpy(p, &v, sizeof(v));
}

inline void UNALIGNED_STORE32(void *p, uint32_t v) {
  // Compiles to a single mov/str on clang/gcc/msvc.
  std::memcpy(p, &v, sizeof(v));
}

inline void UNALIGNED_STORE64(void *p, uint64_t v) {
  // Compiles to a single mov/str on clang/gcc/msvc.
  std::memcpy(p, &v, sizeof(v));
}
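
// Illustrative sketch (not part of the original header): memcpy is used
// instead of a cast like *reinterpret_cast<const uint32_t*>(p) because the
// cast has undefined behavior on misaligned addresses, while the memcpy form
// is well-defined and optimizes to the same single instruction.
//
//   char buf[8] = {0};
//   UNALIGNED_STORE32(buf + 1, 0xdeadbeef);  // Deliberately misaligned.
//   uint32_t v = UNALIGNED_LOAD32(buf + 1);  // v == 0xdeadbeef.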

// Conversions to little-endian storage, opposite of network format.
//
// Store values into unaligned memory, converting to little-endian order:
//    LittleEndian.Store16(p, x);
//
// Load unaligned values stored in little-endian order, converting to host
// order:
//    x = LittleEndian.Load16(p);
class LittleEndian {
 public:
  // Functions to do unaligned loads and stores in little-endian order.
  static inline uint16_t Load16(const void *ptr) {
    // Compiles to a single mov/ldrh on recent clang and gcc.
#if SNAPPY_IS_BIG_ENDIAN
    const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
    return (static_cast<uint16_t>(buffer[0])) |
           (static_cast<uint16_t>(buffer[1]) << 8);
#else
    // memcpy() turns into a single instruction early in the optimization
    // pipeline (relative to a series of byte accesses), so using memcpy
    // instead of byte accesses may lead to better decisions in more stages
    // of the optimization pipeline.
    uint16_t value;
    std::memcpy(&value, ptr, 2);
    return value;
#endif
  }

  static inline uint32_t Load32(const void *ptr) {
    // Compiles to a single mov/ldr on recent clang and gcc.
#if SNAPPY_IS_BIG_ENDIAN
    const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
    return (static_cast<uint32_t>(buffer[0])) |
           (static_cast<uint32_t>(buffer[1]) << 8) |
           (static_cast<uint32_t>(buffer[2]) << 16) |
           (static_cast<uint32_t>(buffer[3]) << 24);
#else
    // See Load16() for the rationale of using memcpy().
    uint32_t value;
    std::memcpy(&value, ptr, 4);
    return value;
#endif
  }

  static inline uint64_t Load64(const void *ptr) {
    // Compiles to a single mov/ldr on recent clang and gcc.
#if SNAPPY_IS_BIG_ENDIAN
    const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
    return (static_cast<uint64_t>(buffer[0])) |
           (static_cast<uint64_t>(buffer[1]) << 8) |
           (static_cast<uint64_t>(buffer[2]) << 16) |
           (static_cast<uint64_t>(buffer[3]) << 24) |
           (static_cast<uint64_t>(buffer[4]) << 32) |
           (static_cast<uint64_t>(buffer[5]) << 40) |
           (static_cast<uint64_t>(buffer[6]) << 48) |
           (static_cast<uint64_t>(buffer[7]) << 56);
#else
    // See Load16() for the rationale of using memcpy().
    uint64_t value;
    std::memcpy(&value, ptr, 8);
    return value;
#endif
  }

  static inline void Store16(void *dst, uint16_t value) {
    // Compiles to a single mov/strh on recent clang and gcc.
#if SNAPPY_IS_BIG_ENDIAN
    uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
    buffer[0] = static_cast<uint8_t>(value);
    buffer[1] = static_cast<uint8_t>(value >> 8);
#else
    // See Load16() for the rationale of using memcpy().
    std::memcpy(dst, &value, 2);
#endif
  }

  static void Store32(void *dst, uint32_t value) {
    // Compiles to a single mov/str on recent clang and gcc.
#if SNAPPY_IS_BIG_ENDIAN
    uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
    buffer[0] = static_cast<uint8_t>(value);
    buffer[1] = static_cast<uint8_t>(value >> 8);
    buffer[2] = static_cast<uint8_t>(value >> 16);
    buffer[3] = static_cast<uint8_t>(value >> 24);
#else
    // See Load16() for the rationale of using memcpy().
    std::memcpy(dst, &value, 4);
#endif
  }

  static void Store64(void* dst, uint64_t value) {
    // Compiles to a single mov/str on recent clang and gcc.
#if SNAPPY_IS_BIG_ENDIAN
    uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
    buffer[0] = static_cast<uint8_t>(value);
    buffer[1] = static_cast<uint8_t>(value >> 8);
    buffer[2] = static_cast<uint8_t>(value >> 16);
    buffer[3] = static_cast<uint8_t>(value >> 24);
    buffer[4] = static_cast<uint8_t>(value >> 32);
    buffer[5] = static_cast<uint8_t>(value >> 40);
    buffer[6] = static_cast<uint8_t>(value >> 48);
    buffer[7] = static_cast<uint8_t>(value >> 56);
#else
    // See Load16() for the rationale of using memcpy().
    std::memcpy(dst, &value, 8);
#endif
  }

  static inline constexpr bool IsLittleEndian() {
#if SNAPPY_IS_BIG_ENDIAN
    return false;
#else
    return true;
#endif  // SNAPPY_IS_BIG_ENDIAN
  }
};
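
// Illustrative sketch (not part of the original header): a round trip
// through LittleEndian yields the same value on any host, and the byte
// layout in memory is fixed regardless of host byte order.
//
//   char buf[4];
//   LittleEndian::Store32(buf, 0x04030201);
//   // buf now holds {0x01, 0x02, 0x03, 0x04} on every platform.
//   uint32_t v = LittleEndian::Load32(buf);  // v == 0x04030201.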

// Some bit-manipulation functions.
class Bits {
 public:
  // Return floor(log2(n)) for positive integer n.
  static int Log2FloorNonZero(uint32_t n);

  // Return floor(log2(n)) for positive integer n.  Returns -1 iff n == 0.
  static int Log2Floor(uint32_t n);

  // Return the first set least / most significant bit, 0-indexed.  Returns an
  // undefined value if n == 0.  FindLSBSetNonZero() is similar to ffs() except
  // that it's 0-indexed.
  static int FindLSBSetNonZero(uint32_t n);
  static int FindLSBSetNonZero64(uint64_t n);

 private:
  // No copying
  Bits(const Bits&);
  void operator=(const Bits&);
};

#if HAVE_BUILTIN_CTZ

inline int Bits::Log2FloorNonZero(uint32_t n) {
  assert(n != 0);
  // (31 ^ x) is equivalent to (31 - x) for x in [0, 31]. An easy proof
  // represents subtraction in base 2 and observes that there's no carry.
  //
  // GCC and Clang represent __builtin_clz on x86 as 31 ^ _bit_scan_reverse(x).
  // Using "31 ^" here instead of "31 -" allows the optimizer to strip the
  // function body down to _bit_scan_reverse(x).
  return 31 ^ __builtin_clz(n);
}

inline int Bits::Log2Floor(uint32_t n) {
  return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
}

inline int Bits::FindLSBSetNonZero(uint32_t n) {
  assert(n != 0);
  return __builtin_ctz(n);
}

#elif defined(_MSC_VER)

inline int Bits::Log2FloorNonZero(uint32_t n) {
  assert(n != 0);
  // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
  unsigned long where;
  _BitScanReverse(&where, n);
  return static_cast<int>(where);
}

inline int Bits::Log2Floor(uint32_t n) {
  // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
  unsigned long where;
  if (_BitScanReverse(&where, n))
    return static_cast<int>(where);
  return -1;
}

inline int Bits::FindLSBSetNonZero(uint32_t n) {
  assert(n != 0);
  // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
  unsigned long where;
  if (_BitScanForward(&where, n))
    return static_cast<int>(where);
  return 32;
}

#else  // Portable versions.

inline int Bits::Log2FloorNonZero(uint32_t n) {
  assert(n != 0);
  int log = 0;
  uint32_t value = n;
  for (int i = 4; i >= 0; --i) {
    int shift = (1 << i);
    uint32_t x = value >> shift;
    if (x != 0) {
      value = x;
      log += shift;
    }
  }
  assert(value == 1);
  return log;
}

inline int Bits::Log2Floor(uint32_t n) {
  return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
}

inline int Bits::FindLSBSetNonZero(uint32_t n) {
  assert(n != 0);
  int rc = 31;
  for (int i = 4, shift = 1 << 4; i >= 0; --i) {
    const uint32_t x = n << shift;
    if (x != 0) {
      n = x;
      rc -= shift;
    }
    shift >>= 1;
  }
  return rc;
}

#endif  // End portable versions.
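
// Illustrative sketch (not part of the original header): a few worked
// values pinning down the conventions used above.
//
//   Bits::Log2FloorNonZero(1) == 0     // floor(log2(1))
//   Bits::Log2FloorNonZero(9) == 3     // 2^3 <= 9 < 2^4
//   Bits::Log2Floor(0) == -1           // the only defined result for 0
//   Bits::FindLSBSetNonZero(0x8) == 3  // lowest set bit, 0-indexed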

#if HAVE_BUILTIN_CTZ

inline int Bits::FindLSBSetNonZero64(uint64_t n) {
  assert(n != 0);
  return __builtin_ctzll(n);
}

#elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))

// _BitScanForward64() is only available on x64 and ARM64.
inline int Bits::FindLSBSetNonZero64(uint64_t n) {
  assert(n != 0);
  // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
  unsigned long where;
  if (_BitScanForward64(&where, n))
    return static_cast<int>(where);
  return 64;
}

#else  // Portable version.

// FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
inline int Bits::FindLSBSetNonZero64(uint64_t n) {
  assert(n != 0);
  const uint32_t bottombits = static_cast<uint32_t>(n);
  if (bottombits == 0) {
    // Bottom bits are zero, so scan the top bits.
    return 32 + FindLSBSetNonZero(static_cast<uint32_t>(n >> 32));
  } else {
    return FindLSBSetNonZero(bottombits);
  }
}

#endif  // HAVE_BUILTIN_CTZ
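
// Illustrative sketch (not part of the original header): the portable
// 64-bit version just decides which 32-bit half holds the lowest set bit.
//
//   Bits::FindLSBSetNonZero64(0x0000000100000000ULL) == 32  // low half empty
//   Bits::FindLSBSetNonZero64(0x0000000100000010ULL) == 4   // low half wins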

// Variable-length integer encoding.
class Varint {
 public:
  // Maximum lengths of varint encoding of uint32_t.
  static const int kMax32 = 5;

  // Attempts to parse a varint32 from a prefix of the bytes in [ptr, limit-1].
  // Never reads a character at or beyond limit.  If a valid/terminated
  // varint32 was found in the range, stores it in *OUTPUT and returns a
  // pointer just past the last byte of the varint32.  Else returns NULL.
  // On success, "result <= limit".
  static const char* Parse32WithLimit(const char* ptr, const char* limit,
                                      uint32_t* OUTPUT);

  // REQUIRES   "ptr" points to a buffer of length sufficient to hold "v".
  // EFFECTS    Encodes "v" into "ptr" and returns a pointer to the
  //            byte just past the last encoded byte.
  static char* Encode32(char* ptr, uint32_t v);

  // EFFECTS    Appends the varint representation of "value" to "*s".
  static void Append32(std::string* s, uint32_t value);
};

inline const char* Varint::Parse32WithLimit(const char* p,
                                            const char* l,
                                            uint32_t* OUTPUT) {
  const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
  const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
  uint32_t b, result;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result = b & 127;          if (b < 128) goto done;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result |= (b & 127) <<  7; if (b < 128) goto done;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result |= (b & 127) << 14; if (b < 128) goto done;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result |= (b & 127) << 21; if (b < 128) goto done;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result |= (b & 127) << 28; if (b < 16) goto done;
  return NULL;  // Value is too long to be a varint32
 done:
  *OUTPUT = result;
  return reinterpret_cast<const char*>(ptr);
}

inline char* Varint::Encode32(char* sptr, uint32_t v) {
  // Operate on characters as unsigneds
  uint8_t* ptr = reinterpret_cast<uint8_t*>(sptr);
  static const uint8_t B = 128;
  if (v < (1 << 7)) {
    *(ptr++) = static_cast<uint8_t>(v);
  } else if (v < (1 << 14)) {
    *(ptr++) = static_cast<uint8_t>(v | B);
    *(ptr++) = static_cast<uint8_t>(v >> 7);
  } else if (v < (1 << 21)) {
    *(ptr++) = static_cast<uint8_t>(v | B);
    *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
    *(ptr++) = static_cast<uint8_t>(v >> 14);
  } else if (v < (1 << 28)) {
    *(ptr++) = static_cast<uint8_t>(v | B);
    *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
    *(ptr++) = static_cast<uint8_t>((v >> 14) | B);
    *(ptr++) = static_cast<uint8_t>(v >> 21);
  } else {
    *(ptr++) = static_cast<uint8_t>(v | B);
    *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
    *(ptr++) = static_cast<uint8_t>((v >> 14) | B);
    *(ptr++) = static_cast<uint8_t>((v >> 21) | B);
    *(ptr++) = static_cast<uint8_t>(v >> 28);
  }
  return reinterpret_cast<char*>(ptr);
}
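
// Illustrative sketch (not part of the original header): each byte carries
// 7 payload bits, with the high bit set on every byte except the last, so a
// round trip through Encode32/Parse32WithLimit recovers the value.
//
//   char buf[Varint::kMax32];
//   char* end = Varint::Encode32(buf, 300);  // 300 -> 0xAC 0x02 (two bytes)
//   uint32_t out;
//   const char* p = Varint::Parse32WithLimit(buf, end, &out);
//   assert(p == end && out == 300);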

// If you know the internal layout of the std::string in use, you can
// replace this function with one that resizes the string without
// filling the new space with zeros (if applicable) --
// it will be non-portable but faster.
inline void STLStringResizeUninitialized(std::string* s, size_t new_size) {
  s->resize(new_size);
}

// Return a mutable char* pointing to a string's internal buffer,
// which may not be null-terminated. Writing through this pointer will
// modify the string.
//
// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
// next call to a string method that invalidates iterators.
//
// Before C++11 there was no standard-blessed way of getting a mutable
// reference to a string's internal buffer. LWG issue 530
// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
// proposed this method, and since C++11 a string's storage is guaranteed
// to be contiguous, so it works on all current implementations.
inline char* string_as_array(std::string* str) {
  return str->empty() ? NULL : &*str->begin();
}
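
// Illustrative sketch (not part of the original header): the two helpers
// above combine into a common pattern for filling a string in place.
//
//   std::string s;
//   STLStringResizeUninitialized(&s, 4);
//   std::memcpy(string_as_array(&s), "abcd", 4);  // s == "abcd"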

}  // namespace snappy

#endif  // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_