city.cpp 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371
  1. // Copyright (c) 2011 Google, Inc.
  2. // Permission is hereby granted, free of charge, to any person obtaining a copy
  3. // of this software and associated documentation files (the "Software"), to deal
  4. // in the Software without restriction, including without limitation the rights
  5. // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  6. // copies of the Software, and to permit persons to whom the Software is
  7. // furnished to do so, subject to the following conditions:
  8. // The above copyright notice and this permission notice shall be included in
  9. // all copies or substantial portions of the Software.
  10. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  11. // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  12. // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  13. // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  14. // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  15. // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  16. // THE SOFTWARE.
  17. // CityHash Version 1, by Geoff Pike and Jyrki Alakuijala
  18. // This file provides CityHash64() and related functions.
  19. // It's probably possible to create even faster hash functions by
  20. // writing a program that systematically explores some of the space of
  21. // possible hash functions, by using SIMD instructions, or by
  22. // compromising on hash quality.
  23. #include "city.h"
  24. #include "city_streaming.h"
  25. using uint8 = ui8;
  26. using uint32 = ui32;
  27. using uint64 = ui64;
  28. #include <util/system/unaligned_mem.h>
  29. #include <util/generic/algorithm.h>
  30. using namespace std;
  31. //#define UNALIGNED_LOAD64(p) (*(const uint64*)(p))
  32. //#define UNALIGNED_LOAD32(p) (*(const uint32*)(p))
  33. #define UNALIGNED_LOAD64(p) (ReadUnaligned<uint64>((const void*)(p)))
  34. #define UNALIGNED_LOAD32(p) (ReadUnaligned<uint32>((const void*)(p)))
  35. #define LIKELY(x) Y_LIKELY(!!(x))
  36. // Some primes between 2^63 and 2^64 for various uses.
  37. static const uint64 k0 = 0xc3a5c85c97cb3127ULL;
  38. static const uint64 k1 = 0xb492b66fbe98f273ULL;
  39. static const uint64 k2 = 0x9ae16a3b2f90404fULL;
  40. static const uint64 k3 = 0xc949d7c7509e6557ULL;
  41. // Bitwise right rotate. Normally this will compile to a single
  42. // instruction, especially if the shift is a manifest constant.
  43. static uint64 Rotate(uint64 val, int shift) {
  44. // Avoid shifting by 64: doing so yields an undefined result.
  45. return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
  46. }
  47. // Equivalent to Rotate(), but requires the second arg to be non-zero.
  48. // On x86-64, and probably others, it's possible for this to compile
  49. // to a single instruction if both args are already in registers.
  50. static uint64 RotateByAtLeast1(uint64 val, int shift) {
  51. return (val >> shift) | (val << (64 - shift));
  52. }
  53. static uint64 ShiftMix(uint64 val) {
  54. return val ^ (val >> 47);
  55. }
  56. static uint64 HashLen16(uint64 u, uint64 v) {
  57. return Hash128to64(uint128(u, v));
  58. }
// Hashes strings of 0 to 16 bytes. Three regimes by length, each using
// reads that stay inside [s, s + len).
static uint64 HashLen0to16(const char* s, size_t len) {
    if (len > 8) {
        // 9..16 bytes: two possibly-overlapping 8-byte loads cover the
        // whole string.
        uint64 a = UNALIGNED_LOAD64(s);
        uint64 b = UNALIGNED_LOAD64(s + len - 8);
        return HashLen16(a, RotateByAtLeast1(b + len, static_cast<int>(len))) ^ b;
    }
    if (len >= 4) {
        // 4..8 bytes: two possibly-overlapping 4-byte loads.
        uint64 a = UNALIGNED_LOAD32(s);
        return HashLen16(len + (a << 3), UNALIGNED_LOAD32(s + len - 4));
    }
    if (len > 0) {
        // 1..3 bytes: mix first, middle, and last byte with the length.
        uint8 a = s[0];
        uint8 b = s[len >> 1];
        uint8 c = s[len - 1];
        uint32 y = static_cast<uint32>(a) + (static_cast<uint32>(b) << 8);
        uint32 z = static_cast<uint32>(len) + (static_cast<uint32>(c) << 2);
        return ShiftMix(y * k2 ^ z * k3) * k2;
    }
    // Empty string hashes to the constant k2.
    return k2;
}
// This probably works well for 16-byte strings as well, but it may be overkill
// in that case.
// Hashes strings of 17 to 32 bytes using four overlapping 8-byte loads
// (two from each end), each scaled by a distinct prime.
static uint64 HashLen17to32(const char* s, size_t len) {
    uint64 a = UNALIGNED_LOAD64(s) * k1;
    uint64 b = UNALIGNED_LOAD64(s + 8);
    uint64 c = UNALIGNED_LOAD64(s + len - 8) * k2;
    uint64 d = UNALIGNED_LOAD64(s + len - 16) * k0;
    return HashLen16(Rotate(a - b, 43) + Rotate(c, 30) + d,
                     a + Rotate(b ^ k3, 20) - c + len);
}
// Return a 16-byte hash for 48 bytes. Quick and dirty.
// Callers do best to use "random-looking" values for a and b.
// w, x, y, z are the four 8-byte words of a 32-byte chunk; a and b are
// the running seeds. Statement order is significant: c snapshots a
// after only w has been added.
static pair<uint64, uint64> WeakHashLen32WithSeeds(
    uint64 w, uint64 x, uint64 y, uint64 z, uint64 a, uint64 b) {
    a += w;
    b = Rotate(b + a + z, 21);
    uint64 c = a;
    a += x;
    a += y;
    b += Rotate(a, 44);
    return make_pair(a + z, b + c);
}
  101. // Return a 16-byte hash for s[0] ... s[31], a, and b. Quick and dirty.
  102. static pair<uint64, uint64> WeakHashLen32WithSeeds(
  103. const char* s, uint64 a, uint64 b) {
  104. return WeakHashLen32WithSeeds(UNALIGNED_LOAD64(s),
  105. UNALIGNED_LOAD64(s + 8),
  106. UNALIGNED_LOAD64(s + 16),
  107. UNALIGNED_LOAD64(s + 24),
  108. a,
  109. b);
  110. }
// Return an 8-byte hash for 33 to 64 bytes.
// Runs the same rotate/add pipeline twice: once over the first 32
// bytes (producing vf/vs), once over the last 32 bytes (producing
// wf/ws), then cross-combines the two halves.
static uint64 HashLen33to64(const char* s, size_t len) {
    // First pass: bytes [0, 32) plus the length and a word near the end.
    uint64 z = UNALIGNED_LOAD64(s + 24);
    uint64 a = UNALIGNED_LOAD64(s) + (len + UNALIGNED_LOAD64(s + len - 16)) * k0;
    uint64 b = Rotate(a + z, 52);
    uint64 c = Rotate(a, 37);
    a += UNALIGNED_LOAD64(s + 8);
    c += Rotate(a, 7);
    a += UNALIGNED_LOAD64(s + 16);
    uint64 vf = a + z;
    uint64 vs = b + Rotate(a, 31) + c;
    // Second pass: the mirrored computation over bytes [len - 32, len).
    a = UNALIGNED_LOAD64(s + 16) + UNALIGNED_LOAD64(s + len - 32);
    z = UNALIGNED_LOAD64(s + len - 8);
    b = Rotate(a + z, 52);
    c = Rotate(a, 37);
    a += UNALIGNED_LOAD64(s + len - 24);
    c += Rotate(a, 7);
    a += UNALIGNED_LOAD64(s + len - 16);
    uint64 wf = a + z;
    uint64 ws = b + Rotate(a, 31) + c;
    // Cross-combine the two passes and finish with two mix rounds.
    uint64 r = ShiftMix((vf + ws) * k2 + (wf + vs) * k0);
    return ShiftMix(r * k0 + vs) * k2;
}
// Hash function for a byte array. Dispatches to a length-specialized
// routine for len <= 64, otherwise runs the 56-byte-state chunk loop.
uint64 CityHash64(const char* s, size_t len) noexcept {
    if (len <= 32) {
        if (len <= 16) {
            return HashLen0to16(s, len);
        } else {
            return HashLen17to32(s, len);
        }
    } else if (len <= 64) {
        return HashLen33to64(s, len);
    }
    // For strings over 64 bytes we hash the end first, and then as we
    // loop we keep 56 bytes of state: v, w, x, y, and z.
    uint64 x = UNALIGNED_LOAD64(s);
    uint64 y = UNALIGNED_LOAD64(s + len - 16) ^ k1;
    uint64 z = UNALIGNED_LOAD64(s + len - 56) ^ k0;
    pair<uint64, uint64> v = WeakHashLen32WithSeeds(s + len - 64, len, y);
    pair<uint64, uint64> w = WeakHashLen32WithSeeds(s + len - 32, len * k1, k0);
    z += ShiftMix(v.second) * k1;
    x = Rotate(z + x, 39) * k1;
    y = Rotate(y, 33) * k1;
    // Decrease len to the nearest multiple of 64, and operate on 64-byte chunks.
    // (len - 1) & ~63 rather than len & ~63: a final partial chunk was
    // already covered by the end-of-string loads above.
    len = (len - 1) & ~static_cast<size_t>(63);
    do {
        x = Rotate(x + y + v.first + UNALIGNED_LOAD64(s + 16), 37) * k1;
        y = Rotate(y + v.second + UNALIGNED_LOAD64(s + 48), 42) * k1;
        x ^= w.second;
        y ^= v.first;
        z = Rotate(z ^ w.first, 33);
        v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
        w = WeakHashLen32WithSeeds(s + 32, z + w.second, y);
        DoSwap(z, x);
        s += 64;
        len -= 64;
    } while (len != 0);
    // Fold the 56 bytes of state down to 8.
    return HashLen16(HashLen16(v.first, w.first) + ShiftMix(y) * k1 + z,
                     HashLen16(v.second, w.second) + x);
}
  171. uint64 CityHash64WithSeed(const char* s, size_t len, uint64 seed) noexcept {
  172. return CityHash64WithSeeds(s, len, k2, seed);
  173. }
  174. uint64 CityHash64WithSeeds(const char* s, size_t len,
  175. uint64 seed0, uint64 seed1) noexcept {
  176. return HashLen16(CityHash64(s, len) - seed0, seed1);
  177. }
// A subroutine for CityHash128(). Returns a decent 128-bit hash for strings
// of any length representable in ssize_t. Based on City and Murmur.
static uint128 CityMurmur(const char* s, size_t len, uint128 seed) {
    uint64 a = Uint128Low64(seed);
    uint64 b = Uint128High64(seed);
    uint64 c = 0;
    uint64 d = 0;
    // Signed so that len < 16 yields a non-positive remainder.
    ssize_t l = len - 16;
    if (l <= 0) { // len <= 16
        c = b * k1 + HashLen0to16(s, len);
        d = Rotate(a + (len >= 8 ? UNALIGNED_LOAD64(s) : c), 32);
    } else { // len > 16
        c = HashLen16(UNALIGNED_LOAD64(s + len - 8) + k1, a);
        d = HashLen16(b + len, c + UNALIGNED_LOAD64(s + len - 16));
        a += d;
        // Murmur-style loop over 16-byte blocks; the final partial block
        // was already absorbed via the end-of-string loads above.
        do {
            a ^= ShiftMix(UNALIGNED_LOAD64(s) * k1) * k1;
            a *= k1;
            b ^= a;
            c ^= ShiftMix(UNALIGNED_LOAD64(s + 8) * k1) * k1;
            c *= k1;
            d ^= c;
            s += 16;
            l -= 16;
        } while (l > 0);
    }
    // Final cross-mix of the four accumulators into 128 bits.
    a = HashLen16(a, c);
    b = HashLen16(d, b);
    return uint128(a ^ b, HashLen16(b, a));
}
// 128-bit hash with an explicit 128-bit seed. Short inputs go to
// CityMurmur; longer ones run a manually unrolled version of the
// CityHash64 chunk loop plus a 32-byte tail pass.
uint128 CityHash128WithSeed(const char* s, size_t len, uint128 seed) noexcept {
    if (len < 128) {
        return CityMurmur(s, len, seed);
    }
    // We expect len >= 128 to be the common case. Keep 56 bytes of state:
    // v, w, x, y, and z.
    pair<uint64, uint64> v, w;
    uint64 x = Uint128Low64(seed);
    uint64 y = Uint128High64(seed);
    uint64 z = len * k1;
    v.first = Rotate(y ^ k1, 49) * k1 + UNALIGNED_LOAD64(s);
    v.second = Rotate(v.first, 42) * k1 + UNALIGNED_LOAD64(s + 8);
    w.first = Rotate(y + z, 35) * k1 + x;
    // Reading s + 88 is safe: this branch requires len >= 128.
    w.second = Rotate(x + UNALIGNED_LOAD64(s + 88), 53) * k1;
    // This is the same inner loop as CityHash64(), manually unrolled.
    do {
        x = Rotate(x + y + v.first + UNALIGNED_LOAD64(s + 16), 37) * k1;
        y = Rotate(y + v.second + UNALIGNED_LOAD64(s + 48), 42) * k1;
        x ^= w.second;
        y ^= v.first;
        z = Rotate(z ^ w.first, 33);
        v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
        w = WeakHashLen32WithSeeds(s + 32, z + w.second, y);
        DoSwap(z, x);
        s += 64;
        x = Rotate(x + y + v.first + UNALIGNED_LOAD64(s + 16), 37) * k1;
        y = Rotate(y + v.second + UNALIGNED_LOAD64(s + 48), 42) * k1;
        x ^= w.second;
        y ^= v.first;
        z = Rotate(z ^ w.first, 33);
        v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
        w = WeakHashLen32WithSeeds(s + 32, z + w.second, y);
        DoSwap(z, x);
        s += 64;
        len -= 128;
    } while (LIKELY(len >= 128));
    y += Rotate(w.first, 37) * k0 + z;
    x += Rotate(v.first + z, 49) * k0;
    // If 0 < len < 128, hash up to 4 chunks of 32 bytes each from the end of s.
    for (size_t tail_done = 0; tail_done < len;) {
        tail_done += 32;
        y = Rotate(y - x, 42) * k0 + v.second;
        w.first += UNALIGNED_LOAD64(s + len - tail_done + 16);
        x = Rotate(x, 49) * k0 + w.first;
        w.first += v.first;
        v = WeakHashLen32WithSeeds(s + len - tail_done, v.first, v.second);
    }
    // At this point our 48 bytes of state should contain more than
    // enough information for a strong 128-bit hash. We use two
    // different 48-byte-to-8-byte hashes to get a 16-byte final result.
    x = HashLen16(x, v.first);
    y = HashLen16(y, w.first);
    return uint128(HashLen16(x + v.second, w.second) + y,
                   HashLen16(x + w.second, y + v.second));
}
  263. uint128 CityHash128(const char* s, size_t len) noexcept {
  264. if (len >= 16) {
  265. return CityHash128WithSeed(s + 16,
  266. len - 16,
  267. uint128(UNALIGNED_LOAD64(s) ^ k3,
  268. UNALIGNED_LOAD64(s + 8)));
  269. } else if (len >= 8) {
  270. return CityHash128WithSeed(nullptr,
  271. 0,
  272. uint128(UNALIGNED_LOAD64(s) ^ (len * k0),
  273. UNALIGNED_LOAD64(s + len - 8) ^ k1));
  274. } else {
  275. return CityHash128WithSeed(s, len, uint128(k0, k1));
  276. }
  277. }
// Initializes streaming state for a string whose total length is known
// up front. head64 points at the first 64 bytes of the stream and
// tail64 at the last 64 bytes; the setup mirrors the >64-byte path of
// CityHash64(), where tail64 == s + len - 64 (so tail64 + 48 is
// s + len - 16 and tail64 + 8 is s + len - 56).
TStreamingCityHash64::TStreamingCityHash64(size_t len, const char *head64, const char *tail64) {
    Y_ASSERT(len > 64);
    x = UNALIGNED_LOAD64(head64);
    y = UNALIGNED_LOAD64(tail64 + 48) ^ k1;
    z = UNALIGNED_LOAD64(tail64 + 8) ^ k0;
    v = WeakHashLen32WithSeeds(tail64, len, y);
    w = WeakHashLen32WithSeeds(tail64 + 32, len * k1, k0);
    z += ShiftMix(v.second) * k1;
    x = Rotate(z + x, 39) * k1;
    y = Rotate(y, 33) * k1;
    // Number of 64-byte chunks Process() must consume; equals
    // CityHash64()'s len = (len - 1) & ~63 round-down divided by 64.
    Rest64_ = (len - 1) / 64;
    UnalignBufSz_ = 0;
}
// Feeds stream data into the hash state, 64 bytes at a time, running
// the same chunk round as CityHash64()'s inner loop. Partial chunks
// are staged in UnalignBuf_ until a full 64 bytes are available. Once
// Rest64_ reaches zero, any further input is ignored (the tail was
// already absorbed by the constructor).
void TStreamingCityHash64::Process(const char *s, size_t avail) {
    if (Y_UNLIKELY(!Rest64_)) return;
    if (UnalignBufSz_) {
        if (UnalignBufSz_ + avail < 64) {
            // Still short of a full chunk: just accumulate and return.
            memcpy(&UnalignBuf_[UnalignBufSz_], s, avail);
            UnalignBufSz_ += avail;
            return;
        } else {
            // Top up the staged chunk to 64 bytes and mix it in.
            memcpy(&UnalignBuf_[UnalignBufSz_], s, 64 - UnalignBufSz_);
            x = Rotate(x + y + v.first + UNALIGNED_LOAD64(UnalignBuf_ + 16), 37) * k1;
            y = Rotate(y + v.second + UNALIGNED_LOAD64(UnalignBuf_ + 48), 42) * k1;
            x ^= w.second;
            y ^= v.first;
            z = Rotate(z ^ w.first, 33);
            v = WeakHashLen32WithSeeds(UnalignBuf_, v.second * k1, x + w.first);
            w = WeakHashLen32WithSeeds(UnalignBuf_ + 32, z + w.second, y);
            DoSwap(z, x);
            // Advance past the bytes consumed from s; UnalignBufSz_ is
            // cleared only after both adjustments use its old value.
            s += 64 - UnalignBufSz_;
            avail -= 64 - UnalignBufSz_;
            Rest64_--;
            UnalignBufSz_ = 0;
        }
    }
    // Consume as many full 64-byte chunks as remain in this buffer.
    while(Rest64_ && avail >= 64) {
        x = Rotate(x + y + v.first + UNALIGNED_LOAD64(s + 16), 37) * k1;
        y = Rotate(y + v.second + UNALIGNED_LOAD64(s + 48), 42) * k1;
        x ^= w.second;
        y ^= v.first;
        z = Rotate(z ^ w.first, 33);
        v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
        w = WeakHashLen32WithSeeds(s + 32, z + w.second, y);
        DoSwap(z, x);
        s += 64;
        avail -= 64;
        Rest64_--;
    }
    // Stage any leftover partial chunk for the next call.
    if (Rest64_ && avail) {
        memcpy(UnalignBuf_, s, avail);
        UnalignBufSz_ = avail;
    }
}
  332. uint64 TStreamingCityHash64::operator() () {
  333. return HashLen16(HashLen16(v.first, w.first) + ShiftMix(y) * k1 + z,
  334. HashLen16(v.second, w.second) + x);
  335. }