// DenseMap.h
  1. #pragma once
  2. #ifdef __GNUC__
  3. #pragma GCC diagnostic push
  4. #pragma GCC diagnostic ignored "-Wunused-parameter"
  5. #endif
  6. //===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
  7. //
  8. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  9. // See https://llvm.org/LICENSE.txt for license information.
  10. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  11. //
  12. //===----------------------------------------------------------------------===//
  13. ///
  14. /// \file
  15. /// This file defines the DenseMap class.
  16. ///
  17. //===----------------------------------------------------------------------===//
  18. #ifndef LLVM_ADT_DENSEMAP_H
  19. #define LLVM_ADT_DENSEMAP_H
  20. #include "llvm/ADT/DenseMapInfo.h"
  21. #include "llvm/ADT/EpochTracker.h"
  22. #include "llvm/Support/AlignOf.h"
  23. #include "llvm/Support/Compiler.h"
  24. #include "llvm/Support/MathExtras.h"
  25. #include "llvm/Support/MemAlloc.h"
  26. #include "llvm/Support/ReverseIteration.h"
  27. #include "llvm/Support/type_traits.h"
  28. #include <algorithm>
  29. #include <cassert>
  30. #include <cstddef>
  31. #include <cstring>
  32. #include <initializer_list>
  33. #include <iterator>
  34. #include <new>
  35. #include <type_traits>
  36. #include <utility>
  37. namespace llvm {
  38. namespace detail {
  39. // We extend a pair to allow users to override the bucket type with their own
  40. // implementation without requiring two members.
  41. template <typename KeyT, typename ValueT>
  42. struct DenseMapPair : public std::pair<KeyT, ValueT> {
  43. using std::pair<KeyT, ValueT>::pair;
  44. KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
  45. const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
  46. ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
  47. const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
  48. };
  49. } // end namespace detail
// Forward declaration of the iterator type; the full definition appears later
// in this file. The defaults mirror those of DenseMap itself so the two can be
// named with the same template arguments.
template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
          bool IsConst = false>
class DenseMapIterator;
/// CRTP base class implementing the quadratically probed hash table shared by
/// DenseMap and its variants.
///
/// DerivedT supplies the storage and bookkeeping primitives this class calls
/// through static_cast: getBuckets(), getNumBuckets(), getNumEntries(),
/// setNumEntries(), getNumTombstones(), setNumTombstones(), grow(), and
/// shrink_and_clear(). KeyInfoT provides the empty/tombstone sentinel keys,
/// hashing, and key equality.
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
class DenseMapBase : public DebugEpochBase {
  template <typename T>
  using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;

public:
  using size_type = unsigned;
  using key_type = KeyT;
  using mapped_type = ValueT;
  using value_type = BucketT;

  using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
  using const_iterator =
      DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;

  inline iterator begin() {
    // When the map is empty, avoid the overhead of advancing/retreating past
    // empty buckets.
    if (empty())
      return end();
    if (shouldReverseIterate<KeyT>())
      return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
    return makeIterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline iterator end() {
    return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }
  inline const_iterator begin() const {
    if (empty())
      return end();
    if (shouldReverseIterate<KeyT>())
      return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
    return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline const_iterator end() const {
    return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }

  [[nodiscard]] bool empty() const { return getNumEntries() == 0; }
  unsigned size() const { return getNumEntries(); }

  /// Grow the densemap so that it can contain at least \p NumEntries items
  /// before resizing again.
  void reserve(size_type NumEntries) {
    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
    incrementEpoch();
    if (NumBuckets > getNumBuckets())
      grow(NumBuckets);
  }

  /// Remove all entries, resetting every bucket to the empty key. May shrink
  /// the bucket array if it is large relative to the number of entries.
  void clear() {
    incrementEpoch();
    if (getNumEntries() == 0 && getNumTombstones() == 0) return;

    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
      shrink_and_clear();
      return;
    }

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    if (std::is_trivially_destructible<ValueT>::value) {
      // Use a simpler loop when values don't need destruction.
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
        P->getFirst() = EmptyKey;
    } else {
      // Values must be destroyed; also count live entries to cross-check the
      // stored entry count.
      unsigned NumEntries = getNumEntries();
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
          if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
            P->getSecond().~ValueT();
            --NumEntries;
          }
          P->getFirst() = EmptyKey;
        }
      }
      assert(NumEntries == 0 && "Node count imbalance!");
      (void)NumEntries;
    }
    setNumEntries(0);
    setNumTombstones(0);
  }

  /// Return 1 if the specified key is in the map, 0 otherwise.
  size_type count(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    return LookupBucketFor(Val, TheBucket) ? 1 : 0;
  }

  /// Return an iterator to the bucket holding \p Val, or end() if absent.
  iterator find(const_arg_type_t<KeyT> Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeIterator(TheBucket,
                          shouldReverseIterate<KeyT>() ? getBuckets()
                                                       : getBucketsEnd(),
                          *this, true);
    return end();
  }
  const_iterator find(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeConstIterator(TheBucket,
                               shouldReverseIterate<KeyT>() ? getBuckets()
                                                            : getBucketsEnd(),
                               *this, true);
    return end();
  }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <class LookupKeyT>
  iterator find_as(const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeIterator(TheBucket,
                          shouldReverseIterate<KeyT>() ? getBuckets()
                                                       : getBucketsEnd(),
                          *this, true);
    return end();
  }
  template <class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeConstIterator(TheBucket,
                               shouldReverseIterate<KeyT>() ? getBuckets()
                                                            : getBucketsEnd(),
                               *this, true);
    return end();
  }

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return TheBucket->getSecond();
    return ValueT();
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    return try_emplace(KV.first, KV.second);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    return try_emplace(std::move(KV.first), std::move(KV.second));
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket =
        InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }

  /// Alternate version of insert() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <typename LookupKeyT>
  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
                                      const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
                                           std::move(KV.second), Val);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }

  /// insert - Range insertion of pairs.
  template <typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }

  /// Erase \p Val from the map if present. Returns true if an entry was
  /// removed. The bucket is turned into a tombstone, not an empty slot, so
  /// probe chains passing through it stay intact.
  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false; // not in map.

    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }

  /// Erase the entry pointed to by \p I, leaving a tombstone in its bucket.
  void erase(iterator I) {
    BucketT *TheBucket = &*I;
    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }

  /// Return the bucket for \p Key, default-constructing the value if the key
  /// was absent.
  value_type &FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, Key);
  }

  ValueT &operator[](const KeyT &Key) {
    return FindAndConstruct(Key).second;
  }

  /// Overload that moves \p Key into the bucket when a new entry is created.
  value_type &FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, std::move(Key));
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(std::move(Key)).second;
  }

  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array. In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
  const void *getPointerIntoBucketsArray() const { return getBuckets(); }

protected:
  DenseMapBase() = default;

  /// Destroy all live keys/values and the key objects of every bucket. Does
  /// not free the bucket memory itself (the derived class owns that).
  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
        P->getSecond().~ValueT();
      P->getFirst().~KeyT();
    }
  }

  /// Construct the empty-key sentinel in every bucket and zero the counters.
  /// Assumes raw (unconstructed) bucket storage.
  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    assert((getNumBuckets() & (getNumBuckets() - 1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      ::new (&B->getFirst()) KeyT(EmptyKey);
  }

  /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without need to grow().
  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
    // Ensure that "NumEntries * 4 < NumBuckets * 3"
    if (NumEntries == 0)
      return 0;
    // +1 is required because of the strict inequality. For example, with
    // NumEntries == 48, 48*4/3 == 64 exactly, and 64 buckets would make
    // NumEntries * 4 == NumBuckets * 3; the +1 yields 65, so NextPowerOf2
    // returns 128.
    return NextPowerOf2(NumEntries * 4 / 3 + 1);
  }

  /// Rehash every live entry of [OldBucketsBegin, OldBucketsEnd) into the
  /// (freshly allocated) current bucket array, destroying the old keys and
  /// values as it goes.
  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        DestBucket->getFirst() = std::move(B->getFirst());
        ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
        incrementNumEntries();

        // Free the value.
        B->getSecond().~ValueT();
      }
      B->getFirst().~KeyT();
    }
  }

  /// Deep-copy the contents of \p other into this map. Requires that this
  /// map's bucket array was already allocated with the same size as other's
  /// and contains no live entries.
  template <typename OtherBaseT>
  void copyFrom(
      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
    assert(&other != this);
    assert(getNumBuckets() == other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (std::is_trivially_copyable<KeyT>::value &&
        std::is_trivially_copyable<ValueT>::value)
      memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
             getNumBuckets() * sizeof(BucketT));
    else
      for (size_t i = 0; i < getNumBuckets(); ++i) {
        // Every bucket gets a key copy; only live buckets get a value copy.
        ::new (&getBuckets()[i].getFirst())
            KeyT(other.getBuckets()[i].getFirst());
        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
          ::new (&getBuckets()[i].getSecond())
              ValueT(other.getBuckets()[i].getSecond());
      }
  }

  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  template <typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  static const KeyT getEmptyKey() {
    static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
                  "Must pass the derived type to this template!");
    return KeyInfoT::getEmptyKey();
  }

  static const KeyT getTombstoneKey() {
    return KeyInfoT::getTombstoneKey();
  }

private:
  // Wraps a raw bucket pointer in an iterator; when reverse iteration is
  // enabled, the bucket pointer is shifted so the iterator conventions match.
  iterator makeIterator(BucketT *P, BucketT *E,
                        DebugEpochBase &Epoch,
                        bool NoAdvance = false) {
    if (shouldReverseIterate<KeyT>()) {
      BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
      return iterator(B, E, Epoch, NoAdvance);
    }
    return iterator(P, E, Epoch, NoAdvance);
  }

  const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
                                   const DebugEpochBase &Epoch,
                                   const bool NoAdvance = false) const {
    if (shouldReverseIterate<KeyT>()) {
      const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
      return const_iterator(B, E, Epoch, NoAdvance);
    }
    return const_iterator(P, E, Epoch, NoAdvance);
  }

  // Bookkeeping accessors, all forwarded to the derived class (CRTP).
  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }

  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }

  void incrementNumEntries() {
    setNumEntries(getNumEntries() + 1);
  }

  void decrementNumEntries() {
    setNumEntries(getNumEntries() - 1);
  }

  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }

  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }

  void incrementNumTombstones() {
    setNumTombstones(getNumTombstones() + 1);
  }

  void decrementNumTombstones() {
    setNumTombstones(getNumTombstones() - 1);
  }

  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }

  BucketT *getBuckets() {
    return static_cast<DerivedT *>(this)->getBuckets();
  }

  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }

  BucketT *getBucketsEnd() {
    return getBuckets() + getNumBuckets();
  }

  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) {
    static_cast<DerivedT *>(this)->grow(AtLeast);
  }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }

  // Fill \p TheBucket (found by a prior LookupBucketFor) with Key/Values,
  // growing first if needed.
  template <typename KeyArg, typename... ValueArgs>
  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
                            ValueArgs &&... Values) {
    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);

    TheBucket->getFirst() = std::forward<KeyArg>(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
    return TheBucket;
  }

  // Variant used by insert_as: re-finds the bucket via the cheap Lookup key
  // if the table grows.
  template <typename LookupKeyT>
  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
                                      ValueT &&Value, LookupKeyT &Lookup) {
    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);

    TheBucket->getFirst() = std::move(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
    return TheBucket;
  }

  template <typename LookupKeyT>
  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
                                BucketT *TheBucket) {
    incrementEpoch();

    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The latter case is tricky. For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket. If the
    // table completely filled with tombstones, no lookup would ever succeed,
    // causing infinite loops in lookup.
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Lookup, TheBucket);
      NumBuckets = getNumBuckets();
    } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
                             NumBuckets/8)) {
      // grow() at the same size rehashes in place, purging tombstones.
      this->grow(NumBuckets);
      LookupBucketFor(Lookup, TheBucket);
    }
    assert(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }

  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket. If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    // NumBuckets is a power of two, so the mask implements modulo.
    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
    unsigned ProbeAmt = 1;
    while (true) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket? If so, return it.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the set.
      // Insert it and return the default value.
      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it. If Val ends up not in the map, we
      // prefer to return it than something that would require more probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket; // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets-1);
    }
  }

  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)
                      ->LookupBucketFor(Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }

public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// are not included.
  size_t getMemorySize() const {
    return getNumBuckets() * sizeof(BucketT);
  }
};
  590. /// Equality comparison for DenseMap.
  591. ///
  592. /// Iterates over elements of LHS confirming that each (key, value) pair in LHS
  593. /// is also in RHS, and that no additional pairs are in RHS.
  594. /// Equivalent to N calls to RHS.find and N value comparisons. Amortized
  595. /// complexity is linear, worst case is O(N^2) (if every hash collides).
  596. template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
  597. typename BucketT>
  598. bool operator==(
  599. const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
  600. const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  601. if (LHS.size() != RHS.size())
  602. return false;
  603. for (auto &KV : LHS) {
  604. auto I = RHS.find(KV.first);
  605. if (I == RHS.end() || I->second != KV.second)
  606. return false;
  607. }
  608. return true;
  609. }
/// Inequality comparison for DenseMap.
///
/// Equivalent to !(LHS == RHS). See operator== for performance notes.
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator!=(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  return !(LHS == RHS);
}
  620. template <typename KeyT, typename ValueT,
  621. typename KeyInfoT = DenseMapInfo<KeyT>,
  622. typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
  623. class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
  624. KeyT, ValueT, KeyInfoT, BucketT> {
  625. friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
  626. // Lift some types from the dependent base class into this class for
  627. // simplicity of referring to them.
  628. using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
  629. BucketT *Buckets;
  630. unsigned NumEntries;
  631. unsigned NumTombstones;
  632. unsigned NumBuckets;
  633. public:
  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
  /// this number of elements can be inserted in the map without grow().
  explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
  /// Copy constructor: allocate empty storage, then deep-copy other's buckets.
  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }
  /// Move constructor: initialize to an empty map, then swap with other,
  /// stealing its storage. other is left holding the fresh empty storage.
  DenseMap(DenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }
  /// Construct from an iterator range, reserving space for std::distance(I, E)
  /// elements up front so insertion does not trigger grow().
  template<typename InputIt>
  DenseMap(const InputIt &I, const InputIt &E) {
    init(std::distance(I, E));
    this->insert(I, E);
  }
  /// Construct from an initializer list of (key, value) buckets.
  DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
    init(Vals.size());
    this->insert(Vals.begin(), Vals.end());
  }
  /// Destroy all keys/values, then release the bucket array.
  ~DenseMap() {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
  }
  658. void swap(DenseMap& RHS) {
  659. this->incrementEpoch();
  660. RHS.incrementEpoch();
  661. std::swap(Buckets, RHS.Buckets);
  662. std::swap(NumEntries, RHS.NumEntries);
  663. std::swap(NumTombstones, RHS.NumTombstones);
  664. std::swap(NumBuckets, RHS.NumBuckets);
  665. }
  666. DenseMap& operator=(const DenseMap& other) {
  667. if (&other != this)
  668. copyFrom(other);
  669. return *this;
  670. }
  671. DenseMap& operator=(DenseMap &&other) {
  672. this->destroyAll();
  673. deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
  674. init(0);
  675. swap(other);
  676. return *this;
  677. }
  678. void copyFrom(const DenseMap& other) {
  679. this->destroyAll();
  680. deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
  681. if (allocateBuckets(other.NumBuckets)) {
  682. this->BaseT::copyFrom(other);
  683. } else {
  684. NumEntries = 0;
  685. NumTombstones = 0;
  686. }
  687. }
  688. void init(unsigned InitNumEntries) {
  689. auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
  690. if (allocateBuckets(InitBuckets)) {
  691. this->BaseT::initEmpty();
  692. } else {
  693. NumEntries = 0;
  694. NumTombstones = 0;
  695. }
  696. }
  697. void grow(unsigned AtLeast) {
  698. unsigned OldNumBuckets = NumBuckets;
  699. BucketT *OldBuckets = Buckets;
  700. allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
  701. assert(Buckets);
  702. if (!OldBuckets) {
  703. this->BaseT::initEmpty();
  704. return;
  705. }
  706. this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
  707. // Free the old table.
  708. deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
  709. alignof(BucketT));
  710. }
  711. void shrink_and_clear() {
  712. unsigned OldNumBuckets = NumBuckets;
  713. unsigned OldNumEntries = NumEntries;
  714. this->destroyAll();
  715. // Reduce the number of buckets.
  716. unsigned NewNumBuckets = 0;
  717. if (OldNumEntries)
  718. NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
  719. if (NewNumBuckets == NumBuckets) {
  720. this->BaseT::initEmpty();
  721. return;
  722. }
  723. deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
  724. alignof(BucketT));
  725. init(NewNumBuckets);
  726. }
  727. private:
  728. unsigned getNumEntries() const {
  729. return NumEntries;
  730. }
  731. void setNumEntries(unsigned Num) {
  732. NumEntries = Num;
  733. }
  734. unsigned getNumTombstones() const {
  735. return NumTombstones;
  736. }
  737. void setNumTombstones(unsigned Num) {
  738. NumTombstones = Num;
  739. }
  740. BucketT *getBuckets() const {
  741. return Buckets;
  742. }
  743. unsigned getNumBuckets() const {
  744. return NumBuckets;
  745. }
  746. bool allocateBuckets(unsigned Num) {
  747. NumBuckets = Num;
  748. if (NumBuckets == 0) {
  749. Buckets = nullptr;
  750. return false;
  751. }
  752. Buckets = static_cast<BucketT *>(
  753. allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
  754. return true;
  755. }
  756. };
/// A DenseMap variant with small-size optimization: up to \p InlineBuckets
/// buckets live directly inside the object, and only larger tables spill to
/// the heap. The active representation is selected by the 'Small' bit.
template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
class SmallDenseMap
    : public DenseMapBase<
          SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
          ValueT, KeyInfoT, BucketT> {
  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  static_assert(isPowerOf2_64(InlineBuckets),
                "InlineBuckets must be a power of 2.");

  unsigned Small : 1;       // True while the inline bucket array is in use.
  unsigned NumEntries : 31; // Live entry count; capped by setNumEntries's assert.
  unsigned NumTombstones;   // Count of erased buckets awaiting rehash.

  // Heap representation used once the table outgrows InlineBuckets.
  struct LargeRep {
    BucketT *Buckets;
    unsigned NumBuckets;
  };

  /// A "union" of an inline bucket array and the struct representing
  /// a large bucket. This union will be discriminated by the 'Small' bit.
  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;

public:
  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
    // Non-inline requests are rounded up to a power-of-two bucket count.
    if (NumInitBuckets > InlineBuckets)
      NumInitBuckets = NextPowerOf2(NumInitBuckets - 1);
    init(NumInitBuckets);
  }

  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  /// Move construction: start empty, then take \p other's contents via swap.
  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template <typename InputIt>
  SmallDenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
      : SmallDenseMap(Vals.begin(), Vals.end()) {}

  ~SmallDenseMap() {
    this->destroyAll();
    deallocateBuckets();
  }

  /// Swap contents with \p RHS. O(1) when both maps are in the large rep
  /// (pointer swap); otherwise the inline buckets must be moved element by
  /// element, respecting that empty/tombstone buckets hold a key but no value.
  void swap(SmallDenseMap &RHS) {
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = this->getEmptyKey();
    const KeyT TombstoneKey = this->getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any asymmetry.
        std::swap(LHSB->getFirst(), RHSB->getFirst());
        if (hasLHSValue) {
          ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
          LHSB->getSecond().~ValueT();
        } else if (hasRHSValue) {
          ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
          RHSB->getSecond().~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      // Both large: exchange the heap pointers and bucket counts.
      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
      return;
    }

    // Mixed case: one side inline, the other heap-allocated.
    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
      OldB->getFirst().~KeyT();
      if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
        // Only live buckets have a constructed value to move.
        ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
        OldB->getSecond().~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
  }

  SmallDenseMap &operator=(const SmallDenseMap &other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  /// Move assignment: destroy and free current contents, reset to the empty
  /// inline state, then take \p other's contents via swap.
  SmallDenseMap &operator=(SmallDenseMap &&other) {
    this->destroyAll();
    deallocateBuckets();
    init(0);
    swap(other);
    return *this;
  }

  /// Replace this map's contents with a copy of \p other, adopting whichever
  /// representation \p other's bucket count dictates.
  void copyFrom(const SmallDenseMap &other) {
    this->destroyAll();
    deallocateBuckets();
    Small = true;
    if (other.getNumBuckets() > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
    }
    this->BaseT::copyFrom(other);
  }

  /// Set up empty storage with \p InitBuckets buckets, spilling to the heap
  /// only when the request exceeds InlineBuckets.
  void init(unsigned InitBuckets) {
    Small = true;
    if (InitBuckets > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
    }
    this->BaseT::initEmpty();
  }

  /// Grow to at least \p AtLeast buckets (minimum 64, power of two, when
  /// leaving the inline rep) and rehash. Also used with the current size
  /// purely to rehash tombstones away — see the comment in the Small path.
  void grow(unsigned AtLeast) {
    if (AtLeast > InlineBuckets)
      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast - 1));

    if (Small) {
      // First move the inline buckets into a temporary storage.
      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
      BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
      BucketT *TmpEnd = TmpBegin;

      // Loop over the buckets, moving non-empty, non-tombstones into the
      // temporary storage. Have the loop move the TmpEnd forward as it goes.
      const KeyT EmptyKey = this->getEmptyKey();
      const KeyT TombstoneKey = this->getTombstoneKey();
      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
            !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
                 "Too many inline buckets!");
          ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
          ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
          ++TmpEnd;
          P->getSecond().~ValueT();
        }
        // Every bucket's key is constructed (even empty/tombstone); destroy it.
        P->getFirst().~KeyT();
      }

      // AtLeast == InlineBuckets can happen if there are many tombstones,
      // and grow() is used to remove them. Usually we always switch to the
      // large rep here.
      if (AtLeast > InlineBuckets) {
        Small = false;
        new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
      }

      this->moveFromOldBuckets(TmpBegin, TmpEnd);
      return;
    }

    LargeRep OldRep = std::move(*getLargeRep());
    getLargeRep()->~LargeRep();
    if (AtLeast <= InlineBuckets) {
      // Shrinking back into the inline representation.
      Small = true;
    } else {
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
    }

    this->moveFromOldBuckets(OldRep.Buckets,
                             OldRep.Buckets + OldRep.NumBuckets);

    // Free the old table.
    deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
                      alignof(BucketT));
  }

  /// Clear the map and shrink the table to fit the previous entry count,
  /// returning to the inline representation when it suffices.
  void shrink_and_clear() {
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
        NewNumBuckets = 64;
    }
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      // Current storage already matches the target size; just reset it.
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }

private:
  // Accessors used by DenseMapBase (via the friend declaration above).
  unsigned getNumEntries() const { return NumEntries; }

  void setNumEntries(unsigned Num) {
    // NumEntries is hardcoded to be 31 bits wide.
    assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
    NumEntries = Num;
  }

  unsigned getNumTombstones() const { return NumTombstones; }

  void setNumTombstones(unsigned Num) { NumTombstones = Num; }

  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage' is a POD containing a char buffer.
    return reinterpret_cast<const BucketT *>(&storage);
  }

  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }

  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(&storage);
  }

  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
        const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }

  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }

  BucketT *getBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getBuckets());
  }

  unsigned getNumBuckets() const {
    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  }

  /// Free the heap bucket array; a no-op in the inline representation. Does
  /// not run bucket destructors — callers destroyAll() first.
  void deallocateBuckets() {
    if (Small)
      return;

    deallocate_buffer(getLargeRep()->Buckets,
                      sizeof(BucketT) * getLargeRep()->NumBuckets,
                      alignof(BucketT));
    getLargeRep()->~LargeRep();
  }

  /// Allocate an *uninitialized* heap bucket array of \p Num buckets and
  /// return the LargeRep describing it.
  LargeRep allocateBuckets(unsigned Num) {
    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
    LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
                        sizeof(BucketT) * Num, alignof(BucketT))),
                    Num};
    return Rep;
  }
};
/// Forward iterator over a DenseMap/SmallDenseMap bucket range that skips
/// empty and tombstone buckets. Derives from DebugEpochBase::HandleBase so
/// debug builds can assert when an invalidated iterator is used.
template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
          bool IsConst>
class DenseMapIterator : DebugEpochBase::HandleBase {
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;

public:
  using difference_type = ptrdiff_t;
  using value_type = std::conditional_t<IsConst, const Bucket, Bucket>;
  using pointer = value_type *;
  using reference = value_type &;
  using iterator_category = std::forward_iterator_tag;

private:
  // Current position and the end of the walkable range. When
  // shouldReverseIterate<KeyT>() is true, Ptr points one *past* the bucket
  // being referenced (operator* returns Ptr[-1]) and the iterator retreats
  // toward End instead of advancing.
  pointer Ptr = nullptr;
  pointer End = nullptr;

public:
  DenseMapIterator() = default;

  /// Construct over [Pos, E); \p Epoch is the owning map's epoch tracker.
  /// Unless \p NoAdvance is set, immediately skip non-live buckets so the
  /// iterator lands on a real entry (or compares equal to end()).
  DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
                   bool NoAdvance = false)
      : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
    assert(isHandleInSync() && "invalid construction!");
    if (NoAdvance) return;
    if (shouldReverseIterate<KeyT>()) {
      RetreatPastEmptyBuckets();
      return;
    }
    AdvancePastEmptyBuckets();
  }

  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
  // for const iterator destinations so it doesn't end up as a user defined copy
  // constructor.
  template <bool IsConstSrc,
            typename = std::enable_if_t<!IsConstSrc && IsConst>>
  DenseMapIterator(
      const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
      : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}

  reference operator*() const {
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "dereferencing end() iterator");
    if (shouldReverseIterate<KeyT>())
      return Ptr[-1]; // Reverse mode: Ptr is one past the referenced bucket.
    return *Ptr;
  }

  pointer operator->() const {
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "dereferencing end() iterator");
    if (shouldReverseIterate<KeyT>())
      return &(Ptr[-1]);
    return Ptr;
  }

  friend bool operator==(const DenseMapIterator &LHS,
                         const DenseMapIterator &RHS) {
    assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
    assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
           "comparing incomparable iterators!");
    return LHS.Ptr == RHS.Ptr;
  }

  friend bool operator!=(const DenseMapIterator &LHS,
                         const DenseMapIterator &RHS) {
    return !(LHS == RHS);
  }

  inline DenseMapIterator &operator++() { // Preincrement
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "incrementing end() iterator");
    if (shouldReverseIterate<KeyT>()) {
      --Ptr;
      RetreatPastEmptyBuckets();
      return *this;
    }
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }

  DenseMapIterator operator++(int) { // Postincrement
    assert(isHandleInSync() && "invalid iterator access!");
    DenseMapIterator tmp = *this;
    ++*this;
    return tmp;
  }

private:
  /// Move Ptr forward until it hits End or a live (non-empty, non-tombstone)
  /// bucket.
  void AdvancePastEmptyBuckets() {
    assert(Ptr <= End);
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
      ++Ptr;
  }

  /// Reverse-mode counterpart: move Ptr backward until Ptr[-1] is a live
  /// bucket or Ptr reaches End (here End is the low end of the range).
  void RetreatPastEmptyBuckets() {
    assert(Ptr >= End);
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
      --Ptr;
  }
};
  1122. template <typename KeyT, typename ValueT, typename KeyInfoT>
  1123. inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
  1124. return X.getMemorySize();
  1125. }
  1126. } // end namespace llvm
  1127. #endif // LLVM_ADT_DENSEMAP_H
  1128. #ifdef __GNUC__
  1129. #pragma GCC diagnostic pop
  1130. #endif