// NOTE: a file-viewer banner ("DenseMap.h 43 KB") and a run of concatenated
// line numbers previously appeared here; they were copy/paste artifacts from
// a source viewer, not part of the header, and have been removed.
  1. #pragma once
  2. #ifdef __GNUC__
  3. #pragma GCC diagnostic push
  4. #pragma GCC diagnostic ignored "-Wunused-parameter"
  5. #endif
  6. //===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
  7. //
  8. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  9. // See https://llvm.org/LICENSE.txt for license information.
  10. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  11. //
  12. //===----------------------------------------------------------------------===//
  13. ///
  14. /// \file
  15. /// This file defines the DenseMap class.
  16. ///
  17. //===----------------------------------------------------------------------===//
  18. #ifndef LLVM_ADT_DENSEMAP_H
  19. #define LLVM_ADT_DENSEMAP_H
  20. #include "llvm/ADT/DenseMapInfo.h"
  21. #include "llvm/ADT/EpochTracker.h"
  22. #include "llvm/Support/AlignOf.h"
  23. #include "llvm/Support/Compiler.h"
  24. #include "llvm/Support/MathExtras.h"
  25. #include "llvm/Support/MemAlloc.h"
  26. #include "llvm/Support/ReverseIteration.h"
  27. #include "llvm/Support/type_traits.h"
  28. #include <algorithm>
  29. #include <cassert>
  30. #include <cstddef>
  31. #include <cstring>
  32. #include <initializer_list>
  33. #include <iterator>
  34. #include <new>
  35. #include <type_traits>
  36. #include <utility>
  37. namespace llvm {
  38. namespace detail {
  39. // We extend a pair to allow users to override the bucket type with their own
  40. // implementation without requiring two members.
  41. template <typename KeyT, typename ValueT>
  42. struct DenseMapPair : public std::pair<KeyT, ValueT> {
  43. using std::pair<KeyT, ValueT>::pair;
  44. KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
  45. const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
  46. ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
  47. const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
  48. };
  49. } // end namespace detail
  50. template <typename KeyT, typename ValueT,
  51. typename KeyInfoT = DenseMapInfo<KeyT>,
  52. typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
  53. bool IsConst = false>
  54. class DenseMapIterator;
/// Base class shared by DenseMap and SmallDenseMap (CRTP: \p DerivedT supplies
/// the bucket storage and the entry/tombstone/bucket counters).
///
/// The table is a power-of-two-sized open-addressed array of BucketT probed
/// quadratically. Two reserved key values mark bucket state: the "empty" key
/// for never-used buckets and the "tombstone" key for erased ones; both are
/// supplied by \p KeyInfoT and must never be inserted by users.
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
class DenseMapBase : public DebugEpochBase {
  // Pass small keys by value and large ones by const reference.
  template <typename T>
  using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;

public:
  using size_type = unsigned;
  using key_type = KeyT;
  using mapped_type = ValueT;
  using value_type = BucketT;

  using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
  using const_iterator =
      DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;

  inline iterator begin() {
    // When the map is empty, avoid the overhead of advancing/retreating past
    // empty buckets.
    if (empty())
      return end();
    if (shouldReverseIterate<KeyT>())
      return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
    return makeIterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline iterator end() {
    return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }
  inline const_iterator begin() const {
    if (empty())
      return end();
    if (shouldReverseIterate<KeyT>())
      return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
    return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline const_iterator end() const {
    return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }

  LLVM_NODISCARD bool empty() const {
    return getNumEntries() == 0;
  }
  unsigned size() const { return getNumEntries(); }

  /// Grow the densemap so that it can contain at least \p NumEntries items
  /// before resizing again.
  void reserve(size_type NumEntries) {
    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
    // Reserving invalidates iterators if it grows, so bump the epoch
    // unconditionally.
    incrementEpoch();
    if (NumBuckets > getNumBuckets())
      grow(NumBuckets);
  }

  /// Remove all entries. Keeps the bucket array (resetting buckets to the
  /// empty key in place) unless the table is badly over-sized, in which case
  /// it reallocates a smaller one via shrink_and_clear().
  void clear() {
    incrementEpoch();
    if (getNumEntries() == 0 && getNumTombstones() == 0) return;
    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
      shrink_and_clear();
      return;
    }
    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    if (std::is_trivially_destructible<ValueT>::value) {
      // Use a simpler loop when values don't need destruction.
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
        P->getFirst() = EmptyKey;
    } else {
      // NumEntries is only read by the assert below; it cross-checks the
      // cached entry count against the number of live values destroyed.
      [[maybe_unused]] unsigned NumEntries = getNumEntries();
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
          if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
            // Live entry: destroy the value before resetting the key.
            P->getSecond().~ValueT();
            --NumEntries;
          }
          P->getFirst() = EmptyKey;
        }
      }
      assert(NumEntries == 0 && "Node count imbalance!");
    }
    setNumEntries(0);
    setNumTombstones(0);
  }

  /// Return 1 if the specified key is in the map, 0 otherwise.
  size_type count(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    return LookupBucketFor(Val, TheBucket) ? 1 : 0;
  }

  /// Return an iterator to the entry for \p Val, or end() if absent.
  iterator find(const_arg_type_t<KeyT> Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      // The "end" pointer passed here depends on iteration direction; the
      // NoAdvance flag keeps the iterator anchored on the found bucket.
      return makeIterator(TheBucket,
                          shouldReverseIterate<KeyT>() ? getBuckets()
                                                       : getBucketsEnd(),
                          *this, true);
    return end();
  }
  const_iterator find(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeConstIterator(TheBucket,
                               shouldReverseIterate<KeyT>() ? getBuckets()
                                                            : getBucketsEnd(),
                               *this, true);
    return end();
  }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template<class LookupKeyT>
  iterator find_as(const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeIterator(TheBucket,
                          shouldReverseIterate<KeyT>() ? getBuckets()
                                                       : getBucketsEnd(),
                          *this, true);
    return end();
  }
  template<class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeConstIterator(TheBucket,
                               shouldReverseIterate<KeyT>() ? getBuckets()
                                                            : getBucketsEnd(),
                               *this, true);
    return end();
  }

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return TheBucket->getSecond();
    return ValueT();
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    return try_emplace(KV.first, KV.second);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    return try_emplace(std::move(KV.first), std::move(KV.second));
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.
    // Otherwise, insert the new element.
    TheBucket =
        InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.
    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }

  /// Alternate version of insert() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <typename LookupKeyT>
  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
                                      const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.
    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
                                           std::move(KV.second), Val);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }

  /// insert - Range insertion of pairs.
  template<typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }

  /// Erase the entry for \p Val if present. Returns true if an entry was
  /// removed. The bucket is marked with the tombstone key, not freed.
  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false; // not in map.

    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }

  /// Erase the entry the iterator points at (must be dereferenceable).
  void erase(iterator I) {
    BucketT *TheBucket = &*I;
    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }

  /// Return the bucket for \p Key, default-constructing the value (and
  /// inserting) if the key is absent. Backs operator[].
  value_type& FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, Key);
  }

  ValueT &operator[](const KeyT &Key) {
    return FindAndConstruct(Key).second;
  }

  value_type& FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, std::move(Key));
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(std::move(Key)).second;
  }

  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array. In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
  const void *getPointerIntoBucketsArray() const { return getBuckets(); }

protected:
  DenseMapBase() = default;

  /// Run destructors on every live key and value. Called by the derived
  /// class's destructor/assignment before releasing the bucket memory; does
  /// not reset any counters.
  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
        P->getSecond().~ValueT();
      // Keys are constructed in every bucket (empty/tombstone included), so
      // every key gets destroyed.
      P->getFirst().~KeyT();
    }
  }

  /// Construct the empty key in every bucket of a freshly allocated (raw)
  /// bucket array and zero the counters.
  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    // Power-of-two bucket counts let the probe sequence use masking instead
    // of modulo (see LookupBucketFor).
    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      ::new (&B->getFirst()) KeyT(EmptyKey);
  }

  /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without need to grow().
  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
    // Ensure that "NumEntries * 4 < NumBuckets * 3"
    if (NumEntries == 0)
      return 0;
    // +1 is required because of the strict equality.
    // For example if NumEntries is 48, we need to return 128
    // (NextPowerOf2(48 * 4 / 3 + 1) == NextPowerOf2(65)).
    return NextPowerOf2(NumEntries * 4 / 3 + 1);
  }

  /// Re-insert every live entry from the old bucket array into this (already
  /// allocated, larger) one, destroying the old keys/values as it goes.
  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        DestBucket->getFirst() = std::move(B->getFirst());
        ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
        incrementNumEntries();

        // Free the value.
        B->getSecond().~ValueT();
      }
      B->getFirst().~KeyT();
    }
  }

  /// Copy every bucket from \p other into this map's (raw, same-sized) bucket
  /// array. Uses memcpy when both key and value are trivially copyable.
  template <typename OtherBaseT>
  void copyFrom(
      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
    assert(&other != this);
    assert(getNumBuckets() == other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (std::is_trivially_copyable<KeyT>::value &&
        std::is_trivially_copyable<ValueT>::value)
      memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
             getNumBuckets() * sizeof(BucketT));
    else
      for (size_t i = 0; i < getNumBuckets(); ++i) {
        // Every bucket gets its key copy-constructed; only live buckets also
        // carry a constructed value.
        ::new (&getBuckets()[i].getFirst())
            KeyT(other.getBuckets()[i].getFirst());
        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
          ::new (&getBuckets()[i].getSecond())
              ValueT(other.getBuckets()[i].getSecond());
      }
  }

  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  template<typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  /// Reserved key value marking never-used buckets. Must not be inserted.
  static const KeyT getEmptyKey() {
    static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
                  "Must pass the derived type to this template!");
    return KeyInfoT::getEmptyKey();
  }

  /// Reserved key value marking erased buckets. Must not be inserted.
  static const KeyT getTombstoneKey() {
    return KeyInfoT::getTombstoneKey();
  }

private:
  iterator makeIterator(BucketT *P, BucketT *E,
                        DebugEpochBase &Epoch,
                        bool NoAdvance=false) {
    if (shouldReverseIterate<KeyT>()) {
      // In reverse mode the iterator is anchored one past P (wrapping end()
      // back to the start of the array).
      BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
      return iterator(B, E, Epoch, NoAdvance);
    }
    return iterator(P, E, Epoch, NoAdvance);
  }

  const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
                                   const DebugEpochBase &Epoch,
                                   const bool NoAdvance=false) const {
    if (shouldReverseIterate<KeyT>()) {
      const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
      return const_iterator(B, E, Epoch, NoAdvance);
    }
    return const_iterator(P, E, Epoch, NoAdvance);
  }

  // CRTP forwarders: the derived class owns the actual storage and counters.
  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }

  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }

  void incrementNumEntries() {
    setNumEntries(getNumEntries() + 1);
  }

  void decrementNumEntries() {
    setNumEntries(getNumEntries() - 1);
  }

  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }

  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }

  void incrementNumTombstones() {
    setNumTombstones(getNumTombstones() + 1);
  }

  void decrementNumTombstones() {
    setNumTombstones(getNumTombstones() - 1);
  }

  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }

  BucketT *getBuckets() {
    return static_cast<DerivedT *>(this)->getBuckets();
  }

  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }

  BucketT *getBucketsEnd() {
    return getBuckets() + getNumBuckets();
  }

  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) {
    static_cast<DerivedT *>(this)->grow(AtLeast);
  }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }

  /// Fill \p TheBucket (an empty/tombstone slot found by LookupBucketFor)
  /// with \p Key and a value constructed from \p Values, growing first if
  /// load factors require it.
  template <typename KeyArg, typename... ValueArgs>
  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
                            ValueArgs &&... Values) {
    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);

    TheBucket->getFirst() = std::forward<KeyArg>(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
    return TheBucket;
  }

  /// As InsertIntoBucket, but re-probes after a grow using the cheaper
  /// \p Lookup key rather than the real key.
  template <typename LookupKeyT>
  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
                                      ValueT &&Value, LookupKeyT &Lookup) {
    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);

    TheBucket->getFirst() = std::move(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
    return TheBucket;
  }

  /// Bookkeeping common to all insertions: grow if needed (re-finding the
  /// destination bucket afterwards), bump the entry count, and reclaim a
  /// tombstone if we are overwriting one. Returns the slot to construct into.
  template <typename LookupKeyT>
  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
                                BucketT *TheBucket) {
    incrementEpoch();

    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The later case is tricky.  For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket.  If the
    // table completely filled with tombstones, no lookup would ever succeed,
    // causing infinite loops in lookup.
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Lookup, TheBucket);
      NumBuckets = getNumBuckets();
    } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
                             NumBuckets/8)) {
      // Rehash at the same size: this clears out tombstones without growing.
      this->grow(NumBuckets);
      LookupBucketFor(Lookup, TheBucket);
    }
    assert(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }

  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket.  If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template<typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    // NumBuckets is a power of two, so masking implements "mod NumBuckets".
    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
    unsigned ProbeAmt = 1;
    while (true) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket?  If so, return it.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the set.
      // Insert it and return the default value.
      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it.  If Val ends up not in the map, we
      // prefer to return it than something that would require more probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket;  // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets-1);
    }
  }

  /// Non-const overload: forwards to the const implementation and casts the
  /// constness back off the resulting bucket pointer.
  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)
      ->LookupBucketFor(Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }

public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// are not included.
  size_t getMemorySize() const {
    return getNumBuckets() * sizeof(BucketT);
  }
};
  591. /// Equality comparison for DenseMap.
  592. ///
  593. /// Iterates over elements of LHS confirming that each (key, value) pair in LHS
  594. /// is also in RHS, and that no additional pairs are in RHS.
  595. /// Equivalent to N calls to RHS.find and N value comparisons. Amortized
  596. /// complexity is linear, worst case is O(N^2) (if every hash collides).
  597. template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
  598. typename BucketT>
  599. bool operator==(
  600. const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
  601. const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  602. if (LHS.size() != RHS.size())
  603. return false;
  604. for (auto &KV : LHS) {
  605. auto I = RHS.find(KV.first);
  606. if (I == RHS.end() || I->second != KV.second)
  607. return false;
  608. }
  609. return true;
  610. }
/// Inequality comparison for DenseMap.
///
/// Equivalent to !(LHS == RHS). See operator== for performance notes.
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator!=(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  // Simple forwarder; all the work (and the complexity caveats) live in
  // operator==.
  return !(LHS == RHS);
}
/// DenseMap - the standard heap-backed implementation of DenseMapBase.
/// Owns a single contiguous bucket array allocated with allocate_buffer and
/// tracks entry/tombstone/bucket counts as plain members. All probing and
/// lookup logic lives in DenseMapBase (CRTP); this class only supplies
/// storage management (init/grow/shrink/copy/swap).
template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
                                     KeyT, ValueT, KeyInfoT, BucketT> {
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  BucketT *Buckets;       // Heap-allocated bucket array; nullptr iff NumBuckets == 0.
  unsigned NumEntries;    // Count of live key/value pairs.
  unsigned NumTombstones; // Count of erased slots not yet reclaimed by a rehash.
  unsigned NumBuckets;    // Capacity of Buckets.

public:
  /// Create a DenseMap with an optional \p InitialReserve that guarantee that
  /// this number of elements can be inserted in the map without grow()
  explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }

  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  // Move construction: start empty, then steal other's storage via swap.
  DenseMap(DenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  /// Construct from an iterator range; reserves std::distance(I, E) entries
  /// up front so the bulk insert does not need to grow().
  template<typename InputIt>
  DenseMap(const InputIt &I, const InputIt &E) {
    init(std::distance(I, E));
    this->insert(I, E);
  }

  DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
    init(Vals.size());
    this->insert(Vals.begin(), Vals.end());
  }

  ~DenseMap() {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
  }

  /// O(1) swap of the two maps' storage. Bumps both epochs so any
  /// outstanding (debug-mode) iterator handles are flagged as stale.
  void swap(DenseMap& RHS) {
    this->incrementEpoch();
    RHS.incrementEpoch();
    std::swap(Buckets, RHS.Buckets);
    std::swap(NumEntries, RHS.NumEntries);
    std::swap(NumTombstones, RHS.NumTombstones);
    std::swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap& operator=(const DenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  // Move assignment: destroy and free current contents, reset to an empty
  // zero-bucket state, then take other's storage by swapping with it.
  DenseMap& operator=(DenseMap &&other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
    init(0);
    swap(other);
    return *this;
  }

  /// Replace this map's contents with a copy of \p other, reallocating to
  /// exactly other.NumBuckets buckets. A zero-bucket source leaves this map
  /// empty with no allocation.
  void copyFrom(const DenseMap& other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  /// Allocate enough buckets for \p InitNumEntries entries (sizing policy
  /// comes from the base class) and mark them all empty. Assumes no buckets
  /// are currently owned; does not free any previous allocation.
  void init(unsigned InitNumEntries) {
    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  /// Rehash into a new table of at least \p AtLeast buckets (minimum 64,
  /// rounded up to a power of 2).
  /// NOTE(review): AtLeast-1 wraps if AtLeast == 0 — callers are presumably
  /// required to pass a nonzero count; confirm at the call sites.
  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
    assert(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);

    // Free the old table.
    deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
                      alignof(BucketT));
  }

  /// Drop all entries and shrink the table to a size appropriate for the
  /// previous entry count (or to zero buckets if the map was empty).
  void shrink_and_clear() {
    unsigned OldNumBuckets = NumBuckets;
    unsigned OldNumEntries = NumEntries;
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldNumEntries)
      NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
    if (NewNumBuckets == NumBuckets) {
      // Already the right size; just reset the buckets to empty.
      this->BaseT::initEmpty();
      return;
    }

    deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
                      alignof(BucketT));
    init(NewNumBuckets);
  }

private:
  // The accessors below are the storage hooks DenseMapBase (our friend)
  // calls to reach this subclass's representation.
  unsigned getNumEntries() const {
    return NumEntries;
  }

  void setNumEntries(unsigned Num) {
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }

  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  BucketT *getBuckets() const {
    return Buckets;
  }

  unsigned getNumBuckets() const {
    return NumBuckets;
  }

  /// Point Buckets at a fresh uninitialized array of \p Num buckets.
  /// Returns false (with Buckets == nullptr) when Num is zero, in which case
  /// the caller must reset the entry/tombstone counts itself.
  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }

    Buckets = static_cast<BucketT *>(
        allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
    return true;
  }
};
/// SmallDenseMap - a DenseMapBase implementation with small-size
/// optimization: up to \p InlineBuckets buckets live directly inside the
/// object ("small" mode); beyond that, a heap-allocated LargeRep is used.
/// The two representations share one aligned storage area and are
/// discriminated by the Small bit.
template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
class SmallDenseMap
    : public DenseMapBase<
          SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
          ValueT, KeyInfoT, BucketT> {
  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  static_assert(isPowerOf2_64(InlineBuckets),
                "InlineBuckets must be a power of 2.");

  unsigned Small : 1;        // 1 => buckets are inline; 0 => LargeRep in use.
  unsigned NumEntries : 31;  // Live entry count (hence the < 2^31 limit below).
  unsigned NumTombstones;

  struct LargeRep {
    BucketT *Buckets;
    unsigned NumBuckets;
  };

  /// A "union" of an inline bucket array and the struct representing
  /// a large bucket. This union will be discriminated by the 'Small' bit.
  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;

public:
  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
    init(NumInitBuckets);
  }

  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template<typename InputIt>
  SmallDenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
      : SmallDenseMap(Vals.begin(), Vals.end()) {}

  ~SmallDenseMap() {
    this->destroyAll();
    deallocateBuckets();
  }

  /// Swap contents with \p RHS. Three cases: both inline (element-wise
  /// move/swap), both large (O(1) pointer swap), or mixed (move the inline
  /// side's elements across and hand the heap rep to the other side).
  void swap(SmallDenseMap& RHS) {
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = this->getEmptyKey();
    const KeyT TombstoneKey = this->getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any asymmetry.
        std::swap(LHSB->getFirst(), RHSB->getFirst());
        if (hasLHSValue) {
          ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
          LHSB->getSecond().~ValueT();
        } else if (hasRHSValue) {
          ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
          RHSB->getSecond().~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      // Both large: just exchange the heap pointers and sizes.
      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
      return;
    }

    // Mixed case: exactly one side is small.
    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
      OldB->getFirst().~KeyT();
      if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
        ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
        OldB->getSecond().~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
  }

  SmallDenseMap& operator=(const SmallDenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  SmallDenseMap& operator=(SmallDenseMap &&other) {
    this->destroyAll();
    deallocateBuckets();
    init(0);
    swap(other);
    return *this;
  }

  /// Replace contents with a copy of \p other, choosing small or large
  /// representation based on other's bucket count.
  void copyFrom(const SmallDenseMap& other) {
    this->destroyAll();
    deallocateBuckets();
    Small = true;
    if (other.getNumBuckets() > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
    }
    this->BaseT::copyFrom(other);
  }

  /// Set up an empty map with at least \p InitBuckets buckets; stays in
  /// small mode unless InitBuckets exceeds the inline capacity. Assumes no
  /// storage is currently owned.
  void init(unsigned InitBuckets) {
    Small = true;
    if (InitBuckets > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
    }
    this->BaseT::initEmpty();
  }

  /// Rehash into at least \p AtLeast buckets, possibly transitioning between
  /// the inline and heap representations in either direction.
  void grow(unsigned AtLeast) {
    if (AtLeast > InlineBuckets)
      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));

    if (Small) {
      // First move the inline buckets into a temporary storage.
      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
      BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
      BucketT *TmpEnd = TmpBegin;

      // Loop over the buckets, moving non-empty, non-tombstones into the
      // temporary storage. Have the loop move the TmpEnd forward as it goes.
      const KeyT EmptyKey = this->getEmptyKey();
      const KeyT TombstoneKey = this->getTombstoneKey();
      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
            !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
                 "Too many inline buckets!");
          ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
          ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
          ++TmpEnd;
          P->getSecond().~ValueT();
        }
        P->getFirst().~KeyT();
      }

      // AtLeast == InlineBuckets can happen if there are many tombstones,
      // and grow() is used to remove them. Usually we always switch to the
      // large rep here.
      if (AtLeast > InlineBuckets) {
        Small = false;
        new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
      }
      this->moveFromOldBuckets(TmpBegin, TmpEnd);
      return;
    }

    LargeRep OldRep = std::move(*getLargeRep());
    getLargeRep()->~LargeRep();
    if (AtLeast <= InlineBuckets) {
      Small = true;
    } else {
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
    }

    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);

    // Free the old table.
    deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
                      alignof(BucketT));
  }

  /// Drop all entries and shrink to a table sized for the previous entry
  /// count, preferring the inline representation when it fits.
  void shrink_and_clear() {
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
        NewNumBuckets = 64;
    }
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      // No representation change needed; just reset the buckets to empty.
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }

private:
  // Storage hooks used by DenseMapBase (our friend).
  unsigned getNumEntries() const {
    return NumEntries;
  }

  void setNumEntries(unsigned Num) {
    // NumEntries is hardcoded to be 31 bits wide.
    assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }

  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage' is a POD containing a char buffer.
    return reinterpret_cast<const BucketT *>(&storage);
  }

  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }

  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(&storage);
  }

  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
        const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }

  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }

  BucketT *getBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getBuckets());
  }

  unsigned getNumBuckets() const {
    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  }

  /// Free the heap bucket array and end the LargeRep's lifetime; a no-op in
  /// small mode. Does not run bucket destructors (callers destroyAll first).
  void deallocateBuckets() {
    if (Small)
      return;

    deallocate_buffer(getLargeRep()->Buckets,
                      sizeof(BucketT) * getLargeRep()->NumBuckets,
                      alignof(BucketT));
    getLargeRep()->~LargeRep();
  }

  /// Heap-allocate \p Num uninitialized buckets and return them as a
  /// LargeRep by value (caller placement-news it into 'storage').
  LargeRep allocateBuckets(unsigned Num) {
    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
    LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
                        sizeof(BucketT) * Num, alignof(BucketT))),
                    Num};
    return Rep;
  }
};
/// Forward iterator over a DenseMap/SmallDenseMap bucket array that skips
/// empty and tombstone buckets. Inherits DebugEpochBase::HandleBase so
/// debug builds can assert when the iterator outlives a table mutation.
/// When shouldReverseIterate<KeyT>() is true the iterator walks backward
/// and Ptr points one PAST the bucket it denotes (hence the Ptr[-1] below).
template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
          bool IsConst>
class DenseMapIterator : DebugEpochBase::HandleBase {
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;

public:
  using difference_type = ptrdiff_t;
  using value_type =
      typename std::conditional<IsConst, const Bucket, Bucket>::type;
  using pointer = value_type *;
  using reference = value_type &;
  using iterator_category = std::forward_iterator_tag;

private:
  pointer Ptr = nullptr; // Current bucket (or one-past, in reverse mode).
  pointer End = nullptr; // Limit in the direction of travel.

public:
  DenseMapIterator() = default;

  /// Build an iterator at \p Pos, skipping to the first live bucket unless
  /// \p NoAdvance is set (used when Pos is already known to be live/end).
  DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
                   bool NoAdvance = false)
      : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
    assert(isHandleInSync() && "invalid construction!");

    if (NoAdvance) return;
    if (shouldReverseIterate<KeyT>()) {
      RetreatPastEmptyBuckets();
      return;
    }
    AdvancePastEmptyBuckets();
  }

  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
  // for const iterator destinations so it doesn't end up as a user defined copy
  // constructor.
  template <bool IsConstSrc,
            typename = std::enable_if_t<!IsConstSrc && IsConst>>
  DenseMapIterator(
      const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
      : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}

  reference operator*() const {
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "dereferencing end() iterator");
    if (shouldReverseIterate<KeyT>())
      return Ptr[-1]; // Reverse mode: denoted bucket is one before Ptr.
    return *Ptr;
  }
  pointer operator->() const {
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "dereferencing end() iterator");
    if (shouldReverseIterate<KeyT>())
      return &(Ptr[-1]);
    return Ptr;
  }

  friend bool operator==(const DenseMapIterator &LHS,
                         const DenseMapIterator &RHS) {
    assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
    assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
           "comparing incomparable iterators!");
    return LHS.Ptr == RHS.Ptr;
  }

  friend bool operator!=(const DenseMapIterator &LHS,
                         const DenseMapIterator &RHS) {
    return !(LHS == RHS);
  }

  inline DenseMapIterator& operator++() {  // Preincrement
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "incrementing end() iterator");
    if (shouldReverseIterate<KeyT>()) {
      --Ptr;
      RetreatPastEmptyBuckets();
      return *this;
    }
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
  DenseMapIterator operator++(int) {  // Postincrement
    assert(isHandleInSync() && "invalid iterator access!");
    DenseMapIterator tmp = *this; ++*this; return tmp;
  }

private:
  /// Move Ptr forward over empty/tombstone buckets until a live bucket or
  /// End is reached.
  void AdvancePastEmptyBuckets() {
    assert(Ptr <= End);
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
      ++Ptr;
  }

  /// Reverse-mode counterpart: move Ptr backward while the bucket just
  /// below it (Ptr[-1]) is empty or a tombstone. Here End is the lower
  /// bound, so Ptr >= End is the invariant.
  void RetreatPastEmptyBuckets() {
    assert(Ptr >= End);
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
      --Ptr;
  }
};
  1122. template <typename KeyT, typename ValueT, typename KeyInfoT>
  1123. inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
  1124. return X.getMemorySize();
  1125. }
  1126. } // end namespace llvm
  1127. #endif // LLVM_ADT_DENSEMAP_H
  1128. #ifdef __GNUC__
  1129. #pragma GCC diagnostic pop
  1130. #endif