- // Copyright 2018 The Abseil Authors.
- //
- // Licensed under the Apache License, Version 2.0 (the "License");
- // you may not use this file except in compliance with the License.
- // You may obtain a copy of the License at
- //
- // https://www.apache.org/licenses/LICENSE-2.0
- //
- // Unless required by applicable law or agreed to in writing, software
- // distributed under the License is distributed on an "AS IS" BASIS,
- // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- // See the License for the specific language governing permissions and
- // limitations under the License.
- //
- // An open-addressing hashtable with quadratic probing.
- //
- // This is a low level hashtable on top of which different interfaces can be
- // implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
- //
- // The table interface is similar to that of std::unordered_set. Notable
- // differences are that most member functions support heterogeneous keys when
- // BOTH the hash and eq functions are marked as transparent. They do so by
- // providing a typedef called `is_transparent`.
- //
- // When heterogeneous lookup is enabled, functions that take key_type act as if
- // they have an overload set like:
- //
- // iterator find(const key_type& key);
- // template <class K>
- // iterator find(const K& key);
- //
- // size_type erase(const key_type& key);
- // template <class K>
- // size_type erase(const K& key);
- //
- // std::pair<iterator, iterator> equal_range(const key_type& key);
- // template <class K>
- // std::pair<iterator, iterator> equal_range(const K& key);
- //
- // When heterogeneous lookup is disabled, only the explicit `key_type` overloads
- // exist.
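- //
- // For illustration, a hash/eq pair that enables heterogeneous lookup for
- // TString keys might look like the following sketch (the functor names are
- // hypothetical, not part of this file):
- //
- //   struct StringHash {
- //     using is_transparent = void;
- //     size_t operator()(y_absl::string_view v) const {
- //       return y_absl::Hash<y_absl::string_view>{}(v);
- //     }
- //   };
- //   struct StringEq {
- //     using is_transparent = void;
- //     bool operator()(y_absl::string_view a, y_absl::string_view b) const {
- //       return a == b;
- //     }
- //   };
- //
- // With both functors transparent, `find` accepts a `y_absl::string_view` (or
- // anything convertible to one) without materializing a temporary key_type.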
- //
- // find() also supports passing the hash explicitly:
- //
- // iterator find(const key_type& key, size_t hash);
- // template <class U>
- // iterator find(const U& key, size_t hash);
- //
- // In addition, the pointer-to-element and iterator stability guarantees are
- // weaker: all iterators and pointers are invalidated after a new element is
- // inserted.
- //
- // IMPLEMENTATION DETAILS
- //
- // # Table Layout
- //
- // A raw_hash_set's backing array consists of control bytes followed by slots
- // that may or may not contain objects.
- //
- // The layout of the backing array, for `capacity` slots, is thus, as a
- // pseudo-struct:
- //
- // struct BackingArray {
- // // The number of elements we can insert before growing the capacity.
- // size_t growth_left;
- // // Control bytes for the "real" slots.
- // ctrl_t ctrl[capacity];
- // // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
- // // stop and serves no other purpose.
- // ctrl_t sentinel;
- // // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
- // // that if a probe sequence picks a value near the end of `ctrl`,
- // // `Group` will have valid control bytes to look at.
- // ctrl_t clones[kWidth - 1];
- // // The actual slot data.
- // slot_type slots[capacity];
- // };
- //
- // The length of this array is computed by `AllocSize()` below.
- //
- // Control bytes (`ctrl_t`) are bytes (collected into groups of a
- // platform-specific size) that define the state of the corresponding slot in
- // the slot array. Group manipulation is tightly optimized to be as efficient
- // as possible: SSE and friends on x86, clever bit operations on other arches.
- //
- // Group 1 Group 2 Group 3
- // +---------------+---------------+---------------+
- // | | | | | | | | | | | | | | | | | | | | | | | | |
- // +---------------+---------------+---------------+
- //
- // Each control byte is either a special value marking an empty slot, a
- // deleted slot (sometimes called a *tombstone*), or the end-of-table sentinel
- // used by iterators; or, if the slot is occupied, seven bits (H2) from the
- // hash of the value in the corresponding slot.
- //
- // Storing control bytes in a separate array also has beneficial cache effects,
- // since more logical slots will fit into a cache line.
- //
- // # Hashing
- //
- // We compute two separate hashes, `H1` and `H2`, from the hash of an object.
- // `H1(hash(x))` is an index into `slots`, and essentially the starting point
- // for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
- // objects that cannot possibly be the one we are looking for.
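- //
- // A minimal sketch of the split, matching the `H1`/`H2` helpers defined
- // later in this file (and ignoring the per-table salt that `H1` also mixes
- // in):
- //
- //   size_t h = hash(x);
- //   size_t h1 = h >> 7;   // upper 57 bits: probe start, taken modulo capacity
- //   h2_t h2 = h & 0x7F;   // low 7 bits: stored in the control byte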
- //
- // # Table operations.
- //
- // The key operations are `insert`, `find`, and `erase`.
- //
- // Since `insert` and `erase` are implemented in terms of `find`, we describe
- // `find` first. To `find` a value `x`, we compute `hash(x)`. From
- // `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
- // group of slots in some interesting order.
- //
- // We now walk through these indices. At each index, we select the entire group
- // starting with that index and extract potential candidates: occupied slots
- // with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
- // group, we stop: `x` is not in the table. Each candidate slot `y` is
- // compared with `x`; if `x == y`, we are done and return `&y`; otherwise we
- // continue to the next probe index. Tombstones effectively behave like full
- // slots that never match the value we're looking for.
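- //
- // A simplified sketch of this search loop, using the `Group` and `probe_seq`
- // machinery defined later in this file (`slot_matches` and `kNotFound` are
- // hypothetical stand-ins for the real equality check and miss handling):
- //
- //   probe_seq<Group::kWidth> seq(H1(hash, ctrl), capacity);
- //   while (true) {
- //     Group g{ctrl + seq.offset()};
- //     for (uint32_t i : g.Match(H2(hash))) {
- //       if (slot_matches(seq.offset(i), x)) return seq.offset(i);
- //     }
- //     if (g.MaskEmpty()) return kNotFound;  // an empty slot ends the probe
- //     seq.next();
- //   }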
- //
- // The `H2` bits ensure when we compare a slot to an object with `==`, we are
- // likely to have actually found the object. That is, the chance is low that
- // `==` is called and returns `false`. Thus, when we search for an object, we
- // are unlikely to call `==` many times. This likelihood can be analyzed as
- // follows (assuming that H2 is a random enough hash function).
- //
- // Let's assume that there are `k` "wrong" objects that must be examined in a
- // probe sequence. For example, when doing a `find` on an object that is in the
- // table, `k` is the number of objects between the start of the probe sequence
- // and the final found object (not including the final found object). The
- // expected number of objects with an H2 match is then `k/128`. Measurements
- // and analysis indicate that even at high load factors, `k` is less than 32,
- // meaning that the number of "false positive" comparisons we must perform is
- // less than 1/8 per `find`.
- //
- // `insert` is implemented in terms of `unchecked_insert`, which inserts a
- // value presumed to not be in the table (violating this requirement will cause
- // the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
- // it, we construct a `probe_seq` once again, and use it to find the first
- // group with an unoccupied (empty *or* deleted) slot. We place `x` into the
- // first such slot in the group and mark it as full with `x`'s H2.
- //
- // To `insert`, we compose `unchecked_insert` with `find`. We compute `hash(x)` and
- // perform a `find` to see if it's already present; if it is, we're done. If
- // it's not, we may decide the table is getting overcrowded (i.e. the load
- // factor is greater than 7/8 for big tables; `is_small()` tables use a max load
- // factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
- // each element of the table into the new array (we know that no insertion here
- // will insert an already-present value), and discard the old backing array. At
- // this point, we may `unchecked_insert` the value `x`.
- //
- // Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
- // presents a viable, initialized slot pointee to the caller.
- //
- // `erase` is implemented in terms of `erase_at`, which takes an index to a
- // slot. Given an offset, we simply create a tombstone and destroy its contents.
- // If we can prove that the slot would not appear in a probe sequence, we can
- // mark the slot as empty instead. We can prove this by observing that if a
- // group has any empty slots, it has never been full (assuming we never create
- // an empty slot in a group with no empties, which this heuristic guarantees we
- // never do) and `find` would stop at this group anyway (since it does not probe
- // beyond groups with empties).
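- //
- // A simplified sketch of that decision (`destroy_slot`, `index_before`, and
- // `set_ctrl` are stand-ins; the real code also verifies that the empty slots
- // surrounding the erased index are close enough together that no probe
- // window spanning them could ever have been full):
- //
- //   void erase_at(size_t index) {
- //     destroy_slot(index);
- //     bool was_never_full = Group(ctrl + index).MaskEmpty() &&
- //                           Group(ctrl + index_before).MaskEmpty();
- //     set_ctrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted);
- //   }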
- //
- // `erase` is `erase_at` composed with `find`: if we
- // have a value `x`, we can perform a `find`, and then `erase_at` the resulting
- // slot.
- //
- // To iterate, we simply traverse the array, skipping empty and deleted slots
- // and stopping when we hit a `kSentinel`.
- #ifndef Y_ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
- #define Y_ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
- #include <algorithm>
- #include <cmath>
- #include <cstddef>
- #include <cstdint>
- #include <cstring>
- #include <iterator>
- #include <limits>
- #include <memory>
- #include <util/generic/string.h>
- #include <tuple>
- #include <type_traits>
- #include <utility>
- #include "y_absl/base/config.h"
- #include "y_absl/base/internal/endian.h"
- #include "y_absl/base/internal/raw_logging.h"
- #include "y_absl/base/optimization.h"
- #include "y_absl/base/port.h"
- #include "y_absl/base/prefetch.h"
- #include "y_absl/container/internal/common.h"
- #include "y_absl/container/internal/compressed_tuple.h"
- #include "y_absl/container/internal/container_memory.h"
- #include "y_absl/container/internal/hash_policy_traits.h"
- #include "y_absl/container/internal/hashtable_debug_hooks.h"
- #include "y_absl/container/internal/hashtablez_sampler.h"
- #include "y_absl/memory/memory.h"
- #include "y_absl/meta/type_traits.h"
- #include "y_absl/numeric/bits.h"
- #include "y_absl/utility/utility.h"
- #ifdef Y_ABSL_INTERNAL_HAVE_SSE2
- #include <emmintrin.h>
- #endif
- #ifdef Y_ABSL_INTERNAL_HAVE_SSSE3
- #include <tmmintrin.h>
- #endif
- #ifdef _MSC_VER
- #include <intrin.h>
- #endif
- #ifdef Y_ABSL_INTERNAL_HAVE_ARM_NEON
- #include <arm_neon.h>
- #endif
- namespace y_absl {
- Y_ABSL_NAMESPACE_BEGIN
- namespace container_internal {
- #ifdef Y_ABSL_SWISSTABLE_ENABLE_GENERATIONS
- #error Y_ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
- #elif defined(Y_ABSL_HAVE_ADDRESS_SANITIZER) || \
- defined(Y_ABSL_HAVE_MEMORY_SANITIZER)
- // When compiled in sanitizer mode, we add generation integers to the backing
- // array and iterators. In the backing array, we store the generation between
- // the control bytes and the slots. When iterators are dereferenced, we assert
- // that the container has not been mutated in a way that could cause iterator
- // invalidation since the iterator was initialized.
- #define Y_ABSL_SWISSTABLE_ENABLE_GENERATIONS
- #endif
- // We use uint8_t so we don't need to worry about padding.
- using GenerationType = uint8_t;
- // A sentinel value for empty generations. Using 0 makes it easy to constexpr
- // initialize an array of this value.
- constexpr GenerationType SentinelEmptyGeneration() { return 0; }
- constexpr GenerationType NextGeneration(GenerationType generation) {
- return ++generation == SentinelEmptyGeneration() ? ++generation : generation;
- }
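- // For example, since `GenerationType` is 8 bits wide, incrementing 255 wraps
- // to 0, the sentinel, so `NextGeneration(255)` skips ahead and returns 1.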
- #ifdef Y_ABSL_SWISSTABLE_ENABLE_GENERATIONS
- constexpr bool SwisstableGenerationsEnabled() { return true; }
- constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); }
- #else
- constexpr bool SwisstableGenerationsEnabled() { return false; }
- constexpr size_t NumGenerationBytes() { return 0; }
- #endif
- template <typename AllocType>
- void SwapAlloc(AllocType& lhs, AllocType& rhs,
- std::true_type /* propagate_on_container_swap */) {
- using std::swap;
- swap(lhs, rhs);
- }
- template <typename AllocType>
- void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
- std::false_type /* propagate_on_container_swap */) {}
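- // A typical call site dispatches on the allocator's trait, e.g. (sketch):
- //
- //   SwapAlloc(lhs_alloc, rhs_alloc,
- //             typename std::allocator_traits<
- //                 AllocType>::propagate_on_container_swap{});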
- // The state for a probe sequence.
- //
- // Currently, the sequence is a triangular progression of the form
- //
- // p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
- //
- // The use of `Width` ensures that each probe step does not overlap groups;
- // the sequence effectively outputs the addresses of *groups* (although not
- // necessarily aligned to any boundary). The `Group` machinery allows us
- // to check an entire group with minimal branching.
- //
- // Wrapping around at `mask + 1` is important, but not for the obvious reason.
- // As described above, the first few entries of the control byte array
- // are mirrored at the end of the array, which `Group` will find and use
- // for selecting candidates. However, when those candidates' slots are
- // actually inspected, there are no corresponding slots for the cloned bytes,
- // so we need to make sure we've treated those offsets as "wrapping around".
- //
- // It turns out that this probe sequence visits every group exactly once if the
- // number of groups is a power of two, since (i^2+i)/2 is a bijection in
- // Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
- template <size_t Width>
- class probe_seq {
- public:
- // Creates a new probe sequence using `hash` as the initial value of the
- // sequence and `mask` (usually the capacity of the table) as the mask to
- // apply to each value in the progression.
- probe_seq(size_t hash, size_t mask) {
- assert(((mask + 1) & mask) == 0 && "not a mask");
- mask_ = mask;
- offset_ = hash & mask_;
- }
- // The offset within the table, i.e., the value `p(i)` above.
- size_t offset() const { return offset_; }
- size_t offset(size_t i) const { return (offset_ + i) & mask_; }
- void next() {
- index_ += Width;
- offset_ += index_;
- offset_ &= mask_;
- }
- // 0-based probe index, a multiple of `Width`.
- size_t index() const { return index_; }
- private:
- size_t mask_;
- size_t offset_;
- size_t index_ = 0;
- };
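- // For illustration, with `Width == 16` and `mask == 63` (capacity 63), a
- // sequence seeded with `hash == 3` visits group-start offsets 3, 19, 51, 35,
- // touching each of the four 16-slot groups exactly once before cycling.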
- template <class ContainerKey, class Hash, class Eq>
- struct RequireUsableKey {
- template <class PassedKey, class... Args>
- std::pair<
- decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
- decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
- std::declval<const PassedKey&>()))>*
- operator()(const PassedKey&, const Args&...) const;
- };
- template <class E, class Policy, class Hash, class Eq, class... Ts>
- struct IsDecomposable : std::false_type {};
- template <class Policy, class Hash, class Eq, class... Ts>
- struct IsDecomposable<
- y_absl::void_t<decltype(Policy::apply(
- RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
- std::declval<Ts>()...))>,
- Policy, Hash, Eq, Ts...> : std::true_type {};
- // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
- template <class T>
- constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
- using std::swap;
- return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
- }
- template <class T>
- constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
- return false;
- }
- template <typename T>
- uint32_t TrailingZeros(T x) {
- Y_ABSL_ASSUME(x != 0);
- return static_cast<uint32_t>(countr_zero(x));
- }
- // An abstract bitmask, such as that emitted by a SIMD instruction.
- //
- // Specifically, this type implements a simple bitset whose representation is
- // controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
- // of abstract bits in the bitset, while `Shift` is the log-base-two of the
- // width of an abstract bit in the representation.
- // This mask provides operations for any number of real bits set in an abstract
- // bit. To add iteration on top of that, the implementation must guarantee no
- // more than one real bit is set in an abstract bit.
- template <class T, int SignificantBits, int Shift = 0>
- class NonIterableBitMask {
- public:
- explicit NonIterableBitMask(T mask) : mask_(mask) {}
- explicit operator bool() const { return this->mask_ != 0; }
- // Returns the index of the lowest *abstract* bit set in `self`.
- uint32_t LowestBitSet() const {
- return container_internal::TrailingZeros(mask_) >> Shift;
- }
- // Returns the index of the highest *abstract* bit set in `self`.
- uint32_t HighestBitSet() const {
- return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
- }
- // Returns the number of trailing zero *abstract* bits.
- uint32_t TrailingZeros() const {
- return container_internal::TrailingZeros(mask_) >> Shift;
- }
- // Returns the number of leading zero *abstract* bits.
- uint32_t LeadingZeros() const {
- constexpr int total_significant_bits = SignificantBits << Shift;
- constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
- return static_cast<uint32_t>(countl_zero(mask_ << extra_bits)) >> Shift;
- }
- T mask_;
- };
- // A mask that can be iterated over.
- //
- // For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
- // an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
- // `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
- // the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
- //
- // For example:
- // for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
- // for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
- template <class T, int SignificantBits, int Shift = 0>
- class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
- using Base = NonIterableBitMask<T, SignificantBits, Shift>;
- static_assert(std::is_unsigned<T>::value, "");
- static_assert(Shift == 0 || Shift == 3, "");
- public:
- explicit BitMask(T mask) : Base(mask) {}
- // BitMask is an iterator over the indices of its abstract bits.
- using value_type = int;
- using iterator = BitMask;
- using const_iterator = BitMask;
- BitMask& operator++() {
- this->mask_ &= (this->mask_ - 1);
- return *this;
- }
- uint32_t operator*() const { return Base::LowestBitSet(); }
- BitMask begin() const { return *this; }
- BitMask end() const { return BitMask(0); }
- private:
- friend bool operator==(const BitMask& a, const BitMask& b) {
- return a.mask_ == b.mask_;
- }
- friend bool operator!=(const BitMask& a, const BitMask& b) {
- return a.mask_ != b.mask_;
- }
- };
- using h2_t = uint8_t;
- // The values here are selected for maximum performance. See the static asserts
- // below for details.
- // A `ctrl_t` is a single control byte, which can have one of four
- // states: empty, deleted, full (which has an associated seven-bit h2_t value)
- // and the sentinel. They have the following bit patterns:
- //
- // empty: 1 0 0 0 0 0 0 0
- // deleted: 1 1 1 1 1 1 1 0
- // full: 0 h h h h h h h // h represents the hash bits.
- // sentinel: 1 1 1 1 1 1 1 1
- //
- // These values are specifically tuned for SSE-flavored SIMD.
- // The static_asserts below detail the source of these choices.
- //
- // We use an enum class so that when strict aliasing is enabled, the compiler
- // knows ctrl_t doesn't alias other types.
- enum class ctrl_t : int8_t {
- kEmpty = -128, // 0b10000000
- kDeleted = -2, // 0b11111110
- kSentinel = -1, // 0b11111111
- };
- static_assert(
- (static_cast<int8_t>(ctrl_t::kEmpty) &
- static_cast<int8_t>(ctrl_t::kDeleted) &
- static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
- "Special markers need to have the MSB to make checking for them efficient");
- static_assert(
- ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
- "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
- "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
- static_assert(
- ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
- "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
- "registers (pcmpeqd xmm, xmm)");
- static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
- "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
- "existence efficient (psignb xmm, xmm)");
- static_assert(
- (~static_cast<int8_t>(ctrl_t::kEmpty) &
- ~static_cast<int8_t>(ctrl_t::kDeleted) &
- static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
- "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
- "shared by ctrl_t::kSentinel to make the scalar test for "
- "MaskEmptyOrDeleted() efficient");
- static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
- "ctrl_t::kDeleted must be -2 to make the implementation of "
- "ConvertSpecialToEmptyAndFullToDeleted efficient");
- // See definition comment for why this is size 32.
- Y_ABSL_DLL extern const ctrl_t kEmptyGroup[32];
- // Returns a pointer to a control byte group that can be used by empty tables.
- inline ctrl_t* EmptyGroup() {
- // Const must be cast away here; no uses of this function will actually write
- // to it, because it is only used for empty tables.
- return const_cast<ctrl_t*>(kEmptyGroup + 16);
- }
- // Returns a pointer to a generation to use for an empty hashtable.
- GenerationType* EmptyGeneration();
- // Returns whether `generation` is a generation for an empty hashtable that
- // could be returned by EmptyGeneration().
- inline bool IsEmptyGeneration(const GenerationType* generation) {
- return *generation == SentinelEmptyGeneration();
- }
- // Mixes a randomly generated per-process seed with `hash` and `ctrl` to
- // randomize insertion order within groups.
- bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
- // Returns a per-table hash salt, which changes on resize. This gets mixed into
- // H1 to randomize iteration order per-table.
- //
- // The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
- // non-determinism of iteration order in most cases.
- inline size_t PerTableSalt(const ctrl_t* ctrl) {
- // The low bits of the pointer have little or no entropy because of
- // alignment. We shift the pointer to try to use higher entropy bits. A
- // good number seems to be 12 bits, because that aligns with page size.
- return reinterpret_cast<uintptr_t>(ctrl) >> 12;
- }
- // Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
- inline size_t H1(size_t hash, const ctrl_t* ctrl) {
- return (hash >> 7) ^ PerTableSalt(ctrl);
- }
- // Extracts the H2 portion of a hash: the 7 bits not used for H1.
- //
- // These are used as an occupied control byte.
- inline h2_t H2(size_t hash) { return hash & 0x7F; }
- // Helpers for checking the state of a control byte.
- inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
- inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
- inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
- inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
- #ifdef Y_ABSL_INTERNAL_HAVE_SSE2
- // Quick reference guide for intrinsics used below:
- //
- // * __m128i: An XMM (128-bit) word.
- //
- // * _mm_setzero_si128: Returns a zero vector.
- // * _mm_set1_epi8: Returns a vector with the same i8 in each lane.
- //
- // * _mm_subs_epi8: Saturating-subtracts two i8 vectors.
- // * _mm_and_si128: Ands two i128s together.
- // * _mm_or_si128: Ors two i128s together.
- // * _mm_andnot_si128: And-nots two i128s together.
- //
- // * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
- // filling each lane with 0x00 or 0xff.
- // * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
- //
- // * _mm_loadu_si128: Performs an unaligned load of an i128.
- // * _mm_storeu_si128: Performs an unaligned store of an i128.
- //
- // * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first
- // argument if the corresponding lane of the second
- // argument is positive, negative, or zero, respectively.
- // * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
- // bitmask consisting of those bits.
- // * _mm_shuffle_epi8: Selects i8s from the first argument, using the low
- // four bits of each i8 lane in the second argument as
- // indices.
- // https://github.com/abseil/abseil-cpp/issues/209
- // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
- // _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
- // Work around this by using the portable implementation of Group
- // when using -funsigned-char under GCC.
- inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
- #if defined(__GNUC__) && !defined(__clang__)
- if (std::is_unsigned<char>::value) {
- const __m128i mask = _mm_set1_epi8(0x80);
- const __m128i diff = _mm_subs_epi8(b, a);
- return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
- }
- #endif
- return _mm_cmpgt_epi8(a, b);
- }
- struct GroupSse2Impl {
- static constexpr size_t kWidth = 16; // the number of slots per group
- explicit GroupSse2Impl(const ctrl_t* pos) {
- ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
- }
- // Returns a bitmask representing the positions of slots that match hash.
- BitMask<uint32_t, kWidth> Match(h2_t hash) const {
- auto match = _mm_set1_epi8(static_cast<char>(hash));
- return BitMask<uint32_t, kWidth>(
- static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
- }
- // Returns a bitmask representing the positions of empty slots.
- NonIterableBitMask<uint32_t, kWidth> MaskEmpty() const {
- #ifdef Y_ABSL_INTERNAL_HAVE_SSSE3
- // This only works because ctrl_t::kEmpty is -128.
- return NonIterableBitMask<uint32_t, kWidth>(
- static_cast<uint32_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
- #else
- auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
- return NonIterableBitMask<uint32_t, kWidth>(
- static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
- #endif
- }
- // Returns a bitmask representing the positions of empty or deleted slots.
- NonIterableBitMask<uint32_t, kWidth> MaskEmptyOrDeleted() const {
- auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
- return NonIterableBitMask<uint32_t, kWidth>(static_cast<uint32_t>(
- _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
- }
- // Returns the number of leading empty or deleted slots in the group, i.e.,
- // the length of the run of such slots starting at the lowest index.
- uint32_t CountLeadingEmptyOrDeleted() const {
- auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
- return TrailingZeros(static_cast<uint32_t>(
- _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
- }
- void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
- auto msbs = _mm_set1_epi8(static_cast<char>(-128));
- auto x126 = _mm_set1_epi8(126);
- #ifdef Y_ABSL_INTERNAL_HAVE_SSSE3
- auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
- #else
- auto zero = _mm_setzero_si128();
- auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
- auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
- #endif
- _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
- }
- __m128i ctrl;
- };
- #endif // Y_ABSL_INTERNAL_HAVE_SSE2
- #if defined(Y_ABSL_INTERNAL_HAVE_ARM_NEON) && defined(Y_ABSL_IS_LITTLE_ENDIAN)
- struct GroupAArch64Impl {
- static constexpr size_t kWidth = 8;
- explicit GroupAArch64Impl(const ctrl_t* pos) {
- ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
- }
- BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
- uint8x8_t dup = vdup_n_u8(hash);
- auto mask = vceq_u8(ctrl, dup);
- constexpr uint64_t msbs = 0x8080808080808080ULL;
- return BitMask<uint64_t, kWidth, 3>(
- vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs);
- }
- NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
- uint64_t mask =
- vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
- vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
- vreinterpret_s8_u8(ctrl))),
- 0);
- return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
- }
- NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
- uint64_t mask =
- vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
- vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
- vreinterpret_s8_u8(ctrl))),
- 0);
- return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
- }
- uint32_t CountLeadingEmptyOrDeleted() const {
- uint64_t mask =
- vget_lane_u64(vreinterpret_u64_u8(vcle_s8(
- vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
- vreinterpret_s8_u8(ctrl))),
- 0);
- // Similar to MaskEmptyOrDeleted(), but the comparison is inverted so that
- // empty or deleted lanes produce zero bits. We then count trailing zeros.
- // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
- // so we should be fine.
- return static_cast<uint32_t>(countr_zero(mask)) >> 3;
- }
- void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
- uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
- constexpr uint64_t msbs = 0x8080808080808080ULL;
- constexpr uint64_t slsbs = 0x0202020202020202ULL;
- constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
- auto x = slsbs & (mask >> 6);
- auto res = (x + midbs) | msbs;
- little_endian::Store64(dst, res);
- }
- uint8x8_t ctrl;
- };
- #endif // Y_ABSL_INTERNAL_HAVE_ARM_NEON && Y_ABSL_IS_LITTLE_ENDIAN
- struct GroupPortableImpl {
- static constexpr size_t kWidth = 8;
- explicit GroupPortableImpl(const ctrl_t* pos)
- : ctrl(little_endian::Load64(pos)) {}
- BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
- // For the technique, see:
- // http://graphics.stanford.edu/~seander/bithacks.html#ValueInWord
- // (Determine if a word has a byte equal to n).
- //
- // Caveat: there are false positives but:
- // - they only occur if there is a real match
- // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
- // - they will be handled gracefully by subsequent checks in code
- //
- // Example:
- // v = 0x1716151413121110
- // hash = 0x12
- // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
- constexpr uint64_t msbs = 0x8080808080808080ULL;
- constexpr uint64_t lsbs = 0x0101010101010101ULL;
- auto x = ctrl ^ (lsbs * hash);
- return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
- }
- NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
- constexpr uint64_t msbs = 0x8080808080808080ULL;
- return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) &
- msbs);
- }
- NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
- constexpr uint64_t msbs = 0x8080808080808080ULL;
- return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) &
- msbs);
- }
- uint32_t CountLeadingEmptyOrDeleted() const {
- // In each byte, ctrl | ~(ctrl >> 7) has its lowest bit set to zero iff the
- // byte is kEmpty or kDeleted. We clear all other bits and count the number
- // of trailing zero bits.
- constexpr uint64_t bits = 0x0101010101010101ULL;
- return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >>
- 3);
- }
- void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
- constexpr uint64_t msbs = 0x8080808080808080ULL;
- constexpr uint64_t lsbs = 0x0101010101010101ULL;
- auto x = ctrl & msbs;
- auto res = (~x + (x >> 7)) & ~lsbs;
- little_endian::Store64(dst, res);
- }
- uint64_t ctrl;
- };
- #ifdef Y_ABSL_INTERNAL_HAVE_SSE2
- using Group = GroupSse2Impl;
- #elif defined(Y_ABSL_INTERNAL_HAVE_ARM_NEON) && defined(Y_ABSL_IS_LITTLE_ENDIAN)
- using Group = GroupAArch64Impl;
- #else
- using Group = GroupPortableImpl;
- #endif
- // When there is an insertion with no reserved growth, we rehash with
- // probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
- // constant divided by capacity ensures that inserting N elements is still O(N)
- // in the average case. Using the constant 16 means that we expect to rehash ~8
- // times more often than when generations are disabled. We are adding expected
- // rehash_probability * #insertions/capacity_growth = 16/capacity * ((7/8 -
- // 7/16) * capacity)/capacity_growth = ~7 extra rehashes per capacity growth.
- inline size_t RehashProbabilityConstant() { return 16; }
- class CommonFieldsGenerationInfoEnabled {
- // A sentinel value for reserved_growth_ indicating that we just ran out of
- // reserved growth on the last insertion. When reserve is called and then
- // insertions take place, reserved_growth_'s state machine is N, ..., 1,
- // kReservedGrowthJustRanOut, 0.
- static constexpr size_t kReservedGrowthJustRanOut =
- (std::numeric_limits<size_t>::max)();
- public:
- CommonFieldsGenerationInfoEnabled() = default;
- CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that)
- : reserved_growth_(that.reserved_growth_),
- reservation_size_(that.reservation_size_),
- generation_(that.generation_) {
- that.reserved_growth_ = 0;
- that.reservation_size_ = 0;
- that.generation_ = EmptyGeneration();
- }
- CommonFieldsGenerationInfoEnabled& operator=(
- CommonFieldsGenerationInfoEnabled&&) = default;
- // Whether we should rehash on insert in order to detect bugs caused by using
- // invalid references. We rehash on the first insertion after
- // reserved_growth_ reaches 0 after a call to reserve. We also rehash with
- // low probability whenever reserved_growth_ is zero.
- bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
- size_t capacity) const;
- void maybe_increment_generation_on_insert() {
- if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
- if (reserved_growth_ > 0) {
- if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
- } else {
- *generation_ = NextGeneration(*generation_);
- }
- }
- void reset_reserved_growth(size_t reservation, size_t size) {
- reserved_growth_ = reservation - size;
- }
- size_t reserved_growth() const { return reserved_growth_; }
- void set_reserved_growth(size_t r) { reserved_growth_ = r; }
- size_t reservation_size() const { return reservation_size_; }
- void set_reservation_size(size_t r) { reservation_size_ = r; }
- GenerationType generation() const { return *generation_; }
- void set_generation(GenerationType g) { *generation_ = g; }
- GenerationType* generation_ptr() const { return generation_; }
- void set_generation_ptr(GenerationType* g) { generation_ = g; }
- private:
- // The number of insertions remaining that are guaranteed to not rehash due to
- // a prior call to reserve. Note: we store reserved growth in addition to
- // reservation size because calls to erase() decrease size_ but don't decrease
- // reserved growth.
- size_t reserved_growth_ = 0;
- // The maximum argument to reserve() since the container was cleared. We need
- // to keep track of this, in addition to reserved growth, because we reset
- // reserved growth to this when erase(begin(), end()) is called.
- size_t reservation_size_ = 0;
- // Pointer to the generation counter, which is used to validate iterators and
- // is stored in the backing array between the control bytes and the slots.
- // Note that we can't store the generation inside the container itself and
- // keep a pointer to the container in the iterators because iterators must
- // remain valid when the container is moved.
- // Note: we could derive this pointer from the control pointer, but it makes
- // the code more complicated, and there's a benefit in having the sizes of
- // raw_hash_set in sanitizer mode and non-sanitizer mode a bit more different,
- // which is that tests are less likely to rely on the size remaining the same.
- GenerationType* generation_ = EmptyGeneration();
- };
- class CommonFieldsGenerationInfoDisabled {
- public:
- CommonFieldsGenerationInfoDisabled() = default;
- CommonFieldsGenerationInfoDisabled(CommonFieldsGenerationInfoDisabled&&) =
- default;
- CommonFieldsGenerationInfoDisabled& operator=(
- CommonFieldsGenerationInfoDisabled&&) = default;
- bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
- return false;
- }
- void maybe_increment_generation_on_insert() {}
- void reset_reserved_growth(size_t, size_t) {}
- size_t reserved_growth() const { return 0; }
- void set_reserved_growth(size_t) {}
- size_t reservation_size() const { return 0; }
- void set_reservation_size(size_t) {}
- GenerationType generation() const { return 0; }
- void set_generation(GenerationType) {}
- GenerationType* generation_ptr() const { return nullptr; }
- void set_generation_ptr(GenerationType*) {}
- };
- class HashSetIteratorGenerationInfoEnabled {
- public:
- HashSetIteratorGenerationInfoEnabled() = default;
- explicit HashSetIteratorGenerationInfoEnabled(
- const GenerationType* generation_ptr)
- : generation_ptr_(generation_ptr), generation_(*generation_ptr) {}
- GenerationType generation() const { return generation_; }
- void reset_generation() { generation_ = *generation_ptr_; }
- const GenerationType* generation_ptr() const { return generation_ptr_; }
- void set_generation_ptr(const GenerationType* ptr) { generation_ptr_ = ptr; }
- private:
- const GenerationType* generation_ptr_ = EmptyGeneration();
- GenerationType generation_ = *generation_ptr_;
- };
- class HashSetIteratorGenerationInfoDisabled {
- public:
- HashSetIteratorGenerationInfoDisabled() = default;
- explicit HashSetIteratorGenerationInfoDisabled(const GenerationType*) {}
- GenerationType generation() const { return 0; }
- void reset_generation() {}
- const GenerationType* generation_ptr() const { return nullptr; }
- void set_generation_ptr(const GenerationType*) {}
- };
- #ifdef Y_ABSL_SWISSTABLE_ENABLE_GENERATIONS
- using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoEnabled;
- using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoEnabled;
- #else
- using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled;
- using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
- #endif
- // Returns whether `n` is a valid capacity (i.e., number of slots).
- //
- // A valid capacity is a non-zero integer `2^m - 1`.
- inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
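- // For example, 1, 3, 7, and 15 are valid capacities, while 0, 4, and 8 are
- // not.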
- // Computes the offset of the control bytes from the start of the backing
- // allocation. growth_left is stored at the beginning of the backing array.
- inline size_t ControlOffset() { return sizeof(size_t); }
- // Returns the number of "cloned control bytes".
- //
- // This is the number of control bytes that are present both at the beginning
- // of the control byte array and at the end, such that we can create a
- // `Group::kWidth`-width probe window starting from any control byte.
- constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
- // Given the capacity of a table, computes the offset (from the start of the
- // backing allocation) of the generation counter (if it exists).
- inline size_t GenerationOffset(size_t capacity) {
- assert(IsValidCapacity(capacity));
- const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
- return ControlOffset() + num_control_bytes;
- }
- // Given the capacity of a table, computes the offset (from the start of the
- // backing allocation) at which the slots begin.
- inline size_t SlotOffset(size_t capacity, size_t slot_align) {
- assert(IsValidCapacity(capacity));
- return (GenerationOffset(capacity) + NumGenerationBytes() + slot_align - 1) &
- (~slot_align + 1);
- }
- // Given the capacity of a table, computes the total size of the backing
- // array.
- inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
- return SlotOffset(capacity, slot_align) + capacity * slot_size;
- }
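- // A worked example (64-bit, 16-wide groups, generations disabled): for
- // capacity 15 with 8-byte, 8-aligned slots, the control bytes start at
- // ControlOffset() == 8 (after growth_left), span 15 + 1 + 15 == 31 bytes,
- // SlotOffset() rounds 8 + 31 == 39 up to 40, and AllocSize() returns
- // 40 + 15 * 8 == 160 bytes.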
- // CommonFields hold the fields in raw_hash_set that do not depend
- // on template parameters. This allows us to conveniently pass all
- // of this state to helper functions as a single argument.
- class CommonFields : public CommonFieldsGenerationInfo {
- public:
- CommonFields() = default;
- // Not copyable
- CommonFields(const CommonFields&) = delete;
- CommonFields& operator=(const CommonFields&) = delete;
- // Movable
- CommonFields(CommonFields&& that)
- : CommonFieldsGenerationInfo(
- std::move(static_cast<CommonFieldsGenerationInfo&&>(that))),
- // Explicitly copying fields into "this" and then resetting "that"
- // fields generates less code than calling y_absl::exchange per field.
- control_(that.control()),
- slots_(that.slot_array()),
- capacity_(that.capacity()),
- compressed_tuple_(that.size(), std::move(that.infoz())) {
- that.set_control(EmptyGroup());
- that.set_slots(nullptr);
- that.set_capacity(0);
- that.set_size(0);
- }
- CommonFields& operator=(CommonFields&&) = default;
- ctrl_t* control() const { return control_; }
- void set_control(ctrl_t* c) { control_ = c; }
- void* backing_array_start() const {
- // growth_left is stored before control bytes.
- assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
- return control() - sizeof(size_t);
- }
- // Note: we can't use slots() because Qt defines "slots" as a macro.
- void* slot_array() const { return slots_; }
- void set_slots(void* s) { slots_ = s; }
- // The number of filled slots.
- size_t size() const { return compressed_tuple_.template get<0>(); }
- void set_size(size_t s) { compressed_tuple_.template get<0>() = s; }
- // The total number of available slots.
- size_t capacity() const { return capacity_; }
- void set_capacity(size_t c) {
- assert(c == 0 || IsValidCapacity(c));
- capacity_ = c;
- }
- // The number of slots we can still fill without needing to rehash.
- // This is stored in the heap allocation before the control bytes.
- size_t growth_left() const {
- return *reinterpret_cast<size_t*>(backing_array_start());
- }
- void set_growth_left(size_t gl) {
- *reinterpret_cast<size_t*>(backing_array_start()) = gl;
- }
- HashtablezInfoHandle& infoz() { return compressed_tuple_.template get<1>(); }
- const HashtablezInfoHandle& infoz() const {
- return compressed_tuple_.template get<1>();
- }
- bool should_rehash_for_bug_detection_on_insert() const {
- return CommonFieldsGenerationInfo::
- should_rehash_for_bug_detection_on_insert(control(), capacity());
- }
- void reset_reserved_growth(size_t reservation) {
- CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
- }
- // The size of the backing array allocation.
- size_t alloc_size(size_t slot_size, size_t slot_align) const {
- return AllocSize(capacity(), slot_size, slot_align);
- }
- // Returns the number of control bytes set to kDeleted. For testing only.
- size_t TombstonesCount() const {
- return static_cast<size_t>(
- std::count(control(), control() + capacity(), ctrl_t::kDeleted));
- }
- private:
- // TODO(b/259599413): Investigate removing some of these fields:
- // - control/slots can be derived from each other
- // - we can use 6 bits for capacity since it's always a power of two minus 1
- // The control bytes (and, also, a pointer near to the base of the backing
- // array).
- //
- // This contains `capacity + 1 + NumClonedBytes()` entries, even
- // when the table is empty (hence EmptyGroup).
- //
- // Note that growth_left is stored immediately before this pointer.
- ctrl_t* control_ = EmptyGroup();
- // The beginning of the slots, located `SlotOffset()` bytes from the start of
- // the backing allocation. May be null for empty tables.
- void* slots_ = nullptr;
- size_t capacity_ = 0;
- // Bundle together size and HashtablezInfoHandle to ensure EBO for
- // HashtablezInfoHandle when sampling is turned off.
- y_absl::container_internal::CompressedTuple<size_t, HashtablezInfoHandle>
- compressed_tuple_{0u, HashtablezInfoHandle{}};
- };
- template <class Policy, class Hash, class Eq, class Alloc>
- class raw_hash_set;
- // Returns the next valid capacity after `n`.
- inline size_t NextCapacity(size_t n) {
- assert(IsValidCapacity(n) || n == 0);
- return n * 2 + 1;
- }
- // Applies the following mapping to every byte in the control array:
- // * kDeleted -> kEmpty
- // * kEmpty -> kEmpty
- // * _ -> kDeleted
- // PRECONDITION:
- // IsValidCapacity(capacity)
- // ctrl[capacity] == ctrl_t::kSentinel
- // ctrl[i] != ctrl_t::kSentinel for all i < capacity
- void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
- // Converts `n` into the next valid capacity, per `IsValidCapacity`.
- inline size_t NormalizeCapacity(size_t n) {
- return n ? ~size_t{} >> countl_zero(n) : 1;
- }
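- // For example, NormalizeCapacity(0) == 1, NormalizeCapacity(7) == 7, and
- // NormalizeCapacity(8) == 15.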
- // General notes on capacity/growth methods below:
- // - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
- // average of two empty slots per group.
- // - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
- // - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
- // never need to probe (the whole table fits in one group) so we don't need a
- // load factor less than 1.
- // Given `capacity`, applies the load factor; i.e., it returns the maximum
- // number of values we should put into the table before a resizing rehash.
- inline size_t CapacityToGrowth(size_t capacity) {
- assert(IsValidCapacity(capacity));
- // `capacity*7/8`
- if (Group::kWidth == 8 && capacity == 7) {
- // x-x/8 does not work when x==7.
- return 6;
- }
- return capacity - capacity / 8;
- }
- // Given `growth`, "unapplies" the load factor to find how large the capacity
- // should be to stay within the load factor.
- //
- // This might not be a valid capacity and `NormalizeCapacity()` should be
- // called on this.
- inline size_t GrowthToLowerboundCapacity(size_t growth) {
- // `growth*8/7`
- if (Group::kWidth == 8 && growth == 7) {
- // x+(x-1)/7 does not work when x==7.
- return 8;
- }
- return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
- }
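- // For example, with 16-wide groups, CapacityToGrowth(15) == 15 - 15/8 == 14
- // and GrowthToLowerboundCapacity(14) == 14 + 13/7 == 15, so the two
- // functions round-trip for a capacity of 15.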
- template <class InputIter>
- size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
- size_t bucket_count) {
- if (bucket_count != 0) {
- return bucket_count;
- }
- using InputIterCategory =
- typename std::iterator_traits<InputIter>::iterator_category;
- if (std::is_base_of<std::random_access_iterator_tag,
- InputIterCategory>::value) {
- return GrowthToLowerboundCapacity(
- static_cast<size_t>(std::distance(first, last)));
- }
- return 0;
- }
- constexpr bool SwisstableDebugEnabled() {
- #if defined(Y_ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \
- Y_ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG)
- return true;
- #else
- return false;
- #endif
- }
- inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
- const GenerationType* generation_ptr,
- const char* operation) {
- if (!SwisstableDebugEnabled()) return;
- if (ctrl == nullptr) {
- Y_ABSL_INTERNAL_LOG(FATAL,
- TString(operation) + " called on end() iterator.");
- }
- if (ctrl == EmptyGroup()) {
- Y_ABSL_INTERNAL_LOG(FATAL, TString(operation) +
- " called on default-constructed iterator.");
- }
- if (SwisstableGenerationsEnabled()) {
- if (generation != *generation_ptr) {
- Y_ABSL_INTERNAL_LOG(FATAL,
- TString(operation) +
- " called on invalid iterator. The table could have "
- "rehashed since this iterator was initialized.");
- }
- if (!IsFull(*ctrl)) {
- Y_ABSL_INTERNAL_LOG(
- FATAL,
- TString(operation) +
- " called on invalid iterator. The element was likely erased.");
- }
- } else {
- if (!IsFull(*ctrl)) {
- Y_ABSL_INTERNAL_LOG(
- FATAL,
- TString(operation) +
- " called on invalid iterator. The element might have been erased "
- "or the table might have rehashed. Consider running with "
- "--config=asan to diagnose rehashing issues.");
- }
- }
- }
- // Note that for comparisons, null/end iterators are valid.
- inline void AssertIsValidForComparison(const ctrl_t* ctrl,
- GenerationType generation,
- const GenerationType* generation_ptr) {
- if (!SwisstableDebugEnabled()) return;
- const bool ctrl_is_valid_for_comparison =
- ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl);
- if (SwisstableGenerationsEnabled()) {
- if (generation != *generation_ptr) {
- Y_ABSL_INTERNAL_LOG(FATAL,
- "Invalid iterator comparison. The table could have "
- "rehashed since this iterator was initialized.");
- }
- if (!ctrl_is_valid_for_comparison) {
- Y_ABSL_INTERNAL_LOG(
- FATAL, "Invalid iterator comparison. The element was likely erased.");
- }
- } else {
- Y_ABSL_HARDENING_ASSERT(
- ctrl_is_valid_for_comparison &&
- "Invalid iterator comparison. The element might have been erased or "
- "the table might have rehashed. Consider running with --config=asan to "
- "diagnose rehashing issues.");
- }
- }
- // If the two iterators come from the same container, then their pointers will
- // interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice versa.
- // Note: we take slots by reference so that it's not UB if they're uninitialized
- // as long as we don't read them (when ctrl is null).
- inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
- const ctrl_t* ctrl_b,
- const void* const& slot_a,
- const void* const& slot_b) {
- // If either control byte is null, then we can't tell.
- if (ctrl_a == nullptr || ctrl_b == nullptr) return true;
- const void* low_slot = slot_a;
- const void* hi_slot = slot_b;
- if (ctrl_a > ctrl_b) {
- std::swap(ctrl_a, ctrl_b);
- std::swap(low_slot, hi_slot);
- }
- return ctrl_b < low_slot && low_slot <= hi_slot;
- }
- // Asserts that two iterators come from the same container.
- // Note: we take slots by reference so that it's not UB if they're uninitialized
- // as long as we don't read them (when ctrl is null).
- inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
- const void* const& slot_a,
- const void* const& slot_b,
- const GenerationType* generation_ptr_a,
- const GenerationType* generation_ptr_b) {
- if (!SwisstableDebugEnabled()) return;
- const bool a_is_default = ctrl_a == EmptyGroup();
- const bool b_is_default = ctrl_b == EmptyGroup();
- if (a_is_default != b_is_default) {
- Y_ABSL_INTERNAL_LOG(
- FATAL,
- "Invalid iterator comparison. Comparing default-constructed iterator "
- "with non-default-constructed iterator.");
- }
- if (a_is_default && b_is_default) return;
- if (SwisstableGenerationsEnabled()) {
- if (generation_ptr_a == generation_ptr_b) return;
- const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
- const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
- if (a_is_empty != b_is_empty) {
- Y_ABSL_INTERNAL_LOG(FATAL,
- "Invalid iterator comparison. Comparing iterator from "
- "a non-empty hashtable with an iterator from an empty "
- "hashtable.");
- }
- if (a_is_empty && b_is_empty) {
- Y_ABSL_INTERNAL_LOG(FATAL,
- "Invalid iterator comparison. Comparing iterators from "
- "different empty hashtables.");
- }
- const bool a_is_end = ctrl_a == nullptr;
- const bool b_is_end = ctrl_b == nullptr;
- if (a_is_end || b_is_end) {
- Y_ABSL_INTERNAL_LOG(FATAL,
- "Invalid iterator comparison. Comparing iterator with "
- "an end() iterator from a different hashtable.");
- }
- Y_ABSL_INTERNAL_LOG(FATAL,
- "Invalid iterator comparison. Comparing non-end() "
- "iterators from different hashtables.");
- } else {
- Y_ABSL_HARDENING_ASSERT(
- AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
- "Invalid iterator comparison. The iterators may be from different "
- "containers or the container might have rehashed. Consider running "
- "with --config=asan to diagnose rehashing issues.");
- }
- }
- struct FindInfo {
- size_t offset;
- size_t probe_length;
- };
- // Whether a table is "small". A small table fits entirely into a probing
- // group, i.e., has a capacity < `Group::kWidth`.
- //
- // In small mode we are able to use the whole capacity. The extra control
- // bytes give us at least one "empty" control byte to stop the iteration.
- // This is important to make 1 a valid capacity.
- //
- // In small mode only the first `capacity` control bytes after the sentinel
- // are valid. The rest contain dummy ctrl_t::kEmpty values that do not
- // represent a real slot. This is important to take into account on
- // `find_first_non_full()`, where we never try
- // `ShouldInsertBackwards()` for small tables.
- inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
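- // For example, with 16-wide groups (Group::kWidth == 16), capacities 1
- // through 14 are "small"; capacities 15 and above are not.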
- // Begins a probing operation on `common.control`, using `hash`.
- inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
- size_t hash) {
- return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
- }
- inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
- return probe(common.control(), common.capacity(), hash);
- }
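- // A hedged sketch of the resulting sequence: `probe_seq` starts at
- // `H1(hash) & capacity` and advances by a growing multiple of the group
- // width. E.g., with Group::kWidth == 16 and capacity 63, the visited group
- // offsets are o, o+16, o+48, o+96, ... (mod 64); the triangular steps
- // guarantee every group is visited once before the sequence repeats.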
- // Probes an array of control bits using a probe sequence derived from `hash`,
- // and returns the offset corresponding to the first deleted or empty slot.
- //
- // Behavior when the entire table is full is undefined.
- //
- // NOTE: this function must work with tables having both empty and deleted
- // slots in the same group. Such tables appear during `erase()`.
- template <typename = void>
- inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
- auto seq = probe(common, hash);
- const ctrl_t* ctrl = common.control();
- while (true) {
- Group g{ctrl + seq.offset()};
- auto mask = g.MaskEmptyOrDeleted();
- if (mask) {
- #if !defined(NDEBUG)
- // We want to add entropy even when ASLR is not enabled.
- // In debug build we will randomly insert in either the front or back of
- // the group.
- // TODO(kfm,sbenza): revisit after we do unconditional mixing
- if (!is_small(common.capacity()) && ShouldInsertBackwards(hash, ctrl)) {
- return {seq.offset(mask.HighestBitSet()), seq.index()};
- }
- #endif
- return {seq.offset(mask.LowestBitSet()), seq.index()};
- }
- seq.next();
- assert(seq.index() <= common.capacity() && "full table!");
- }
- }
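- // A hedged note on the result: `offset` is the index of the first empty or
- // deleted slot along the probe sequence, and `probe_length` is
- // `seq.index()`, a multiple of `Group::kWidth` measuring how far the probe
- // advanced (0 if the very first group already had a free slot).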
- // The extern template declaration for this inline function keeps the
- // possibility of inlining. When the compiler decides not to inline it, no
- // symbols are added to the corresponding translation unit.
- extern template FindInfo find_first_non_full(const CommonFields&, size_t);
- // Non-inlined version of find_first_non_full for use in less
- // performance critical routines.
- FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
- inline void ResetGrowthLeft(CommonFields& common) {
- common.set_growth_left(CapacityToGrowth(common.capacity()) - common.size());
- }
- // Sets `ctrl` to `{kEmpty, ..., kEmpty, kSentinel, kEmpty, ..., kEmpty}`,
- // marking the entire array as empty, with the sentinel at index `capacity`.
- inline void ResetCtrl(CommonFields& common, size_t slot_size) {
- const size_t capacity = common.capacity();
- ctrl_t* ctrl = common.control();
- std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
- capacity + 1 + NumClonedBytes());
- ctrl[capacity] = ctrl_t::kSentinel;
- SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
- ResetGrowthLeft(common);
- }
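- // For example, a hedged sketch with capacity == 7, Group::kWidth == 8, and
- // NumClonedBytes() == 7:
- //
- //   index: 0 1 2 3 4 5 6 7         8 ... 14
- //   ctrl:  E E E E E E E kSentinel E ... E   // trailing bytes are clones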
- // Sets `ctrl[i]` to `h`.
- //
- // Unlike setting it directly, this function will perform bounds checks and
- // mirror the value to the cloned tail if necessary.
- inline void SetCtrl(const CommonFields& common, size_t i, ctrl_t h,
- size_t slot_size) {
- const size_t capacity = common.capacity();
- assert(i < capacity);
- auto* slot_i = static_cast<const char*>(common.slot_array()) + i * slot_size;
- if (IsFull(h)) {
- SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
- } else {
- SanitizerPoisonMemoryRegion(slot_i, slot_size);
- }
- ctrl_t* ctrl = common.control();
- ctrl[i] = h;
- ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
- }
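- // A hedged example of the mirroring arithmetic above, with capacity == 31
- // and NumClonedBytes() == 15: writing i == 3 also writes the cloned byte at
- // ((3 - 15) & 31) + (15 & 31) == 20 + 15 == 35 == 3 + capacity + 1, while
- // i == 20 maps onto itself (((20 - 15) & 31) + 15 == 20), a harmless
- // self-assignment.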
- // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
- inline void SetCtrl(const CommonFields& common, size_t i, h2_t h,
- size_t slot_size) {
- SetCtrl(common, i, static_cast<ctrl_t>(h), slot_size);
- }
- // growth_left (which is a size_t) is stored with the backing array.
- constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
- return (std::max)(align_of_slot, alignof(size_t));
- }
- template <typename Alloc, size_t SizeOfSlot, size_t AlignOfSlot>
- Y_ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) {
- assert(c.capacity());
- // Folks with custom allocators often make unwarranted assumptions about the
- // behavior of their classes vis-a-vis trivial destructibility and what
- // calls they will or won't make. Avoid sampling for people with custom
- // allocators to get us out of this mess. This is not a hard guarantee but
- // a workaround while we plan the exact guarantee we want to provide.
- const size_t sample_size =
- (std::is_same<Alloc, std::allocator<char>>::value &&
- c.slot_array() == nullptr)
- ? SizeOfSlot
- : 0;
- const size_t cap = c.capacity();
- const size_t alloc_size = AllocSize(cap, SizeOfSlot, AlignOfSlot);
- // growth_left (which is a size_t) is stored with the backing array.
- char* mem = static_cast<char*>(
- Allocate<BackingArrayAlignment(AlignOfSlot)>(&alloc, alloc_size));
- const GenerationType old_generation = c.generation();
- c.set_generation_ptr(
- reinterpret_cast<GenerationType*>(mem + GenerationOffset(cap)));
- c.set_generation(NextGeneration(old_generation));
- c.set_control(reinterpret_cast<ctrl_t*>(mem + ControlOffset()));
- c.set_slots(mem + SlotOffset(cap, AlignOfSlot));
- ResetCtrl(c, SizeOfSlot);
- if (sample_size) {
- c.infoz() = Sample(sample_size);
- }
- c.infoz().RecordStorageChanged(c.size(), cap);
- }
- // PolicyFunctions bundles together some information for a particular
- // raw_hash_set<T, ...> instantiation. This information is passed to
- // type-erased functions that want to do small amounts of type-specific
- // work.
- struct PolicyFunctions {
- size_t slot_size;
- // Returns the hash of the pointed-to slot.
- size_t (*hash_slot)(void* set, void* slot);
- // Transfer the contents of src_slot to dst_slot.
- void (*transfer)(void* set, void* dst_slot, void* src_slot);
- // Deallocate the backing store from common.
- void (*dealloc)(CommonFields& common, const PolicyFunctions& policy);
- };
- // ClearBackingArray clears the backing array, either modifying it in place,
- // or creating a new one based on the value of "reuse".
- // REQUIRES: c.capacity > 0
- void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
- bool reuse);
- // Type-erased version of raw_hash_set::erase_meta_only.
- void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size);
- // Function to place in PolicyFunctions::dealloc for raw_hash_sets
- // that are using std::allocator. This allows us to share the same
- // function body for raw_hash_set instantiations that have the
- // same slot alignment.
- template <size_t AlignOfSlot>
- Y_ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
- const PolicyFunctions& policy) {
- // Unpoison before returning the memory to the allocator.
- SanitizerUnpoisonMemoryRegion(common.slot_array(),
- policy.slot_size * common.capacity());
- std::allocator<char> alloc;
- Deallocate<BackingArrayAlignment(AlignOfSlot)>(
- &alloc, common.backing_array_start(),
- common.alloc_size(policy.slot_size, AlignOfSlot));
- }
- // For trivially relocatable types we use memcpy directly. This allows us to
- // share the same function body for raw_hash_set instantiations that have the
- // same slot size as long as they are relocatable.
- template <size_t SizeOfSlot>
- Y_ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) {
- memcpy(dst, src, SizeOfSlot);
- }
- // Type-erased version of raw_hash_set::drop_deletes_without_resize.
- void DropDeletesWithoutResize(CommonFields& common,
- const PolicyFunctions& policy, void* tmp_space);
- // A SwissTable.
- //
- // Policy: a policy defines how to perform different operations on
- // the slots of the hashtable (see hash_policy_traits.h for the full interface
- // of policy).
- //
- // Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
- // functor should accept a key and return size_t as hash. For best performance
- // it is important that the hash function provides high entropy across all bits
- // of the hash.
- //
- // Eq: a (possibly polymorphic) functor that compares two keys for equality. It
- // should accept two (of possibly different type) keys and return a bool: true
- // if they are equal, false if they are not. If two keys compare equal, then
- // their hash values as defined by Hash MUST be equal.
- //
- // Allocator: an Allocator
- // [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
- // the storage of the hashtable will be allocated and the elements will be
- // constructed and destroyed.
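- //
- // A hedged restatement of the Hash/Eq contract: for any keys a and b,
- // Eq{}(a, b) == true must imply Hash{}(a) == Hash{}(b), and for
- // heterogeneous lookup both functors must accept the lookup type as well as
- // key_type and agree across the two types.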
- template <class Policy, class Hash, class Eq, class Alloc>
- class raw_hash_set {
- using PolicyTraits = hash_policy_traits<Policy>;
- using KeyArgImpl =
- KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
- public:
- using init_type = typename PolicyTraits::init_type;
- using key_type = typename PolicyTraits::key_type;
- // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
- // code fixes!
- using slot_type = typename PolicyTraits::slot_type;
- using allocator_type = Alloc;
- using size_type = size_t;
- using difference_type = ptrdiff_t;
- using hasher = Hash;
- using key_equal = Eq;
- using policy_type = Policy;
- using value_type = typename PolicyTraits::value_type;
- using reference = value_type&;
- using const_reference = const value_type&;
- using pointer = typename y_absl::allocator_traits<
- allocator_type>::template rebind_traits<value_type>::pointer;
- using const_pointer = typename y_absl::allocator_traits<
- allocator_type>::template rebind_traits<value_type>::const_pointer;
- // Alias used for heterogeneous lookup functions.
- // `key_arg<K>` evaluates to `K` when the functors are transparent and to
- // `key_type` otherwise. It permits template argument deduction on `K` for the
- // transparent case.
- template <class K>
- using key_arg = typename KeyArgImpl::template type<K, key_type>;
- private:
- // Give an early error when key_type is not hashable/eq.
- auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
- auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
- using AllocTraits = y_absl::allocator_traits<allocator_type>;
- using SlotAlloc = typename y_absl::allocator_traits<
- allocator_type>::template rebind_alloc<slot_type>;
- using SlotAllocTraits = typename y_absl::allocator_traits<
- allocator_type>::template rebind_traits<slot_type>;
- static_assert(std::is_lvalue_reference<reference>::value,
- "Policy::element() must return a reference");
- template <typename T>
- struct SameAsElementReference
- : std::is_same<typename std::remove_cv<
- typename std::remove_reference<reference>::type>::type,
- typename std::remove_cv<
- typename std::remove_reference<T>::type>::type> {};
- // An enabler for insert(T&&): T must be convertible to init_type or be the
- // same as [cv] value_type [ref].
- // Note: we separate SameAsElementReference into its own type to avoid using
- // reference unless we need to. MSVC doesn't seem to like it in some
- // cases.
- template <class T>
- using RequiresInsertable = typename std::enable_if<
- y_absl::disjunction<std::is_convertible<T, init_type>,
- SameAsElementReference<T>>::value,
- int>::type;
- // RequiresNotInit is a workaround for gcc prior to 7.1.
- // See https://godbolt.org/g/Y4xsUh.
- template <class T>
- using RequiresNotInit =
- typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
- template <class... Ts>
- using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
- public:
- static_assert(std::is_same<pointer, value_type*>::value,
- "Allocators with custom pointer types are not supported");
- static_assert(std::is_same<const_pointer, const value_type*>::value,
- "Allocators with custom pointer types are not supported");
- class iterator : private HashSetIteratorGenerationInfo {
- friend class raw_hash_set;
- public:
- using iterator_category = std::forward_iterator_tag;
- using value_type = typename raw_hash_set::value_type;
- using reference =
- y_absl::conditional_t<PolicyTraits::constant_iterators::value,
- const value_type&, value_type&>;
- using pointer = y_absl::remove_reference_t<reference>*;
- using difference_type = typename raw_hash_set::difference_type;
- iterator() {}
- // PRECONDITION: not an end() iterator.
- reference operator*() const {
- AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()");
- return PolicyTraits::element(slot_);
- }
- // PRECONDITION: not an end() iterator.
- pointer operator->() const {
- AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->");
- return &operator*();
- }
- // PRECONDITION: not an end() iterator.
- iterator& operator++() {
- AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++");
- ++ctrl_;
- ++slot_;
- skip_empty_or_deleted();
- return *this;
- }
- // PRECONDITION: not an end() iterator.
- iterator operator++(int) {
- auto tmp = *this;
- ++*this;
- return tmp;
- }
- friend bool operator==(const iterator& a, const iterator& b) {
- AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr());
- AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr());
- AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_,
- a.generation_ptr(), b.generation_ptr());
- return a.ctrl_ == b.ctrl_;
- }
- friend bool operator!=(const iterator& a, const iterator& b) {
- return !(a == b);
- }
- private:
- iterator(ctrl_t* ctrl, slot_type* slot,
- const GenerationType* generation_ptr)
- : HashSetIteratorGenerationInfo(generation_ptr),
- ctrl_(ctrl),
- slot_(slot) {
- // This assumption helps the compiler know that any non-end iterator is
- // not equal to any end iterator.
- Y_ABSL_ASSUME(ctrl != nullptr);
- }
- // For end() iterators.
- explicit iterator(const GenerationType* generation_ptr)
- : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {}
- // Fixes up `ctrl_` to point to a full slot by advancing `ctrl_` and `slot_`
- // until they reach one.
- //
- // If a sentinel is reached, we null `ctrl_` out instead.
- void skip_empty_or_deleted() {
- while (IsEmptyOrDeleted(*ctrl_)) {
- uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
- ctrl_ += shift;
- slot_ += shift;
- }
- if (Y_ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
- }
- // We use EmptyGroup() for default-constructed iterators so that they can
- // be distinguished from end iterators, which have nullptr ctrl_.
- ctrl_t* ctrl_ = EmptyGroup();
- // To avoid uninitialized member warnings, put slot_ in an anonymous union.
- // The member is not initialized on singleton and end iterators.
- union {
- slot_type* slot_;
- };
- };
- class const_iterator {
- friend class raw_hash_set;
- public:
- using iterator_category = typename iterator::iterator_category;
- using value_type = typename raw_hash_set::value_type;
- using reference = typename raw_hash_set::const_reference;
- using pointer = typename raw_hash_set::const_pointer;
- using difference_type = typename raw_hash_set::difference_type;
- const_iterator() = default;
- // Implicit construction from iterator.
- const_iterator(iterator i) : inner_(std::move(i)) {} // NOLINT
- reference operator*() const { return *inner_; }
- pointer operator->() const { return inner_.operator->(); }
- const_iterator& operator++() {
- ++inner_;
- return *this;
- }
- const_iterator operator++(int) { return inner_++; }
- friend bool operator==(const const_iterator& a, const const_iterator& b) {
- return a.inner_ == b.inner_;
- }
- friend bool operator!=(const const_iterator& a, const const_iterator& b) {
- return !(a == b);
- }
- private:
- const_iterator(const ctrl_t* ctrl, const slot_type* slot,
- const GenerationType* gen)
- : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot), gen) {
- }
- iterator inner_;
- };
- using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
- using insert_return_type = InsertReturnType<iterator, node_type>;
- // Note: can't use `= default` due to non-default noexcept (causes
- // problems for some compilers). NOLINTNEXTLINE
- raw_hash_set() noexcept(
- std::is_nothrow_default_constructible<hasher>::value &&
- std::is_nothrow_default_constructible<key_equal>::value &&
- std::is_nothrow_default_constructible<allocator_type>::value) {}
- Y_ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set(
- size_t bucket_count, const hasher& hash = hasher(),
- const key_equal& eq = key_equal(),
- const allocator_type& alloc = allocator_type())
- : settings_(CommonFields{}, hash, eq, alloc) {
- if (bucket_count) {
- common().set_capacity(NormalizeCapacity(bucket_count));
- initialize_slots();
- }
- }
- raw_hash_set(size_t bucket_count, const hasher& hash,
- const allocator_type& alloc)
- : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
- raw_hash_set(size_t bucket_count, const allocator_type& alloc)
- : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
- explicit raw_hash_set(const allocator_type& alloc)
- : raw_hash_set(0, hasher(), key_equal(), alloc) {}
- template <class InputIter>
- raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
- const hasher& hash = hasher(), const key_equal& eq = key_equal(),
- const allocator_type& alloc = allocator_type())
- : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
- hash, eq, alloc) {
- insert(first, last);
- }
- template <class InputIter>
- raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
- const hasher& hash, const allocator_type& alloc)
- : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
- template <class InputIter>
- raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
- const allocator_type& alloc)
- : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
- template <class InputIter>
- raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
- : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
- // Instead of accepting std::initializer_list<value_type> as the first
- // argument like std::unordered_set<value_type> does, we have two overloads
- // that accept std::initializer_list<T> and std::initializer_list<init_type>.
- // This is advantageous for performance.
- //
- // // Turns {"abc", "def"} into std::initializer_list<TString>, then
- // // copies the strings into the set.
- // std::unordered_set<TString> s = {"abc", "def"};
- //
- // // Turns {"abc", "def"} into std::initializer_list<const char*>, then
- // // copies the strings into the set.
- // y_absl::flat_hash_set<TString> s = {"abc", "def"};
- //
- // The same trick is used in insert().
- //
- // The enabler is necessary to prevent this constructor from triggering where
- // the copy constructor is meant to be called.
- //
- // y_absl::flat_hash_set<int> a, b{a};
- //
- // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
- template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
- raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
- const hasher& hash = hasher(), const key_equal& eq = key_equal(),
- const allocator_type& alloc = allocator_type())
- : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
- raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
- const hasher& hash = hasher(), const key_equal& eq = key_equal(),
- const allocator_type& alloc = allocator_type())
- : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
- template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
- raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
- const hasher& hash, const allocator_type& alloc)
- : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
- raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
- const hasher& hash, const allocator_type& alloc)
- : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
- template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
- raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
- const allocator_type& alloc)
- : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
- raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
- const allocator_type& alloc)
- : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
- template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
- raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
- : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
- raw_hash_set(std::initializer_list<init_type> init,
- const allocator_type& alloc)
- : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
- raw_hash_set(const raw_hash_set& that)
- : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
- that.alloc_ref())) {}
- raw_hash_set(const raw_hash_set& that, const allocator_type& a)
- : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
- const size_t size = that.size();
- if (size == 0) return;
- reserve(size);
- // Because the table is guaranteed to be empty, we can do something faster
- // than a full `insert`.
- for (const auto& v : that) {
- const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
- auto target = find_first_non_full_outofline(common(), hash);
- SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
- emplace_at(target.offset, v);
- common().maybe_increment_generation_on_insert();
- infoz().RecordInsert(hash, target.probe_length);
- }
- common().set_size(size);
- set_growth_left(growth_left() - size);
- }
- Y_ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
- std::is_nothrow_copy_constructible<hasher>::value &&
- std::is_nothrow_copy_constructible<key_equal>::value &&
- std::is_nothrow_copy_constructible<allocator_type>::value)
- : // Hash, equality and allocator are copied instead of moved because
- // `that` must be left valid. If Hash is, e.g., std::function<size_t(Key)>,
- // moving it would create a null functor that cannot be called.
- settings_(y_absl::exchange(that.common(), CommonFields{}),
- that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
- raw_hash_set(raw_hash_set&& that, const allocator_type& a)
- : settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) {
- if (a == that.alloc_ref()) {
- std::swap(common(), that.common());
- } else {
- reserve(that.size());
- // Note: this will copy elements of dense_set and unordered_set instead of
- // moving them. This can be fixed if it ever becomes an issue.
- for (auto& elem : that) insert(std::move(elem));
- }
- }
- raw_hash_set& operator=(const raw_hash_set& that) {
- raw_hash_set tmp(that,
- AllocTraits::propagate_on_container_copy_assignment::value
- ? that.alloc_ref()
- : alloc_ref());
- swap(tmp);
- return *this;
- }
- raw_hash_set& operator=(raw_hash_set&& that) noexcept(
- y_absl::allocator_traits<allocator_type>::is_always_equal::value &&
- std::is_nothrow_move_assignable<hasher>::value &&
- std::is_nothrow_move_assignable<key_equal>::value) {
- // TODO(sbenza): We should only use the operations from the noexcept clause
- // to make sure we actually adhere to that contract.
- // NOLINTNEXTLINE: not returning *this for performance.
- return move_assign(
- std::move(that),
- typename AllocTraits::propagate_on_container_move_assignment());
- }
- ~raw_hash_set() {
- const size_t cap = capacity();
- if (!cap) return;
- destroy_slots();
- // Unpoison before returning the memory to the allocator.
- SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * cap);
- Deallocate<BackingArrayAlignment(alignof(slot_type))>(
- &alloc_ref(), common().backing_array_start(),
- AllocSize(cap, sizeof(slot_type), alignof(slot_type)));
- infoz().Unregister();
- }
- iterator begin() Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- auto it = iterator_at(0);
- it.skip_empty_or_deleted();
- return it;
- }
- iterator end() Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return iterator(common().generation_ptr());
- }
- const_iterator begin() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return const_cast<raw_hash_set*>(this)->begin();
- }
- const_iterator end() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return iterator(common().generation_ptr());
- }
- const_iterator cbegin() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return begin();
- }
- const_iterator cend() const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
- bool empty() const { return !size(); }
- size_t size() const { return common().size(); }
- size_t capacity() const { return common().capacity(); }
- size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
- Y_ABSL_ATTRIBUTE_REINITIALIZES void clear() {
- // Iterating over this container is O(bucket_count()). When bucket_count()
- // is much greater than size(), iteration becomes prohibitively expensive.
- // For clear() it is more important to reuse the allocated array when the
- // container is small because allocation takes comparatively long time
- // compared to destruction of the elements of the container. So we pick the
- // largest bucket_count() threshold for which iteration is still fast and
- // past that we simply deallocate the array.
- const size_t cap = capacity();
- if (cap == 0) {
- // Already guaranteed to be empty; so nothing to do.
- } else {
- destroy_slots();
- ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128);
- }
- common().set_reserved_growth(0);
- common().set_reservation_size(0);
- }
- inline void destroy_slots() {
- const size_t cap = capacity();
- const ctrl_t* ctrl = control();
- slot_type* slot = slot_array();
- for (size_t i = 0; i != cap; ++i) {
- if (IsFull(ctrl[i])) {
- PolicyTraits::destroy(&alloc_ref(), slot + i);
- }
- }
- }
- // This overload kicks in when the argument is an rvalue of insertable and
- // decomposable type other than init_type.
- //
- // flat_hash_map<TString, int> m;
- // m.insert(std::make_pair("abc", 42));
- // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
- // bug.
- template <class T, RequiresInsertable<T> = 0, class T2 = T,
- typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
- T* = nullptr>
- std::pair<iterator, bool> insert(T&& value) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return emplace(std::forward<T>(value));
- }
- // This overload kicks in when the argument is a bitfield or an lvalue of
- // insertable and decomposable type.
- //
- // union { int n : 1; };
- // flat_hash_set<int> s;
- // s.insert(n);
- //
- // flat_hash_set<TString> s;
- // const char* p = "hello";
- // s.insert(p);
- //
- template <
- class T, RequiresInsertable<const T&> = 0,
- typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
- std::pair<iterator, bool> insert(const T& value)
- Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return emplace(value);
- }
- // This overload kicks in when the argument is an rvalue of init_type. Its
- // purpose is to handle brace-init-list arguments.
- //
- // flat_hash_map<TString, int> s;
- // s.insert({"abc", 42});
- std::pair<iterator, bool> insert(init_type&& value)
- Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return emplace(std::move(value));
- }
- // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
- // bug.
- template <class T, RequiresInsertable<T> = 0, class T2 = T,
- typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
- T* = nullptr>
- iterator insert(const_iterator, T&& value) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert(std::forward<T>(value)).first;
- }
- template <
- class T, RequiresInsertable<const T&> = 0,
- typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
- iterator insert(const_iterator,
- const T& value) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert(value).first;
- }
- iterator insert(const_iterator,
- init_type&& value) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return insert(std::move(value)).first;
- }
- template <class InputIt>
- void insert(InputIt first, InputIt last) {
- for (; first != last; ++first) emplace(*first);
- }
- template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
- void insert(std::initializer_list<T> ilist) {
- insert(ilist.begin(), ilist.end());
- }
- void insert(std::initializer_list<init_type> ilist) {
- insert(ilist.begin(), ilist.end());
- }
- insert_return_type insert(node_type&& node) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- if (!node) return {end(), false, node_type()};
- const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
- auto res = PolicyTraits::apply(
- InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
- elem);
- if (res.second) {
- CommonAccess::Reset(&node);
- return {res.first, true, node_type()};
- } else {
- return {res.first, false, std::move(node)};
- }
- }
- iterator insert(const_iterator,
- node_type&& node) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- auto res = insert(std::move(node));
- node = std::move(res.node);
- return res.position;
- }
- // This overload kicks in if we can deduce the key from args. This enables us
- // to avoid constructing value_type if an entry with the same key already
- // exists.
- //
- // For example:
- //
- // flat_hash_map<TString, TString> m = {{"abc", "def"}};
- // // Creates no TString copies and makes no heap allocations.
- // m.emplace("abc", "xyz");
- template <class... Args, typename std::enable_if<
- IsDecomposable<Args...>::value, int>::type = 0>
- std::pair<iterator, bool> emplace(Args&&... args)
- Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return PolicyTraits::apply(EmplaceDecomposable{*this},
- std::forward<Args>(args)...);
- }
- // This overload kicks in if we cannot deduce the key from args. It constructs
- // value_type unconditionally and then either moves it into the table or
- // destroys.
- template <class... Args, typename std::enable_if<
- !IsDecomposable<Args...>::value, int>::type = 0>
- std::pair<iterator, bool> emplace(Args&&... args)
- Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- alignas(slot_type) unsigned char raw[sizeof(slot_type)];
- slot_type* slot = reinterpret_cast<slot_type*>(&raw);
- PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
- const auto& elem = PolicyTraits::element(slot);
- return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
- }
- template <class... Args>
- iterator emplace_hint(const_iterator,
- Args&&... args) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return emplace(std::forward<Args>(args)...).first;
- }
- // Extension API: support for lazy emplace.
- //
- // Looks up key in the table. If found, returns the iterator to the element.
- // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`,
- // and returns an iterator to the new element.
- //
- // `f` must abide by several restrictions:
- // - it MUST call `raw_hash_set::constructor` with arguments as if a
- // `raw_hash_set::value_type` is constructed,
- // - it MUST NOT access the container before the call to
- // `raw_hash_set::constructor`, and
- // - it MUST NOT erase the lazily emplaced element.
- // Doing any of these is undefined behavior.
- //
- // For example:
- //
- // std::unordered_set<ArenaString> s;
- // // Makes an ArenaString even if "abc" is already in the set.
- // s.insert(ArenaString(&arena, "abc"));
- //
- // flat_hash_set<ArenaString> s;
- // // Makes an ArenaString only if "abc" is not yet in the set.
- // s.lazy_emplace("abc", [&](const constructor& ctor) {
- // ctor(&arena, "abc");
- // });
- //
- // WARNING: This API is currently experimental. If there is a way to implement
- // the same thing with the rest of the API, prefer that.
- class constructor {
- friend class raw_hash_set;
- public:
- template <class... Args>
- void operator()(Args&&... args) const {
- assert(*slot_);
- PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
- *slot_ = nullptr;
- }
- private:
- constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
- allocator_type* alloc_;
- slot_type** slot_;
- };
- template <class K = key_type, class F>
- iterator lazy_emplace(const key_arg<K>& key,
- F&& f) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- auto res = find_or_prepare_insert(key);
- if (res.second) {
- slot_type* slot = slot_array() + res.first;
- std::forward<F>(f)(constructor(&alloc_ref(), &slot));
- assert(!slot);
- }
- return iterator_at(res.first);
- }
- // Extension API: support for heterogeneous keys.
- //
- // std::unordered_set<TString> s;
- // // Turns "abc" into TString.
- // s.erase("abc");
- //
- // flat_hash_set<TString> s;
- // // Uses "abc" directly without copying it into TString.
- // s.erase("abc");
- template <class K = key_type>
- size_type erase(const key_arg<K>& key) {
- auto it = find(key);
- if (it == end()) return 0;
- erase(it);
- return 1;
- }
- // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`,
- // this method returns void to reduce algorithmic complexity to O(1). The
- // iterator is invalidated, so any increment should be done before calling
- // erase. In order to erase while iterating across a map, use the following
- // idiom (which also works for standard containers):
- //
- // for (auto it = m.begin(), end = m.end(); it != end;) {
- // // `erase()` will invalidate `it`, so advance `it` first.
- // auto copy_it = it++;
- // if (<pred>) {
- // m.erase(copy_it);
- // }
- // }
- void erase(const_iterator cit) { erase(cit.inner_); }
- // This overload is necessary because otherwise erase<K>(const K&) would be
- // a better match if non-const iterator is passed as an argument.
- void erase(iterator it) {
- AssertIsFull(it.ctrl_, it.generation(), it.generation_ptr(), "erase()");
- PolicyTraits::destroy(&alloc_ref(), it.slot_);
- erase_meta_only(it);
- }
- iterator erase(const_iterator first,
- const_iterator last) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- // We check for empty first because ClearBackingArray requires that
- // capacity() > 0 as a precondition.
- if (empty()) return end();
- if (first == begin() && last == end()) {
- // TODO(ezb): we access control bytes in destroy_slots so it could make
- // sense to combine destroy_slots and ClearBackingArray to avoid cache
- // misses when the table is large. Note that we also do this in clear().
- destroy_slots();
- ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true);
- common().set_reserved_growth(common().reservation_size());
- return end();
- }
- while (first != last) {
- erase(first++);
- }
- return last.inner_;
- }
- // Moves elements from `src` into `this`.
- // If the element already exists in `this`, it is left unmodified in `src`.
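- //
- // For example (hedged):
- //
- //   flat_hash_set<int> dst = {1, 2}, src = {2, 3};
- //   dst.merge(src);  // dst == {1, 2, 3}; src == {2}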
- template <typename H, typename E>
- void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT
- assert(this != &src);
- for (auto it = src.begin(), e = src.end(); it != e;) {
- auto next = std::next(it);
- if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
- PolicyTraits::element(it.slot_))
- .second) {
- src.erase_meta_only(it);
- }
- it = next;
- }
- }
- template <typename H, typename E>
- void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
- merge(src);
- }
- node_type extract(const_iterator position) {
- AssertIsFull(position.inner_.ctrl_, position.inner_.generation(),
- position.inner_.generation_ptr(), "extract()");
- auto node =
- CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
- erase_meta_only(position);
- return node;
- }
- template <
- class K = key_type,
- typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
- node_type extract(const key_arg<K>& key) {
- auto it = find(key);
- return it == end() ? node_type() : extract(const_iterator{it});
- }
- void swap(raw_hash_set& that) noexcept(
- IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
- IsNoThrowSwappable<allocator_type>(
- typename AllocTraits::propagate_on_container_swap{})) {
- using std::swap;
- swap(common(), that.common());
- swap(hash_ref(), that.hash_ref());
- swap(eq_ref(), that.eq_ref());
- SwapAlloc(alloc_ref(), that.alloc_ref(),
- typename AllocTraits::propagate_on_container_swap{});
- }
- void rehash(size_t n) {
- if (n == 0 && capacity() == 0) return;
- if (n == 0 && size() == 0) {
- ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false);
- return;
- }
- // bitor is a faster way of doing `max` here. We will round up to the next
- // power-of-2-minus-1, so bitor is good enough.
- auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
- // n == 0 unconditionally rehashes as per the standard.
- if (n == 0 || m > capacity()) {
- resize(m);
- // This is after resize, to ensure that we have completed the allocation
- // and have potentially sampled the hashtable.
- infoz().RecordReservation(n);
- }
- }
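- // Ensures the table can hold at least `n` elements without rehashing, e.g.
- // (a hedged sketch):
- //
- //   s.reserve(s.size() + batch.size());
- //   for (auto& v : batch) s.insert(v);  // no rehash inside this loop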
- void reserve(size_t n) {
- if (n > size() + growth_left()) {
- size_t m = GrowthToLowerboundCapacity(n);
- resize(NormalizeCapacity(m));
- // This is after resize, to ensure that we have completed the allocation
- // and have potentially sampled the hashtable.
- infoz().RecordReservation(n);
- }
- common().reset_reserved_growth(n);
- common().set_reservation_size(n);
- }
- // Extension API: support for heterogeneous keys.
- //
- // std::unordered_set<TString> s;
- // // Turns "abc" into TString.
- // s.count("abc");
- //
- // ch_set<TString> s;
- // // Uses "abc" directly without copying it into TString.
- // s.count("abc");
- template <class K = key_type>
- size_t count(const key_arg<K>& key) const {
- return find(key) == end() ? 0 : 1;
- }
- // Issues CPU prefetch instructions for the memory needed to find or insert
- // a key. Like all lookup functions, this supports heterogeneous keys.
- //
- // NOTE: This is a very low level operation and should not be used without
- // specific benchmarks indicating its importance.
- template <class K = key_type>
- void prefetch(const key_arg<K>& key) const {
- (void)key;
- // Avoid probing if we won't be able to prefetch the addresses received.
- #ifdef Y_ABSL_HAVE_PREFETCH
- prefetch_heap_block();
- auto seq = probe(common(), hash_ref()(key));
- PrefetchToLocalCache(control() + seq.offset());
- PrefetchToLocalCache(slot_array() + seq.offset());
- #endif // Y_ABSL_HAVE_PREFETCH
- }
- // The API of find() has two extensions.
- //
- // 1. The hash can be passed by the user. It must be equal to the hash of the
- // key.
- //
- // 2. The type of the key argument doesn't have to be key_type. This is so
- // called heterogeneous key support.
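- //
- // For example (hedged), the following lookups are equivalent:
- //
- //   auto it1 = s.find(key);
- //   auto it2 = s.find(key, s.hash_function()(key));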
- template <class K = key_type>
- iterator find(const key_arg<K>& key,
- size_t hash) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- auto seq = probe(common(), hash);
- slot_type* slot_ptr = slot_array();
- const ctrl_t* ctrl = control();
- while (true) {
- Group g{ctrl + seq.offset()};
- for (uint32_t i : g.Match(H2(hash))) {
- if (Y_ABSL_PREDICT_TRUE(PolicyTraits::apply(
- EqualElement<K>{key, eq_ref()},
- PolicyTraits::element(slot_ptr + seq.offset(i)))))
- return iterator_at(seq.offset(i));
- }
- if (Y_ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
- seq.next();
- assert(seq.index() <= capacity() && "full table!");
- }
- }
- template <class K = key_type>
- iterator find(const key_arg<K>& key) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- prefetch_heap_block();
- return find(key, hash_ref()(key));
- }
- template <class K = key_type>
- const_iterator find(const key_arg<K>& key,
- size_t hash) const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return const_cast<raw_hash_set*>(this)->find(key, hash);
- }
- template <class K = key_type>
- const_iterator find(const key_arg<K>& key) const
- Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- prefetch_heap_block();
- return find(key, hash_ref()(key));
- }
- template <class K = key_type>
- bool contains(const key_arg<K>& key) const {
- return find(key) != end();
- }
- template <class K = key_type>
- std::pair<iterator, iterator> equal_range(const key_arg<K>& key)
- Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- auto it = find(key);
- if (it != end()) return {it, std::next(it)};
- return {it, it};
- }
- template <class K = key_type>
- std::pair<const_iterator, const_iterator> equal_range(
- const key_arg<K>& key) const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
- auto it = find(key);
- if (it != end()) return {it, std::next(it)};
- return {it, it};
- }
- size_t bucket_count() const { return capacity(); }
- float load_factor() const {
- return capacity() ? static_cast<double>(size()) / capacity() : 0.0;
- }
- float max_load_factor() const { return 1.0f; }
- void max_load_factor(float) {
- // Does nothing.
- }
- hasher hash_function() const { return hash_ref(); }
- key_equal key_eq() const { return eq_ref(); }
- allocator_type get_allocator() const { return alloc_ref(); }
- friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
- if (a.size() != b.size()) return false;
- const raw_hash_set* outer = &a;
- const raw_hash_set* inner = &b;
- if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
- for (const value_type& elem : *outer)
- if (!inner->has_element(elem)) return false;
- return true;
- }
- friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
- return !(a == b);
- }
- template <typename H>
- friend typename std::enable_if<H::template is_hashable<value_type>::value,
- H>::type
- AbslHashValue(H h, const raw_hash_set& s) {
- return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
- s.size());
- }
- friend void swap(raw_hash_set& a,
- raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
- a.swap(b);
- }
- private:
- template <class Container, typename Enabler>
- friend struct y_absl::container_internal::hashtable_debug_internal::
- HashtableDebugAccess;
- struct FindElement {
- template <class K, class... Args>
- const_iterator operator()(const K& key, Args&&...) const {
- return s.find(key);
- }
- const raw_hash_set& s;
- };
- struct HashElement {
- template <class K, class... Args>
- size_t operator()(const K& key, Args&&...) const {
- return h(key);
- }
- const hasher& h;
- };
- template <class K1>
- struct EqualElement {
- template <class K2, class... Args>
- bool operator()(const K2& lhs, Args&&...) const {
- return eq(lhs, rhs);
- }
- const K1& rhs;
- const key_equal& eq;
- };
- struct EmplaceDecomposable {
- template <class K, class... Args>
- std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
- auto res = s.find_or_prepare_insert(key);
- if (res.second) {
- s.emplace_at(res.first, std::forward<Args>(args)...);
- }
- return {s.iterator_at(res.first), res.second};
- }
- raw_hash_set& s;
- };
- template <bool do_destroy>
- struct InsertSlot {
- template <class K, class... Args>
- std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
- auto res = s.find_or_prepare_insert(key);
- if (res.second) {
- PolicyTraits::transfer(&s.alloc_ref(), s.slot_array() + res.first,
- &slot);
- } else if (do_destroy) {
- PolicyTraits::destroy(&s.alloc_ref(), &slot);
- }
- return {s.iterator_at(res.first), res.second};
- }
- raw_hash_set& s;
- // Constructed slot. Either moved into place or destroyed.
- slot_type&& slot;
- };
- // Erases, but does not destroy, the value pointed to by `it`.
- //
- // This merely updates the pertinent control byte. This can be used in
- // conjunction with Policy::transfer to move the object to another place.
- void erase_meta_only(const_iterator it) {
- EraseMetaOnly(common(), it.inner_.ctrl_, sizeof(slot_type));
- }
- // Allocates a backing array for `self` and initializes its control bytes.
- // This reads `capacity` and updates all other fields based on the result of
- // the allocation.
- //
- // This does not free the currently held array; `capacity` must be nonzero.
- inline void initialize_slots() {
- // People are often sloppy with the exact type of their allocator (sometimes
- // it has an extra const or is missing the pair, but rebinds made it work
- // anyway).
- using CharAlloc =
- typename y_absl::allocator_traits<Alloc>::template rebind_alloc<char>;
- InitializeSlots<CharAlloc, sizeof(slot_type), alignof(slot_type)>(
- common(), CharAlloc(alloc_ref()));
- }
- Y_ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) {
- assert(IsValidCapacity(new_capacity));
- auto* old_ctrl = control();
- auto* old_slots = slot_array();
- const size_t old_capacity = common().capacity();
- common().set_capacity(new_capacity);
- initialize_slots();
- auto* new_slots = slot_array();
- size_t total_probe_length = 0;
- for (size_t i = 0; i != old_capacity; ++i) {
- if (IsFull(old_ctrl[i])) {
- size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
- PolicyTraits::element(old_slots + i));
- auto target = find_first_non_full(common(), hash);
- size_t new_i = target.offset;
- total_probe_length += target.probe_length;
- SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
- PolicyTraits::transfer(&alloc_ref(), new_slots + new_i, old_slots + i);
- }
- }
- if (old_capacity) {
- SanitizerUnpoisonMemoryRegion(old_slots,
- sizeof(slot_type) * old_capacity);
- Deallocate<BackingArrayAlignment(alignof(slot_type))>(
- &alloc_ref(), old_ctrl - ControlOffset(),
- AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
- }
- infoz().RecordRehash(total_probe_length);
- }
- // Prunes control bytes to remove as many tombstones as possible.
- //
- // See the comment on `rehash_and_grow_if_necessary()`.
- inline void drop_deletes_without_resize() {
- // Stack-allocate space for swapping elements.
- alignas(slot_type) unsigned char tmp[sizeof(slot_type)];
- DropDeletesWithoutResize(common(), GetPolicyFunctions(), tmp);
- }
- // Called whenever the table *might* need to conditionally grow.
- //
- // This function is an optimization opportunity to perform a rehash even when
- // growth is unnecessary, because vacating tombstones is beneficial for
- // performance in the long-run.
- void rehash_and_grow_if_necessary() {
- const size_t cap = capacity();
- if (cap > Group::kWidth &&
- // Do these calculations in 64-bit to avoid overflow.
- size() * uint64_t{32} <= cap * uint64_t{25}) {
- // Squash DELETED without growing if there is enough capacity.
- //
- // Rehash in place if the current size is <= 25/32 of capacity.
- // Rationale for such a high factor: 1) drop_deletes_without_resize() is
- // faster than resize, and 2) it takes quite a bit of work to add
- // tombstones. In the worst case, it seems to take approximately 4
- // insert/erase pairs to create a single tombstone and so if we are
- // rehashing because of tombstones, we can afford to rehash-in-place as
- // long as we are reclaiming at least 1/8 the capacity without doing more
- // than 2X the work. (Where "work" is defined to be size() for rehashing
- // or rehashing in place, and 1 for an insert or erase.) But rehashing in
- // place is faster per operation than inserting or even doubling the size
- // of the table, so we can actually afford to reclaim even less space from a
- // resize-in-place. The decision is to rehash in place if we can reclaim
- // at about 1/8th of the usable capacity (specifically 3/28 of the
- // capacity) which means that the total cost of rehashing will be a small
- // fraction of the total work.
- //
- // Here is output of an experiment using the BM_CacheInSteadyState
- // benchmark running the old case (where we rehash-in-place only if we can
- // reclaim at least 7/16*capacity) vs. this code (which rehashes in place
- // if we can recover 3/32*capacity).
- //
- // Note that although the worst-case number of rehashes jumped from 15 to
- // 190, the number of operations per second is almost the same.
- //
- // Abridged output of running BM_CacheInSteadyState benchmark from
- // raw_hash_set_benchmark. N is the number of insert/erase operations.
- //
- // | OLD (recover >= 7/16) | NEW (recover >= 3/32)
- // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes
- // 448 | 145284 0.44 18 | 140118 0.44 19
- // 493 | 152546 0.24 11 | 151417 0.48 28
- // 538 | 151439 0.26 11 | 151152 0.53 38
- // 583 | 151765 0.28 11 | 150572 0.57 50
- // 628 | 150241 0.31 11 | 150853 0.61 66
- // 672 | 149602 0.33 12 | 150110 0.66 90
- // 717 | 149998 0.35 12 | 149531 0.70 129
- // 762 | 149836 0.37 13 | 148559 0.74 190
- // 807 | 149736 0.39 14 | 151107 0.39 14
- // 852 | 150204 0.42 15 | 151019 0.42 15
- drop_deletes_without_resize();
- } else {
- // Otherwise grow the container.
- resize(NextCapacity(cap));
- }
- }
- bool has_element(const value_type& elem) const {
- size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
- auto seq = probe(common(), hash);
- const ctrl_t* ctrl = control();
- while (true) {
- Group g{ctrl + seq.offset()};
- for (uint32_t i : g.Match(H2(hash))) {
- if (Y_ABSL_PREDICT_TRUE(
- PolicyTraits::element(slot_array() + seq.offset(i)) == elem))
- return true;
- }
- if (Y_ABSL_PREDICT_TRUE(g.MaskEmpty())) return false;
- seq.next();
- assert(seq.index() <= capacity() && "full table!");
- }
- return false;
- }
- // TODO(alkis): Optimize this assuming *this and that don't overlap.
- raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
- raw_hash_set tmp(std::move(that));
- swap(tmp);
- return *this;
- }
- raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
- raw_hash_set tmp(std::move(that), alloc_ref());
- swap(tmp);
- return *this;
- }
- protected:
- // Attempts to find `key` in the table; if it isn't found, returns a slot that
- // the value can be inserted into, with the control byte already set to
- // `key`'s H2.
- template <class K>
- std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
- prefetch_heap_block();
- auto hash = hash_ref()(key);
- auto seq = probe(common(), hash);
- const ctrl_t* ctrl = control();
- while (true) {
- Group g{ctrl + seq.offset()};
- for (uint32_t i : g.Match(H2(hash))) {
- if (Y_ABSL_PREDICT_TRUE(PolicyTraits::apply(
- EqualElement<K>{key, eq_ref()},
- PolicyTraits::element(slot_array() + seq.offset(i)))))
- return {seq.offset(i), false};
- }
- if (Y_ABSL_PREDICT_TRUE(g.MaskEmpty())) break;
- seq.next();
- assert(seq.index() <= capacity() && "full table!");
- }
- return {prepare_insert(hash), true};
- }
- // Given the hash of a value not currently in the table, finds the next
- // viable slot index to insert it at.
- //
- // REQUIRES: At least one non-full slot available.
- size_t prepare_insert(size_t hash) Y_ABSL_ATTRIBUTE_NOINLINE {
- const bool rehash_for_bug_detection =
- common().should_rehash_for_bug_detection_on_insert();
- if (rehash_for_bug_detection) {
- // Move to a different heap allocation in order to detect bugs.
- const size_t cap = capacity();
- resize(growth_left() > 0 ? cap : NextCapacity(cap));
- }
- auto target = find_first_non_full(common(), hash);
- if (!rehash_for_bug_detection &&
- Y_ABSL_PREDICT_FALSE(growth_left() == 0 &&
- !IsDeleted(control()[target.offset]))) {
- rehash_and_grow_if_necessary();
- target = find_first_non_full(common(), hash);
- }
- common().set_size(common().size() + 1);
- set_growth_left(growth_left() - IsEmpty(control()[target.offset]));
- SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
- common().maybe_increment_generation_on_insert();
- infoz().RecordInsert(hash, target.probe_length);
- return target.offset;
- }
- // Constructs the value in the space pointed by the iterator. This only works
- // after an unsuccessful find_or_prepare_insert() and before any other
- // modifications happen in the raw_hash_set.
- //
- // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
- // k is the key decomposed from `forward<Args>(args)...`, and the bool
- // returned by find_or_prepare_insert(k) was true.
- // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
- template <class... Args>
- void emplace_at(size_t i, Args&&... args) {
- PolicyTraits::construct(&alloc_ref(), slot_array() + i,
- std::forward<Args>(args)...);
- assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
- iterator_at(i) &&
- "constructed value does not match the lookup key");
- }

  iterator iterator_at(size_t i) Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return {control() + i, slot_array() + i, common().generation_ptr()};
  }
  const_iterator iterator_at(size_t i) const Y_ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return {control() + i, slot_array() + i, common().generation_ptr()};
  }

 private:
  friend struct RawHashSetTestOnlyAccess;

  // The number of slots we can still fill without needing to rehash.
  //
  // This is stored separately due to tombstones: we do not include tombstones
  // in the growth capacity, because we'd like to rehash when the table is
  // otherwise filled with tombstones; without this, probe sequences might get
  // unacceptably long without ever triggering a rehash. Callers can also
  // force a rehash via the standard `rehash(0)`, which will recompute this
  // value as a side-effect.
  //
  // See `CapacityToGrowth()`.
  size_t growth_left() const { return common().growth_left(); }
  void set_growth_left(size_t gl) { return common().set_growth_left(gl); }
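
  // Hedged numeric example (exact values come from CapacityToGrowth()): a
  // table with capacity 15 can hold CapacityToGrowth(15) == 14 elements.
  // Erasing an element that leaves a kDeleted tombstone does not give that
  // capacity back, so an insert/erase workload drains growth_left() toward 0
  // and eventually rehashes even though size() stays small; `rehash(0)` can
  // force that cleanup early.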

  // Prefetch the heap-allocated memory region to resolve potential TLB and
  // cache misses. This is intended to overlap with the computation of the
  // key's hash.
  void prefetch_heap_block() const {
#if Y_ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
    // Read prefetch (rw = 0) with low temporal locality (locality = 1).
    __builtin_prefetch(control(), 0, 1);
#endif
  }
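
  // Hedged sketch of the intended overlap (mirrors the pattern used by the
  // lookup paths above):
  //
  //   prefetch_heap_block();         // start loading the control bytes early
  //   size_t hash = hash_ref()(k);   // hashing runs while the line is fetched
  //   auto seq = probe(common(), hash);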

  CommonFields& common() { return settings_.template get<0>(); }
  const CommonFields& common() const { return settings_.template get<0>(); }

  ctrl_t* control() const { return common().control(); }
  slot_type* slot_array() const {
    return static_cast<slot_type*>(common().slot_array());
  }
  HashtablezInfoHandle& infoz() { return common().infoz(); }

  hasher& hash_ref() { return settings_.template get<1>(); }
  const hasher& hash_ref() const { return settings_.template get<1>(); }
  key_equal& eq_ref() { return settings_.template get<2>(); }
  const key_equal& eq_ref() const { return settings_.template get<2>(); }
  allocator_type& alloc_ref() { return settings_.template get<3>(); }
  const allocator_type& alloc_ref() const {
    return settings_.template get<3>();
  }

  // Make type-specific functions for this type's PolicyFunctions struct.
  static size_t hash_slot_fn(void* set, void* slot) {
    auto* h = static_cast<raw_hash_set*>(set);
    return PolicyTraits::apply(
        HashElement{h->hash_ref()},
        PolicyTraits::element(static_cast<slot_type*>(slot)));
  }
  static void transfer_slot_fn(void* set, void* dst, void* src) {
    auto* h = static_cast<raw_hash_set*>(set);
    PolicyTraits::transfer(&h->alloc_ref(), static_cast<slot_type*>(dst),
                           static_cast<slot_type*>(src));
  }
  // Note: dealloc_fn will only be used if we have a non-standard allocator.
  static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
    auto* set = reinterpret_cast<raw_hash_set*>(&common);

    // Unpoison before returning the memory to the allocator.
    SanitizerUnpoisonMemoryRegion(common.slot_array(),
                                  sizeof(slot_type) * common.capacity());

    Deallocate<BackingArrayAlignment(alignof(slot_type))>(
        &set->alloc_ref(), common.backing_array_start(),
        common.alloc_size(sizeof(slot_type), alignof(slot_type)));
  }

  static const PolicyFunctions& GetPolicyFunctions() {
    static constexpr PolicyFunctions value = {
        sizeof(slot_type),
        &raw_hash_set::hash_slot_fn,
        PolicyTraits::transfer_uses_memcpy()
            ? TransferRelocatable<sizeof(slot_type)>
            : &raw_hash_set::transfer_slot_fn,
        (std::is_same<SlotAlloc, std::allocator<slot_type>>::value
             ? &DeallocateStandard<alignof(slot_type)>
             : &raw_hash_set::dealloc_fn),
    };
    return value;
  }
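
  // Hedged sketch of why these exist: routines that operate only on
  // CommonFields can be shared across all table instantiations and dispatch
  // through PolicyFunctions instead of being templated on the table type.
  // `DropArray` is hypothetical, and `policy.dealloc` assumes the member
  // naming from the PolicyFunctions definition earlier in this file.
  //
  //   void DropArray(CommonFields& c, const PolicyFunctions& policy) {
  //     (*policy.dealloc)(c, policy);  // no slot_type knowledge needed here
  //   }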

  // Bundle together CommonFields plus other objects which might be empty.
  // CompressedTuple will ensure that sizeof is not affected by any of the
  // empty fields that occur after CommonFields.
  y_absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
                                              allocator_type>
      settings_{CommonFields{}, hasher{}, key_equal{}, allocator_type{}};
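
  // Hedged illustration: with stateless functors and a stateless allocator,
  // the three trailing fields compress away entirely, so the whole table
  // object is typically no larger than CommonFields itself, e.g.
  //
  //   static_assert(sizeof(raw_hash_set) == sizeof(CommonFields), "");
  //
  // (This holds in the common empty-functor case but is not guaranteed for
  // stateful hashers, comparators, or allocators.)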
};

// Erases all elements that satisfy the predicate `pred` from the container
// `c`.
template <typename P, typename H, typename E, typename A, typename Predicate>
typename raw_hash_set<P, H, E, A>::size_type EraseIf(
    Predicate& pred, raw_hash_set<P, H, E, A>* c) {
  const auto initial_size = c->size();
  for (auto it = c->begin(), last = c->end(); it != last;) {
    if (pred(*it)) {
      c->erase(it++);
    } else {
      ++it;
    }
  }
  return initial_size - c->size();
}
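
// A minimal usage sketch (hedged): the swisstable containers call EraseIf
// via their `erase_if` overloads, but it can be used directly with any
// container derived from raw_hash_set.
//
//   y_absl::flat_hash_set<int> s = {1, 2, 3, 4};
//   auto is_odd = [](int v) { return v % 2 != 0; };
//   size_t n = y_absl::container_internal::EraseIf(is_odd, &s);  // n == 2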

namespace hashtable_debug_internal {
template <typename Set>
struct HashtableDebugAccess<Set, y_absl::void_t<typename Set::raw_hash_set>> {
  using Traits = typename Set::PolicyTraits;
  using Slot = typename Traits::slot_type;

  static size_t GetNumProbes(const Set& set,
                             const typename Set::key_type& key) {
    size_t num_probes = 0;
    size_t hash = set.hash_ref()(key);
    auto seq = probe(set.common(), hash);
    const ctrl_t* ctrl = set.control();
    while (true) {
      container_internal::Group g{ctrl + seq.offset()};
      for (uint32_t i : g.Match(container_internal::H2(hash))) {
        if (Traits::apply(
                typename Set::template EqualElement<typename Set::key_type>{
                    key, set.eq_ref()},
                Traits::element(set.slot_array() + seq.offset(i))))
          return num_probes;
        ++num_probes;
      }
      if (g.MaskEmpty()) return num_probes;
      seq.next();
      ++num_probes;
    }
  }
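
  // Hedged reading of the count: a key found in its first group with no H2
  // false positives reports 0 probes; each false-positive match within a
  // group and each advance to the next group adds one. Example (assuming the
  // GetHashtableDebugNumProbes() wrapper from hashtable_debug.h):
  //
  //   y_absl::flat_hash_set<int> s;
  //   s.insert(42);
  //   size_t p = y_absl::container_internal::GetHashtableDebugNumProbes(s, 42);
  //   // p == 0 unless 42 collided during insertion.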

  static size_t AllocatedByteSize(const Set& c) {
    size_t capacity = c.capacity();
    if (capacity == 0) return 0;
    size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));

    // space_used(nullptr) reports a fixed per-slot overhead, or ~size_t{} as
    // a sentinel meaning "varies per element; inspect each full slot".
    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
    if (per_slot != ~size_t{}) {
      m += per_slot * c.size();
    } else {
      const ctrl_t* ctrl = c.control();
      for (size_t i = 0; i != capacity; ++i) {
        if (container_internal::IsFull(ctrl[i])) {
          m += Traits::space_used(c.slot_array() + i);
        }
      }
    }
    return m;
  }
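
  // Hedged arithmetic example: for a flat table of 8-byte slots at capacity
  // 15, the backing array costs AllocSize(15, 8, 8) bytes (control bytes,
  // padding, and 15 slots), and flat policies typically report
  // space_used() == 0, so that is the whole answer; node-based policies add
  // per-node heap usage for each of the size() elements.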

  static size_t LowerBoundAllocatedByteSize(size_t size) {
    size_t capacity = GrowthToLowerboundCapacity(size);
    if (capacity == 0) return 0;
    size_t m =
        AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot));
    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
    if (per_slot != ~size_t{}) {
      m += per_slot * size;
    }
    return m;
  }
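
  // Hedged example: LowerBoundAllocatedByteSize(10) takes
  // GrowthToLowerboundCapacity(10) == 11 (the smallest capacity whose growth
  // limit reaches 10), normalizes it to the 2^k - 1 shape, i.e.
  // NormalizeCapacity(11) == 15, and prices the backing array for 15 slots.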
};

}  // namespace hashtable_debug_internal
}  // namespace container_internal
Y_ABSL_NAMESPACE_END
}  // namespace y_absl

#undef Y_ABSL_SWISSTABLE_ENABLE_GENERATIONS

#endif  // Y_ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_