raw_hash_set.h 163 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383238333834383538363837383838393840384138423843384438453846384738483849385038513852385338543855385638573858385938603861386238633864386538663867386838693870387138723873387438753876387738783879388038813882388338843885388638873888388938903891389238933894389538963897389838993900390139023903390439053906390739083909391039113912391339143915391639173918391939203921392239233924392539263927392839293930393139323933393439353936393739383939394039413942394339443945394639473948394939503951395239533954395539563957395839593960396139623963396439653966396739683969397039713972397339743975397639773978397939803981398239833984398539863987398839893990399139923993399439953996399739983999400040014002400340044005400640074008400940104011401240134014401540164017401840194020402140224023402440254026402740284029403040314032403340344035403640374038403940404041404240434044404540464047404840494050405140524053405440554056405740584059406040614062406340644065406640674068406940704071407240734074407540764077407840794080408140824083408440854086408740884089409040914092409340944095409640974098409941004101410241034104410541064107410841094110411141124113411441154116411741184119412041214122412341244125412641274128412941304131413241334134413541364137413841394140414141424143414441454146414741484149415041514152415341544155415641574158415941604161416241634164416541664167416841694170417141724173417441754176417741784179418041814182
  1. // Copyright 2018 The Abseil Authors.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // https://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. //
  15. // An open-addressing
  16. // hashtable with quadratic probing.
  17. //
  18. // This is a low level hashtable on top of which different interfaces can be
  19. // implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
  20. //
  21. // The table interface is similar to that of std::unordered_set. Notable
  22. // differences are that most member functions support heterogeneous keys when
  23. // BOTH the hash and eq functions are marked as transparent. They do so by
  24. // providing a typedef called `is_transparent`.
  25. //
  26. // When heterogeneous lookup is enabled, functions that take key_type act as if
  27. // they have an overload set like:
  28. //
  29. // iterator find(const key_type& key);
  30. // template <class K>
  31. // iterator find(const K& key);
  32. //
  33. // size_type erase(const key_type& key);
  34. // template <class K>
  35. // size_type erase(const K& key);
  36. //
  37. // std::pair<iterator, iterator> equal_range(const key_type& key);
  38. // template <class K>
  39. // std::pair<iterator, iterator> equal_range(const K& key);
  40. //
  41. // When heterogeneous lookup is disabled, only the explicit `key_type` overloads
  42. // exist.
  43. //
  44. // find() also supports passing the hash explicitly:
  45. //
  46. // iterator find(const key_type& key, size_t hash);
  47. // template <class U>
  48. // iterator find(const U& key, size_t hash);
  49. //
  50. // In addition the pointer to element and iterator stability guarantees are
  51. // weaker: all iterators and pointers are invalidated after a new element is
  52. // inserted.
  53. //
  54. // IMPLEMENTATION DETAILS
  55. //
  56. // # Table Layout
  57. //
  58. // A raw_hash_set's backing array consists of control bytes followed by slots
  59. // that may or may not contain objects.
  60. //
  61. // The layout of the backing array, for `capacity` slots, is thus, as a
  62. // pseudo-struct:
  63. //
  64. // struct BackingArray {
  65. // // Sampling handler. This field isn't present when the sampling is
  66. // // disabled or this allocation hasn't been selected for sampling.
  67. // HashtablezInfoHandle infoz_;
  68. // // The number of elements we can insert before growing the capacity.
  69. // size_t growth_left;
  70. // // Control bytes for the "real" slots.
  71. // ctrl_t ctrl[capacity];
  72. // // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
  73. // // stop and serves no other purpose.
  74. // ctrl_t sentinel;
  75. // // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
  76. // // that if a probe sequence picks a value near the end of `ctrl`,
  77. // // `Group` will have valid control bytes to look at.
  78. // ctrl_t clones[kWidth - 1];
  79. // // The actual slot data.
  80. // slot_type slots[capacity];
  81. // };
  82. //
  83. // The length of this array is computed by `RawHashSetLayout::alloc_size` below.
  84. //
  85. // Control bytes (`ctrl_t`) are bytes (collected into groups of a
  86. // platform-specific size) that define the state of the corresponding slot in
  87. // the slot array. Group manipulation is tightly optimized to be as efficient
  88. // as possible: SSE and friends on x86, clever bit operations on other arches.
  89. //
  90. // Group 1 Group 2 Group 3
  91. // +---------------+---------------+---------------+
  92. // | | | | | | | | | | | | | | | | | | | | | | | | |
  93. // +---------------+---------------+---------------+
  94. //
  95. // Each control byte is either a special value for empty slots, deleted slots
  96. // (sometimes called *tombstones*), and a special end-of-table marker used by
  97. // iterators, or, if occupied, seven bits (H2) from the hash of the value in the
  98. // corresponding slot.
  99. //
  100. // Storing control bytes in a separate array also has beneficial cache effects,
  101. // since more logical slots will fit into a cache line.
  102. //
  103. // # Small Object Optimization (SOO)
  104. //
  105. // When the size/alignment of the value_type and the capacity of the table are
  106. // small, we enable small object optimization and store the values inline in
  107. // the raw_hash_set object. This optimization allows us to avoid
  108. // allocation/deallocation as well as cache/dTLB misses.
  109. //
  110. // # Hashing
  111. //
  112. // We compute two separate hashes, `H1` and `H2`, from the hash of an object.
  113. // `H1(hash(x))` is an index into `slots`, and essentially the starting point
  114. // for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
  115. // objects that cannot possibly be the one we are looking for.
  116. //
  117. // # Table operations.
  118. //
  119. // The key operations are `insert`, `find`, and `erase`.
  120. //
  121. // Since `insert` and `erase` are implemented in terms of `find`, we describe
  122. // `find` first. To `find` a value `x`, we compute `hash(x)`. From
  123. // `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
  124. // group of slots in some interesting order.
  125. //
  126. // We now walk through these indices. At each index, we select the entire group
  127. // starting with that index and extract potential candidates: occupied slots
  128. // with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
  129. // group, we stop and return an error. Each candidate slot `y` is compared with
  130. // `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
  131. // next probe index. Tombstones effectively behave like full slots that never
  132. // match the value we're looking for.
  133. //
  134. // The `H2` bits ensure when we compare a slot to an object with `==`, we are
  135. // likely to have actually found the object. That is, the chance is low that
  136. // `==` is called and returns `false`. Thus, when we search for an object, we
  137. // are unlikely to call `==` many times. This likelyhood can be analyzed as
  138. // follows (assuming that H2 is a random enough hash function).
  139. //
  140. // Let's assume that there are `k` "wrong" objects that must be examined in a
  141. // probe sequence. For example, when doing a `find` on an object that is in the
  142. // table, `k` is the number of objects between the start of the probe sequence
  143. // and the final found object (not including the final found object). The
  144. // expected number of objects with an H2 match is then `k/128`. Measurements
  145. // and analysis indicate that even at high load factors, `k` is less than 32,
  146. // meaning that the number of "false positive" comparisons we must perform is
  147. // less than 1/8 per `find`.
  148. // `insert` is implemented in terms of `unchecked_insert`, which inserts a
  149. // value presumed to not be in the table (violating this requirement will cause
  150. // the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
  151. // it, we construct a `probe_seq` once again, and use it to find the first
  152. // group with an unoccupied (empty *or* deleted) slot. We place `x` into the
  153. // first such slot in the group and mark it as full with `x`'s H2.
  154. //
  155. // To `insert`, we compose `unchecked_insert` with `find`. We compute `h(x)` and
  156. // perform a `find` to see if it's already present; if it is, we're done. If
  157. // it's not, we may decide the table is getting overcrowded (i.e. the load
  158. // factor is greater than 7/8 for big tables; `is_small()` tables use a max load
  159. // factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
  160. // each element of the table into the new array (we know that no insertion here
  161. // will insert an already-present value), and discard the old backing array. At
  162. // this point, we may `unchecked_insert` the value `x`.
  163. //
  164. // Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
  165. // presents a viable, initialized slot pointee to the caller.
  166. //
  167. // `erase` is implemented in terms of `erase_at`, which takes an index to a
  168. // slot. Given an offset, we simply create a tombstone and destroy its contents.
  169. // If we can prove that the slot would not appear in a probe sequence, we can
  170. // make the slot as empty, instead. We can prove this by observing that if a
  171. // group has any empty slots, it has never been full (assuming we never create
  172. // an empty slot in a group with no empties, which this heuristic guarantees we
  173. // never do) and find would stop at this group anyways (since it does not probe
  174. // beyond groups with empties).
  175. //
  176. // `erase` is `erase_at` composed with `find`: if we
  177. // have a value `x`, we can perform a `find`, and then `erase_at` the resulting
  178. // slot.
  179. //
  180. // To iterate, we simply traverse the array, skipping empty and deleted slots
  181. // and stopping when we hit a `kSentinel`.
  182. #ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
  183. #define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
  184. #include <algorithm>
  185. #include <cassert>
  186. #include <cmath>
  187. #include <cstddef>
  188. #include <cstdint>
  189. #include <cstring>
  190. #include <initializer_list>
  191. #include <iterator>
  192. #include <limits>
  193. #include <memory>
  194. #include <tuple>
  195. #include <type_traits>
  196. #include <utility>
  197. #include "absl/base/attributes.h"
  198. #include "absl/base/config.h"
  199. #include "absl/base/internal/endian.h"
  200. #include "absl/base/internal/raw_logging.h"
  201. #include "absl/base/macros.h"
  202. #include "absl/base/optimization.h"
  203. #include "absl/base/options.h"
  204. #include "absl/base/port.h"
  205. #include "absl/base/prefetch.h"
  206. #include "absl/container/internal/common.h" // IWYU pragma: export // for node_handle
  207. #include "absl/container/internal/compressed_tuple.h"
  208. #include "absl/container/internal/container_memory.h"
  209. #include "absl/container/internal/hash_policy_traits.h"
  210. #include "absl/container/internal/hashtable_debug_hooks.h"
  211. #include "absl/container/internal/hashtablez_sampler.h"
  212. #include "absl/memory/memory.h"
  213. #include "absl/meta/type_traits.h"
  214. #include "absl/numeric/bits.h"
  215. #include "absl/utility/utility.h"
  216. #ifdef ABSL_INTERNAL_HAVE_SSE2
  217. #include <emmintrin.h>
  218. #endif
  219. #ifdef ABSL_INTERNAL_HAVE_SSSE3
  220. #include <tmmintrin.h>
  221. #endif
  222. #ifdef _MSC_VER
  223. #include <intrin.h>
  224. #endif
  225. #ifdef ABSL_INTERNAL_HAVE_ARM_NEON
  226. #include <arm_neon.h>
  227. #endif
  228. namespace absl {
  229. ABSL_NAMESPACE_BEGIN
  230. namespace container_internal {
  231. #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
  232. #error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
  233. #elif (defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
  234. defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
  235. defined(ABSL_HAVE_MEMORY_SANITIZER)) && \
  236. !defined(NDEBUG_SANITIZER) // If defined, performance is important.
  237. // When compiled in sanitizer mode, we add generation integers to the backing
  238. // array and iterators. In the backing array, we store the generation between
  239. // the control bytes and the slots. When iterators are dereferenced, we assert
  240. // that the container has not been mutated in a way that could cause iterator
  241. // invalidation since the iterator was initialized.
  242. #define ABSL_SWISSTABLE_ENABLE_GENERATIONS
  243. #endif
  244. // We use uint8_t so we don't need to worry about padding.
  245. using GenerationType = uint8_t;
  246. // A sentinel value for empty generations. Using 0 makes it easy to constexpr
  247. // initialize an array of this value.
  248. constexpr GenerationType SentinelEmptyGeneration() { return 0; }
  249. constexpr GenerationType NextGeneration(GenerationType generation) {
  250. return ++generation == SentinelEmptyGeneration() ? ++generation : generation;
  251. }
  252. #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
  253. constexpr bool SwisstableGenerationsEnabled() { return true; }
  254. constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); }
  255. #else
  256. constexpr bool SwisstableGenerationsEnabled() { return false; }
  257. constexpr size_t NumGenerationBytes() { return 0; }
  258. #endif
  259. template <typename AllocType>
  260. void SwapAlloc(AllocType& lhs, AllocType& rhs,
  261. std::true_type /* propagate_on_container_swap */) {
  262. using std::swap;
  263. swap(lhs, rhs);
  264. }
  265. template <typename AllocType>
  266. void SwapAlloc(AllocType& lhs, AllocType& rhs,
  267. std::false_type /* propagate_on_container_swap */) {
  268. (void)lhs;
  269. (void)rhs;
  270. assert(lhs == rhs &&
  271. "It's UB to call swap with unequal non-propagating allocators.");
  272. }
  273. template <typename AllocType>
  274. void CopyAlloc(AllocType& lhs, AllocType& rhs,
  275. std::true_type /* propagate_alloc */) {
  276. lhs = rhs;
  277. }
  278. template <typename AllocType>
  279. void CopyAlloc(AllocType&, AllocType&, std::false_type /* propagate_alloc */) {}
  280. // The state for a probe sequence.
  281. //
  282. // Currently, the sequence is a triangular progression of the form
  283. //
  284. // p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
  285. //
  286. // The use of `Width` ensures that each probe step does not overlap groups;
  287. // the sequence effectively outputs the addresses of *groups* (although not
  288. // necessarily aligned to any boundary). The `Group` machinery allows us
  289. // to check an entire group with minimal branching.
  290. //
  291. // Wrapping around at `mask + 1` is important, but not for the obvious reason.
  292. // As described above, the first few entries of the control byte array
  293. // are mirrored at the end of the array, which `Group` will find and use
  294. // for selecting candidates. However, when those candidates' slots are
  295. // actually inspected, there are no corresponding slots for the cloned bytes,
  296. // so we need to make sure we've treated those offsets as "wrapping around".
  297. //
  298. // It turns out that this probe sequence visits every group exactly once if the
  299. // number of groups is a power of two, since (i^2+i)/2 is a bijection in
  300. // Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
  301. template <size_t Width>
  302. class probe_seq {
  303. public:
  304. // Creates a new probe sequence using `hash` as the initial value of the
  305. // sequence and `mask` (usually the capacity of the table) as the mask to
  306. // apply to each value in the progression.
  307. probe_seq(size_t hash, size_t mask) {
  308. assert(((mask + 1) & mask) == 0 && "not a mask");
  309. mask_ = mask;
  310. offset_ = hash & mask_;
  311. }
  312. // The offset within the table, i.e., the value `p(i)` above.
  313. size_t offset() const { return offset_; }
  314. size_t offset(size_t i) const { return (offset_ + i) & mask_; }
  315. void next() {
  316. index_ += Width;
  317. offset_ += index_;
  318. offset_ &= mask_;
  319. }
  320. // 0-based probe index, a multiple of `Width`.
  321. size_t index() const { return index_; }
  322. private:
  323. size_t mask_;
  324. size_t offset_;
  325. size_t index_ = 0;
  326. };
  327. template <class ContainerKey, class Hash, class Eq>
  328. struct RequireUsableKey {
  329. template <class PassedKey, class... Args>
  330. std::pair<
  331. decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
  332. decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
  333. std::declval<const PassedKey&>()))>*
  334. operator()(const PassedKey&, const Args&...) const;
  335. };
  336. template <class E, class Policy, class Hash, class Eq, class... Ts>
  337. struct IsDecomposable : std::false_type {};
  338. template <class Policy, class Hash, class Eq, class... Ts>
  339. struct IsDecomposable<
  340. absl::void_t<decltype(Policy::apply(
  341. RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
  342. std::declval<Ts>()...))>,
  343. Policy, Hash, Eq, Ts...> : std::true_type {};
  344. // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
  345. template <class T>
  346. constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
  347. using std::swap;
  348. return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
  349. }
  350. template <class T>
  351. constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
  352. return false;
  353. }
  354. template <typename T>
  355. uint32_t TrailingZeros(T x) {
  356. ABSL_ASSUME(x != 0);
  357. return static_cast<uint32_t>(countr_zero(x));
  358. }
  359. // 8 bytes bitmask with most significant bit set for every byte.
  360. constexpr uint64_t kMsbs8Bytes = 0x8080808080808080ULL;
  361. // An abstract bitmask, such as that emitted by a SIMD instruction.
  362. //
  363. // Specifically, this type implements a simple bitset whose representation is
  364. // controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
  365. // of abstract bits in the bitset, while `Shift` is the log-base-two of the
  366. // width of an abstract bit in the representation.
  367. // This mask provides operations for any number of real bits set in an abstract
  368. // bit. To add iteration on top of that, implementation must guarantee no more
  369. // than the most significant real bit is set in a set abstract bit.
  370. template <class T, int SignificantBits, int Shift = 0>
  371. class NonIterableBitMask {
  372. public:
  373. explicit NonIterableBitMask(T mask) : mask_(mask) {}
  374. explicit operator bool() const { return this->mask_ != 0; }
  375. // Returns the index of the lowest *abstract* bit set in `self`.
  376. uint32_t LowestBitSet() const {
  377. return container_internal::TrailingZeros(mask_) >> Shift;
  378. }
  379. // Returns the index of the highest *abstract* bit set in `self`.
  380. uint32_t HighestBitSet() const {
  381. return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
  382. }
  383. // Returns the number of trailing zero *abstract* bits.
  384. uint32_t TrailingZeros() const {
  385. return container_internal::TrailingZeros(mask_) >> Shift;
  386. }
  387. // Returns the number of leading zero *abstract* bits.
  388. uint32_t LeadingZeros() const {
  389. constexpr int total_significant_bits = SignificantBits << Shift;
  390. constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
  391. return static_cast<uint32_t>(
  392. countl_zero(static_cast<T>(mask_ << extra_bits))) >>
  393. Shift;
  394. }
  395. T mask_;
  396. };
  397. // Mask that can be iterable
  398. //
  399. // For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
  400. // an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
  401. // `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
  402. // the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
  403. // If NullifyBitsOnIteration is true (only allowed for Shift == 3),
  404. // non zero abstract bit is allowed to have additional bits
  405. // (e.g., `0xff`, `0x83` and `0x9c` are ok, but `0x6f` is not).
  406. //
  407. // For example:
  408. // for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
  409. // for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
  410. template <class T, int SignificantBits, int Shift = 0,
  411. bool NullifyBitsOnIteration = false>
  412. class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
  413. using Base = NonIterableBitMask<T, SignificantBits, Shift>;
  414. static_assert(std::is_unsigned<T>::value, "");
  415. static_assert(Shift == 0 || Shift == 3, "");
  416. static_assert(!NullifyBitsOnIteration || Shift == 3, "");
  417. public:
  418. explicit BitMask(T mask) : Base(mask) {
  419. if (Shift == 3 && !NullifyBitsOnIteration) {
  420. assert(this->mask_ == (this->mask_ & kMsbs8Bytes));
  421. }
  422. }
  423. // BitMask is an iterator over the indices of its abstract bits.
  424. using value_type = int;
  425. using iterator = BitMask;
  426. using const_iterator = BitMask;
  427. BitMask& operator++() {
  428. if (Shift == 3 && NullifyBitsOnIteration) {
  429. this->mask_ &= kMsbs8Bytes;
  430. }
  431. this->mask_ &= (this->mask_ - 1);
  432. return *this;
  433. }
  434. uint32_t operator*() const { return Base::LowestBitSet(); }
  435. BitMask begin() const { return *this; }
  436. BitMask end() const { return BitMask(0); }
  437. private:
  438. friend bool operator==(const BitMask& a, const BitMask& b) {
  439. return a.mask_ == b.mask_;
  440. }
  441. friend bool operator!=(const BitMask& a, const BitMask& b) {
  442. return a.mask_ != b.mask_;
  443. }
  444. };
  445. using h2_t = uint8_t;
  446. // The values here are selected for maximum performance. See the static asserts
  447. // below for details.
  448. // A `ctrl_t` is a single control byte, which can have one of four
  449. // states: empty, deleted, full (which has an associated seven-bit h2_t value)
  450. // and the sentinel. They have the following bit patterns:
  451. //
  452. // empty: 1 0 0 0 0 0 0 0
  453. // deleted: 1 1 1 1 1 1 1 0
  454. // full: 0 h h h h h h h // h represents the hash bits.
  455. // sentinel: 1 1 1 1 1 1 1 1
  456. //
  457. // These values are specifically tuned for SSE-flavored SIMD.
  458. // The static_asserts below detail the source of these choices.
  459. //
  460. // We use an enum class so that when strict aliasing is enabled, the compiler
  461. // knows ctrl_t doesn't alias other types.
  462. enum class ctrl_t : int8_t {
  463. kEmpty = -128, // 0b10000000
  464. kDeleted = -2, // 0b11111110
  465. kSentinel = -1, // 0b11111111
  466. };
  467. static_assert(
  468. (static_cast<int8_t>(ctrl_t::kEmpty) &
  469. static_cast<int8_t>(ctrl_t::kDeleted) &
  470. static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
  471. "Special markers need to have the MSB to make checking for them efficient");
  472. static_assert(
  473. ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
  474. "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
  475. "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
  476. static_assert(
  477. ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
  478. "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
  479. "registers (pcmpeqd xmm, xmm)");
  480. static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
  481. "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
  482. "existence efficient (psignb xmm, xmm)");
  483. static_assert(
  484. (~static_cast<int8_t>(ctrl_t::kEmpty) &
  485. ~static_cast<int8_t>(ctrl_t::kDeleted) &
  486. static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
  487. "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
  488. "shared by ctrl_t::kSentinel to make the scalar test for "
  489. "MaskEmptyOrDeleted() efficient");
  490. static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
  491. "ctrl_t::kDeleted must be -2 to make the implementation of "
  492. "ConvertSpecialToEmptyAndFullToDeleted efficient");
  493. // See definition comment for why this is size 32.
  494. ABSL_DLL extern const ctrl_t kEmptyGroup[32];
  495. // Returns a pointer to a control byte group that can be used by empty tables.
  496. inline ctrl_t* EmptyGroup() {
  497. // Const must be cast away here; no uses of this function will actually write
  498. // to it because it is only used for empty tables.
  499. return const_cast<ctrl_t*>(kEmptyGroup + 16);
  500. }
  501. // For use in SOO iterators.
  502. // TODO(b/289225379): we could potentially get rid of this by adding an is_soo
  503. // bit in iterators. This would add branches but reduce cache misses.
  504. ABSL_DLL extern const ctrl_t kSooControl[17];
  505. // Returns a pointer to a full byte followed by a sentinel byte.
  506. inline ctrl_t* SooControl() {
  507. // Const must be cast away here; no uses of this function will actually write
  508. // to it because it is only used for SOO iterators.
  509. return const_cast<ctrl_t*>(kSooControl);
  510. }
  511. // Whether ctrl is from the SooControl array.
  512. inline bool IsSooControl(const ctrl_t* ctrl) { return ctrl == SooControl(); }
  513. // Returns a pointer to a generation to use for an empty hashtable.
  514. GenerationType* EmptyGeneration();
  515. // Returns whether `generation` is a generation for an empty hashtable that
  516. // could be returned by EmptyGeneration().
  517. inline bool IsEmptyGeneration(const GenerationType* generation) {
  518. return *generation == SentinelEmptyGeneration();
  519. }
  520. // Mixes a randomly generated per-process seed with `hash` and `ctrl` to
  521. // randomize insertion order within groups.
  522. bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
  523. const ctrl_t* ctrl);
  524. ABSL_ATTRIBUTE_ALWAYS_INLINE inline bool ShouldInsertBackwards(
  525. ABSL_ATTRIBUTE_UNUSED size_t capacity, ABSL_ATTRIBUTE_UNUSED size_t hash,
  526. ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {
  527. #if defined(NDEBUG)
  528. return false;
  529. #else
  530. return ShouldInsertBackwardsForDebug(capacity, hash, ctrl);
  531. #endif
  532. }
  533. // Returns insert position for the given mask.
  534. // We want to add entropy even when ASLR is not enabled.
  535. // In debug build we will randomly insert in either the front or back of
  536. // the group.
  537. // TODO(kfm,sbenza): revisit after we do unconditional mixing
  538. template <class Mask>
  539. ABSL_ATTRIBUTE_ALWAYS_INLINE inline auto GetInsertionOffset(
  540. Mask mask, ABSL_ATTRIBUTE_UNUSED size_t capacity,
  541. ABSL_ATTRIBUTE_UNUSED size_t hash,
  542. ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {
  543. #if defined(NDEBUG)
  544. return mask.LowestBitSet();
  545. #else
  546. return ShouldInsertBackwardsForDebug(capacity, hash, ctrl)
  547. ? mask.HighestBitSet()
  548. : mask.LowestBitSet();
  549. #endif
  550. }
  551. // Returns a per-table, hash salt, which changes on resize. This gets mixed into
  552. // H1 to randomize iteration order per-table.
  553. //
  554. // The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
  555. // non-determinism of iteration order in most cases.
  556. inline size_t PerTableSalt(const ctrl_t* ctrl) {
  557. // The low bits of the pointer have little or no entropy because of
  558. // alignment. We shift the pointer to try to use higher entropy bits. A
  559. // good number seems to be 12 bits, because that aligns with page size.
  560. return reinterpret_cast<uintptr_t>(ctrl) >> 12;
  561. }
  562. // Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
  563. inline size_t H1(size_t hash, const ctrl_t* ctrl) {
  564. return (hash >> 7) ^ PerTableSalt(ctrl);
  565. }
  566. // Extracts the H2 portion of a hash: the 7 bits not used for H1.
  567. //
  568. // These are used as an occupied control byte.
  569. inline h2_t H2(size_t hash) { return hash & 0x7F; }
  570. // Helpers for checking the state of a control byte.
  571. inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
  572. inline bool IsFull(ctrl_t c) {
  573. // Cast `c` to the underlying type instead of casting `0` to `ctrl_t` as `0`
  574. // is not a value in the enum. Both ways are equivalent, but this way makes
  575. // linters happier.
  576. return static_cast<std::underlying_type_t<ctrl_t>>(c) >= 0;
  577. }
  578. inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
  579. inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
  580. #ifdef ABSL_INTERNAL_HAVE_SSE2
  581. // Quick reference guide for intrinsics used below:
  582. //
  583. // * __m128i: An XMM (128-bit) word.
  584. //
  585. // * _mm_setzero_si128: Returns a zero vector.
  586. // * _mm_set1_epi8: Returns a vector with the same i8 in each lane.
  587. //
  588. // * _mm_subs_epi8: Saturating-subtracts two i8 vectors.
  589. // * _mm_and_si128: Ands two i128s together.
  590. // * _mm_or_si128: Ors two i128s together.
  591. // * _mm_andnot_si128: And-nots two i128s together.
  592. //
  593. // * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
  594. // filling each lane with 0x00 or 0xff.
  595. // * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
  596. //
  597. // * _mm_loadu_si128: Performs an unaligned load of an i128.
  598. // * _mm_storeu_si128: Performs an unaligned store of an i128.
  599. //
  600. // * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first
  601. // argument if the corresponding lane of the second
  602. // argument is positive, negative, or zero, respectively.
  603. // * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
  604. // bitmask consisting of those bits.
  605. // * _mm_shuffle_epi8: Selects i8s from the first argument, using the low
  606. // four bits of each i8 lane in the second argument as
  607. // indices.
  608. // https://github.com/abseil/abseil-cpp/issues/209
  609. // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
  610. // _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
  611. // Work around this by using the portable implementation of Group
  612. // when using -funsigned-char under GCC.
  613. inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
  614. #if defined(__GNUC__) && !defined(__clang__)
  615. if (std::is_unsigned<char>::value) {
  616. const __m128i mask = _mm_set1_epi8(0x80);
  617. const __m128i diff = _mm_subs_epi8(b, a);
  618. return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
  619. }
  620. #endif
  621. return _mm_cmpgt_epi8(a, b);
  622. }
  623. struct GroupSse2Impl {
  624. static constexpr size_t kWidth = 16; // the number of slots per group
  625. explicit GroupSse2Impl(const ctrl_t* pos) {
  626. ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
  627. }
  628. // Returns a bitmask representing the positions of slots that match hash.
  629. BitMask<uint16_t, kWidth> Match(h2_t hash) const {
  630. auto match = _mm_set1_epi8(static_cast<char>(hash));
  631. BitMask<uint16_t, kWidth> result = BitMask<uint16_t, kWidth>(0);
  632. result = BitMask<uint16_t, kWidth>(
  633. static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
  634. return result;
  635. }
  636. // Returns a bitmask representing the positions of empty slots.
  637. NonIterableBitMask<uint16_t, kWidth> MaskEmpty() const {
  638. #ifdef ABSL_INTERNAL_HAVE_SSSE3
  639. // This only works because ctrl_t::kEmpty is -128.
  640. return NonIterableBitMask<uint16_t, kWidth>(
  641. static_cast<uint16_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
  642. #else
  643. auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
  644. return NonIterableBitMask<uint16_t, kWidth>(
  645. static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
  646. #endif
  647. }
  648. // Returns a bitmask representing the positions of full slots.
  649. // Note: for `is_small()` tables group may contain the "same" slot twice:
  650. // original and mirrored.
  651. BitMask<uint16_t, kWidth> MaskFull() const {
  652. return BitMask<uint16_t, kWidth>(
  653. static_cast<uint16_t>(_mm_movemask_epi8(ctrl) ^ 0xffff));
  654. }
  655. // Returns a bitmask representing the positions of non full slots.
  656. // Note: this includes: kEmpty, kDeleted, kSentinel.
  657. // It is useful in contexts when kSentinel is not present.
  658. auto MaskNonFull() const {
  659. return BitMask<uint16_t, kWidth>(
  660. static_cast<uint16_t>(_mm_movemask_epi8(ctrl)));
  661. }
  662. // Returns a bitmask representing the positions of empty or deleted slots.
  663. NonIterableBitMask<uint16_t, kWidth> MaskEmptyOrDeleted() const {
  664. auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
  665. return NonIterableBitMask<uint16_t, kWidth>(static_cast<uint16_t>(
  666. _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
  667. }
  668. // Returns the number of trailing empty or deleted elements in the group.
  669. uint32_t CountLeadingEmptyOrDeleted() const {
  670. auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
  671. return TrailingZeros(static_cast<uint32_t>(
  672. _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
  673. }
  674. void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
  675. auto msbs = _mm_set1_epi8(static_cast<char>(-128));
  676. auto x126 = _mm_set1_epi8(126);
  677. #ifdef ABSL_INTERNAL_HAVE_SSSE3
  678. auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
  679. #else
  680. auto zero = _mm_setzero_si128();
  681. auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
  682. auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
  683. #endif
  684. _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
  685. }
  686. __m128i ctrl;
  687. };
  688. #endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
  689. #if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
  690. struct GroupAArch64Impl {
  691. static constexpr size_t kWidth = 8;
  692. explicit GroupAArch64Impl(const ctrl_t* pos) {
  693. ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
  694. }
  695. auto Match(h2_t hash) const {
  696. uint8x8_t dup = vdup_n_u8(hash);
  697. auto mask = vceq_u8(ctrl, dup);
  698. return BitMask<uint64_t, kWidth, /*Shift=*/3,
  699. /*NullifyBitsOnIteration=*/true>(
  700. vget_lane_u64(vreinterpret_u64_u8(mask), 0));
  701. }
  702. NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
  703. uint64_t mask =
  704. vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
  705. vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
  706. vreinterpret_s8_u8(ctrl))),
  707. 0);
  708. return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
  709. }
  710. // Returns a bitmask representing the positions of full slots.
  711. // Note: for `is_small()` tables group may contain the "same" slot twice:
  712. // original and mirrored.
  713. auto MaskFull() const {
  714. uint64_t mask = vget_lane_u64(
  715. vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl),
  716. vdup_n_s8(static_cast<int8_t>(0)))),
  717. 0);
  718. return BitMask<uint64_t, kWidth, /*Shift=*/3,
  719. /*NullifyBitsOnIteration=*/true>(mask);
  720. }
  721. // Returns a bitmask representing the positions of non full slots.
  722. // Note: this includes: kEmpty, kDeleted, kSentinel.
  723. // It is useful in contexts when kSentinel is not present.
  724. auto MaskNonFull() const {
  725. uint64_t mask = vget_lane_u64(
  726. vreinterpret_u64_u8(vclt_s8(vreinterpret_s8_u8(ctrl),
  727. vdup_n_s8(static_cast<int8_t>(0)))),
  728. 0);
  729. return BitMask<uint64_t, kWidth, /*Shift=*/3,
  730. /*NullifyBitsOnIteration=*/true>(mask);
  731. }
  732. NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
  733. uint64_t mask =
  734. vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
  735. vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
  736. vreinterpret_s8_u8(ctrl))),
  737. 0);
  738. return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
  739. }
  740. uint32_t CountLeadingEmptyOrDeleted() const {
  741. uint64_t mask =
  742. vget_lane_u64(vreinterpret_u64_u8(vcle_s8(
  743. vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
  744. vreinterpret_s8_u8(ctrl))),
  745. 0);
  746. // Similar to MaskEmptyorDeleted() but we invert the logic to invert the
  747. // produced bitfield. We then count number of trailing zeros.
  748. // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
  749. // so we should be fine.
  750. return static_cast<uint32_t>(countr_zero(mask)) >> 3;
  751. }
  752. void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
  753. uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
  754. constexpr uint64_t slsbs = 0x0202020202020202ULL;
  755. constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
  756. auto x = slsbs & (mask >> 6);
  757. auto res = (x + midbs) | kMsbs8Bytes;
  758. little_endian::Store64(dst, res);
  759. }
  760. uint8x8_t ctrl;
  761. };
  762. #endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN
  763. struct GroupPortableImpl {
  764. static constexpr size_t kWidth = 8;
  765. explicit GroupPortableImpl(const ctrl_t* pos)
  766. : ctrl(little_endian::Load64(pos)) {}
  767. BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
  768. // For the technique, see:
  769. // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
  770. // (Determine if a word has a byte equal to n).
  771. //
  772. // Caveat: there are false positives but:
  773. // - they only occur if there is a real match
  774. // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
  775. // - they will be handled gracefully by subsequent checks in code
  776. //
  777. // Example:
  778. // v = 0x1716151413121110
  779. // hash = 0x12
  780. // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
  781. constexpr uint64_t lsbs = 0x0101010101010101ULL;
  782. auto x = ctrl ^ (lsbs * hash);
  783. return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & kMsbs8Bytes);
  784. }
  785. NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
  786. return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 6)) &
  787. kMsbs8Bytes);
  788. }
  789. // Returns a bitmask representing the positions of full slots.
  790. // Note: for `is_small()` tables group may contain the "same" slot twice:
  791. // original and mirrored.
  792. BitMask<uint64_t, kWidth, 3> MaskFull() const {
  793. return BitMask<uint64_t, kWidth, 3>((ctrl ^ kMsbs8Bytes) & kMsbs8Bytes);
  794. }
  795. // Returns a bitmask representing the positions of non full slots.
  796. // Note: this includes: kEmpty, kDeleted, kSentinel.
  797. // It is useful in contexts when kSentinel is not present.
  798. auto MaskNonFull() const {
  799. return BitMask<uint64_t, kWidth, 3>(ctrl & kMsbs8Bytes);
  800. }
  801. NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
  802. return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 7)) &
  803. kMsbs8Bytes);
  804. }
  805. uint32_t CountLeadingEmptyOrDeleted() const {
  806. // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
  807. // kDeleted. We lower all other bits and count number of trailing zeros.
  808. constexpr uint64_t bits = 0x0101010101010101ULL;
  809. return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >>
  810. 3);
  811. }
  812. void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
  813. constexpr uint64_t lsbs = 0x0101010101010101ULL;
  814. auto x = ctrl & kMsbs8Bytes;
  815. auto res = (~x + (x >> 7)) & ~lsbs;
  816. little_endian::Store64(dst, res);
  817. }
  818. uint64_t ctrl;
  819. };
  820. #ifdef ABSL_INTERNAL_HAVE_SSE2
  821. using Group = GroupSse2Impl;
  822. using GroupFullEmptyOrDeleted = GroupSse2Impl;
  823. #elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
  824. using Group = GroupAArch64Impl;
  825. // For Aarch64, we use the portable implementation for counting and masking
  826. // full, empty or deleted group elements. This is to avoid the latency of moving
  827. // between data GPRs and Neon registers when it does not provide a benefit.
  828. // Using Neon is profitable when we call Match(), but is not when we don't,
  829. // which is the case when we do *EmptyOrDeleted and MaskFull operations.
  830. // It is difficult to make a similar approach beneficial on other architectures
  831. // such as x86 since they have much lower GPR <-> vector register transfer
  832. // latency and 16-wide Groups.
  833. using GroupFullEmptyOrDeleted = GroupPortableImpl;
  834. #else
  835. using Group = GroupPortableImpl;
  836. using GroupFullEmptyOrDeleted = GroupPortableImpl;
  837. #endif
  838. // When there is an insertion with no reserved growth, we rehash with
  839. // probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
  840. // constant divided by capacity ensures that inserting N elements is still O(N)
  841. // in the average case. Using the constant 16 means that we expect to rehash ~8
  842. // times more often than when generations are disabled. We are adding expected
  843. // rehash_probability * #insertions/capacity_growth = 16/capacity * ((7/8 -
  844. // 7/16) * capacity)/capacity_growth = ~7 extra rehashes per capacity growth.
  845. inline size_t RehashProbabilityConstant() { return 16; }
  846. class CommonFieldsGenerationInfoEnabled {
  847. // A sentinel value for reserved_growth_ indicating that we just ran out of
  848. // reserved growth on the last insertion. When reserve is called and then
  849. // insertions take place, reserved_growth_'s state machine is N, ..., 1,
  850. // kReservedGrowthJustRanOut, 0.
  851. static constexpr size_t kReservedGrowthJustRanOut =
  852. (std::numeric_limits<size_t>::max)();
  853. public:
  854. CommonFieldsGenerationInfoEnabled() = default;
  855. CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that)
  856. : reserved_growth_(that.reserved_growth_),
  857. reservation_size_(that.reservation_size_),
  858. generation_(that.generation_) {
  859. that.reserved_growth_ = 0;
  860. that.reservation_size_ = 0;
  861. that.generation_ = EmptyGeneration();
  862. }
  863. CommonFieldsGenerationInfoEnabled& operator=(
  864. CommonFieldsGenerationInfoEnabled&&) = default;
  865. // Whether we should rehash on insert in order to detect bugs of using invalid
  866. // references. We rehash on the first insertion after reserved_growth_ reaches
  867. // 0 after a call to reserve. We also do a rehash with low probability
  868. // whenever reserved_growth_ is zero.
  869. bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
  870. size_t capacity) const;
  871. // Similar to above, except that we don't depend on reserved_growth_.
  872. bool should_rehash_for_bug_detection_on_move(const ctrl_t* ctrl,
  873. size_t capacity) const;
  874. void maybe_increment_generation_on_insert() {
  875. if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
  876. if (reserved_growth_ > 0) {
  877. if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
  878. } else {
  879. increment_generation();
  880. }
  881. }
  882. void increment_generation() { *generation_ = NextGeneration(*generation_); }
  883. void reset_reserved_growth(size_t reservation, size_t size) {
  884. reserved_growth_ = reservation - size;
  885. }
  886. size_t reserved_growth() const { return reserved_growth_; }
  887. void set_reserved_growth(size_t r) { reserved_growth_ = r; }
  888. size_t reservation_size() const { return reservation_size_; }
  889. void set_reservation_size(size_t r) { reservation_size_ = r; }
  890. GenerationType generation() const { return *generation_; }
  891. void set_generation(GenerationType g) { *generation_ = g; }
  892. GenerationType* generation_ptr() const { return generation_; }
  893. void set_generation_ptr(GenerationType* g) { generation_ = g; }
  894. private:
  895. // The number of insertions remaining that are guaranteed to not rehash due to
  896. // a prior call to reserve. Note: we store reserved growth in addition to
  897. // reservation size because calls to erase() decrease size_ but don't decrease
  898. // reserved growth.
  899. size_t reserved_growth_ = 0;
  900. // The maximum argument to reserve() since the container was cleared. We need
  901. // to keep track of this, in addition to reserved growth, because we reset
  902. // reserved growth to this when erase(begin(), end()) is called.
  903. size_t reservation_size_ = 0;
  904. // Pointer to the generation counter, which is used to validate iterators and
  905. // is stored in the backing array between the control bytes and the slots.
  906. // Note that we can't store the generation inside the container itself and
  907. // keep a pointer to the container in the iterators because iterators must
  908. // remain valid when the container is moved.
  909. // Note: we could derive this pointer from the control pointer, but it makes
  910. // the code more complicated, and there's a benefit in having the sizes of
  911. // raw_hash_set in sanitizer mode and non-sanitizer mode a bit more different,
  912. // which is that tests are less likely to rely on the size remaining the same.
  913. GenerationType* generation_ = EmptyGeneration();
  914. };
  915. class CommonFieldsGenerationInfoDisabled {
  916. public:
  917. CommonFieldsGenerationInfoDisabled() = default;
  918. CommonFieldsGenerationInfoDisabled(CommonFieldsGenerationInfoDisabled&&) =
  919. default;
  920. CommonFieldsGenerationInfoDisabled& operator=(
  921. CommonFieldsGenerationInfoDisabled&&) = default;
  922. bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
  923. return false;
  924. }
  925. bool should_rehash_for_bug_detection_on_move(const ctrl_t*, size_t) const {
  926. return false;
  927. }
  928. void maybe_increment_generation_on_insert() {}
  929. void increment_generation() {}
  930. void reset_reserved_growth(size_t, size_t) {}
  931. size_t reserved_growth() const { return 0; }
  932. void set_reserved_growth(size_t) {}
  933. size_t reservation_size() const { return 0; }
  934. void set_reservation_size(size_t) {}
  935. GenerationType generation() const { return 0; }
  936. void set_generation(GenerationType) {}
  937. GenerationType* generation_ptr() const { return nullptr; }
  938. void set_generation_ptr(GenerationType*) {}
  939. };
  940. class HashSetIteratorGenerationInfoEnabled {
  941. public:
  942. HashSetIteratorGenerationInfoEnabled() = default;
  943. explicit HashSetIteratorGenerationInfoEnabled(
  944. const GenerationType* generation_ptr)
  945. : generation_ptr_(generation_ptr), generation_(*generation_ptr) {}
  946. GenerationType generation() const { return generation_; }
  947. void reset_generation() { generation_ = *generation_ptr_; }
  948. const GenerationType* generation_ptr() const { return generation_ptr_; }
  949. void set_generation_ptr(const GenerationType* ptr) { generation_ptr_ = ptr; }
  950. private:
  951. const GenerationType* generation_ptr_ = EmptyGeneration();
  952. GenerationType generation_ = *generation_ptr_;
  953. };
  954. class HashSetIteratorGenerationInfoDisabled {
  955. public:
  956. HashSetIteratorGenerationInfoDisabled() = default;
  957. explicit HashSetIteratorGenerationInfoDisabled(const GenerationType*) {}
  958. GenerationType generation() const { return 0; }
  959. void reset_generation() {}
  960. const GenerationType* generation_ptr() const { return nullptr; }
  961. void set_generation_ptr(const GenerationType*) {}
  962. };
  963. #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
  964. using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoEnabled;
  965. using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoEnabled;
  966. #else
  967. using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled;
  968. using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
  969. #endif
  970. // Stored the information regarding number of slots we can still fill
  971. // without needing to rehash.
  972. //
  973. // We want to ensure sufficient number of empty slots in the table in order
  974. // to keep probe sequences relatively short. Empty slot in the probe group
  975. // is required to stop probing.
  976. //
  977. // Tombstones (kDeleted slots) are not included in the growth capacity,
  978. // because we'd like to rehash when the table is filled with tombstones and/or
  979. // full slots.
  980. //
  981. // GrowthInfo also stores a bit that encodes whether table may have any
  982. // deleted slots.
  983. // Most of the tables (>95%) have no deleted slots, so some functions can
  984. // be more efficient with this information.
  985. //
  986. // Callers can also force a rehash via the standard `rehash(0)`,
  987. // which will recompute this value as a side-effect.
  988. //
  989. // See also `CapacityToGrowth()`.
  990. class GrowthInfo {
  991. public:
  992. // Leaves data member uninitialized.
  993. GrowthInfo() = default;
  994. // Initializes the GrowthInfo assuming we can grow `growth_left` elements
  995. // and there are no kDeleted slots in the table.
  996. void InitGrowthLeftNoDeleted(size_t growth_left) {
  997. growth_left_info_ = growth_left;
  998. }
  999. // Overwrites single full slot with an empty slot.
  1000. void OverwriteFullAsEmpty() { ++growth_left_info_; }
  1001. // Overwrites single empty slot with a full slot.
  1002. void OverwriteEmptyAsFull() {
  1003. assert(GetGrowthLeft() > 0);
  1004. --growth_left_info_;
  1005. }
  1006. // Overwrites several empty slots with full slots.
  1007. void OverwriteManyEmptyAsFull(size_t cnt) {
  1008. assert(GetGrowthLeft() >= cnt);
  1009. growth_left_info_ -= cnt;
  1010. }
  1011. // Overwrites specified control element with full slot.
  1012. void OverwriteControlAsFull(ctrl_t ctrl) {
  1013. assert(GetGrowthLeft() >= static_cast<size_t>(IsEmpty(ctrl)));
  1014. growth_left_info_ -= static_cast<size_t>(IsEmpty(ctrl));
  1015. }
  1016. // Overwrites single full slot with a deleted slot.
  1017. void OverwriteFullAsDeleted() { growth_left_info_ |= kDeletedBit; }
  1018. // Returns true if table satisfies two properties:
  1019. // 1. Guaranteed to have no kDeleted slots.
  1020. // 2. There is a place for at least one element to grow.
  1021. bool HasNoDeletedAndGrowthLeft() const {
  1022. return static_cast<std::make_signed_t<size_t>>(growth_left_info_) > 0;
  1023. }
  1024. // Returns true if the table satisfies two properties:
  1025. // 1. Guaranteed to have no kDeleted slots.
  1026. // 2. There is no growth left.
  1027. bool HasNoGrowthLeftAndNoDeleted() const { return growth_left_info_ == 0; }
  1028. // Returns true if table guaranteed to have no k
  1029. bool HasNoDeleted() const {
  1030. return static_cast<std::make_signed_t<size_t>>(growth_left_info_) >= 0;
  1031. }
  1032. // Returns the number of elements left to grow.
  1033. size_t GetGrowthLeft() const { return growth_left_info_ & kGrowthLeftMask; }
  1034. private:
  1035. static constexpr size_t kGrowthLeftMask = ((~size_t{}) >> 1);
  1036. static constexpr size_t kDeletedBit = ~kGrowthLeftMask;
  1037. // Topmost bit signal whenever there are deleted slots.
  1038. size_t growth_left_info_;
  1039. };
  1040. static_assert(sizeof(GrowthInfo) == sizeof(size_t), "");
  1041. static_assert(alignof(GrowthInfo) == alignof(size_t), "");
  1042. // Returns whether `n` is a valid capacity (i.e., number of slots).
  1043. //
  1044. // A valid capacity is a non-zero integer `2^m - 1`.
  1045. inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
  1046. // Returns the number of "cloned control bytes".
  1047. //
  1048. // This is the number of control bytes that are present both at the beginning
  1049. // of the control byte array and at the end, such that we can create a
  1050. // `Group::kWidth`-width probe window starting from any control byte.
  1051. constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
  1052. // Returns the number of control bytes including cloned.
  1053. constexpr size_t NumControlBytes(size_t capacity) {
  1054. return capacity + 1 + NumClonedBytes();
  1055. }
  1056. // Computes the offset from the start of the backing allocation of control.
  1057. // infoz and growth_info are stored at the beginning of the backing array.
  1058. inline static size_t ControlOffset(bool has_infoz) {
  1059. return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(GrowthInfo);
  1060. }
  1061. // Helper class for computing offsets and allocation size of hash set fields.
  1062. class RawHashSetLayout {
  1063. public:
  1064. explicit RawHashSetLayout(size_t capacity, size_t slot_align, bool has_infoz)
  1065. : capacity_(capacity),
  1066. control_offset_(ControlOffset(has_infoz)),
  1067. generation_offset_(control_offset_ + NumControlBytes(capacity)),
  1068. slot_offset_(
  1069. (generation_offset_ + NumGenerationBytes() + slot_align - 1) &
  1070. (~slot_align + 1)) {
  1071. assert(IsValidCapacity(capacity));
  1072. }
  1073. // Returns the capacity of a table.
  1074. size_t capacity() const { return capacity_; }
  1075. // Returns precomputed offset from the start of the backing allocation of
  1076. // control.
  1077. size_t control_offset() const { return control_offset_; }
  1078. // Given the capacity of a table, computes the offset (from the start of the
  1079. // backing allocation) of the generation counter (if it exists).
  1080. size_t generation_offset() const { return generation_offset_; }
  1081. // Given the capacity of a table, computes the offset (from the start of the
  1082. // backing allocation) at which the slots begin.
  1083. size_t slot_offset() const { return slot_offset_; }
  1084. // Given the capacity of a table, computes the total size of the backing
  1085. // array.
  1086. size_t alloc_size(size_t slot_size) const {
  1087. ABSL_HARDENING_ASSERT(
  1088. slot_size <=
  1089. ((std::numeric_limits<size_t>::max)() - slot_offset_) / capacity_);
  1090. return slot_offset_ + capacity_ * slot_size;
  1091. }
  1092. private:
  1093. size_t capacity_;
  1094. size_t control_offset_;
  1095. size_t generation_offset_;
  1096. size_t slot_offset_;
  1097. };
  1098. struct HashtableFreeFunctionsAccess;
  1099. // We only allow a maximum of 1 SOO element, which makes the implementation
  1100. // much simpler. Complications with multiple SOO elements include:
  1101. // - Satisfying the guarantee that erasing one element doesn't invalidate
  1102. // iterators to other elements means we would probably need actual SOO
  1103. // control bytes.
  1104. // - In order to prevent user code from depending on iteration order for small
  1105. // tables, we would need to randomize the iteration order somehow.
  1106. constexpr size_t SooCapacity() { return 1; }
  1107. // Sentinel type to indicate SOO CommonFields construction.
  1108. struct soo_tag_t {};
  1109. // Sentinel type to indicate SOO CommonFields construction with full size.
  1110. struct full_soo_tag_t {};
  1111. // Suppress erroneous uninitialized memory errors on GCC. For example, GCC
  1112. // thinks that the call to slot_array() in find_or_prepare_insert() is reading
  1113. // uninitialized memory, but slot_array is only called there when the table is
  1114. // non-empty and this memory is initialized when the table is non-empty.
  1115. #if !defined(__clang__) && defined(__GNUC__)
  1116. #define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) \
  1117. _Pragma("GCC diagnostic push") \
  1118. _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") \
  1119. _Pragma("GCC diagnostic ignored \"-Wuninitialized\"") x; \
  1120. _Pragma("GCC diagnostic pop")
  1121. #define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) \
  1122. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(return x)
  1123. #else
  1124. #define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) x
  1125. #define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) return x
  1126. #endif
  1127. // This allows us to work around an uninitialized memory warning when
  1128. // constructing begin() iterators in empty hashtables.
  1129. union MaybeInitializedPtr {
  1130. void* get() const { ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(p); }
  1131. void set(void* ptr) { p = ptr; }
  1132. void* p;
  1133. };
  1134. struct HeapPtrs {
  1135. HeapPtrs() = default;
  1136. explicit HeapPtrs(ctrl_t* c) : control(c) {}
  1137. // The control bytes (and, also, a pointer near to the base of the backing
  1138. // array).
  1139. //
  1140. // This contains `capacity + 1 + NumClonedBytes()` entries, even
  1141. // when the table is empty (hence EmptyGroup).
  1142. //
  1143. // Note that growth_info is stored immediately before this pointer.
  1144. // May be uninitialized for SOO tables.
  1145. ctrl_t* control;
  1146. // The beginning of the slots, located at `SlotOffset()` bytes after
  1147. // `control`. May be uninitialized for empty tables.
  1148. // Note: we can't use `slots` because Qt defines "slots" as a macro.
  1149. MaybeInitializedPtr slot_array;
  1150. };
  1151. // Manages the backing array pointers or the SOO slot. When raw_hash_set::is_soo
  1152. // is true, the SOO slot is stored in `soo_data`. Otherwise, we use `heap`.
  1153. union HeapOrSoo {
  1154. HeapOrSoo() = default;
  1155. explicit HeapOrSoo(ctrl_t* c) : heap(c) {}
  1156. ctrl_t*& control() {
  1157. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control);
  1158. }
  1159. ctrl_t* control() const {
  1160. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control);
  1161. }
  1162. MaybeInitializedPtr& slot_array() {
  1163. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array);
  1164. }
  1165. MaybeInitializedPtr slot_array() const {
  1166. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array);
  1167. }
  1168. void* get_soo_data() {
  1169. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(soo_data);
  1170. }
  1171. const void* get_soo_data() const {
  1172. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(soo_data);
  1173. }
  1174. HeapPtrs heap;
  1175. unsigned char soo_data[sizeof(HeapPtrs)];
  1176. };
  1177. // CommonFields hold the fields in raw_hash_set that do not depend
  1178. // on template parameters. This allows us to conveniently pass all
  1179. // of this state to helper functions as a single argument.
  1180. class CommonFields : public CommonFieldsGenerationInfo {
  1181. public:
  1182. CommonFields() : capacity_(0), size_(0), heap_or_soo_(EmptyGroup()) {}
  1183. explicit CommonFields(soo_tag_t) : capacity_(SooCapacity()), size_(0) {}
  1184. explicit CommonFields(full_soo_tag_t)
  1185. : capacity_(SooCapacity()), size_(size_t{1} << HasInfozShift()) {}
  1186. // Not copyable
  1187. CommonFields(const CommonFields&) = delete;
  1188. CommonFields& operator=(const CommonFields&) = delete;
  1189. // Movable
  1190. CommonFields(CommonFields&& that) = default;
  1191. CommonFields& operator=(CommonFields&&) = default;
  1192. template <bool kSooEnabled>
  1193. static CommonFields CreateDefault() {
  1194. return kSooEnabled ? CommonFields{soo_tag_t{}} : CommonFields{};
  1195. }
  1196. // The inline data for SOO is written on top of control_/slots_.
  1197. const void* soo_data() const { return heap_or_soo_.get_soo_data(); }
  1198. void* soo_data() { return heap_or_soo_.get_soo_data(); }
  1199. HeapOrSoo heap_or_soo() const { return heap_or_soo_; }
  1200. const HeapOrSoo& heap_or_soo_ref() const { return heap_or_soo_; }
  1201. ctrl_t* control() const { return heap_or_soo_.control(); }
  1202. void set_control(ctrl_t* c) { heap_or_soo_.control() = c; }
  1203. void* backing_array_start() const {
  1204. // growth_info (and maybe infoz) is stored before control bytes.
  1205. assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
  1206. return control() - ControlOffset(has_infoz());
  1207. }
  1208. // Note: we can't use slots() because Qt defines "slots" as a macro.
  1209. void* slot_array() const { return heap_or_soo_.slot_array().get(); }
  1210. MaybeInitializedPtr slots_union() const { return heap_or_soo_.slot_array(); }
  1211. void set_slots(void* s) { heap_or_soo_.slot_array().set(s); }
  1212. // The number of filled slots.
  1213. size_t size() const { return size_ >> HasInfozShift(); }
  1214. void set_size(size_t s) {
  1215. size_ = (s << HasInfozShift()) | (size_ & HasInfozMask());
  1216. }
  1217. void set_empty_soo() {
  1218. AssertInSooMode();
  1219. size_ = 0;
  1220. }
  1221. void set_full_soo() {
  1222. AssertInSooMode();
  1223. size_ = size_t{1} << HasInfozShift();
  1224. }
  1225. void increment_size() {
  1226. assert(size() < capacity());
  1227. size_ += size_t{1} << HasInfozShift();
  1228. }
  1229. void decrement_size() {
  1230. assert(size() > 0);
  1231. size_ -= size_t{1} << HasInfozShift();
  1232. }
  1233. // The total number of available slots.
  1234. size_t capacity() const { return capacity_; }
  1235. void set_capacity(size_t c) {
  1236. assert(c == 0 || IsValidCapacity(c));
  1237. capacity_ = c;
  1238. }
  1239. // The number of slots we can still fill without needing to rehash.
  1240. // This is stored in the heap allocation before the control bytes.
  1241. // TODO(b/289225379): experiment with moving growth_info back inline to
  1242. // increase room for SOO.
  1243. size_t growth_left() const { return growth_info().GetGrowthLeft(); }
  1244. GrowthInfo& growth_info() {
  1245. auto* gl_ptr = reinterpret_cast<GrowthInfo*>(control()) - 1;
  1246. assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(GrowthInfo) == 0);
  1247. return *gl_ptr;
  1248. }
  1249. GrowthInfo growth_info() const {
  1250. return const_cast<CommonFields*>(this)->growth_info();
  1251. }
  1252. bool has_infoz() const {
  1253. return ABSL_PREDICT_FALSE((size_ & HasInfozMask()) != 0);
  1254. }
  1255. void set_has_infoz(bool has_infoz) {
  1256. size_ = (size() << HasInfozShift()) | static_cast<size_t>(has_infoz);
  1257. }
  1258. HashtablezInfoHandle infoz() {
  1259. return has_infoz()
  1260. ? *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start())
  1261. : HashtablezInfoHandle();
  1262. }
  1263. void set_infoz(HashtablezInfoHandle infoz) {
  1264. assert(has_infoz());
  1265. *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start()) = infoz;
  1266. }
  1267. bool should_rehash_for_bug_detection_on_insert() const {
  1268. return CommonFieldsGenerationInfo::
  1269. should_rehash_for_bug_detection_on_insert(control(), capacity());
  1270. }
  1271. bool should_rehash_for_bug_detection_on_move() const {
  1272. return CommonFieldsGenerationInfo::should_rehash_for_bug_detection_on_move(
  1273. control(), capacity());
  1274. }
  1275. void reset_reserved_growth(size_t reservation) {
  1276. CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
  1277. }
  1278. // The size of the backing array allocation.
  1279. size_t alloc_size(size_t slot_size, size_t slot_align) const {
  1280. return RawHashSetLayout(capacity(), slot_align, has_infoz())
  1281. .alloc_size(slot_size);
  1282. }
  1283. // Move fields other than heap_or_soo_.
  1284. void move_non_heap_or_soo_fields(CommonFields& that) {
  1285. static_cast<CommonFieldsGenerationInfo&>(*this) =
  1286. std::move(static_cast<CommonFieldsGenerationInfo&>(that));
  1287. capacity_ = that.capacity_;
  1288. size_ = that.size_;
  1289. }
  1290. // Returns the number of control bytes set to kDeleted. For testing only.
  1291. size_t TombstonesCount() const {
  1292. return static_cast<size_t>(
  1293. std::count(control(), control() + capacity(), ctrl_t::kDeleted));
  1294. }
  1295. private:
  1296. // We store the has_infoz bit in the lowest bit of size_.
  1297. static constexpr size_t HasInfozShift() { return 1; }
  1298. static constexpr size_t HasInfozMask() {
  1299. return (size_t{1} << HasInfozShift()) - 1;
  1300. }
  1301. // We can't assert that SOO is enabled because we don't have SooEnabled(), but
  1302. // we assert what we can.
  1303. void AssertInSooMode() const {
  1304. assert(capacity() == SooCapacity());
  1305. assert(!has_infoz());
  1306. }
  1307. // The number of slots in the backing array. This is always 2^N-1 for an
  1308. // integer N. NOTE: we tried experimenting with compressing the capacity and
  1309. // storing it together with size_: (a) using 6 bits to store the corresponding
  1310. // power (N in 2^N-1), and (b) storing 2^N as the most significant bit of
  1311. // size_ and storing size in the low bits. Both of these experiments were
  1312. // regressions, presumably because we need capacity to do find operations.
  1313. size_t capacity_;
  1314. // The size and also has one bit that stores whether we have infoz.
  1315. // TODO(b/289225379): we could put size_ into HeapOrSoo and make capacity_
  1316. // encode the size in SOO case. We would be making size()/capacity() more
  1317. // expensive in order to have more SOO space.
  1318. size_t size_;
  1319. // Either the control/slots pointers or the SOO slot.
  1320. HeapOrSoo heap_or_soo_;
  1321. };
  1322. template <class Policy, class Hash, class Eq, class Alloc>
  1323. class raw_hash_set;
  1324. // Returns the next valid capacity after `n`.
  1325. inline size_t NextCapacity(size_t n) {
  1326. assert(IsValidCapacity(n) || n == 0);
  1327. return n * 2 + 1;
  1328. }
  1329. // Applies the following mapping to every byte in the control array:
  1330. // * kDeleted -> kEmpty
  1331. // * kEmpty -> kEmpty
  1332. // * _ -> kDeleted
  1333. // PRECONDITION:
  1334. // IsValidCapacity(capacity)
  1335. // ctrl[capacity] == ctrl_t::kSentinel
  1336. // ctrl[i] != ctrl_t::kSentinel for all i < capacity
  1337. void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
  1338. // Converts `n` into the next valid capacity, per `IsValidCapacity`.
  1339. inline size_t NormalizeCapacity(size_t n) {
  1340. return n ? ~size_t{} >> countl_zero(n) : 1;
  1341. }
  1342. template <size_t kSlotSize>
  1343. size_t MaxValidCapacity() {
  1344. return NormalizeCapacity((std::numeric_limits<size_t>::max)() / 4 /
  1345. kSlotSize);
  1346. }
  1347. // General notes on capacity/growth methods below:
  1348. // - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
  1349. // average of two empty slots per group.
  1350. // - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
  1351. // - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
  1352. // never need to probe (the whole table fits in one group) so we don't need a
  1353. // load factor less than 1.
  1354. // Given `capacity`, applies the load factor; i.e., it returns the maximum
  1355. // number of values we should put into the table before a resizing rehash.
  1356. inline size_t CapacityToGrowth(size_t capacity) {
  1357. assert(IsValidCapacity(capacity));
  1358. // `capacity*7/8`
  1359. if (Group::kWidth == 8 && capacity == 7) {
  1360. // x-x/8 does not work when x==7.
  1361. return 6;
  1362. }
  1363. return capacity - capacity / 8;
  1364. }
  1365. // Given `growth`, "unapplies" the load factor to find how large the capacity
  1366. // should be to stay within the load factor.
  1367. //
  1368. // This might not be a valid capacity and `NormalizeCapacity()` should be
  1369. // called on this.
  1370. inline size_t GrowthToLowerboundCapacity(size_t growth) {
  1371. // `growth*8/7`
  1372. if (Group::kWidth == 8 && growth == 7) {
  1373. // x+(x-1)/7 does not work when x==7.
  1374. return 8;
  1375. }
  1376. return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
  1377. }
  1378. template <class InputIter>
  1379. size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
  1380. size_t bucket_count) {
  1381. if (bucket_count != 0) {
  1382. return bucket_count;
  1383. }
  1384. using InputIterCategory =
  1385. typename std::iterator_traits<InputIter>::iterator_category;
  1386. if (std::is_base_of<std::random_access_iterator_tag,
  1387. InputIterCategory>::value) {
  1388. return GrowthToLowerboundCapacity(
  1389. static_cast<size_t>(std::distance(first, last)));
  1390. }
  1391. return 0;
  1392. }
  1393. constexpr bool SwisstableDebugEnabled() {
  1394. #if defined(ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \
  1395. ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG)
  1396. return true;
  1397. #else
  1398. return false;
  1399. #endif
  1400. }
  1401. inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
  1402. const GenerationType* generation_ptr,
  1403. const char* operation) {
  1404. if (!SwisstableDebugEnabled()) return;
  1405. // `SwisstableDebugEnabled()` is also true for release builds with hardening
  1406. // enabled. To minimize their impact in those builds:
  1407. // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
  1408. // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
  1409. // the chances that the hot paths will be inlined.
  1410. if (ABSL_PREDICT_FALSE(ctrl == nullptr)) {
  1411. ABSL_RAW_LOG(FATAL, "%s called on end() iterator.", operation);
  1412. }
  1413. if (ABSL_PREDICT_FALSE(ctrl == EmptyGroup())) {
  1414. ABSL_RAW_LOG(FATAL, "%s called on default-constructed iterator.",
  1415. operation);
  1416. }
  1417. if (SwisstableGenerationsEnabled()) {
  1418. if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
  1419. ABSL_RAW_LOG(FATAL,
  1420. "%s called on invalid iterator. The table could have "
  1421. "rehashed or moved since this iterator was initialized.",
  1422. operation);
  1423. }
  1424. if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
  1425. ABSL_RAW_LOG(
  1426. FATAL,
  1427. "%s called on invalid iterator. The element was likely erased.",
  1428. operation);
  1429. }
  1430. } else {
  1431. if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
  1432. ABSL_RAW_LOG(
  1433. FATAL,
  1434. "%s called on invalid iterator. The element might have been erased "
  1435. "or the table might have rehashed. Consider running with "
  1436. "--config=asan to diagnose rehashing issues.",
  1437. operation);
  1438. }
  1439. }
  1440. }
  1441. // Note that for comparisons, null/end iterators are valid.
  1442. inline void AssertIsValidForComparison(const ctrl_t* ctrl,
  1443. GenerationType generation,
  1444. const GenerationType* generation_ptr) {
  1445. if (!SwisstableDebugEnabled()) return;
  1446. const bool ctrl_is_valid_for_comparison =
  1447. ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl);
  1448. if (SwisstableGenerationsEnabled()) {
  1449. if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
  1450. ABSL_RAW_LOG(FATAL,
  1451. "Invalid iterator comparison. The table could have rehashed "
  1452. "or moved since this iterator was initialized.");
  1453. }
  1454. if (ABSL_PREDICT_FALSE(!ctrl_is_valid_for_comparison)) {
  1455. ABSL_RAW_LOG(
  1456. FATAL, "Invalid iterator comparison. The element was likely erased.");
  1457. }
  1458. } else {
  1459. ABSL_HARDENING_ASSERT(
  1460. ctrl_is_valid_for_comparison &&
  1461. "Invalid iterator comparison. The element might have been erased or "
  1462. "the table might have rehashed. Consider running with --config=asan to "
  1463. "diagnose rehashing issues.");
  1464. }
  1465. }
  1466. // If the two iterators come from the same container, then their pointers will
  1467. // interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice/versa.
  1468. // Note: we take slots by reference so that it's not UB if they're uninitialized
  1469. // as long as we don't read them (when ctrl is null).
  1470. inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
  1471. const ctrl_t* ctrl_b,
  1472. const void* const& slot_a,
  1473. const void* const& slot_b) {
  1474. // If either control byte is null, then we can't tell.
  1475. if (ctrl_a == nullptr || ctrl_b == nullptr) return true;
  1476. const bool a_is_soo = IsSooControl(ctrl_a);
  1477. if (a_is_soo != IsSooControl(ctrl_b)) return false;
  1478. if (a_is_soo) return slot_a == slot_b;
  1479. const void* low_slot = slot_a;
  1480. const void* hi_slot = slot_b;
  1481. if (ctrl_a > ctrl_b) {
  1482. std::swap(ctrl_a, ctrl_b);
  1483. std::swap(low_slot, hi_slot);
  1484. }
  1485. return ctrl_b < low_slot && low_slot <= hi_slot;
  1486. }
  1487. // Asserts that two iterators come from the same container.
  1488. // Note: we take slots by reference so that it's not UB if they're uninitialized
  1489. // as long as we don't read them (when ctrl is null).
  1490. inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
  1491. const void* const& slot_a,
  1492. const void* const& slot_b,
  1493. const GenerationType* generation_ptr_a,
  1494. const GenerationType* generation_ptr_b) {
  1495. if (!SwisstableDebugEnabled()) return;
  1496. // `SwisstableDebugEnabled()` is also true for release builds with hardening
  1497. // enabled. To minimize their impact in those builds:
  1498. // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
  1499. // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
  1500. // the chances that the hot paths will be inlined.
  1501. // fail_if(is_invalid, message) crashes when is_invalid is true and provides
  1502. // an error message based on `message`.
  1503. const auto fail_if = [](bool is_invalid, const char* message) {
  1504. if (ABSL_PREDICT_FALSE(is_invalid)) {
  1505. ABSL_RAW_LOG(FATAL, "Invalid iterator comparison. %s", message);
  1506. }
  1507. };
  1508. const bool a_is_default = ctrl_a == EmptyGroup();
  1509. const bool b_is_default = ctrl_b == EmptyGroup();
  1510. if (a_is_default && b_is_default) return;
  1511. fail_if(a_is_default != b_is_default,
  1512. "Comparing default-constructed hashtable iterator with a "
  1513. "non-default-constructed hashtable iterator.");
  1514. if (SwisstableGenerationsEnabled()) {
  1515. if (ABSL_PREDICT_TRUE(generation_ptr_a == generation_ptr_b)) return;
  1516. // Users don't need to know whether the tables are SOO so don't mention SOO
  1517. // in the debug message.
  1518. const bool a_is_soo = IsSooControl(ctrl_a);
  1519. const bool b_is_soo = IsSooControl(ctrl_b);
  1520. fail_if(a_is_soo != b_is_soo || (a_is_soo && b_is_soo),
  1521. "Comparing iterators from different hashtables.");
  1522. const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
  1523. const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
  1524. fail_if(a_is_empty != b_is_empty,
  1525. "Comparing an iterator from an empty hashtable with an iterator "
  1526. "from a non-empty hashtable.");
  1527. fail_if(a_is_empty && b_is_empty,
  1528. "Comparing iterators from different empty hashtables.");
  1529. const bool a_is_end = ctrl_a == nullptr;
  1530. const bool b_is_end = ctrl_b == nullptr;
  1531. fail_if(a_is_end || b_is_end,
  1532. "Comparing iterator with an end() iterator from a different "
  1533. "hashtable.");
  1534. fail_if(true, "Comparing non-end() iterators from different hashtables.");
  1535. } else {
  1536. ABSL_HARDENING_ASSERT(
  1537. AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
  1538. "Invalid iterator comparison. The iterators may be from different "
  1539. "containers or the container might have rehashed or moved. Consider "
  1540. "running with --config=asan to diagnose issues.");
  1541. }
  1542. }
  1543. struct FindInfo {
  1544. size_t offset;
  1545. size_t probe_length;
  1546. };
  1547. // Whether a table is "small". A small table fits entirely into a probing
  1548. // group, i.e., has a capacity < `Group::kWidth`.
  1549. //
  1550. // In small mode we are able to use the whole capacity. The extra control
  1551. // bytes give us at least one "empty" control byte to stop the iteration.
  1552. // This is important to make 1 a valid capacity.
  1553. //
  1554. // In small mode only the first `capacity` control bytes after the sentinel
  1555. // are valid. The rest contain dummy ctrl_t::kEmpty values that do not
  1556. // represent a real slot. This is important to take into account on
  1557. // `find_first_non_full()`, where we never try
  1558. // `ShouldInsertBackwards()` for small tables.
  1559. inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
  1560. // Whether a table fits entirely into a probing group.
  1561. // Arbitrary order of elements in such tables is correct.
  1562. inline bool is_single_group(size_t capacity) {
  1563. return capacity <= Group::kWidth;
  1564. }
  1565. // Begins a probing operation on `common.control`, using `hash`.
  1566. inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
  1567. size_t hash) {
  1568. return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
  1569. }
  1570. inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
  1571. return probe(common.control(), common.capacity(), hash);
  1572. }
  1573. // Probes an array of control bits using a probe sequence derived from `hash`,
  1574. // and returns the offset corresponding to the first deleted or empty slot.
  1575. //
  1576. // Behavior when the entire table is full is undefined.
  1577. //
  1578. // NOTE: this function must work with tables having both empty and deleted
  1579. // slots in the same group. Such tables appear during `erase()`.
  1580. template <typename = void>
  1581. inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
  1582. auto seq = probe(common, hash);
  1583. const ctrl_t* ctrl = common.control();
  1584. if (IsEmptyOrDeleted(ctrl[seq.offset()]) &&
  1585. !ShouldInsertBackwards(common.capacity(), hash, ctrl)) {
  1586. return {seq.offset(), /*probe_length=*/0};
  1587. }
  1588. while (true) {
  1589. GroupFullEmptyOrDeleted g{ctrl + seq.offset()};
  1590. auto mask = g.MaskEmptyOrDeleted();
  1591. if (mask) {
  1592. return {
  1593. seq.offset(GetInsertionOffset(mask, common.capacity(), hash, ctrl)),
  1594. seq.index()};
  1595. }
  1596. seq.next();
  1597. assert(seq.index() <= common.capacity() && "full table!");
  1598. }
  1599. }
  1600. // Extern template for inline function keep possibility of inlining.
  1601. // When compiler decided to not inline, no symbols will be added to the
  1602. // corresponding translation unit.
  1603. extern template FindInfo find_first_non_full(const CommonFields&, size_t);
  1604. // Non-inlined version of find_first_non_full for use in less
  1605. // performance critical routines.
  1606. FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
  1607. inline void ResetGrowthLeft(CommonFields& common) {
  1608. common.growth_info().InitGrowthLeftNoDeleted(
  1609. CapacityToGrowth(common.capacity()) - common.size());
  1610. }
  1611. // Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
  1612. // array as marked as empty.
  1613. inline void ResetCtrl(CommonFields& common, size_t slot_size) {
  1614. const size_t capacity = common.capacity();
  1615. ctrl_t* ctrl = common.control();
  1616. std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
  1617. capacity + 1 + NumClonedBytes());
  1618. ctrl[capacity] = ctrl_t::kSentinel;
  1619. SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
  1620. }
  1621. // Sets sanitizer poisoning for slot corresponding to control byte being set.
  1622. inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h,
  1623. size_t slot_size) {
  1624. assert(i < c.capacity());
  1625. auto* slot_i = static_cast<const char*>(c.slot_array()) + i * slot_size;
  1626. if (IsFull(h)) {
  1627. SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
  1628. } else {
  1629. SanitizerPoisonMemoryRegion(slot_i, slot_size);
  1630. }
  1631. }
  1632. // Sets `ctrl[i]` to `h`.
  1633. //
  1634. // Unlike setting it directly, this function will perform bounds checks and
  1635. // mirror the value to the cloned tail if necessary.
  1636. inline void SetCtrl(const CommonFields& c, size_t i, ctrl_t h,
  1637. size_t slot_size) {
  1638. DoSanitizeOnSetCtrl(c, i, h, slot_size);
  1639. ctrl_t* ctrl = c.control();
  1640. ctrl[i] = h;
  1641. ctrl[((i - NumClonedBytes()) & c.capacity()) +
  1642. (NumClonedBytes() & c.capacity())] = h;
  1643. }
  1644. // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
  1645. inline void SetCtrl(const CommonFields& c, size_t i, h2_t h, size_t slot_size) {
  1646. SetCtrl(c, i, static_cast<ctrl_t>(h), slot_size);
  1647. }
  1648. // Like SetCtrl, but in a single group table, we can save some operations when
  1649. // setting the cloned control byte.
  1650. inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, ctrl_t h,
  1651. size_t slot_size) {
  1652. assert(is_single_group(c.capacity()));
  1653. DoSanitizeOnSetCtrl(c, i, h, slot_size);
  1654. ctrl_t* ctrl = c.control();
  1655. ctrl[i] = h;
  1656. ctrl[i + c.capacity() + 1] = h;
  1657. }
  1658. // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
  1659. inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h,
  1660. size_t slot_size) {
  1661. SetCtrlInSingleGroupTable(c, i, static_cast<ctrl_t>(h), slot_size);
  1662. }
  1663. // growth_info (which is a size_t) is stored with the backing array.
  1664. constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
  1665. return (std::max)(align_of_slot, alignof(GrowthInfo));
  1666. }
  1667. // Returns the address of the ith slot in slots where each slot occupies
  1668. // slot_size.
  1669. inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
  1670. return static_cast<void*>(static_cast<char*>(slot_array) +
  1671. (slot * slot_size));
  1672. }
  1673. // Iterates over all full slots and calls `cb(const ctrl_t*, SlotType*)`.
  1674. // No insertion to the table allowed during Callback call.
  1675. // Erasure is allowed only for the element passed to the callback.
  1676. template <class SlotType, class Callback>
  1677. ABSL_ATTRIBUTE_ALWAYS_INLINE inline void IterateOverFullSlots(
  1678. const CommonFields& c, SlotType* slot, Callback cb) {
  1679. const size_t cap = c.capacity();
  1680. const ctrl_t* ctrl = c.control();
  1681. if (is_small(cap)) {
  1682. // Mirrored/cloned control bytes in small table are also located in the
  1683. // first group (starting from position 0). We are taking group from position
  1684. // `capacity` in order to avoid duplicates.
  1685. // Small tables capacity fits into portable group, where
  1686. // GroupPortableImpl::MaskFull is more efficient for the
  1687. // capacity <= GroupPortableImpl::kWidth.
  1688. assert(cap <= GroupPortableImpl::kWidth &&
  1689. "unexpectedly large small capacity");
  1690. static_assert(Group::kWidth >= GroupPortableImpl::kWidth,
  1691. "unexpected group width");
  1692. // Group starts from kSentinel slot, so indices in the mask will
  1693. // be increased by 1.
  1694. const auto mask = GroupPortableImpl(ctrl + cap).MaskFull();
  1695. --ctrl;
  1696. --slot;
  1697. for (uint32_t i : mask) {
  1698. cb(ctrl + i, slot + i);
  1699. }
  1700. return;
  1701. }
  1702. size_t remaining = c.size();
  1703. ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining;
  1704. while (remaining != 0) {
  1705. for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) {
  1706. assert(IsFull(ctrl[i]) && "hash table was modified unexpectedly");
  1707. cb(ctrl + i, slot + i);
  1708. --remaining;
  1709. }
  1710. ctrl += Group::kWidth;
  1711. slot += Group::kWidth;
  1712. assert((remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&
  1713. "hash table was modified unexpectedly");
  1714. }
  1715. // NOTE: erasure of the current element is allowed in callback for
  1716. // absl::erase_if specialization. So we use `>=`.
  1717. assert(original_size_for_assert >= c.size() &&
  1718. "hash table was modified unexpectedly");
  1719. }
  1720. template <typename CharAlloc>
  1721. constexpr bool ShouldSampleHashtablezInfo() {
  1722. // Folks with custom allocators often make unwarranted assumptions about the
  1723. // behavior of their classes vis-a-vis trivial destructability and what
  1724. // calls they will or won't make. Avoid sampling for people with custom
  1725. // allocators to get us out of this mess. This is not a hard guarantee but
  1726. // a workaround while we plan the exact guarantee we want to provide.
  1727. return std::is_same<CharAlloc, std::allocator<char>>::value;
  1728. }
  1729. template <bool kSooEnabled>
  1730. HashtablezInfoHandle SampleHashtablezInfo(size_t sizeof_slot, size_t sizeof_key,
  1731. size_t sizeof_value,
  1732. size_t old_capacity, bool was_soo,
  1733. HashtablezInfoHandle forced_infoz,
  1734. CommonFields& c) {
  1735. if (forced_infoz.IsSampled()) return forced_infoz;
  1736. // In SOO, we sample on the first insertion so if this is an empty SOO case
  1737. // (e.g. when reserve is called), then we still need to sample.
  1738. if (kSooEnabled && was_soo && c.size() == 0) {
  1739. return Sample(sizeof_slot, sizeof_key, sizeof_value, SooCapacity());
  1740. }
  1741. // For non-SOO cases, we sample whenever the capacity is increasing from zero
  1742. // to non-zero.
  1743. if (!kSooEnabled && old_capacity == 0) {
  1744. return Sample(sizeof_slot, sizeof_key, sizeof_value, 0);
  1745. }
  1746. return c.infoz();
  1747. }
  1748. // Helper class to perform resize of the hash set.
  1749. //
  1750. // It contains special optimizations for small group resizes.
  1751. // See GrowIntoSingleGroupShuffleControlBytes for details.
  1752. class HashSetResizeHelper {
  1753. public:
  1754. explicit HashSetResizeHelper(CommonFields& c, bool was_soo, bool had_soo_slot,
  1755. HashtablezInfoHandle forced_infoz)
  1756. : old_capacity_(c.capacity()),
  1757. had_infoz_(c.has_infoz()),
  1758. was_soo_(was_soo),
  1759. had_soo_slot_(had_soo_slot),
  1760. forced_infoz_(forced_infoz) {}
  1761. // Optimized for small groups version of `find_first_non_full`.
  1762. // Beneficial only right after calling `raw_hash_set::resize`.
  1763. // It is safe to call in case capacity is big or was not changed, but there
  1764. // will be no performance benefit.
  1765. // It has implicit assumption that `resize` will call
  1766. // `GrowSizeIntoSingleGroup*` in case `IsGrowingIntoSingleGroupApplicable`.
  1767. // Falls back to `find_first_non_full` in case of big groups.
  1768. static FindInfo FindFirstNonFullAfterResize(const CommonFields& c,
  1769. size_t old_capacity,
  1770. size_t hash) {
  1771. if (!IsGrowingIntoSingleGroupApplicable(old_capacity, c.capacity())) {
  1772. return find_first_non_full(c, hash);
  1773. }
  1774. // Find a location for the new element non-deterministically.
  1775. // Note that any position is correct.
  1776. // It will located at `half_old_capacity` or one of the other
  1777. // empty slots with approximately 50% probability each.
  1778. size_t offset = probe(c, hash).offset();
  1779. // Note that we intentionally use unsigned int underflow.
  1780. if (offset - (old_capacity + 1) >= old_capacity) {
  1781. // Offset fall on kSentinel or into the mostly occupied first half.
  1782. offset = old_capacity / 2;
  1783. }
  1784. assert(IsEmpty(c.control()[offset]));
  1785. return FindInfo{offset, 0};
  1786. }
  1787. HeapOrSoo& old_heap_or_soo() { return old_heap_or_soo_; }
  1788. void* old_soo_data() { return old_heap_or_soo_.get_soo_data(); }
  1789. ctrl_t* old_ctrl() const {
  1790. assert(!was_soo_);
  1791. return old_heap_or_soo_.control();
  1792. }
  1793. void* old_slots() const {
  1794. assert(!was_soo_);
  1795. return old_heap_or_soo_.slot_array().get();
  1796. }
  1797. size_t old_capacity() const { return old_capacity_; }
  1798. // Returns the index of the SOO slot when growing from SOO to non-SOO in a
  1799. // single group. See also InitControlBytesAfterSoo(). It's important to use
  1800. // index 1 so that when resizing from capacity 1 to 3, we can still have
  1801. // random iteration order between the first two inserted elements.
  1802. // I.e. it allows inserting the second element at either index 0 or 2.
  1803. static size_t SooSlotIndex() { return 1; }
  1804. // Allocates a backing array for the hashtable.
  1805. // Reads `capacity` and updates all other fields based on the result of
  1806. // the allocation.
  1807. //
  1808. // It also may do the following actions:
  1809. // 1. initialize control bytes
  1810. // 2. initialize slots
  1811. // 3. deallocate old slots.
  1812. //
  1813. // We are bundling a lot of functionality
  1814. // in one ABSL_ATTRIBUTE_NOINLINE function in order to minimize binary code
  1815. // duplication in raw_hash_set<>::resize.
  1816. //
  1817. // `c.capacity()` must be nonzero.
  1818. // POSTCONDITIONS:
  1819. // 1. CommonFields is initialized.
  1820. //
  1821. // if IsGrowingIntoSingleGroupApplicable && TransferUsesMemcpy
  1822. // Both control bytes and slots are fully initialized.
  1823. // old_slots are deallocated.
  1824. // infoz.RecordRehash is called.
  1825. //
  1826. // if IsGrowingIntoSingleGroupApplicable && !TransferUsesMemcpy
  1827. // Control bytes are fully initialized.
  1828. // infoz.RecordRehash is called.
  1829. // GrowSizeIntoSingleGroup must be called to finish slots initialization.
  1830. //
  1831. // if !IsGrowingIntoSingleGroupApplicable
  1832. // Control bytes are initialized to empty table via ResetCtrl.
  1833. // raw_hash_set<>::resize must insert elements regularly.
  1834. // infoz.RecordRehash is called if old_capacity == 0.
  1835. //
  1836. // Returns IsGrowingIntoSingleGroupApplicable result to avoid recomputation.
  1837. template <typename Alloc, size_t SizeOfSlot, bool TransferUsesMemcpy,
  1838. bool SooEnabled, size_t AlignOfSlot>
  1839. ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, Alloc alloc,
  1840. ctrl_t soo_slot_h2,
  1841. size_t key_size,
  1842. size_t value_size) {
  1843. assert(c.capacity());
  1844. HashtablezInfoHandle infoz =
  1845. ShouldSampleHashtablezInfo<Alloc>()
  1846. ? SampleHashtablezInfo<SooEnabled>(SizeOfSlot, key_size, value_size,
  1847. old_capacity_, was_soo_,
  1848. forced_infoz_, c)
  1849. : HashtablezInfoHandle{};
  1850. const bool has_infoz = infoz.IsSampled();
  1851. RawHashSetLayout layout(c.capacity(), AlignOfSlot, has_infoz);
  1852. char* mem = static_cast<char*>(Allocate<BackingArrayAlignment(AlignOfSlot)>(
  1853. &alloc, layout.alloc_size(SizeOfSlot)));
  1854. const GenerationType old_generation = c.generation();
  1855. c.set_generation_ptr(
  1856. reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));
  1857. c.set_generation(NextGeneration(old_generation));
  1858. c.set_control(reinterpret_cast<ctrl_t*>(mem + layout.control_offset()));
  1859. c.set_slots(mem + layout.slot_offset());
  1860. ResetGrowthLeft(c);
  1861. const bool grow_single_group =
  1862. IsGrowingIntoSingleGroupApplicable(old_capacity_, layout.capacity());
  1863. if (SooEnabled && was_soo_ && grow_single_group) {
  1864. InitControlBytesAfterSoo(c.control(), soo_slot_h2, layout.capacity());
  1865. if (TransferUsesMemcpy && had_soo_slot_) {
  1866. TransferSlotAfterSoo(c, SizeOfSlot);
  1867. }
  1868. // SooEnabled implies that old_capacity_ != 0.
  1869. } else if ((SooEnabled || old_capacity_ != 0) && grow_single_group) {
  1870. if (TransferUsesMemcpy) {
  1871. GrowSizeIntoSingleGroupTransferable(c, SizeOfSlot);
  1872. DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot);
  1873. } else {
  1874. GrowIntoSingleGroupShuffleControlBytes(c.control(), layout.capacity());
  1875. }
  1876. } else {
  1877. ResetCtrl(c, SizeOfSlot);
  1878. }
  1879. c.set_has_infoz(has_infoz);
  1880. if (has_infoz) {
  1881. infoz.RecordStorageChanged(c.size(), layout.capacity());
  1882. if ((SooEnabled && was_soo_) || grow_single_group || old_capacity_ == 0) {
  1883. infoz.RecordRehash(0);
  1884. }
  1885. c.set_infoz(infoz);
  1886. }
  1887. return grow_single_group;
  1888. }
  1889. // Relocates slots into new single group consistent with
  1890. // GrowIntoSingleGroupShuffleControlBytes.
  1891. //
  1892. // PRECONDITIONS:
  1893. // 1. GrowIntoSingleGroupShuffleControlBytes was already called.
  1894. template <class PolicyTraits, class Alloc>
  1895. void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref) {
  1896. assert(old_capacity_ < Group::kWidth / 2);
  1897. assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));
  1898. using slot_type = typename PolicyTraits::slot_type;
  1899. assert(is_single_group(c.capacity()));
  1900. auto* new_slots = static_cast<slot_type*>(c.slot_array());
  1901. auto* old_slots_ptr = static_cast<slot_type*>(old_slots());
  1902. size_t shuffle_bit = old_capacity_ / 2 + 1;
  1903. for (size_t i = 0; i < old_capacity_; ++i) {
  1904. if (IsFull(old_ctrl()[i])) {
  1905. size_t new_i = i ^ shuffle_bit;
  1906. SanitizerUnpoisonMemoryRegion(new_slots + new_i, sizeof(slot_type));
  1907. PolicyTraits::transfer(&alloc_ref, new_slots + new_i,
  1908. old_slots_ptr + i);
  1909. }
  1910. }
  1911. PoisonSingleGroupEmptySlots(c, sizeof(slot_type));
  1912. }
  1913. // Deallocates old backing array.
  1914. template <size_t AlignOfSlot, class CharAlloc>
  1915. void DeallocateOld(CharAlloc alloc_ref, size_t slot_size) {
  1916. SanitizerUnpoisonMemoryRegion(old_slots(), slot_size * old_capacity_);
  1917. auto layout = RawHashSetLayout(old_capacity_, AlignOfSlot, had_infoz_);
  1918. Deallocate<BackingArrayAlignment(AlignOfSlot)>(
  1919. &alloc_ref, old_ctrl() - layout.control_offset(),
  1920. layout.alloc_size(slot_size));
  1921. }
  1922. private:
  1923. // Returns true if `GrowSizeIntoSingleGroup` can be used for resizing.
  1924. static bool IsGrowingIntoSingleGroupApplicable(size_t old_capacity,
  1925. size_t new_capacity) {
  1926. // NOTE that `old_capacity < new_capacity` in order to have
  1927. // `old_capacity < Group::kWidth / 2` to make faster copies of 8 bytes.
  1928. return is_single_group(new_capacity) && old_capacity < new_capacity;
  1929. }
  1930. // Relocates control bytes and slots into new single group for
  1931. // transferable objects.
  1932. // Must be called only if IsGrowingIntoSingleGroupApplicable returned true.
  1933. void GrowSizeIntoSingleGroupTransferable(CommonFields& c, size_t slot_size);
  1934. // If there was an SOO slot and slots are transferable, transfers the SOO slot
  1935. // into the new heap allocation. Must be called only if
  1936. // IsGrowingIntoSingleGroupApplicable returned true.
  1937. void TransferSlotAfterSoo(CommonFields& c, size_t slot_size);
  1938. // Shuffle control bits deterministically to the next capacity.
  1939. // Returns offset for newly added element with given hash.
  1940. //
  1941. // PRECONDITIONs:
  1942. // 1. new_ctrl is allocated for new_capacity,
  1943. // but not initialized.
  1944. // 2. new_capacity is a single group.
  1945. //
  1946. // All elements are transferred into the first `old_capacity + 1` positions
  1947. // of the new_ctrl. Elements are rotated by `old_capacity_ / 2 + 1` positions
  1948. // in order to change an order and keep it non deterministic.
  1949. // Although rotation itself deterministic, position of the new added element
  1950. // will be based on `H1` and is not deterministic.
  1951. //
  1952. // Examples:
  1953. // S = kSentinel, E = kEmpty
  1954. //
  1955. // old_ctrl = SEEEEEEEE...
  1956. // new_ctrl = ESEEEEEEE...
  1957. //
  1958. // old_ctrl = 0SEEEEEEE...
  1959. // new_ctrl = E0ESE0EEE...
  1960. //
  1961. // old_ctrl = 012S012EEEEEEEEE...
  1962. // new_ctrl = 2E01EEES2E01EEE...
  1963. //
  1964. // old_ctrl = 0123456S0123456EEEEEEEEEEE...
  1965. // new_ctrl = 456E0123EEEEEES456E0123EEE...
  1966. void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* new_ctrl,
  1967. size_t new_capacity) const;
  1968. // If the table was SOO, initializes new control bytes. `h2` is the control
  1969. // byte corresponding to the full slot. Must be called only if
  1970. // IsGrowingIntoSingleGroupApplicable returned true.
  1971. // Requires: `had_soo_slot_ || h2 == ctrl_t::kEmpty`.
  1972. void InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2,
  1973. size_t new_capacity);
  1974. // Shuffle trivially transferable slots in the way consistent with
  1975. // GrowIntoSingleGroupShuffleControlBytes.
  1976. //
  1977. // PRECONDITIONs:
  1978. // 1. old_capacity must be non-zero.
  1979. // 2. new_ctrl is fully initialized using
  1980. // GrowIntoSingleGroupShuffleControlBytes.
  1981. // 3. new_slots is allocated and *not* poisoned.
  1982. //
  1983. // POSTCONDITIONS:
  1984. // 1. new_slots are transferred from old_slots_ consistent with
  1985. // GrowIntoSingleGroupShuffleControlBytes.
  1986. // 2. Empty new_slots are *not* poisoned.
  1987. void GrowIntoSingleGroupShuffleTransferableSlots(void* new_slots,
  1988. size_t slot_size) const;
  1989. // Poison empty slots that were transferred using the deterministic algorithm
  1990. // described above.
  1991. // PRECONDITIONs:
  1992. // 1. new_ctrl is fully initialized using
  1993. // GrowIntoSingleGroupShuffleControlBytes.
  1994. // 2. new_slots is fully initialized consistent with
  1995. // GrowIntoSingleGroupShuffleControlBytes.
  1996. void PoisonSingleGroupEmptySlots(CommonFields& c, size_t slot_size) const {
  1997. // poison non full items
  1998. for (size_t i = 0; i < c.capacity(); ++i) {
  1999. if (!IsFull(c.control()[i])) {
  2000. SanitizerPoisonMemoryRegion(SlotAddress(c.slot_array(), i, slot_size),
  2001. slot_size);
  2002. }
  2003. }
  2004. }
  2005. HeapOrSoo old_heap_or_soo_;
  2006. size_t old_capacity_;
  2007. bool had_infoz_;
  2008. bool was_soo_;
  2009. bool had_soo_slot_;
  2010. // Either null infoz or a pre-sampled forced infoz for SOO tables.
  2011. HashtablezInfoHandle forced_infoz_;
  2012. };
  2013. inline void PrepareInsertCommon(CommonFields& common) {
  2014. common.increment_size();
  2015. common.maybe_increment_generation_on_insert();
  2016. }
  2017. // Like prepare_insert, but for the case of inserting into a full SOO table.
  2018. size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
  2019. CommonFields& common);
  2020. // PolicyFunctions bundles together some information for a particular
  2021. // raw_hash_set<T, ...> instantiation. This information is passed to
  2022. // type-erased functions that want to do small amounts of type-specific
  2023. // work.
  2024. struct PolicyFunctions {
  2025. size_t slot_size;
  2026. // Returns the pointer to the hash function stored in the set.
  2027. const void* (*hash_fn)(const CommonFields& common);
  2028. // Returns the hash of the pointed-to slot.
  2029. size_t (*hash_slot)(const void* hash_fn, void* slot);
  2030. // Transfers the contents of src_slot to dst_slot.
  2031. void (*transfer)(void* set, void* dst_slot, void* src_slot);
  2032. // Deallocates the backing store from common.
  2033. void (*dealloc)(CommonFields& common, const PolicyFunctions& policy);
  2034. // Resizes set to the new capacity.
  2035. // Arguments are used as in raw_hash_set::resize_impl.
  2036. void (*resize)(CommonFields& common, size_t new_capacity,
  2037. HashtablezInfoHandle forced_infoz);
  2038. };
  2039. // ClearBackingArray clears the backing array, either modifying it in place,
  2040. // or creating a new one based on the value of "reuse".
  2041. // REQUIRES: c.capacity > 0
  2042. void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
  2043. bool reuse, bool soo_enabled);
  2044. // Type-erased version of raw_hash_set::erase_meta_only.
  2045. void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size);
  2046. // Function to place in PolicyFunctions::dealloc for raw_hash_sets
  2047. // that are using std::allocator. This allows us to share the same
  2048. // function body for raw_hash_set instantiations that have the
  2049. // same slot alignment.
  2050. template <size_t AlignOfSlot>
  2051. ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
  2052. const PolicyFunctions& policy) {
  2053. // Unpoison before returning the memory to the allocator.
  2054. SanitizerUnpoisonMemoryRegion(common.slot_array(),
  2055. policy.slot_size * common.capacity());
  2056. std::allocator<char> alloc;
  2057. common.infoz().Unregister();
  2058. Deallocate<BackingArrayAlignment(AlignOfSlot)>(
  2059. &alloc, common.backing_array_start(),
  2060. common.alloc_size(policy.slot_size, AlignOfSlot));
  2061. }
  2062. // For trivially relocatable types we use memcpy directly. This allows us to
  2063. // share the same function body for raw_hash_set instantiations that have the
  2064. // same slot size as long as they are relocatable.
  2065. template <size_t SizeOfSlot>
  2066. ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) {
  2067. memcpy(dst, src, SizeOfSlot);
  2068. }
  2069. // Type erased raw_hash_set::get_hash_ref_fn for the empty hash function case.
  2070. const void* GetHashRefForEmptyHasher(const CommonFields& common);
  2071. // Given the hash of a value not currently in the table and the first empty
  2072. // slot in the probe sequence, finds a viable slot index to insert it at.
  2073. //
  2074. // In case there's no space left, the table can be resized or rehashed
  2075. // (for tables with deleted slots, see FindInsertPositionWithGrowthOrRehash).
  2076. //
  2077. // In the case of absence of deleted slots and positive growth_left, the element
  2078. // can be inserted in the provided `target` position.
  2079. //
  2080. // When the table has deleted slots (according to GrowthInfo), the target
  2081. // position will be searched one more time using `find_first_non_full`.
  2082. //
  2083. // REQUIRES: Table is not SOO.
  2084. // REQUIRES: At least one non-full slot available.
  2085. // REQUIRES: `target` is a valid empty position to insert.
  2086. size_t PrepareInsertNonSoo(CommonFields& common, size_t hash, FindInfo target,
  2087. const PolicyFunctions& policy);
  2088. // A SwissTable.
  2089. //
  2090. // Policy: a policy defines how to perform different operations on
  2091. // the slots of the hashtable (see hash_policy_traits.h for the full interface
  2092. // of policy).
  2093. //
  2094. // Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
  2095. // functor should accept a key and return size_t as hash. For best performance
  2096. // it is important that the hash function provides high entropy across all bits
  2097. // of the hash.
  2098. //
  2099. // Eq: a (possibly polymorphic) functor that compares two keys for equality. It
  2100. // should accept two (of possibly different type) keys and return a bool: true
  2101. // if they are equal, false if they are not. If two keys compare equal, then
  2102. // their hash values as defined by Hash MUST be equal.
  2103. //
  2104. // Allocator: an Allocator
  2105. // [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
  2106. // the storage of the hashtable will be allocated and the elements will be
  2107. // constructed and destroyed.
  2108. template <class Policy, class Hash, class Eq, class Alloc>
  2109. class raw_hash_set {
  2110. using PolicyTraits = hash_policy_traits<Policy>;
  2111. using KeyArgImpl =
  2112. KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
  2113. public:
  2114. using init_type = typename PolicyTraits::init_type;
  2115. using key_type = typename PolicyTraits::key_type;
  2116. // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
  2117. // code fixes!
  2118. using slot_type = typename PolicyTraits::slot_type;
  2119. using allocator_type = Alloc;
  2120. using size_type = size_t;
  2121. using difference_type = ptrdiff_t;
  2122. using hasher = Hash;
  2123. using key_equal = Eq;
  2124. using policy_type = Policy;
  2125. using value_type = typename PolicyTraits::value_type;
  2126. using reference = value_type&;
  2127. using const_reference = const value_type&;
  2128. using pointer = typename absl::allocator_traits<
  2129. allocator_type>::template rebind_traits<value_type>::pointer;
  2130. using const_pointer = typename absl::allocator_traits<
  2131. allocator_type>::template rebind_traits<value_type>::const_pointer;
  2132. // Alias used for heterogeneous lookup functions.
  2133. // `key_arg<K>` evaluates to `K` when the functors are transparent and to
  2134. // `key_type` otherwise. It permits template argument deduction on `K` for the
  2135. // transparent case.
  2136. template <class K>
  2137. using key_arg = typename KeyArgImpl::template type<K, key_type>;
  2138. private:
  2139. // TODO(b/289225379): we could add extra SOO space inside raw_hash_set
  2140. // after CommonFields to allow inlining larger slot_types (e.g. std::string),
  2141. // but it's a bit complicated if we want to support incomplete mapped_type in
  2142. // flat_hash_map. We could potentially do this for flat_hash_set and for an
  2143. // allowlist of `mapped_type`s of flat_hash_map that includes e.g. arithmetic
  2144. // types, strings, cords, and pairs/tuples of allowlisted types.
  2145. constexpr static bool SooEnabled() {
  2146. return PolicyTraits::soo_enabled() &&
  2147. sizeof(slot_type) <= sizeof(HeapOrSoo) &&
  2148. alignof(slot_type) <= alignof(HeapOrSoo);
  2149. }
  2150. // Whether `size` fits in the SOO capacity of this table.
  2151. bool fits_in_soo(size_t size) const {
  2152. return SooEnabled() && size <= SooCapacity();
  2153. }
  2154. // Whether this table is in SOO mode or non-SOO mode.
  2155. bool is_soo() const { return fits_in_soo(capacity()); }
  2156. bool is_full_soo() const { return is_soo() && !empty(); }
  2157. // Give an early error when key_type is not hashable/eq.
  2158. auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
  2159. auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
  2160. using AllocTraits = absl::allocator_traits<allocator_type>;
  2161. using SlotAlloc = typename absl::allocator_traits<
  2162. allocator_type>::template rebind_alloc<slot_type>;
  2163. // People are often sloppy with the exact type of their allocator (sometimes
  2164. // it has an extra const or is missing the pair, but rebinds made it work
  2165. // anyway).
  2166. using CharAlloc =
  2167. typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
  2168. using SlotAllocTraits = typename absl::allocator_traits<
  2169. allocator_type>::template rebind_traits<slot_type>;
  2170. static_assert(std::is_lvalue_reference<reference>::value,
  2171. "Policy::element() must return a reference");
  2172. template <typename T>
  2173. struct SameAsElementReference
  2174. : std::is_same<typename std::remove_cv<
  2175. typename std::remove_reference<reference>::type>::type,
  2176. typename std::remove_cv<
  2177. typename std::remove_reference<T>::type>::type> {};
  2178. // An enabler for insert(T&&): T must be convertible to init_type or be the
  2179. // same as [cv] value_type [ref].
  2180. // Note: we separate SameAsElementReference into its own type to avoid using
  2181. // reference unless we need to. MSVC doesn't seem to like it in some
  2182. // cases.
  2183. template <class T>
  2184. using RequiresInsertable = typename std::enable_if<
  2185. absl::disjunction<std::is_convertible<T, init_type>,
  2186. SameAsElementReference<T>>::value,
  2187. int>::type;
  2188. // RequiresNotInit is a workaround for gcc prior to 7.1.
  2189. // See https://godbolt.org/g/Y4xsUh.
  2190. template <class T>
  2191. using RequiresNotInit =
  2192. typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
  2193. template <class... Ts>
  2194. using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
  2195. public:
  2196. static_assert(std::is_same<pointer, value_type*>::value,
  2197. "Allocators with custom pointer types are not supported");
  2198. static_assert(std::is_same<const_pointer, const value_type*>::value,
  2199. "Allocators with custom pointer types are not supported");
  2200. class iterator : private HashSetIteratorGenerationInfo {
  2201. friend class raw_hash_set;
  2202. friend struct HashtableFreeFunctionsAccess;
  2203. public:
  2204. using iterator_category = std::forward_iterator_tag;
  2205. using value_type = typename raw_hash_set::value_type;
  2206. using reference =
  2207. absl::conditional_t<PolicyTraits::constant_iterators::value,
  2208. const value_type&, value_type&>;
  2209. using pointer = absl::remove_reference_t<reference>*;
  2210. using difference_type = typename raw_hash_set::difference_type;
  2211. iterator() {}
  2212. // PRECONDITION: not an end() iterator.
  2213. reference operator*() const {
  2214. AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()");
  2215. return unchecked_deref();
  2216. }
  2217. // PRECONDITION: not an end() iterator.
  2218. pointer operator->() const {
  2219. AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->");
  2220. return &operator*();
  2221. }
  2222. // PRECONDITION: not an end() iterator.
  2223. iterator& operator++() {
  2224. AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++");
  2225. ++ctrl_;
  2226. ++slot_;
  2227. skip_empty_or_deleted();
  2228. if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
  2229. return *this;
  2230. }
  2231. // PRECONDITION: not an end() iterator.
  2232. iterator operator++(int) {
  2233. auto tmp = *this;
  2234. ++*this;
  2235. return tmp;
  2236. }
  2237. friend bool operator==(const iterator& a, const iterator& b) {
  2238. AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr());
  2239. AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr());
  2240. AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_,
  2241. a.generation_ptr(), b.generation_ptr());
  2242. return a.ctrl_ == b.ctrl_;
  2243. }
  2244. friend bool operator!=(const iterator& a, const iterator& b) {
  2245. return !(a == b);
  2246. }
  2247. private:
  2248. iterator(ctrl_t* ctrl, slot_type* slot,
  2249. const GenerationType* generation_ptr)
  2250. : HashSetIteratorGenerationInfo(generation_ptr),
  2251. ctrl_(ctrl),
  2252. slot_(slot) {
  2253. // This assumption helps the compiler know that any non-end iterator is
  2254. // not equal to any end iterator.
  2255. ABSL_ASSUME(ctrl != nullptr);
  2256. }
  2257. // This constructor is used in begin() to avoid an MSan
  2258. // use-of-uninitialized-value error. Delegating from this constructor to
  2259. // the previous one doesn't avoid the error.
  2260. iterator(ctrl_t* ctrl, MaybeInitializedPtr slot,
  2261. const GenerationType* generation_ptr)
  2262. : HashSetIteratorGenerationInfo(generation_ptr),
  2263. ctrl_(ctrl),
  2264. slot_(to_slot(slot.get())) {
  2265. // This assumption helps the compiler know that any non-end iterator is
  2266. // not equal to any end iterator.
  2267. ABSL_ASSUME(ctrl != nullptr);
  2268. }
  2269. // For end() iterators.
  2270. explicit iterator(const GenerationType* generation_ptr)
  2271. : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {}
  2272. // Fixes up `ctrl_` to point to a full or sentinel by advancing `ctrl_` and
  2273. // `slot_` until they reach one.
  2274. void skip_empty_or_deleted() {
  2275. while (IsEmptyOrDeleted(*ctrl_)) {
  2276. uint32_t shift =
  2277. GroupFullEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted();
  2278. ctrl_ += shift;
  2279. slot_ += shift;
  2280. }
  2281. }
  2282. ctrl_t* control() const { return ctrl_; }
  2283. slot_type* slot() const { return slot_; }
  2284. // We use EmptyGroup() for default-constructed iterators so that they can
  2285. // be distinguished from end iterators, which have nullptr ctrl_.
  2286. ctrl_t* ctrl_ = EmptyGroup();
  2287. // To avoid uninitialized member warnings, put slot_ in an anonymous union.
  2288. // The member is not initialized on singleton and end iterators.
  2289. union {
  2290. slot_type* slot_;
  2291. };
  2292. // An equality check which skips ABSL Hardening iterator invalidation
  2293. // checks.
  2294. // Should be used when the lifetimes of the iterators are well-enough
  2295. // understood to prove that they cannot be invalid.
  2296. bool unchecked_equals(const iterator& b) { return ctrl_ == b.control(); }
  2297. // Dereferences the iterator without ABSL Hardening iterator invalidation
  2298. // checks.
  2299. reference unchecked_deref() const { return PolicyTraits::element(slot_); }
  2300. };
  2301. class const_iterator {
  2302. friend class raw_hash_set;
  2303. template <class Container, typename Enabler>
  2304. friend struct absl::container_internal::hashtable_debug_internal::
  2305. HashtableDebugAccess;
  2306. public:
  2307. using iterator_category = typename iterator::iterator_category;
  2308. using value_type = typename raw_hash_set::value_type;
  2309. using reference = typename raw_hash_set::const_reference;
  2310. using pointer = typename raw_hash_set::const_pointer;
  2311. using difference_type = typename raw_hash_set::difference_type;
  2312. const_iterator() = default;
  2313. // Implicit construction from iterator.
  2314. const_iterator(iterator i) : inner_(std::move(i)) {} // NOLINT
  2315. reference operator*() const { return *inner_; }
  2316. pointer operator->() const { return inner_.operator->(); }
  2317. const_iterator& operator++() {
  2318. ++inner_;
  2319. return *this;
  2320. }
  2321. const_iterator operator++(int) { return inner_++; }
  2322. friend bool operator==(const const_iterator& a, const const_iterator& b) {
  2323. return a.inner_ == b.inner_;
  2324. }
  2325. friend bool operator!=(const const_iterator& a, const const_iterator& b) {
  2326. return !(a == b);
  2327. }
  2328. private:
  2329. const_iterator(const ctrl_t* ctrl, const slot_type* slot,
  2330. const GenerationType* gen)
  2331. : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot), gen) {
  2332. }
  2333. ctrl_t* control() const { return inner_.control(); }
  2334. slot_type* slot() const { return inner_.slot(); }
  2335. iterator inner_;
  2336. bool unchecked_equals(const const_iterator& b) {
  2337. return inner_.unchecked_equals(b.inner_);
  2338. }
  2339. };
  2340. using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
  2341. using insert_return_type = InsertReturnType<iterator, node_type>;
  2342. // Note: can't use `= default` due to non-default noexcept (causes
  2343. // problems for some compilers). NOLINTNEXTLINE
  2344. raw_hash_set() noexcept(
  2345. std::is_nothrow_default_constructible<hasher>::value &&
  2346. std::is_nothrow_default_constructible<key_equal>::value &&
  2347. std::is_nothrow_default_constructible<allocator_type>::value) {}
  2348. ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set(
  2349. size_t bucket_count, const hasher& hash = hasher(),
  2350. const key_equal& eq = key_equal(),
  2351. const allocator_type& alloc = allocator_type())
  2352. : settings_(CommonFields::CreateDefault<SooEnabled()>(), hash, eq,
  2353. alloc) {
  2354. if (bucket_count > (SooEnabled() ? SooCapacity() : 0)) {
  2355. ABSL_RAW_CHECK(bucket_count <= MaxValidCapacity<sizeof(slot_type)>(),
  2356. "Hash table size overflow");
  2357. resize(NormalizeCapacity(bucket_count));
  2358. }
  2359. }
  2360. raw_hash_set(size_t bucket_count, const hasher& hash,
  2361. const allocator_type& alloc)
  2362. : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
  2363. raw_hash_set(size_t bucket_count, const allocator_type& alloc)
  2364. : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
  2365. explicit raw_hash_set(const allocator_type& alloc)
  2366. : raw_hash_set(0, hasher(), key_equal(), alloc) {}
  2367. template <class InputIter>
  2368. raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
  2369. const hasher& hash = hasher(), const key_equal& eq = key_equal(),
  2370. const allocator_type& alloc = allocator_type())
  2371. : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
  2372. hash, eq, alloc) {
  2373. insert(first, last);
  2374. }
  2375. template <class InputIter>
  2376. raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
  2377. const hasher& hash, const allocator_type& alloc)
  2378. : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
  2379. template <class InputIter>
  2380. raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
  2381. const allocator_type& alloc)
  2382. : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
  2383. template <class InputIter>
  2384. raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
  2385. : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
  2386. // Instead of accepting std::initializer_list<value_type> as the first
  2387. // argument like std::unordered_set<value_type> does, we have two overloads
  2388. // that accept std::initializer_list<T> and std::initializer_list<init_type>.
  2389. // This is advantageous for performance.
  2390. //
  2391. // // Turns {"abc", "def"} into std::initializer_list<std::string>, then
  2392. // // copies the strings into the set.
  2393. // std::unordered_set<std::string> s = {"abc", "def"};
  2394. //
  2395. // // Turns {"abc", "def"} into std::initializer_list<const char*>, then
  2396. // // copies the strings into the set.
  2397. // absl::flat_hash_set<std::string> s = {"abc", "def"};
  2398. //
  2399. // The same trick is used in insert().
  2400. //
  2401. // The enabler is necessary to prevent this constructor from triggering where
  2402. // the copy constructor is meant to be called.
  2403. //
  2404. // absl::flat_hash_set<int> a, b{a};
  2405. //
  2406. // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
  2407. template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  2408. raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
  2409. const hasher& hash = hasher(), const key_equal& eq = key_equal(),
  2410. const allocator_type& alloc = allocator_type())
  2411. : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
  2412. raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
  2413. const hasher& hash = hasher(), const key_equal& eq = key_equal(),
  2414. const allocator_type& alloc = allocator_type())
  2415. : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
  2416. template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  2417. raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
  2418. const hasher& hash, const allocator_type& alloc)
  2419. : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
  2420. raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
  2421. const hasher& hash, const allocator_type& alloc)
  2422. : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
  2423. template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  2424. raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
  2425. const allocator_type& alloc)
  2426. : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
  2427. raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
  2428. const allocator_type& alloc)
  2429. : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
  2430. template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  2431. raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
  2432. : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
  2433. raw_hash_set(std::initializer_list<init_type> init,
  2434. const allocator_type& alloc)
  2435. : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
  2436. raw_hash_set(const raw_hash_set& that)
  2437. : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
  2438. that.alloc_ref())) {}
  2439. raw_hash_set(const raw_hash_set& that, const allocator_type& a)
  2440. : raw_hash_set(GrowthToLowerboundCapacity(that.size()), that.hash_ref(),
  2441. that.eq_ref(), a) {
  2442. const size_t size = that.size();
  2443. if (size == 0) {
  2444. return;
  2445. }
  2446. // We don't use `that.is_soo()` here because `that` can have non-SOO
  2447. // capacity but have a size that fits into SOO capacity.
  2448. if (fits_in_soo(size)) {
  2449. assert(size == 1);
  2450. common().set_full_soo();
  2451. emplace_at(soo_iterator(), *that.begin());
  2452. const HashtablezInfoHandle infoz = try_sample_soo();
  2453. if (infoz.IsSampled()) resize_with_soo_infoz(infoz);
  2454. return;
  2455. }
  2456. assert(!that.is_soo());
  2457. const size_t cap = capacity();
  2458. // Note about single group tables:
  2459. // 1. It is correct to have any order of elements.
  2460. // 2. Order has to be non deterministic.
  2461. // 3. We are assigning elements with arbitrary `shift` starting from
  2462. // `capacity + shift` position.
  2463. // 4. `shift` must be coprime with `capacity + 1` in order to be able to use
  2464. // modular arithmetic to traverse all positions, instead if cycling
  2465. // through a subset of positions. Odd numbers are coprime with any
  2466. // `capacity + 1` (2^N).
  2467. size_t offset = cap;
  2468. const size_t shift =
  2469. is_single_group(cap) ? (PerTableSalt(control()) | 1) : 0;
  2470. IterateOverFullSlots(
  2471. that.common(), that.slot_array(),
  2472. [&](const ctrl_t* that_ctrl,
  2473. slot_type* that_slot) ABSL_ATTRIBUTE_ALWAYS_INLINE {
  2474. if (shift == 0) {
  2475. // Big tables case. Position must be searched via probing.
  2476. // The table is guaranteed to be empty, so we can do faster than
  2477. // a full `insert`.
  2478. const size_t hash = PolicyTraits::apply(
  2479. HashElement{hash_ref()}, PolicyTraits::element(that_slot));
  2480. FindInfo target = find_first_non_full_outofline(common(), hash);
  2481. infoz().RecordInsert(hash, target.probe_length);
  2482. offset = target.offset;
  2483. } else {
  2484. // Small tables case. Next position is computed via shift.
  2485. offset = (offset + shift) & cap;
  2486. }
  2487. const h2_t h2 = static_cast<h2_t>(*that_ctrl);
  2488. assert( // We rely that hash is not changed for small tables.
  2489. H2(PolicyTraits::apply(HashElement{hash_ref()},
  2490. PolicyTraits::element(that_slot))) == h2 &&
  2491. "hash function value changed unexpectedly during the copy");
  2492. SetCtrl(common(), offset, h2, sizeof(slot_type));
  2493. emplace_at(iterator_at(offset), PolicyTraits::element(that_slot));
  2494. common().maybe_increment_generation_on_insert();
  2495. });
  2496. if (shift != 0) {
  2497. // On small table copy we do not record individual inserts.
  2498. // RecordInsert requires hash, but it is unknown for small tables.
  2499. infoz().RecordStorageChanged(size, cap);
  2500. }
  2501. common().set_size(size);
  2502. growth_info().OverwriteManyEmptyAsFull(size);
  2503. }
  2504. ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
  2505. std::is_nothrow_copy_constructible<hasher>::value &&
  2506. std::is_nothrow_copy_constructible<key_equal>::value &&
  2507. std::is_nothrow_copy_constructible<allocator_type>::value)
  2508. : // Hash, equality and allocator are copied instead of moved because
  2509. // `that` must be left valid. If Hash is std::function<Key>, moving it
  2510. // would create a nullptr functor that cannot be called.
  2511. // TODO(b/296061262): move instead of copying hash/eq/alloc.
  2512. // Note: we avoid using exchange for better generated code.
  2513. settings_(PolicyTraits::transfer_uses_memcpy() || !that.is_full_soo()
  2514. ? std::move(that.common())
  2515. : CommonFields{full_soo_tag_t{}},
  2516. that.hash_ref(), that.eq_ref(), that.alloc_ref()) {
  2517. if (!PolicyTraits::transfer_uses_memcpy() && that.is_full_soo()) {
  2518. transfer(soo_slot(), that.soo_slot());
  2519. }
  2520. that.common() = CommonFields::CreateDefault<SooEnabled()>();
  2521. maybe_increment_generation_or_rehash_on_move();
  2522. }
  2523. raw_hash_set(raw_hash_set&& that, const allocator_type& a)
  2524. : settings_(CommonFields::CreateDefault<SooEnabled()>(), that.hash_ref(),
  2525. that.eq_ref(), a) {
  2526. if (a == that.alloc_ref()) {
  2527. swap_common(that);
  2528. maybe_increment_generation_or_rehash_on_move();
  2529. } else {
  2530. move_elements_allocs_unequal(std::move(that));
  2531. }
  2532. }
  2533. raw_hash_set& operator=(const raw_hash_set& that) {
  2534. if (ABSL_PREDICT_FALSE(this == &that)) return *this;
  2535. constexpr bool propagate_alloc =
  2536. AllocTraits::propagate_on_container_copy_assignment::value;
  2537. // TODO(ezb): maybe avoid allocating a new backing array if this->capacity()
  2538. // is an exact match for that.size(). If this->capacity() is too big, then
  2539. // it would make iteration very slow to reuse the allocation. Maybe we can
  2540. // do the same heuristic as clear() and reuse if it's small enough.
  2541. raw_hash_set tmp(that, propagate_alloc ? that.alloc_ref() : alloc_ref());
  2542. // NOLINTNEXTLINE: not returning *this for performance.
  2543. return assign_impl<propagate_alloc>(std::move(tmp));
  2544. }
  2545. raw_hash_set& operator=(raw_hash_set&& that) noexcept(
  2546. absl::allocator_traits<allocator_type>::is_always_equal::value &&
  2547. std::is_nothrow_move_assignable<hasher>::value &&
  2548. std::is_nothrow_move_assignable<key_equal>::value) {
  2549. // TODO(sbenza): We should only use the operations from the noexcept clause
  2550. // to make sure we actually adhere to that contract.
  2551. // NOLINTNEXTLINE: not returning *this for performance.
  2552. return move_assign(
  2553. std::move(that),
  2554. typename AllocTraits::propagate_on_container_move_assignment());
  2555. }
  2556. ~raw_hash_set() { destructor_impl(); }
  2557. iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2558. if (ABSL_PREDICT_FALSE(empty())) return end();
  2559. if (is_soo()) return soo_iterator();
  2560. iterator it = {control(), common().slots_union(),
  2561. common().generation_ptr()};
  2562. it.skip_empty_or_deleted();
  2563. assert(IsFull(*it.control()));
  2564. return it;
  2565. }
  2566. iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2567. return iterator(common().generation_ptr());
  2568. }
  2569. const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2570. return const_cast<raw_hash_set*>(this)->begin();
  2571. }
  2572. const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2573. return iterator(common().generation_ptr());
  2574. }
  2575. const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2576. return begin();
  2577. }
  2578. const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
  2579. bool empty() const { return !size(); }
  2580. size_t size() const { return common().size(); }
  2581. size_t capacity() const {
  2582. const size_t cap = common().capacity();
  2583. // Compiler complains when using functions in assume so use local variables.
  2584. ABSL_ATTRIBUTE_UNUSED static constexpr bool kEnabled = SooEnabled();
  2585. ABSL_ATTRIBUTE_UNUSED static constexpr size_t kCapacity = SooCapacity();
  2586. ABSL_ASSUME(!kEnabled || cap >= kCapacity);
  2587. return cap;
  2588. }
  2589. size_t max_size() const {
  2590. return CapacityToGrowth(MaxValidCapacity<sizeof(slot_type)>());
  2591. }
  2592. ABSL_ATTRIBUTE_REINITIALIZES void clear() {
  2593. // Iterating over this container is O(bucket_count()). When bucket_count()
  2594. // is much greater than size(), iteration becomes prohibitively expensive.
  2595. // For clear() it is more important to reuse the allocated array when the
  2596. // container is small because allocation takes comparatively long time
  2597. // compared to destruction of the elements of the container. So we pick the
  2598. // largest bucket_count() threshold for which iteration is still fast and
  2599. // past that we simply deallocate the array.
  2600. const size_t cap = capacity();
  2601. if (cap == 0) {
  2602. // Already guaranteed to be empty; so nothing to do.
  2603. } else if (is_soo()) {
  2604. if (!empty()) destroy(soo_slot());
  2605. common().set_empty_soo();
  2606. } else {
  2607. destroy_slots();
  2608. ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128,
  2609. SooEnabled());
  2610. }
  2611. common().set_reserved_growth(0);
  2612. common().set_reservation_size(0);
  2613. }
  2614. // This overload kicks in when the argument is an rvalue of insertable and
  2615. // decomposable type other than init_type.
  2616. //
  2617. // flat_hash_map<std::string, int> m;
  2618. // m.insert(std::make_pair("abc", 42));
  2619. // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
  2620. // bug.
  2621. template <class T, RequiresInsertable<T> = 0, class T2 = T,
  2622. typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
  2623. T* = nullptr>
  2624. std::pair<iterator, bool> insert(T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2625. return emplace(std::forward<T>(value));
  2626. }
  2627. // This overload kicks in when the argument is a bitfield or an lvalue of
  2628. // insertable and decomposable type.
  2629. //
  2630. // union { int n : 1; };
  2631. // flat_hash_set<int> s;
  2632. // s.insert(n);
  2633. //
  2634. // flat_hash_set<std::string> s;
  2635. // const char* p = "hello";
  2636. // s.insert(p);
  2637. //
  2638. template <
  2639. class T, RequiresInsertable<const T&> = 0,
  2640. typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
  2641. std::pair<iterator, bool> insert(const T& value)
  2642. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2643. return emplace(value);
  2644. }
  2645. // This overload kicks in when the argument is an rvalue of init_type. Its
  2646. // purpose is to handle brace-init-list arguments.
  2647. //
  2648. // flat_hash_map<std::string, int> s;
  2649. // s.insert({"abc", 42});
  2650. std::pair<iterator, bool> insert(init_type&& value)
  2651. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2652. return emplace(std::move(value));
  2653. }
  2654. // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
  2655. // bug.
  2656. template <class T, RequiresInsertable<T> = 0, class T2 = T,
  2657. typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
  2658. T* = nullptr>
  2659. iterator insert(const_iterator, T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2660. return insert(std::forward<T>(value)).first;
  2661. }
  2662. template <
  2663. class T, RequiresInsertable<const T&> = 0,
  2664. typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
  2665. iterator insert(const_iterator,
  2666. const T& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2667. return insert(value).first;
  2668. }
  2669. iterator insert(const_iterator,
  2670. init_type&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2671. return insert(std::move(value)).first;
  2672. }
  2673. template <class InputIt>
  2674. void insert(InputIt first, InputIt last) {
  2675. for (; first != last; ++first) emplace(*first);
  2676. }
  2677. template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
  2678. void insert(std::initializer_list<T> ilist) {
  2679. insert(ilist.begin(), ilist.end());
  2680. }
  2681. void insert(std::initializer_list<init_type> ilist) {
  2682. insert(ilist.begin(), ilist.end());
  2683. }
  2684. insert_return_type insert(node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2685. if (!node) return {end(), false, node_type()};
  2686. const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
  2687. auto res = PolicyTraits::apply(
  2688. InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
  2689. elem);
  2690. if (res.second) {
  2691. CommonAccess::Reset(&node);
  2692. return {res.first, true, node_type()};
  2693. } else {
  2694. return {res.first, false, std::move(node)};
  2695. }
  2696. }
  2697. iterator insert(const_iterator,
  2698. node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2699. auto res = insert(std::move(node));
  2700. node = std::move(res.node);
  2701. return res.position;
  2702. }
  2703. // This overload kicks in if we can deduce the key from args. This enables us
  2704. // to avoid constructing value_type if an entry with the same key already
  2705. // exists.
  2706. //
  2707. // For example:
  2708. //
  2709. // flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
  2710. // // Creates no std::string copies and makes no heap allocations.
  2711. // m.emplace("abc", "xyz");
  2712. template <class... Args, typename std::enable_if<
  2713. IsDecomposable<Args...>::value, int>::type = 0>
  2714. std::pair<iterator, bool> emplace(Args&&... args)
  2715. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2716. return PolicyTraits::apply(EmplaceDecomposable{*this},
  2717. std::forward<Args>(args)...);
  2718. }
  2719. // This overload kicks in if we cannot deduce the key from args. It constructs
  2720. // value_type unconditionally and then either moves it into the table or
  2721. // destroys.
  2722. template <class... Args, typename std::enable_if<
  2723. !IsDecomposable<Args...>::value, int>::type = 0>
  2724. std::pair<iterator, bool> emplace(Args&&... args)
  2725. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2726. alignas(slot_type) unsigned char raw[sizeof(slot_type)];
  2727. slot_type* slot = to_slot(&raw);
  2728. construct(slot, std::forward<Args>(args)...);
  2729. const auto& elem = PolicyTraits::element(slot);
  2730. return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
  2731. }
  2732. template <class... Args>
  2733. iterator emplace_hint(const_iterator,
  2734. Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2735. return emplace(std::forward<Args>(args)...).first;
  2736. }
  2737. // Extension API: support for lazy emplace.
  2738. //
  2739. // Looks up key in the table. If found, returns the iterator to the element.
  2740. // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`,
  2741. // and returns an iterator to the new element.
  2742. //
  2743. // `f` must abide by several restrictions:
  2744. // - it MUST call `raw_hash_set::constructor` with arguments as if a
  2745. // `raw_hash_set::value_type` is constructed,
  2746. // - it MUST NOT access the container before the call to
  2747. // `raw_hash_set::constructor`, and
  2748. // - it MUST NOT erase the lazily emplaced element.
  2749. // Doing any of these is undefined behavior.
  2750. //
  2751. // For example:
  2752. //
  2753. // std::unordered_set<ArenaString> s;
  2754. // // Makes ArenaStr even if "abc" is in the map.
  2755. // s.insert(ArenaString(&arena, "abc"));
  2756. //
  2757. // flat_hash_set<ArenaStr> s;
  2758. // // Makes ArenaStr only if "abc" is not in the map.
  2759. // s.lazy_emplace("abc", [&](const constructor& ctor) {
  2760. // ctor(&arena, "abc");
  2761. // });
  2762. //
  2763. // WARNING: This API is currently experimental. If there is a way to implement
  2764. // the same thing with the rest of the API, prefer that.
  2765. class constructor {
  2766. friend class raw_hash_set;
  2767. public:
  2768. template <class... Args>
  2769. void operator()(Args&&... args) const {
  2770. assert(*slot_);
  2771. PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
  2772. *slot_ = nullptr;
  2773. }
  2774. private:
  2775. constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
  2776. allocator_type* alloc_;
  2777. slot_type** slot_;
  2778. };
  2779. template <class K = key_type, class F>
  2780. iterator lazy_emplace(const key_arg<K>& key,
  2781. F&& f) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2782. auto res = find_or_prepare_insert(key);
  2783. if (res.second) {
  2784. slot_type* slot = res.first.slot();
  2785. std::forward<F>(f)(constructor(&alloc_ref(), &slot));
  2786. assert(!slot);
  2787. }
  2788. return res.first;
  2789. }
  2790. // Extension API: support for heterogeneous keys.
  2791. //
  2792. // std::unordered_set<std::string> s;
  2793. // // Turns "abc" into std::string.
  2794. // s.erase("abc");
  2795. //
  2796. // flat_hash_set<std::string> s;
  2797. // // Uses "abc" directly without copying it into std::string.
  2798. // s.erase("abc");
  2799. template <class K = key_type>
  2800. size_type erase(const key_arg<K>& key) {
  2801. auto it = find(key);
  2802. if (it == end()) return 0;
  2803. erase(it);
  2804. return 1;
  2805. }
  2806. // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`,
  2807. // this method returns void to reduce algorithmic complexity to O(1). The
  2808. // iterator is invalidated, so any increment should be done before calling
  2809. // erase. In order to erase while iterating across a map, use the following
  2810. // idiom (which also works for some standard containers):
  2811. //
  2812. // for (auto it = m.begin(), end = m.end(); it != end;) {
  2813. // // `erase()` will invalidate `it`, so advance `it` first.
  2814. // auto copy_it = it++;
  2815. // if (<pred>) {
  2816. // m.erase(copy_it);
  2817. // }
  2818. // }
  2819. void erase(const_iterator cit) { erase(cit.inner_); }
  2820. // This overload is necessary because otherwise erase<K>(const K&) would be
  2821. // a better match if non-const iterator is passed as an argument.
  2822. void erase(iterator it) {
  2823. AssertIsFull(it.control(), it.generation(), it.generation_ptr(), "erase()");
  2824. destroy(it.slot());
  2825. if (is_soo()) {
  2826. common().set_empty_soo();
  2827. } else {
  2828. erase_meta_only(it);
  2829. }
  2830. }
  2831. iterator erase(const_iterator first,
  2832. const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2833. // We check for empty first because ClearBackingArray requires that
  2834. // capacity() > 0 as a precondition.
  2835. if (empty()) return end();
  2836. if (first == last) return last.inner_;
  2837. if (is_soo()) {
  2838. destroy(soo_slot());
  2839. common().set_empty_soo();
  2840. return end();
  2841. }
  2842. if (first == begin() && last == end()) {
  2843. // TODO(ezb): we access control bytes in destroy_slots so it could make
  2844. // sense to combine destroy_slots and ClearBackingArray to avoid cache
  2845. // misses when the table is large. Note that we also do this in clear().
  2846. destroy_slots();
  2847. ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true,
  2848. SooEnabled());
  2849. common().set_reserved_growth(common().reservation_size());
  2850. return end();
  2851. }
  2852. while (first != last) {
  2853. erase(first++);
  2854. }
  2855. return last.inner_;
  2856. }
  2857. // Moves elements from `src` into `this`.
  2858. // If the element already exists in `this`, it is left unmodified in `src`.
  2859. template <typename H, typename E>
  2860. void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT
  2861. assert(this != &src);
  2862. // Returns whether insertion took place.
  2863. const auto insert_slot = [this](slot_type* src_slot) {
  2864. return PolicyTraits::apply(InsertSlot<false>{*this, std::move(*src_slot)},
  2865. PolicyTraits::element(src_slot))
  2866. .second;
  2867. };
  2868. if (src.is_soo()) {
  2869. if (src.empty()) return;
  2870. if (insert_slot(src.soo_slot())) src.common().set_empty_soo();
  2871. return;
  2872. }
  2873. for (auto it = src.begin(), e = src.end(); it != e;) {
  2874. auto next = std::next(it);
  2875. if (insert_slot(it.slot())) src.erase_meta_only(it);
  2876. it = next;
  2877. }
  2878. }
  2879. template <typename H, typename E>
  2880. void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
  2881. merge(src);
  2882. }
  2883. node_type extract(const_iterator position) {
  2884. AssertIsFull(position.control(), position.inner_.generation(),
  2885. position.inner_.generation_ptr(), "extract()");
  2886. auto node = CommonAccess::Transfer<node_type>(alloc_ref(), position.slot());
  2887. if (is_soo()) {
  2888. common().set_empty_soo();
  2889. } else {
  2890. erase_meta_only(position);
  2891. }
  2892. return node;
  2893. }
  2894. template <
  2895. class K = key_type,
  2896. typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
  2897. node_type extract(const key_arg<K>& key) {
  2898. auto it = find(key);
  2899. return it == end() ? node_type() : extract(const_iterator{it});
  2900. }
  2901. void swap(raw_hash_set& that) noexcept(
  2902. IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
  2903. IsNoThrowSwappable<allocator_type>(
  2904. typename AllocTraits::propagate_on_container_swap{})) {
  2905. using std::swap;
  2906. swap_common(that);
  2907. swap(hash_ref(), that.hash_ref());
  2908. swap(eq_ref(), that.eq_ref());
  2909. SwapAlloc(alloc_ref(), that.alloc_ref(),
  2910. typename AllocTraits::propagate_on_container_swap{});
  2911. }
  2912. void rehash(size_t n) {
  2913. const size_t cap = capacity();
  2914. if (n == 0) {
  2915. if (cap == 0 || is_soo()) return;
  2916. if (empty()) {
  2917. ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
  2918. SooEnabled());
  2919. return;
  2920. }
  2921. if (fits_in_soo(size())) {
  2922. // When the table is already sampled, we keep it sampled.
  2923. if (infoz().IsSampled()) {
  2924. const size_t kInitialSampledCapacity = NextCapacity(SooCapacity());
  2925. if (capacity() > kInitialSampledCapacity) {
  2926. resize(kInitialSampledCapacity);
  2927. }
  2928. // This asserts that we didn't lose sampling coverage in `resize`.
  2929. assert(infoz().IsSampled());
  2930. return;
  2931. }
  2932. alignas(slot_type) unsigned char slot_space[sizeof(slot_type)];
  2933. slot_type* tmp_slot = to_slot(slot_space);
  2934. transfer(tmp_slot, begin().slot());
  2935. ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
  2936. SooEnabled());
  2937. transfer(soo_slot(), tmp_slot);
  2938. common().set_full_soo();
  2939. return;
  2940. }
  2941. }
  2942. // bitor is a faster way of doing `max` here. We will round up to the next
  2943. // power-of-2-minus-1, so bitor is good enough.
  2944. auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
  2945. // n == 0 unconditionally rehashes as per the standard.
  2946. if (n == 0 || m > cap) {
  2947. ABSL_RAW_CHECK(m <= MaxValidCapacity<sizeof(slot_type)>(),
  2948. "Hash table size overflow");
  2949. resize(m);
  2950. // This is after resize, to ensure that we have completed the allocation
  2951. // and have potentially sampled the hashtable.
  2952. infoz().RecordReservation(n);
  2953. }
  2954. }
  2955. void reserve(size_t n) {
  2956. const size_t max_size_before_growth =
  2957. is_soo() ? SooCapacity() : size() + growth_left();
  2958. if (n > max_size_before_growth) {
  2959. ABSL_RAW_CHECK(n <= max_size(), "Hash table size overflow");
  2960. size_t m = GrowthToLowerboundCapacity(n);
  2961. resize(NormalizeCapacity(m));
  2962. // This is after resize, to ensure that we have completed the allocation
  2963. // and have potentially sampled the hashtable.
  2964. infoz().RecordReservation(n);
  2965. }
  2966. common().reset_reserved_growth(n);
  2967. common().set_reservation_size(n);
  2968. }
  2969. // Extension API: support for heterogeneous keys.
  2970. //
  2971. // std::unordered_set<std::string> s;
  2972. // // Turns "abc" into std::string.
  2973. // s.count("abc");
  2974. //
  2975. // ch_set<std::string> s;
  2976. // // Uses "abc" directly without copying it into std::string.
  2977. // s.count("abc");
  2978. template <class K = key_type>
  2979. size_t count(const key_arg<K>& key) const {
  2980. return find(key) == end() ? 0 : 1;
  2981. }
  2982. // Issues CPU prefetch instructions for the memory needed to find or insert
  2983. // a key. Like all lookup functions, this support heterogeneous keys.
  2984. //
  2985. // NOTE: This is a very low level operation and should not be used without
  2986. // specific benchmarks indicating its importance.
  2987. template <class K = key_type>
  2988. void prefetch(const key_arg<K>& key) const {
  2989. if (SooEnabled() ? is_soo() : capacity() == 0) return;
  2990. (void)key;
  2991. // Avoid probing if we won't be able to prefetch the addresses received.
  2992. #ifdef ABSL_HAVE_PREFETCH
  2993. prefetch_heap_block();
  2994. auto seq = probe(common(), hash_ref()(key));
  2995. PrefetchToLocalCache(control() + seq.offset());
  2996. PrefetchToLocalCache(slot_array() + seq.offset());
  2997. #endif // ABSL_HAVE_PREFETCH
  2998. }
  2999. // The API of find() has two extensions.
  3000. //
  3001. // 1. The hash can be passed by the user. It must be equal to the hash of the
  3002. // key.
  3003. //
  3004. // 2. The type of the key argument doesn't have to be key_type. This is so
  3005. // called heterogeneous key support.
  3006. template <class K = key_type>
  3007. iterator find(const key_arg<K>& key,
  3008. size_t hash) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3009. AssertHashEqConsistent(key);
  3010. if (is_soo()) return find_soo(key);
  3011. return find_non_soo(key, hash);
  3012. }
  3013. template <class K = key_type>
  3014. iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3015. AssertHashEqConsistent(key);
  3016. if (is_soo()) return find_soo(key);
  3017. prefetch_heap_block();
  3018. return find_non_soo(key, hash_ref()(key));
  3019. }
  3020. template <class K = key_type>
  3021. const_iterator find(const key_arg<K>& key,
  3022. size_t hash) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3023. return const_cast<raw_hash_set*>(this)->find(key, hash);
  3024. }
  3025. template <class K = key_type>
  3026. const_iterator find(const key_arg<K>& key) const
  3027. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3028. return const_cast<raw_hash_set*>(this)->find(key);
  3029. }
  3030. template <class K = key_type>
  3031. bool contains(const key_arg<K>& key) const {
  3032. // Here neither the iterator returned by `find()` nor `end()` can be invalid
  3033. // outside of potential thread-safety issues.
  3034. // `find()`'s return value is constructed, used, and then destructed
  3035. // all in this context.
  3036. return !find(key).unchecked_equals(end());
  3037. }
  3038. template <class K = key_type>
  3039. std::pair<iterator, iterator> equal_range(const key_arg<K>& key)
  3040. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3041. auto it = find(key);
  3042. if (it != end()) return {it, std::next(it)};
  3043. return {it, it};
  3044. }
  3045. template <class K = key_type>
  3046. std::pair<const_iterator, const_iterator> equal_range(
  3047. const key_arg<K>& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3048. auto it = find(key);
  3049. if (it != end()) return {it, std::next(it)};
  3050. return {it, it};
  3051. }
  3052. size_t bucket_count() const { return capacity(); }
  3053. float load_factor() const {
  3054. return capacity() ? static_cast<double>(size()) / capacity() : 0.0;
  3055. }
  3056. float max_load_factor() const { return 1.0f; }
  3057. void max_load_factor(float) {
  3058. // Does nothing.
  3059. }
  3060. hasher hash_function() const { return hash_ref(); }
  3061. key_equal key_eq() const { return eq_ref(); }
  3062. allocator_type get_allocator() const { return alloc_ref(); }
  3063. friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
  3064. if (a.size() != b.size()) return false;
  3065. const raw_hash_set* outer = &a;
  3066. const raw_hash_set* inner = &b;
  3067. if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
  3068. for (const value_type& elem : *outer) {
  3069. auto it = PolicyTraits::apply(FindElement{*inner}, elem);
  3070. if (it == inner->end() || !(*it == elem)) return false;
  3071. }
  3072. return true;
  3073. }
  3074. friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
  3075. return !(a == b);
  3076. }
  3077. template <typename H>
  3078. friend typename std::enable_if<H::template is_hashable<value_type>::value,
  3079. H>::type
  3080. AbslHashValue(H h, const raw_hash_set& s) {
  3081. return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
  3082. s.size());
  3083. }
  3084. friend void swap(raw_hash_set& a,
  3085. raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
  3086. a.swap(b);
  3087. }
  3088. private:
  3089. template <class Container, typename Enabler>
  3090. friend struct absl::container_internal::hashtable_debug_internal::
  3091. HashtableDebugAccess;
  3092. friend struct absl::container_internal::HashtableFreeFunctionsAccess;
  3093. struct FindElement {
  3094. template <class K, class... Args>
  3095. const_iterator operator()(const K& key, Args&&...) const {
  3096. return s.find(key);
  3097. }
  3098. const raw_hash_set& s;
  3099. };
  3100. struct HashElement {
  3101. template <class K, class... Args>
  3102. size_t operator()(const K& key, Args&&...) const {
  3103. return h(key);
  3104. }
  3105. const hasher& h;
  3106. };
  3107. template <class K1>
  3108. struct EqualElement {
  3109. template <class K2, class... Args>
  3110. bool operator()(const K2& lhs, Args&&...) const {
  3111. return eq(lhs, rhs);
  3112. }
  3113. const K1& rhs;
  3114. const key_equal& eq;
  3115. };
  3116. struct EmplaceDecomposable {
  3117. template <class K, class... Args>
  3118. std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
  3119. auto res = s.find_or_prepare_insert(key);
  3120. if (res.second) {
  3121. s.emplace_at(res.first, std::forward<Args>(args)...);
  3122. }
  3123. return res;
  3124. }
  3125. raw_hash_set& s;
  3126. };
  3127. template <bool do_destroy>
  3128. struct InsertSlot {
  3129. template <class K, class... Args>
  3130. std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
  3131. auto res = s.find_or_prepare_insert(key);
  3132. if (res.second) {
  3133. s.transfer(res.first.slot(), &slot);
  3134. } else if (do_destroy) {
  3135. s.destroy(&slot);
  3136. }
  3137. return res;
  3138. }
  3139. raw_hash_set& s;
  3140. // Constructed slot. Either moved into place or destroyed.
  3141. slot_type&& slot;
  3142. };
  3143. // TODO(b/303305702): re-enable reentrant validation.
  3144. template <typename... Args>
  3145. inline void construct(slot_type* slot, Args&&... args) {
  3146. PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
  3147. }
  3148. inline void destroy(slot_type* slot) {
  3149. PolicyTraits::destroy(&alloc_ref(), slot);
  3150. }
  3151. inline void transfer(slot_type* to, slot_type* from) {
  3152. PolicyTraits::transfer(&alloc_ref(), to, from);
  3153. }
  3154. // TODO(b/289225379): consider having a helper class that has the impls for
  3155. // SOO functionality.
  3156. template <class K = key_type>
  3157. iterator find_soo(const key_arg<K>& key) {
  3158. assert(is_soo());
  3159. return empty() || !PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
  3160. PolicyTraits::element(soo_slot()))
  3161. ? end()
  3162. : soo_iterator();
  3163. }
  3164. template <class K = key_type>
  3165. iterator find_non_soo(const key_arg<K>& key, size_t hash) {
  3166. assert(!is_soo());
  3167. auto seq = probe(common(), hash);
  3168. const ctrl_t* ctrl = control();
  3169. while (true) {
  3170. Group g{ctrl + seq.offset()};
  3171. for (uint32_t i : g.Match(H2(hash))) {
  3172. if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
  3173. EqualElement<K>{key, eq_ref()},
  3174. PolicyTraits::element(slot_array() + seq.offset(i)))))
  3175. return iterator_at(seq.offset(i));
  3176. }
  3177. if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
  3178. seq.next();
  3179. assert(seq.index() <= capacity() && "full table!");
  3180. }
  3181. }
  3182. // Conditionally samples hashtablez for SOO tables. This should be called on
  3183. // insertion into an empty SOO table and in copy construction when the size
  3184. // can fit in SOO capacity.
  3185. inline HashtablezInfoHandle try_sample_soo() {
  3186. assert(is_soo());
  3187. if (!ShouldSampleHashtablezInfo<CharAlloc>()) return HashtablezInfoHandle{};
  3188. return Sample(sizeof(slot_type), sizeof(key_type), sizeof(value_type),
  3189. SooCapacity());
  3190. }
  3191. inline void destroy_slots() {
  3192. assert(!is_soo());
  3193. if (PolicyTraits::template destroy_is_trivial<Alloc>()) return;
  3194. IterateOverFullSlots(
  3195. common(), slot_array(),
  3196. [&](const ctrl_t*, slot_type* slot)
  3197. ABSL_ATTRIBUTE_ALWAYS_INLINE { this->destroy(slot); });
  3198. }
  3199. inline void dealloc() {
  3200. assert(capacity() != 0);
  3201. // Unpoison before returning the memory to the allocator.
  3202. SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * capacity());
  3203. infoz().Unregister();
  3204. Deallocate<BackingArrayAlignment(alignof(slot_type))>(
  3205. &alloc_ref(), common().backing_array_start(),
  3206. common().alloc_size(sizeof(slot_type), alignof(slot_type)));
  3207. }
  3208. inline void destructor_impl() {
  3209. if (capacity() == 0) return;
  3210. if (is_soo()) {
  3211. if (!empty()) {
  3212. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(destroy(soo_slot()));
  3213. }
  3214. return;
  3215. }
  3216. destroy_slots();
  3217. dealloc();
  3218. }
  3219. // Erases, but does not destroy, the value pointed to by `it`.
  3220. //
  3221. // This merely updates the pertinent control byte. This can be used in
  3222. // conjunction with Policy::transfer to move the object to another place.
  3223. void erase_meta_only(const_iterator it) {
  3224. assert(!is_soo());
  3225. EraseMetaOnly(common(), static_cast<size_t>(it.control() - control()),
  3226. sizeof(slot_type));
  3227. }
  3228. size_t hash_of(slot_type* slot) const {
  3229. return PolicyTraits::apply(HashElement{hash_ref()},
  3230. PolicyTraits::element(slot));
  3231. }
  3232. // Resizes table to the new capacity and move all elements to the new
  3233. // positions accordingly.
  3234. //
  3235. // Note that for better performance instead of
  3236. // find_first_non_full(common(), hash),
  3237. // HashSetResizeHelper::FindFirstNonFullAfterResize(
  3238. // common(), old_capacity, hash)
  3239. // can be called right after `resize`.
  3240. void resize(size_t new_capacity) {
  3241. raw_hash_set::resize_impl(common(), new_capacity, HashtablezInfoHandle{});
  3242. }
  3243. // As above, except that we also accept a pre-sampled, forced infoz for
  3244. // SOO tables, since they need to switch from SOO to heap in order to
  3245. // store the infoz.
  3246. void resize_with_soo_infoz(HashtablezInfoHandle forced_infoz) {
  3247. assert(forced_infoz.IsSampled());
  3248. raw_hash_set::resize_impl(common(), NextCapacity(SooCapacity()),
  3249. forced_infoz);
  3250. }
  3251. // Resizes set to the new capacity.
  3252. // It is a static function in order to use its pointer in GetPolicyFunctions.
  3253. ABSL_ATTRIBUTE_NOINLINE static void resize_impl(
  3254. CommonFields& common, size_t new_capacity,
  3255. HashtablezInfoHandle forced_infoz) {
  3256. raw_hash_set* set = reinterpret_cast<raw_hash_set*>(&common);
  3257. assert(IsValidCapacity(new_capacity));
  3258. assert(!set->fits_in_soo(new_capacity));
  3259. const bool was_soo = set->is_soo();
  3260. const bool had_soo_slot = was_soo && !set->empty();
  3261. const ctrl_t soo_slot_h2 =
  3262. had_soo_slot ? static_cast<ctrl_t>(H2(set->hash_of(set->soo_slot())))
  3263. : ctrl_t::kEmpty;
  3264. HashSetResizeHelper resize_helper(common, was_soo, had_soo_slot,
  3265. forced_infoz);
  3266. // Initialize HashSetResizeHelper::old_heap_or_soo_. We can't do this in
  3267. // HashSetResizeHelper constructor because it can't transfer slots when
  3268. // transfer_uses_memcpy is false.
  3269. // TODO(b/289225379): try to handle more of the SOO cases inside
  3270. // InitializeSlots. See comment on cl/555990034 snapshot #63.
  3271. if (PolicyTraits::transfer_uses_memcpy() || !had_soo_slot) {
  3272. resize_helper.old_heap_or_soo() = common.heap_or_soo();
  3273. } else {
  3274. set->transfer(set->to_slot(resize_helper.old_soo_data()),
  3275. set->soo_slot());
  3276. }
  3277. common.set_capacity(new_capacity);
  3278. // Note that `InitializeSlots` does different number initialization steps
  3279. // depending on the values of `transfer_uses_memcpy` and capacities.
  3280. // Refer to the comment in `InitializeSlots` for more details.
  3281. const bool grow_single_group =
  3282. resize_helper.InitializeSlots<CharAlloc, sizeof(slot_type),
  3283. PolicyTraits::transfer_uses_memcpy(),
  3284. SooEnabled(), alignof(slot_type)>(
  3285. common, CharAlloc(set->alloc_ref()), soo_slot_h2, sizeof(key_type),
  3286. sizeof(value_type));
  3287. // In the SooEnabled() case, capacity is never 0 so we don't check.
  3288. if (!SooEnabled() && resize_helper.old_capacity() == 0) {
  3289. // InitializeSlots did all the work including infoz().RecordRehash().
  3290. return;
  3291. }
  3292. assert(resize_helper.old_capacity() > 0);
  3293. // Nothing more to do in this case.
  3294. if (was_soo && !had_soo_slot) return;
  3295. slot_type* new_slots = set->slot_array();
  3296. if (grow_single_group) {
  3297. if (PolicyTraits::transfer_uses_memcpy()) {
  3298. // InitializeSlots did all the work.
  3299. return;
  3300. }
  3301. if (was_soo) {
  3302. set->transfer(new_slots + resize_helper.SooSlotIndex(),
  3303. to_slot(resize_helper.old_soo_data()));
  3304. return;
  3305. } else {
  3306. // We want GrowSizeIntoSingleGroup to be called here in order to make
  3307. // InitializeSlots not depend on PolicyTraits.
  3308. resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common,
  3309. set->alloc_ref());
  3310. }
  3311. } else {
  3312. // InitializeSlots prepares control bytes to correspond to empty table.
  3313. const auto insert_slot = [&](slot_type* slot) {
  3314. size_t hash = PolicyTraits::apply(HashElement{set->hash_ref()},
  3315. PolicyTraits::element(slot));
  3316. auto target = find_first_non_full(common, hash);
  3317. SetCtrl(common, target.offset, H2(hash), sizeof(slot_type));
  3318. set->transfer(new_slots + target.offset, slot);
  3319. return target.probe_length;
  3320. };
  3321. if (was_soo) {
  3322. insert_slot(to_slot(resize_helper.old_soo_data()));
  3323. return;
  3324. } else {
  3325. auto* old_slots = static_cast<slot_type*>(resize_helper.old_slots());
  3326. size_t total_probe_length = 0;
  3327. for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
  3328. if (IsFull(resize_helper.old_ctrl()[i])) {
  3329. total_probe_length += insert_slot(old_slots + i);
  3330. }
  3331. }
  3332. common.infoz().RecordRehash(total_probe_length);
  3333. }
  3334. }
  3335. resize_helper.DeallocateOld<alignof(slot_type)>(CharAlloc(set->alloc_ref()),
  3336. sizeof(slot_type));
  3337. }
  3338. // Casting directly from e.g. char* to slot_type* can cause compilation errors
  3339. // on objective-C. This function converts to void* first, avoiding the issue.
  3340. static slot_type* to_slot(void* buf) { return static_cast<slot_type*>(buf); }
  3341. // Requires that lhs does not have a full SOO slot.
  3342. static void move_common(bool that_is_full_soo, allocator_type& rhs_alloc,
  3343. CommonFields& lhs, CommonFields&& rhs) {
  3344. if (PolicyTraits::transfer_uses_memcpy() || !that_is_full_soo) {
  3345. lhs = std::move(rhs);
  3346. } else {
  3347. lhs.move_non_heap_or_soo_fields(rhs);
  3348. // TODO(b/303305702): add reentrancy guard.
  3349. PolicyTraits::transfer(&rhs_alloc, to_slot(lhs.soo_data()),
  3350. to_slot(rhs.soo_data()));
  3351. }
  3352. }
  3353. // Swaps common fields making sure to avoid memcpy'ing a full SOO slot if we
  3354. // aren't allowed to do so.
  3355. void swap_common(raw_hash_set& that) {
  3356. using std::swap;
  3357. if (PolicyTraits::transfer_uses_memcpy()) {
  3358. swap(common(), that.common());
  3359. return;
  3360. }
  3361. CommonFields tmp = CommonFields::CreateDefault<SooEnabled()>();
  3362. const bool that_is_full_soo = that.is_full_soo();
  3363. move_common(that_is_full_soo, that.alloc_ref(), tmp,
  3364. std::move(that.common()));
  3365. move_common(is_full_soo(), alloc_ref(), that.common(), std::move(common()));
  3366. move_common(that_is_full_soo, that.alloc_ref(), common(), std::move(tmp));
  3367. }
  3368. void maybe_increment_generation_or_rehash_on_move() {
  3369. if (!SwisstableGenerationsEnabled() || capacity() == 0 || is_soo()) {
  3370. return;
  3371. }
  3372. common().increment_generation();
  3373. if (!empty() && common().should_rehash_for_bug_detection_on_move()) {
  3374. resize(capacity());
  3375. }
  3376. }
  3377. template <bool propagate_alloc>
  3378. raw_hash_set& assign_impl(raw_hash_set&& that) {
  3379. // We don't bother checking for this/that aliasing. We just need to avoid
  3380. // breaking the invariants in that case.
  3381. destructor_impl();
  3382. move_common(that.is_full_soo(), that.alloc_ref(), common(),
  3383. std::move(that.common()));
  3384. // TODO(b/296061262): move instead of copying hash/eq/alloc.
  3385. hash_ref() = that.hash_ref();
  3386. eq_ref() = that.eq_ref();
  3387. CopyAlloc(alloc_ref(), that.alloc_ref(),
  3388. std::integral_constant<bool, propagate_alloc>());
  3389. that.common() = CommonFields::CreateDefault<SooEnabled()>();
  3390. maybe_increment_generation_or_rehash_on_move();
  3391. return *this;
  3392. }
  3393. raw_hash_set& move_elements_allocs_unequal(raw_hash_set&& that) {
  3394. const size_t size = that.size();
  3395. if (size == 0) return *this;
  3396. reserve(size);
  3397. for (iterator it = that.begin(); it != that.end(); ++it) {
  3398. insert(std::move(PolicyTraits::element(it.slot())));
  3399. that.destroy(it.slot());
  3400. }
  3401. if (!that.is_soo()) that.dealloc();
  3402. that.common() = CommonFields::CreateDefault<SooEnabled()>();
  3403. maybe_increment_generation_or_rehash_on_move();
  3404. return *this;
  3405. }
  3406. raw_hash_set& move_assign(raw_hash_set&& that,
  3407. std::true_type /*propagate_alloc*/) {
  3408. return assign_impl<true>(std::move(that));
  3409. }
  3410. raw_hash_set& move_assign(raw_hash_set&& that,
  3411. std::false_type /*propagate_alloc*/) {
  3412. if (alloc_ref() == that.alloc_ref()) {
  3413. return assign_impl<false>(std::move(that));
  3414. }
  3415. // Aliasing can't happen here because allocs would compare equal above.
  3416. assert(this != &that);
  3417. destructor_impl();
  3418. // We can't take over that's memory so we need to move each element.
  3419. // While moving elements, this should have that's hash/eq so copy hash/eq
  3420. // before moving elements.
  3421. // TODO(b/296061262): move instead of copying hash/eq.
  3422. hash_ref() = that.hash_ref();
  3423. eq_ref() = that.eq_ref();
  3424. return move_elements_allocs_unequal(std::move(that));
  3425. }
  3426. template <class K>
  3427. std::pair<iterator, bool> find_or_prepare_insert_soo(const K& key) {
  3428. if (empty()) {
  3429. const HashtablezInfoHandle infoz = try_sample_soo();
  3430. if (infoz.IsSampled()) {
  3431. resize_with_soo_infoz(infoz);
  3432. } else {
  3433. common().set_full_soo();
  3434. return {soo_iterator(), true};
  3435. }
  3436. } else if (PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
  3437. PolicyTraits::element(soo_slot()))) {
  3438. return {soo_iterator(), false};
  3439. } else {
  3440. resize(NextCapacity(SooCapacity()));
  3441. }
  3442. const size_t index =
  3443. PrepareInsertAfterSoo(hash_ref()(key), sizeof(slot_type), common());
  3444. return {iterator_at(index), true};
  3445. }
  3446. template <class K>
  3447. std::pair<iterator, bool> find_or_prepare_insert_non_soo(const K& key) {
  3448. assert(!is_soo());
  3449. prefetch_heap_block();
  3450. auto hash = hash_ref()(key);
  3451. auto seq = probe(common(), hash);
  3452. const ctrl_t* ctrl = control();
  3453. while (true) {
  3454. Group g{ctrl + seq.offset()};
  3455. for (uint32_t i : g.Match(H2(hash))) {
  3456. if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
  3457. EqualElement<K>{key, eq_ref()},
  3458. PolicyTraits::element(slot_array() + seq.offset(i)))))
  3459. return {iterator_at(seq.offset(i)), false};
  3460. }
  3461. auto mask_empty = g.MaskEmpty();
  3462. if (ABSL_PREDICT_TRUE(mask_empty)) {
  3463. size_t target = seq.offset(
  3464. GetInsertionOffset(mask_empty, capacity(), hash, control()));
  3465. return {iterator_at(PrepareInsertNonSoo(common(), hash,
  3466. FindInfo{target, seq.index()},
  3467. GetPolicyFunctions())),
  3468. true};
  3469. }
  3470. seq.next();
  3471. assert(seq.index() <= capacity() && "full table!");
  3472. }
  3473. }
  3474. protected:
  3475. // Asserts that hash and equal functors provided by the user are consistent,
  3476. // meaning that `eq(k1, k2)` implies `hash(k1)==hash(k2)`.
  3477. template <class K>
  3478. void AssertHashEqConsistent(ABSL_ATTRIBUTE_UNUSED const K& key) {
  3479. #ifndef NDEBUG
  3480. if (empty()) return;
  3481. const size_t hash_of_arg = hash_ref()(key);
  3482. const auto assert_consistent = [&](const ctrl_t*, slot_type* slot) {
  3483. const value_type& element = PolicyTraits::element(slot);
  3484. const bool is_key_equal =
  3485. PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, element);
  3486. if (!is_key_equal) return;
  3487. const size_t hash_of_slot =
  3488. PolicyTraits::apply(HashElement{hash_ref()}, element);
  3489. const bool is_hash_equal = hash_of_arg == hash_of_slot;
  3490. if (!is_hash_equal) {
  3491. // In this case, we're going to crash. Do a couple of other checks for
  3492. // idempotence issues. Recalculating hash/eq here is also convenient for
  3493. // debugging with gdb/lldb.
  3494. const size_t once_more_hash_arg = hash_ref()(key);
  3495. assert(hash_of_arg == once_more_hash_arg && "hash is not idempotent.");
  3496. const size_t once_more_hash_slot =
  3497. PolicyTraits::apply(HashElement{hash_ref()}, element);
  3498. assert(hash_of_slot == once_more_hash_slot &&
  3499. "hash is not idempotent.");
  3500. const bool once_more_eq =
  3501. PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, element);
  3502. assert(is_key_equal == once_more_eq && "equality is not idempotent.");
  3503. }
  3504. assert((!is_key_equal || is_hash_equal) &&
  3505. "eq(k1, k2) must imply that hash(k1) == hash(k2). "
  3506. "hash/eq functors are inconsistent.");
  3507. };
  3508. if (is_soo()) {
  3509. assert_consistent(/*unused*/ nullptr, soo_slot());
  3510. return;
  3511. }
  3512. // We only do validation for small tables so that it's constant time.
  3513. if (capacity() > 16) return;
  3514. IterateOverFullSlots(common(), slot_array(), assert_consistent);
  3515. #endif
  3516. }
  3517. // Attempts to find `key` in the table; if it isn't found, returns an iterator
  3518. // where the value can be inserted into, with the control byte already set to
  3519. // `key`'s H2. Returns a bool indicating whether an insertion can take place.
  3520. template <class K>
  3521. std::pair<iterator, bool> find_or_prepare_insert(const K& key) {
  3522. AssertHashEqConsistent(key);
  3523. if (is_soo()) return find_or_prepare_insert_soo(key);
  3524. return find_or_prepare_insert_non_soo(key);
  3525. }
  3526. // Constructs the value in the space pointed by the iterator. This only works
  3527. // after an unsuccessful find_or_prepare_insert() and before any other
  3528. // modifications happen in the raw_hash_set.
  3529. //
  3530. // PRECONDITION: iter was returned from find_or_prepare_insert(k), where k is
  3531. // the key decomposed from `forward<Args>(args)...`, and the bool returned by
  3532. // find_or_prepare_insert(k) was true.
  3533. // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
  3534. template <class... Args>
  3535. void emplace_at(iterator iter, Args&&... args) {
  3536. construct(iter.slot(), std::forward<Args>(args)...);
  3537. assert(PolicyTraits::apply(FindElement{*this}, *iter) == iter &&
  3538. "constructed value does not match the lookup key");
  3539. }
  3540. iterator iterator_at(size_t i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3541. return {control() + i, slot_array() + i, common().generation_ptr()};
  3542. }
  3543. const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3544. return const_cast<raw_hash_set*>(this)->iterator_at(i);
  3545. }
  3546. reference unchecked_deref(iterator it) { return it.unchecked_deref(); }
  3547. private:
  3548. friend struct RawHashSetTestOnlyAccess;
  3549. // The number of slots we can still fill without needing to rehash.
  3550. //
  3551. // This is stored separately due to tombstones: we do not include tombstones
  3552. // in the growth capacity, because we'd like to rehash when the table is
  3553. // otherwise filled with tombstones: otherwise, probe sequences might get
  3554. // unacceptably long without triggering a rehash. Callers can also force a
  3555. // rehash via the standard `rehash(0)`, which will recompute this value as a
  3556. // side-effect.
  3557. //
  3558. // See `CapacityToGrowth()`.
  3559. size_t growth_left() const {
  3560. assert(!is_soo());
  3561. return common().growth_left();
  3562. }
  3563. GrowthInfo& growth_info() {
  3564. assert(!is_soo());
  3565. return common().growth_info();
  3566. }
  3567. GrowthInfo growth_info() const {
  3568. assert(!is_soo());
  3569. return common().growth_info();
  3570. }
  3571. // Prefetch the heap-allocated memory region to resolve potential TLB and
  3572. // cache misses. This is intended to overlap with execution of calculating the
  3573. // hash for a key.
  3574. void prefetch_heap_block() const {
  3575. assert(!is_soo());
  3576. #if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
  3577. __builtin_prefetch(control(), 0, 1);
  3578. #endif
  3579. }
  3580. CommonFields& common() { return settings_.template get<0>(); }
  3581. const CommonFields& common() const { return settings_.template get<0>(); }
  3582. ctrl_t* control() const {
  3583. assert(!is_soo());
  3584. return common().control();
  3585. }
  3586. slot_type* slot_array() const {
  3587. assert(!is_soo());
  3588. return static_cast<slot_type*>(common().slot_array());
  3589. }
  3590. slot_type* soo_slot() {
  3591. assert(is_soo());
  3592. return static_cast<slot_type*>(common().soo_data());
  3593. }
  3594. const slot_type* soo_slot() const {
  3595. return const_cast<raw_hash_set*>(this)->soo_slot();
  3596. }
  3597. iterator soo_iterator() {
  3598. return {SooControl(), soo_slot(), common().generation_ptr()};
  3599. }
  3600. const_iterator soo_iterator() const {
  3601. return const_cast<raw_hash_set*>(this)->soo_iterator();
  3602. }
  3603. HashtablezInfoHandle infoz() {
  3604. assert(!is_soo());
  3605. return common().infoz();
  3606. }
  3607. hasher& hash_ref() { return settings_.template get<1>(); }
  3608. const hasher& hash_ref() const { return settings_.template get<1>(); }
  3609. key_equal& eq_ref() { return settings_.template get<2>(); }
  3610. const key_equal& eq_ref() const { return settings_.template get<2>(); }
  3611. allocator_type& alloc_ref() { return settings_.template get<3>(); }
  3612. const allocator_type& alloc_ref() const {
  3613. return settings_.template get<3>();
  3614. }
  3615. static const void* get_hash_ref_fn(const CommonFields& common) {
  3616. auto* h = reinterpret_cast<const raw_hash_set*>(&common);
  3617. return &h->hash_ref();
  3618. }
  3619. static void transfer_slot_fn(void* set, void* dst, void* src) {
  3620. auto* h = static_cast<raw_hash_set*>(set);
  3621. h->transfer(static_cast<slot_type*>(dst), static_cast<slot_type*>(src));
  3622. }
  3623. // Note: dealloc_fn will only be used if we have a non-standard allocator.
  3624. static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
  3625. auto* set = reinterpret_cast<raw_hash_set*>(&common);
  3626. // Unpoison before returning the memory to the allocator.
  3627. SanitizerUnpoisonMemoryRegion(common.slot_array(),
  3628. sizeof(slot_type) * common.capacity());
  3629. common.infoz().Unregister();
  3630. Deallocate<BackingArrayAlignment(alignof(slot_type))>(
  3631. &set->alloc_ref(), common.backing_array_start(),
  3632. common.alloc_size(sizeof(slot_type), alignof(slot_type)));
  3633. }
  3634. static const PolicyFunctions& GetPolicyFunctions() {
  3635. static constexpr PolicyFunctions value = {
  3636. sizeof(slot_type),
  3637. // TODO(b/328722020): try to type erase
  3638. // for standard layout and alignof(Hash) <= alignof(CommonFields).
  3639. std::is_empty<hasher>::value ? &GetHashRefForEmptyHasher
  3640. : &raw_hash_set::get_hash_ref_fn,
  3641. PolicyTraits::template get_hash_slot_fn<hasher>(),
  3642. PolicyTraits::transfer_uses_memcpy()
  3643. ? TransferRelocatable<sizeof(slot_type)>
  3644. : &raw_hash_set::transfer_slot_fn,
  3645. (std::is_same<SlotAlloc, std::allocator<slot_type>>::value
  3646. ? &DeallocateStandard<alignof(slot_type)>
  3647. : &raw_hash_set::dealloc_fn),
  3648. &raw_hash_set::resize_impl,
  3649. };
  3650. return value;
  3651. }
  3652. // Bundle together CommonFields plus other objects which might be empty.
  3653. // CompressedTuple will ensure that sizeof is not affected by any of the empty
  3654. // fields that occur after CommonFields.
  3655. absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
  3656. allocator_type>
  3657. settings_{CommonFields::CreateDefault<SooEnabled()>(), hasher{},
  3658. key_equal{}, allocator_type{}};
  3659. };
  3660. // Friend access for free functions in raw_hash_set.h.
  3661. struct HashtableFreeFunctionsAccess {
  3662. template <class Predicate, typename Set>
  3663. static typename Set::size_type EraseIf(Predicate& pred, Set* c) {
  3664. if (c->empty()) {
  3665. return 0;
  3666. }
  3667. if (c->is_soo()) {
  3668. auto it = c->soo_iterator();
  3669. if (!pred(*it)) {
  3670. assert(c->size() == 1 && "hash table was modified unexpectedly");
  3671. return 0;
  3672. }
  3673. c->destroy(it.slot());
  3674. c->common().set_empty_soo();
  3675. return 1;
  3676. }
  3677. ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = c->size();
  3678. size_t num_deleted = 0;
  3679. IterateOverFullSlots(
  3680. c->common(), c->slot_array(), [&](const ctrl_t* ctrl, auto* slot) {
  3681. if (pred(Set::PolicyTraits::element(slot))) {
  3682. c->destroy(slot);
  3683. EraseMetaOnly(c->common(), static_cast<size_t>(ctrl - c->control()),
  3684. sizeof(*slot));
  3685. ++num_deleted;
  3686. }
  3687. });
  3688. // NOTE: IterateOverFullSlots allow removal of the current element, so we
  3689. // verify the size additionally here.
  3690. assert(original_size_for_assert - num_deleted == c->size() &&
  3691. "hash table was modified unexpectedly");
  3692. return num_deleted;
  3693. }
  3694. template <class Callback, typename Set>
  3695. static void ForEach(Callback& cb, Set* c) {
  3696. if (c->empty()) {
  3697. return;
  3698. }
  3699. if (c->is_soo()) {
  3700. cb(*c->soo_iterator());
  3701. return;
  3702. }
  3703. using ElementTypeWithConstness = decltype(*c->begin());
  3704. IterateOverFullSlots(
  3705. c->common(), c->slot_array(), [&cb](const ctrl_t*, auto* slot) {
  3706. ElementTypeWithConstness& element = Set::PolicyTraits::element(slot);
  3707. cb(element);
  3708. });
  3709. }
  3710. };
  3711. // Erases all elements that satisfy the predicate `pred` from the container `c`.
  3712. template <typename P, typename H, typename E, typename A, typename Predicate>
  3713. typename raw_hash_set<P, H, E, A>::size_type EraseIf(
  3714. Predicate& pred, raw_hash_set<P, H, E, A>* c) {
  3715. return HashtableFreeFunctionsAccess::EraseIf(pred, c);
  3716. }
  3717. // Calls `cb` for all elements in the container `c`.
  3718. template <typename P, typename H, typename E, typename A, typename Callback>
  3719. void ForEach(Callback& cb, raw_hash_set<P, H, E, A>* c) {
  3720. return HashtableFreeFunctionsAccess::ForEach(cb, c);
  3721. }
  3722. template <typename P, typename H, typename E, typename A, typename Callback>
  3723. void ForEach(Callback& cb, const raw_hash_set<P, H, E, A>* c) {
  3724. return HashtableFreeFunctionsAccess::ForEach(cb, c);
  3725. }
  3726. namespace hashtable_debug_internal {
  3727. template <typename Set>
  3728. struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
  3729. using Traits = typename Set::PolicyTraits;
  3730. using Slot = typename Traits::slot_type;
  3731. static size_t GetNumProbes(const Set& set,
  3732. const typename Set::key_type& key) {
  3733. if (set.is_soo()) return 0;
  3734. size_t num_probes = 0;
  3735. size_t hash = set.hash_ref()(key);
  3736. auto seq = probe(set.common(), hash);
  3737. const ctrl_t* ctrl = set.control();
  3738. while (true) {
  3739. container_internal::Group g{ctrl + seq.offset()};
  3740. for (uint32_t i : g.Match(container_internal::H2(hash))) {
  3741. if (Traits::apply(
  3742. typename Set::template EqualElement<typename Set::key_type>{
  3743. key, set.eq_ref()},
  3744. Traits::element(set.slot_array() + seq.offset(i))))
  3745. return num_probes;
  3746. ++num_probes;
  3747. }
  3748. if (g.MaskEmpty()) return num_probes;
  3749. seq.next();
  3750. ++num_probes;
  3751. }
  3752. }
  3753. static size_t AllocatedByteSize(const Set& c) {
  3754. size_t capacity = c.capacity();
  3755. if (capacity == 0) return 0;
  3756. size_t m =
  3757. c.is_soo() ? 0 : c.common().alloc_size(sizeof(Slot), alignof(Slot));
  3758. size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
  3759. if (per_slot != ~size_t{}) {
  3760. m += per_slot * c.size();
  3761. } else {
  3762. for (auto it = c.begin(); it != c.end(); ++it) {
  3763. m += Traits::space_used(it.slot());
  3764. }
  3765. }
  3766. return m;
  3767. }
  3768. };
  3769. } // namespace hashtable_debug_internal
  3770. } // namespace container_internal
  3771. ABSL_NAMESPACE_END
  3772. } // namespace absl
  3773. #undef ABSL_SWISSTABLE_ENABLE_GENERATIONS
  3774. #undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED
  3775. #undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN
  3776. #endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_