// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// An open-addressing hashtable with quadratic probing.
//
// This is a low level hashtable on top of which different interfaces can be
// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
//
// The table interface is similar to that of std::unordered_set. Notable
// differences are that most member functions support heterogeneous keys when
// BOTH the hash and eq functions are marked as transparent. They do so by
// providing a typedef called `is_transparent`.
//
// When heterogeneous lookup is enabled, functions that take key_type act as if
// they have an overload set like:
//
//   iterator find(const key_type& key);
//   template <class K>
//   iterator find(const K& key);
//
//   size_type erase(const key_type& key);
//   template <class K>
//   size_type erase(const K& key);
//
//   std::pair<iterator, iterator> equal_range(const key_type& key);
//   template <class K>
//   std::pair<iterator, iterator> equal_range(const K& key);
//
// When heterogeneous lookup is disabled, only the explicit `key_type` overloads
// exist.
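//
// As an illustrative example (not part of this header's API surface), a
// container keyed on std::string whose hash and eq are both transparent can
// be queried with an absl::string_view without materializing a temporary
// std::string:
//
//   absl::flat_hash_set<std::string> names = {"abc"};
//   names.find(absl::string_view("abc"));  // heterogeneous lookup, no copy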
//
// In addition the pointer to element and iterator stability guarantees are
// weaker: all iterators and pointers are invalidated after a new element is
// inserted.
//
// IMPLEMENTATION DETAILS
//
// # Table Layout
//
// A raw_hash_set's backing array consists of control bytes followed by slots
// that may or may not contain objects.
//
// The layout of the backing array, for `capacity` slots, is thus, as a
// pseudo-struct:
//
//   struct BackingArray {
//     // Sampling handler. This field isn't present when the sampling is
//     // disabled or this allocation hasn't been selected for sampling.
//     HashtablezInfoHandle infoz_;
//     // The number of elements we can insert before growing the capacity.
//     size_t growth_left;
//     // Control bytes for the "real" slots.
//     ctrl_t ctrl[capacity];
//     // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
//     // stop and serves no other purpose.
//     ctrl_t sentinel;
//     // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
//     // that if a probe sequence picks a value near the end of `ctrl`,
//     // `Group` will have valid control bytes to look at.
//     ctrl_t clones[kWidth - 1];
//     // The actual slot data.
//     slot_type slots[capacity];
//   };
//
// The length of this array is computed by `RawHashSetLayout::alloc_size` below.
//
// Control bytes (`ctrl_t`) are bytes (collected into groups of a
// platform-specific size) that define the state of the corresponding slot in
// the slot array. Group manipulation is tightly optimized to be as efficient
// as possible: SSE and friends on x86, clever bit operations on other arches.
//
//      Group 1         Group 2         Group 3
// +---------------+---------------+---------------+
// | | | | | | | | | | | | | | | | | | | | | | | | |
// +---------------+---------------+---------------+
//
// Each control byte is either a special value for empty slots, deleted slots
// (sometimes called *tombstones*), or the end-of-table marker used by
// iterators, or, if occupied, seven bits (H2) from the hash of the value in the
// corresponding slot.
//
// Storing control bytes in a separate array also has beneficial cache effects,
// since more logical slots will fit into a cache line.
//
// # Small Object Optimization (SOO)
//
// When the size/alignment of the value_type and the capacity of the table are
// small, we enable small object optimization and store the values inline in
// the raw_hash_set object. This optimization allows us to avoid
// allocation/deallocation as well as cache/dTLB misses.
//
// # Hashing
//
// We compute two separate hashes, `H1` and `H2`, from the hash of an object.
// `H1(hash(x))` is an index into `slots`, and essentially the starting point
// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
// objects that cannot possibly be the one we are looking for.
//
// # Table operations.
//
// The key operations are `insert`, `find`, and `erase`.
//
// Since `insert` and `erase` are implemented in terms of `find`, we describe
// `find` first. To `find` a value `x`, we compute `hash(x)`. From
// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
// group of slots in some interesting order.
//
// We now walk through these indices. At each index, we select the entire group
// starting with that index and extract potential candidates: occupied slots
// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
// group, we stop and return an error. Each candidate slot `y` is compared with
// `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
// next probe index. Tombstones effectively behave like full slots that never
// match the value we're looking for.
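//
// In code, the walk described above looks roughly like the following sketch
// (the real `find` below adds prefetching, SOO handling, and assertions):
//
//   probe_seq<Group::kWidth> seq(H1(hash(x), ctrl), capacity);
//   while (true) {
//     Group g{ctrl + seq.offset()};
//     for (uint32_t i : g.Match(H2(hash(x)))) {
//       if (eq(slots[seq.offset(i)], x)) return &slots[seq.offset(i)];
//     }
//     if (g.MaskEmpty()) return nullptr;  // definitely not present
//     seq.next();
//   }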
//
// The `H2` bits ensure when we compare a slot to an object with `==`, we are
// likely to have actually found the object. That is, the chance is low that
// `==` is called and returns `false`. Thus, when we search for an object, we
// are unlikely to call `==` many times. This likelihood can be analyzed as
// follows (assuming that H2 is a random enough hash function).
//
// Let's assume that there are `k` "wrong" objects that must be examined in a
// probe sequence. For example, when doing a `find` on an object that is in the
// table, `k` is the number of objects between the start of the probe sequence
// and the final found object (not including the final found object). The
// expected number of objects with an H2 match is then `k/128`. Measurements
// and analysis indicate that even at high load factors, `k` is less than 32,
// meaning that the number of "false positive" comparisons we must perform is
// less than 1/8 per `find`.
//
// `insert` is implemented in terms of `unchecked_insert`, which inserts a
// value presumed to not be in the table (violating this requirement will cause
// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
// it, we construct a `probe_seq` once again, and use it to find the first
// group with an unoccupied (empty *or* deleted) slot. We place `x` into the
// first such slot in the group and mark it as full with `x`'s H2.
//
// To `insert`, we compose `unchecked_insert` with `find`. We compute `h(x)` and
// perform a `find` to see if it's already present; if it is, we're done. If
// it's not, we may decide the table is getting overcrowded (i.e. the load
// factor is greater than 7/8 for big tables; `is_small()` tables use a max load
// factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
// each element of the table into the new array (we know that no insertion here
// will insert an already-present value), and discard the old backing array. At
// this point, we may `unchecked_insert` the value `x`.
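//
// Putting the pieces together, `insert` is roughly the following sketch (the
// helper names are schematic; the real code also handles SOO, sampling, and
// rehashing for bug detection):
//
//   if (find(x) != end()) return;      // already present, nothing to do
//   if (growth_left() == 0) {
//     resize(next_capacity());         // unchecked_inserts every old element
//   }
//   unchecked_insert(x);               // place x into an empty/deleted slot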
//
// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
// presents a viable, initialized slot pointee to the caller.
//
// `erase` is implemented in terms of `erase_at`, which takes an index to a
// slot. Given an offset, we simply create a tombstone and destroy its contents.
// If we can prove that the slot would not appear in a probe sequence, we can
// mark the slot as empty instead. We can prove this by observing that if a
// group has any empty slots, it has never been full (assuming we never create
// an empty slot in a group with no empties, which this heuristic guarantees we
// never do) and find would stop at this group anyway (since it does not probe
// beyond groups with empties).
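//
// Schematically (helper names illustrative, not the literal code below):
//
//   void erase_at_sketch(size_t i) {
//     destroy(slots + i);
//     // If the group around slot i still contains an empty byte, no probe
//     // sequence could have walked past it, so the slot can safely go back
//     // to kEmpty; otherwise it must become a kDeleted tombstone.
//     SetCtrlSketch(ctrl, capacity, i,
//                   was_never_full(i) ? ctrl_t::kEmpty : ctrl_t::kDeleted);
//   }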
//
// `erase` is `erase_at` composed with `find`: if we
// have a value `x`, we can perform a `find`, and then `erase_at` the resulting
// slot.
//
// To iterate, we simply traverse the array, skipping empty and deleted slots
// and stopping when we hit a `kSentinel`.

#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <limits>
#include <memory>
#include <tuple>
#include <type_traits>
#include <utility>

#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/base/options.h"
#include "absl/base/port.h"
#include "absl/base/prefetch.h"
#include "absl/container/internal/common.h"  // IWYU pragma: export  // for node_handle
#include "absl/container/internal/common_policy_traits.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h"
#include "absl/container/internal/hash_policy_traits.h"
#include "absl/container/internal/hashtable_debug_hooks.h"
#include "absl/container/internal/hashtablez_sampler.h"
#include "absl/hash/hash.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/bits.h"
#include "absl/utility/utility.h"

#ifdef ABSL_INTERNAL_HAVE_SSE2
#include <emmintrin.h>
#endif

#ifdef ABSL_INTERNAL_HAVE_SSSE3
#include <tmmintrin.h>
#endif

#ifdef _MSC_VER
#include <intrin.h>
#endif

#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
#include <arm_neon.h>
#endif

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
#error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
#elif (defined(ABSL_HAVE_ADDRESS_SANITIZER) ||   \
       defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
       defined(ABSL_HAVE_MEMORY_SANITIZER)) &&   \
    !defined(NDEBUG_SANITIZER)  // If defined, performance is important.
// When compiled in sanitizer mode, we add generation integers to the backing
// array and iterators. In the backing array, we store the generation between
// the control bytes and the slots. When iterators are dereferenced, we assert
// that the container has not been mutated in a way that could cause iterator
// invalidation since the iterator was initialized.
#define ABSL_SWISSTABLE_ENABLE_GENERATIONS
#endif

#ifdef ABSL_SWISSTABLE_ASSERT
#error ABSL_SWISSTABLE_ASSERT cannot be directly set
#else
// We use this macro for assertions that users may see when the table is in an
// invalid state that sanitizers may help diagnose.
#define ABSL_SWISSTABLE_ASSERT(CONDITION) \
  assert((CONDITION) && "Try enabling sanitizers.")
#endif

// We use uint8_t so we don't need to worry about padding.
using GenerationType = uint8_t;

// A sentinel value for empty generations. Using 0 makes it easy to constexpr
// initialize an array of this value.
constexpr GenerationType SentinelEmptyGeneration() { return 0; }

constexpr GenerationType NextGeneration(GenerationType generation) {
  return ++generation == SentinelEmptyGeneration() ? ++generation : generation;
}
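// For example, NextGeneration(3) == 4, while at the uint8_t wraparound
// NextGeneration(255) == 1: the sentinel value 0 is skipped.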

#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
constexpr bool SwisstableGenerationsEnabled() { return true; }
constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); }
#else
constexpr bool SwisstableGenerationsEnabled() { return false; }
constexpr size_t NumGenerationBytes() { return 0; }
#endif

template <typename AllocType>
void SwapAlloc(AllocType& lhs, AllocType& rhs,
               std::true_type /* propagate_on_container_swap */) {
  using std::swap;
  swap(lhs, rhs);
}
template <typename AllocType>
void SwapAlloc(AllocType& lhs, AllocType& rhs,
               std::false_type /* propagate_on_container_swap */) {
  (void)lhs;
  (void)rhs;
  assert(lhs == rhs &&
         "It's UB to call swap with unequal non-propagating allocators.");
}

template <typename AllocType>
void CopyAlloc(AllocType& lhs, AllocType& rhs,
               std::true_type /* propagate_alloc */) {
  lhs = rhs;
}
template <typename AllocType>
void CopyAlloc(AllocType&, AllocType&, std::false_type /* propagate_alloc */) {}

// The state for a probe sequence.
//
// Currently, the sequence is a triangular progression of the form
//
//   p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
//
// The use of `Width` ensures that each probe step does not overlap groups;
// the sequence effectively outputs the addresses of *groups* (although not
// necessarily aligned to any boundary). The `Group` machinery allows us
// to check an entire group with minimal branching.
//
// Wrapping around at `mask + 1` is important, but not for the obvious reason.
// As described above, the first few entries of the control byte array
// are mirrored at the end of the array, which `Group` will find and use
// for selecting candidates. However, when those candidates' slots are
// actually inspected, there are no corresponding slots for the cloned bytes,
// so we need to make sure we've treated those offsets as "wrapping around".
//
// It turns out that this probe sequence visits every group exactly once if the
// number of groups is a power of two, since (i^2+i)/2 is a bijection in
// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
template <size_t Width>
class probe_seq {
 public:
  // Creates a new probe sequence using `hash` as the initial value of the
  // sequence and `mask` (usually the capacity of the table) as the mask to
  // apply to each value in the progression.
  probe_seq(size_t hash, size_t mask) {
    ABSL_SWISSTABLE_ASSERT(((mask + 1) & mask) == 0 && "not a mask");
    mask_ = mask;
    offset_ = hash & mask_;
  }

  // The offset within the table, i.e., the value `p(i)` above.
  size_t offset() const { return offset_; }
  size_t offset(size_t i) const { return (offset_ + i) & mask_; }

  void next() {
    index_ += Width;
    offset_ += index_;
    offset_ &= mask_;
  }

  // 0-based probe index, a multiple of `Width`.
  size_t index() const { return index_; }

 private:
  size_t mask_;
  size_t offset_;
  size_t index_ = 0;
};
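// As an illustration, with `Width == 16` and `mask == 63` (a 64-slot table,
// i.e. four groups) and a hash whose low bits are 5, the sequence yields
//
//   probe_seq<16> seq(/*hash=*/5, /*mask=*/63);
//   seq.offset();              // 5
//   seq.next(); seq.offset();  // 21
//   seq.next(); seq.offset();  // 53
//   seq.next(); seq.offset();  // 37
//
// visiting each of the four 16-wide probe windows exactly once before
// wrapping.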

template <class ContainerKey, class Hash, class Eq>
struct RequireUsableKey {
  template <class PassedKey, class... Args>
  std::pair<
      decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
      decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
                                         std::declval<const PassedKey&>()))>*
  operator()(const PassedKey&, const Args&...) const;
};

template <class E, class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable : std::false_type {};

template <class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable<
    absl::void_t<decltype(Policy::apply(
        RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
        std::declval<Ts>()...))>,
    Policy, Hash, Eq, Ts...> : std::true_type {};

// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
template <class T>
constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
  using std::swap;
  return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
}
template <class T>
constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
  return false;
}

template <typename T>
uint32_t TrailingZeros(T x) {
  ABSL_ASSUME(x != 0);
  return static_cast<uint32_t>(countr_zero(x));
}

// An 8-byte bitmask with the most significant bit set in every byte.
constexpr uint64_t kMsbs8Bytes = 0x8080808080808080ULL;

// An abstract bitmask, such as that emitted by a SIMD instruction.
//
// Specifically, this type implements a simple bitset whose representation is
// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
// of abstract bits in the bitset, while `Shift` is the log-base-two of the
// width of an abstract bit in the representation.
// This mask provides operations for any number of real bits set in an abstract
// bit. To add iteration on top of that, the implementation must guarantee that
// no bit other than the most significant real bit is set in a set abstract bit.
template <class T, int SignificantBits, int Shift = 0>
class NonIterableBitMask {
 public:
  explicit NonIterableBitMask(T mask) : mask_(mask) {}

  explicit operator bool() const { return this->mask_ != 0; }

  // Returns the index of the lowest *abstract* bit set in `self`.
  uint32_t LowestBitSet() const {
    return container_internal::TrailingZeros(mask_) >> Shift;
  }

  // Returns the index of the highest *abstract* bit set in `self`.
  uint32_t HighestBitSet() const {
    return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
  }

  // Returns the number of trailing zero *abstract* bits.
  uint32_t TrailingZeros() const {
    return container_internal::TrailingZeros(mask_) >> Shift;
  }

  // Returns the number of leading zero *abstract* bits.
  uint32_t LeadingZeros() const {
    constexpr int total_significant_bits = SignificantBits << Shift;
    constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
    return static_cast<uint32_t>(
               countl_zero(static_cast<T>(mask_ << extra_bits))) >>
           Shift;
  }

  T mask_;
};
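// For instance, with the byte-wide representation (`Shift == 3`), the mask
// 0x0000000080800000 has abstract bits 2 and 3 set:
//
//   NonIterableBitMask<uint64_t, 8, 3> m(0x0000000080800000);
//   m.LowestBitSet();   // 2
//   m.HighestBitSet();  // 3
//
// This matches the iteration example given for `BitMask` below.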

// A mask that can be iterated over.
//
// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
// If NullifyBitsOnIteration is true (only allowed for Shift == 3),
// a non-zero abstract bit is allowed to have additional bits set
// (e.g., `0xff`, `0x83` and `0x9c` are ok, but `0x6f` is not).
//
// For example:
//   for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
//   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
template <class T, int SignificantBits, int Shift = 0,
          bool NullifyBitsOnIteration = false>
class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
  using Base = NonIterableBitMask<T, SignificantBits, Shift>;
  static_assert(std::is_unsigned<T>::value, "");
  static_assert(Shift == 0 || Shift == 3, "");
  static_assert(!NullifyBitsOnIteration || Shift == 3, "");

 public:
  explicit BitMask(T mask) : Base(mask) {
    if (Shift == 3 && !NullifyBitsOnIteration) {
      ABSL_SWISSTABLE_ASSERT(this->mask_ == (this->mask_ & kMsbs8Bytes));
    }
  }
  // BitMask is an iterator over the indices of its abstract bits.
  using value_type = int;
  using iterator = BitMask;
  using const_iterator = BitMask;

  BitMask& operator++() {
    if (Shift == 3 && NullifyBitsOnIteration) {
      this->mask_ &= kMsbs8Bytes;
    }
    this->mask_ &= (this->mask_ - 1);
    return *this;
  }

  uint32_t operator*() const { return Base::LowestBitSet(); }

  BitMask begin() const { return *this; }
  BitMask end() const { return BitMask(0); }

 private:
  friend bool operator==(const BitMask& a, const BitMask& b) {
    return a.mask_ == b.mask_;
  }
  friend bool operator!=(const BitMask& a, const BitMask& b) {
    return a.mask_ != b.mask_;
  }
};

using h2_t = uint8_t;

// The values here are selected for maximum performance. See the static asserts
// below for details.

// A `ctrl_t` is a single control byte, which can have one of four
// states: empty, deleted, full (which has an associated seven-bit h2_t value)
// and the sentinel. They have the following bit patterns:
//
//      empty: 1 0 0 0 0 0 0 0
//    deleted: 1 1 1 1 1 1 1 0
//       full: 0 h h h h h h h  // h represents the hash bits.
//   sentinel: 1 1 1 1 1 1 1 1
//
// These values are specifically tuned for SSE-flavored SIMD.
// The static_asserts below detail the source of these choices.
//
// We use an enum class so that when strict aliasing is enabled, the compiler
// knows ctrl_t doesn't alias other types.
enum class ctrl_t : int8_t {
  kEmpty = -128,   // 0b10000000
  kDeleted = -2,   // 0b11111110
  kSentinel = -1,  // 0b11111111
};
static_assert(
    (static_cast<int8_t>(ctrl_t::kEmpty) &
     static_cast<int8_t>(ctrl_t::kDeleted) &
     static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
    "Special markers need to have the MSB to make checking for them efficient");
static_assert(
    ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
    "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
    "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
static_assert(
    ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
    "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
    "registers (pcmpeqd xmm, xmm)");
static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
              "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
              "existence efficient (psignb xmm, xmm)");
static_assert(
    (~static_cast<int8_t>(ctrl_t::kEmpty) &
     ~static_cast<int8_t>(ctrl_t::kDeleted) &
     static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
    "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
    "shared by ctrl_t::kSentinel to make the scalar test for "
    "MaskEmptyOrDeleted() efficient");
static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
              "ctrl_t::kDeleted must be -2 to make the implementation of "
              "ConvertSpecialToEmptyAndFullToDeleted efficient");

// See definition comment for why this is size 32.
ABSL_DLL extern const ctrl_t kEmptyGroup[32];

// We use these sentinel capacity values in debug mode to indicate different
// classes of bugs.
enum InvalidCapacity : size_t {
  kAboveMaxValidCapacity = ~size_t{} - 100,
  kReentrance,
  kDestroyed,

  // These two must be last because we use `>= kMovedFrom` to mean moved-from.
  kMovedFrom,
  kSelfMovedFrom,
};

// Returns a pointer to a control byte group that can be used by empty tables.
inline ctrl_t* EmptyGroup() {
  // Const must be cast away here; no uses of this function will actually write
  // to it because it is only used for empty tables.
  return const_cast<ctrl_t*>(kEmptyGroup + 16);
}

// For use in SOO iterators.
// TODO(b/289225379): we could potentially get rid of this by adding an is_soo
// bit in iterators. This would add branches but reduce cache misses.
ABSL_DLL extern const ctrl_t kSooControl[17];

// Returns a pointer to a full byte followed by a sentinel byte.
inline ctrl_t* SooControl() {
  // Const must be cast away here; no uses of this function will actually write
  // to it because it is only used for SOO iterators.
  return const_cast<ctrl_t*>(kSooControl);
}
// Whether ctrl is from the SooControl array.
inline bool IsSooControl(const ctrl_t* ctrl) { return ctrl == SooControl(); }

// Returns a pointer to a generation to use for an empty hashtable.
GenerationType* EmptyGeneration();

// Returns whether `generation` is a generation for an empty hashtable that
// could be returned by EmptyGeneration().
inline bool IsEmptyGeneration(const GenerationType* generation) {
  return *generation == SentinelEmptyGeneration();
}

// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
// randomize insertion order within groups.
bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
                                   const ctrl_t* ctrl);

ABSL_ATTRIBUTE_ALWAYS_INLINE inline bool ShouldInsertBackwards(
    ABSL_ATTRIBUTE_UNUSED size_t capacity, ABSL_ATTRIBUTE_UNUSED size_t hash,
    ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {
#if defined(NDEBUG)
  return false;
#else
  return ShouldInsertBackwardsForDebug(capacity, hash, ctrl);
#endif
}

// Returns insert position for the given mask.
// We want to add entropy even when ASLR is not enabled.
// In debug builds we will randomly insert in either the front or back of
// the group.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
template <class Mask>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline auto GetInsertionOffset(
    Mask mask, ABSL_ATTRIBUTE_UNUSED size_t capacity,
    ABSL_ATTRIBUTE_UNUSED size_t hash,
    ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {
#if defined(NDEBUG)
  return mask.LowestBitSet();
#else
  return ShouldInsertBackwardsForDebug(capacity, hash, ctrl)
             ? mask.HighestBitSet()
             : mask.LowestBitSet();
#endif
}

// Returns a per-table hash salt, which changes on resize. This gets mixed into
// H1 to randomize iteration order per-table.
//
// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
// non-determinism of iteration order in most cases.
inline size_t PerTableSalt(const ctrl_t* ctrl) {
  // The low bits of the pointer have little or no entropy because of
  // alignment. We shift the pointer to try to use higher entropy bits. A
  // good number seems to be 12 bits, because that aligns with page size.
  return reinterpret_cast<uintptr_t>(ctrl) >> 12;
}

// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
inline size_t H1(size_t hash, const ctrl_t* ctrl) {
  return (hash >> 7) ^ PerTableSalt(ctrl);
}

// Extracts the H2 portion of a hash: the 7 bits not used for H1.
//
// These are used as an occupied control byte.
inline h2_t H2(size_t hash) { return hash & 0x7F; }
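// For example, for the (illustrative) hash value 0x1234:
//
//   H2(0x1234) == 0x34;  // the low 7 bits
//   H1(0x1234, ctrl) == (0x1234 >> 7) ^ PerTableSalt(ctrl)
//                    == 0x24 ^ PerTableSalt(ctrl);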

// Helpers for checking the state of a control byte.
inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
inline bool IsFull(ctrl_t c) {
  // Cast `c` to the underlying type instead of casting `0` to `ctrl_t` as `0`
  // is not a value in the enum. Both ways are equivalent, but this way makes
  // linters happier.
  return static_cast<std::underlying_type_t<ctrl_t>>(c) >= 0;
}
inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }

#ifdef ABSL_INTERNAL_HAVE_SSE2
// Quick reference guide for intrinsics used below:
//
// * __m128i: An XMM (128-bit) word.
//
// * _mm_setzero_si128: Returns a zero vector.
// * _mm_set1_epi8:     Returns a vector with the same i8 in each lane.
//
// * _mm_subs_epi8:    Saturating-subtracts two i8 vectors.
// * _mm_and_si128:    Ands two i128s together.
// * _mm_or_si128:     Ors two i128s together.
// * _mm_andnot_si128: And-nots two i128s together.
//
// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
//                   filling each lane with 0x00 or 0xff.
// * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
//
// * _mm_loadu_si128:  Performs an unaligned load of an i128.
// * _mm_storeu_si128: Performs an unaligned store of an i128.
//
// * _mm_sign_epi8:     Retains, negates, or zeroes each i8 lane of the first
//                      argument if the corresponding lane of the second
//                      argument is positive, negative, or zero, respectively.
// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
//                      bitmask consisting of those bits.
// * _mm_shuffle_epi8:  Selects i8s from the first argument, using the low
//                      four bits of each i8 lane in the second argument as
//                      indices.

// https://github.com/abseil/abseil-cpp/issues/209
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
// Work around this by using the portable implementation of Group
// when using -funsigned-char under GCC.
inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
#if defined(__GNUC__) && !defined(__clang__)
  if (std::is_unsigned<char>::value) {
    const __m128i mask = _mm_set1_epi8(0x80);
    const __m128i diff = _mm_subs_epi8(b, a);
    return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
  }
#endif
  return _mm_cmpgt_epi8(a, b);
}

struct GroupSse2Impl {
  static constexpr size_t kWidth = 16;  // the number of slots per group

  explicit GroupSse2Impl(const ctrl_t* pos) {
    ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
  }

  // Returns a bitmask representing the positions of slots that match hash.
  BitMask<uint16_t, kWidth> Match(h2_t hash) const {
    auto match = _mm_set1_epi8(static_cast<char>(hash));
    return BitMask<uint16_t, kWidth>(
        static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
  }

  // Returns a bitmask representing the positions of empty slots.
  NonIterableBitMask<uint16_t, kWidth> MaskEmpty() const {
#ifdef ABSL_INTERNAL_HAVE_SSSE3
    // This only works because ctrl_t::kEmpty is -128.
    return NonIterableBitMask<uint16_t, kWidth>(
        static_cast<uint16_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
#else
    auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
    return NonIterableBitMask<uint16_t, kWidth>(
        static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
#endif
  }

  // Returns a bitmask representing the positions of full slots.
  // Note: for `is_small()` tables group may contain the "same" slot twice:
  // original and mirrored.
  BitMask<uint16_t, kWidth> MaskFull() const {
    return BitMask<uint16_t, kWidth>(
        static_cast<uint16_t>(_mm_movemask_epi8(ctrl) ^ 0xffff));
  }

  // Returns a bitmask representing the positions of non full slots.
  // Note: this includes: kEmpty, kDeleted, kSentinel.
  // It is useful in contexts when kSentinel is not present.
  auto MaskNonFull() const {
    return BitMask<uint16_t, kWidth>(
        static_cast<uint16_t>(_mm_movemask_epi8(ctrl)));
  }

  // Returns a bitmask representing the positions of empty or deleted slots.
  NonIterableBitMask<uint16_t, kWidth> MaskEmptyOrDeleted() const {
    auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
    return NonIterableBitMask<uint16_t, kWidth>(static_cast<uint16_t>(
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
  }

  // Returns the number of trailing empty or deleted elements in the group.
  uint32_t CountLeadingEmptyOrDeleted() const {
    auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
    return TrailingZeros(static_cast<uint32_t>(
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
  }

  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
    auto msbs = _mm_set1_epi8(static_cast<char>(-128));
    auto x126 = _mm_set1_epi8(126);
#ifdef ABSL_INTERNAL_HAVE_SSSE3
    auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
#else
    auto zero = _mm_setzero_si128();
    auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
    auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
#endif
    _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
  }

  __m128i ctrl;
};
#endif  // ABSL_INTERNAL_HAVE_SSE2

#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
struct GroupAArch64Impl {
  static constexpr size_t kWidth = 8;

  explicit GroupAArch64Impl(const ctrl_t* pos) {
    ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
  }

  auto Match(h2_t hash) const {
    uint8x8_t dup = vdup_n_u8(hash);
    auto mask = vceq_u8(ctrl, dup);
    return BitMask<uint64_t, kWidth, /*Shift=*/3,
                   /*NullifyBitsOnIteration=*/true>(
        vget_lane_u64(vreinterpret_u64_u8(mask), 0));
  }

  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
    uint64_t mask =
        vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
                          vreinterpret_s8_u8(ctrl))),
                      0);
    return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
  }

  // Returns a bitmask representing the positions of full slots.
  // Note: for `is_small()` tables group may contain the "same" slot twice:
  // original and mirrored.
  auto MaskFull() const {
    uint64_t mask = vget_lane_u64(
        vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl),
                                    vdup_n_s8(static_cast<int8_t>(0)))),
        0);
    return BitMask<uint64_t, kWidth, /*Shift=*/3,
                   /*NullifyBitsOnIteration=*/true>(mask);
  }

  // Returns a bitmask representing the positions of non full slots.
  // Note: this includes: kEmpty, kDeleted, kSentinel.
  // It is useful in contexts when kSentinel is not present.
  auto MaskNonFull() const {
    uint64_t mask = vget_lane_u64(
        vreinterpret_u64_u8(vclt_s8(vreinterpret_s8_u8(ctrl),
                                    vdup_n_s8(static_cast<int8_t>(0)))),
        0);
    return BitMask<uint64_t, kWidth, /*Shift=*/3,
                   /*NullifyBitsOnIteration=*/true>(mask);
  }

  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
    uint64_t mask =
        vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
                          vreinterpret_s8_u8(ctrl))),
                      0);
    return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
  }

  uint32_t CountLeadingEmptyOrDeleted() const {
    uint64_t mask =
        vget_lane_u64(vreinterpret_u64_u8(vcle_s8(
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
                          vreinterpret_s8_u8(ctrl))),
                      0);
    // Similar to MaskEmptyOrDeleted() but we invert the logic to invert the
    // produced bitfield. We then count number of trailing zeros.
    // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
    // so we should be fine.
    return static_cast<uint32_t>(countr_zero(mask)) >> 3;
  }

  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
    uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
    constexpr uint64_t slsbs = 0x0202020202020202ULL;
    constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
    auto x = slsbs & (mask >> 6);
    auto res = (x + midbs) | kMsbs8Bytes;
    little_endian::Store64(dst, res);
  }

  uint8x8_t ctrl;
};
#endif  // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN

struct GroupPortableImpl {
  static constexpr size_t kWidth = 8;

  explicit GroupPortableImpl(const ctrl_t* pos)
      : ctrl(little_endian::Load64(pos)) {}

  BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
    // For the technique, see:
    // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
    // (Determine if a word has a byte equal to n).
    //
    // Caveat: there are false positives but:
    // - they only occur if there is a real match
    // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
    // - they will be handled gracefully by subsequent checks in code
    //
    // Example:
    //   v = 0x1716151413121110
    //   hash = 0x12
    //   retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
    auto x = ctrl ^ (lsbs * hash);
    return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & kMsbs8Bytes);
  }

  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 6)) &
                                                   kMsbs8Bytes);
  }

  // Returns a bitmask representing the positions of full slots.
  // Note: for `is_small()` tables group may contain the "same" slot twice:
  // original and mirrored.
  BitMask<uint64_t, kWidth, 3> MaskFull() const {
    return BitMask<uint64_t, kWidth, 3>((ctrl ^ kMsbs8Bytes) & kMsbs8Bytes);
  }

  // Returns a bitmask representing the positions of non full slots.
  // Note: this includes: kEmpty, kDeleted, kSentinel.
  // It is useful in contexts when kSentinel is not present.
  auto MaskNonFull() const {
    return BitMask<uint64_t, kWidth, 3>(ctrl & kMsbs8Bytes);
  }

  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 7)) &
                                                   kMsbs8Bytes);
  }

  uint32_t CountLeadingEmptyOrDeleted() const {
    // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
    // kDeleted. We lower all other bits and count number of trailing zeros.
    constexpr uint64_t bits = 0x0101010101010101ULL;
    return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >>
                                 3);
  }

  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
    auto x = ctrl & kMsbs8Bytes;
    auto res = (~x + (x >> 7)) & ~lsbs;
    little_endian::Store64(dst, res);
  }

  uint64_t ctrl;
};
  834. #ifdef ABSL_INTERNAL_HAVE_SSE2
  835. using Group = GroupSse2Impl;
  836. using GroupFullEmptyOrDeleted = GroupSse2Impl;
  837. #elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
  838. using Group = GroupAArch64Impl;
839. // For AArch64, we use the portable implementation for counting and masking
840. // full, empty, or deleted group elements. This avoids the latency of moving
841. // between general-purpose and NEON registers when doing so provides no
842. // benefit. Using NEON is profitable when we call Match(), but not when we
843. // only perform the *EmptyOrDeleted and MaskFull operations.
  844. // It is difficult to make a similar approach beneficial on other architectures
  845. // such as x86 since they have much lower GPR <-> vector register transfer
  846. // latency and 16-wide Groups.
  847. using GroupFullEmptyOrDeleted = GroupPortableImpl;
  848. #else
  849. using Group = GroupPortableImpl;
  850. using GroupFullEmptyOrDeleted = GroupPortableImpl;
  851. #endif
  852. // When there is an insertion with no reserved growth, we rehash with
  853. // probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
  854. // constant divided by capacity ensures that inserting N elements is still O(N)
  855. // in the average case. Using the constant 16 means that we expect to rehash ~8
856. // times more often than when generations are disabled. Per capacity growth,
857. // the expected number of extra rehashes we add is rehash_probability *
858. // #insertions = (16/capacity) * ((7/8 - 7/16) * capacity) = ~7.
  859. inline size_t RehashProbabilityConstant() { return 16; }
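// Hedged arithmetic sketch of the comment above (illustrative only): the
// capacity factor cancels, so the expected number of extra rehashes per
// capacity growth is RehashProbabilityConstant() * (7/8 - 7/16) = 16 * 7/16
// = 7, independent of the capacity. The constant 16 is written out here
// because RehashProbabilityConstant() is not constexpr.
static_assert(16 * 7 / 16 == 7,
              "~7 expected extra rehashes per capacity growth");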
  860. class CommonFieldsGenerationInfoEnabled {
  861. // A sentinel value for reserved_growth_ indicating that we just ran out of
  862. // reserved growth on the last insertion. When reserve is called and then
  863. // insertions take place, reserved_growth_'s state machine is N, ..., 1,
  864. // kReservedGrowthJustRanOut, 0.
  865. static constexpr size_t kReservedGrowthJustRanOut =
  866. (std::numeric_limits<size_t>::max)();
  867. public:
  868. CommonFieldsGenerationInfoEnabled() = default;
  869. CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that)
  870. : reserved_growth_(that.reserved_growth_),
  871. reservation_size_(that.reservation_size_),
  872. generation_(that.generation_) {
  873. that.reserved_growth_ = 0;
  874. that.reservation_size_ = 0;
  875. that.generation_ = EmptyGeneration();
  876. }
  877. CommonFieldsGenerationInfoEnabled& operator=(
  878. CommonFieldsGenerationInfoEnabled&&) = default;
879. // Whether we should rehash on insert in order to detect bugs caused by using
880. // invalid references. We rehash on the first insertion after reserved_growth_
881. // reaches 0 after a call to reserve. We also do a rehash with low probability
882. // whenever reserved_growth_ is zero.
  883. bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
  884. size_t capacity) const;
  885. // Similar to above, except that we don't depend on reserved_growth_.
  886. bool should_rehash_for_bug_detection_on_move(const ctrl_t* ctrl,
  887. size_t capacity) const;
  888. void maybe_increment_generation_on_insert() {
  889. if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
  890. if (reserved_growth_ > 0) {
  891. if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
  892. } else {
  893. increment_generation();
  894. }
  895. }
  896. void increment_generation() { *generation_ = NextGeneration(*generation_); }
  897. void reset_reserved_growth(size_t reservation, size_t size) {
  898. reserved_growth_ = reservation - size;
  899. }
  900. size_t reserved_growth() const { return reserved_growth_; }
  901. void set_reserved_growth(size_t r) { reserved_growth_ = r; }
  902. size_t reservation_size() const { return reservation_size_; }
  903. void set_reservation_size(size_t r) { reservation_size_ = r; }
  904. GenerationType generation() const { return *generation_; }
  905. void set_generation(GenerationType g) { *generation_ = g; }
  906. GenerationType* generation_ptr() const { return generation_; }
  907. void set_generation_ptr(GenerationType* g) { generation_ = g; }
  908. private:
  909. // The number of insertions remaining that are guaranteed to not rehash due to
  910. // a prior call to reserve. Note: we store reserved growth in addition to
  911. // reservation size because calls to erase() decrease size_ but don't decrease
  912. // reserved growth.
  913. size_t reserved_growth_ = 0;
  914. // The maximum argument to reserve() since the container was cleared. We need
  915. // to keep track of this, in addition to reserved growth, because we reset
  916. // reserved growth to this when erase(begin(), end()) is called.
  917. size_t reservation_size_ = 0;
  918. // Pointer to the generation counter, which is used to validate iterators and
  919. // is stored in the backing array between the control bytes and the slots.
  920. // Note that we can't store the generation inside the container itself and
  921. // keep a pointer to the container in the iterators because iterators must
  922. // remain valid when the container is moved.
  923. // Note: we could derive this pointer from the control pointer, but it makes
  924. // the code more complicated, and there's a benefit in having the sizes of
  925. // raw_hash_set in sanitizer mode and non-sanitizer mode a bit more different,
  926. // which is that tests are less likely to rely on the size remaining the same.
  927. GenerationType* generation_ = EmptyGeneration();
  928. };
  929. class CommonFieldsGenerationInfoDisabled {
  930. public:
  931. CommonFieldsGenerationInfoDisabled() = default;
  932. CommonFieldsGenerationInfoDisabled(CommonFieldsGenerationInfoDisabled&&) =
  933. default;
  934. CommonFieldsGenerationInfoDisabled& operator=(
  935. CommonFieldsGenerationInfoDisabled&&) = default;
  936. bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
  937. return false;
  938. }
  939. bool should_rehash_for_bug_detection_on_move(const ctrl_t*, size_t) const {
  940. return false;
  941. }
  942. void maybe_increment_generation_on_insert() {}
  943. void increment_generation() {}
  944. void reset_reserved_growth(size_t, size_t) {}
  945. size_t reserved_growth() const { return 0; }
  946. void set_reserved_growth(size_t) {}
  947. size_t reservation_size() const { return 0; }
  948. void set_reservation_size(size_t) {}
  949. GenerationType generation() const { return 0; }
  950. void set_generation(GenerationType) {}
  951. GenerationType* generation_ptr() const { return nullptr; }
  952. void set_generation_ptr(GenerationType*) {}
  953. };
  954. class HashSetIteratorGenerationInfoEnabled {
  955. public:
  956. HashSetIteratorGenerationInfoEnabled() = default;
  957. explicit HashSetIteratorGenerationInfoEnabled(
  958. const GenerationType* generation_ptr)
  959. : generation_ptr_(generation_ptr), generation_(*generation_ptr) {}
  960. GenerationType generation() const { return generation_; }
  961. void reset_generation() { generation_ = *generation_ptr_; }
  962. const GenerationType* generation_ptr() const { return generation_ptr_; }
  963. void set_generation_ptr(const GenerationType* ptr) { generation_ptr_ = ptr; }
  964. private:
  965. const GenerationType* generation_ptr_ = EmptyGeneration();
  966. GenerationType generation_ = *generation_ptr_;
  967. };
  968. class HashSetIteratorGenerationInfoDisabled {
  969. public:
  970. HashSetIteratorGenerationInfoDisabled() = default;
  971. explicit HashSetIteratorGenerationInfoDisabled(const GenerationType*) {}
  972. GenerationType generation() const { return 0; }
  973. void reset_generation() {}
  974. const GenerationType* generation_ptr() const { return nullptr; }
  975. void set_generation_ptr(const GenerationType*) {}
  976. };
  977. #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
  978. using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoEnabled;
  979. using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoEnabled;
  980. #else
  981. using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled;
  982. using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
  983. #endif
984. // Stores the information about the number of slots we can still fill
985. // without needing to rehash.
986. //
987. // We want to ensure a sufficient number of empty slots in the table in order
988. // to keep probe sequences relatively short. An empty slot in the probe group
989. // is required to stop probing.
990. //
991. // Tombstones (kDeleted slots) are not included in the growth capacity,
992. // because we'd like to rehash when the table is filled with tombstones and/or
993. // full slots.
994. //
995. // GrowthInfo also stores a bit that encodes whether the table may have any
996. // deleted slots.
997. // Most tables (>95%) have no deleted slots, so some functions can
998. // be more efficient with this information.
  999. //
  1000. // Callers can also force a rehash via the standard `rehash(0)`,
  1001. // which will recompute this value as a side-effect.
  1002. //
  1003. // See also `CapacityToGrowth()`.
  1004. class GrowthInfo {
  1005. public:
  1006. // Leaves data member uninitialized.
  1007. GrowthInfo() = default;
  1008. // Initializes the GrowthInfo assuming we can grow `growth_left` elements
  1009. // and there are no kDeleted slots in the table.
  1010. void InitGrowthLeftNoDeleted(size_t growth_left) {
  1011. growth_left_info_ = growth_left;
  1012. }
  1013. // Overwrites single full slot with an empty slot.
  1014. void OverwriteFullAsEmpty() { ++growth_left_info_; }
  1015. // Overwrites single empty slot with a full slot.
  1016. void OverwriteEmptyAsFull() {
  1017. ABSL_SWISSTABLE_ASSERT(GetGrowthLeft() > 0);
  1018. --growth_left_info_;
  1019. }
  1020. // Overwrites several empty slots with full slots.
  1021. void OverwriteManyEmptyAsFull(size_t cnt) {
  1022. ABSL_SWISSTABLE_ASSERT(GetGrowthLeft() >= cnt);
  1023. growth_left_info_ -= cnt;
  1024. }
  1025. // Overwrites specified control element with full slot.
  1026. void OverwriteControlAsFull(ctrl_t ctrl) {
  1027. ABSL_SWISSTABLE_ASSERT(GetGrowthLeft() >=
  1028. static_cast<size_t>(IsEmpty(ctrl)));
  1029. growth_left_info_ -= static_cast<size_t>(IsEmpty(ctrl));
  1030. }
  1031. // Overwrites single full slot with a deleted slot.
  1032. void OverwriteFullAsDeleted() { growth_left_info_ |= kDeletedBit; }
  1033. // Returns true if table satisfies two properties:
  1034. // 1. Guaranteed to have no kDeleted slots.
  1035. // 2. There is a place for at least one element to grow.
  1036. bool HasNoDeletedAndGrowthLeft() const {
  1037. return static_cast<std::make_signed_t<size_t>>(growth_left_info_) > 0;
  1038. }
  1039. // Returns true if the table satisfies two properties:
  1040. // 1. Guaranteed to have no kDeleted slots.
  1041. // 2. There is no growth left.
  1042. bool HasNoGrowthLeftAndNoDeleted() const { return growth_left_info_ == 0; }
1043. // Returns true if the table is guaranteed to have no kDeleted slots.
  1044. bool HasNoDeleted() const {
  1045. return static_cast<std::make_signed_t<size_t>>(growth_left_info_) >= 0;
  1046. }
  1047. // Returns the number of elements left to grow.
  1048. size_t GetGrowthLeft() const { return growth_left_info_ & kGrowthLeftMask; }
  1049. private:
  1050. static constexpr size_t kGrowthLeftMask = ((~size_t{}) >> 1);
  1051. static constexpr size_t kDeletedBit = ~kGrowthLeftMask;
1052. // The topmost bit signals whether the table may have deleted slots.
  1053. size_t growth_left_info_;
  1054. };
  1055. static_assert(sizeof(GrowthInfo) == sizeof(size_t), "");
  1056. static_assert(alignof(GrowthInfo) == alignof(size_t), "");
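// Hedged sketch of the GrowthInfo encoding (illustrative only, not part of
// the API): kDeletedBit is the sign bit of size_t, so "no deleted slots and
// some growth left" collapses to a single signed comparison against zero.
static_assert(static_cast<std::make_signed_t<size_t>>(size_t{5}) > 0,
              "no deleted bit and nonzero growth left reads as positive");
static_assert(static_cast<std::make_signed_t<size_t>>(~(~size_t{} >> 1) |
                                                      size_t{5}) < 0,
              "the deleted bit makes the signed value negative");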
  1057. // Returns whether `n` is a valid capacity (i.e., number of slots).
  1058. //
  1059. // A valid capacity is a non-zero integer `2^m - 1`.
  1060. inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
  1061. // Returns the number of "cloned control bytes".
  1062. //
  1063. // This is the number of control bytes that are present both at the beginning
  1064. // of the control byte array and at the end, such that we can create a
  1065. // `Group::kWidth`-width probe window starting from any control byte.
  1066. constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
  1067. // Returns the number of control bytes including cloned.
  1068. constexpr size_t NumControlBytes(size_t capacity) {
  1069. return capacity + 1 + NumClonedBytes();
  1070. }
1071. // Computes the offset of the control bytes from the start of the backing
1072. // allocation; infoz and growth_info are stored before the control bytes.
  1073. inline static size_t ControlOffset(bool has_infoz) {
  1074. return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(GrowthInfo);
  1075. }
  1076. // Helper class for computing offsets and allocation size of hash set fields.
  1077. class RawHashSetLayout {
  1078. public:
  1079. explicit RawHashSetLayout(size_t capacity, size_t slot_align, bool has_infoz)
  1080. : capacity_(capacity),
  1081. control_offset_(ControlOffset(has_infoz)),
  1082. generation_offset_(control_offset_ + NumControlBytes(capacity)),
  1083. slot_offset_(
  1084. (generation_offset_ + NumGenerationBytes() + slot_align - 1) &
  1085. (~slot_align + 1)) {
  1086. ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
  1087. }
  1088. // Returns the capacity of a table.
  1089. size_t capacity() const { return capacity_; }
  1090. // Returns precomputed offset from the start of the backing allocation of
  1091. // control.
  1092. size_t control_offset() const { return control_offset_; }
  1093. // Given the capacity of a table, computes the offset (from the start of the
  1094. // backing allocation) of the generation counter (if it exists).
  1095. size_t generation_offset() const { return generation_offset_; }
  1096. // Given the capacity of a table, computes the offset (from the start of the
  1097. // backing allocation) at which the slots begin.
  1098. size_t slot_offset() const { return slot_offset_; }
  1099. // Given the capacity of a table, computes the total size of the backing
  1100. // array.
  1101. size_t alloc_size(size_t slot_size) const {
  1102. ABSL_SWISSTABLE_ASSERT(
  1103. slot_size <=
  1104. ((std::numeric_limits<size_t>::max)() - slot_offset_) / capacity_);
  1105. return slot_offset_ + capacity_ * slot_size;
  1106. }
  1107. private:
  1108. size_t capacity_;
  1109. size_t control_offset_;
  1110. size_t generation_offset_;
  1111. size_t slot_offset_;
  1112. };
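// Hedged sketch of the align-up computation used for slot_offset_ above
// (illustrative only): for a power-of-two alignment `a`, (x + a - 1) &
// (~a + 1) rounds `x` up to the next multiple of `a`. `AlignUpSketch` is a
// hypothetical helper, not part of the API.
constexpr size_t AlignUpSketch(size_t x, size_t a) {
  return (x + a - 1) & (~a + 1);
}
static_assert(AlignUpSketch(13, 8) == 16, "rounds up to the next multiple");
static_assert(AlignUpSketch(16, 8) == 16, "already-aligned values are kept");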
  1113. struct HashtableFreeFunctionsAccess;
  1114. // We only allow a maximum of 1 SOO element, which makes the implementation
  1115. // much simpler. Complications with multiple SOO elements include:
  1116. // - Satisfying the guarantee that erasing one element doesn't invalidate
  1117. // iterators to other elements means we would probably need actual SOO
  1118. // control bytes.
  1119. // - In order to prevent user code from depending on iteration order for small
  1120. // tables, we would need to randomize the iteration order somehow.
  1121. constexpr size_t SooCapacity() { return 1; }
  1122. // Sentinel type to indicate SOO CommonFields construction.
  1123. struct soo_tag_t {};
  1124. // Sentinel type to indicate SOO CommonFields construction with full size.
  1125. struct full_soo_tag_t {};
  1126. // Sentinel type to indicate non-SOO CommonFields construction.
  1127. struct non_soo_tag_t {};
  1128. // Sentinel value to indicate an uninitialized CommonFields for use in swapping.
  1129. struct uninitialized_tag_t {};
  1130. // Suppress erroneous uninitialized memory errors on GCC. For example, GCC
  1131. // thinks that the call to slot_array() in find_or_prepare_insert() is reading
  1132. // uninitialized memory, but slot_array is only called there when the table is
  1133. // non-empty and this memory is initialized when the table is non-empty.
  1134. #if !defined(__clang__) && defined(__GNUC__)
  1135. #define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) \
  1136. _Pragma("GCC diagnostic push") \
  1137. _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") \
  1138. _Pragma("GCC diagnostic ignored \"-Wuninitialized\"") x; \
  1139. _Pragma("GCC diagnostic pop")
  1140. #define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) \
  1141. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(return x)
  1142. #else
  1143. #define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) x
  1144. #define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) return x
  1145. #endif
  1146. // This allows us to work around an uninitialized memory warning when
  1147. // constructing begin() iterators in empty hashtables.
  1148. union MaybeInitializedPtr {
  1149. void* get() const { ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(p); }
  1150. void set(void* ptr) { p = ptr; }
  1151. void* p;
  1152. };
  1153. struct HeapPtrs {
  1154. HeapPtrs() = default;
  1155. explicit HeapPtrs(ctrl_t* c) : control(c) {}
  1156. // The control bytes (and, also, a pointer near to the base of the backing
  1157. // array).
  1158. //
  1159. // This contains `capacity + 1 + NumClonedBytes()` entries, even
  1160. // when the table is empty (hence EmptyGroup).
  1161. //
  1162. // Note that growth_info is stored immediately before this pointer.
  1163. // May be uninitialized for SOO tables.
  1164. ctrl_t* control;
  1165. // The beginning of the slots, located at `SlotOffset()` bytes after
  1166. // `control`. May be uninitialized for empty tables.
  1167. // Note: we can't use `slots` because Qt defines "slots" as a macro.
  1168. MaybeInitializedPtr slot_array;
  1169. };
  1170. // Manages the backing array pointers or the SOO slot. When raw_hash_set::is_soo
  1171. // is true, the SOO slot is stored in `soo_data`. Otherwise, we use `heap`.
  1172. union HeapOrSoo {
  1173. HeapOrSoo() = default;
  1174. explicit HeapOrSoo(ctrl_t* c) : heap(c) {}
  1175. ctrl_t*& control() {
  1176. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control);
  1177. }
  1178. ctrl_t* control() const {
  1179. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control);
  1180. }
  1181. MaybeInitializedPtr& slot_array() {
  1182. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array);
  1183. }
  1184. MaybeInitializedPtr slot_array() const {
  1185. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array);
  1186. }
  1187. void* get_soo_data() {
  1188. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(soo_data);
  1189. }
  1190. const void* get_soo_data() const {
  1191. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(soo_data);
  1192. }
  1193. HeapPtrs heap;
  1194. unsigned char soo_data[sizeof(HeapPtrs)];
  1195. };
1196. // CommonFields holds the fields in raw_hash_set that do not depend
  1197. // on template parameters. This allows us to conveniently pass all
  1198. // of this state to helper functions as a single argument.
  1199. class CommonFields : public CommonFieldsGenerationInfo {
  1200. public:
  1201. explicit CommonFields(soo_tag_t) : capacity_(SooCapacity()), size_(0) {}
  1202. explicit CommonFields(full_soo_tag_t)
  1203. : capacity_(SooCapacity()), size_(size_t{1} << HasInfozShift()) {}
  1204. explicit CommonFields(non_soo_tag_t)
  1205. : capacity_(0), size_(0), heap_or_soo_(EmptyGroup()) {}
  1206. // For use in swapping.
  1207. explicit CommonFields(uninitialized_tag_t) {}
  1208. // Not copyable
  1209. CommonFields(const CommonFields&) = delete;
  1210. CommonFields& operator=(const CommonFields&) = delete;
  1211. // Movable
  1212. CommonFields(CommonFields&& that) = default;
  1213. CommonFields& operator=(CommonFields&&) = default;
  1214. template <bool kSooEnabled>
  1215. static CommonFields CreateDefault() {
  1216. return kSooEnabled ? CommonFields{soo_tag_t{}}
  1217. : CommonFields{non_soo_tag_t{}};
  1218. }
  1219. // The inline data for SOO is written on top of control_/slots_.
  1220. const void* soo_data() const { return heap_or_soo_.get_soo_data(); }
  1221. void* soo_data() { return heap_or_soo_.get_soo_data(); }
  1222. HeapOrSoo heap_or_soo() const { return heap_or_soo_; }
  1223. const HeapOrSoo& heap_or_soo_ref() const { return heap_or_soo_; }
  1224. ctrl_t* control() const { return heap_or_soo_.control(); }
  1225. void set_control(ctrl_t* c) { heap_or_soo_.control() = c; }
  1226. void* backing_array_start() const {
  1227. // growth_info (and maybe infoz) is stored before control bytes.
  1228. ABSL_SWISSTABLE_ASSERT(
  1229. reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
  1230. return control() - ControlOffset(has_infoz());
  1231. }
  1232. // Note: we can't use slots() because Qt defines "slots" as a macro.
  1233. void* slot_array() const { return heap_or_soo_.slot_array().get(); }
  1234. MaybeInitializedPtr slots_union() const { return heap_or_soo_.slot_array(); }
  1235. void set_slots(void* s) { heap_or_soo_.slot_array().set(s); }
  1236. // The number of filled slots.
  1237. size_t size() const { return size_ >> HasInfozShift(); }
  1238. void set_size(size_t s) {
  1239. size_ = (s << HasInfozShift()) | (size_ & HasInfozMask());
  1240. }
  1241. void set_empty_soo() {
  1242. AssertInSooMode();
  1243. size_ = 0;
  1244. }
  1245. void set_full_soo() {
  1246. AssertInSooMode();
  1247. size_ = size_t{1} << HasInfozShift();
  1248. }
  1249. void increment_size() {
  1250. ABSL_SWISSTABLE_ASSERT(size() < capacity());
  1251. size_ += size_t{1} << HasInfozShift();
  1252. }
  1253. void decrement_size() {
  1254. ABSL_SWISSTABLE_ASSERT(size() > 0);
  1255. size_ -= size_t{1} << HasInfozShift();
  1256. }
  1257. // The total number of available slots.
  1258. size_t capacity() const { return capacity_; }
  1259. void set_capacity(size_t c) {
  1260. // We allow setting above the max valid capacity for debugging purposes.
  1261. ABSL_SWISSTABLE_ASSERT(c == 0 || IsValidCapacity(c) ||
  1262. c > kAboveMaxValidCapacity);
  1263. capacity_ = c;
  1264. }
  1265. // The number of slots we can still fill without needing to rehash.
  1266. // This is stored in the heap allocation before the control bytes.
  1267. // TODO(b/289225379): experiment with moving growth_info back inline to
  1268. // increase room for SOO.
  1269. size_t growth_left() const { return growth_info().GetGrowthLeft(); }
  1270. GrowthInfo& growth_info() {
  1271. auto* gl_ptr = reinterpret_cast<GrowthInfo*>(control()) - 1;
  1272. ABSL_SWISSTABLE_ASSERT(
  1273. reinterpret_cast<uintptr_t>(gl_ptr) % alignof(GrowthInfo) == 0);
  1274. return *gl_ptr;
  1275. }
  1276. GrowthInfo growth_info() const {
  1277. return const_cast<CommonFields*>(this)->growth_info();
  1278. }
  1279. bool has_infoz() const {
  1280. return ABSL_PREDICT_FALSE((size_ & HasInfozMask()) != 0);
  1281. }
  1282. void set_has_infoz(bool has_infoz) {
  1283. size_ = (size() << HasInfozShift()) | static_cast<size_t>(has_infoz);
  1284. }
  1285. HashtablezInfoHandle infoz() {
  1286. return has_infoz()
  1287. ? *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start())
  1288. : HashtablezInfoHandle();
  1289. }
  1290. void set_infoz(HashtablezInfoHandle infoz) {
  1291. ABSL_SWISSTABLE_ASSERT(has_infoz());
  1292. *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start()) = infoz;
  1293. }
  1294. bool should_rehash_for_bug_detection_on_insert() const {
  1295. return CommonFieldsGenerationInfo::
  1296. should_rehash_for_bug_detection_on_insert(control(), capacity());
  1297. }
  1298. bool should_rehash_for_bug_detection_on_move() const {
  1299. return CommonFieldsGenerationInfo::should_rehash_for_bug_detection_on_move(
  1300. control(), capacity());
  1301. }
  1302. void reset_reserved_growth(size_t reservation) {
  1303. CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
  1304. }
  1305. // The size of the backing array allocation.
  1306. size_t alloc_size(size_t slot_size, size_t slot_align) const {
  1307. return RawHashSetLayout(capacity(), slot_align, has_infoz())
  1308. .alloc_size(slot_size);
  1309. }
  1310. // Move fields other than heap_or_soo_.
  1311. void move_non_heap_or_soo_fields(CommonFields& that) {
  1312. static_cast<CommonFieldsGenerationInfo&>(*this) =
  1313. std::move(static_cast<CommonFieldsGenerationInfo&>(that));
  1314. capacity_ = that.capacity_;
  1315. size_ = that.size_;
  1316. }
  1317. // Returns the number of control bytes set to kDeleted. For testing only.
  1318. size_t TombstonesCount() const {
  1319. return static_cast<size_t>(
  1320. std::count(control(), control() + capacity(), ctrl_t::kDeleted));
  1321. }
  1322. // Helper to enable sanitizer mode validation to protect against reentrant
  1323. // calls during element constructor/destructor.
  1324. template <typename F>
  1325. void RunWithReentrancyGuard(F f) {
  1326. #ifdef NDEBUG
  1327. f();
  1328. return;
  1329. #endif
  1330. const size_t cap = capacity();
  1331. set_capacity(InvalidCapacity::kReentrance);
  1332. f();
  1333. set_capacity(cap);
  1334. }
  1335. private:
  1336. // We store the has_infoz bit in the lowest bit of size_.
  1337. static constexpr size_t HasInfozShift() { return 1; }
  1338. static constexpr size_t HasInfozMask() {
  1339. return (size_t{1} << HasInfozShift()) - 1;
  1340. }
  1341. // We can't assert that SOO is enabled because we don't have SooEnabled(), but
  1342. // we assert what we can.
  1343. void AssertInSooMode() const {
  1344. ABSL_SWISSTABLE_ASSERT(capacity() == SooCapacity());
  1345. ABSL_SWISSTABLE_ASSERT(!has_infoz());
  1346. }
  1347. // The number of slots in the backing array. This is always 2^N-1 for an
  1348. // integer N. NOTE: we tried experimenting with compressing the capacity and
  1349. // storing it together with size_: (a) using 6 bits to store the corresponding
  1350. // power (N in 2^N-1), and (b) storing 2^N as the most significant bit of
  1351. // size_ and storing size in the low bits. Both of these experiments were
  1352. // regressions, presumably because we need capacity to do find operations.
  1353. size_t capacity_;
  1354. // The size and also has one bit that stores whether we have infoz.
  1355. // TODO(b/289225379): we could put size_ into HeapOrSoo and make capacity_
  1356. // encode the size in SOO case. We would be making size()/capacity() more
  1357. // expensive in order to have more SOO space.
  1358. size_t size_;
  1359. // Either the control/slots pointers or the SOO slot.
  1360. HeapOrSoo heap_or_soo_;
  1361. };
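// Hedged sketch of the size_ encoding above (illustrative only): the
// has_infoz flag occupies the lowest bit and the size the remaining bits
// (HasInfozShift() == 1), so both round-trip through a shift and a mask.
static_assert((((size_t{42} << 1) | size_t{1}) >> 1) == 42,
              "size() recovers the stored size");
static_assert((((size_t{42} << 1) | size_t{1}) & size_t{1}) == 1,
              "the has_infoz bit is preserved");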
  1362. template <class Policy, class Hash, class Eq, class Alloc>
  1363. class raw_hash_set;
  1364. // Returns the next valid capacity after `n`.
  1365. inline size_t NextCapacity(size_t n) {
  1366. ABSL_SWISSTABLE_ASSERT(IsValidCapacity(n) || n == 0);
  1367. return n * 2 + 1;
  1368. }
  1369. // Applies the following mapping to every byte in the control array:
  1370. // * kDeleted -> kEmpty
  1371. // * kEmpty -> kEmpty
  1372. // * _ -> kDeleted
  1373. // PRECONDITION:
  1374. // IsValidCapacity(capacity)
  1375. // ctrl[capacity] == ctrl_t::kSentinel
  1376. // ctrl[i] != ctrl_t::kSentinel for all i < capacity
  1377. void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
  1378. // Converts `n` into the next valid capacity, per `IsValidCapacity`.
  1379. inline size_t NormalizeCapacity(size_t n) {
  1380. return n ? ~size_t{} >> countl_zero(n) : 1;
  1381. }
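// Hedged, constexpr re-implementation of NormalizeCapacity for illustration
// only (the real function above relies on countl_zero): it fills in every
// bit below the highest set bit, yielding the next valid 2^m - 1 capacity.
// `NormalizeCapacitySketch` is a hypothetical name.
constexpr size_t NormalizeCapacitySketch(size_t n) {
  size_t m = n;
  for (int shift = 1; shift < std::numeric_limits<size_t>::digits;
       shift *= 2) {
    m |= m >> shift;
  }
  return n ? m : 1;
}
static_assert(NormalizeCapacitySketch(5) == 7, "rounds up to 2^m - 1");
static_assert(NormalizeCapacitySketch(8) == 15, "powers of two round up");
static_assert(NormalizeCapacitySketch(0) == 1, "zero maps to capacity 1");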
  1382. template <size_t kSlotSize>
  1383. size_t MaxValidCapacity() {
  1384. return NormalizeCapacity((std::numeric_limits<size_t>::max)() / 4 /
  1385. kSlotSize);
  1386. }
  1387. // Use a non-inlined function to avoid code bloat.
  1388. [[noreturn]] void HashTableSizeOverflow();
  1389. // General notes on capacity/growth methods below:
  1390. // - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
  1391. // average of two empty slots per group.
  1392. // - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
  1393. // - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
  1394. // never need to probe (the whole table fits in one group) so we don't need a
  1395. // load factor less than 1.
  1396. // Given `capacity`, applies the load factor; i.e., it returns the maximum
  1397. // number of values we should put into the table before a resizing rehash.
  1398. inline size_t CapacityToGrowth(size_t capacity) {
  1399. ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
  1400. // `capacity*7/8`
  1401. if (Group::kWidth == 8 && capacity == 7) {
  1402. // x-x/8 does not work when x==7.
  1403. return 6;
  1404. }
  1405. return capacity - capacity / 8;
  1406. }
  1407. // Given `growth`, "unapplies" the load factor to find how large the capacity
  1408. // should be to stay within the load factor.
  1409. //
  1410. // This might not be a valid capacity and `NormalizeCapacity()` should be
  1411. // called on this.
  1412. inline size_t GrowthToLowerboundCapacity(size_t growth) {
  1413. // `growth*8/7`
  1414. if (Group::kWidth == 8 && growth == 7) {
  1415. // x+(x-1)/7 does not work when x==7.
  1416. return 8;
  1417. }
  1418. return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
  1419. }
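// Hedged worked example (constexpr re-implementations for illustration only;
// both ignore the Group::kWidth == 8 special cases handled above): applying
// the ~7/8 load factor and then "unapplying" it round-trips a valid capacity.
// The *Sketch names are hypothetical.
constexpr size_t CapacityToGrowthSketch(size_t capacity) {
  return capacity - capacity / 8;
}
constexpr size_t GrowthToLowerboundCapacitySketch(size_t growth) {
  return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
}
static_assert(CapacityToGrowthSketch(15) == 14, "growth limit for capacity 15");
static_assert(GrowthToLowerboundCapacitySketch(14) == 15,
              "round-trips back to a valid capacity");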
  1420. template <class InputIter>
  1421. size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
  1422. size_t bucket_count) {
  1423. if (bucket_count != 0) {
  1424. return bucket_count;
  1425. }
  1426. using InputIterCategory =
  1427. typename std::iterator_traits<InputIter>::iterator_category;
  1428. if (std::is_base_of<std::random_access_iterator_tag,
  1429. InputIterCategory>::value) {
  1430. return GrowthToLowerboundCapacity(
  1431. static_cast<size_t>(std::distance(first, last)));
  1432. }
  1433. return 0;
  1434. }
  1435. constexpr bool SwisstableDebugEnabled() {
  1436. #if defined(ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \
  1437. ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG)
  1438. return true;
  1439. #else
  1440. return false;
  1441. #endif
  1442. }
  1443. inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
  1444. const GenerationType* generation_ptr,
  1445. const char* operation) {
  1446. if (!SwisstableDebugEnabled()) return;
  1447. // `SwisstableDebugEnabled()` is also true for release builds with hardening
  1448. // enabled. To minimize their impact in those builds:
  1449. // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
  1450. // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
  1451. // the chances that the hot paths will be inlined.
  1452. if (ABSL_PREDICT_FALSE(ctrl == nullptr)) {
  1453. ABSL_RAW_LOG(FATAL, "%s called on end() iterator.", operation);
  1454. }
  1455. if (ABSL_PREDICT_FALSE(ctrl == EmptyGroup())) {
  1456. ABSL_RAW_LOG(FATAL, "%s called on default-constructed iterator.",
  1457. operation);
  1458. }
  1459. if (SwisstableGenerationsEnabled()) {
  1460. if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
  1461. ABSL_RAW_LOG(FATAL,
  1462. "%s called on invalid iterator. The table could have "
  1463. "rehashed or moved since this iterator was initialized.",
  1464. operation);
  1465. }
  1466. if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
  1467. ABSL_RAW_LOG(
  1468. FATAL,
  1469. "%s called on invalid iterator. The element was likely erased.",
  1470. operation);
  1471. }
  1472. } else {
  1473. if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
  1474. ABSL_RAW_LOG(
  1475. FATAL,
  1476. "%s called on invalid iterator. The element might have been erased "
  1477. "or the table might have rehashed. Consider running with "
  1478. "--config=asan to diagnose rehashing issues.",
  1479. operation);
  1480. }
  1481. }
  1482. }
  1483. // Note that for comparisons, null/end iterators are valid.
  1484. inline void AssertIsValidForComparison(const ctrl_t* ctrl,
  1485. GenerationType generation,
  1486. const GenerationType* generation_ptr) {
  1487. if (!SwisstableDebugEnabled()) return;
  1488. const bool ctrl_is_valid_for_comparison =
  1489. ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl);
  1490. if (SwisstableGenerationsEnabled()) {
  1491. if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
  1492. ABSL_RAW_LOG(FATAL,
  1493. "Invalid iterator comparison. The table could have rehashed "
  1494. "or moved since this iterator was initialized.");
  1495. }
  1496. if (ABSL_PREDICT_FALSE(!ctrl_is_valid_for_comparison)) {
  1497. ABSL_RAW_LOG(
  1498. FATAL, "Invalid iterator comparison. The element was likely erased.");
  1499. }
  1500. } else {
  1501. ABSL_HARDENING_ASSERT(
  1502. ctrl_is_valid_for_comparison &&
  1503. "Invalid iterator comparison. The element might have been erased or "
  1504. "the table might have rehashed. Consider running with --config=asan to "
  1505. "diagnose rehashing issues.");
  1506. }
  1507. }
  1508. // If the two iterators come from the same container, then their pointers will
1509. // interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice versa.
  1510. // Note: we take slots by reference so that it's not UB if they're uninitialized
  1511. // as long as we don't read them (when ctrl is null).
  1512. inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
  1513. const ctrl_t* ctrl_b,
  1514. const void* const& slot_a,
  1515. const void* const& slot_b) {
  1516. // If either control byte is null, then we can't tell.
  1517. if (ctrl_a == nullptr || ctrl_b == nullptr) return true;
  1518. const bool a_is_soo = IsSooControl(ctrl_a);
  1519. if (a_is_soo != IsSooControl(ctrl_b)) return false;
  1520. if (a_is_soo) return slot_a == slot_b;
  1521. const void* low_slot = slot_a;
  1522. const void* hi_slot = slot_b;
  1523. if (ctrl_a > ctrl_b) {
  1524. std::swap(ctrl_a, ctrl_b);
  1525. std::swap(low_slot, hi_slot);
  1526. }
  1527. return ctrl_b < low_slot && low_slot <= hi_slot;
  1528. }
  1529. // Asserts that two iterators come from the same container.
  1530. // Note: we take slots by reference so that it's not UB if they're uninitialized
  1531. // as long as we don't read them (when ctrl is null).
  1532. inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
  1533. const void* const& slot_a,
  1534. const void* const& slot_b,
  1535. const GenerationType* generation_ptr_a,
  1536. const GenerationType* generation_ptr_b) {
  1537. if (!SwisstableDebugEnabled()) return;
  1538. // `SwisstableDebugEnabled()` is also true for release builds with hardening
  1539. // enabled. To minimize their impact in those builds:
  1540. // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
  1541. // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
  1542. // the chances that the hot paths will be inlined.
  1543. // fail_if(is_invalid, message) crashes when is_invalid is true and provides
  1544. // an error message based on `message`.
  1545. const auto fail_if = [](bool is_invalid, const char* message) {
  1546. if (ABSL_PREDICT_FALSE(is_invalid)) {
  1547. ABSL_RAW_LOG(FATAL, "Invalid iterator comparison. %s", message);
  1548. }
  1549. };
  1550. const bool a_is_default = ctrl_a == EmptyGroup();
  1551. const bool b_is_default = ctrl_b == EmptyGroup();
  1552. if (a_is_default && b_is_default) return;
  1553. fail_if(a_is_default != b_is_default,
  1554. "Comparing default-constructed hashtable iterator with a "
  1555. "non-default-constructed hashtable iterator.");
  1556. if (SwisstableGenerationsEnabled()) {
  1557. if (ABSL_PREDICT_TRUE(generation_ptr_a == generation_ptr_b)) return;
  1558. // Users don't need to know whether the tables are SOO so don't mention SOO
  1559. // in the debug message.
  1560. const bool a_is_soo = IsSooControl(ctrl_a);
  1561. const bool b_is_soo = IsSooControl(ctrl_b);
  1562. fail_if(a_is_soo != b_is_soo || (a_is_soo && b_is_soo),
  1563. "Comparing iterators from different hashtables.");
  1564. const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
  1565. const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
  1566. fail_if(a_is_empty != b_is_empty,
  1567. "Comparing an iterator from an empty hashtable with an iterator "
  1568. "from a non-empty hashtable.");
  1569. fail_if(a_is_empty && b_is_empty,
  1570. "Comparing iterators from different empty hashtables.");
  1571. const bool a_is_end = ctrl_a == nullptr;
  1572. const bool b_is_end = ctrl_b == nullptr;
  1573. fail_if(a_is_end || b_is_end,
  1574. "Comparing iterator with an end() iterator from a different "
  1575. "hashtable.");
  1576. fail_if(true, "Comparing non-end() iterators from different hashtables.");
  1577. } else {
  1578. ABSL_HARDENING_ASSERT_SLOW(
  1579. AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
  1580. "Invalid iterator comparison. The iterators may be from different "
  1581. "containers or the container might have rehashed or moved. Consider "
  1582. "running with --config=asan to diagnose issues.");
  1583. }
  1584. }
  1585. struct FindInfo {
  1586. size_t offset;
  1587. size_t probe_length;
  1588. };
  1589. // Whether a table is "small". A small table fits entirely into a probing
  1590. // group, i.e., has a capacity < `Group::kWidth`.
  1591. //
  1592. // In small mode we are able to use the whole capacity. The extra control
  1593. // bytes give us at least one "empty" control byte to stop the iteration.
  1594. // This is important to make 1 a valid capacity.
  1595. //
  1596. // In small mode only the first `capacity` control bytes after the sentinel
  1597. // are valid. The rest contain dummy ctrl_t::kEmpty values that do not
1598. // represent a real slot. This is important to take into account in
1599. // `find_first_non_full()`, where we never try
1600. // `ShouldInsertBackwards()` for small tables.
  1601. inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
  1602. // Whether a table fits entirely into a probing group.
  1603. // Arbitrary order of elements in such tables is correct.
  1604. inline bool is_single_group(size_t capacity) {
  1605. return capacity <= Group::kWidth;
  1606. }
  1607. // Begins a probing operation on `common.control`, using `hash`.
  1608. inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
  1609. size_t hash) {
  1610. return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
  1611. }
  1612. inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
  1613. return probe(common.control(), common.capacity(), hash);
  1614. }
  1615. // Probes an array of control bits using a probe sequence derived from `hash`,
  1616. // and returns the offset corresponding to the first deleted or empty slot.
  1617. //
  1618. // Behavior when the entire table is full is undefined.
  1619. //
  1620. // NOTE: this function must work with tables having both empty and deleted
  1621. // slots in the same group. Such tables appear during `erase()`.
  1622. template <typename = void>
  1623. inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
  1624. auto seq = probe(common, hash);
  1625. const ctrl_t* ctrl = common.control();
  1626. if (IsEmptyOrDeleted(ctrl[seq.offset()]) &&
  1627. !ShouldInsertBackwards(common.capacity(), hash, ctrl)) {
  1628. return {seq.offset(), /*probe_length=*/0};
  1629. }
  1630. while (true) {
  1631. GroupFullEmptyOrDeleted g{ctrl + seq.offset()};
  1632. auto mask = g.MaskEmptyOrDeleted();
  1633. if (mask) {
  1634. return {
  1635. seq.offset(GetInsertionOffset(mask, common.capacity(), hash, ctrl)),
  1636. seq.index()};
  1637. }
  1638. seq.next();
  1639. ABSL_SWISSTABLE_ASSERT(seq.index() <= common.capacity() && "full table!");
  1640. }
  1641. }
1642. // The extern template declaration for this inline function keeps the
1643. // possibility of inlining. When the compiler decides not to inline, no
1644. // symbols will be added to the corresponding translation unit.
  1645. extern template FindInfo find_first_non_full(const CommonFields&, size_t);
  1646. // Non-inlined version of find_first_non_full for use in less
  1647. // performance critical routines.
  1648. FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
  1649. inline void ResetGrowthLeft(CommonFields& common) {
  1650. common.growth_info().InitGrowthLeftNoDeleted(
  1651. CapacityToGrowth(common.capacity()) - common.size());
  1652. }
1653. // Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
1654. // array as empty.
  1655. inline void ResetCtrl(CommonFields& common, size_t slot_size) {
  1656. const size_t capacity = common.capacity();
  1657. ctrl_t* ctrl = common.control();
  1658. std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
  1659. capacity + 1 + NumClonedBytes());
  1660. ctrl[capacity] = ctrl_t::kSentinel;
  1661. SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
  1662. }
  1663. // Sets sanitizer poisoning for slot corresponding to control byte being set.
  1664. inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h,
  1665. size_t slot_size) {
  1666. ABSL_SWISSTABLE_ASSERT(i < c.capacity());
  1667. auto* slot_i = static_cast<const char*>(c.slot_array()) + i * slot_size;
  1668. if (IsFull(h)) {
  1669. SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
  1670. } else {
  1671. SanitizerPoisonMemoryRegion(slot_i, slot_size);
  1672. }
  1673. }
  1674. // Sets `ctrl[i]` to `h`.
  1675. //
  1676. // Unlike setting it directly, this function will perform bounds checks and
  1677. // mirror the value to the cloned tail if necessary.
  1678. inline void SetCtrl(const CommonFields& c, size_t i, ctrl_t h,
  1679. size_t slot_size) {
  1680. DoSanitizeOnSetCtrl(c, i, h, slot_size);
  1681. ctrl_t* ctrl = c.control();
  1682. ctrl[i] = h;
  1683. ctrl[((i - NumClonedBytes()) & c.capacity()) +
  1684. (NumClonedBytes() & c.capacity())] = h;
  1685. }
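// Hedged sketch of the mirrored-index arithmetic in SetCtrl above
// (illustrative only; `MirroredCtrlIndexSketch` is a hypothetical name):
// with cloned = NumClonedBytes(), a slot i < NumClonedBytes() mirrors to
// capacity + 1 + i, while larger indices map to themselves.
constexpr size_t MirroredCtrlIndexSketch(size_t i, size_t capacity,
                                         size_t cloned) {
  return ((i - cloned) & capacity) + (cloned & capacity);
}
static_assert(MirroredCtrlIndexSketch(0, 15, 15) == 16, "capacity 15, i = 0");
static_assert(MirroredCtrlIndexSketch(5, 15, 15) == 21, "capacity 15, i = 5");
static_assert(MirroredCtrlIndexSketch(3, 7, 15) == 11, "capacity 7, i = 3");
static_assert(MirroredCtrlIndexSketch(20, 31, 15) == 20,
              "i >= NumClonedBytes() maps to itself");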
  1686. // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
  1687. inline void SetCtrl(const CommonFields& c, size_t i, h2_t h, size_t slot_size) {
  1688. SetCtrl(c, i, static_cast<ctrl_t>(h), slot_size);
  1689. }
  1690. // Like SetCtrl, but in a single group table, we can save some operations when
  1691. // setting the cloned control byte.
  1692. inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, ctrl_t h,
  1693. size_t slot_size) {
  1694. ABSL_SWISSTABLE_ASSERT(is_single_group(c.capacity()));
  1695. DoSanitizeOnSetCtrl(c, i, h, slot_size);
  1696. ctrl_t* ctrl = c.control();
  1697. ctrl[i] = h;
  1698. ctrl[i + c.capacity() + 1] = h;
  1699. }
  1700. // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
  1701. inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h,
  1702. size_t slot_size) {
  1703. SetCtrlInSingleGroupTable(c, i, static_cast<ctrl_t>(h), slot_size);
  1704. }
  1705. // growth_info (which is a size_t) is stored with the backing array.
  1706. constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
  1707. return (std::max)(align_of_slot, alignof(GrowthInfo));
  1708. }
  1709. // Returns the address of the ith slot in slots where each slot occupies
  1710. // slot_size.
  1711. inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
  1712. return static_cast<void*>(static_cast<char*>(slot_array) +
  1713. (slot * slot_size));
  1714. }
  1715. // Iterates over all full slots and calls `cb(const ctrl_t*, SlotType*)`.
1716. // No insertion into the table is allowed during the callback.
  1717. // Erasure is allowed only for the element passed to the callback.
  1718. template <class SlotType, class Callback>
  1719. ABSL_ATTRIBUTE_ALWAYS_INLINE inline void IterateOverFullSlots(
  1720. const CommonFields& c, SlotType* slot, Callback cb) {
  1721. const size_t cap = c.capacity();
  1722. const ctrl_t* ctrl = c.control();
  1723. if (is_small(cap)) {
1724. // In a small table, the mirrored/cloned control bytes are also located in
1725. // the first group (starting from position 0), so we take the group starting
1726. // at position `capacity` in order to avoid duplicates.
1727. // A small table's capacity fits into a portable group, and
1728. // GroupPortableImpl::MaskFull is more efficient when
1729. // capacity <= GroupPortableImpl::kWidth.
  1730. ABSL_SWISSTABLE_ASSERT(cap <= GroupPortableImpl::kWidth &&
  1731. "unexpectedly large small capacity");
  1732. static_assert(Group::kWidth >= GroupPortableImpl::kWidth,
  1733. "unexpected group width");
  1734. // Group starts from kSentinel slot, so indices in the mask will
  1735. // be increased by 1.
  1736. const auto mask = GroupPortableImpl(ctrl + cap).MaskFull();
  1737. --ctrl;
  1738. --slot;
  1739. for (uint32_t i : mask) {
  1740. cb(ctrl + i, slot + i);
  1741. }
  1742. return;
  1743. }
  1744. size_t remaining = c.size();
  1745. ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining;
  1746. while (remaining != 0) {
  1747. for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) {
  1748. ABSL_SWISSTABLE_ASSERT(IsFull(ctrl[i]) &&
  1749. "hash table was modified unexpectedly");
  1750. cb(ctrl + i, slot + i);
  1751. --remaining;
  1752. }
  1753. ctrl += Group::kWidth;
  1754. slot += Group::kWidth;
  1755. ABSL_SWISSTABLE_ASSERT(
  1756. (remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&
  1757. "hash table was modified unexpectedly");
  1758. }
1759. // NOTE: erasure of the current element is allowed in the callback for the
1760. // absl::erase_if specialization, so we use `>=`.
  1761. ABSL_SWISSTABLE_ASSERT(original_size_for_assert >= c.size() &&
  1762. "hash table was modified unexpectedly");
  1763. }
  1764. template <typename CharAlloc>
  1765. constexpr bool ShouldSampleHashtablezInfo() {
  1766. // Folks with custom allocators often make unwarranted assumptions about the
1767. // behavior of their classes vis-a-vis trivial destructibility and what
  1768. // calls they will or won't make. Avoid sampling for people with custom
  1769. // allocators to get us out of this mess. This is not a hard guarantee but
  1770. // a workaround while we plan the exact guarantee we want to provide.
  1771. return std::is_same<CharAlloc, std::allocator<char>>::value;
  1772. }
  1773. template <bool kSooEnabled>
  1774. HashtablezInfoHandle SampleHashtablezInfo(size_t sizeof_slot, size_t sizeof_key,
  1775. size_t sizeof_value,
  1776. size_t old_capacity, bool was_soo,
  1777. HashtablezInfoHandle forced_infoz,
  1778. CommonFields& c) {
  1779. if (forced_infoz.IsSampled()) return forced_infoz;
  1780. // In SOO, we sample on the first insertion so if this is an empty SOO case
  1781. // (e.g. when reserve is called), then we still need to sample.
  1782. if (kSooEnabled && was_soo && c.size() == 0) {
  1783. return Sample(sizeof_slot, sizeof_key, sizeof_value, SooCapacity());
  1784. }
  1785. // For non-SOO cases, we sample whenever the capacity is increasing from zero
  1786. // to non-zero.
  1787. if (!kSooEnabled && old_capacity == 0) {
  1788. return Sample(sizeof_slot, sizeof_key, sizeof_value, 0);
  1789. }
  1790. return c.infoz();
  1791. }
  1792. // Helper class to perform resize of the hash set.
  1793. //
  1794. // It contains special optimizations for small group resizes.
  1795. // See GrowIntoSingleGroupShuffleControlBytes for details.
  1796. class HashSetResizeHelper {
  1797. public:
  1798. explicit HashSetResizeHelper(CommonFields& c, bool was_soo, bool had_soo_slot,
  1799. HashtablezInfoHandle forced_infoz)
  1800. : old_capacity_(c.capacity()),
  1801. had_infoz_(c.has_infoz()),
  1802. was_soo_(was_soo),
  1803. had_soo_slot_(had_soo_slot),
  1804. forced_infoz_(forced_infoz) {}
1805. // Version of `find_first_non_full` optimized for small groups.
1806. // Beneficial only right after calling `raw_hash_set::resize`.
1807. // It is safe to call when the capacity is large or was not changed, but there
1808. // will be no performance benefit.
1809. // It implicitly assumes that `resize` will call `GrowSizeIntoSingleGroup*`
1810. // when `IsGrowingIntoSingleGroupApplicable` holds.
1811. // Falls back to `find_first_non_full` for large groups.
  1812. static FindInfo FindFirstNonFullAfterResize(const CommonFields& c,
  1813. size_t old_capacity, size_t hash);
  1814. HeapOrSoo& old_heap_or_soo() { return old_heap_or_soo_; }
  1815. void* old_soo_data() { return old_heap_or_soo_.get_soo_data(); }
  1816. ctrl_t* old_ctrl() const {
  1817. ABSL_SWISSTABLE_ASSERT(!was_soo_);
  1818. return old_heap_or_soo_.control();
  1819. }
  1820. void* old_slots() const {
  1821. ABSL_SWISSTABLE_ASSERT(!was_soo_);
  1822. return old_heap_or_soo_.slot_array().get();
  1823. }
  1824. size_t old_capacity() const { return old_capacity_; }
  1825. // Returns the index of the SOO slot when growing from SOO to non-SOO in a
  1826. // single group. See also InitControlBytesAfterSoo(). It's important to use
  1827. // index 1 so that when resizing from capacity 1 to 3, we can still have
  1828. // random iteration order between the first two inserted elements.
  1829. // I.e. it allows inserting the second element at either index 0 or 2.
  1830. static size_t SooSlotIndex() { return 1; }
  1831. // Allocates a backing array for the hashtable.
  1832. // Reads `capacity` and updates all other fields based on the result of
  1833. // the allocation.
  1834. //
  1835. // It also may do the following actions:
  1836. // 1. initialize control bytes
  1837. // 2. initialize slots
  1838. // 3. deallocate old slots.
  1839. //
  1840. // We are bundling a lot of functionality
  1841. // in one ABSL_ATTRIBUTE_NOINLINE function in order to minimize binary code
  1842. // duplication in raw_hash_set<>::resize.
  1843. //
  1844. // `c.capacity()` must be nonzero.
  1845. // POSTCONDITIONS:
  1846. // 1. CommonFields is initialized.
  1847. //
  1848. // if IsGrowingIntoSingleGroupApplicable && TransferUsesMemcpy
  1849. // Both control bytes and slots are fully initialized.
  1850. // old_slots are deallocated.
  1851. // infoz.RecordRehash is called.
  1852. //
  1853. // if IsGrowingIntoSingleGroupApplicable && !TransferUsesMemcpy
  1854. // Control bytes are fully initialized.
  1855. // infoz.RecordRehash is called.
  1856. // GrowSizeIntoSingleGroup must be called to finish slots initialization.
  1857. //
  1858. // if !IsGrowingIntoSingleGroupApplicable
  1859. // Control bytes are initialized to empty table via ResetCtrl.
  1860. // raw_hash_set<>::resize must insert elements regularly.
  1861. // infoz.RecordRehash is called if old_capacity == 0.
  1862. //
  1863. // Returns IsGrowingIntoSingleGroupApplicable result to avoid recomputation.
  1864. template <typename Alloc, size_t SizeOfSlot, bool TransferUsesMemcpy,
  1865. bool SooEnabled, size_t AlignOfSlot>
  1866. ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, Alloc alloc,
  1867. ctrl_t soo_slot_h2,
  1868. size_t key_size,
  1869. size_t value_size) {
  1870. ABSL_SWISSTABLE_ASSERT(c.capacity());
  1871. HashtablezInfoHandle infoz =
  1872. ShouldSampleHashtablezInfo<Alloc>()
  1873. ? SampleHashtablezInfo<SooEnabled>(SizeOfSlot, key_size, value_size,
  1874. old_capacity_, was_soo_,
  1875. forced_infoz_, c)
  1876. : HashtablezInfoHandle{};
  1877. const bool has_infoz = infoz.IsSampled();
  1878. RawHashSetLayout layout(c.capacity(), AlignOfSlot, has_infoz);
  1879. char* mem = static_cast<char*>(Allocate<BackingArrayAlignment(AlignOfSlot)>(
  1880. &alloc, layout.alloc_size(SizeOfSlot)));
  1881. const GenerationType old_generation = c.generation();
  1882. c.set_generation_ptr(
  1883. reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));
  1884. c.set_generation(NextGeneration(old_generation));
  1885. c.set_control(reinterpret_cast<ctrl_t*>(mem + layout.control_offset()));
  1886. c.set_slots(mem + layout.slot_offset());
  1887. ResetGrowthLeft(c);
  1888. const bool grow_single_group =
  1889. IsGrowingIntoSingleGroupApplicable(old_capacity_, layout.capacity());
  1890. if (SooEnabled && was_soo_ && grow_single_group) {
  1891. InitControlBytesAfterSoo(c.control(), soo_slot_h2, layout.capacity());
  1892. if (TransferUsesMemcpy && had_soo_slot_) {
  1893. TransferSlotAfterSoo(c, SizeOfSlot);
  1894. }
  1895. // SooEnabled implies that old_capacity_ != 0.
  1896. } else if ((SooEnabled || old_capacity_ != 0) && grow_single_group) {
  1897. if (TransferUsesMemcpy) {
  1898. GrowSizeIntoSingleGroupTransferable(c, SizeOfSlot);
  1899. DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot);
  1900. } else {
  1901. GrowIntoSingleGroupShuffleControlBytes(c.control(), layout.capacity());
  1902. }
  1903. } else {
  1904. ResetCtrl(c, SizeOfSlot);
  1905. }
  1906. c.set_has_infoz(has_infoz);
  1907. if (has_infoz) {
  1908. infoz.RecordStorageChanged(c.size(), layout.capacity());
  1909. if ((SooEnabled && was_soo_) || grow_single_group || old_capacity_ == 0) {
  1910. infoz.RecordRehash(0);
  1911. }
  1912. c.set_infoz(infoz);
  1913. }
  1914. return grow_single_group;
  1915. }
  1916. // Relocates slots into new single group consistent with
  1917. // GrowIntoSingleGroupShuffleControlBytes.
  1918. //
  1919. // PRECONDITIONS:
  1920. // 1. GrowIntoSingleGroupShuffleControlBytes was already called.
  1921. template <class PolicyTraits, class Alloc>
  1922. void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref) {
  1923. ABSL_SWISSTABLE_ASSERT(old_capacity_ < Group::kWidth / 2);
  1924. ABSL_SWISSTABLE_ASSERT(
  1925. IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));
  1926. using slot_type = typename PolicyTraits::slot_type;
  1927. ABSL_SWISSTABLE_ASSERT(is_single_group(c.capacity()));
  1928. auto* new_slots = static_cast<slot_type*>(c.slot_array()) + 1;
  1929. auto* old_slots_ptr = static_cast<slot_type*>(old_slots());
  1930. auto* old_ctrl_ptr = old_ctrl();
  1931. for (size_t i = 0; i < old_capacity_; ++i, ++new_slots) {
  1932. if (IsFull(old_ctrl_ptr[i])) {
  1933. SanitizerUnpoisonMemoryRegion(new_slots, sizeof(slot_type));
  1934. PolicyTraits::transfer(&alloc_ref, new_slots, old_slots_ptr + i);
  1935. }
  1936. }
  1937. PoisonSingleGroupEmptySlots(c, sizeof(slot_type));
  1938. }
  1939. // Deallocates old backing array.
  1940. template <size_t AlignOfSlot, class CharAlloc>
  1941. void DeallocateOld(CharAlloc alloc_ref, size_t slot_size) {
  1942. SanitizerUnpoisonMemoryRegion(old_slots(), slot_size * old_capacity_);
  1943. auto layout = RawHashSetLayout(old_capacity_, AlignOfSlot, had_infoz_);
  1944. Deallocate<BackingArrayAlignment(AlignOfSlot)>(
  1945. &alloc_ref, old_ctrl() - layout.control_offset(),
  1946. layout.alloc_size(slot_size));
  1947. }
  1948. private:
  1949. // Returns true if `GrowSizeIntoSingleGroup` can be used for resizing.
  1950. static bool IsGrowingIntoSingleGroupApplicable(size_t old_capacity,
  1951. size_t new_capacity) {
1952. // NOTE: requiring `old_capacity < new_capacity` ensures that
1953. // `old_capacity < Group::kWidth / 2`, which enables faster copies of 8 bytes.
  1954. return is_single_group(new_capacity) && old_capacity < new_capacity;
  1955. }
  1956. // Relocates control bytes and slots into new single group for
  1957. // transferable objects.
  1958. // Must be called only if IsGrowingIntoSingleGroupApplicable returned true.
  1959. void GrowSizeIntoSingleGroupTransferable(CommonFields& c, size_t slot_size);
  1960. // If there was an SOO slot and slots are transferable, transfers the SOO slot
  1961. // into the new heap allocation. Must be called only if
  1962. // IsGrowingIntoSingleGroupApplicable returned true.
  1963. void TransferSlotAfterSoo(CommonFields& c, size_t slot_size);
  1964. // Shuffle control bits deterministically to the next capacity.
  1965. // Returns offset for newly added element with given hash.
  1966. //
  1967. // PRECONDITIONs:
  1968. // 1. new_ctrl is allocated for new_capacity,
  1969. // but not initialized.
  1970. // 2. new_capacity is a single group.
  1971. // 3. old_capacity > 0.
  1972. //
  1973. // All elements are transferred into the first `old_capacity + 1` positions
  1974. // of the new_ctrl. Elements are shifted by 1 in order to keep a space at the
  1975. // beginning for the new element.
1976. // The position of the newly added element is based on `H1` and is not
1977. // deterministic.
  1978. //
  1979. // Examples:
  1980. // S = kSentinel, E = kEmpty
  1981. //
  1982. // old_ctrl = 0SEEEEEEE...
  1983. // new_ctrl = E0ESE0EEE...
  1984. //
  1985. // old_ctrl = 012S012EEEEEEEEE...
  1986. // new_ctrl = E012EEESE012EEE...
  1987. //
  1988. // old_ctrl = 0123456S0123456EEEEEEEEEEE...
  1989. // new_ctrl = E0123456EEEEEESE0123456EEE...
  1990. void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* new_ctrl,
  1991. size_t new_capacity) const;
  1992. // If the table was SOO, initializes new control bytes. `h2` is the control
  1993. // byte corresponding to the full slot. Must be called only if
  1994. // IsGrowingIntoSingleGroupApplicable returned true.
  1995. // Requires: `had_soo_slot_ || h2 == ctrl_t::kEmpty`.
  1996. void InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2,
  1997. size_t new_capacity);
1998. // Shuffle trivially transferable slots in a way consistent with
  1999. // GrowIntoSingleGroupShuffleControlBytes.
  2000. //
2001. // PRECONDITIONS:
  2002. // 1. old_capacity must be non-zero.
  2003. // 2. new_ctrl is fully initialized using
  2004. // GrowIntoSingleGroupShuffleControlBytes.
  2005. // 3. new_slots is allocated and *not* poisoned.
  2006. //
  2007. // POSTCONDITIONS:
  2008. // 1. new_slots are transferred from old_slots_ consistent with
  2009. // GrowIntoSingleGroupShuffleControlBytes.
  2010. // 2. Empty new_slots are *not* poisoned.
  2011. void GrowIntoSingleGroupShuffleTransferableSlots(void* new_slots,
  2012. size_t slot_size) const;
  2013. // Poison empty slots that were transferred using the deterministic algorithm
  2014. // described above.
2015. // PRECONDITIONS:
  2016. // 1. new_ctrl is fully initialized using
  2017. // GrowIntoSingleGroupShuffleControlBytes.
  2018. // 2. new_slots is fully initialized consistent with
  2019. // GrowIntoSingleGroupShuffleControlBytes.
  2020. void PoisonSingleGroupEmptySlots(CommonFields& c, size_t slot_size) const {
2021. // Poison all non-full slots.
  2022. for (size_t i = 0; i < c.capacity(); ++i) {
  2023. if (!IsFull(c.control()[i])) {
  2024. SanitizerPoisonMemoryRegion(SlotAddress(c.slot_array(), i, slot_size),
  2025. slot_size);
  2026. }
  2027. }
  2028. }
  2029. HeapOrSoo old_heap_or_soo_;
  2030. size_t old_capacity_;
  2031. bool had_infoz_;
  2032. bool was_soo_;
  2033. bool had_soo_slot_;
  2034. // Either null infoz or a pre-sampled forced infoz for SOO tables.
  2035. HashtablezInfoHandle forced_infoz_;
  2036. };
  2037. inline void PrepareInsertCommon(CommonFields& common) {
  2038. common.increment_size();
  2039. common.maybe_increment_generation_on_insert();
  2040. }
  2041. // Like prepare_insert, but for the case of inserting into a full SOO table.
  2042. size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
  2043. CommonFields& common);
  2044. // PolicyFunctions bundles together some information for a particular
  2045. // raw_hash_set<T, ...> instantiation. This information is passed to
  2046. // type-erased functions that want to do small amounts of type-specific
  2047. // work.
  2048. struct PolicyFunctions {
  2049. size_t slot_size;
  2050. // Returns the pointer to the hash function stored in the set.
  2051. const void* (*hash_fn)(const CommonFields& common);
  2052. // Returns the hash of the pointed-to slot.
  2053. size_t (*hash_slot)(const void* hash_fn, void* slot);
  2054. // Transfers the contents of src_slot to dst_slot.
  2055. void (*transfer)(void* set, void* dst_slot, void* src_slot);
  2056. // Deallocates the backing store from common.
  2057. void (*dealloc)(CommonFields& common, const PolicyFunctions& policy);
  2058. // Resizes set to the new capacity.
  2059. // Arguments are used as in raw_hash_set::resize_impl.
  2060. void (*resize)(CommonFields& common, size_t new_capacity,
  2061. HashtablezInfoHandle forced_infoz);
  2062. };
  2063. // ClearBackingArray clears the backing array, either modifying it in place,
  2064. // or creating a new one based on the value of "reuse".
  2065. // REQUIRES: c.capacity > 0
  2066. void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
  2067. bool reuse, bool soo_enabled);
  2068. // Type-erased version of raw_hash_set::erase_meta_only.
  2069. void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size);
  2070. // Function to place in PolicyFunctions::dealloc for raw_hash_sets
  2071. // that are using std::allocator. This allows us to share the same
  2072. // function body for raw_hash_set instantiations that have the
  2073. // same slot alignment.
  2074. template <size_t AlignOfSlot>
  2075. ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
  2076. const PolicyFunctions& policy) {
  2077. // Unpoison before returning the memory to the allocator.
  2078. SanitizerUnpoisonMemoryRegion(common.slot_array(),
  2079. policy.slot_size * common.capacity());
  2080. std::allocator<char> alloc;
  2081. common.infoz().Unregister();
  2082. Deallocate<BackingArrayAlignment(AlignOfSlot)>(
  2083. &alloc, common.backing_array_start(),
  2084. common.alloc_size(policy.slot_size, AlignOfSlot));
  2085. }
  2086. // For trivially relocatable types we use memcpy directly. This allows us to
  2087. // share the same function body for raw_hash_set instantiations that have the
  2088. // same slot size as long as they are relocatable.
  2089. template <size_t SizeOfSlot>
  2090. ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) {
  2091. memcpy(dst, src, SizeOfSlot);
  2092. }
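// Illustrative sketch only (the actual wiring lives in
// raw_hash_set::GetPolicyFunctions() and may differ): an instantiation with a
// trivially relocatable slot type could plug these shared helpers into its
// PolicyFunctions bundle. `MyHashFnGetter`, `MyHashSlot`, and `MyResize` are
// hypothetical stand-ins for the type-specific functions.
//
//   static constexpr PolicyFunctions kPolicy = {
//       sizeof(slot_type),
//       &MyHashFnGetter,                          // hash_fn
//       &MyHashSlot,                              // hash_slot
//       &TransferRelocatable<sizeof(slot_type)>,  // memcpy-based transfer
//       &DeallocateStandard<alignof(slot_type)>,  // std::allocator dealloc
//       &MyResize,                                // resize
//   };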
  2093. // Type erased raw_hash_set::get_hash_ref_fn for the empty hash function case.
  2094. const void* GetHashRefForEmptyHasher(const CommonFields& common);
  2095. // Given the hash of a value not currently in the table and the first empty
  2096. // slot in the probe sequence, finds a viable slot index to insert it at.
  2097. //
  2098. // In case there's no space left, the table can be resized or rehashed
  2099. // (for tables with deleted slots, see FindInsertPositionWithGrowthOrRehash).
  2100. //
2101. // If there are no deleted slots and growth_left is positive, the element
2102. // can be inserted at the provided `target` position.
  2103. //
  2104. // When the table has deleted slots (according to GrowthInfo), the target
  2105. // position will be searched one more time using `find_first_non_full`.
  2106. //
  2107. // REQUIRES: Table is not SOO.
  2108. // REQUIRES: At least one non-full slot available.
  2109. // REQUIRES: `target` is a valid empty position to insert.
  2110. size_t PrepareInsertNonSoo(CommonFields& common, size_t hash, FindInfo target,
  2111. const PolicyFunctions& policy);
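// A rough, commented-out sketch of the decision flow described above. This is
// illustrative pseudo-code only, not the actual implementation:
//
//   size_t PrepareInsertNonSoo(CommonFields& common, size_t hash,
//                              FindInfo target, const PolicyFunctions& policy) {
//     if (/* GrowthInfo reports no growth left or deleted slots */) {
//       // Grow/rehash if needed, then re-probe for a fresh target.
//       target = find_first_non_full(common, hash);
//     }
//     PrepareInsertCommon(common);
//     SetCtrl(common, target.offset, H2(hash), policy.slot_size);
//     return target.offset;
//   }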
  2112. // A SwissTable.
  2113. //
  2114. // Policy: a policy defines how to perform different operations on
  2115. // the slots of the hashtable (see hash_policy_traits.h for the full interface
  2116. // of policy).
  2117. //
  2118. // Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
  2119. // functor should accept a key and return size_t as hash. For best performance
  2120. // it is important that the hash function provides high entropy across all bits
  2121. // of the hash.
  2122. //
  2123. // Eq: a (possibly polymorphic) functor that compares two keys for equality. It
  2124. // should accept two (of possibly different type) keys and return a bool: true
  2125. // if they are equal, false if they are not. If two keys compare equal, then
  2126. // their hash values as defined by Hash MUST be equal.
  2127. //
  2128. // Allocator: an Allocator
  2129. // [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
  2130. // the storage of the hashtable will be allocated and the elements will be
  2131. // constructed and destroyed.
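// A hedged illustration of the Hash/Eq contract described above, using
// hypothetical functors (user code normally reaches raw_hash_set through
// absl::flat_hash_set / absl::flat_hash_map rather than directly):
//
//   struct CaseInsensitiveHash {
//     size_t operator()(absl::string_view s) const {
//       // Lowercasing first guarantees: equal keys => equal hashes.
//       return absl::Hash<std::string>{}(absl::AsciiStrToLower(s));
//     }
//   };
//   struct CaseInsensitiveEq {
//     bool operator()(absl::string_view a, absl::string_view b) const {
//       return absl::EqualsIgnoreCase(a, b);
//     }
//   };
//   absl::flat_hash_set<std::string, CaseInsensitiveHash, CaseInsensitiveEq> s;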
  2132. template <class Policy, class Hash, class Eq, class Alloc>
  2133. class raw_hash_set {
  2134. using PolicyTraits = hash_policy_traits<Policy>;
  2135. using KeyArgImpl =
  2136. KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
  2137. public:
  2138. using init_type = typename PolicyTraits::init_type;
  2139. using key_type = typename PolicyTraits::key_type;
  2140. // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
  2141. // code fixes!
  2142. using slot_type = typename PolicyTraits::slot_type;
  2143. using allocator_type = Alloc;
  2144. using size_type = size_t;
  2145. using difference_type = ptrdiff_t;
  2146. using hasher = Hash;
  2147. using key_equal = Eq;
  2148. using policy_type = Policy;
  2149. using value_type = typename PolicyTraits::value_type;
  2150. using reference = value_type&;
  2151. using const_reference = const value_type&;
  2152. using pointer = typename absl::allocator_traits<
  2153. allocator_type>::template rebind_traits<value_type>::pointer;
  2154. using const_pointer = typename absl::allocator_traits<
  2155. allocator_type>::template rebind_traits<value_type>::const_pointer;
  2156. // Alias used for heterogeneous lookup functions.
  2157. // `key_arg<K>` evaluates to `K` when the functors are transparent and to
  2158. // `key_type` otherwise. It permits template argument deduction on `K` for the
  2159. // transparent case.
  2160. template <class K>
  2161. using key_arg = typename KeyArgImpl::template type<K, key_type>;
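// For illustration (a usage sketch, not part of this header): with the
// default transparent hash/eq functors of absl::flat_hash_set<std::string>,
// `key_arg<K>` deduces to the caller's key type and no temporary std::string
// is created:
//
//   absl::flat_hash_set<std::string> s = {"abc"};
//   absl::string_view sv = "abc";
//   s.find(sv);      // K deduced as absl::string_view; no temporary key.
//   s.contains(sv);  // Likewise heterogeneous.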
  2162. private:
  2163. // TODO(b/289225379): we could add extra SOO space inside raw_hash_set
  2164. // after CommonFields to allow inlining larger slot_types (e.g. std::string),
  2165. // but it's a bit complicated if we want to support incomplete mapped_type in
  2166. // flat_hash_map. We could potentially do this for flat_hash_set and for an
  2167. // allowlist of `mapped_type`s of flat_hash_map that includes e.g. arithmetic
  2168. // types, strings, cords, and pairs/tuples of allowlisted types.
  2169. constexpr static bool SooEnabled() {
  2170. return PolicyTraits::soo_enabled() &&
  2171. sizeof(slot_type) <= sizeof(HeapOrSoo) &&
  2172. alignof(slot_type) <= alignof(HeapOrSoo);
  2173. }
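// Rough size check behind the condition above (numbers are illustrative;
// HeapOrSoo is typically two pointers wide on 64-bit builds, and
// sizeof(std::string) varies by standard library):
//
//   sizeof(int64_t)     ==  8     <= 16  -> eligible for SOO (if the policy opts in)
//   sizeof(std::string) == 24..32 >  16  -> stored in the heap backing array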
  2174. constexpr static size_t DefaultCapacity() {
  2175. return SooEnabled() ? SooCapacity() : 0;
  2176. }
  2177. // Whether `size` fits in the SOO capacity of this table.
  2178. bool fits_in_soo(size_t size) const {
  2179. return SooEnabled() && size <= SooCapacity();
  2180. }
  2181. // Whether this table is in SOO mode or non-SOO mode.
  2182. bool is_soo() const { return fits_in_soo(capacity()); }
  2183. bool is_full_soo() const { return is_soo() && !empty(); }
  2184. // Give an early error when key_type is not hashable/eq.
  2185. auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
  2186. auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
  2187. using AllocTraits = absl::allocator_traits<allocator_type>;
  2188. using SlotAlloc = typename absl::allocator_traits<
  2189. allocator_type>::template rebind_alloc<slot_type>;
  2190. // People are often sloppy with the exact type of their allocator (sometimes
  2191. // it has an extra const or is missing the pair, but rebinds made it work
  2192. // anyway).
  2193. using CharAlloc =
  2194. typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
  2195. using SlotAllocTraits = typename absl::allocator_traits<
  2196. allocator_type>::template rebind_traits<slot_type>;
  2197. static_assert(std::is_lvalue_reference<reference>::value,
  2198. "Policy::element() must return a reference");
  2199. // An enabler for insert(T&&): T must be convertible to init_type or be the
  2200. // same as [cv] value_type [ref].
  2201. template <class T>
  2202. using Insertable = absl::disjunction<
  2203. std::is_same<absl::remove_cvref_t<reference>, absl::remove_cvref_t<T>>,
  2204. std::is_convertible<T, init_type>>;
  2205. template <class T>
  2206. using IsNotBitField = std::is_pointer<T*>;
  2207. // RequiresNotInit is a workaround for gcc prior to 7.1.
  2208. // See https://godbolt.org/g/Y4xsUh.
  2209. template <class T>
  2210. using RequiresNotInit =
  2211. typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
  2212. template <class... Ts>
  2213. using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
  2214. template <class T>
  2215. using IsDecomposableAndInsertable =
  2216. IsDecomposable<std::enable_if_t<Insertable<T>::value, T>>;
  2217. // Evaluates to true if an assignment from the given type would require the
  2218. // source object to remain alive for the life of the element.
  2219. template <class U>
  2220. using IsLifetimeBoundAssignmentFrom = std::conditional_t<
  2221. policy_trait_element_is_owner<Policy>::value, std::false_type,
  2222. type_traits_internal::IsLifetimeBoundAssignment<init_type, U>>;
  2223. public:
  2224. static_assert(std::is_same<pointer, value_type*>::value,
  2225. "Allocators with custom pointer types are not supported");
  2226. static_assert(std::is_same<const_pointer, const value_type*>::value,
  2227. "Allocators with custom pointer types are not supported");
  2228. class iterator : private HashSetIteratorGenerationInfo {
  2229. friend class raw_hash_set;
  2230. friend struct HashtableFreeFunctionsAccess;
  2231. public:
  2232. using iterator_category = std::forward_iterator_tag;
  2233. using value_type = typename raw_hash_set::value_type;
  2234. using reference =
  2235. absl::conditional_t<PolicyTraits::constant_iterators::value,
  2236. const value_type&, value_type&>;
  2237. using pointer = absl::remove_reference_t<reference>*;
  2238. using difference_type = typename raw_hash_set::difference_type;
  2239. iterator() {}
  2240. // PRECONDITION: not an end() iterator.
  2241. reference operator*() const {
  2242. AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()");
  2243. return unchecked_deref();
  2244. }
  2245. // PRECONDITION: not an end() iterator.
  2246. pointer operator->() const {
  2247. AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->");
  2248. return &operator*();
  2249. }
  2250. // PRECONDITION: not an end() iterator.
  2251. iterator& operator++() {
  2252. AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++");
  2253. ++ctrl_;
  2254. ++slot_;
  2255. skip_empty_or_deleted();
  2256. if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
  2257. return *this;
  2258. }
  2259. // PRECONDITION: not an end() iterator.
  2260. iterator operator++(int) {
  2261. auto tmp = *this;
  2262. ++*this;
  2263. return tmp;
  2264. }
  2265. friend bool operator==(const iterator& a, const iterator& b) {
  2266. AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr());
  2267. AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr());
  2268. AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_,
  2269. a.generation_ptr(), b.generation_ptr());
  2270. return a.ctrl_ == b.ctrl_;
  2271. }
  2272. friend bool operator!=(const iterator& a, const iterator& b) {
  2273. return !(a == b);
  2274. }
  2275. private:
  2276. iterator(ctrl_t* ctrl, slot_type* slot,
  2277. const GenerationType* generation_ptr)
  2278. : HashSetIteratorGenerationInfo(generation_ptr),
  2279. ctrl_(ctrl),
  2280. slot_(slot) {
  2281. // This assumption helps the compiler know that any non-end iterator is
  2282. // not equal to any end iterator.
  2283. ABSL_ASSUME(ctrl != nullptr);
  2284. }
  2285. // This constructor is used in begin() to avoid an MSan
  2286. // use-of-uninitialized-value error. Delegating from this constructor to
  2287. // the previous one doesn't avoid the error.
  2288. iterator(ctrl_t* ctrl, MaybeInitializedPtr slot,
  2289. const GenerationType* generation_ptr)
  2290. : HashSetIteratorGenerationInfo(generation_ptr),
  2291. ctrl_(ctrl),
  2292. slot_(to_slot(slot.get())) {
  2293. // This assumption helps the compiler know that any non-end iterator is
  2294. // not equal to any end iterator.
  2295. ABSL_ASSUME(ctrl != nullptr);
  2296. }
  2297. // For end() iterators.
  2298. explicit iterator(const GenerationType* generation_ptr)
  2299. : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {}
2300. // Fixes up `ctrl_` to point to a full slot or the sentinel by advancing
2301. // `ctrl_` and `slot_` until they reach one.
  2302. void skip_empty_or_deleted() {
  2303. while (IsEmptyOrDeleted(*ctrl_)) {
  2304. uint32_t shift =
  2305. GroupFullEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted();
  2306. ctrl_ += shift;
  2307. slot_ += shift;
  2308. }
  2309. }
  2310. ctrl_t* control() const { return ctrl_; }
  2311. slot_type* slot() const { return slot_; }
  2312. // We use EmptyGroup() for default-constructed iterators so that they can
  2313. // be distinguished from end iterators, which have nullptr ctrl_.
  2314. ctrl_t* ctrl_ = EmptyGroup();
  2315. // To avoid uninitialized member warnings, put slot_ in an anonymous union.
2316. // The member is not initialized on default-constructed and end iterators.
  2317. union {
  2318. slot_type* slot_;
  2319. };
  2320. // An equality check which skips ABSL Hardening iterator invalidation
  2321. // checks.
  2322. // Should be used when the lifetimes of the iterators are well-enough
  2323. // understood to prove that they cannot be invalid.
  2324. bool unchecked_equals(const iterator& b) { return ctrl_ == b.control(); }
  2325. // Dereferences the iterator without ABSL Hardening iterator invalidation
  2326. // checks.
  2327. reference unchecked_deref() const { return PolicyTraits::element(slot_); }
  2328. };
  2329. class const_iterator {
  2330. friend class raw_hash_set;
  2331. template <class Container, typename Enabler>
  2332. friend struct absl::container_internal::hashtable_debug_internal::
  2333. HashtableDebugAccess;
  2334. public:
  2335. using iterator_category = typename iterator::iterator_category;
  2336. using value_type = typename raw_hash_set::value_type;
  2337. using reference = typename raw_hash_set::const_reference;
  2338. using pointer = typename raw_hash_set::const_pointer;
  2339. using difference_type = typename raw_hash_set::difference_type;
  2340. const_iterator() = default;
  2341. // Implicit construction from iterator.
  2342. const_iterator(iterator i) : inner_(std::move(i)) {} // NOLINT
  2343. reference operator*() const { return *inner_; }
  2344. pointer operator->() const { return inner_.operator->(); }
  2345. const_iterator& operator++() {
  2346. ++inner_;
  2347. return *this;
  2348. }
  2349. const_iterator operator++(int) { return inner_++; }
  2350. friend bool operator==(const const_iterator& a, const const_iterator& b) {
  2351. return a.inner_ == b.inner_;
  2352. }
  2353. friend bool operator!=(const const_iterator& a, const const_iterator& b) {
  2354. return !(a == b);
  2355. }
  2356. private:
  2357. const_iterator(const ctrl_t* ctrl, const slot_type* slot,
  2358. const GenerationType* gen)
  2359. : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot), gen) {
  2360. }
  2361. ctrl_t* control() const { return inner_.control(); }
  2362. slot_type* slot() const { return inner_.slot(); }
  2363. iterator inner_;
  2364. bool unchecked_equals(const const_iterator& b) {
  2365. return inner_.unchecked_equals(b.inner_);
  2366. }
  2367. };
  2368. using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
  2369. using insert_return_type = InsertReturnType<iterator, node_type>;
  2370. // Note: can't use `= default` due to non-default noexcept (causes
  2371. // problems for some compilers). NOLINTNEXTLINE
  2372. raw_hash_set() noexcept(
  2373. std::is_nothrow_default_constructible<hasher>::value &&
  2374. std::is_nothrow_default_constructible<key_equal>::value &&
  2375. std::is_nothrow_default_constructible<allocator_type>::value) {}
  2376. ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set(
  2377. size_t bucket_count, const hasher& hash = hasher(),
  2378. const key_equal& eq = key_equal(),
  2379. const allocator_type& alloc = allocator_type())
  2380. : settings_(CommonFields::CreateDefault<SooEnabled()>(), hash, eq,
  2381. alloc) {
  2382. if (bucket_count > DefaultCapacity()) {
  2383. if (ABSL_PREDICT_FALSE(bucket_count >
  2384. MaxValidCapacity<sizeof(slot_type)>())) {
  2385. HashTableSizeOverflow();
  2386. }
  2387. resize(NormalizeCapacity(bucket_count));
  2388. }
  2389. }
  2390. raw_hash_set(size_t bucket_count, const hasher& hash,
  2391. const allocator_type& alloc)
  2392. : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
  2393. raw_hash_set(size_t bucket_count, const allocator_type& alloc)
  2394. : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
  2395. explicit raw_hash_set(const allocator_type& alloc)
  2396. : raw_hash_set(0, hasher(), key_equal(), alloc) {}
  2397. template <class InputIter>
  2398. raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
  2399. const hasher& hash = hasher(), const key_equal& eq = key_equal(),
  2400. const allocator_type& alloc = allocator_type())
  2401. : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
  2402. hash, eq, alloc) {
  2403. insert(first, last);
  2404. }
  2405. template <class InputIter>
  2406. raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
  2407. const hasher& hash, const allocator_type& alloc)
  2408. : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
  2409. template <class InputIter>
  2410. raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
  2411. const allocator_type& alloc)
  2412. : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
  2413. template <class InputIter>
  2414. raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
  2415. : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
  2416. // Instead of accepting std::initializer_list<value_type> as the first
  2417. // argument like std::unordered_set<value_type> does, we have two overloads
  2418. // that accept std::initializer_list<T> and std::initializer_list<init_type>.
  2419. // This is advantageous for performance.
  2420. //
  2421. // // Turns {"abc", "def"} into std::initializer_list<std::string>, then
  2422. // // copies the strings into the set.
  2423. // std::unordered_set<std::string> s = {"abc", "def"};
  2424. //
  2425. // // Turns {"abc", "def"} into std::initializer_list<const char*>, then
  2426. // // copies the strings into the set.
  2427. // absl::flat_hash_set<std::string> s = {"abc", "def"};
  2428. //
  2429. // The same trick is used in insert().
  2430. //
  2431. // The enabler is necessary to prevent this constructor from triggering where
  2432. // the copy constructor is meant to be called.
  2433. //
  2434. // absl::flat_hash_set<int> a, b{a};
  2435. //
  2436. // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
  2437. template <class T, RequiresNotInit<T> = 0,
  2438. std::enable_if_t<Insertable<T>::value, int> = 0>
  2439. raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
  2440. const hasher& hash = hasher(), const key_equal& eq = key_equal(),
  2441. const allocator_type& alloc = allocator_type())
  2442. : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
  2443. raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
  2444. const hasher& hash = hasher(), const key_equal& eq = key_equal(),
  2445. const allocator_type& alloc = allocator_type())
  2446. : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
  2447. template <class T, RequiresNotInit<T> = 0,
  2448. std::enable_if_t<Insertable<T>::value, int> = 0>
  2449. raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
  2450. const hasher& hash, const allocator_type& alloc)
  2451. : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
  2452. raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
  2453. const hasher& hash, const allocator_type& alloc)
  2454. : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
  2455. template <class T, RequiresNotInit<T> = 0,
  2456. std::enable_if_t<Insertable<T>::value, int> = 0>
  2457. raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
  2458. const allocator_type& alloc)
  2459. : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
  2460. raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
  2461. const allocator_type& alloc)
  2462. : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
  2463. template <class T, RequiresNotInit<T> = 0,
  2464. std::enable_if_t<Insertable<T>::value, int> = 0>
  2465. raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
  2466. : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
  2467. raw_hash_set(std::initializer_list<init_type> init,
  2468. const allocator_type& alloc)
  2469. : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
  2470. raw_hash_set(const raw_hash_set& that)
  2471. : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
  2472. that.alloc_ref())) {}
  2473. raw_hash_set(const raw_hash_set& that, const allocator_type& a)
  2474. : raw_hash_set(GrowthToLowerboundCapacity(that.size()), that.hash_ref(),
  2475. that.eq_ref(), a) {
  2476. that.AssertNotDebugCapacity();
  2477. const size_t size = that.size();
  2478. if (size == 0) {
  2479. return;
  2480. }
  2481. // We don't use `that.is_soo()` here because `that` can have non-SOO
  2482. // capacity but have a size that fits into SOO capacity.
  2483. if (fits_in_soo(size)) {
  2484. ABSL_SWISSTABLE_ASSERT(size == 1);
  2485. common().set_full_soo();
  2486. emplace_at(soo_iterator(), *that.begin());
  2487. const HashtablezInfoHandle infoz = try_sample_soo();
  2488. if (infoz.IsSampled()) resize_with_soo_infoz(infoz);
  2489. return;
  2490. }
  2491. ABSL_SWISSTABLE_ASSERT(!that.is_soo());
  2492. const size_t cap = capacity();
2493. // Note about single group tables:
2494. // 1. It is correct to have any order of elements.
2495. // 2. Order has to be non-deterministic.
2496. // 3. We are assigning elements with arbitrary `shift` starting from
2497. //    `capacity + shift` position.
2498. // 4. `shift` must be coprime with `capacity + 1` in order to be able to use
2499. //    modular arithmetic to traverse all positions, instead of cycling
2500. //    through a subset of positions. Odd numbers are coprime with any
2501. //    `capacity + 1` (2^N); see the worked example below.
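// Worked example of the traversal above (illustrative numbers only): with
// cap = 7 (so cap + 1 = 8) and an odd shift of 3, starting from offset = 7
// the successive values of `offset = (offset + shift) & cap` are
//   2, 5, 0, 3, 6, 1, 4, 7,
// i.e. every position is produced exactly once before the cycle repeats.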
  2502. size_t offset = cap;
  2503. const size_t shift =
  2504. is_single_group(cap) ? (PerTableSalt(control()) | 1) : 0;
  2505. IterateOverFullSlots(
  2506. that.common(), that.slot_array(),
  2507. [&](const ctrl_t* that_ctrl,
  2508. slot_type* that_slot) ABSL_ATTRIBUTE_ALWAYS_INLINE {
  2509. if (shift == 0) {
  2510. // Big tables case. Position must be searched via probing.
2511. // The table is guaranteed to be empty, so we can do this faster than
2512. // a full `insert`.
  2513. const size_t hash = PolicyTraits::apply(
  2514. HashElement{hash_ref()}, PolicyTraits::element(that_slot));
  2515. FindInfo target = find_first_non_full_outofline(common(), hash);
  2516. infoz().RecordInsert(hash, target.probe_length);
  2517. offset = target.offset;
  2518. } else {
  2519. // Small tables case. Next position is computed via shift.
  2520. offset = (offset + shift) & cap;
  2521. }
  2522. const h2_t h2 = static_cast<h2_t>(*that_ctrl);
2523. ABSL_SWISSTABLE_ASSERT( // We rely on the hash not changing for
2524. // small tables.
  2525. H2(PolicyTraits::apply(HashElement{hash_ref()},
  2526. PolicyTraits::element(that_slot))) == h2 &&
  2527. "hash function value changed unexpectedly during the copy");
  2528. SetCtrl(common(), offset, h2, sizeof(slot_type));
  2529. emplace_at(iterator_at(offset), PolicyTraits::element(that_slot));
  2530. common().maybe_increment_generation_on_insert();
  2531. });
  2532. if (shift != 0) {
2533. // When copying a small table we do not record individual inserts:
2534. // RecordInsert requires the hash, which is not computed for small tables.
  2535. infoz().RecordStorageChanged(size, cap);
  2536. }
  2537. common().set_size(size);
  2538. growth_info().OverwriteManyEmptyAsFull(size);
  2539. }
  2540. ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
  2541. std::is_nothrow_copy_constructible<hasher>::value &&
  2542. std::is_nothrow_copy_constructible<key_equal>::value &&
  2543. std::is_nothrow_copy_constructible<allocator_type>::value)
  2544. : // Hash, equality and allocator are copied instead of moved because
  2545. // `that` must be left valid. If Hash is std::function<Key>, moving it
  2546. // would create a nullptr functor that cannot be called.
  2547. // Note: we avoid using exchange for better generated code.
  2548. settings_(PolicyTraits::transfer_uses_memcpy() || !that.is_full_soo()
  2549. ? std::move(that.common())
  2550. : CommonFields{full_soo_tag_t{}},
  2551. that.hash_ref(), that.eq_ref(), that.alloc_ref()) {
  2552. if (!PolicyTraits::transfer_uses_memcpy() && that.is_full_soo()) {
  2553. transfer(soo_slot(), that.soo_slot());
  2554. }
  2555. that.common() = CommonFields::CreateDefault<SooEnabled()>();
  2556. annotate_for_bug_detection_on_move(that);
  2557. }
  2558. raw_hash_set(raw_hash_set&& that, const allocator_type& a)
  2559. : settings_(CommonFields::CreateDefault<SooEnabled()>(), that.hash_ref(),
  2560. that.eq_ref(), a) {
  2561. if (a == that.alloc_ref()) {
  2562. swap_common(that);
  2563. annotate_for_bug_detection_on_move(that);
  2564. } else {
  2565. move_elements_allocs_unequal(std::move(that));
  2566. }
  2567. }
  2568. raw_hash_set& operator=(const raw_hash_set& that) {
  2569. that.AssertNotDebugCapacity();
  2570. if (ABSL_PREDICT_FALSE(this == &that)) return *this;
  2571. constexpr bool propagate_alloc =
  2572. AllocTraits::propagate_on_container_copy_assignment::value;
  2573. // TODO(ezb): maybe avoid allocating a new backing array if this->capacity()
  2574. // is an exact match for that.size(). If this->capacity() is too big, then
  2575. // it would make iteration very slow to reuse the allocation. Maybe we can
  2576. // do the same heuristic as clear() and reuse if it's small enough.
  2577. raw_hash_set tmp(that, propagate_alloc ? that.alloc_ref() : alloc_ref());
  2578. // NOLINTNEXTLINE: not returning *this for performance.
  2579. return assign_impl<propagate_alloc>(std::move(tmp));
  2580. }
  2581. raw_hash_set& operator=(raw_hash_set&& that) noexcept(
  2582. absl::allocator_traits<allocator_type>::is_always_equal::value &&
  2583. std::is_nothrow_move_assignable<hasher>::value &&
  2584. std::is_nothrow_move_assignable<key_equal>::value) {
  2585. // TODO(sbenza): We should only use the operations from the noexcept clause
  2586. // to make sure we actually adhere to that contract.
  2587. // NOLINTNEXTLINE: not returning *this for performance.
  2588. return move_assign(
  2589. std::move(that),
  2590. typename AllocTraits::propagate_on_container_move_assignment());
  2591. }
  2592. ~raw_hash_set() {
  2593. destructor_impl();
  2594. #ifndef NDEBUG
  2595. common().set_capacity(InvalidCapacity::kDestroyed);
  2596. #endif
  2597. }
  2598. iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2599. if (ABSL_PREDICT_FALSE(empty())) return end();
  2600. if (is_soo()) return soo_iterator();
  2601. iterator it = {control(), common().slots_union(),
  2602. common().generation_ptr()};
  2603. it.skip_empty_or_deleted();
  2604. ABSL_SWISSTABLE_ASSERT(IsFull(*it.control()));
  2605. return it;
  2606. }
  2607. iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2608. AssertNotDebugCapacity();
  2609. return iterator(common().generation_ptr());
  2610. }
  2611. const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2612. return const_cast<raw_hash_set*>(this)->begin();
  2613. }
  2614. const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2615. return const_cast<raw_hash_set*>(this)->end();
  2616. }
  2617. const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2618. return begin();
  2619. }
  2620. const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
  2621. bool empty() const { return !size(); }
  2622. size_t size() const {
  2623. AssertNotDebugCapacity();
  2624. return common().size();
  2625. }
  2626. size_t capacity() const {
  2627. const size_t cap = common().capacity();
2628. // Compiler complains when using functions in ASSUME, so use a local variable.
  2629. ABSL_ATTRIBUTE_UNUSED static constexpr size_t kDefaultCapacity =
  2630. DefaultCapacity();
  2631. ABSL_ASSUME(cap >= kDefaultCapacity);
  2632. return cap;
  2633. }
  2634. size_t max_size() const {
  2635. return CapacityToGrowth(MaxValidCapacity<sizeof(slot_type)>());
  2636. }
  2637. ABSL_ATTRIBUTE_REINITIALIZES void clear() {
  2638. if (SwisstableGenerationsEnabled() &&
  2639. capacity() >= InvalidCapacity::kMovedFrom) {
  2640. common().set_capacity(DefaultCapacity());
  2641. }
  2642. AssertNotDebugCapacity();
  2643. // Iterating over this container is O(bucket_count()). When bucket_count()
  2644. // is much greater than size(), iteration becomes prohibitively expensive.
  2645. // For clear() it is more important to reuse the allocated array when the
2646. // container is small because allocation takes a comparatively long time
  2647. // compared to destruction of the elements of the container. So we pick the
  2648. // largest bucket_count() threshold for which iteration is still fast and
  2649. // past that we simply deallocate the array.
  2650. const size_t cap = capacity();
  2651. if (cap == 0) {
  2652. // Already guaranteed to be empty; so nothing to do.
  2653. } else if (is_soo()) {
  2654. if (!empty()) destroy(soo_slot());
  2655. common().set_empty_soo();
  2656. } else {
  2657. destroy_slots();
  2658. ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128,
  2659. SooEnabled());
  2660. }
  2661. common().set_reserved_growth(0);
  2662. common().set_reservation_size(0);
  2663. }
  2664. // This overload kicks in when the argument is an rvalue of insertable and
  2665. // decomposable type other than init_type.
  2666. //
  2667. // flat_hash_map<std::string, int> m;
  2668. // m.insert(std::make_pair("abc", 42));
  2669. template <class T,
  2670. std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
  2671. IsNotBitField<T>::value &&
  2672. !IsLifetimeBoundAssignmentFrom<T>::value,
  2673. int> = 0>
  2674. std::pair<iterator, bool> insert(T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2675. return emplace(std::forward<T>(value));
  2676. }
  2677. template <class T,
  2678. std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
  2679. IsNotBitField<T>::value &&
  2680. IsLifetimeBoundAssignmentFrom<T>::value,
  2681. int> = 0>
  2682. std::pair<iterator, bool> insert(
  2683. T&& value ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
  2684. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2685. return emplace(std::forward<T>(value));
  2686. }
  2687. // This overload kicks in when the argument is a bitfield or an lvalue of
  2688. // insertable and decomposable type.
  2689. //
  2690. // union { int n : 1; };
  2691. // flat_hash_set<int> s;
  2692. // s.insert(n);
  2693. //
  2694. // flat_hash_set<std::string> s;
  2695. // const char* p = "hello";
  2696. // s.insert(p);
  2697. //
  2698. template <class T, std::enable_if_t<
  2699. IsDecomposableAndInsertable<const T&>::value &&
  2700. !IsLifetimeBoundAssignmentFrom<const T&>::value,
  2701. int> = 0>
  2702. std::pair<iterator, bool> insert(const T& value)
  2703. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2704. return emplace(value);
  2705. }
  2706. template <class T,
  2707. std::enable_if_t<IsDecomposableAndInsertable<const T&>::value &&
  2708. IsLifetimeBoundAssignmentFrom<const T&>::value,
  2709. int> = 0>
  2710. std::pair<iterator, bool> insert(
  2711. const T& value ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
  2712. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2713. return emplace(value);
  2714. }
  2715. // This overload kicks in when the argument is an rvalue of init_type. Its
  2716. // purpose is to handle brace-init-list arguments.
  2717. //
  2718. // flat_hash_map<std::string, int> s;
  2719. // s.insert({"abc", 42});
  2720. std::pair<iterator, bool> insert(init_type&& value)
  2721. ABSL_ATTRIBUTE_LIFETIME_BOUND
  2722. #if __cplusplus >= 202002L
  2723. requires(!IsLifetimeBoundAssignmentFrom<init_type>::value)
  2724. #endif
  2725. {
  2726. return emplace(std::move(value));
  2727. }
  2728. #if __cplusplus >= 202002L
  2729. std::pair<iterator, bool> insert(
  2730. init_type&& value ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
  2731. ABSL_ATTRIBUTE_LIFETIME_BOUND
  2732. requires(IsLifetimeBoundAssignmentFrom<init_type>::value)
  2733. {
  2734. return emplace(std::move(value));
  2735. }
  2736. #endif
  2737. template <class T,
  2738. std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
  2739. IsNotBitField<T>::value &&
  2740. !IsLifetimeBoundAssignmentFrom<T>::value,
  2741. int> = 0>
  2742. iterator insert(const_iterator, T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2743. return insert(std::forward<T>(value)).first;
  2744. }
  2745. template <class T,
  2746. std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
  2747. IsNotBitField<T>::value &&
  2748. IsLifetimeBoundAssignmentFrom<T>::value,
  2749. int> = 0>
  2750. iterator insert(const_iterator, T&& value ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(
  2751. this)) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2752. return insert(std::forward<T>(value)).first;
  2753. }
  2754. template <class T, std::enable_if_t<
  2755. IsDecomposableAndInsertable<const T&>::value, int> = 0>
  2756. iterator insert(const_iterator,
  2757. const T& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2758. return insert(value).first;
  2759. }
  2760. iterator insert(const_iterator,
  2761. init_type&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2762. return insert(std::move(value)).first;
  2763. }
  2764. template <class InputIt>
  2765. void insert(InputIt first, InputIt last) {
  2766. for (; first != last; ++first) emplace(*first);
  2767. }
  2768. template <class T, RequiresNotInit<T> = 0,
  2769. std::enable_if_t<Insertable<const T&>::value, int> = 0>
  2770. void insert(std::initializer_list<T> ilist) {
  2771. insert(ilist.begin(), ilist.end());
  2772. }
  2773. void insert(std::initializer_list<init_type> ilist) {
  2774. insert(ilist.begin(), ilist.end());
  2775. }
  2776. insert_return_type insert(node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2777. if (!node) return {end(), false, node_type()};
  2778. const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
  2779. auto res = PolicyTraits::apply(
  2780. InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
  2781. elem);
  2782. if (res.second) {
  2783. CommonAccess::Reset(&node);
  2784. return {res.first, true, node_type()};
  2785. } else {
  2786. return {res.first, false, std::move(node)};
  2787. }
  2788. }
  2789. iterator insert(const_iterator,
  2790. node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2791. auto res = insert(std::move(node));
  2792. node = std::move(res.node);
  2793. return res.position;
  2794. }
  2795. // This overload kicks in if we can deduce the key from args. This enables us
  2796. // to avoid constructing value_type if an entry with the same key already
  2797. // exists.
  2798. //
  2799. // For example:
  2800. //
  2801. // flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
  2802. // // Creates no std::string copies and makes no heap allocations.
  2803. // m.emplace("abc", "xyz");
  2804. template <class... Args,
  2805. std::enable_if_t<IsDecomposable<Args...>::value, int> = 0>
  2806. std::pair<iterator, bool> emplace(Args&&... args)
  2807. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2808. return PolicyTraits::apply(EmplaceDecomposable{*this},
  2809. std::forward<Args>(args)...);
  2810. }
  2811. // This overload kicks in if we cannot deduce the key from args. It constructs
  2812. // value_type unconditionally and then either moves it into the table or
2813. // destroys it.
  2814. template <class... Args,
  2815. std::enable_if_t<!IsDecomposable<Args...>::value, int> = 0>
  2816. std::pair<iterator, bool> emplace(Args&&... args)
  2817. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2818. alignas(slot_type) unsigned char raw[sizeof(slot_type)];
  2819. slot_type* slot = to_slot(&raw);
  2820. construct(slot, std::forward<Args>(args)...);
  2821. const auto& elem = PolicyTraits::element(slot);
  2822. return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
  2823. }
  2824. template <class... Args>
  2825. iterator emplace_hint(const_iterator,
  2826. Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2827. return emplace(std::forward<Args>(args)...).first;
  2828. }
  2829. // Extension API: support for lazy emplace.
  2830. //
  2831. // Looks up key in the table. If found, returns the iterator to the element.
  2832. // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`,
  2833. // and returns an iterator to the new element.
  2834. //
  2835. // `f` must abide by several restrictions:
  2836. // - it MUST call `raw_hash_set::constructor` with arguments as if a
  2837. // `raw_hash_set::value_type` is constructed,
  2838. // - it MUST NOT access the container before the call to
  2839. // `raw_hash_set::constructor`, and
  2840. // - it MUST NOT erase the lazily emplaced element.
  2841. // Doing any of these is undefined behavior.
  2842. //
  2843. // For example:
  2844. //
  2845. // std::unordered_set<ArenaString> s;
2846. // // Constructs an ArenaString even if "abc" is already in the set.
2847. // s.insert(ArenaString(&arena, "abc"));
2848. //
2849. // flat_hash_set<ArenaString> s;
2850. // // Constructs an ArenaString only if "abc" is not already in the set.
  2851. // s.lazy_emplace("abc", [&](const constructor& ctor) {
  2852. // ctor(&arena, "abc");
  2853. // });
  2854. //
  2855. // WARNING: This API is currently experimental. If there is a way to implement
  2856. // the same thing with the rest of the API, prefer that.
  2857. class constructor {
  2858. friend class raw_hash_set;
  2859. public:
  2860. template <class... Args>
  2861. void operator()(Args&&... args) const {
  2862. ABSL_SWISSTABLE_ASSERT(*slot_);
  2863. PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
  2864. *slot_ = nullptr;
  2865. }
  2866. private:
  2867. constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
  2868. allocator_type* alloc_;
  2869. slot_type** slot_;
  2870. };
  2871. template <class K = key_type, class F>
  2872. iterator lazy_emplace(const key_arg<K>& key,
  2873. F&& f) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2874. auto res = find_or_prepare_insert(key);
  2875. if (res.second) {
  2876. slot_type* slot = res.first.slot();
  2877. std::forward<F>(f)(constructor(&alloc_ref(), &slot));
  2878. ABSL_SWISSTABLE_ASSERT(!slot);
  2879. }
  2880. return res.first;
  2881. }
  2882. // Extension API: support for heterogeneous keys.
  2883. //
  2884. // std::unordered_set<std::string> s;
  2885. // // Turns "abc" into std::string.
  2886. // s.erase("abc");
  2887. //
  2888. // flat_hash_set<std::string> s;
  2889. // // Uses "abc" directly without copying it into std::string.
  2890. // s.erase("abc");
  2891. template <class K = key_type>
  2892. size_type erase(const key_arg<K>& key) {
  2893. auto it = find(key);
  2894. if (it == end()) return 0;
  2895. erase(it);
  2896. return 1;
  2897. }
  2898. // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`,
  2899. // this method returns void to reduce algorithmic complexity to O(1). The
2900. // iterator is invalidated, so any increment should be done before calling
  2901. // erase (e.g. `erase(it++)`).
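// Usage sketch: to erase while iterating, advance the iterator before the
// erase so the invalidated iterator is never touched again.
//
//   absl::flat_hash_set<int> s = {1, 2, 3, 4};
//   for (auto it = s.begin(); it != s.end();) {
//     if (*it % 2 == 0) {
//       s.erase(it++);  // increment first, then erase the old position
//     } else {
//       ++it;
//     }
//   }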
  2902. void erase(const_iterator cit) { erase(cit.inner_); }
  2903. // This overload is necessary because otherwise erase<K>(const K&) would be
  2904. // a better match if non-const iterator is passed as an argument.
  2905. void erase(iterator it) {
  2906. AssertNotDebugCapacity();
  2907. AssertIsFull(it.control(), it.generation(), it.generation_ptr(), "erase()");
  2908. destroy(it.slot());
  2909. if (is_soo()) {
  2910. common().set_empty_soo();
  2911. } else {
  2912. erase_meta_only(it);
  2913. }
  2914. }
  2915. iterator erase(const_iterator first,
  2916. const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2917. AssertNotDebugCapacity();
  2918. // We check for empty first because ClearBackingArray requires that
  2919. // capacity() > 0 as a precondition.
  2920. if (empty()) return end();
  2921. if (first == last) return last.inner_;
  2922. if (is_soo()) {
  2923. destroy(soo_slot());
  2924. common().set_empty_soo();
  2925. return end();
  2926. }
  2927. if (first == begin() && last == end()) {
  2928. // TODO(ezb): we access control bytes in destroy_slots so it could make
  2929. // sense to combine destroy_slots and ClearBackingArray to avoid cache
  2930. // misses when the table is large. Note that we also do this in clear().
  2931. destroy_slots();
  2932. ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true,
  2933. SooEnabled());
  2934. common().set_reserved_growth(common().reservation_size());
  2935. return end();
  2936. }
  2937. while (first != last) {
  2938. erase(first++);
  2939. }
  2940. return last.inner_;
  2941. }
  2942. // Moves elements from `src` into `this`.
  2943. // If the element already exists in `this`, it is left unmodified in `src`.
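// Usage sketch (not from this header): keys already present in the
// destination are left behind in `src`.
//
//   absl::flat_hash_set<std::string> dst = {"a"};
//   absl::flat_hash_set<std::string> src = {"a", "b"};
//   dst.merge(src);
//   // dst == {"a", "b"}; src == {"a"} because "a" already existed in dst.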
  2944. template <typename H, typename E>
  2945. void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT
  2946. AssertNotDebugCapacity();
  2947. src.AssertNotDebugCapacity();
  2948. assert(this != &src);
  2949. // Returns whether insertion took place.
  2950. const auto insert_slot = [this](slot_type* src_slot) {
  2951. return PolicyTraits::apply(InsertSlot<false>{*this, std::move(*src_slot)},
  2952. PolicyTraits::element(src_slot))
  2953. .second;
  2954. };
  2955. if (src.is_soo()) {
  2956. if (src.empty()) return;
  2957. if (insert_slot(src.soo_slot())) src.common().set_empty_soo();
  2958. return;
  2959. }
  2960. for (auto it = src.begin(), e = src.end(); it != e;) {
  2961. auto next = std::next(it);
  2962. if (insert_slot(it.slot())) src.erase_meta_only(it);
  2963. it = next;
  2964. }
  2965. }
  2966. template <typename H, typename E>
  2967. void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
  2968. merge(src);
  2969. }
  2970. node_type extract(const_iterator position) {
  2971. AssertNotDebugCapacity();
  2972. AssertIsFull(position.control(), position.inner_.generation(),
  2973. position.inner_.generation_ptr(), "extract()");
  2974. auto node = CommonAccess::Transfer<node_type>(alloc_ref(), position.slot());
  2975. if (is_soo()) {
  2976. common().set_empty_soo();
  2977. } else {
  2978. erase_meta_only(position);
  2979. }
  2980. return node;
  2981. }
  2982. template <class K = key_type,
  2983. std::enable_if_t<!std::is_same<K, iterator>::value, int> = 0>
  2984. node_type extract(const key_arg<K>& key) {
  2985. auto it = find(key);
  2986. return it == end() ? node_type() : extract(const_iterator{it});
  2987. }
  2988. void swap(raw_hash_set& that) noexcept(
  2989. IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
  2990. IsNoThrowSwappable<allocator_type>(
  2991. typename AllocTraits::propagate_on_container_swap{})) {
  2992. AssertNotDebugCapacity();
  2993. that.AssertNotDebugCapacity();
  2994. using std::swap;
  2995. swap_common(that);
  2996. swap(hash_ref(), that.hash_ref());
  2997. swap(eq_ref(), that.eq_ref());
  2998. SwapAlloc(alloc_ref(), that.alloc_ref(),
  2999. typename AllocTraits::propagate_on_container_swap{});
  3000. }
  3001. void rehash(size_t n) {
  3002. const size_t cap = capacity();
  3003. if (n == 0) {
  3004. if (cap == 0 || is_soo()) return;
  3005. if (empty()) {
  3006. ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
  3007. SooEnabled());
  3008. return;
  3009. }
  3010. if (fits_in_soo(size())) {
  3011. // When the table is already sampled, we keep it sampled.
  3012. if (infoz().IsSampled()) {
  3013. const size_t kInitialSampledCapacity = NextCapacity(SooCapacity());
  3014. if (capacity() > kInitialSampledCapacity) {
  3015. resize(kInitialSampledCapacity);
  3016. }
  3017. // This asserts that we didn't lose sampling coverage in `resize`.
  3018. ABSL_SWISSTABLE_ASSERT(infoz().IsSampled());
  3019. return;
  3020. }
  3021. alignas(slot_type) unsigned char slot_space[sizeof(slot_type)];
  3022. slot_type* tmp_slot = to_slot(slot_space);
  3023. transfer(tmp_slot, begin().slot());
  3024. ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
  3025. SooEnabled());
  3026. transfer(soo_slot(), tmp_slot);
  3027. common().set_full_soo();
  3028. return;
  3029. }
  3030. }
  3031. // bitor is a faster way of doing `max` here. We will round up to the next
  3032. // power-of-2-minus-1, so bitor is good enough.
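// Worked example of the claim above: with n = 5 and
// GrowthToLowerboundCapacity(size()) = 3, `5 | 3` is 7 while `max(5, 3)` is 5;
// after NormalizeCapacity rounds up to the next power-of-2-minus-1, both give
// a capacity of 7.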
  3033. auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
  3034. // n == 0 unconditionally rehashes as per the standard.
  3035. if (n == 0 || m > cap) {
  3036. if (ABSL_PREDICT_FALSE(m > MaxValidCapacity<sizeof(slot_type)>())) {
  3037. HashTableSizeOverflow();
  3038. }
  3039. resize(m);
  3040. // This is after resize, to ensure that we have completed the allocation
  3041. // and have potentially sampled the hashtable.
  3042. infoz().RecordReservation(n);
  3043. }
  3044. }
  3045. void reserve(size_t n) {
  3046. const size_t max_size_before_growth =
  3047. is_soo() ? SooCapacity() : size() + growth_left();
  3048. if (n > max_size_before_growth) {
  3049. if (ABSL_PREDICT_FALSE(n > max_size())) {
  3050. HashTableSizeOverflow();
  3051. }
  3052. size_t m = GrowthToLowerboundCapacity(n);
  3053. resize(NormalizeCapacity(m));
  3054. // This is after resize, to ensure that we have completed the allocation
  3055. // and have potentially sampled the hashtable.
  3056. infoz().RecordReservation(n);
  3057. }
  3058. common().reset_reserved_growth(n);
  3059. common().set_reservation_size(n);
  3060. }
  3061. // Extension API: support for heterogeneous keys.
  3062. //
  3063. // std::unordered_set<std::string> s;
  3064. // // Turns "abc" into std::string.
  3065. // s.count("abc");
  3066. //
  3067. // ch_set<std::string> s;
  3068. // // Uses "abc" directly without copying it into std::string.
  3069. // s.count("abc");
  3070. template <class K = key_type>
  3071. size_t count(const key_arg<K>& key) const {
  3072. return find(key) == end() ? 0 : 1;
  3073. }
  3074. // Issues CPU prefetch instructions for the memory needed to find or insert
3075. // a key. Like all lookup functions, this supports heterogeneous keys.
  3076. //
  3077. // NOTE: This is a very low level operation and should not be used without
  3078. // specific benchmarks indicating its importance.
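// Usage sketch (all names below are hypothetical): the prefetch pays off when
// other work can overlap the memory fetch before the actual lookup.
//
//   absl::flat_hash_map<int64_t, Payload> m = ...;
//   for (const Request& r : requests) {
//     m.prefetch(r.key);        // start pulling the probe target into cache
//     Preprocess(r);            // unrelated work overlaps the fetch
//     Handle(m.find(r.key));    // lookup is now more likely to hit cache
//   }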
  3079. template <class K = key_type>
  3080. void prefetch(const key_arg<K>& key) const {
  3081. if (capacity() == DefaultCapacity()) return;
  3082. (void)key;
  3083. // Avoid probing if we won't be able to prefetch the addresses received.
  3084. #ifdef ABSL_HAVE_PREFETCH
  3085. prefetch_heap_block();
  3086. auto seq = probe(common(), hash_ref()(key));
  3087. PrefetchToLocalCache(control() + seq.offset());
  3088. PrefetchToLocalCache(slot_array() + seq.offset());
  3089. #endif // ABSL_HAVE_PREFETCH
  3090. }
  3091. template <class K = key_type>
  3092. ABSL_DEPRECATE_AND_INLINE()
  3093. iterator find(const key_arg<K>& key,
  3094. size_t) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3095. return find(key);
  3096. }
  3097. // The API of find() has one extension: the type of the key argument doesn't
3098. // have to be key_type. This is so-called heterogeneous key support.
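// For example (a sketch, assuming the default transparent hash/eq):
//
//   absl::flat_hash_set<std::string> s = {"abc"};
//   s.find("abc");  // uses the const char* directly; no std::string temporary.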
  3099. template <class K = key_type>
  3100. iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3101. AssertOnFind(key);
  3102. if (is_soo()) return find_soo(key);
  3103. prefetch_heap_block();
  3104. return find_non_soo(key, hash_ref()(key));
  3105. }
  3106. template <class K = key_type>
  3107. ABSL_DEPRECATE_AND_INLINE()
  3108. const_iterator find(const key_arg<K>& key,
  3109. size_t) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3110. return find(key);
  3111. }
  3112. template <class K = key_type>
  3113. const_iterator find(const key_arg<K>& key) const
  3114. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3115. return const_cast<raw_hash_set*>(this)->find(key);
  3116. }
  3117. template <class K = key_type>
  3118. bool contains(const key_arg<K>& key) const {
  3119. // Here neither the iterator returned by `find()` nor `end()` can be invalid
  3120. // outside of potential thread-safety issues.
  3121. // `find()`'s return value is constructed, used, and then destructed
  3122. // all in this context.
  3123. return !find(key).unchecked_equals(end());
  3124. }
  3125. template <class K = key_type>
  3126. std::pair<iterator, iterator> equal_range(const key_arg<K>& key)
  3127. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3128. auto it = find(key);
  3129. if (it != end()) return {it, std::next(it)};
  3130. return {it, it};
  3131. }
  3132. template <class K = key_type>
  3133. std::pair<const_iterator, const_iterator> equal_range(
  3134. const key_arg<K>& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3135. auto it = find(key);
  3136. if (it != end()) return {it, std::next(it)};
  3137. return {it, it};
  3138. }
  3139. size_t bucket_count() const { return capacity(); }
  3140. float load_factor() const {
  3141. return capacity() ? static_cast<double>(size()) / capacity() : 0.0;
  3142. }
  3143. float max_load_factor() const { return 1.0f; }
  3144. void max_load_factor(float) {
  3145. // Does nothing.
  3146. }
  3147. hasher hash_function() const { return hash_ref(); }
  3148. key_equal key_eq() const { return eq_ref(); }
  3149. allocator_type get_allocator() const { return alloc_ref(); }
  3150. friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
  3151. if (a.size() != b.size()) return false;
  3152. const raw_hash_set* outer = &a;
  3153. const raw_hash_set* inner = &b;
  3154. if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
  3155. for (const value_type& elem : *outer) {
  3156. auto it = PolicyTraits::apply(FindElement{*inner}, elem);
  3157. if (it == inner->end()) return false;
  3158. // Note: we used key_equal to check for key equality in FindElement, but
  3159. // we may need to do an additional comparison using
3160. // value_type::operator==. E.g. in a map the keys could be equal while the
3161. // mapped_types are unequal; even in a set, key_equal could ignore some
3162. // fields that aren't ignored by operator==.
  3163. static constexpr bool kKeyEqIsValueEq =
  3164. std::is_same<key_type, value_type>::value &&
  3165. std::is_same<key_equal, hash_default_eq<key_type>>::value;
  3166. if (!kKeyEqIsValueEq && !(*it == elem)) return false;
  3167. }
  3168. return true;
  3169. }
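// Illustration of the semantics above (a sketch):
//
//   absl::flat_hash_map<int, int> a = {{1, 10}};
//   absl::flat_hash_map<int, int> b = {{1, 20}};
//   // a != b: the keys compare equal under key_equal, but operator== on
//   // value_type (std::pair<const int, int>) also compares mapped values.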
  3170. friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
  3171. return !(a == b);
  3172. }
  3173. template <typename H>
  3174. friend typename std::enable_if<H::template is_hashable<value_type>::value,
  3175. H>::type
  3176. AbslHashValue(H h, const raw_hash_set& s) {
  3177. return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
  3178. s.size());
  3179. }
  3180. friend void swap(raw_hash_set& a,
  3181. raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
  3182. a.swap(b);
  3183. }
  3184. private:
  3185. template <class Container, typename Enabler>
  3186. friend struct absl::container_internal::hashtable_debug_internal::
  3187. HashtableDebugAccess;
  3188. friend struct absl::container_internal::HashtableFreeFunctionsAccess;
  3189. struct FindElement {
  3190. template <class K, class... Args>
  3191. const_iterator operator()(const K& key, Args&&...) const {
  3192. return s.find(key);
  3193. }
  3194. const raw_hash_set& s;
  3195. };
  3196. struct HashElement {
  3197. template <class K, class... Args>
  3198. size_t operator()(const K& key, Args&&...) const {
  3199. return h(key);
  3200. }
  3201. const hasher& h;
  3202. };
  3203. template <class K1>
  3204. struct EqualElement {
  3205. template <class K2, class... Args>
  3206. bool operator()(const K2& lhs, Args&&...) const {
  3207. return eq(lhs, rhs);
  3208. }
  3209. const K1& rhs;
  3210. const key_equal& eq;
  3211. };
  struct EmplaceDecomposable {
    template <class K, class... Args>
    std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
      auto res = s.find_or_prepare_insert(key);
      if (res.second) {
        s.emplace_at(res.first, std::forward<Args>(args)...);
      }
      return res;
    }
    raw_hash_set& s;
  };
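  // Inserts an already-constructed slot: the slot is transferred into the
  // table if the key is new; otherwise, when `do_destroy` is true, it is
  // destroyed in place.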
  template <bool do_destroy>
  struct InsertSlot {
    template <class K, class... Args>
    std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
      auto res = s.find_or_prepare_insert(key);
      if (res.second) {
        s.transfer(res.first.slot(), &slot);
      } else if (do_destroy) {
        s.destroy(&slot);
      }
      return res;
    }
    raw_hash_set& s;
    // Constructed slot. Either moved into place or destroyed.
    slot_type&& slot;
  };

  template <typename... Args>
  inline void construct(slot_type* slot, Args&&... args) {
    common().RunWithReentrancyGuard([&] {
      PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
    });
  }
  inline void destroy(slot_type* slot) {
    common().RunWithReentrancyGuard(
        [&] { PolicyTraits::destroy(&alloc_ref(), slot); });
  }
  inline void transfer(slot_type* to, slot_type* from) {
    common().RunWithReentrancyGuard(
        [&] { PolicyTraits::transfer(&alloc_ref(), to, from); });
  }

  // TODO(b/289225379): consider having a helper class that has the impls for
  // SOO functionality.
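  // Finds `key` in an SOO table by comparing it directly against the single
  // inline slot; no hashing or probing is needed.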
  template <class K = key_type>
  iterator find_soo(const key_arg<K>& key) {
    ABSL_SWISSTABLE_ASSERT(is_soo());
    return empty() || !PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
                                           PolicyTraits::element(soo_slot()))
               ? end()
               : soo_iterator();
  }
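  // Finds `key` in a heap-allocated table: probes group by group, checking the
  // slots whose control byte matches H2(hash), and stops at the first group
  // that contains an empty slot.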
  template <class K = key_type>
  iterator find_non_soo(const key_arg<K>& key, size_t hash) {
    ABSL_SWISSTABLE_ASSERT(!is_soo());
    auto seq = probe(common(), hash);
    const ctrl_t* ctrl = control();
    while (true) {
      Group g{ctrl + seq.offset()};
      for (uint32_t i : g.Match(H2(hash))) {
        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
                EqualElement<K>{key, eq_ref()},
                PolicyTraits::element(slot_array() + seq.offset(i)))))
          return iterator_at(seq.offset(i));
      }
      if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
      seq.next();
      ABSL_SWISSTABLE_ASSERT(seq.index() <= capacity() && "full table!");
    }
  }

  // Conditionally samples hashtablez for SOO tables. This should be called on
  // insertion into an empty SOO table and in copy construction when the size
  // can fit in SOO capacity.
  inline HashtablezInfoHandle try_sample_soo() {
    ABSL_SWISSTABLE_ASSERT(is_soo());
    if (!ShouldSampleHashtablezInfo<CharAlloc>()) return HashtablezInfoHandle{};
    return Sample(sizeof(slot_type), sizeof(key_type), sizeof(value_type),
                  SooCapacity());
  }

  inline void destroy_slots() {
    ABSL_SWISSTABLE_ASSERT(!is_soo());
    if (PolicyTraits::template destroy_is_trivial<Alloc>()) return;
    IterateOverFullSlots(
        common(), slot_array(),
        [&](const ctrl_t*, slot_type* slot)
            ABSL_ATTRIBUTE_ALWAYS_INLINE { this->destroy(slot); });
  }

  inline void dealloc() {
    ABSL_SWISSTABLE_ASSERT(capacity() != 0);
    // Unpoison before returning the memory to the allocator.
    SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * capacity());
    infoz().Unregister();
    Deallocate<BackingArrayAlignment(alignof(slot_type))>(
        &alloc_ref(), common().backing_array_start(),
        common().alloc_size(sizeof(slot_type), alignof(slot_type)));
  }

  inline void destructor_impl() {
    if (SwisstableGenerationsEnabled() &&
        capacity() >= InvalidCapacity::kMovedFrom) {
      return;
    }
    if (capacity() == 0) return;
    if (is_soo()) {
      if (!empty()) {
        ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(destroy(soo_slot()));
      }
      return;
    }
    destroy_slots();
    dealloc();
  }

  // Erases, but does not destroy, the value pointed to by `it`.
  //
  // This merely updates the pertinent control byte. This can be used in
  // conjunction with Policy::transfer to move the object to another place.
  void erase_meta_only(const_iterator it) {
    ABSL_SWISSTABLE_ASSERT(!is_soo());
    EraseMetaOnly(common(), static_cast<size_t>(it.control() - control()),
                  sizeof(slot_type));
  }

  size_t hash_of(slot_type* slot) const {
    return PolicyTraits::apply(HashElement{hash_ref()},
                               PolicyTraits::element(slot));
  }
  // Resizes the table to the new capacity and moves all elements to their new
  // positions accordingly.
  //
  // Note that for better performance, instead of
  //   find_first_non_full(common(), hash),
  //   HashSetResizeHelper::FindFirstNonFullAfterResize(
  //       common(), old_capacity, hash)
  // can be called right after `resize`.
  void resize(size_t new_capacity) {
    raw_hash_set::resize_impl(common(), new_capacity, HashtablezInfoHandle{});
  }

  // As above, except that we also accept a pre-sampled, forced infoz for
  // SOO tables, since they need to switch from SOO to heap in order to
  // store the infoz.
  void resize_with_soo_infoz(HashtablezInfoHandle forced_infoz) {
    ABSL_SWISSTABLE_ASSERT(forced_infoz.IsSampled());
    raw_hash_set::resize_impl(common(), NextCapacity(SooCapacity()),
                              forced_infoz);
  }
  // Resizes the set to the new capacity.
  // It is a static function in order to use its pointer in GetPolicyFunctions.
  ABSL_ATTRIBUTE_NOINLINE static void resize_impl(
      CommonFields& common, size_t new_capacity,
      HashtablezInfoHandle forced_infoz) {
    raw_hash_set* set = reinterpret_cast<raw_hash_set*>(&common);
    ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
    ABSL_SWISSTABLE_ASSERT(!set->fits_in_soo(new_capacity));
    const bool was_soo = set->is_soo();
    const bool had_soo_slot = was_soo && !set->empty();
    const ctrl_t soo_slot_h2 =
        had_soo_slot ? static_cast<ctrl_t>(H2(set->hash_of(set->soo_slot())))
                     : ctrl_t::kEmpty;
    HashSetResizeHelper resize_helper(common, was_soo, had_soo_slot,
                                      forced_infoz);
    // Initialize HashSetResizeHelper::old_heap_or_soo_. We can't do this in
    // the HashSetResizeHelper constructor because it can't transfer slots when
    // transfer_uses_memcpy is false.
    // TODO(b/289225379): try to handle more of the SOO cases inside
    // InitializeSlots. See comment on cl/555990034 snapshot #63.
    if (PolicyTraits::transfer_uses_memcpy() || !had_soo_slot) {
      resize_helper.old_heap_or_soo() = common.heap_or_soo();
    } else {
      set->transfer(set->to_slot(resize_helper.old_soo_data()),
                    set->soo_slot());
    }
    common.set_capacity(new_capacity);
    // Note that `InitializeSlots` performs a different number of
    // initialization steps depending on the values of `transfer_uses_memcpy`
    // and the capacities. Refer to the comment in `InitializeSlots` for more
    // details.
    const bool grow_single_group =
        resize_helper.InitializeSlots<CharAlloc, sizeof(slot_type),
                                      PolicyTraits::transfer_uses_memcpy(),
                                      SooEnabled(), alignof(slot_type)>(
            common, CharAlloc(set->alloc_ref()), soo_slot_h2, sizeof(key_type),
            sizeof(value_type));
    // In the SooEnabled() case, capacity is never 0 so we don't check.
    if (!SooEnabled() && resize_helper.old_capacity() == 0) {
      // InitializeSlots did all the work including infoz().RecordRehash().
      return;
    }
    ABSL_SWISSTABLE_ASSERT(resize_helper.old_capacity() > 0);
    // Nothing more to do in this case.
    if (was_soo && !had_soo_slot) return;
    slot_type* new_slots = set->slot_array();
    if (grow_single_group) {
      if (PolicyTraits::transfer_uses_memcpy()) {
        // InitializeSlots did all the work.
        return;
      }
      if (was_soo) {
        set->transfer(new_slots + resize_helper.SooSlotIndex(),
                      to_slot(resize_helper.old_soo_data()));
        return;
      } else {
        // We want GrowSizeIntoSingleGroup to be called here in order to make
        // InitializeSlots not depend on PolicyTraits.
        resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common,
                                                            set->alloc_ref());
      }
    } else {
      // InitializeSlots prepares control bytes to correspond to an empty
      // table.
      const auto insert_slot = [&](slot_type* slot) {
        size_t hash = PolicyTraits::apply(HashElement{set->hash_ref()},
                                          PolicyTraits::element(slot));
        auto target = find_first_non_full(common, hash);
        SetCtrl(common, target.offset, H2(hash), sizeof(slot_type));
        set->transfer(new_slots + target.offset, slot);
        return target.probe_length;
      };
      if (was_soo) {
        insert_slot(to_slot(resize_helper.old_soo_data()));
        return;
      } else {
        auto* old_slots = static_cast<slot_type*>(resize_helper.old_slots());
        size_t total_probe_length = 0;
        for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
          if (IsFull(resize_helper.old_ctrl()[i])) {
            total_probe_length += insert_slot(old_slots + i);
          }
        }
        common.infoz().RecordRehash(total_probe_length);
      }
    }
    resize_helper.DeallocateOld<alignof(slot_type)>(CharAlloc(set->alloc_ref()),
                                                    sizeof(slot_type));
  }
  // Casting directly from e.g. char* to slot_type* can cause compilation
  // errors on Objective-C. This function converts to void* first, avoiding the
  // issue.
  static slot_type* to_slot(void* buf) { return static_cast<slot_type*>(buf); }
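  // Moves the common fields of `rhs` into `lhs`. When `rhs` holds a full SOO
  // slot and slots cannot be transferred with memcpy, the SOO element is
  // transferred explicitly via PolicyTraits::transfer.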
  // Requires that lhs does not have a full SOO slot.
  static void move_common(bool rhs_is_full_soo, allocator_type& rhs_alloc,
                          CommonFields& lhs, CommonFields&& rhs) {
    if (PolicyTraits::transfer_uses_memcpy() || !rhs_is_full_soo) {
      lhs = std::move(rhs);
    } else {
      lhs.move_non_heap_or_soo_fields(rhs);
      rhs.RunWithReentrancyGuard([&] {
        lhs.RunWithReentrancyGuard([&] {
          PolicyTraits::transfer(&rhs_alloc, to_slot(lhs.soo_data()),
                                 to_slot(rhs.soo_data()));
        });
      });
    }
  }
  // Swaps common fields making sure to avoid memcpy'ing a full SOO slot if we
  // aren't allowed to do so.
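  // In the non-memcpy case this is done with three move_common() calls through
  // an uninitialized temporary, so the SOO element is transferred rather than
  // copied byte-wise.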
  void swap_common(raw_hash_set& that) {
    using std::swap;
    if (PolicyTraits::transfer_uses_memcpy()) {
      swap(common(), that.common());
      return;
    }
    CommonFields tmp = CommonFields(uninitialized_tag_t{});
    const bool that_is_full_soo = that.is_full_soo();
    move_common(that_is_full_soo, that.alloc_ref(), tmp,
                std::move(that.common()));
    move_common(is_full_soo(), alloc_ref(), that.common(), std::move(common()));
    move_common(that_is_full_soo, that.alloc_ref(), common(), std::move(tmp));
  }

  void annotate_for_bug_detection_on_move(
      ABSL_ATTRIBUTE_UNUSED raw_hash_set& that) {
    // We only enable moved-from validation when generations are enabled
    // (rather than using NDEBUG) to avoid issues in which NDEBUG is enabled in
    // some translation units but not in others.
    if (SwisstableGenerationsEnabled()) {
      that.common().set_capacity(this == &that ? InvalidCapacity::kSelfMovedFrom
                                               : InvalidCapacity::kMovedFrom);
    }
    if (!SwisstableGenerationsEnabled() || capacity() == DefaultCapacity() ||
        capacity() > kAboveMaxValidCapacity) {
      return;
    }
    common().increment_generation();
    if (!empty() && common().should_rehash_for_bug_detection_on_move()) {
      resize(capacity());
    }
  }
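  // Implements move assignment: destroys the current contents, takes over
  // `that`'s fields (propagating the allocator when requested), and resets
  // `that` to a default-constructed state.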
  template <bool propagate_alloc>
  raw_hash_set& assign_impl(raw_hash_set&& that) {
    // We don't bother checking for this/that aliasing. We just need to avoid
    // breaking the invariants in that case.
    destructor_impl();
    move_common(that.is_full_soo(), that.alloc_ref(), common(),
                std::move(that.common()));
    hash_ref() = that.hash_ref();
    eq_ref() = that.eq_ref();
    CopyAlloc(alloc_ref(), that.alloc_ref(),
              std::integral_constant<bool, propagate_alloc>());
    that.common() = CommonFields::CreateDefault<SooEnabled()>();
    annotate_for_bug_detection_on_move(that);
    return *this;
  }
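  // Move-assignment fallback for unequal, non-propagating allocators: moves
  // each element individually into this table and then releases `that`'s
  // backing array.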
  raw_hash_set& move_elements_allocs_unequal(raw_hash_set&& that) {
    const size_t size = that.size();
    if (size == 0) return *this;
    reserve(size);
    for (iterator it = that.begin(); it != that.end(); ++it) {
      insert(std::move(PolicyTraits::element(it.slot())));
      that.destroy(it.slot());
    }
    if (!that.is_soo()) that.dealloc();
    that.common() = CommonFields::CreateDefault<SooEnabled()>();
    annotate_for_bug_detection_on_move(that);
    return *this;
  }

  raw_hash_set& move_assign(raw_hash_set&& that,
                            std::true_type /*propagate_alloc*/) {
    return assign_impl<true>(std::move(that));
  }
  raw_hash_set& move_assign(raw_hash_set&& that,
                            std::false_type /*propagate_alloc*/) {
    if (alloc_ref() == that.alloc_ref()) {
      return assign_impl<false>(std::move(that));
    }
    // Aliasing can't happen here because allocs would compare equal above.
    assert(this != &that);
    destructor_impl();
    // We can't take over `that`'s memory, so we need to move each element.
    // While moving elements, this table should use `that`'s hash/eq, so copy
    // the hash/eq before moving the elements.
    hash_ref() = that.hash_ref();
    eq_ref() = that.eq_ref();
    return move_elements_allocs_unequal(std::move(that));
  }
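  // find_or_prepare_insert specialization for SOO tables: fills the inline
  // slot when the table is empty, returns the existing element when the key
  // matches, and otherwise grows into a heap table before preparing the
  // insertion.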
  template <class K>
  std::pair<iterator, bool> find_or_prepare_insert_soo(const K& key) {
    if (empty()) {
      const HashtablezInfoHandle infoz = try_sample_soo();
      if (infoz.IsSampled()) {
        resize_with_soo_infoz(infoz);
      } else {
        common().set_full_soo();
        return {soo_iterator(), true};
      }
    } else if (PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
                                   PolicyTraits::element(soo_slot()))) {
      return {soo_iterator(), false};
    } else {
      resize(NextCapacity(SooCapacity()));
    }
    const size_t index =
        PrepareInsertAfterSoo(hash_ref()(key), sizeof(slot_type), common());
    return {iterator_at(index), true};
  }
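  // find_or_prepare_insert specialization for heap tables: probes for an equal
  // element and, if none is found, reserves a position in the first group that
  // has an empty slot via PrepareInsertNonSoo.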
  template <class K>
  std::pair<iterator, bool> find_or_prepare_insert_non_soo(const K& key) {
    ABSL_SWISSTABLE_ASSERT(!is_soo());
    prefetch_heap_block();
    auto hash = hash_ref()(key);
    auto seq = probe(common(), hash);
    const ctrl_t* ctrl = control();
    while (true) {
      Group g{ctrl + seq.offset()};
      for (uint32_t i : g.Match(H2(hash))) {
        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
                EqualElement<K>{key, eq_ref()},
                PolicyTraits::element(slot_array() + seq.offset(i)))))
          return {iterator_at(seq.offset(i)), false};
      }
      auto mask_empty = g.MaskEmpty();
      if (ABSL_PREDICT_TRUE(mask_empty)) {
        size_t target = seq.offset(
            GetInsertionOffset(mask_empty, capacity(), hash, control()));
        return {iterator_at(PrepareInsertNonSoo(common(), hash,
                                                FindInfo{target, seq.index()},
                                                GetPolicyFunctions())),
                true};
      }
      seq.next();
      ABSL_SWISSTABLE_ASSERT(seq.index() <= capacity() && "full table!");
    }
  }
 protected:
  // Correctness assertions that run on find() and find_or_prepare_insert().
  template <class K>
  void AssertOnFind(ABSL_ATTRIBUTE_UNUSED const K& key) {
    AssertHashEqConsistent(key);
    AssertNotDebugCapacity();
  }
  // Asserts that the capacity is not a sentinel invalid value.
  void AssertNotDebugCapacity() const {
    if (ABSL_PREDICT_TRUE(capacity() <
                          InvalidCapacity::kAboveMaxValidCapacity)) {
      return;
    }
    assert(capacity() != InvalidCapacity::kReentrance &&
           "Reentrant container access during element construction/destruction "
           "is not allowed.");
    assert(capacity() != InvalidCapacity::kDestroyed &&
           "Use of destroyed hash table.");
    if (SwisstableGenerationsEnabled() &&
        ABSL_PREDICT_FALSE(capacity() >= InvalidCapacity::kMovedFrom)) {
      if (capacity() == InvalidCapacity::kSelfMovedFrom) {
        // If this log triggers, then a hash table was move-assigned to itself
        // and then used again later without being reinitialized.
        ABSL_RAW_LOG(FATAL, "Use of self-move-assigned hash table.");
      }
      ABSL_RAW_LOG(FATAL, "Use of moved-from hash table.");
    }
  }
  // Asserts that hash and equal functors provided by the user are consistent,
  // meaning that `eq(k1, k2)` implies `hash(k1)==hash(k2)`.
  template <class K>
  void AssertHashEqConsistent(const K& key) {
#ifdef NDEBUG
    return;
#endif
    // If the hash/eq functors are known to be consistent, then skip validation.
    if (std::is_same<hasher, absl::container_internal::StringHash>::value &&
        std::is_same<key_equal, absl::container_internal::StringEq>::value) {
      return;
    }
    if (std::is_scalar<key_type>::value &&
        std::is_same<hasher, absl::Hash<key_type>>::value &&
        std::is_same<key_equal, std::equal_to<key_type>>::value) {
      return;
    }
    if (empty()) return;

    const size_t hash_of_arg = hash_ref()(key);
    const auto assert_consistent = [&](const ctrl_t*, slot_type* slot) {
      const value_type& element = PolicyTraits::element(slot);
      const bool is_key_equal =
          PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, element);
      if (!is_key_equal) return;

      const size_t hash_of_slot =
          PolicyTraits::apply(HashElement{hash_ref()}, element);
      ABSL_ATTRIBUTE_UNUSED const bool is_hash_equal =
          hash_of_arg == hash_of_slot;
      assert((!is_key_equal || is_hash_equal) &&
             "eq(k1, k2) must imply that hash(k1) == hash(k2). "
             "hash/eq functors are inconsistent.");
    };

    if (is_soo()) {
      assert_consistent(/*unused*/ nullptr, soo_slot());
      return;
    }
    // We only do validation for small tables so that it's constant time.
    if (capacity() > 16) return;
    IterateOverFullSlots(common(), slot_array(), assert_consistent);
  }
  // Attempts to find `key` in the table; if it isn't found, returns an
  // iterator to the position where the value can be inserted, with the control
  // byte already set to `key`'s H2. Returns a bool indicating whether an
  // insertion can take place.
  template <class K>
  std::pair<iterator, bool> find_or_prepare_insert(const K& key) {
    AssertOnFind(key);
    if (is_soo()) return find_or_prepare_insert_soo(key);
    return find_or_prepare_insert_non_soo(key);
  }
  // Constructs the value in the space pointed to by the iterator. This only
  // works after an unsuccessful find_or_prepare_insert() and before any other
  // modifications happen in the raw_hash_set.
  //
  // PRECONDITION: iter was returned from find_or_prepare_insert(k), where k is
  // the key decomposed from `forward<Args>(args)...`, and the bool returned by
  // find_or_prepare_insert(k) was true.
  // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
  template <class... Args>
  void emplace_at(iterator iter, Args&&... args) {
    construct(iter.slot(), std::forward<Args>(args)...);
    assert(PolicyTraits::apply(FindElement{*this}, *iter) == iter &&
           "constructed value does not match the lookup key");
  }

  iterator iterator_at(size_t i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return {control() + i, slot_array() + i, common().generation_ptr()};
  }
  const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return const_cast<raw_hash_set*>(this)->iterator_at(i);
  }

  reference unchecked_deref(iterator it) { return it.unchecked_deref(); }
 private:
  friend struct RawHashSetTestOnlyAccess;

  // The number of slots we can still fill without needing to rehash.
  //
  // This is stored separately due to tombstones: we do not include tombstones
  // in the growth capacity, because we'd like to rehash when the table is
  // otherwise filled with tombstones: otherwise, probe sequences might get
  // unacceptably long without triggering a rehash. Callers can also force a
  // rehash via the standard `rehash(0)`, which will recompute this value as a
  // side-effect.
  //
  // See `CapacityToGrowth()`.
  size_t growth_left() const {
    ABSL_SWISSTABLE_ASSERT(!is_soo());
    return common().growth_left();
  }

  GrowthInfo& growth_info() {
    ABSL_SWISSTABLE_ASSERT(!is_soo());
    return common().growth_info();
  }
  GrowthInfo growth_info() const {
    ABSL_SWISSTABLE_ASSERT(!is_soo());
    return common().growth_info();
  }

  // Prefetch the heap-allocated memory region to resolve potential TLB and
  // cache misses. This is intended to overlap with execution of calculating
  // the hash for a key.
  void prefetch_heap_block() const {
    ABSL_SWISSTABLE_ASSERT(!is_soo());
#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
    __builtin_prefetch(control(), 0, 1);
#endif
  }
  CommonFields& common() { return settings_.template get<0>(); }
  const CommonFields& common() const { return settings_.template get<0>(); }

  ctrl_t* control() const {
    ABSL_SWISSTABLE_ASSERT(!is_soo());
    return common().control();
  }
  slot_type* slot_array() const {
    ABSL_SWISSTABLE_ASSERT(!is_soo());
    return static_cast<slot_type*>(common().slot_array());
  }
  slot_type* soo_slot() {
    ABSL_SWISSTABLE_ASSERT(is_soo());
    return static_cast<slot_type*>(common().soo_data());
  }
  const slot_type* soo_slot() const {
    return const_cast<raw_hash_set*>(this)->soo_slot();
  }
  iterator soo_iterator() {
    return {SooControl(), soo_slot(), common().generation_ptr()};
  }
  const_iterator soo_iterator() const {
    return const_cast<raw_hash_set*>(this)->soo_iterator();
  }
  HashtablezInfoHandle infoz() {
    ABSL_SWISSTABLE_ASSERT(!is_soo());
    return common().infoz();
  }

  hasher& hash_ref() { return settings_.template get<1>(); }
  const hasher& hash_ref() const { return settings_.template get<1>(); }
  key_equal& eq_ref() { return settings_.template get<2>(); }
  const key_equal& eq_ref() const { return settings_.template get<2>(); }
  allocator_type& alloc_ref() { return settings_.template get<3>(); }
  const allocator_type& alloc_ref() const {
    return settings_.template get<3>();
  }
  static const void* get_hash_ref_fn(const CommonFields& common) {
    auto* h = reinterpret_cast<const raw_hash_set*>(&common);
    return &h->hash_ref();
  }
  static void transfer_slot_fn(void* set, void* dst, void* src) {
    auto* h = static_cast<raw_hash_set*>(set);
    h->transfer(static_cast<slot_type*>(dst), static_cast<slot_type*>(src));
  }
  // Note: dealloc_fn will only be used if we have a non-standard allocator.
  static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
    auto* set = reinterpret_cast<raw_hash_set*>(&common);
    // Unpoison before returning the memory to the allocator.
    SanitizerUnpoisonMemoryRegion(common.slot_array(),
                                  sizeof(slot_type) * common.capacity());
    common.infoz().Unregister();
    Deallocate<BackingArrayAlignment(alignof(slot_type))>(
        &set->alloc_ref(), common.backing_array_start(),
        common.alloc_size(sizeof(slot_type), alignof(slot_type)));
  }
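  // Builds the table of type-erased policy callbacks that the non-templated
  // parts of the implementation use to hash, transfer, deallocate and resize
  // slots.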
  static const PolicyFunctions& GetPolicyFunctions() {
    static constexpr PolicyFunctions value = {
        sizeof(slot_type),
        // TODO(b/328722020): try to type erase
        // for standard layout and alignof(Hash) <= alignof(CommonFields).
        std::is_empty<hasher>::value ? &GetHashRefForEmptyHasher
                                     : &raw_hash_set::get_hash_ref_fn,
        PolicyTraits::template get_hash_slot_fn<hasher>(),
        PolicyTraits::transfer_uses_memcpy()
            ? TransferRelocatable<sizeof(slot_type)>
            : &raw_hash_set::transfer_slot_fn,
        (std::is_same<SlotAlloc, std::allocator<slot_type>>::value
             ? &DeallocateStandard<alignof(slot_type)>
             : &raw_hash_set::dealloc_fn),
        &raw_hash_set::resize_impl};
    return value;
  }

  // Bundle together CommonFields plus other objects which might be empty.
  // CompressedTuple will ensure that sizeof is not affected by any of the
  // empty fields that occur after CommonFields.
  absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
                                            allocator_type>
      settings_{CommonFields::CreateDefault<SooEnabled()>(), hasher{},
                key_equal{}, allocator_type{}};
};
// Friend access for free functions in raw_hash_set.h.
struct HashtableFreeFunctionsAccess {
  template <class Predicate, typename Set>
  static typename Set::size_type EraseIf(Predicate& pred, Set* c) {
    if (c->empty()) {
      return 0;
    }
    if (c->is_soo()) {
      auto it = c->soo_iterator();
      if (!pred(*it)) {
        ABSL_SWISSTABLE_ASSERT(c->size() == 1 &&
                               "hash table was modified unexpectedly");
        return 0;
      }
      c->destroy(it.slot());
      c->common().set_empty_soo();
      return 1;
    }
    ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = c->size();
    size_t num_deleted = 0;
    IterateOverFullSlots(
        c->common(), c->slot_array(), [&](const ctrl_t* ctrl, auto* slot) {
          if (pred(Set::PolicyTraits::element(slot))) {
            c->destroy(slot);
            EraseMetaOnly(c->common(), static_cast<size_t>(ctrl - c->control()),
                          sizeof(*slot));
            ++num_deleted;
          }
        });
    // NOTE: IterateOverFullSlots allows removal of the current element, so we
    // additionally verify the size here.
    ABSL_SWISSTABLE_ASSERT(original_size_for_assert - num_deleted ==
                               c->size() &&
                           "hash table was modified unexpectedly");
    return num_deleted;
  }
  template <class Callback, typename Set>
  static void ForEach(Callback& cb, Set* c) {
    if (c->empty()) {
      return;
    }
    if (c->is_soo()) {
      cb(*c->soo_iterator());
      return;
    }
    using ElementTypeWithConstness = decltype(*c->begin());
    IterateOverFullSlots(
        c->common(), c->slot_array(), [&cb](const ctrl_t*, auto* slot) {
          ElementTypeWithConstness& element = Set::PolicyTraits::element(slot);
          cb(element);
        });
  }
};
// Erases all elements that satisfy the predicate `pred` from the container `c`.
template <typename P, typename H, typename E, typename A, typename Predicate>
typename raw_hash_set<P, H, E, A>::size_type EraseIf(
    Predicate& pred, raw_hash_set<P, H, E, A>* c) {
  return HashtableFreeFunctionsAccess::EraseIf(pred, c);
}
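// Informal usage sketch (not part of this header): the public containers
// expose this functionality through absl::erase_if, e.g.
//   absl::flat_hash_set<int> s = {1, 2, 3, 4};
//   size_t n = absl::erase_if(s, [](int v) { return v % 2 == 0; });  // n == 2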
// Calls `cb` for all elements in the container `c`.
template <typename P, typename H, typename E, typename A, typename Callback>
void ForEach(Callback& cb, raw_hash_set<P, H, E, A>* c) {
  return HashtableFreeFunctionsAccess::ForEach(cb, c);
}
template <typename P, typename H, typename E, typename A, typename Callback>
void ForEach(Callback& cb, const raw_hash_set<P, H, E, A>* c) {
  return HashtableFreeFunctionsAccess::ForEach(cb, c);
}

namespace hashtable_debug_internal {

template <typename Set>
struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
  using Traits = typename Set::PolicyTraits;
  using Slot = typename Traits::slot_type;
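  // Returns the number of probes performed when looking up `key`; used by
  // debugging utilities to measure probe-sequence length.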
  static size_t GetNumProbes(const Set& set,
                             const typename Set::key_type& key) {
    if (set.is_soo()) return 0;
    size_t num_probes = 0;
    size_t hash = set.hash_ref()(key);
    auto seq = probe(set.common(), hash);
    const ctrl_t* ctrl = set.control();
    while (true) {
      container_internal::Group g{ctrl + seq.offset()};
      for (uint32_t i : g.Match(container_internal::H2(hash))) {
        if (Traits::apply(
                typename Set::template EqualElement<typename Set::key_type>{
                    key, set.eq_ref()},
                Traits::element(set.slot_array() + seq.offset(i))))
          return num_probes;
        ++num_probes;
      }
      if (g.MaskEmpty()) return num_probes;
      seq.next();
      ++num_probes;
    }
  }
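  // Returns the number of bytes attributable to the table: the backing array
  // (zero for SOO tables) plus any per-element memory reported by the policy's
  // space_used().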
  static size_t AllocatedByteSize(const Set& c) {
    size_t capacity = c.capacity();
    if (capacity == 0) return 0;
    size_t m =
        c.is_soo() ? 0 : c.common().alloc_size(sizeof(Slot), alignof(Slot));
    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
    if (per_slot != ~size_t{}) {
      m += per_slot * c.size();
    } else {
      for (auto it = c.begin(); it != c.end(); ++it) {
        m += Traits::space_used(it.slot());
      }
    }
    return m;
  }
};

}  // namespace hashtable_debug_internal
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl

#undef ABSL_SWISSTABLE_ENABLE_GENERATIONS
#undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED
#undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN
#undef ABSL_SWISSTABLE_ASSERT

#endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_