raw_hash_set.h

  1. // Copyright 2018 The Abseil Authors.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // https://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. //
  15. // An open-addressing
  16. // hashtable with quadratic probing.
  17. //
  18. // This is a low level hashtable on top of which different interfaces can be
  19. // implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
  20. //
  21. // The table interface is similar to that of std::unordered_set. Notable
  22. // differences are that most member functions support heterogeneous keys when
  23. // BOTH the hash and eq functions are marked as transparent. They do so by
  24. // providing a typedef called `is_transparent`.
  25. //
  26. // When heterogeneous lookup is enabled, functions that take key_type act as if
  27. // they have an overload set like:
  28. //
  29. // iterator find(const key_type& key);
  30. // template <class K>
  31. // iterator find(const K& key);
  32. //
  33. // size_type erase(const key_type& key);
  34. // template <class K>
  35. // size_type erase(const K& key);
  36. //
  37. // std::pair<iterator, iterator> equal_range(const key_type& key);
  38. // template <class K>
  39. // std::pair<iterator, iterator> equal_range(const K& key);
  40. //
  41. // When heterogeneous lookup is disabled, only the explicit `key_type` overloads
  42. // exist.
  43. //
  44. // find() also supports passing the hash explicitly:
  45. //
  46. // iterator find(const key_type& key, size_t hash);
  47. // template <class U>
  48. // iterator find(const U& key, size_t hash);
  49. //
  50. // In addition, the pointer-to-element and iterator stability guarantees are
  51. // weaker: all iterators and pointers are invalidated after a new element is
  52. // inserted.
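//
// For example (an illustrative sketch, not part of the original header): a
// transparent hash/eq pair lets a set keyed on std::string be queried with a
// string_view, avoiding the construction of a temporary std::string:
//
//   struct StringHash {
//     using is_transparent = void;
//     size_t operator()(absl::string_view v) const {
//       return absl::Hash<absl::string_view>{}(v);
//     }
//   };
//   struct StringEq {
//     using is_transparent = void;
//     bool operator()(absl::string_view a, absl::string_view b) const {
//       return a == b;
//     }
//   };
//   absl::flat_hash_set<std::string, StringHash, StringEq> s;
//   s.insert("widget");
//   auto it = s.find(absl::string_view("widget"));  // heterogeneous find()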
  53. //
  54. // IMPLEMENTATION DETAILS
  55. //
  56. // # Table Layout
  57. //
  58. // A raw_hash_set's backing array consists of control bytes followed by slots
  59. // that may or may not contain objects.
  60. //
  61. // The layout of the backing array, for `capacity` slots, is thus, as a
  62. // pseudo-struct:
  63. //
  64. // struct BackingArray {
  65. // // Sampling handler. This field isn't present when the sampling is
  66. // // disabled or this allocation hasn't been selected for sampling.
  67. // HashtablezInfoHandle infoz_;
  68. // // The number of elements we can insert before growing the capacity.
  69. // size_t growth_left;
  70. // // Control bytes for the "real" slots.
  71. // ctrl_t ctrl[capacity];
  72. // // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
  73. // // stop and serves no other purpose.
  74. // ctrl_t sentinel;
  75. // // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
  76. // // that if a probe sequence picks a value near the end of `ctrl`,
  77. // // `Group` will have valid control bytes to look at.
  78. // ctrl_t clones[kWidth - 1];
  79. // // The actual slot data.
  80. // slot_type slots[capacity];
  81. // };
  82. //
  83. // The length of this array is computed by `RawHashSetLayout::alloc_size` below.
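//
// As a back-of-the-envelope sketch (editor's addition, ignoring alignment
// padding and the optional sampling handle), a table with a 16-wide group and
// capacity `c` stores `c + 16` control bytes (`c` real ones, one sentinel and
// 15 clones), so the backing allocation is roughly
//
//   sizeof(size_t)               // growth_left
//   + (c + 16) * sizeof(ctrl_t)  // ctrl, sentinel, clones
//   + c * sizeof(slot_type)      // slots
//
// bytes.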
  84. //
  85. // Control bytes (`ctrl_t`) are bytes (collected into groups of a
  86. // platform-specific size) that define the state of the corresponding slot in
  87. // the slot array. Group manipulation is tightly optimized to be as efficient
  88. // as possible: SSE and friends on x86, clever bit operations on other arches.
  89. //
  90. //      Group 1         Group 2        Group 3
  91. // +---------------+---------------+---------------+
  92. // | | | | | | | | | | | | | | | | | | | | | | | | |
  93. // +---------------+---------------+---------------+
  94. //
  95. // Each control byte is either a special value (marking an empty slot, a
  96. // deleted slot, sometimes called a *tombstone*, or the end-of-table sentinel
  97. // used by iterators) or, if the slot is occupied, seven bits (H2) from the
  98. // hash of the value in the corresponding slot.
  99. //
  100. // Storing control bytes in a separate array also has beneficial cache effects,
  101. // since more logical slots will fit into a cache line.
  102. //
  103. // # Small Object Optimization (SOO)
  104. //
  105. // When the size/alignment of the value_type and the capacity of the table are
  106. // small, we enable small object optimization and store the values inline in
  107. // the raw_hash_set object. This optimization allows us to avoid
  108. // allocation/deallocation as well as cache/dTLB misses.
  109. //
  110. // # Hashing
  111. //
  112. // We compute two separate hashes, `H1` and `H2`, from the hash of an object.
  113. // `H1(hash(x))` is an index into `slots`, and essentially the starting point
  114. // for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
  115. // objects that cannot possibly be the one we are looking for.
  116. //
  117. // # Table operations.
  118. //
  119. // The key operations are `insert`, `find`, and `erase`.
  120. //
  121. // Since `insert` and `erase` are implemented in terms of `find`, we describe
  122. // `find` first. To `find` a value `x`, we compute `hash(x)`. From
  123. // `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
  124. // group of slots in some interesting order.
  125. //
  126. // We now walk through these indices. At each index, we select the entire group
  127. // starting with that index and extract potential candidates: occupied slots
  128. // with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
  129. // group, we stop; the value is absent. Each candidate slot `y` is compared with
  130. // `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
  131. // next probe index. Tombstones effectively behave like full slots that never
  132. // match the value we're looking for.
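//
// In code, the loop described above looks roughly like this (an editor's
// sketch only; `slot_at` and `iterator_at` are placeholder helpers, and the
// real lookup paths below are more involved):
//
//   auto seq = probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
//   while (true) {
//     Group g{ctrl + seq.offset()};
//     for (uint32_t i : g.Match(H2(hash))) {
//       if (eq(x, slot_at(seq.offset(i)))) return iterator_at(seq.offset(i));
//     }
//     if (g.MaskEmpty()) return end();  // hit an empty slot: `x` is absent
//     seq.next();
//   }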
  133. //
  134. // The `H2` bits ensure when we compare a slot to an object with `==`, we are
  135. // likely to have actually found the object. That is, the chance is low that
  136. // `==` is called and returns `false`. Thus, when we search for an object, we
  137. // are unlikely to call `==` many times. This likelihood can be analyzed as
  138. // follows (assuming that H2 is a random enough hash function).
  139. //
  140. // Let's assume that there are `k` "wrong" objects that must be examined in a
  141. // probe sequence. For example, when doing a `find` on an object that is in the
  142. // table, `k` is the number of objects between the start of the probe sequence
  143. // and the final found object (not including the final found object). The
  144. // expected number of objects with an H2 match is then `k/128`. Measurements
  145. // and analysis indicate that even at high load factors, `k` is less than 32,
  146. // meaning that the number of "false positive" comparisons we must perform is
  147. // less than 1/8 per `find`.
  148. // `insert` is implemented in terms of `unchecked_insert`, which inserts a
  149. // value presumed to not be in the table (violating this requirement will cause
  150. // the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
  151. // it, we construct a `probe_seq` once again, and use it to find the first
  152. // group with an unoccupied (empty *or* deleted) slot. We place `x` into the
  153. // first such slot in the group and mark it as full with `x`'s H2.
  154. //
  155. // To `insert`, we compose `unchecked_insert` with `find`. We compute `hash(x)` and
  156. // perform a `find` to see if it's already present; if it is, we're done. If
  157. // it's not, we may decide the table is getting overcrowded (i.e. the load
  158. // factor is greater than 7/8 for big tables; `is_small()` tables use a max load
  159. // factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
  160. // each element of the table into the new array (we know that no insertion here
  161. // will insert an already-present value), and discard the old backing array. At
  162. // this point, we may `unchecked_insert` the value `x`.
  163. //
  164. // Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
  165. // presents a viable, initialized slot pointee to the caller.
  166. //
  167. // `erase` is implemented in terms of `erase_at`, which takes an index to a
  168. // slot. Given an offset, we simply create a tombstone and destroy its contents.
  169. // If we can prove that the slot would not appear in a probe sequence, we can
  170. // mark the slot as empty, instead. We can prove this by observing that if a
  171. // group has any empty slots, it has never been full (assuming we never create
  172. // an empty slot in a group with no empties, which this heuristic guarantees we
  173. // never do) and find would stop at this group anyway (since it does not probe
  174. // beyond groups with empties).
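//
// Rendered as code, the heuristic reads roughly as follows (an editor's
// sketch; `group_start`, `slot_at`, `destroy` and `set_ctrl` are placeholder
// helpers, and the real erasure logic inspects the neighboring groups more
// carefully before deciding):
//
//   destroy(slot_at(index));
//   if (Group{ctrl + group_start(index)}.MaskEmpty()) {
//     set_ctrl(index, ctrl_t::kEmpty);    // provably unreachable by probing
//   } else {
//     set_ctrl(index, ctrl_t::kDeleted);  // tombstone
//   }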
  175. //
  176. // `erase` is `erase_at` composed with `find`: if we
  177. // have a value `x`, we can perform a `find`, and then `erase_at` the resulting
  178. // slot.
  179. //
  180. // To iterate, we simply traverse the array, skipping empty and deleted slots
  181. // and stopping when we hit a `kSentinel`.
  182. #ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
  183. #define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
  184. #include <algorithm>
  185. #include <cassert>
  186. #include <cmath>
  187. #include <cstddef>
  188. #include <cstdint>
  189. #include <cstring>
  190. #include <initializer_list>
  191. #include <iterator>
  192. #include <limits>
  193. #include <memory>
  194. #include <tuple>
  195. #include <type_traits>
  196. #include <utility>
  197. #include "absl/base/attributes.h"
  198. #include "absl/base/config.h"
  199. #include "absl/base/internal/endian.h"
  200. #include "absl/base/internal/raw_logging.h"
  201. #include "absl/base/macros.h"
  202. #include "absl/base/optimization.h"
  203. #include "absl/base/options.h"
  204. #include "absl/base/port.h"
  205. #include "absl/base/prefetch.h"
  206. #include "absl/container/internal/common.h" // IWYU pragma: export // for node_handle
  207. #include "absl/container/internal/compressed_tuple.h"
  208. #include "absl/container/internal/container_memory.h"
  209. #include "absl/container/internal/hash_policy_traits.h"
  210. #include "absl/container/internal/hashtable_debug_hooks.h"
  211. #include "absl/container/internal/hashtablez_sampler.h"
  212. #include "absl/memory/memory.h"
  213. #include "absl/meta/type_traits.h"
  214. #include "absl/numeric/bits.h"
  215. #include "absl/utility/utility.h"
  216. #ifdef ABSL_INTERNAL_HAVE_SSE2
  217. #include <emmintrin.h>
  218. #endif
  219. #ifdef ABSL_INTERNAL_HAVE_SSSE3
  220. #include <tmmintrin.h>
  221. #endif
  222. #ifdef _MSC_VER
  223. #include <intrin.h>
  224. #endif
  225. #ifdef ABSL_INTERNAL_HAVE_ARM_NEON
  226. #include <arm_neon.h>
  227. #endif
  228. namespace absl {
  229. ABSL_NAMESPACE_BEGIN
  230. namespace container_internal {
  231. #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
  232. #error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
  233. #elif (defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
  234. defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
  235. defined(ABSL_HAVE_MEMORY_SANITIZER)) && \
  236. !defined(NDEBUG_SANITIZER) // If defined, performance is important.
  237. // When compiled in sanitizer mode, we add generation integers to the backing
  238. // array and iterators. In the backing array, we store the generation between
  239. // the control bytes and the slots. When iterators are dereferenced, we assert
  240. // that the container has not been mutated in a way that could cause iterator
  241. // invalidation since the iterator was initialized.
  242. #define ABSL_SWISSTABLE_ENABLE_GENERATIONS
  243. #endif
  244. // We use uint8_t so we don't need to worry about padding.
  245. using GenerationType = uint8_t;
  246. // A sentinel value for empty generations. Using 0 makes it easy to constexpr
  247. // initialize an array of this value.
  248. constexpr GenerationType SentinelEmptyGeneration() { return 0; }
  249. constexpr GenerationType NextGeneration(GenerationType generation) {
  250. return ++generation == SentinelEmptyGeneration() ? ++generation : generation;
  251. }
  252. #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
  253. constexpr bool SwisstableGenerationsEnabled() { return true; }
  254. constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); }
  255. #else
  256. constexpr bool SwisstableGenerationsEnabled() { return false; }
  257. constexpr size_t NumGenerationBytes() { return 0; }
  258. #endif
  259. template <typename AllocType>
  260. void SwapAlloc(AllocType& lhs, AllocType& rhs,
  261. std::true_type /* propagate_on_container_swap */) {
  262. using std::swap;
  263. swap(lhs, rhs);
  264. }
  265. template <typename AllocType>
  266. void SwapAlloc(AllocType& lhs, AllocType& rhs,
  267. std::false_type /* propagate_on_container_swap */) {
  268. (void)lhs;
  269. (void)rhs;
  270. assert(lhs == rhs &&
  271. "It's UB to call swap with unequal non-propagating allocators.");
  272. }
  273. template <typename AllocType>
  274. void CopyAlloc(AllocType& lhs, AllocType& rhs,
  275. std::true_type /* propagate_alloc */) {
  276. lhs = rhs;
  277. }
  278. template <typename AllocType>
  279. void CopyAlloc(AllocType&, AllocType&, std::false_type /* propagate_alloc */) {}
  280. // The state for a probe sequence.
  281. //
  282. // Currently, the sequence is a triangular progression of the form
  283. //
  284. // p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
  285. //
  286. // The use of `Width` ensures that each probe step does not overlap groups;
  287. // the sequence effectively outputs the addresses of *groups* (although not
  288. // necessarily aligned to any boundary). The `Group` machinery allows us
  289. // to check an entire group with minimal branching.
  290. //
  291. // Wrapping around at `mask + 1` is important, but not for the obvious reason.
  292. // As described above, the first few entries of the control byte array
  293. // are mirrored at the end of the array, which `Group` will find and use
  294. // for selecting candidates. However, when those candidates' slots are
  295. // actually inspected, there are no corresponding slots for the cloned bytes,
  296. // so we need to make sure we've treated those offsets as "wrapping around".
  297. //
  298. // It turns out that this probe sequence visits every group exactly once if the
  299. // number of groups is a power of two, since (i^2+i)/2 is a bijection in
  300. // Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
  301. template <size_t Width>
  302. class probe_seq {
  303. public:
  304. // Creates a new probe sequence using `hash` as the initial value of the
  305. // sequence and `mask` (usually the capacity of the table) as the mask to
  306. // apply to each value in the progression.
  307. probe_seq(size_t hash, size_t mask) {
  308. assert(((mask + 1) & mask) == 0 && "not a mask");
  309. mask_ = mask;
  310. offset_ = hash & mask_;
  311. }
  312. // The offset within the table, i.e., the value `p(i)` above.
  313. size_t offset() const { return offset_; }
  314. size_t offset(size_t i) const { return (offset_ + i) & mask_; }
  315. void next() {
  316. index_ += Width;
  317. offset_ += index_;
  318. offset_ &= mask_;
  319. }
  320. // 0-based probe index, a multiple of `Width`.
  321. size_t index() const { return index_; }
  322. private:
  323. size_t mask_;
  324. size_t offset_;
  325. size_t index_ = 0;
  326. };
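// Illustrative sketch (editor's addition): with Width == 16 and mask == 63
// (i.e. four 16-slot strides), the triangular sequence starts at `hash & 63`
// and then advances by +16, +48 and +32 (mod 64), so each stride is visited
// exactly once before the sequence would repeat:
//
//   probe_seq<16> seq(hash, /*mask=*/63);
//   size_t p0 = seq.offset();  // hash & 63
//   seq.next();                // p0 + 16 (mod 64)
//   seq.next();                // p0 + 48 (mod 64)
//   seq.next();                // p0 + 32 (mod 64)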
  327. template <class ContainerKey, class Hash, class Eq>
  328. struct RequireUsableKey {
  329. template <class PassedKey, class... Args>
  330. std::pair<
  331. decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
  332. decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
  333. std::declval<const PassedKey&>()))>*
  334. operator()(const PassedKey&, const Args&...) const;
  335. };
  336. template <class E, class Policy, class Hash, class Eq, class... Ts>
  337. struct IsDecomposable : std::false_type {};
  338. template <class Policy, class Hash, class Eq, class... Ts>
  339. struct IsDecomposable<
  340. absl::void_t<decltype(Policy::apply(
  341. RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
  342. std::declval<Ts>()...))>,
  343. Policy, Hash, Eq, Ts...> : std::true_type {};
  344. // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
  345. template <class T>
  346. constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
  347. using std::swap;
  348. return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
  349. }
  350. template <class T>
  351. constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
  352. return false;
  353. }
  354. template <typename T>
  355. uint32_t TrailingZeros(T x) {
  356. ABSL_ASSUME(x != 0);
  357. return static_cast<uint32_t>(countr_zero(x));
  358. }
  359. // An 8-byte bitmask with the most significant bit set in every byte.
  360. constexpr uint64_t kMsbs8Bytes = 0x8080808080808080ULL;
  361. // An abstract bitmask, such as that emitted by a SIMD instruction.
  362. //
  363. // Specifically, this type implements a simple bitset whose representation is
  364. // controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
  365. // of abstract bits in the bitset, while `Shift` is the log-base-two of the
  366. // width of an abstract bit in the representation.
  367. // This mask provides operations for any number of real bits set in an abstract
  368. // bit. To add iteration on top of that, the implementation must guarantee that
  369. // no bit other than the most significant real bit is set in a set abstract bit.
  370. template <class T, int SignificantBits, int Shift = 0>
  371. class NonIterableBitMask {
  372. public:
  373. explicit NonIterableBitMask(T mask) : mask_(mask) {}
  374. explicit operator bool() const { return this->mask_ != 0; }
  375. // Returns the index of the lowest *abstract* bit set in `self`.
  376. uint32_t LowestBitSet() const {
  377. return container_internal::TrailingZeros(mask_) >> Shift;
  378. }
  379. // Returns the index of the highest *abstract* bit set in `self`.
  380. uint32_t HighestBitSet() const {
  381. return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
  382. }
  383. // Returns the number of trailing zero *abstract* bits.
  384. uint32_t TrailingZeros() const {
  385. return container_internal::TrailingZeros(mask_) >> Shift;
  386. }
  387. // Returns the number of leading zero *abstract* bits.
  388. uint32_t LeadingZeros() const {
  389. constexpr int total_significant_bits = SignificantBits << Shift;
  390. constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
  391. return static_cast<uint32_t>(
  392. countl_zero(static_cast<T>(mask_ << extra_bits))) >>
  393. Shift;
  394. }
  395. T mask_;
  396. };
  397. // A mask that can be iterated over.
  398. //
  399. // For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
  400. // an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
  401. // `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
  402. // the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
  403. // If NullifyBitsOnIteration is true (only allowed when Shift == 3), a
  404. // non-zero abstract bit is allowed to have additional bits set
  405. // (e.g., `0xff`, `0x83` and `0x9c` are ok, but `0x6f` is not).
  406. //
  407. // For example:
  408. // for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
  409. // for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
  410. template <class T, int SignificantBits, int Shift = 0,
  411. bool NullifyBitsOnIteration = false>
  412. class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
  413. using Base = NonIterableBitMask<T, SignificantBits, Shift>;
  414. static_assert(std::is_unsigned<T>::value, "");
  415. static_assert(Shift == 0 || Shift == 3, "");
  416. static_assert(!NullifyBitsOnIteration || Shift == 3, "");
  417. public:
  418. explicit BitMask(T mask) : Base(mask) {
  419. if (Shift == 3 && !NullifyBitsOnIteration) {
  420. assert(this->mask_ == (this->mask_ & kMsbs8Bytes));
  421. }
  422. }
  423. // BitMask is an iterator over the indices of its abstract bits.
  424. using value_type = int;
  425. using iterator = BitMask;
  426. using const_iterator = BitMask;
  427. BitMask& operator++() {
  428. if (Shift == 3 && NullifyBitsOnIteration) {
  429. this->mask_ &= kMsbs8Bytes;
  430. }
  431. this->mask_ &= (this->mask_ - 1);
  432. return *this;
  433. }
  434. uint32_t operator*() const { return Base::LowestBitSet(); }
  435. BitMask begin() const { return *this; }
  436. BitMask end() const { return BitMask(0); }
  437. private:
  438. friend bool operator==(const BitMask& a, const BitMask& b) {
  439. return a.mask_ == b.mask_;
  440. }
  441. friend bool operator!=(const BitMask& a, const BitMask& b) {
  442. return a.mask_ != b.mask_;
  443. }
  444. };
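// Usage sketch (editor's addition): iterating a BitMask visits the indices of
// its set abstract bits in ascending order, which is how Match() results are
// consumed below.
//
//   BitMask<uint16_t, 16> m(0b1000'0000'0010'0001);
//   for (uint32_t i : m) {
//     // yields 0, then 5, then 15
//   }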
  445. using h2_t = uint8_t;
  446. // The values here are selected for maximum performance. See the static asserts
  447. // below for details.
  448. // A `ctrl_t` is a single control byte, which can have one of four
  449. // states: empty, deleted, full (which has an associated seven-bit h2_t value)
  450. // and the sentinel. They have the following bit patterns:
  451. //
  452. // empty: 1 0 0 0 0 0 0 0
  453. // deleted: 1 1 1 1 1 1 1 0
  454. // full: 0 h h h h h h h // h represents the hash bits.
  455. // sentinel: 1 1 1 1 1 1 1 1
  456. //
  457. // These values are specifically tuned for SSE-flavored SIMD.
  458. // The static_asserts below detail the source of these choices.
  459. //
  460. // We use an enum class so that when strict aliasing is enabled, the compiler
  461. // knows ctrl_t doesn't alias other types.
  462. enum class ctrl_t : int8_t {
  463. kEmpty = -128, // 0b10000000
  464. kDeleted = -2, // 0b11111110
  465. kSentinel = -1, // 0b11111111
  466. };
  467. static_assert(
  468. (static_cast<int8_t>(ctrl_t::kEmpty) &
  469. static_cast<int8_t>(ctrl_t::kDeleted) &
  470. static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
  471. "Special markers need to have the MSB to make checking for them efficient");
  472. static_assert(
  473. ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
  474. "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
  475. "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
  476. static_assert(
  477. ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
  478. "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
  479. "registers (pcmpeqd xmm, xmm)");
  480. static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
  481. "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
  482. "existence efficient (psignb xmm, xmm)");
  483. static_assert(
  484. (~static_cast<int8_t>(ctrl_t::kEmpty) &
  485. ~static_cast<int8_t>(ctrl_t::kDeleted) &
  486. static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
  487. "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
  488. "shared by ctrl_t::kSentinel to make the scalar test for "
  489. "MaskEmptyOrDeleted() efficient");
  490. static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
  491. "ctrl_t::kDeleted must be -2 to make the implementation of "
  492. "ConvertSpecialToEmptyAndFullToDeleted efficient");
  493. // See definition comment for why this is size 32.
  494. ABSL_DLL extern const ctrl_t kEmptyGroup[32];
  495. // Returns a pointer to a control byte group that can be used by empty tables.
  496. inline ctrl_t* EmptyGroup() {
  497. // Const must be cast away here; no uses of this function will actually write
  498. // to it because it is only used for empty tables.
  499. return const_cast<ctrl_t*>(kEmptyGroup + 16);
  500. }
  501. // For use in SOO iterators.
  502. // TODO(b/289225379): we could potentially get rid of this by adding an is_soo
  503. // bit in iterators. This would add branches but reduce cache misses.
  504. ABSL_DLL extern const ctrl_t kSooControl[17];
  505. // Returns a pointer to a full byte followed by a sentinel byte.
  506. inline ctrl_t* SooControl() {
  507. // Const must be cast away here; no uses of this function will actually write
  508. // to it because it is only used for SOO iterators.
  509. return const_cast<ctrl_t*>(kSooControl);
  510. }
  511. // Whether ctrl is from the SooControl array.
  512. inline bool IsSooControl(const ctrl_t* ctrl) { return ctrl == SooControl(); }
  513. // Returns a pointer to a generation to use for an empty hashtable.
  514. GenerationType* EmptyGeneration();
  515. // Returns whether `generation` is a generation for an empty hashtable that
  516. // could be returned by EmptyGeneration().
  517. inline bool IsEmptyGeneration(const GenerationType* generation) {
  518. return *generation == SentinelEmptyGeneration();
  519. }
  520. // Mixes a randomly generated per-process seed with `hash` and `ctrl` to
  521. // randomize insertion order within groups.
  522. bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
  523. const ctrl_t* ctrl);
  524. ABSL_ATTRIBUTE_ALWAYS_INLINE inline bool ShouldInsertBackwards(
  525. ABSL_ATTRIBUTE_UNUSED size_t capacity, ABSL_ATTRIBUTE_UNUSED size_t hash,
  526. ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {
  527. #if defined(NDEBUG)
  528. return false;
  529. #else
  530. return ShouldInsertBackwardsForDebug(capacity, hash, ctrl);
  531. #endif
  532. }
  533. // Returns insert position for the given mask.
  534. // We want to add entropy even when ASLR is not enabled.
  535. // In debug builds we will randomly insert in either the front or back of
  536. // the group.
  537. // TODO(kfm,sbenza): revisit after we do unconditional mixing
  538. template <class Mask>
  539. ABSL_ATTRIBUTE_ALWAYS_INLINE inline auto GetInsertionOffset(
  540. Mask mask, ABSL_ATTRIBUTE_UNUSED size_t capacity,
  541. ABSL_ATTRIBUTE_UNUSED size_t hash,
  542. ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {
  543. #if defined(NDEBUG)
  544. return mask.LowestBitSet();
  545. #else
  546. return ShouldInsertBackwardsForDebug(capacity, hash, ctrl)
  547. ? mask.HighestBitSet()
  548. : mask.LowestBitSet();
  549. #endif
  550. }
  551. // Returns a per-table hash salt, which changes on resize. This gets mixed into
  552. // H1 to randomize iteration order per-table.
  553. //
  554. // The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
  555. // non-determinism of iteration order in most cases.
  556. inline size_t PerTableSalt(const ctrl_t* ctrl) {
  557. // The low bits of the pointer have little or no entropy because of
  558. // alignment. We shift the pointer to try to use higher entropy bits. A
  559. // good number seems to be 12 bits, because that aligns with page size.
  560. return reinterpret_cast<uintptr_t>(ctrl) >> 12;
  561. }
  562. // Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
  563. inline size_t H1(size_t hash, const ctrl_t* ctrl) {
  564. return (hash >> 7) ^ PerTableSalt(ctrl);
  565. }
  566. // Extracts the H2 portion of a hash: the 7 bits not used for H1.
  567. //
  568. // These are used as an occupied control byte.
  569. inline h2_t H2(size_t hash) { return hash & 0x7F; }
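// For example (editor's sketch, with `ctrl` being the table's control array):
//
//   size_t h  = 0x123456789abcdef0;
//   h2_t   h2 = H2(h);        // 0x70: the low 7 bits, stored in a control byte
//   size_t h1 = H1(h, ctrl);  // (h >> 7) ^ salt: chooses the probe start
//
// Two keys can collide on a control byte only if the low 7 bits of their
// hashes agree, which is what makes H2 an effective pre-filter.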
  570. // Helpers for checking the state of a control byte.
  571. inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
  572. inline bool IsFull(ctrl_t c) {
  573. // Cast `c` to the underlying type instead of casting `0` to `ctrl_t` as `0`
  574. // is not a value in the enum. Both ways are equivalent, but this way makes
  575. // linters happier.
  576. return static_cast<std::underlying_type_t<ctrl_t>>(c) >= 0;
  577. }
  578. inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
  579. inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
  580. #ifdef ABSL_INTERNAL_HAVE_SSE2
  581. // Quick reference guide for intrinsics used below:
  582. //
  583. // * __m128i: An XMM (128-bit) word.
  584. //
  585. // * _mm_setzero_si128: Returns a zero vector.
  586. // * _mm_set1_epi8: Returns a vector with the same i8 in each lane.
  587. //
  588. // * _mm_subs_epi8: Saturating-subtracts two i8 vectors.
  589. // * _mm_and_si128: Ands two i128s together.
  590. // * _mm_or_si128: Ors two i128s together.
  591. // * _mm_andnot_si128: And-nots two i128s together.
  592. //
  593. // * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
  594. // filling each lane with 0x00 or 0xff.
  595. // * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
  596. //
  597. // * _mm_loadu_si128: Performs an unaligned load of an i128.
  598. // * _mm_storeu_si128: Performs an unaligned store of an i128.
  599. //
  600. // * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first
  601. // argument if the corresponding lane of the second
  602. // argument is positive, negative, or zero, respectively.
  603. // * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
  604. // bitmask consisting of those bits.
  605. // * _mm_shuffle_epi8: Selects i8s from the first argument, using the low
  606. // four bits of each i8 lane in the second argument as
  607. // indices.
  608. // https://github.com/abseil/abseil-cpp/issues/209
  609. // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
  610. // _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
  611. // Work around this by using the portable implementation of Group
  612. // when using -funsigned-char under GCC.
  613. inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
  614. #if defined(__GNUC__) && !defined(__clang__)
  615. if (std::is_unsigned<char>::value) {
  616. const __m128i mask = _mm_set1_epi8(0x80);
  617. const __m128i diff = _mm_subs_epi8(b, a);
  618. return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
  619. }
  620. #endif
  621. return _mm_cmpgt_epi8(a, b);
  622. }
  623. struct GroupSse2Impl {
  624. static constexpr size_t kWidth = 16; // the number of slots per group
  625. explicit GroupSse2Impl(const ctrl_t* pos) {
  626. ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
  627. }
  628. // Returns a bitmask representing the positions of slots that match hash.
  629. BitMask<uint16_t, kWidth> Match(h2_t hash) const {
  630. auto match = _mm_set1_epi8(static_cast<char>(hash));
  631. BitMask<uint16_t, kWidth> result = BitMask<uint16_t, kWidth>(0);
  632. result = BitMask<uint16_t, kWidth>(
  633. static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
  634. return result;
  635. }
  636. // Returns a bitmask representing the positions of empty slots.
  637. NonIterableBitMask<uint16_t, kWidth> MaskEmpty() const {
  638. #ifdef ABSL_INTERNAL_HAVE_SSSE3
  639. // This only works because ctrl_t::kEmpty is -128.
  640. return NonIterableBitMask<uint16_t, kWidth>(
  641. static_cast<uint16_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
  642. #else
  643. auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
  644. return NonIterableBitMask<uint16_t, kWidth>(
  645. static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
  646. #endif
  647. }
  648. // Returns a bitmask representing the positions of full slots.
  649. // Note: for `is_small()` tables the group may contain the "same" slot twice:
  650. // original and mirrored.
  651. BitMask<uint16_t, kWidth> MaskFull() const {
  652. return BitMask<uint16_t, kWidth>(
  653. static_cast<uint16_t>(_mm_movemask_epi8(ctrl) ^ 0xffff));
  654. }
  655. // Returns a bitmask representing the positions of non full slots.
  656. // Note: this includes: kEmpty, kDeleted, kSentinel.
  657. // It is useful in contexts when kSentinel is not present.
  658. auto MaskNonFull() const {
  659. return BitMask<uint16_t, kWidth>(
  660. static_cast<uint16_t>(_mm_movemask_epi8(ctrl)));
  661. }
  662. // Returns a bitmask representing the positions of empty or deleted slots.
  663. NonIterableBitMask<uint16_t, kWidth> MaskEmptyOrDeleted() const {
  664. auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
  665. return NonIterableBitMask<uint16_t, kWidth>(static_cast<uint16_t>(
  666. _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
  667. }
  668. // Returns the number of trailing empty or deleted elements in the group.
  669. uint32_t CountLeadingEmptyOrDeleted() const {
  670. auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
  671. return TrailingZeros(static_cast<uint32_t>(
  672. _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
  673. }
  674. void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
  675. auto msbs = _mm_set1_epi8(static_cast<char>(-128));
  676. auto x126 = _mm_set1_epi8(126);
  677. #ifdef ABSL_INTERNAL_HAVE_SSSE3
  678. auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
  679. #else
  680. auto zero = _mm_setzero_si128();
  681. auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
  682. auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
  683. #endif
  684. _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
  685. }
  686. __m128i ctrl;
  687. };
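// Usage sketch (editor's addition): one probe step over a 16-byte group.
//
//   GroupSse2Impl g(ctrl + seq.offset());
//   for (uint32_t i : g.Match(H2(hash))) {
//     // Control byte i matches the 7-bit tag; compare the key stored at
//     // seq.offset(i) to decide whether this is a true hit.
//   }
//   if (g.MaskEmpty()) {
//     // An empty slot means the probe sequence can stop: the key is absent.
//   }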
  688. #endif // ABSL_INTERNAL_HAVE_SSE2
  689. #if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
  690. struct GroupAArch64Impl {
  691. static constexpr size_t kWidth = 8;
  692. explicit GroupAArch64Impl(const ctrl_t* pos) {
  693. ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
  694. }
  695. auto Match(h2_t hash) const {
  696. uint8x8_t dup = vdup_n_u8(hash);
  697. auto mask = vceq_u8(ctrl, dup);
  698. return BitMask<uint64_t, kWidth, /*Shift=*/3,
  699. /*NullifyBitsOnIteration=*/true>(
  700. vget_lane_u64(vreinterpret_u64_u8(mask), 0));
  701. }
  702. NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
  703. uint64_t mask =
  704. vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
  705. vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
  706. vreinterpret_s8_u8(ctrl))),
  707. 0);
  708. return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
  709. }
  710. // Returns a bitmask representing the positions of full slots.
  711. // Note: for `is_small()` tables the group may contain the "same" slot twice:
  712. // original and mirrored.
  713. auto MaskFull() const {
  714. uint64_t mask = vget_lane_u64(
  715. vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl),
  716. vdup_n_s8(static_cast<int8_t>(0)))),
  717. 0);
  718. return BitMask<uint64_t, kWidth, /*Shift=*/3,
  719. /*NullifyBitsOnIteration=*/true>(mask);
  720. }
  721. // Returns a bitmask representing the positions of non full slots.
  722. // Note: this includes: kEmpty, kDeleted, kSentinel.
  723. // It is useful in contexts when kSentinel is not present.
  724. auto MaskNonFull() const {
  725. uint64_t mask = vget_lane_u64(
  726. vreinterpret_u64_u8(vclt_s8(vreinterpret_s8_u8(ctrl),
  727. vdup_n_s8(static_cast<int8_t>(0)))),
  728. 0);
  729. return BitMask<uint64_t, kWidth, /*Shift=*/3,
  730. /*NullifyBitsOnIteration=*/true>(mask);
  731. }
  732. NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
  733. uint64_t mask =
  734. vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
  735. vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
  736. vreinterpret_s8_u8(ctrl))),
  737. 0);
  738. return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
  739. }
  740. uint32_t CountLeadingEmptyOrDeleted() const {
  741. uint64_t mask =
  742. vget_lane_u64(vreinterpret_u64_u8(vcle_s8(
  743. vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
  744. vreinterpret_s8_u8(ctrl))),
  745. 0);
  746. // Similar to MaskEmptyOrDeleted(), but with the comparison inverted so that
  747. // the produced bitfield is inverted. We then count the number of trailing zeros.
  748. // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
  749. // so we should be fine.
  750. return static_cast<uint32_t>(countr_zero(mask)) >> 3;
  751. }
  752. void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
  753. uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
  754. constexpr uint64_t slsbs = 0x0202020202020202ULL;
  755. constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
  756. auto x = slsbs & (mask >> 6);
  757. auto res = (x + midbs) | kMsbs8Bytes;
  758. little_endian::Store64(dst, res);
  759. }
  760. uint8x8_t ctrl;
  761. };
  762. #endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN
  763. struct GroupPortableImpl {
  764. static constexpr size_t kWidth = 8;
  765. explicit GroupPortableImpl(const ctrl_t* pos)
  766. : ctrl(little_endian::Load64(pos)) {}
  767. BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
  768. // For the technique, see:
  769. // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
  770. // (Determine if a word has a byte equal to n).
  771. //
  772. // Caveat: there are false positives but:
  773. // - they only occur if there is a real match
  774. // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
  775. // - they will be handled gracefully by subsequent checks in code
  776. //
  777. // Example:
  778. // v = 0x1716151413121110
  779. // hash = 0x12
  780. // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
  781. constexpr uint64_t lsbs = 0x0101010101010101ULL;
  782. auto x = ctrl ^ (lsbs * hash);
  783. return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & kMsbs8Bytes);
  784. }
  785. NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
  786. return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 6)) &
  787. kMsbs8Bytes);
  788. }
  789. // Returns a bitmask representing the positions of full slots.
  790. // Note: for `is_small()` tables the group may contain the "same" slot twice:
  791. // original and mirrored.
  792. BitMask<uint64_t, kWidth, 3> MaskFull() const {
  793. return BitMask<uint64_t, kWidth, 3>((ctrl ^ kMsbs8Bytes) & kMsbs8Bytes);
  794. }
  795. // Returns a bitmask representing the positions of non full slots.
  796. // Note: this includes: kEmpty, kDeleted, kSentinel.
  797. // It is useful in contexts when kSentinel is not present.
  798. auto MaskNonFull() const {
  799. return BitMask<uint64_t, kWidth, 3>(ctrl & kMsbs8Bytes);
  800. }
  801. NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
  802. return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 7)) &
  803. kMsbs8Bytes);
  804. }
  805. uint32_t CountLeadingEmptyOrDeleted() const {
  806. // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
  807. // kDeleted. We clear all other bits and count the number of trailing zeros.
  808. constexpr uint64_t bits = 0x0101010101010101ULL;
  809. return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >>
  810. 3);
  811. }
  812. void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
  813. constexpr uint64_t lsbs = 0x0101010101010101ULL;
  814. auto x = ctrl & kMsbs8Bytes;
  815. auto res = (~x + (x >> 7)) & ~lsbs;
  816. little_endian::Store64(dst, res);
  817. }
  818. uint64_t ctrl;
  819. };
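// Worked example (editor's addition) of the portable Match() bit-trick above,
// using the values from its comment:
//
//   uint64_t v    = 0x1716151413121110;            // 8 control bytes
//   uint64_t lsbs = 0x0101010101010101;
//   uint64_t x    = v ^ (lsbs * 0x12);             // bytes equal to 0x12 -> 0
//   uint64_t hit  = (x - lsbs) & ~x & kMsbs8Bytes; // == 0x0000000080800000
//
// Abstract bits 2 and 3 are set: byte 2 (0x12) is the real match, and byte 3
// (0x13) is the documented false positive that the caller's subsequent key
// comparison rejects.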
  820. #ifdef ABSL_INTERNAL_HAVE_SSE2
  821. using Group = GroupSse2Impl;
  822. using GroupFullEmptyOrDeleted = GroupSse2Impl;
  823. #elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
  824. using Group = GroupAArch64Impl;
  825. // For Aarch64, we use the portable implementation for counting and masking
  826. // full, empty or deleted group elements. This is to avoid the latency of moving
  827. // between data GPRs and Neon registers when it does not provide a benefit.
  828. // Using Neon is profitable when we call Match(), but is not when we don't,
  829. // which is the case when we do *EmptyOrDeleted and MaskFull operations.
  830. // It is difficult to make a similar approach beneficial on other architectures
  831. // such as x86 since they have much lower GPR <-> vector register transfer
  832. // latency and 16-wide Groups.
  833. using GroupFullEmptyOrDeleted = GroupPortableImpl;
  834. #else
  835. using Group = GroupPortableImpl;
  836. using GroupFullEmptyOrDeleted = GroupPortableImpl;
  837. #endif
  838. // When there is an insertion with no reserved growth, we rehash with
  839. // probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
  840. // constant divided by capacity ensures that inserting N elements is still O(N)
  841. // in the average case. Using the constant 16 means that we expect to rehash ~8
  842. // times more often than when generations are disabled. We are adding expected
  843. // rehash_probability * #insertions/capacity_growth = 16/capacity * ((7/8 -
  844. // 7/16) * capacity)/capacity_growth = ~7 extra rehashes per capacity growth.
  845. inline size_t RehashProbabilityConstant() { return 16; }
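// Editor's note on RehashProbabilityConstant() above (illustrative arithmetic
// only): with rehash probability 16/capacity and roughly
// (7/8 - 7/16) * capacity insertions between two capacity growths, the
// expected number of extra rehashes per growth is
//   (16 / capacity) * ((7/8 - 7/16) * capacity) = 16 * 7/16 = 7.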
  846. class CommonFieldsGenerationInfoEnabled {
  847. // A sentinel value for reserved_growth_ indicating that we just ran out of
  848. // reserved growth on the last insertion. When reserve is called and then
  849. // insertions take place, reserved_growth_'s state machine is N, ..., 1,
  850. // kReservedGrowthJustRanOut, 0.
  851. static constexpr size_t kReservedGrowthJustRanOut =
  852. (std::numeric_limits<size_t>::max)();
  853. public:
  854. CommonFieldsGenerationInfoEnabled() = default;
  855. CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that)
  856. : reserved_growth_(that.reserved_growth_),
  857. reservation_size_(that.reservation_size_),
  858. generation_(that.generation_) {
  859. that.reserved_growth_ = 0;
  860. that.reservation_size_ = 0;
  861. that.generation_ = EmptyGeneration();
  862. }
  863. CommonFieldsGenerationInfoEnabled& operator=(
  864. CommonFieldsGenerationInfoEnabled&&) = default;
865. // Whether we should rehash on insert in order to detect bugs caused by using
866. // invalid references. We rehash on the first insertion after reserved_growth_
867. // reaches 0 after a call to reserve. We also do a rehash with low probability
868. // whenever reserved_growth_ is zero.
  869. bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
  870. size_t capacity) const;
  871. // Similar to above, except that we don't depend on reserved_growth_.
  872. bool should_rehash_for_bug_detection_on_move(const ctrl_t* ctrl,
  873. size_t capacity) const;
  874. void maybe_increment_generation_on_insert() {
  875. if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
  876. if (reserved_growth_ > 0) {
  877. if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
  878. } else {
  879. increment_generation();
  880. }
  881. }
  882. void increment_generation() { *generation_ = NextGeneration(*generation_); }
  883. void reset_reserved_growth(size_t reservation, size_t size) {
  884. reserved_growth_ = reservation - size;
  885. }
  886. size_t reserved_growth() const { return reserved_growth_; }
  887. void set_reserved_growth(size_t r) { reserved_growth_ = r; }
  888. size_t reservation_size() const { return reservation_size_; }
  889. void set_reservation_size(size_t r) { reservation_size_ = r; }
  890. GenerationType generation() const { return *generation_; }
  891. void set_generation(GenerationType g) { *generation_ = g; }
  892. GenerationType* generation_ptr() const { return generation_; }
  893. void set_generation_ptr(GenerationType* g) { generation_ = g; }
  894. private:
  895. // The number of insertions remaining that are guaranteed to not rehash due to
  896. // a prior call to reserve. Note: we store reserved growth in addition to
  897. // reservation size because calls to erase() decrease size_ but don't decrease
  898. // reserved growth.
  899. size_t reserved_growth_ = 0;
  900. // The maximum argument to reserve() since the container was cleared. We need
  901. // to keep track of this, in addition to reserved growth, because we reset
  902. // reserved growth to this when erase(begin(), end()) is called.
  903. size_t reservation_size_ = 0;
  904. // Pointer to the generation counter, which is used to validate iterators and
  905. // is stored in the backing array between the control bytes and the slots.
  906. // Note that we can't store the generation inside the container itself and
  907. // keep a pointer to the container in the iterators because iterators must
  908. // remain valid when the container is moved.
  909. // Note: we could derive this pointer from the control pointer, but it makes
  910. // the code more complicated, and there's a benefit in having the sizes of
  911. // raw_hash_set in sanitizer mode and non-sanitizer mode a bit more different,
  912. // which is that tests are less likely to rely on the size remaining the same.
  913. GenerationType* generation_ = EmptyGeneration();
  914. };
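// Editor's illustration of the reserved_growth_ state machine driven by
// maybe_increment_generation_on_insert() above (the starting value 3 is
// arbitrary):
//   after reserve():  reserved_growth_ == 3
//   insert #1:        3 -> 2                         (no generation bump)
//   insert #2:        2 -> 1                         (no generation bump)
//   insert #3:        1 -> kReservedGrowthJustRanOut (no generation bump)
//   insert #4:        kReservedGrowthJustRanOut -> 0, generation incremented
//   later inserts:    reserved_growth_ stays 0; every insert increments the
//                     generation.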
  915. class CommonFieldsGenerationInfoDisabled {
  916. public:
  917. CommonFieldsGenerationInfoDisabled() = default;
  918. CommonFieldsGenerationInfoDisabled(CommonFieldsGenerationInfoDisabled&&) =
  919. default;
  920. CommonFieldsGenerationInfoDisabled& operator=(
  921. CommonFieldsGenerationInfoDisabled&&) = default;
  922. bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
  923. return false;
  924. }
  925. bool should_rehash_for_bug_detection_on_move(const ctrl_t*, size_t) const {
  926. return false;
  927. }
  928. void maybe_increment_generation_on_insert() {}
  929. void increment_generation() {}
  930. void reset_reserved_growth(size_t, size_t) {}
  931. size_t reserved_growth() const { return 0; }
  932. void set_reserved_growth(size_t) {}
  933. size_t reservation_size() const { return 0; }
  934. void set_reservation_size(size_t) {}
  935. GenerationType generation() const { return 0; }
  936. void set_generation(GenerationType) {}
  937. GenerationType* generation_ptr() const { return nullptr; }
  938. void set_generation_ptr(GenerationType*) {}
  939. };
  940. class HashSetIteratorGenerationInfoEnabled {
  941. public:
  942. HashSetIteratorGenerationInfoEnabled() = default;
  943. explicit HashSetIteratorGenerationInfoEnabled(
  944. const GenerationType* generation_ptr)
  945. : generation_ptr_(generation_ptr), generation_(*generation_ptr) {}
  946. GenerationType generation() const { return generation_; }
  947. void reset_generation() { generation_ = *generation_ptr_; }
  948. const GenerationType* generation_ptr() const { return generation_ptr_; }
  949. void set_generation_ptr(const GenerationType* ptr) { generation_ptr_ = ptr; }
  950. private:
  951. const GenerationType* generation_ptr_ = EmptyGeneration();
  952. GenerationType generation_ = *generation_ptr_;
  953. };
  954. class HashSetIteratorGenerationInfoDisabled {
  955. public:
  956. HashSetIteratorGenerationInfoDisabled() = default;
  957. explicit HashSetIteratorGenerationInfoDisabled(const GenerationType*) {}
  958. GenerationType generation() const { return 0; }
  959. void reset_generation() {}
  960. const GenerationType* generation_ptr() const { return nullptr; }
  961. void set_generation_ptr(const GenerationType*) {}
  962. };
  963. #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
  964. using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoEnabled;
  965. using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoEnabled;
  966. #else
  967. using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled;
  968. using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
  969. #endif
970. // Stores information about the number of slots we can still fill
971. // without needing to rehash.
972. //
973. // We want to ensure a sufficient number of empty slots in the table in order
974. // to keep probe sequences relatively short. An empty slot in the probe group
975. // is required to stop probing.
  976. //
  977. // Tombstones (kDeleted slots) are not included in the growth capacity,
  978. // because we'd like to rehash when the table is filled with tombstones and/or
  979. // full slots.
  980. //
981. // GrowthInfo also stores a bit that encodes whether the table may have any
982. // deleted slots.
983. // Most tables (>95%) have no deleted slots, so some functions can
984. // be more efficient with this information.
  985. //
  986. // Callers can also force a rehash via the standard `rehash(0)`,
  987. // which will recompute this value as a side-effect.
  988. //
  989. // See also `CapacityToGrowth()`.
  990. class GrowthInfo {
  991. public:
  992. // Leaves data member uninitialized.
  993. GrowthInfo() = default;
  994. // Initializes the GrowthInfo assuming we can grow `growth_left` elements
  995. // and there are no kDeleted slots in the table.
  996. void InitGrowthLeftNoDeleted(size_t growth_left) {
  997. growth_left_info_ = growth_left;
  998. }
  999. // Overwrites single full slot with an empty slot.
  1000. void OverwriteFullAsEmpty() { ++growth_left_info_; }
  1001. // Overwrites single empty slot with a full slot.
  1002. void OverwriteEmptyAsFull() {
  1003. assert(GetGrowthLeft() > 0);
  1004. --growth_left_info_;
  1005. }
  1006. // Overwrites several empty slots with full slots.
  1007. void OverwriteManyEmptyAsFull(size_t cnt) {
  1008. assert(GetGrowthLeft() >= cnt);
  1009. growth_left_info_ -= cnt;
  1010. }
  1011. // Overwrites specified control element with full slot.
  1012. void OverwriteControlAsFull(ctrl_t ctrl) {
  1013. assert(GetGrowthLeft() >= static_cast<size_t>(IsEmpty(ctrl)));
  1014. growth_left_info_ -= static_cast<size_t>(IsEmpty(ctrl));
  1015. }
  1016. // Overwrites single full slot with a deleted slot.
  1017. void OverwriteFullAsDeleted() { growth_left_info_ |= kDeletedBit; }
  1018. // Returns true if table satisfies two properties:
  1019. // 1. Guaranteed to have no kDeleted slots.
  1020. // 2. There is a place for at least one element to grow.
  1021. bool HasNoDeletedAndGrowthLeft() const {
  1022. return static_cast<std::make_signed_t<size_t>>(growth_left_info_) > 0;
  1023. }
  1024. // Returns true if the table satisfies two properties:
  1025. // 1. Guaranteed to have no kDeleted slots.
  1026. // 2. There is no growth left.
  1027. bool HasNoGrowthLeftAndNoDeleted() const { return growth_left_info_ == 0; }
1028. // Returns true if the table is guaranteed to have no kDeleted slots.
  1029. bool HasNoDeleted() const {
  1030. return static_cast<std::make_signed_t<size_t>>(growth_left_info_) >= 0;
  1031. }
  1032. // Returns the number of elements left to grow.
  1033. size_t GetGrowthLeft() const { return growth_left_info_ & kGrowthLeftMask; }
  1034. private:
  1035. static constexpr size_t kGrowthLeftMask = ((~size_t{}) >> 1);
  1036. static constexpr size_t kDeletedBit = ~kGrowthLeftMask;
1037. // The topmost bit signals whether there are any deleted slots.
  1038. size_t growth_left_info_;
  1039. };
  1040. static_assert(sizeof(GrowthInfo) == sizeof(size_t), "");
  1041. static_assert(alignof(GrowthInfo) == alignof(size_t), "");
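// Editor's illustration of the GrowthInfo encoding above:
//   GrowthInfo gi;
//   gi.InitGrowthLeftNoDeleted(5);    // growth_left_info_ == 5
//   gi.OverwriteEmptyAsFull();        // GetGrowthLeft() == 4
//   gi.HasNoDeletedAndGrowthLeft();   // true: the value is positive
//   gi.OverwriteFullAsDeleted();      // sets the topmost (kDeletedBit) bit
//   gi.HasNoDeleted();                // false: the value is negative as signed
//   gi.GetGrowthLeft();               // still 4: the mask strips kDeletedBit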
  1042. // Returns whether `n` is a valid capacity (i.e., number of slots).
  1043. //
  1044. // A valid capacity is a non-zero integer `2^m - 1`.
  1045. inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
  1046. // Returns the number of "cloned control bytes".
  1047. //
  1048. // This is the number of control bytes that are present both at the beginning
  1049. // of the control byte array and at the end, such that we can create a
  1050. // `Group::kWidth`-width probe window starting from any control byte.
  1051. constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
  1052. // Returns the number of control bytes including cloned.
  1053. constexpr size_t NumControlBytes(size_t capacity) {
  1054. return capacity + 1 + NumClonedBytes();
  1055. }
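// Editor's illustration (assuming Group::kWidth == 16, so NumClonedBytes()
// == 15): for capacity == 15 the control array holds
//   NumControlBytes(15) == 15 + 1 (kSentinel) + 15 (clones) == 31 bytes.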
1056. // Computes the control bytes' offset from the start of the backing allocation.
  1057. // infoz and growth_info are stored at the beginning of the backing array.
  1058. inline static size_t ControlOffset(bool has_infoz) {
  1059. return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(GrowthInfo);
  1060. }
  1061. // Helper class for computing offsets and allocation size of hash set fields.
  1062. class RawHashSetLayout {
  1063. public:
  1064. explicit RawHashSetLayout(size_t capacity, size_t slot_align, bool has_infoz)
  1065. : capacity_(capacity),
  1066. control_offset_(ControlOffset(has_infoz)),
  1067. generation_offset_(control_offset_ + NumControlBytes(capacity)),
  1068. slot_offset_(
  1069. (generation_offset_ + NumGenerationBytes() + slot_align - 1) &
  1070. (~slot_align + 1)) {
  1071. assert(IsValidCapacity(capacity));
  1072. }
  1073. // Returns the capacity of a table.
  1074. size_t capacity() const { return capacity_; }
  1075. // Returns precomputed offset from the start of the backing allocation of
  1076. // control.
  1077. size_t control_offset() const { return control_offset_; }
  1078. // Given the capacity of a table, computes the offset (from the start of the
  1079. // backing allocation) of the generation counter (if it exists).
  1080. size_t generation_offset() const { return generation_offset_; }
  1081. // Given the capacity of a table, computes the offset (from the start of the
  1082. // backing allocation) at which the slots begin.
  1083. size_t slot_offset() const { return slot_offset_; }
  1084. // Given the capacity of a table, computes the total size of the backing
  1085. // array.
  1086. size_t alloc_size(size_t slot_size) const {
  1087. return slot_offset_ + capacity_ * slot_size;
  1088. }
  1089. private:
  1090. size_t capacity_;
  1091. size_t control_offset_;
  1092. size_t generation_offset_;
  1093. size_t slot_offset_;
  1094. };
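// Editor's sketch of the resulting layout. It assumes a 64-bit size_t,
// Group::kWidth == 16, 8-byte slots with 8-byte alignment, no infoz, and
// generations disabled (so NumGenerationBytes() == 0):
//   RawHashSetLayout layout(/*capacity=*/15, /*slot_align=*/8,
//                           /*has_infoz=*/false);
//   layout.control_offset()    == 8            // GrowthInfo comes first
//   layout.generation_offset() == 8 + 31 == 39
//   layout.slot_offset()       == 40           // 39 rounded up to 8 bytes
//   layout.alloc_size(8)       == 40 + 15 * 8 == 160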
  1095. struct HashtableFreeFunctionsAccess;
  1096. // We only allow a maximum of 1 SOO element, which makes the implementation
  1097. // much simpler. Complications with multiple SOO elements include:
  1098. // - Satisfying the guarantee that erasing one element doesn't invalidate
  1099. // iterators to other elements means we would probably need actual SOO
  1100. // control bytes.
  1101. // - In order to prevent user code from depending on iteration order for small
  1102. // tables, we would need to randomize the iteration order somehow.
  1103. constexpr size_t SooCapacity() { return 1; }
  1104. // Sentinel type to indicate SOO CommonFields construction.
  1105. struct soo_tag_t {};
  1106. // Sentinel type to indicate SOO CommonFields construction with full size.
  1107. struct full_soo_tag_t {};
  1108. // Suppress erroneous uninitialized memory errors on GCC. For example, GCC
  1109. // thinks that the call to slot_array() in find_or_prepare_insert() is reading
  1110. // uninitialized memory, but slot_array is only called there when the table is
  1111. // non-empty and this memory is initialized when the table is non-empty.
  1112. #if !defined(__clang__) && defined(__GNUC__)
  1113. #define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) \
  1114. _Pragma("GCC diagnostic push") \
  1115. _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") \
  1116. _Pragma("GCC diagnostic ignored \"-Wuninitialized\"") x; \
  1117. _Pragma("GCC diagnostic pop")
  1118. #define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) \
  1119. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(return x)
  1120. #else
  1121. #define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) x
  1122. #define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) return x
  1123. #endif
  1124. // This allows us to work around an uninitialized memory warning when
  1125. // constructing begin() iterators in empty hashtables.
  1126. union MaybeInitializedPtr {
  1127. void* get() const { ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(p); }
  1128. void set(void* ptr) { p = ptr; }
  1129. void* p;
  1130. };
  1131. struct HeapPtrs {
  1132. HeapPtrs() = default;
  1133. explicit HeapPtrs(ctrl_t* c) : control(c) {}
  1134. // The control bytes (and, also, a pointer near to the base of the backing
  1135. // array).
  1136. //
  1137. // This contains `capacity + 1 + NumClonedBytes()` entries, even
  1138. // when the table is empty (hence EmptyGroup).
  1139. //
  1140. // Note that growth_info is stored immediately before this pointer.
  1141. // May be uninitialized for SOO tables.
  1142. ctrl_t* control;
  1143. // The beginning of the slots, located at `SlotOffset()` bytes after
  1144. // `control`. May be uninitialized for empty tables.
  1145. // Note: we can't use `slots` because Qt defines "slots" as a macro.
  1146. MaybeInitializedPtr slot_array;
  1147. };
  1148. // Manages the backing array pointers or the SOO slot. When raw_hash_set::is_soo
  1149. // is true, the SOO slot is stored in `soo_data`. Otherwise, we use `heap`.
  1150. union HeapOrSoo {
  1151. HeapOrSoo() = default;
  1152. explicit HeapOrSoo(ctrl_t* c) : heap(c) {}
  1153. ctrl_t*& control() {
  1154. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control);
  1155. }
  1156. ctrl_t* control() const {
  1157. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control);
  1158. }
  1159. MaybeInitializedPtr& slot_array() {
  1160. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array);
  1161. }
  1162. MaybeInitializedPtr slot_array() const {
  1163. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array);
  1164. }
  1165. void* get_soo_data() {
  1166. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(soo_data);
  1167. }
  1168. const void* get_soo_data() const {
  1169. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(soo_data);
  1170. }
  1171. HeapPtrs heap;
  1172. unsigned char soo_data[sizeof(HeapPtrs)];
  1173. };
  1174. // CommonFields hold the fields in raw_hash_set that do not depend
  1175. // on template parameters. This allows us to conveniently pass all
  1176. // of this state to helper functions as a single argument.
  1177. class CommonFields : public CommonFieldsGenerationInfo {
  1178. public:
  1179. CommonFields() : capacity_(0), size_(0), heap_or_soo_(EmptyGroup()) {}
  1180. explicit CommonFields(soo_tag_t) : capacity_(SooCapacity()), size_(0) {}
  1181. explicit CommonFields(full_soo_tag_t)
  1182. : capacity_(SooCapacity()), size_(size_t{1} << HasInfozShift()) {}
  1183. // Not copyable
  1184. CommonFields(const CommonFields&) = delete;
  1185. CommonFields& operator=(const CommonFields&) = delete;
  1186. // Movable
  1187. CommonFields(CommonFields&& that) = default;
  1188. CommonFields& operator=(CommonFields&&) = default;
  1189. template <bool kSooEnabled>
  1190. static CommonFields CreateDefault() {
  1191. return kSooEnabled ? CommonFields{soo_tag_t{}} : CommonFields{};
  1192. }
  1193. // The inline data for SOO is written on top of control_/slots_.
  1194. const void* soo_data() const { return heap_or_soo_.get_soo_data(); }
  1195. void* soo_data() { return heap_or_soo_.get_soo_data(); }
  1196. HeapOrSoo heap_or_soo() const { return heap_or_soo_; }
  1197. const HeapOrSoo& heap_or_soo_ref() const { return heap_or_soo_; }
  1198. ctrl_t* control() const { return heap_or_soo_.control(); }
  1199. void set_control(ctrl_t* c) { heap_or_soo_.control() = c; }
  1200. void* backing_array_start() const {
  1201. // growth_info (and maybe infoz) is stored before control bytes.
  1202. assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
  1203. return control() - ControlOffset(has_infoz());
  1204. }
  1205. // Note: we can't use slots() because Qt defines "slots" as a macro.
  1206. void* slot_array() const { return heap_or_soo_.slot_array().get(); }
  1207. MaybeInitializedPtr slots_union() const { return heap_or_soo_.slot_array(); }
  1208. void set_slots(void* s) { heap_or_soo_.slot_array().set(s); }
  1209. // The number of filled slots.
  1210. size_t size() const { return size_ >> HasInfozShift(); }
  1211. void set_size(size_t s) {
  1212. size_ = (s << HasInfozShift()) | (size_ & HasInfozMask());
  1213. }
  1214. void set_empty_soo() {
  1215. AssertInSooMode();
  1216. size_ = 0;
  1217. }
  1218. void set_full_soo() {
  1219. AssertInSooMode();
  1220. size_ = size_t{1} << HasInfozShift();
  1221. }
  1222. void increment_size() {
  1223. assert(size() < capacity());
  1224. size_ += size_t{1} << HasInfozShift();
  1225. }
  1226. void decrement_size() {
  1227. assert(size() > 0);
  1228. size_ -= size_t{1} << HasInfozShift();
  1229. }
  1230. // The total number of available slots.
  1231. size_t capacity() const { return capacity_; }
  1232. void set_capacity(size_t c) {
  1233. assert(c == 0 || IsValidCapacity(c));
  1234. capacity_ = c;
  1235. }
  1236. // The number of slots we can still fill without needing to rehash.
  1237. // This is stored in the heap allocation before the control bytes.
  1238. // TODO(b/289225379): experiment with moving growth_info back inline to
  1239. // increase room for SOO.
  1240. size_t growth_left() const { return growth_info().GetGrowthLeft(); }
  1241. GrowthInfo& growth_info() {
  1242. auto* gl_ptr = reinterpret_cast<GrowthInfo*>(control()) - 1;
  1243. assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(GrowthInfo) == 0);
  1244. return *gl_ptr;
  1245. }
  1246. GrowthInfo growth_info() const {
  1247. return const_cast<CommonFields*>(this)->growth_info();
  1248. }
  1249. bool has_infoz() const {
  1250. return ABSL_PREDICT_FALSE((size_ & HasInfozMask()) != 0);
  1251. }
  1252. void set_has_infoz(bool has_infoz) {
  1253. size_ = (size() << HasInfozShift()) | static_cast<size_t>(has_infoz);
  1254. }
  1255. HashtablezInfoHandle infoz() {
  1256. return has_infoz()
  1257. ? *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start())
  1258. : HashtablezInfoHandle();
  1259. }
  1260. void set_infoz(HashtablezInfoHandle infoz) {
  1261. assert(has_infoz());
  1262. *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start()) = infoz;
  1263. }
  1264. bool should_rehash_for_bug_detection_on_insert() const {
  1265. return CommonFieldsGenerationInfo::
  1266. should_rehash_for_bug_detection_on_insert(control(), capacity());
  1267. }
  1268. bool should_rehash_for_bug_detection_on_move() const {
  1269. return CommonFieldsGenerationInfo::should_rehash_for_bug_detection_on_move(
  1270. control(), capacity());
  1271. }
  1272. void reset_reserved_growth(size_t reservation) {
  1273. CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
  1274. }
  1275. // The size of the backing array allocation.
  1276. size_t alloc_size(size_t slot_size, size_t slot_align) const {
  1277. return RawHashSetLayout(capacity(), slot_align, has_infoz())
  1278. .alloc_size(slot_size);
  1279. }
  1280. // Move fields other than heap_or_soo_.
  1281. void move_non_heap_or_soo_fields(CommonFields& that) {
  1282. static_cast<CommonFieldsGenerationInfo&>(*this) =
  1283. std::move(static_cast<CommonFieldsGenerationInfo&>(that));
  1284. capacity_ = that.capacity_;
  1285. size_ = that.size_;
  1286. }
  1287. // Returns the number of control bytes set to kDeleted. For testing only.
  1288. size_t TombstonesCount() const {
  1289. return static_cast<size_t>(
  1290. std::count(control(), control() + capacity(), ctrl_t::kDeleted));
  1291. }
  1292. private:
  1293. // We store the has_infoz bit in the lowest bit of size_.
  1294. static constexpr size_t HasInfozShift() { return 1; }
  1295. static constexpr size_t HasInfozMask() {
  1296. return (size_t{1} << HasInfozShift()) - 1;
  1297. }
  1298. // We can't assert that SOO is enabled because we don't have SooEnabled(), but
  1299. // we assert what we can.
  1300. void AssertInSooMode() const {
  1301. assert(capacity() == SooCapacity());
  1302. assert(!has_infoz());
  1303. }
  1304. // The number of slots in the backing array. This is always 2^N-1 for an
  1305. // integer N. NOTE: we tried experimenting with compressing the capacity and
  1306. // storing it together with size_: (a) using 6 bits to store the corresponding
  1307. // power (N in 2^N-1), and (b) storing 2^N as the most significant bit of
  1308. // size_ and storing size in the low bits. Both of these experiments were
  1309. // regressions, presumably because we need capacity to do find operations.
  1310. size_t capacity_;
  1311. // The size and also has one bit that stores whether we have infoz.
  1312. // TODO(b/289225379): we could put size_ into HeapOrSoo and make capacity_
  1313. // encode the size in SOO case. We would be making size()/capacity() more
  1314. // expensive in order to have more SOO space.
  1315. size_t size_;
  1316. // Either the control/slots pointers or the SOO slot.
  1317. HeapOrSoo heap_or_soo_;
  1318. };
  1319. template <class Policy, class Hash, class Eq, class Alloc>
  1320. class raw_hash_set;
  1321. // Returns the next valid capacity after `n`.
  1322. inline size_t NextCapacity(size_t n) {
  1323. assert(IsValidCapacity(n) || n == 0);
  1324. return n * 2 + 1;
  1325. }
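// Editor's illustration: repeated NextCapacity() calls produce
//   0 -> 1 -> 3 -> 7 -> 15 -> 31 -> ...
// i.e. capacities always stay of the form 2^m - 1 required by
// IsValidCapacity().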
  1326. // Applies the following mapping to every byte in the control array:
  1327. // * kDeleted -> kEmpty
  1328. // * kEmpty -> kEmpty
  1329. // * _ -> kDeleted
  1330. // PRECONDITION:
  1331. // IsValidCapacity(capacity)
  1332. // ctrl[capacity] == ctrl_t::kSentinel
  1333. // ctrl[i] != ctrl_t::kSentinel for all i < capacity
  1334. void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
  1335. // Converts `n` into the next valid capacity, per `IsValidCapacity`.
  1336. inline size_t NormalizeCapacity(size_t n) {
  1337. return n ? ~size_t{} >> countl_zero(n) : 1;
  1338. }
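// Editor's illustration (assuming a 64-bit size_t):
//   NormalizeCapacity(0)  == 1
//   NormalizeCapacity(12) == 15   // smallest 2^m - 1 that is >= 12
//   NormalizeCapacity(15) == 15
//   NormalizeCapacity(16) == 31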
  1339. // General notes on capacity/growth methods below:
  1340. // - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
  1341. // average of two empty slots per group.
  1342. // - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
  1343. // - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
  1344. // never need to probe (the whole table fits in one group) so we don't need a
  1345. // load factor less than 1.
  1346. // Given `capacity`, applies the load factor; i.e., it returns the maximum
  1347. // number of values we should put into the table before a resizing rehash.
  1348. inline size_t CapacityToGrowth(size_t capacity) {
  1349. assert(IsValidCapacity(capacity));
  1350. // `capacity*7/8`
  1351. if (Group::kWidth == 8 && capacity == 7) {
  1352. // x-x/8 does not work when x==7.
  1353. return 6;
  1354. }
  1355. return capacity - capacity / 8;
  1356. }
  1357. // Given `growth`, "unapplies" the load factor to find how large the capacity
  1358. // should be to stay within the load factor.
  1359. //
  1360. // This might not be a valid capacity and `NormalizeCapacity()` should be
  1361. // called on this.
  1362. inline size_t GrowthToLowerboundCapacity(size_t growth) {
  1363. // `growth*8/7`
  1364. if (Group::kWidth == 8 && growth == 7) {
  1365. // x+(x-1)/7 does not work when x==7.
  1366. return 8;
  1367. }
  1368. return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
  1369. }
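// Editor's illustration of the round trip (assuming Group::kWidth == 16):
//   CapacityToGrowth(7)  == 7 - 7/8   == 7   // single group: full load is OK
//   CapacityToGrowth(15) == 15 - 15/8 == 14
//   GrowthToLowerboundCapacity(14) == 14 + 13/7 == 15
//   GrowthToLowerboundCapacity(16) == 16 + 15/7 == 18, which
//   NormalizeCapacity() then rounds up to the valid capacity 31.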
  1370. template <class InputIter>
  1371. size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
  1372. size_t bucket_count) {
  1373. if (bucket_count != 0) {
  1374. return bucket_count;
  1375. }
  1376. using InputIterCategory =
  1377. typename std::iterator_traits<InputIter>::iterator_category;
  1378. if (std::is_base_of<std::random_access_iterator_tag,
  1379. InputIterCategory>::value) {
  1380. return GrowthToLowerboundCapacity(
  1381. static_cast<size_t>(std::distance(first, last)));
  1382. }
  1383. return 0;
  1384. }
  1385. constexpr bool SwisstableDebugEnabled() {
  1386. #if defined(ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \
  1387. ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG)
  1388. return true;
  1389. #else
  1390. return false;
  1391. #endif
  1392. }
  1393. inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
  1394. const GenerationType* generation_ptr,
  1395. const char* operation) {
  1396. if (!SwisstableDebugEnabled()) return;
  1397. // `SwisstableDebugEnabled()` is also true for release builds with hardening
  1398. // enabled. To minimize their impact in those builds:
  1399. // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
  1400. // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
  1401. // the chances that the hot paths will be inlined.
  1402. if (ABSL_PREDICT_FALSE(ctrl == nullptr)) {
  1403. ABSL_RAW_LOG(FATAL, "%s called on end() iterator.", operation);
  1404. }
  1405. if (ABSL_PREDICT_FALSE(ctrl == EmptyGroup())) {
  1406. ABSL_RAW_LOG(FATAL, "%s called on default-constructed iterator.",
  1407. operation);
  1408. }
  1409. if (SwisstableGenerationsEnabled()) {
  1410. if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
  1411. ABSL_RAW_LOG(FATAL,
  1412. "%s called on invalid iterator. The table could have "
  1413. "rehashed or moved since this iterator was initialized.",
  1414. operation);
  1415. }
  1416. if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
  1417. ABSL_RAW_LOG(
  1418. FATAL,
  1419. "%s called on invalid iterator. The element was likely erased.",
  1420. operation);
  1421. }
  1422. } else {
  1423. if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
  1424. ABSL_RAW_LOG(
  1425. FATAL,
  1426. "%s called on invalid iterator. The element might have been erased "
  1427. "or the table might have rehashed. Consider running with "
  1428. "--config=asan to diagnose rehashing issues.",
  1429. operation);
  1430. }
  1431. }
  1432. }
  1433. // Note that for comparisons, null/end iterators are valid.
  1434. inline void AssertIsValidForComparison(const ctrl_t* ctrl,
  1435. GenerationType generation,
  1436. const GenerationType* generation_ptr) {
  1437. if (!SwisstableDebugEnabled()) return;
  1438. const bool ctrl_is_valid_for_comparison =
  1439. ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl);
  1440. if (SwisstableGenerationsEnabled()) {
  1441. if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
  1442. ABSL_RAW_LOG(FATAL,
  1443. "Invalid iterator comparison. The table could have rehashed "
  1444. "or moved since this iterator was initialized.");
  1445. }
  1446. if (ABSL_PREDICT_FALSE(!ctrl_is_valid_for_comparison)) {
  1447. ABSL_RAW_LOG(
  1448. FATAL, "Invalid iterator comparison. The element was likely erased.");
  1449. }
  1450. } else {
  1451. ABSL_HARDENING_ASSERT(
  1452. ctrl_is_valid_for_comparison &&
  1453. "Invalid iterator comparison. The element might have been erased or "
  1454. "the table might have rehashed. Consider running with --config=asan to "
  1455. "diagnose rehashing issues.");
  1456. }
  1457. }
1458. // If the two iterators come from the same container, then their pointers will
1459. // interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice versa.
  1460. // Note: we take slots by reference so that it's not UB if they're uninitialized
  1461. // as long as we don't read them (when ctrl is null).
  1462. inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
  1463. const ctrl_t* ctrl_b,
  1464. const void* const& slot_a,
  1465. const void* const& slot_b) {
  1466. // If either control byte is null, then we can't tell.
  1467. if (ctrl_a == nullptr || ctrl_b == nullptr) return true;
  1468. const bool a_is_soo = IsSooControl(ctrl_a);
  1469. if (a_is_soo != IsSooControl(ctrl_b)) return false;
  1470. if (a_is_soo) return slot_a == slot_b;
  1471. const void* low_slot = slot_a;
  1472. const void* hi_slot = slot_b;
  1473. if (ctrl_a > ctrl_b) {
  1474. std::swap(ctrl_a, ctrl_b);
  1475. std::swap(low_slot, hi_slot);
  1476. }
  1477. return ctrl_b < low_slot && low_slot <= hi_slot;
  1478. }
  1479. // Asserts that two iterators come from the same container.
  1480. // Note: we take slots by reference so that it's not UB if they're uninitialized
  1481. // as long as we don't read them (when ctrl is null).
  1482. inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
  1483. const void* const& slot_a,
  1484. const void* const& slot_b,
  1485. const GenerationType* generation_ptr_a,
  1486. const GenerationType* generation_ptr_b) {
  1487. if (!SwisstableDebugEnabled()) return;
  1488. // `SwisstableDebugEnabled()` is also true for release builds with hardening
  1489. // enabled. To minimize their impact in those builds:
  1490. // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
  1491. // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
  1492. // the chances that the hot paths will be inlined.
  1493. // fail_if(is_invalid, message) crashes when is_invalid is true and provides
  1494. // an error message based on `message`.
  1495. const auto fail_if = [](bool is_invalid, const char* message) {
  1496. if (ABSL_PREDICT_FALSE(is_invalid)) {
  1497. ABSL_RAW_LOG(FATAL, "Invalid iterator comparison. %s", message);
  1498. }
  1499. };
  1500. const bool a_is_default = ctrl_a == EmptyGroup();
  1501. const bool b_is_default = ctrl_b == EmptyGroup();
  1502. if (a_is_default && b_is_default) return;
  1503. fail_if(a_is_default != b_is_default,
  1504. "Comparing default-constructed hashtable iterator with a "
  1505. "non-default-constructed hashtable iterator.");
  1506. if (SwisstableGenerationsEnabled()) {
  1507. if (ABSL_PREDICT_TRUE(generation_ptr_a == generation_ptr_b)) return;
  1508. // Users don't need to know whether the tables are SOO so don't mention SOO
  1509. // in the debug message.
  1510. const bool a_is_soo = IsSooControl(ctrl_a);
  1511. const bool b_is_soo = IsSooControl(ctrl_b);
  1512. fail_if(a_is_soo != b_is_soo || (a_is_soo && b_is_soo),
  1513. "Comparing iterators from different hashtables.");
  1514. const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
  1515. const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
  1516. fail_if(a_is_empty != b_is_empty,
  1517. "Comparing an iterator from an empty hashtable with an iterator "
  1518. "from a non-empty hashtable.");
  1519. fail_if(a_is_empty && b_is_empty,
  1520. "Comparing iterators from different empty hashtables.");
  1521. const bool a_is_end = ctrl_a == nullptr;
  1522. const bool b_is_end = ctrl_b == nullptr;
  1523. fail_if(a_is_end || b_is_end,
  1524. "Comparing iterator with an end() iterator from a different "
  1525. "hashtable.");
  1526. fail_if(true, "Comparing non-end() iterators from different hashtables.");
  1527. } else {
  1528. ABSL_HARDENING_ASSERT(
  1529. AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
  1530. "Invalid iterator comparison. The iterators may be from different "
  1531. "containers or the container might have rehashed or moved. Consider "
  1532. "running with --config=asan to diagnose issues.");
  1533. }
  1534. }
  1535. struct FindInfo {
  1536. size_t offset;
  1537. size_t probe_length;
  1538. };
  1539. // Whether a table is "small". A small table fits entirely into a probing
  1540. // group, i.e., has a capacity < `Group::kWidth`.
  1541. //
  1542. // In small mode we are able to use the whole capacity. The extra control
  1543. // bytes give us at least one "empty" control byte to stop the iteration.
  1544. // This is important to make 1 a valid capacity.
  1545. //
  1546. // In small mode only the first `capacity` control bytes after the sentinel
  1547. // are valid. The rest contain dummy ctrl_t::kEmpty values that do not
  1548. // represent a real slot. This is important to take into account on
  1549. // `find_first_non_full()`, where we never try
  1550. // `ShouldInsertBackwards()` for small tables.
  1551. inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
  1552. // Whether a table fits entirely into a probing group.
  1553. // Arbitrary order of elements in such tables is correct.
  1554. inline bool is_single_group(size_t capacity) {
  1555. return capacity <= Group::kWidth;
  1556. }
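// Editor's illustration (assuming Group::kWidth == 16): among the valid
// capacities 1, 3, 7, 15, 31, ...
//   is_small():        1, 3, 7       (capacity < kWidth - 1 == 15)
//   is_single_group(): 1, 3, 7, 15   (capacity <= kWidth == 16)
// so a capacity-15 table probes within a single group but is not "small".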
  1557. // Begins a probing operation on `common.control`, using `hash`.
  1558. inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
  1559. size_t hash) {
  1560. return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
  1561. }
  1562. inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
  1563. return probe(common.control(), common.capacity(), hash);
  1564. }
  1565. // Probes an array of control bits using a probe sequence derived from `hash`,
  1566. // and returns the offset corresponding to the first deleted or empty slot.
  1567. //
  1568. // Behavior when the entire table is full is undefined.
  1569. //
  1570. // NOTE: this function must work with tables having both empty and deleted
  1571. // slots in the same group. Such tables appear during `erase()`.
  1572. template <typename = void>
  1573. inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
  1574. auto seq = probe(common, hash);
  1575. const ctrl_t* ctrl = common.control();
  1576. if (IsEmptyOrDeleted(ctrl[seq.offset()]) &&
  1577. !ShouldInsertBackwards(common.capacity(), hash, ctrl)) {
  1578. return {seq.offset(), /*probe_length=*/0};
  1579. }
  1580. while (true) {
  1581. GroupFullEmptyOrDeleted g{ctrl + seq.offset()};
  1582. auto mask = g.MaskEmptyOrDeleted();
  1583. if (mask) {
  1584. return {
  1585. seq.offset(GetInsertionOffset(mask, common.capacity(), hash, ctrl)),
  1586. seq.index()};
  1587. }
  1588. seq.next();
  1589. assert(seq.index() <= common.capacity() && "full table!");
  1590. }
  1591. }
1592. // The extern template declaration for this inline function keeps the
1593. // possibility of inlining. When the compiler decides not to inline it,
1594. // no symbols will be added to the corresponding translation unit.
  1595. extern template FindInfo find_first_non_full(const CommonFields&, size_t);
  1596. // Non-inlined version of find_first_non_full for use in less
  1597. // performance critical routines.
  1598. FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
  1599. inline void ResetGrowthLeft(CommonFields& common) {
  1600. common.growth_info().InitGrowthLeftNoDeleted(
  1601. CapacityToGrowth(common.capacity()) - common.size());
  1602. }
1603. // Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
1604. // array as empty.
  1605. inline void ResetCtrl(CommonFields& common, size_t slot_size) {
  1606. const size_t capacity = common.capacity();
  1607. ctrl_t* ctrl = common.control();
  1608. std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
  1609. capacity + 1 + NumClonedBytes());
  1610. ctrl[capacity] = ctrl_t::kSentinel;
  1611. SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
  1612. }
  1613. // Sets sanitizer poisoning for slot corresponding to control byte being set.
  1614. inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h,
  1615. size_t slot_size) {
  1616. assert(i < c.capacity());
  1617. auto* slot_i = static_cast<const char*>(c.slot_array()) + i * slot_size;
  1618. if (IsFull(h)) {
  1619. SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
  1620. } else {
  1621. SanitizerPoisonMemoryRegion(slot_i, slot_size);
  1622. }
  1623. }
  1624. // Sets `ctrl[i]` to `h`.
  1625. //
  1626. // Unlike setting it directly, this function will perform bounds checks and
  1627. // mirror the value to the cloned tail if necessary.
  1628. inline void SetCtrl(const CommonFields& c, size_t i, ctrl_t h,
  1629. size_t slot_size) {
  1630. DoSanitizeOnSetCtrl(c, i, h, slot_size);
  1631. ctrl_t* ctrl = c.control();
  1632. ctrl[i] = h;
  1633. ctrl[((i - NumClonedBytes()) & c.capacity()) +
  1634. (NumClonedBytes() & c.capacity())] = h;
  1635. }
  1636. // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
  1637. inline void SetCtrl(const CommonFields& c, size_t i, h2_t h, size_t slot_size) {
  1638. SetCtrl(c, i, static_cast<ctrl_t>(h), slot_size);
  1639. }
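// Editor's illustration of the cloned-byte mirroring in SetCtrl above
// (assuming Group::kWidth == 16, so NumClonedBytes() == 15):
//   capacity == 15: SetCtrl(c, 3, h, slot_size) also writes
//     ctrl[((3 - 15) & 15) + (15 & 15)] == ctrl[4 + 15] == ctrl[19].
//   capacity == 1:  SetCtrl(c, 0, h, slot_size) also writes
//     ctrl[((0 - 15) & 1) + (15 & 1)] == ctrl[1 + 1] == ctrl[2].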
  1640. // Like SetCtrl, but in a single group table, we can save some operations when
  1641. // setting the cloned control byte.
  1642. inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, ctrl_t h,
  1643. size_t slot_size) {
  1644. assert(is_single_group(c.capacity()));
  1645. DoSanitizeOnSetCtrl(c, i, h, slot_size);
  1646. ctrl_t* ctrl = c.control();
  1647. ctrl[i] = h;
  1648. ctrl[i + c.capacity() + 1] = h;
  1649. }
  1650. // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
  1651. inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h,
  1652. size_t slot_size) {
  1653. SetCtrlInSingleGroupTable(c, i, static_cast<ctrl_t>(h), slot_size);
  1654. }
  1655. // growth_info (which is a size_t) is stored with the backing array.
  1656. constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
  1657. return (std::max)(align_of_slot, alignof(GrowthInfo));
  1658. }
  1659. // Returns the address of the ith slot in slots where each slot occupies
  1660. // slot_size.
  1661. inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
  1662. return static_cast<void*>(static_cast<char*>(slot_array) +
  1663. (slot * slot_size));
  1664. }
1665. // Iterates over all full slots and calls `cb(const ctrl_t*, SlotType*)`.
1666. // No insertion into the table is allowed during the callback.
1667. // Erasure is allowed only for the element passed to the callback.
  1668. template <class SlotType, class Callback>
  1669. ABSL_ATTRIBUTE_ALWAYS_INLINE inline void IterateOverFullSlots(
  1670. const CommonFields& c, SlotType* slot, Callback cb) {
  1671. const size_t cap = c.capacity();
  1672. const ctrl_t* ctrl = c.control();
  1673. if (is_small(cap)) {
1674. // Mirrored/cloned control bytes in a small table are also located in the
1675. // first group (starting at position 0). We take the group at position
1676. // `capacity` in order to avoid duplicates.
1677. // A small table's capacity fits into a portable group, and
1678. // GroupPortableImpl::MaskFull is more efficient when
1679. // capacity <= GroupPortableImpl::kWidth.
  1680. assert(cap <= GroupPortableImpl::kWidth &&
  1681. "unexpectedly large small capacity");
  1682. static_assert(Group::kWidth >= GroupPortableImpl::kWidth,
  1683. "unexpected group width");
1684. // The group starts at the kSentinel slot, so indices in the mask are
1685. // shifted up by 1 relative to the real slot indices.
  1686. const auto mask = GroupPortableImpl(ctrl + cap).MaskFull();
  1687. --ctrl;
  1688. --slot;
  1689. for (uint32_t i : mask) {
  1690. cb(ctrl + i, slot + i);
  1691. }
  1692. return;
  1693. }
  1694. size_t remaining = c.size();
  1695. ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining;
  1696. while (remaining != 0) {
  1697. for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) {
  1698. assert(IsFull(ctrl[i]) && "hash table was modified unexpectedly");
  1699. cb(ctrl + i, slot + i);
  1700. --remaining;
  1701. }
  1702. ctrl += Group::kWidth;
  1703. slot += Group::kWidth;
  1704. assert((remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&
  1705. "hash table was modified unexpectedly");
  1706. }
1707. // NOTE: erasure of the current element is allowed in the callback for the
1708. // absl::erase_if specialization, so we use `>=`.
  1709. assert(original_size_for_assert >= c.size() &&
  1710. "hash table was modified unexpectedly");
  1711. }
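// Editor's sketch of a caller (`MySlot` is a hypothetical slot type, not part
// of this file):
//   IterateOverFullSlots(common, static_cast<MySlot*>(common.slot_array()),
//                        [](const ctrl_t* ctrl, MySlot* slot) {
//                          // Read *slot; erasing this element is the only
//                          // mutation allowed inside the callback.
//                        });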
  1712. template <typename CharAlloc>
  1713. constexpr bool ShouldSampleHashtablezInfo() {
  1714. // Folks with custom allocators often make unwarranted assumptions about the
1715. // behavior of their classes vis-a-vis trivial destructibility and what
  1716. // calls they will or won't make. Avoid sampling for people with custom
  1717. // allocators to get us out of this mess. This is not a hard guarantee but
  1718. // a workaround while we plan the exact guarantee we want to provide.
  1719. return std::is_same<CharAlloc, std::allocator<char>>::value;
  1720. }
  1721. template <bool kSooEnabled>
  1722. HashtablezInfoHandle SampleHashtablezInfo(size_t sizeof_slot, size_t sizeof_key,
  1723. size_t sizeof_value,
  1724. size_t old_capacity, bool was_soo,
  1725. HashtablezInfoHandle forced_infoz,
  1726. CommonFields& c) {
  1727. if (forced_infoz.IsSampled()) return forced_infoz;
  1728. // In SOO, we sample on the first insertion so if this is an empty SOO case
  1729. // (e.g. when reserve is called), then we still need to sample.
  1730. if (kSooEnabled && was_soo && c.size() == 0) {
  1731. return Sample(sizeof_slot, sizeof_key, sizeof_value, SooCapacity());
  1732. }
  1733. // For non-SOO cases, we sample whenever the capacity is increasing from zero
  1734. // to non-zero.
  1735. if (!kSooEnabled && old_capacity == 0) {
  1736. return Sample(sizeof_slot, sizeof_key, sizeof_value, 0);
  1737. }
  1738. return c.infoz();
  1739. }
  1740. // Helper class to perform resize of the hash set.
  1741. //
  1742. // It contains special optimizations for small group resizes.
  1743. // See GrowIntoSingleGroupShuffleControlBytes for details.
  1744. class HashSetResizeHelper {
  1745. public:
  1746. explicit HashSetResizeHelper(CommonFields& c, bool was_soo, bool had_soo_slot,
  1747. HashtablezInfoHandle forced_infoz)
  1748. : old_capacity_(c.capacity()),
  1749. had_infoz_(c.has_infoz()),
  1750. was_soo_(was_soo),
  1751. had_soo_slot_(had_soo_slot),
  1752. forced_infoz_(forced_infoz) {}
1753. // A version of `find_first_non_full` optimized for small groups.
1754. // Beneficial only right after calling `raw_hash_set::resize`.
1755. // It is safe to call when the capacity is large or was not changed, but there
1756. // will be no performance benefit.
1757. // It implicitly assumes that `resize` calls `GrowSizeIntoSingleGroup*`
1758. // whenever `IsGrowingIntoSingleGroupApplicable` holds.
1759. // Falls back to `find_first_non_full` for large groups.
  1760. static FindInfo FindFirstNonFullAfterResize(const CommonFields& c,
  1761. size_t old_capacity,
  1762. size_t hash) {
  1763. if (!IsGrowingIntoSingleGroupApplicable(old_capacity, c.capacity())) {
  1764. return find_first_non_full(c, hash);
  1765. }
1766. // Find a location for the new element non-deterministically.
1767. // Note that any position is correct.
1768. // It will be located at `half_old_capacity` or one of the other
1769. // empty slots with approximately 50% probability each.
  1770. size_t offset = probe(c, hash).offset();
  1771. // Note that we intentionally use unsigned int underflow.
  1772. if (offset - (old_capacity + 1) >= old_capacity) {
1773. // The offset falls on kSentinel or into the mostly occupied first half.
  1774. offset = old_capacity / 2;
  1775. }
  1776. assert(IsEmpty(c.control()[offset]));
  1777. return FindInfo{offset, 0};
  1778. }
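// Editor's illustration of the offset remapping above: with old_capacity == 3
// growing to capacity 7, the shuffle places the old elements in the first
// old_capacity + 1 == 4 control positions and leaves position
// old_capacity / 2 == 1 empty. Probe offsets 4..6 (the still-empty new half)
// are kept as-is; any other offset is redirected to position 1.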
  1779. HeapOrSoo& old_heap_or_soo() { return old_heap_or_soo_; }
  1780. void* old_soo_data() { return old_heap_or_soo_.get_soo_data(); }
  1781. ctrl_t* old_ctrl() const {
  1782. assert(!was_soo_);
  1783. return old_heap_or_soo_.control();
  1784. }
  1785. void* old_slots() const {
  1786. assert(!was_soo_);
  1787. return old_heap_or_soo_.slot_array().get();
  1788. }
  1789. size_t old_capacity() const { return old_capacity_; }
  1790. // Returns the index of the SOO slot when growing from SOO to non-SOO in a
  1791. // single group. See also InitControlBytesAfterSoo(). It's important to use
  1792. // index 1 so that when resizing from capacity 1 to 3, we can still have
  1793. // random iteration order between the first two inserted elements.
  1794. // I.e. it allows inserting the second element at either index 0 or 2.
  1795. static size_t SooSlotIndex() { return 1; }
  1796. // Allocates a backing array for the hashtable.
  1797. // Reads `capacity` and updates all other fields based on the result of
  1798. // the allocation.
  1799. //
1800. // It may also do the following actions:
  1801. // 1. initialize control bytes
  1802. // 2. initialize slots
  1803. // 3. deallocate old slots.
  1804. //
  1805. // We are bundling a lot of functionality
  1806. // in one ABSL_ATTRIBUTE_NOINLINE function in order to minimize binary code
  1807. // duplication in raw_hash_set<>::resize.
  1808. //
  1809. // `c.capacity()` must be nonzero.
  1810. // POSTCONDITIONS:
  1811. // 1. CommonFields is initialized.
  1812. //
  1813. // if IsGrowingIntoSingleGroupApplicable && TransferUsesMemcpy
  1814. // Both control bytes and slots are fully initialized.
  1815. // old_slots are deallocated.
  1816. // infoz.RecordRehash is called.
  1817. //
  1818. // if IsGrowingIntoSingleGroupApplicable && !TransferUsesMemcpy
  1819. // Control bytes are fully initialized.
  1820. // infoz.RecordRehash is called.
  1821. // GrowSizeIntoSingleGroup must be called to finish slots initialization.
  1822. //
  1823. // if !IsGrowingIntoSingleGroupApplicable
  1824. // Control bytes are initialized to empty table via ResetCtrl.
  1825. // raw_hash_set<>::resize must insert elements regularly.
  1826. // infoz.RecordRehash is called if old_capacity == 0.
  1827. //
  1828. // Returns IsGrowingIntoSingleGroupApplicable result to avoid recomputation.
  1829. template <typename Alloc, size_t SizeOfSlot, bool TransferUsesMemcpy,
  1830. bool SooEnabled, size_t AlignOfSlot>
  1831. ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, Alloc alloc,
  1832. ctrl_t soo_slot_h2,
  1833. size_t key_size,
  1834. size_t value_size) {
  1835. assert(c.capacity());
  1836. HashtablezInfoHandle infoz =
  1837. ShouldSampleHashtablezInfo<Alloc>()
  1838. ? SampleHashtablezInfo<SooEnabled>(SizeOfSlot, key_size, value_size,
  1839. old_capacity_, was_soo_,
  1840. forced_infoz_, c)
  1841. : HashtablezInfoHandle{};
  1842. const bool has_infoz = infoz.IsSampled();
  1843. RawHashSetLayout layout(c.capacity(), AlignOfSlot, has_infoz);
  1844. char* mem = static_cast<char*>(Allocate<BackingArrayAlignment(AlignOfSlot)>(
  1845. &alloc, layout.alloc_size(SizeOfSlot)));
  1846. const GenerationType old_generation = c.generation();
  1847. c.set_generation_ptr(
  1848. reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));
  1849. c.set_generation(NextGeneration(old_generation));
  1850. c.set_control(reinterpret_cast<ctrl_t*>(mem + layout.control_offset()));
  1851. c.set_slots(mem + layout.slot_offset());
  1852. ResetGrowthLeft(c);
  1853. const bool grow_single_group =
  1854. IsGrowingIntoSingleGroupApplicable(old_capacity_, layout.capacity());
  1855. if (SooEnabled && was_soo_ && grow_single_group) {
  1856. InitControlBytesAfterSoo(c.control(), soo_slot_h2, layout.capacity());
  1857. if (TransferUsesMemcpy && had_soo_slot_) {
  1858. TransferSlotAfterSoo(c, SizeOfSlot);
  1859. }
  1860. // SooEnabled implies that old_capacity_ != 0.
  1861. } else if ((SooEnabled || old_capacity_ != 0) && grow_single_group) {
  1862. if (TransferUsesMemcpy) {
  1863. GrowSizeIntoSingleGroupTransferable(c, SizeOfSlot);
  1864. DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot);
  1865. } else {
  1866. GrowIntoSingleGroupShuffleControlBytes(c.control(), layout.capacity());
  1867. }
  1868. } else {
  1869. ResetCtrl(c, SizeOfSlot);
  1870. }
  1871. c.set_has_infoz(has_infoz);
  1872. if (has_infoz) {
  1873. infoz.RecordStorageChanged(c.size(), layout.capacity());
  1874. if ((SooEnabled && was_soo_) || grow_single_group || old_capacity_ == 0) {
  1875. infoz.RecordRehash(0);
  1876. }
  1877. c.set_infoz(infoz);
  1878. }
  1879. return grow_single_group;
  1880. }
  1881. // Relocates slots into new single group consistent with
  1882. // GrowIntoSingleGroupShuffleControlBytes.
  1883. //
  1884. // PRECONDITIONS:
  1885. // 1. GrowIntoSingleGroupShuffleControlBytes was already called.
  1886. template <class PolicyTraits, class Alloc>
  1887. void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref) {
  1888. assert(old_capacity_ < Group::kWidth / 2);
  1889. assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));
  1890. using slot_type = typename PolicyTraits::slot_type;
  1891. assert(is_single_group(c.capacity()));
  1892. auto* new_slots = static_cast<slot_type*>(c.slot_array());
  1893. auto* old_slots_ptr = static_cast<slot_type*>(old_slots());
  1894. size_t shuffle_bit = old_capacity_ / 2 + 1;
  1895. for (size_t i = 0; i < old_capacity_; ++i) {
  1896. if (IsFull(old_ctrl()[i])) {
  1897. size_t new_i = i ^ shuffle_bit;
  1898. SanitizerUnpoisonMemoryRegion(new_slots + new_i, sizeof(slot_type));
  1899. PolicyTraits::transfer(&alloc_ref, new_slots + new_i,
  1900. old_slots_ptr + i);
  1901. }
  1902. }
  1903. PoisonSingleGroupEmptySlots(c, sizeof(slot_type));
  1904. }
  1905. // Deallocates old backing array.
  1906. template <size_t AlignOfSlot, class CharAlloc>
  1907. void DeallocateOld(CharAlloc alloc_ref, size_t slot_size) {
  1908. SanitizerUnpoisonMemoryRegion(old_slots(), slot_size * old_capacity_);
  1909. auto layout = RawHashSetLayout(old_capacity_, AlignOfSlot, had_infoz_);
  1910. Deallocate<BackingArrayAlignment(AlignOfSlot)>(
  1911. &alloc_ref, old_ctrl() - layout.control_offset(),
  1912. layout.alloc_size(slot_size));
  1913. }
  1914. private:
  1915. // Returns true if `GrowSizeIntoSingleGroup` can be used for resizing.
  1916. static bool IsGrowingIntoSingleGroupApplicable(size_t old_capacity,
  1917. size_t new_capacity) {
1918. // NOTE: we require `old_capacity < new_capacity` so that
1919. // `old_capacity < Group::kWidth / 2`, which enables faster 8-byte copies.
  1920. return is_single_group(new_capacity) && old_capacity < new_capacity;
  1921. }
  1922. // Relocates control bytes and slots into new single group for
  1923. // transferable objects.
  1924. // Must be called only if IsGrowingIntoSingleGroupApplicable returned true.
  1925. void GrowSizeIntoSingleGroupTransferable(CommonFields& c, size_t slot_size);
  1926. // If there was an SOO slot and slots are transferable, transfers the SOO slot
  1927. // into the new heap allocation. Must be called only if
  1928. // IsGrowingIntoSingleGroupApplicable returned true.
  1929. void TransferSlotAfterSoo(CommonFields& c, size_t slot_size);
1930. // Shuffles control bytes deterministically to the next capacity.
1931. // (The offset for a newly added element is found via FindFirstNonFullAfterResize.)
  1932. //
  1933. // PRECONDITIONs:
  1934. // 1. new_ctrl is allocated for new_capacity,
  1935. // but not initialized.
  1936. // 2. new_capacity is a single group.
  1937. //
1938. // All elements are transferred into the first `old_capacity + 1` positions
1939. // of new_ctrl. Elements are rotated by `old_capacity_ / 2 + 1` positions
1940. // in order to change their order and keep it non-deterministic.
1941. // Although the rotation itself is deterministic, the position of a newly
1942. // added element is based on `H1` and is not deterministic.
  1943. //
  1944. // Examples:
  1945. // S = kSentinel, E = kEmpty
  1946. //
  1947. // old_ctrl = SEEEEEEEE...
  1948. // new_ctrl = ESEEEEEEE...
  1949. //
  1950. // old_ctrl = 0SEEEEEEE...
  1951. // new_ctrl = E0ESE0EEE...
  1952. //
  1953. // old_ctrl = 012S012EEEEEEEEE...
  1954. // new_ctrl = 2E01EEES2E01EEE...
  1955. //
  1956. // old_ctrl = 0123456S0123456EEEEEEEEEEE...
  1957. // new_ctrl = 456E0123EEEEEES456E0123EEE...
  1958. void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* new_ctrl,
  1959. size_t new_capacity) const;
  1960. // If the table was SOO, initializes new control bytes. `h2` is the control
  1961. // byte corresponding to the full slot. Must be called only if
  1962. // IsGrowingIntoSingleGroupApplicable returned true.
  1963. // Requires: `had_soo_slot_ || h2 == ctrl_t::kEmpty`.
  1964. void InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2,
  1965. size_t new_capacity);
1966. // Shuffles trivially transferable slots in a way consistent with
1967. // GrowIntoSingleGroupShuffleControlBytes.
  1968. //
  1969. // PRECONDITIONs:
  1970. // 1. old_capacity must be non-zero.
  1971. // 2. new_ctrl is fully initialized using
  1972. // GrowIntoSingleGroupShuffleControlBytes.
  1973. // 3. new_slots is allocated and *not* poisoned.
  1974. //
  1975. // POSTCONDITIONS:
  1976. // 1. new_slots are transferred from old_slots_ consistent with
  1977. // GrowIntoSingleGroupShuffleControlBytes.
  1978. // 2. Empty new_slots are *not* poisoned.
  1979. void GrowIntoSingleGroupShuffleTransferableSlots(void* new_slots,
  1980. size_t slot_size) const;
  1981. // Poison empty slots that were transferred using the deterministic algorithm
  1982. // described above.
  1983. // PRECONDITIONs:
  1984. // 1. new_ctrl is fully initialized using
  1985. // GrowIntoSingleGroupShuffleControlBytes.
  1986. // 2. new_slots is fully initialized consistent with
  1987. // GrowIntoSingleGroupShuffleControlBytes.
  1988. void PoisonSingleGroupEmptySlots(CommonFields& c, size_t slot_size) const {
  1989. // poison non full items
  1990. for (size_t i = 0; i < c.capacity(); ++i) {
  1991. if (!IsFull(c.control()[i])) {
  1992. SanitizerPoisonMemoryRegion(SlotAddress(c.slot_array(), i, slot_size),
  1993. slot_size);
  1994. }
  1995. }
  1996. }
  1997. HeapOrSoo old_heap_or_soo_;
  1998. size_t old_capacity_;
  1999. bool had_infoz_;
  2000. bool was_soo_;
  2001. bool had_soo_slot_;
  2002. // Either null infoz or a pre-sampled forced infoz for SOO tables.
  2003. HashtablezInfoHandle forced_infoz_;
  2004. };
  2005. inline void PrepareInsertCommon(CommonFields& common) {
  2006. common.increment_size();
  2007. common.maybe_increment_generation_on_insert();
  2008. }
  2009. // Like prepare_insert, but for the case of inserting into a full SOO table.
  2010. size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
  2011. CommonFields& common);
  2012. // PolicyFunctions bundles together some information for a particular
  2013. // raw_hash_set<T, ...> instantiation. This information is passed to
  2014. // type-erased functions that want to do small amounts of type-specific
  2015. // work.
  2016. struct PolicyFunctions {
  2017. size_t slot_size;
  2018. // Returns the pointer to the hash function stored in the set.
  2019. const void* (*hash_fn)(const CommonFields& common);
  2020. // Returns the hash of the pointed-to slot.
  2021. size_t (*hash_slot)(const void* hash_fn, void* slot);
  2022. // Transfers the contents of src_slot to dst_slot.
  2023. void (*transfer)(void* set, void* dst_slot, void* src_slot);
  2024. // Deallocates the backing store from common.
  2025. void (*dealloc)(CommonFields& common, const PolicyFunctions& policy);
  2026. // Resizes set to the new capacity.
  2027. // Arguments are used as in raw_hash_set::resize_impl.
  2028. void (*resize)(CommonFields& common, size_t new_capacity,
  2029. HashtablezInfoHandle forced_infoz);
  2030. };
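// Illustrative sketch only (the real wiring lives in
// raw_hash_set::GetPolicyFunctions()): for a trivially relocatable slot type
// with an empty hasher, an instantiation could populate this struct roughly as
//
//   static const PolicyFunctions kPolicyFns = {
//       sizeof(slot_type),
//       &GetHashRefForEmptyHasher,
//       &HashOfSlot,  // hypothetical type-specific hash-of-slot function
//       &TransferRelocatable<sizeof(slot_type)>,
//       &DeallocateStandard<alignof(slot_type)>,
//       &raw_hash_set::resize_impl,
//   };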
  2031. // ClearBackingArray clears the backing array, either modifying it in place,
  2032. // or creating a new one based on the value of "reuse".
  2033. // REQUIRES: c.capacity > 0
  2034. void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
  2035. bool reuse, bool soo_enabled);
  2036. // Type-erased version of raw_hash_set::erase_meta_only.
  2037. void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size);
  2038. // Function to place in PolicyFunctions::dealloc for raw_hash_sets
  2039. // that are using std::allocator. This allows us to share the same
  2040. // function body for raw_hash_set instantiations that have the
  2041. // same slot alignment.
  2042. template <size_t AlignOfSlot>
  2043. ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
  2044. const PolicyFunctions& policy) {
  2045. // Unpoison before returning the memory to the allocator.
  2046. SanitizerUnpoisonMemoryRegion(common.slot_array(),
  2047. policy.slot_size * common.capacity());
  2048. std::allocator<char> alloc;
  2049. common.infoz().Unregister();
  2050. Deallocate<BackingArrayAlignment(AlignOfSlot)>(
  2051. &alloc, common.backing_array_start(),
  2052. common.alloc_size(policy.slot_size, AlignOfSlot));
  2053. }
  2054. // For trivially relocatable types we use memcpy directly. This allows us to
  2055. // share the same function body for raw_hash_set instantiations that have the
  2056. // same slot size as long as they are relocatable.
  2057. template <size_t SizeOfSlot>
  2058. ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) {
  2059. memcpy(dst, src, SizeOfSlot);
  2060. }
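// For example, flat_hash_set<int32_t> and flat_hash_set<float> both use
// 4-byte, trivially relocatable slots, so both can share the single
// instantiation TransferRelocatable<4>.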
  2061. // Type erased raw_hash_set::get_hash_ref_fn for the empty hash function case.
  2062. const void* GetHashRefForEmptyHasher(const CommonFields& common);
  2063. // Given the hash of a value not currently in the table and the first empty
  2064. // slot in the probe sequence, finds a viable slot index to insert it at.
  2065. //
  2066. // In case there's no space left, the table can be resized or rehashed
  2067. // (for tables with deleted slots, see FindInsertPositionWithGrowthOrRehash).
  2068. //
// In the absence of deleted slots and with positive growth_left, the element
// can be inserted at the provided `target` position.
  2071. //
  2072. // When the table has deleted slots (according to GrowthInfo), the target
  2073. // position will be searched one more time using `find_first_non_full`.
  2074. //
  2075. // REQUIRES: Table is not SOO.
  2076. // REQUIRES: At least one non-full slot available.
  2077. // REQUIRES: `target` is a valid empty position to insert.
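//
// A rough sketch of the flow described above (illustrative only, not the
// exact implementation):
//
//   if (table has deleted slots per GrowthInfo) {
//     target = find_first_non_full(common, hash);  // search position again
//   } else if (growth_left == 0) {
//     // resize/rehash (see FindInsertPositionWithGrowthOrRehash), then pick
//     // a fresh target.
//   }
//   PrepareInsertCommon(common);                    // bump size / generation
//   SetCtrl(common, target.offset, H2(hash), policy.slot_size);
//   return target.offset;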
  2078. size_t PrepareInsertNonSoo(CommonFields& common, size_t hash, FindInfo target,
  2079. const PolicyFunctions& policy);
  2080. // A SwissTable.
  2081. //
  2082. // Policy: a policy defines how to perform different operations on
  2083. // the slots of the hashtable (see hash_policy_traits.h for the full interface
  2084. // of policy).
  2085. //
  2086. // Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
  2087. // functor should accept a key and return size_t as hash. For best performance
  2088. // it is important that the hash function provides high entropy across all bits
  2089. // of the hash.
  2090. //
  2091. // Eq: a (possibly polymorphic) functor that compares two keys for equality. It
  2092. // should accept two (of possibly different type) keys and return a bool: true
  2093. // if they are equal, false if they are not. If two keys compare equal, then
  2094. // their hash values as defined by Hash MUST be equal.
  2095. //
  2096. // Allocator: an Allocator
  2097. // [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
  2098. // the storage of the hashtable will be allocated and the elements will be
  2099. // constructed and destroyed.
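//
// For example (hypothetical functors, shown only to illustrate the Hash/Eq
// contract above, using absl::AsciiStrToLower and absl::EqualsIgnoreCase from
// absl/strings): a case-insensitive equality must be paired with a hash that
// is also case-insensitive, so that equal keys always hash equally.
//
//   struct CaseInsensitiveHash {
//     size_t operator()(absl::string_view s) const {
//       return absl::Hash<std::string>{}(absl::AsciiStrToLower(s));
//     }
//   };
//   struct CaseInsensitiveEq {
//     bool operator()(absl::string_view a, absl::string_view b) const {
//       return absl::EqualsIgnoreCase(a, b);
//     }
//   };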
  2100. template <class Policy, class Hash, class Eq, class Alloc>
  2101. class raw_hash_set {
  2102. using PolicyTraits = hash_policy_traits<Policy>;
  2103. using KeyArgImpl =
  2104. KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
  2105. public:
  2106. using init_type = typename PolicyTraits::init_type;
  2107. using key_type = typename PolicyTraits::key_type;
  2108. // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
  2109. // code fixes!
  2110. using slot_type = typename PolicyTraits::slot_type;
  2111. using allocator_type = Alloc;
  2112. using size_type = size_t;
  2113. using difference_type = ptrdiff_t;
  2114. using hasher = Hash;
  2115. using key_equal = Eq;
  2116. using policy_type = Policy;
  2117. using value_type = typename PolicyTraits::value_type;
  2118. using reference = value_type&;
  2119. using const_reference = const value_type&;
  2120. using pointer = typename absl::allocator_traits<
  2121. allocator_type>::template rebind_traits<value_type>::pointer;
  2122. using const_pointer = typename absl::allocator_traits<
  2123. allocator_type>::template rebind_traits<value_type>::const_pointer;
  2124. // Alias used for heterogeneous lookup functions.
  2125. // `key_arg<K>` evaluates to `K` when the functors are transparent and to
  2126. // `key_type` otherwise. It permits template argument deduction on `K` for the
  2127. // transparent case.
  2128. template <class K>
  2129. using key_arg = typename KeyArgImpl::template type<K, key_type>;
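// For example (sketch): with the transparent default functors of
// absl::flat_hash_set<std::string>, `key_arg<absl::string_view>` is
// `absl::string_view`, so lookups such as `find(absl::string_view("abc"))`
// avoid constructing a temporary std::string.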
  2130. private:
  2131. // TODO(b/289225379): we could add extra SOO space inside raw_hash_set
  2132. // after CommonFields to allow inlining larger slot_types (e.g. std::string),
  2133. // but it's a bit complicated if we want to support incomplete mapped_type in
  2134. // flat_hash_map. We could potentially do this for flat_hash_set and for an
  2135. // allowlist of `mapped_type`s of flat_hash_map that includes e.g. arithmetic
  2136. // types, strings, cords, and pairs/tuples of allowlisted types.
  2137. constexpr static bool SooEnabled() {
  2138. return PolicyTraits::soo_enabled() &&
  2139. sizeof(slot_type) <= sizeof(HeapOrSoo) &&
  2140. alignof(slot_type) <= alignof(HeapOrSoo);
  2141. }
  2142. // Whether `size` fits in the SOO capacity of this table.
  2143. bool fits_in_soo(size_t size) const {
  2144. return SooEnabled() && size <= SooCapacity();
  2145. }
  2146. // Whether this table is in SOO mode or non-SOO mode.
  2147. bool is_soo() const { return fits_in_soo(capacity()); }
  2148. bool is_full_soo() const { return is_soo() && !empty(); }
  2149. // Give an early error when key_type is not hashable/eq.
  2150. auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
  2151. auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
  2152. using AllocTraits = absl::allocator_traits<allocator_type>;
  2153. using SlotAlloc = typename absl::allocator_traits<
  2154. allocator_type>::template rebind_alloc<slot_type>;
  2155. // People are often sloppy with the exact type of their allocator (sometimes
  2156. // it has an extra const or is missing the pair, but rebinds made it work
  2157. // anyway).
  2158. using CharAlloc =
  2159. typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
  2160. using SlotAllocTraits = typename absl::allocator_traits<
  2161. allocator_type>::template rebind_traits<slot_type>;
  2162. static_assert(std::is_lvalue_reference<reference>::value,
  2163. "Policy::element() must return a reference");
  2164. template <typename T>
  2165. struct SameAsElementReference
  2166. : std::is_same<typename std::remove_cv<
  2167. typename std::remove_reference<reference>::type>::type,
  2168. typename std::remove_cv<
  2169. typename std::remove_reference<T>::type>::type> {};
  2170. // An enabler for insert(T&&): T must be convertible to init_type or be the
  2171. // same as [cv] value_type [ref].
  2172. // Note: we separate SameAsElementReference into its own type to avoid using
  2173. // reference unless we need to. MSVC doesn't seem to like it in some
  2174. // cases.
  2175. template <class T>
  2176. using RequiresInsertable = typename std::enable_if<
  2177. absl::disjunction<std::is_convertible<T, init_type>,
  2178. SameAsElementReference<T>>::value,
  2179. int>::type;
  2180. // RequiresNotInit is a workaround for gcc prior to 7.1.
  2181. // See https://godbolt.org/g/Y4xsUh.
  2182. template <class T>
  2183. using RequiresNotInit =
  2184. typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
  2185. template <class... Ts>
  2186. using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
  2187. public:
  2188. static_assert(std::is_same<pointer, value_type*>::value,
  2189. "Allocators with custom pointer types are not supported");
  2190. static_assert(std::is_same<const_pointer, const value_type*>::value,
  2191. "Allocators with custom pointer types are not supported");
  2192. class iterator : private HashSetIteratorGenerationInfo {
  2193. friend class raw_hash_set;
  2194. friend struct HashtableFreeFunctionsAccess;
  2195. public:
  2196. using iterator_category = std::forward_iterator_tag;
  2197. using value_type = typename raw_hash_set::value_type;
  2198. using reference =
  2199. absl::conditional_t<PolicyTraits::constant_iterators::value,
  2200. const value_type&, value_type&>;
  2201. using pointer = absl::remove_reference_t<reference>*;
  2202. using difference_type = typename raw_hash_set::difference_type;
  2203. iterator() {}
  2204. // PRECONDITION: not an end() iterator.
  2205. reference operator*() const {
  2206. AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()");
  2207. return unchecked_deref();
  2208. }
  2209. // PRECONDITION: not an end() iterator.
  2210. pointer operator->() const {
  2211. AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->");
  2212. return &operator*();
  2213. }
  2214. // PRECONDITION: not an end() iterator.
  2215. iterator& operator++() {
  2216. AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++");
  2217. ++ctrl_;
  2218. ++slot_;
  2219. skip_empty_or_deleted();
  2220. if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
  2221. return *this;
  2222. }
  2223. // PRECONDITION: not an end() iterator.
  2224. iterator operator++(int) {
  2225. auto tmp = *this;
  2226. ++*this;
  2227. return tmp;
  2228. }
  2229. friend bool operator==(const iterator& a, const iterator& b) {
  2230. AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr());
  2231. AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr());
  2232. AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_,
  2233. a.generation_ptr(), b.generation_ptr());
  2234. return a.ctrl_ == b.ctrl_;
  2235. }
  2236. friend bool operator!=(const iterator& a, const iterator& b) {
  2237. return !(a == b);
  2238. }
  2239. private:
  2240. iterator(ctrl_t* ctrl, slot_type* slot,
  2241. const GenerationType* generation_ptr)
  2242. : HashSetIteratorGenerationInfo(generation_ptr),
  2243. ctrl_(ctrl),
  2244. slot_(slot) {
  2245. // This assumption helps the compiler know that any non-end iterator is
  2246. // not equal to any end iterator.
  2247. ABSL_ASSUME(ctrl != nullptr);
  2248. }
  2249. // This constructor is used in begin() to avoid an MSan
  2250. // use-of-uninitialized-value error. Delegating from this constructor to
  2251. // the previous one doesn't avoid the error.
  2252. iterator(ctrl_t* ctrl, MaybeInitializedPtr slot,
  2253. const GenerationType* generation_ptr)
  2254. : HashSetIteratorGenerationInfo(generation_ptr),
  2255. ctrl_(ctrl),
  2256. slot_(to_slot(slot.get())) {
  2257. // This assumption helps the compiler know that any non-end iterator is
  2258. // not equal to any end iterator.
  2259. ABSL_ASSUME(ctrl != nullptr);
  2260. }
  2261. // For end() iterators.
  2262. explicit iterator(const GenerationType* generation_ptr)
  2263. : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {}
// Fixes up `ctrl_` to point to a full slot or the sentinel by advancing
// `ctrl_` and `slot_` until they reach one.
  2265. // `slot_` until they reach one.
  2266. void skip_empty_or_deleted() {
  2267. while (IsEmptyOrDeleted(*ctrl_)) {
  2268. uint32_t shift =
  2269. GroupFullEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted();
  2270. ctrl_ += shift;
  2271. slot_ += shift;
  2272. }
  2273. }
  2274. ctrl_t* control() const { return ctrl_; }
  2275. slot_type* slot() const { return slot_; }
  2276. // We use EmptyGroup() for default-constructed iterators so that they can
  2277. // be distinguished from end iterators, which have nullptr ctrl_.
  2278. ctrl_t* ctrl_ = EmptyGroup();
  2279. // To avoid uninitialized member warnings, put slot_ in an anonymous union.
  2280. // The member is not initialized on singleton and end iterators.
  2281. union {
  2282. slot_type* slot_;
  2283. };
  2284. // An equality check which skips ABSL Hardening iterator invalidation
  2285. // checks.
  2286. // Should be used when the lifetimes of the iterators are well-enough
  2287. // understood to prove that they cannot be invalid.
  2288. bool unchecked_equals(const iterator& b) { return ctrl_ == b.control(); }
  2289. // Dereferences the iterator without ABSL Hardening iterator invalidation
  2290. // checks.
  2291. reference unchecked_deref() const { return PolicyTraits::element(slot_); }
  2292. };
  2293. class const_iterator {
  2294. friend class raw_hash_set;
  2295. template <class Container, typename Enabler>
  2296. friend struct absl::container_internal::hashtable_debug_internal::
  2297. HashtableDebugAccess;
  2298. public:
  2299. using iterator_category = typename iterator::iterator_category;
  2300. using value_type = typename raw_hash_set::value_type;
  2301. using reference = typename raw_hash_set::const_reference;
  2302. using pointer = typename raw_hash_set::const_pointer;
  2303. using difference_type = typename raw_hash_set::difference_type;
  2304. const_iterator() = default;
  2305. // Implicit construction from iterator.
  2306. const_iterator(iterator i) : inner_(std::move(i)) {} // NOLINT
  2307. reference operator*() const { return *inner_; }
  2308. pointer operator->() const { return inner_.operator->(); }
  2309. const_iterator& operator++() {
  2310. ++inner_;
  2311. return *this;
  2312. }
  2313. const_iterator operator++(int) { return inner_++; }
  2314. friend bool operator==(const const_iterator& a, const const_iterator& b) {
  2315. return a.inner_ == b.inner_;
  2316. }
  2317. friend bool operator!=(const const_iterator& a, const const_iterator& b) {
  2318. return !(a == b);
  2319. }
  2320. private:
  2321. const_iterator(const ctrl_t* ctrl, const slot_type* slot,
  2322. const GenerationType* gen)
  2323. : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot), gen) {
  2324. }
  2325. ctrl_t* control() const { return inner_.control(); }
  2326. slot_type* slot() const { return inner_.slot(); }
  2327. iterator inner_;
  2328. bool unchecked_equals(const const_iterator& b) {
  2329. return inner_.unchecked_equals(b.inner_);
  2330. }
  2331. };
  2332. using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
  2333. using insert_return_type = InsertReturnType<iterator, node_type>;
  2334. // Note: can't use `= default` due to non-default noexcept (causes
  2335. // problems for some compilers). NOLINTNEXTLINE
  2336. raw_hash_set() noexcept(
  2337. std::is_nothrow_default_constructible<hasher>::value &&
  2338. std::is_nothrow_default_constructible<key_equal>::value &&
  2339. std::is_nothrow_default_constructible<allocator_type>::value) {}
  2340. ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set(
  2341. size_t bucket_count, const hasher& hash = hasher(),
  2342. const key_equal& eq = key_equal(),
  2343. const allocator_type& alloc = allocator_type())
  2344. : settings_(CommonFields::CreateDefault<SooEnabled()>(), hash, eq,
  2345. alloc) {
  2346. if (bucket_count > (SooEnabled() ? SooCapacity() : 0)) {
  2347. resize(NormalizeCapacity(bucket_count));
  2348. }
  2349. }
  2350. raw_hash_set(size_t bucket_count, const hasher& hash,
  2351. const allocator_type& alloc)
  2352. : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
  2353. raw_hash_set(size_t bucket_count, const allocator_type& alloc)
  2354. : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
  2355. explicit raw_hash_set(const allocator_type& alloc)
  2356. : raw_hash_set(0, hasher(), key_equal(), alloc) {}
  2357. template <class InputIter>
  2358. raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
  2359. const hasher& hash = hasher(), const key_equal& eq = key_equal(),
  2360. const allocator_type& alloc = allocator_type())
  2361. : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
  2362. hash, eq, alloc) {
  2363. insert(first, last);
  2364. }
  2365. template <class InputIter>
  2366. raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
  2367. const hasher& hash, const allocator_type& alloc)
  2368. : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
  2369. template <class InputIter>
  2370. raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
  2371. const allocator_type& alloc)
  2372. : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
  2373. template <class InputIter>
  2374. raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
  2375. : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
  2376. // Instead of accepting std::initializer_list<value_type> as the first
  2377. // argument like std::unordered_set<value_type> does, we have two overloads
  2378. // that accept std::initializer_list<T> and std::initializer_list<init_type>.
  2379. // This is advantageous for performance.
  2380. //
  2381. // // Turns {"abc", "def"} into std::initializer_list<std::string>, then
  2382. // // copies the strings into the set.
  2383. // std::unordered_set<std::string> s = {"abc", "def"};
  2384. //
  2385. // // Turns {"abc", "def"} into std::initializer_list<const char*>, then
  2386. // // copies the strings into the set.
  2387. // absl::flat_hash_set<std::string> s = {"abc", "def"};
  2388. //
  2389. // The same trick is used in insert().
  2390. //
  2391. // The enabler is necessary to prevent this constructor from triggering where
  2392. // the copy constructor is meant to be called.
  2393. //
  2394. // absl::flat_hash_set<int> a, b{a};
  2395. //
  2396. // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
  2397. template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  2398. raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
  2399. const hasher& hash = hasher(), const key_equal& eq = key_equal(),
  2400. const allocator_type& alloc = allocator_type())
  2401. : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
  2402. raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
  2403. const hasher& hash = hasher(), const key_equal& eq = key_equal(),
  2404. const allocator_type& alloc = allocator_type())
  2405. : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
  2406. template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  2407. raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
  2408. const hasher& hash, const allocator_type& alloc)
  2409. : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
  2410. raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
  2411. const hasher& hash, const allocator_type& alloc)
  2412. : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
  2413. template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  2414. raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
  2415. const allocator_type& alloc)
  2416. : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
  2417. raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
  2418. const allocator_type& alloc)
  2419. : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
  2420. template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  2421. raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
  2422. : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
  2423. raw_hash_set(std::initializer_list<init_type> init,
  2424. const allocator_type& alloc)
  2425. : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
  2426. raw_hash_set(const raw_hash_set& that)
  2427. : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
  2428. that.alloc_ref())) {}
  2429. raw_hash_set(const raw_hash_set& that, const allocator_type& a)
  2430. : raw_hash_set(GrowthToLowerboundCapacity(that.size()), that.hash_ref(),
  2431. that.eq_ref(), a) {
  2432. const size_t size = that.size();
  2433. if (size == 0) {
  2434. return;
  2435. }
  2436. // We don't use `that.is_soo()` here because `that` can have non-SOO
  2437. // capacity but have a size that fits into SOO capacity.
  2438. if (fits_in_soo(size)) {
  2439. assert(size == 1);
  2440. common().set_full_soo();
  2441. emplace_at(soo_iterator(), *that.begin());
  2442. const HashtablezInfoHandle infoz = try_sample_soo();
  2443. if (infoz.IsSampled()) resize_with_soo_infoz(infoz);
  2444. return;
  2445. }
  2446. assert(!that.is_soo());
  2447. const size_t cap = capacity();
  2448. // Note about single group tables:
  2449. // 1. It is correct to have any order of elements.
// 2. Order has to be non-deterministic.
  2451. // 3. We are assigning elements with arbitrary `shift` starting from
  2452. // `capacity + shift` position.
  2453. // 4. `shift` must be coprime with `capacity + 1` in order to be able to use
// modular arithmetic to traverse all positions, instead of cycling
  2455. // through a subset of positions. Odd numbers are coprime with any
  2456. // `capacity + 1` (2^N).
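// For example (sketch): with `cap == 7` and `shift == 3`, starting from
// `offset == 7` the update `offset = (offset + 3) & 7` visits
// 2, 5, 0, 3, 6, 1, 4, 7 -- every position exactly once, since gcd(3, 8) == 1.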
  2457. size_t offset = cap;
  2458. const size_t shift =
  2459. is_single_group(cap) ? (PerTableSalt(control()) | 1) : 0;
  2460. IterateOverFullSlots(
  2461. that.common(), that.slot_array(),
  2462. [&](const ctrl_t* that_ctrl,
  2463. slot_type* that_slot) ABSL_ATTRIBUTE_ALWAYS_INLINE {
  2464. if (shift == 0) {
  2465. // Big tables case. Position must be searched via probing.
// The table is guaranteed to be empty, so we can do this faster than
// a full `insert`.
  2468. const size_t hash = PolicyTraits::apply(
  2469. HashElement{hash_ref()}, PolicyTraits::element(that_slot));
  2470. FindInfo target = find_first_non_full_outofline(common(), hash);
  2471. infoz().RecordInsert(hash, target.probe_length);
  2472. offset = target.offset;
  2473. } else {
  2474. // Small tables case. Next position is computed via shift.
  2475. offset = (offset + shift) & cap;
  2476. }
  2477. const h2_t h2 = static_cast<h2_t>(*that_ctrl);
assert( // We rely on the hash not changing for small tables.
  2479. H2(PolicyTraits::apply(HashElement{hash_ref()},
  2480. PolicyTraits::element(that_slot))) == h2 &&
  2481. "hash function value changed unexpectedly during the copy");
  2482. SetCtrl(common(), offset, h2, sizeof(slot_type));
  2483. emplace_at(iterator_at(offset), PolicyTraits::element(that_slot));
  2484. common().maybe_increment_generation_on_insert();
  2485. });
  2486. if (shift != 0) {
  2487. // On small table copy we do not record individual inserts.
// RecordInsert requires the hash, which is not computed for small tables.
  2489. infoz().RecordStorageChanged(size, cap);
  2490. }
  2491. common().set_size(size);
  2492. growth_info().OverwriteManyEmptyAsFull(size);
  2493. }
  2494. ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
  2495. std::is_nothrow_copy_constructible<hasher>::value &&
  2496. std::is_nothrow_copy_constructible<key_equal>::value &&
  2497. std::is_nothrow_copy_constructible<allocator_type>::value)
  2498. : // Hash, equality and allocator are copied instead of moved because
  2499. // `that` must be left valid. If Hash is std::function<Key>, moving it
  2500. // would create a nullptr functor that cannot be called.
  2501. // TODO(b/296061262): move instead of copying hash/eq/alloc.
  2502. // Note: we avoid using exchange for better generated code.
  2503. settings_(PolicyTraits::transfer_uses_memcpy() || !that.is_full_soo()
  2504. ? std::move(that.common())
  2505. : CommonFields{full_soo_tag_t{}},
  2506. that.hash_ref(), that.eq_ref(), that.alloc_ref()) {
  2507. if (!PolicyTraits::transfer_uses_memcpy() && that.is_full_soo()) {
  2508. transfer(soo_slot(), that.soo_slot());
  2509. }
  2510. that.common() = CommonFields::CreateDefault<SooEnabled()>();
  2511. maybe_increment_generation_or_rehash_on_move();
  2512. }
  2513. raw_hash_set(raw_hash_set&& that, const allocator_type& a)
  2514. : settings_(CommonFields::CreateDefault<SooEnabled()>(), that.hash_ref(),
  2515. that.eq_ref(), a) {
  2516. if (a == that.alloc_ref()) {
  2517. swap_common(that);
  2518. maybe_increment_generation_or_rehash_on_move();
  2519. } else {
  2520. move_elements_allocs_unequal(std::move(that));
  2521. }
  2522. }
  2523. raw_hash_set& operator=(const raw_hash_set& that) {
  2524. if (ABSL_PREDICT_FALSE(this == &that)) return *this;
  2525. constexpr bool propagate_alloc =
  2526. AllocTraits::propagate_on_container_copy_assignment::value;
  2527. // TODO(ezb): maybe avoid allocating a new backing array if this->capacity()
  2528. // is an exact match for that.size(). If this->capacity() is too big, then
  2529. // it would make iteration very slow to reuse the allocation. Maybe we can
  2530. // do the same heuristic as clear() and reuse if it's small enough.
  2531. raw_hash_set tmp(that, propagate_alloc ? that.alloc_ref() : alloc_ref());
  2532. // NOLINTNEXTLINE: not returning *this for performance.
  2533. return assign_impl<propagate_alloc>(std::move(tmp));
  2534. }
  2535. raw_hash_set& operator=(raw_hash_set&& that) noexcept(
  2536. absl::allocator_traits<allocator_type>::is_always_equal::value &&
  2537. std::is_nothrow_move_assignable<hasher>::value &&
  2538. std::is_nothrow_move_assignable<key_equal>::value) {
  2539. // TODO(sbenza): We should only use the operations from the noexcept clause
  2540. // to make sure we actually adhere to that contract.
  2541. // NOLINTNEXTLINE: not returning *this for performance.
  2542. return move_assign(
  2543. std::move(that),
  2544. typename AllocTraits::propagate_on_container_move_assignment());
  2545. }
  2546. ~raw_hash_set() { destructor_impl(); }
  2547. iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2548. if (ABSL_PREDICT_FALSE(empty())) return end();
  2549. if (is_soo()) return soo_iterator();
  2550. iterator it = {control(), common().slots_union(),
  2551. common().generation_ptr()};
  2552. it.skip_empty_or_deleted();
  2553. assert(IsFull(*it.control()));
  2554. return it;
  2555. }
  2556. iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2557. return iterator(common().generation_ptr());
  2558. }
  2559. const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2560. return const_cast<raw_hash_set*>(this)->begin();
  2561. }
  2562. const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2563. return iterator(common().generation_ptr());
  2564. }
  2565. const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2566. return begin();
  2567. }
  2568. const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
  2569. bool empty() const { return !size(); }
  2570. size_t size() const { return common().size(); }
  2571. size_t capacity() const {
  2572. const size_t cap = common().capacity();
  2573. // Compiler complains when using functions in assume so use local variables.
  2574. ABSL_ATTRIBUTE_UNUSED static constexpr bool kEnabled = SooEnabled();
  2575. ABSL_ATTRIBUTE_UNUSED static constexpr size_t kCapacity = SooCapacity();
  2576. ABSL_ASSUME(!kEnabled || cap >= kCapacity);
  2577. return cap;
  2578. }
  2579. size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
  2580. ABSL_ATTRIBUTE_REINITIALIZES void clear() {
  2581. // Iterating over this container is O(bucket_count()). When bucket_count()
  2582. // is much greater than size(), iteration becomes prohibitively expensive.
// For clear() it is more important to reuse the allocated array when the
// container is small, because allocation takes considerably longer than
// destroying the container's elements. So we pick the
  2586. // largest bucket_count() threshold for which iteration is still fast and
  2587. // past that we simply deallocate the array.
  2588. const size_t cap = capacity();
  2589. if (cap == 0) {
  2590. // Already guaranteed to be empty; so nothing to do.
  2591. } else if (is_soo()) {
  2592. if (!empty()) destroy(soo_slot());
  2593. common().set_empty_soo();
  2594. } else {
  2595. destroy_slots();
  2596. ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128,
  2597. SooEnabled());
  2598. }
  2599. common().set_reserved_growth(0);
  2600. common().set_reservation_size(0);
  2601. }
  2602. // This overload kicks in when the argument is an rvalue of insertable and
  2603. // decomposable type other than init_type.
  2604. //
  2605. // flat_hash_map<std::string, int> m;
  2606. // m.insert(std::make_pair("abc", 42));
  2607. // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
  2608. // bug.
  2609. template <class T, RequiresInsertable<T> = 0, class T2 = T,
  2610. typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
  2611. T* = nullptr>
  2612. std::pair<iterator, bool> insert(T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2613. return emplace(std::forward<T>(value));
  2614. }
  2615. // This overload kicks in when the argument is a bitfield or an lvalue of
  2616. // insertable and decomposable type.
  2617. //
  2618. // union { int n : 1; };
  2619. // flat_hash_set<int> s;
  2620. // s.insert(n);
  2621. //
  2622. // flat_hash_set<std::string> s;
  2623. // const char* p = "hello";
  2624. // s.insert(p);
  2625. //
  2626. template <
  2627. class T, RequiresInsertable<const T&> = 0,
  2628. typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
  2629. std::pair<iterator, bool> insert(const T& value)
  2630. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2631. return emplace(value);
  2632. }
  2633. // This overload kicks in when the argument is an rvalue of init_type. Its
  2634. // purpose is to handle brace-init-list arguments.
  2635. //
  2636. // flat_hash_map<std::string, int> s;
  2637. // s.insert({"abc", 42});
  2638. std::pair<iterator, bool> insert(init_type&& value)
  2639. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2640. return emplace(std::move(value));
  2641. }
  2642. // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
  2643. // bug.
  2644. template <class T, RequiresInsertable<T> = 0, class T2 = T,
  2645. typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
  2646. T* = nullptr>
  2647. iterator insert(const_iterator, T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2648. return insert(std::forward<T>(value)).first;
  2649. }
  2650. template <
  2651. class T, RequiresInsertable<const T&> = 0,
  2652. typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
  2653. iterator insert(const_iterator,
  2654. const T& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2655. return insert(value).first;
  2656. }
  2657. iterator insert(const_iterator,
  2658. init_type&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2659. return insert(std::move(value)).first;
  2660. }
  2661. template <class InputIt>
  2662. void insert(InputIt first, InputIt last) {
  2663. for (; first != last; ++first) emplace(*first);
  2664. }
  2665. template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
  2666. void insert(std::initializer_list<T> ilist) {
  2667. insert(ilist.begin(), ilist.end());
  2668. }
  2669. void insert(std::initializer_list<init_type> ilist) {
  2670. insert(ilist.begin(), ilist.end());
  2671. }
  2672. insert_return_type insert(node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2673. if (!node) return {end(), false, node_type()};
  2674. const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
  2675. auto res = PolicyTraits::apply(
  2676. InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
  2677. elem);
  2678. if (res.second) {
  2679. CommonAccess::Reset(&node);
  2680. return {res.first, true, node_type()};
  2681. } else {
  2682. return {res.first, false, std::move(node)};
  2683. }
  2684. }
  2685. iterator insert(const_iterator,
  2686. node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2687. auto res = insert(std::move(node));
  2688. node = std::move(res.node);
  2689. return res.position;
  2690. }
  2691. // This overload kicks in if we can deduce the key from args. This enables us
  2692. // to avoid constructing value_type if an entry with the same key already
  2693. // exists.
  2694. //
  2695. // For example:
  2696. //
  2697. // flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
  2698. // // Creates no std::string copies and makes no heap allocations.
  2699. // m.emplace("abc", "xyz");
  2700. template <class... Args, typename std::enable_if<
  2701. IsDecomposable<Args...>::value, int>::type = 0>
  2702. std::pair<iterator, bool> emplace(Args&&... args)
  2703. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2704. return PolicyTraits::apply(EmplaceDecomposable{*this},
  2705. std::forward<Args>(args)...);
  2706. }
  2707. // This overload kicks in if we cannot deduce the key from args. It constructs
// value_type unconditionally and then either moves it into the table or
// destroys it.
  2710. template <class... Args, typename std::enable_if<
  2711. !IsDecomposable<Args...>::value, int>::type = 0>
  2712. std::pair<iterator, bool> emplace(Args&&... args)
  2713. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2714. alignas(slot_type) unsigned char raw[sizeof(slot_type)];
  2715. slot_type* slot = to_slot(&raw);
  2716. construct(slot, std::forward<Args>(args)...);
  2717. const auto& elem = PolicyTraits::element(slot);
  2718. return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
  2719. }
  2720. template <class... Args>
  2721. iterator emplace_hint(const_iterator,
  2722. Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2723. return emplace(std::forward<Args>(args)...).first;
  2724. }
  2725. // Extension API: support for lazy emplace.
  2726. //
  2727. // Looks up key in the table. If found, returns the iterator to the element.
  2728. // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`,
  2729. // and returns an iterator to the new element.
  2730. //
  2731. // `f` must abide by several restrictions:
  2732. // - it MUST call `raw_hash_set::constructor` with arguments as if a
  2733. // `raw_hash_set::value_type` is constructed,
  2734. // - it MUST NOT access the container before the call to
  2735. // `raw_hash_set::constructor`, and
  2736. // - it MUST NOT erase the lazily emplaced element.
  2737. // Doing any of these is undefined behavior.
  2738. //
  2739. // For example:
  2740. //
  2741. // std::unordered_set<ArenaString> s;
// // Makes an ArenaString even if "abc" is already in the set.
  2743. // s.insert(ArenaString(&arena, "abc"));
  2744. //
// flat_hash_set<ArenaString> s;
// // Makes an ArenaString only if "abc" is not already in the set.
  2747. // s.lazy_emplace("abc", [&](const constructor& ctor) {
  2748. // ctor(&arena, "abc");
  2749. // });
  2750. //
  2751. // WARNING: This API is currently experimental. If there is a way to implement
  2752. // the same thing with the rest of the API, prefer that.
  2753. class constructor {
  2754. friend class raw_hash_set;
  2755. public:
  2756. template <class... Args>
  2757. void operator()(Args&&... args) const {
  2758. assert(*slot_);
  2759. PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
  2760. *slot_ = nullptr;
  2761. }
  2762. private:
  2763. constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
  2764. allocator_type* alloc_;
  2765. slot_type** slot_;
  2766. };
  2767. template <class K = key_type, class F>
  2768. iterator lazy_emplace(const key_arg<K>& key,
  2769. F&& f) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2770. auto res = find_or_prepare_insert(key);
  2771. if (res.second) {
  2772. slot_type* slot = res.first.slot();
  2773. std::forward<F>(f)(constructor(&alloc_ref(), &slot));
  2774. assert(!slot);
  2775. }
  2776. return res.first;
  2777. }
  2778. // Extension API: support for heterogeneous keys.
  2779. //
  2780. // std::unordered_set<std::string> s;
  2781. // // Turns "abc" into std::string.
  2782. // s.erase("abc");
  2783. //
  2784. // flat_hash_set<std::string> s;
  2785. // // Uses "abc" directly without copying it into std::string.
  2786. // s.erase("abc");
  2787. template <class K = key_type>
  2788. size_type erase(const key_arg<K>& key) {
  2789. auto it = find(key);
  2790. if (it == end()) return 0;
  2791. erase(it);
  2792. return 1;
  2793. }
  2794. // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`,
  2795. // this method returns void to reduce algorithmic complexity to O(1). The
  2796. // iterator is invalidated, so any increment should be done before calling
  2797. // erase. In order to erase while iterating across a map, use the following
  2798. // idiom (which also works for some standard containers):
  2799. //
  2800. // for (auto it = m.begin(), end = m.end(); it != end;) {
  2801. // // `erase()` will invalidate `it`, so advance `it` first.
  2802. // auto copy_it = it++;
  2803. // if (<pred>) {
  2804. // m.erase(copy_it);
  2805. // }
  2806. // }
  2807. void erase(const_iterator cit) { erase(cit.inner_); }
  2808. // This overload is necessary because otherwise erase<K>(const K&) would be
  2809. // a better match if non-const iterator is passed as an argument.
  2810. void erase(iterator it) {
  2811. AssertIsFull(it.control(), it.generation(), it.generation_ptr(), "erase()");
  2812. destroy(it.slot());
  2813. if (is_soo()) {
  2814. common().set_empty_soo();
  2815. } else {
  2816. erase_meta_only(it);
  2817. }
  2818. }
  2819. iterator erase(const_iterator first,
  2820. const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2821. // We check for empty first because ClearBackingArray requires that
  2822. // capacity() > 0 as a precondition.
  2823. if (empty()) return end();
  2824. if (first == last) return last.inner_;
  2825. if (is_soo()) {
  2826. destroy(soo_slot());
  2827. common().set_empty_soo();
  2828. return end();
  2829. }
  2830. if (first == begin() && last == end()) {
  2831. // TODO(ezb): we access control bytes in destroy_slots so it could make
  2832. // sense to combine destroy_slots and ClearBackingArray to avoid cache
  2833. // misses when the table is large. Note that we also do this in clear().
  2834. destroy_slots();
  2835. ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true,
  2836. SooEnabled());
  2837. common().set_reserved_growth(common().reservation_size());
  2838. return end();
  2839. }
  2840. while (first != last) {
  2841. erase(first++);
  2842. }
  2843. return last.inner_;
  2844. }
  2845. // Moves elements from `src` into `this`.
  2846. // If the element already exists in `this`, it is left unmodified in `src`.
  2847. template <typename H, typename E>
  2848. void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT
  2849. assert(this != &src);
  2850. // Returns whether insertion took place.
  2851. const auto insert_slot = [this](slot_type* src_slot) {
  2852. return PolicyTraits::apply(InsertSlot<false>{*this, std::move(*src_slot)},
  2853. PolicyTraits::element(src_slot))
  2854. .second;
  2855. };
  2856. if (src.is_soo()) {
  2857. if (src.empty()) return;
  2858. if (insert_slot(src.soo_slot())) src.common().set_empty_soo();
  2859. return;
  2860. }
  2861. for (auto it = src.begin(), e = src.end(); it != e;) {
  2862. auto next = std::next(it);
  2863. if (insert_slot(it.slot())) src.erase_meta_only(it);
  2864. it = next;
  2865. }
  2866. }
  2867. template <typename H, typename E>
  2868. void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
  2869. merge(src);
  2870. }
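// Example (sketch): only elements whose keys are absent from `dst` are moved;
// duplicates stay behind in `src`.
//
//   absl::flat_hash_set<int> dst = {1, 2};
//   absl::flat_hash_set<int> src = {2, 3};
//   dst.merge(src);  // dst == {1, 2, 3}; src == {2}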
  2871. node_type extract(const_iterator position) {
  2872. AssertIsFull(position.control(), position.inner_.generation(),
  2873. position.inner_.generation_ptr(), "extract()");
  2874. auto node = CommonAccess::Transfer<node_type>(alloc_ref(), position.slot());
  2875. if (is_soo()) {
  2876. common().set_empty_soo();
  2877. } else {
  2878. erase_meta_only(position);
  2879. }
  2880. return node;
  2881. }
  2882. template <
  2883. class K = key_type,
  2884. typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
  2885. node_type extract(const key_arg<K>& key) {
  2886. auto it = find(key);
  2887. return it == end() ? node_type() : extract(const_iterator{it});
  2888. }
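// Example (sketch): moving an element between two sets without copying it.
//
//   absl::flat_hash_set<std::string> a = {"x"};
//   absl::flat_hash_set<std::string> b;
//   auto node = a.extract("x");
//   if (!node.empty()) b.insert(std::move(node));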
  2889. void swap(raw_hash_set& that) noexcept(
  2890. IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
  2891. IsNoThrowSwappable<allocator_type>(
  2892. typename AllocTraits::propagate_on_container_swap{})) {
  2893. using std::swap;
  2894. swap_common(that);
  2895. swap(hash_ref(), that.hash_ref());
  2896. swap(eq_ref(), that.eq_ref());
  2897. SwapAlloc(alloc_ref(), that.alloc_ref(),
  2898. typename AllocTraits::propagate_on_container_swap{});
  2899. }
  2900. void rehash(size_t n) {
  2901. const size_t cap = capacity();
  2902. if (n == 0) {
  2903. if (cap == 0 || is_soo()) return;
  2904. if (empty()) {
  2905. ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
  2906. SooEnabled());
  2907. return;
  2908. }
  2909. if (fits_in_soo(size())) {
  2910. // When the table is already sampled, we keep it sampled.
  2911. if (infoz().IsSampled()) {
  2912. const size_t kInitialSampledCapacity = NextCapacity(SooCapacity());
  2913. if (capacity() > kInitialSampledCapacity) {
  2914. resize(kInitialSampledCapacity);
  2915. }
  2916. // This asserts that we didn't lose sampling coverage in `resize`.
  2917. assert(infoz().IsSampled());
  2918. return;
  2919. }
  2920. alignas(slot_type) unsigned char slot_space[sizeof(slot_type)];
  2921. slot_type* tmp_slot = to_slot(slot_space);
  2922. transfer(tmp_slot, begin().slot());
  2923. ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
  2924. SooEnabled());
  2925. transfer(soo_slot(), tmp_slot);
  2926. common().set_full_soo();
  2927. return;
  2928. }
  2929. }
  2930. // bitor is a faster way of doing `max` here. We will round up to the next
  2931. // power-of-2-minus-1, so bitor is good enough.
  2932. auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
  2933. // n == 0 unconditionally rehashes as per the standard.
  2934. if (n == 0 || m > cap) {
  2935. resize(m);
  2936. // This is after resize, to ensure that we have completed the allocation
  2937. // and have potentially sampled the hashtable.
  2938. infoz().RecordReservation(n);
  2939. }
  2940. }
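// Example (sketch, for a hypothetical set `s`): `rehash(0)` releases unused
// backing storage once the table has been emptied.
//
//   s.clear();
//   s.rehash(0);  // frees the heap-allocated backing array, if any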
  2941. void reserve(size_t n) {
  2942. const size_t max_size_before_growth =
  2943. is_soo() ? SooCapacity() : size() + growth_left();
  2944. if (n > max_size_before_growth) {
  2945. size_t m = GrowthToLowerboundCapacity(n);
  2946. resize(NormalizeCapacity(m));
  2947. // This is after resize, to ensure that we have completed the allocation
  2948. // and have potentially sampled the hashtable.
  2949. infoz().RecordReservation(n);
  2950. }
  2951. common().reset_reserved_growth(n);
  2952. common().set_reservation_size(n);
  2953. }
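// Example (sketch, `input` is a hypothetical source container): reserving up
// front avoids rehashing during a bulk load.
//
//   absl::flat_hash_set<int> s;
//   s.reserve(input.size());
//   for (int v : input) s.insert(v);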
  2954. // Extension API: support for heterogeneous keys.
  2955. //
  2956. // std::unordered_set<std::string> s;
  2957. // // Turns "abc" into std::string.
  2958. // s.count("abc");
  2959. //
  2960. // ch_set<std::string> s;
  2961. // // Uses "abc" directly without copying it into std::string.
  2962. // s.count("abc");
  2963. template <class K = key_type>
  2964. size_t count(const key_arg<K>& key) const {
  2965. return find(key) == end() ? 0 : 1;
  2966. }
  2967. // Issues CPU prefetch instructions for the memory needed to find or insert
// a key. Like all lookup functions, this supports heterogeneous keys.
  2969. //
  2970. // NOTE: This is a very low level operation and should not be used without
  2971. // specific benchmarks indicating its importance.
  2972. template <class K = key_type>
  2973. void prefetch(const key_arg<K>& key) const {
  2974. if (SooEnabled() ? is_soo() : capacity() == 0) return;
  2975. (void)key;
  2976. // Avoid probing if we won't be able to prefetch the addresses received.
  2977. #ifdef ABSL_HAVE_PREFETCH
  2978. prefetch_heap_block();
  2979. auto seq = probe(common(), hash_ref()(key));
  2980. PrefetchToLocalCache(control() + seq.offset());
  2981. PrefetchToLocalCache(slot_array() + seq.offset());
  2982. #endif // ABSL_HAVE_PREFETCH
  2983. }
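// Example (sketch, `Process` is a hypothetical consumer): overlap the prefetch
// for the next key with the lookup of the current one.
//
//   for (size_t i = 0; i + 1 < keys.size(); ++i) {
//     s.prefetch(keys[i + 1]);
//     Process(s.find(keys[i]));
//   }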
  2984. // The API of find() has two extensions.
  2985. //
  2986. // 1. The hash can be passed by the user. It must be equal to the hash of the
  2987. // key.
  2988. //
// 2. The type of the key argument doesn't have to be key_type. This is
// so-called heterogeneous key support.
  2991. template <class K = key_type>
  2992. iterator find(const key_arg<K>& key,
  2993. size_t hash) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  2994. AssertHashEqConsistent(key);
  2995. if (is_soo()) return find_soo(key);
  2996. return find_non_soo(key, hash);
  2997. }
  2998. template <class K = key_type>
  2999. iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3000. AssertHashEqConsistent(key);
  3001. if (is_soo()) return find_soo(key);
  3002. prefetch_heap_block();
  3003. return find_non_soo(key, hash_ref()(key));
  3004. }
  3005. template <class K = key_type>
  3006. const_iterator find(const key_arg<K>& key,
  3007. size_t hash) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3008. return const_cast<raw_hash_set*>(this)->find(key, hash);
  3009. }
  3010. template <class K = key_type>
  3011. const_iterator find(const key_arg<K>& key) const
  3012. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3013. return const_cast<raw_hash_set*>(this)->find(key);
  3014. }
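// Example (sketch): passing a precomputed hash, which must match
// `hash_function()(key)`.
//
//   const size_t h = s.hash_function()(key);
//   auto it = s.find(key, h);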
  3015. template <class K = key_type>
  3016. bool contains(const key_arg<K>& key) const {
  3017. // Here neither the iterator returned by `find()` nor `end()` can be invalid
  3018. // outside of potential thread-safety issues.
  3019. // `find()`'s return value is constructed, used, and then destructed
  3020. // all in this context.
  3021. return !find(key).unchecked_equals(end());
  3022. }
  3023. template <class K = key_type>
  3024. std::pair<iterator, iterator> equal_range(const key_arg<K>& key)
  3025. ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3026. auto it = find(key);
  3027. if (it != end()) return {it, std::next(it)};
  3028. return {it, it};
  3029. }
  3030. template <class K = key_type>
  3031. std::pair<const_iterator, const_iterator> equal_range(
  3032. const key_arg<K>& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
  3033. auto it = find(key);
  3034. if (it != end()) return {it, std::next(it)};
  3035. return {it, it};
  3036. }
  3037. size_t bucket_count() const { return capacity(); }
  3038. float load_factor() const {
  3039. return capacity() ? static_cast<double>(size()) / capacity() : 0.0;
  3040. }
  3041. float max_load_factor() const { return 1.0f; }
  3042. void max_load_factor(float) {
  3043. // Does nothing.
  3044. }
  3045. hasher hash_function() const { return hash_ref(); }
  3046. key_equal key_eq() const { return eq_ref(); }
  3047. allocator_type get_allocator() const { return alloc_ref(); }
  3048. friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
  3049. if (a.size() != b.size()) return false;
  3050. const raw_hash_set* outer = &a;
  3051. const raw_hash_set* inner = &b;
  3052. if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
  3053. for (const value_type& elem : *outer) {
  3054. auto it = PolicyTraits::apply(FindElement{*inner}, elem);
  3055. if (it == inner->end() || !(*it == elem)) return false;
  3056. }
  3057. return true;
  3058. }
  3059. friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
  3060. return !(a == b);
  3061. }
  3062. template <typename H>
  3063. friend typename std::enable_if<H::template is_hashable<value_type>::value,
  3064. H>::type
  3065. AbslHashValue(H h, const raw_hash_set& s) {
  3066. return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
  3067. s.size());
  3068. }
  3069. friend void swap(raw_hash_set& a,
  3070. raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
  3071. a.swap(b);
  3072. }
  3073. private:
  3074. template <class Container, typename Enabler>
  3075. friend struct absl::container_internal::hashtable_debug_internal::
  3076. HashtableDebugAccess;
  3077. friend struct absl::container_internal::HashtableFreeFunctionsAccess;
  3078. struct FindElement {
  3079. template <class K, class... Args>
  3080. const_iterator operator()(const K& key, Args&&...) const {
  3081. return s.find(key);
  3082. }
  3083. const raw_hash_set& s;
  3084. };
  3085. struct HashElement {
  3086. template <class K, class... Args>
  3087. size_t operator()(const K& key, Args&&...) const {
  3088. return h(key);
  3089. }
  3090. const hasher& h;
  3091. };
  3092. template <class K1>
  3093. struct EqualElement {
  3094. template <class K2, class... Args>
  3095. bool operator()(const K2& lhs, Args&&...) const {
  3096. return eq(lhs, rhs);
  3097. }
  3098. const K1& rhs;
  3099. const key_equal& eq;
  3100. };
  3101. struct EmplaceDecomposable {
  3102. template <class K, class... Args>
  3103. std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
  3104. auto res = s.find_or_prepare_insert(key);
  3105. if (res.second) {
  3106. s.emplace_at(res.first, std::forward<Args>(args)...);
  3107. }
  3108. return res;
  3109. }
  3110. raw_hash_set& s;
  3111. };
  3112. template <bool do_destroy>
  3113. struct InsertSlot {
  3114. template <class K, class... Args>
  3115. std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
  3116. auto res = s.find_or_prepare_insert(key);
  3117. if (res.second) {
  3118. s.transfer(res.first.slot(), &slot);
  3119. } else if (do_destroy) {
  3120. s.destroy(&slot);
  3121. }
  3122. return res;
  3123. }
  3124. raw_hash_set& s;
  3125. // Constructed slot. Either moved into place or destroyed.
  3126. slot_type&& slot;
  3127. };
  3128. // TODO(b/303305702): re-enable reentrant validation.
  3129. template <typename... Args>
  3130. inline void construct(slot_type* slot, Args&&... args) {
  3131. PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
  3132. }
  3133. inline void destroy(slot_type* slot) {
  3134. PolicyTraits::destroy(&alloc_ref(), slot);
  3135. }
  3136. inline void transfer(slot_type* to, slot_type* from) {
  3137. PolicyTraits::transfer(&alloc_ref(), to, from);
  3138. }
  3139. // TODO(b/289225379): consider having a helper class that has the impls for
  3140. // SOO functionality.
  3141. template <class K = key_type>
  3142. iterator find_soo(const key_arg<K>& key) {
  3143. assert(is_soo());
  3144. return empty() || !PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
  3145. PolicyTraits::element(soo_slot()))
  3146. ? end()
  3147. : soo_iterator();
  3148. }
  3149. template <class K = key_type>
  3150. iterator find_non_soo(const key_arg<K>& key, size_t hash) {
  3151. assert(!is_soo());
  3152. auto seq = probe(common(), hash);
  3153. const ctrl_t* ctrl = control();
  3154. while (true) {
  3155. Group g{ctrl + seq.offset()};
  3156. for (uint32_t i : g.Match(H2(hash))) {
  3157. if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
  3158. EqualElement<K>{key, eq_ref()},
  3159. PolicyTraits::element(slot_array() + seq.offset(i)))))
  3160. return iterator_at(seq.offset(i));
  3161. }
  3162. if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
  3163. seq.next();
  3164. assert(seq.index() <= capacity() && "full table!");
  3165. }
  3166. }
  3167. // Conditionally samples hashtablez for SOO tables. This should be called on
  3168. // insertion into an empty SOO table and in copy construction when the size
  3169. // can fit in SOO capacity.
  3170. inline HashtablezInfoHandle try_sample_soo() {
  3171. assert(is_soo());
  3172. if (!ShouldSampleHashtablezInfo<CharAlloc>()) return HashtablezInfoHandle{};
  3173. return Sample(sizeof(slot_type), sizeof(key_type), sizeof(value_type),
  3174. SooCapacity());
  3175. }
  3176. inline void destroy_slots() {
  3177. assert(!is_soo());
  3178. if (PolicyTraits::template destroy_is_trivial<Alloc>()) return;
  3179. IterateOverFullSlots(
  3180. common(), slot_array(),
  3181. [&](const ctrl_t*, slot_type* slot)
  3182. ABSL_ATTRIBUTE_ALWAYS_INLINE { this->destroy(slot); });
  3183. }
  3184. inline void dealloc() {
  3185. assert(capacity() != 0);
  3186. // Unpoison before returning the memory to the allocator.
  3187. SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * capacity());
  3188. infoz().Unregister();
  3189. Deallocate<BackingArrayAlignment(alignof(slot_type))>(
  3190. &alloc_ref(), common().backing_array_start(),
  3191. common().alloc_size(sizeof(slot_type), alignof(slot_type)));
  3192. }
  3193. inline void destructor_impl() {
  3194. if (capacity() == 0) return;
  3195. if (is_soo()) {
  3196. if (!empty()) {
  3197. ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(destroy(soo_slot()));
  3198. }
  3199. return;
  3200. }
  3201. destroy_slots();
  3202. dealloc();
  3203. }
  3204. // Erases, but does not destroy, the value pointed to by `it`.
  3205. //
  3206. // This merely updates the pertinent control byte. This can be used in
  3207. // conjunction with Policy::transfer to move the object to another place.
  3208. void erase_meta_only(const_iterator it) {
  3209. assert(!is_soo());
  3210. EraseMetaOnly(common(), static_cast<size_t>(it.control() - control()),
  3211. sizeof(slot_type));
  3212. }
  3213. size_t hash_of(slot_type* slot) const {
  3214. return PolicyTraits::apply(HashElement{hash_ref()},
  3215. PolicyTraits::element(slot));
  3216. }
// Resizes the table to the new capacity and moves all elements to the new
// positions accordingly.
  3219. //
// Note that for better performance, instead of
//   find_first_non_full(common(), hash),
// one can call
//   HashSetResizeHelper::FindFirstNonFullAfterResize(
//       common(), old_capacity, hash)
// right after `resize`.
  3225. void resize(size_t new_capacity) {
  3226. raw_hash_set::resize_impl(common(), new_capacity, HashtablezInfoHandle{});
  3227. }

  // As above, except that we also accept a pre-sampled, forced infoz for
  // SOO tables, since they need to switch from SOO to heap in order to
  // store the infoz.
  void resize_with_soo_infoz(HashtablezInfoHandle forced_infoz) {
    assert(forced_infoz.IsSampled());
    raw_hash_set::resize_impl(common(), NextCapacity(SooCapacity()),
                              forced_infoz);
  }

  // Resizes the set to the new capacity.
  // It is a static function in order to use its pointer in GetPolicyFunctions.
  ABSL_ATTRIBUTE_NOINLINE static void resize_impl(
      CommonFields& common, size_t new_capacity,
      HashtablezInfoHandle forced_infoz) {
    raw_hash_set* set = reinterpret_cast<raw_hash_set*>(&common);
    assert(IsValidCapacity(new_capacity));
    assert(!set->fits_in_soo(new_capacity));
    const bool was_soo = set->is_soo();
    const bool had_soo_slot = was_soo && !set->empty();
    const ctrl_t soo_slot_h2 =
        had_soo_slot ? static_cast<ctrl_t>(H2(set->hash_of(set->soo_slot())))
                     : ctrl_t::kEmpty;
    HashSetResizeHelper resize_helper(common, was_soo, had_soo_slot,
                                      forced_infoz);
    // Initialize HashSetResizeHelper::old_heap_or_soo_. We can't do this in
    // the HashSetResizeHelper constructor because it can't transfer slots when
    // transfer_uses_memcpy is false.
    // TODO(b/289225379): try to handle more of the SOO cases inside
    // InitializeSlots. See comment on cl/555990034 snapshot #63.
    if (PolicyTraits::transfer_uses_memcpy() || !had_soo_slot) {
      resize_helper.old_heap_or_soo() = common.heap_or_soo();
    } else {
      set->transfer(set->to_slot(resize_helper.old_soo_data()),
                    set->soo_slot());
    }
    common.set_capacity(new_capacity);
    // Note that `InitializeSlots` performs a different number of
    // initialization steps depending on the values of `transfer_uses_memcpy`
    // and the capacities. Refer to the comment in `InitializeSlots` for more
    // details.
    const bool grow_single_group =
        resize_helper.InitializeSlots<CharAlloc, sizeof(slot_type),
                                      PolicyTraits::transfer_uses_memcpy(),
                                      SooEnabled(), alignof(slot_type)>(
            common, CharAlloc(set->alloc_ref()), soo_slot_h2, sizeof(key_type),
            sizeof(value_type));

    // In the SooEnabled() case, capacity is never 0 so we don't check.
    if (!SooEnabled() && resize_helper.old_capacity() == 0) {
      // InitializeSlots did all the work, including infoz().RecordRehash().
      return;
    }
    assert(resize_helper.old_capacity() > 0);
    // Nothing more to do in this case.
    if (was_soo && !had_soo_slot) return;

    slot_type* new_slots = set->slot_array();
    if (grow_single_group) {
      if (PolicyTraits::transfer_uses_memcpy()) {
        // InitializeSlots did all the work.
        return;
      }
      if (was_soo) {
        set->transfer(new_slots + resize_helper.SooSlotIndex(),
                      to_slot(resize_helper.old_soo_data()));
        return;
      } else {
        // We want GrowSizeIntoSingleGroup to be called here in order to make
        // InitializeSlots not depend on PolicyTraits.
        resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common,
                                                            set->alloc_ref());
      }
    } else {
      // InitializeSlots prepares control bytes to correspond to an empty
      // table.
      const auto insert_slot = [&](slot_type* slot) {
        size_t hash = PolicyTraits::apply(HashElement{set->hash_ref()},
                                          PolicyTraits::element(slot));
        auto target = find_first_non_full(common, hash);
        SetCtrl(common, target.offset, H2(hash), sizeof(slot_type));
        set->transfer(new_slots + target.offset, slot);
        return target.probe_length;
      };
      if (was_soo) {
        insert_slot(to_slot(resize_helper.old_soo_data()));
        return;
      } else {
        auto* old_slots = static_cast<slot_type*>(resize_helper.old_slots());
        size_t total_probe_length = 0;
        for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
          if (IsFull(resize_helper.old_ctrl()[i])) {
            total_probe_length += insert_slot(old_slots + i);
          }
        }
        common.infoz().RecordRehash(total_probe_length);
      }
    }
    resize_helper.DeallocateOld<alignof(slot_type)>(CharAlloc(set->alloc_ref()),
                                                    sizeof(slot_type));
  }
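
  // Summary of the transfer paths above: when growing within a single group
  // and the slot type is memcpy-transferable, `InitializeSlots` has already
  // relocated the elements; growing within a single group without memcpy goes
  // through `GrowSizeIntoSingleGroup<PolicyTraits>`; otherwise every full old
  // slot (or the single SOO slot) is re-inserted by recomputing its hash and
  // probing the freshly initialized control bytes.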

  // Casting directly from e.g. char* to slot_type* can cause compilation
  // errors on Objective-C. This function converts to void* first, avoiding the
  // issue.
  static slot_type* to_slot(void* buf) { return static_cast<slot_type*>(buf); }

  // Requires that lhs does not have a full SOO slot.
  static void move_common(bool that_is_full_soo, allocator_type& rhs_alloc,
                          CommonFields& lhs, CommonFields&& rhs) {
    if (PolicyTraits::transfer_uses_memcpy() || !that_is_full_soo) {
      lhs = std::move(rhs);
    } else {
      lhs.move_non_heap_or_soo_fields(rhs);
      // TODO(b/303305702): add reentrancy guard.
      PolicyTraits::transfer(&rhs_alloc, to_slot(lhs.soo_data()),
                             to_slot(rhs.soo_data()));
    }
  }

  // Swaps common fields making sure to avoid memcpy'ing a full SOO slot if we
  // aren't allowed to do so.
  void swap_common(raw_hash_set& that) {
    using std::swap;
    if (PolicyTraits::transfer_uses_memcpy()) {
      swap(common(), that.common());
      return;
    }
    CommonFields tmp = CommonFields::CreateDefault<SooEnabled()>();
    const bool that_is_full_soo = that.is_full_soo();
    move_common(that_is_full_soo, that.alloc_ref(), tmp,
                std::move(that.common()));
    move_common(is_full_soo(), alloc_ref(), that.common(), std::move(common()));
    move_common(that_is_full_soo, that.alloc_ref(), common(), std::move(tmp));
  }
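
  // The three `move_common` calls above perform a swap through a temporary
  // (`that` -> `tmp`, `*this` -> `that`, `tmp` -> `*this`), so a full SOO slot
  // is always relocated via `PolicyTraits::transfer` rather than memcpy'd.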

  void maybe_increment_generation_or_rehash_on_move() {
    if (!SwisstableGenerationsEnabled() || capacity() == 0 || is_soo()) {
      return;
    }
    common().increment_generation();
    if (!empty() && common().should_rehash_for_bug_detection_on_move()) {
      resize(capacity());
    }
  }

  template <bool propagate_alloc>
  raw_hash_set& assign_impl(raw_hash_set&& that) {
    // We don't bother checking for this/that aliasing. We just need to avoid
    // breaking the invariants in that case.
    destructor_impl();
    move_common(that.is_full_soo(), that.alloc_ref(), common(),
                std::move(that.common()));
    // TODO(b/296061262): move instead of copying hash/eq/alloc.
    hash_ref() = that.hash_ref();
    eq_ref() = that.eq_ref();
    CopyAlloc(alloc_ref(), that.alloc_ref(),
              std::integral_constant<bool, propagate_alloc>());
    that.common() = CommonFields::CreateDefault<SooEnabled()>();
    maybe_increment_generation_or_rehash_on_move();
    return *this;
  }

  raw_hash_set& move_elements_allocs_unequal(raw_hash_set&& that) {
    const size_t size = that.size();
    if (size == 0) return *this;
    reserve(size);
    for (iterator it = that.begin(); it != that.end(); ++it) {
      insert(std::move(PolicyTraits::element(it.slot())));
      that.destroy(it.slot());
    }
    if (!that.is_soo()) that.dealloc();
    that.common() = CommonFields::CreateDefault<SooEnabled()>();
    maybe_increment_generation_or_rehash_on_move();
    return *this;
  }

  raw_hash_set& move_assign(raw_hash_set&& that,
                            std::true_type /*propagate_alloc*/) {
    return assign_impl<true>(std::move(that));
  }
  raw_hash_set& move_assign(raw_hash_set&& that,
                            std::false_type /*propagate_alloc*/) {
    if (alloc_ref() == that.alloc_ref()) {
      return assign_impl<false>(std::move(that));
    }
    // Aliasing can't happen here because allocs would compare equal above.
    assert(this != &that);
    destructor_impl();
    // We can't take over `that`'s memory, so we need to move each element.
    // While moving elements, `this` should have `that`'s hash/eq, so copy
    // hash/eq before moving elements.
    // TODO(b/296061262): move instead of copying hash/eq.
    hash_ref() = that.hash_ref();
    eq_ref() = that.eq_ref();
    return move_elements_allocs_unequal(std::move(that));
  }

  template <class K>
  std::pair<iterator, bool> find_or_prepare_insert_soo(const K& key) {
    if (empty()) {
      const HashtablezInfoHandle infoz = try_sample_soo();
      if (infoz.IsSampled()) {
        resize_with_soo_infoz(infoz);
      } else {
        common().set_full_soo();
        return {soo_iterator(), true};
      }
    } else if (PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
                                   PolicyTraits::element(soo_slot()))) {
      return {soo_iterator(), false};
    } else {
      resize(NextCapacity(SooCapacity()));
    }
    const size_t index =
        PrepareInsertAfterSoo(hash_ref()(key), sizeof(slot_type), common());
    return {iterator_at(index), true};
  }
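
  // In short: an empty table stores the new element inline via
  // `set_full_soo()` (or grows to the heap first if hashtablez sampled it); a
  // full SOO table whose element does not match `key` grows to
  // `NextCapacity(SooCapacity())`; in both growth cases
  // `PrepareInsertAfterSoo` selects the slot in the new backing array.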

  template <class K>
  std::pair<iterator, bool> find_or_prepare_insert_non_soo(const K& key) {
    assert(!is_soo());
    prefetch_heap_block();
    auto hash = hash_ref()(key);
    auto seq = probe(common(), hash);
    const ctrl_t* ctrl = control();
    while (true) {
      Group g{ctrl + seq.offset()};
      for (uint32_t i : g.Match(H2(hash))) {
        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
                EqualElement<K>{key, eq_ref()},
                PolicyTraits::element(slot_array() + seq.offset(i)))))
          return {iterator_at(seq.offset(i)), false};
      }
      auto mask_empty = g.MaskEmpty();
      if (ABSL_PREDICT_TRUE(mask_empty)) {
        size_t target = seq.offset(
            GetInsertionOffset(mask_empty, capacity(), hash, control()));
        return {iterator_at(PrepareInsertNonSoo(common(), hash,
                                                FindInfo{target, seq.index()},
                                                GetPolicyFunctions())),
                true};
      }
      seq.next();
      assert(seq.index() <= capacity() && "full table!");
    }
  }

 protected:
  // Asserts that hash and equal functors provided by the user are consistent,
  // meaning that `eq(k1, k2)` implies `hash(k1) == hash(k2)`.
  template <class K>
  void AssertHashEqConsistent(ABSL_ATTRIBUTE_UNUSED const K& key) {
#ifndef NDEBUG
    if (empty()) return;

    const size_t hash_of_arg = hash_ref()(key);
    const auto assert_consistent = [&](const ctrl_t*, slot_type* slot) {
      const value_type& element = PolicyTraits::element(slot);
      const bool is_key_equal =
          PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, element);
      if (!is_key_equal) return;

      const size_t hash_of_slot =
          PolicyTraits::apply(HashElement{hash_ref()}, element);
      const bool is_hash_equal = hash_of_arg == hash_of_slot;
      if (!is_hash_equal) {
        // In this case, we're going to crash. Do a couple of other checks for
        // idempotence issues. Recalculating hash/eq here is also convenient
        // for debugging with gdb/lldb.
        const size_t once_more_hash_arg = hash_ref()(key);
        assert(hash_of_arg == once_more_hash_arg && "hash is not idempotent.");
        const size_t once_more_hash_slot =
            PolicyTraits::apply(HashElement{hash_ref()}, element);
        assert(hash_of_slot == once_more_hash_slot &&
               "hash is not idempotent.");
        const bool once_more_eq =
            PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, element);
        assert(is_key_equal == once_more_eq && "equality is not idempotent.");
      }
      assert((!is_key_equal || is_hash_equal) &&
             "eq(k1, k2) must imply that hash(k1) == hash(k2). "
             "hash/eq functors are inconsistent.");
    };

    if (is_soo()) {
      assert_consistent(/*unused*/ nullptr, soo_slot());
      return;
    }
    // We only do validation for small tables so that it's constant time.
    if (capacity() > 16) return;
    IterateOverFullSlots(common(), slot_array(), assert_consistent);
#endif
  }
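
  // For example (illustrative only), pairing a case-insensitive equality such
  // as
  //
  //   struct CaseInsensitiveEq {
  //     bool operator()(absl::string_view a, absl::string_view b) const {
  //       return absl::EqualsIgnoreCase(a, b);
  //     }
  //   };
  //
  // with a case-sensitive hash violates this contract: "Key" and "KEY" compare
  // equal but generally hash differently, so the assertion above is expected
  // to fire in debug builds.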

  // Attempts to find `key` in the table; if it isn't found, returns an
  // iterator to the slot where the value can be inserted, with the control
  // byte already set to `key`'s H2. Also returns a bool indicating whether an
  // insertion can take place.
  template <class K>
  std::pair<iterator, bool> find_or_prepare_insert(const K& key) {
    AssertHashEqConsistent(key);
    if (is_soo()) return find_or_prepare_insert_soo(key);
    return find_or_prepare_insert_non_soo(key);
  }

  // Constructs the value in the space pointed to by the iterator. This only
  // works after an unsuccessful find_or_prepare_insert() and before any other
  // modifications happen in the raw_hash_set.
  //
  // PRECONDITION: iter was returned from find_or_prepare_insert(k), where k is
  // the key decomposed from `forward<Args>(args)...`, and the bool returned by
  // find_or_prepare_insert(k) was true.
  // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
  template <class... Args>
  void emplace_at(iterator iter, Args&&... args) {
    construct(iter.slot(), std::forward<Args>(args)...);
    assert(PolicyTraits::apply(FindElement{*this}, *iter) == iter &&
           "constructed value does not match the lookup key");
  }
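
  // Typical (illustrative) use inside this class: insertion paths first call
  // find_or_prepare_insert(key) and only construct when the returned bool is
  // true, e.g.
  //
  //   auto res = find_or_prepare_insert(key);
  //   if (res.second) emplace_at(res.first, std::forward<Args>(args)...);
  //   return res;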

  iterator iterator_at(size_t i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return {control() + i, slot_array() + i, common().generation_ptr()};
  }
  const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return const_cast<raw_hash_set*>(this)->iterator_at(i);
  }

  reference unchecked_deref(iterator it) { return it.unchecked_deref(); }

 private:
  friend struct RawHashSetTestOnlyAccess;

  // The number of slots we can still fill without needing to rehash.
  //
  // This is stored separately due to tombstones: we do not include tombstones
  // in the growth capacity, because we'd like to rehash when the table is
  // otherwise filled with tombstones: otherwise, probe sequences might get
  // unacceptably long without triggering a rehash. Callers can also force a
  // rehash via the standard `rehash(0)`, which will recompute this value as a
  // side-effect.
  //
  // See `CapacityToGrowth()`.
  size_t growth_left() const {
    assert(!is_soo());
    return common().growth_left();
  }
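
  // With the default 7/8 maximum load factor, a freshly rehashed table has
  // growth_left() == CapacityToGrowth(capacity()) - size(), i.e. roughly
  // capacity() * 7 / 8 - size().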

  GrowthInfo& growth_info() {
    assert(!is_soo());
    return common().growth_info();
  }
  GrowthInfo growth_info() const {
    assert(!is_soo());
    return common().growth_info();
  }

  // Prefetch the heap-allocated memory region to resolve potential TLB and
  // cache misses. This is intended to overlap with execution of calculating
  // the hash for a key.
  void prefetch_heap_block() const {
    assert(!is_soo());
#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
    __builtin_prefetch(control(), 0, 1);
#endif
  }

  CommonFields& common() { return settings_.template get<0>(); }
  const CommonFields& common() const { return settings_.template get<0>(); }

  ctrl_t* control() const {
    assert(!is_soo());
    return common().control();
  }
  slot_type* slot_array() const {
    assert(!is_soo());
    return static_cast<slot_type*>(common().slot_array());
  }
  slot_type* soo_slot() {
    assert(is_soo());
    return static_cast<slot_type*>(common().soo_data());
  }
  const slot_type* soo_slot() const {
    return const_cast<raw_hash_set*>(this)->soo_slot();
  }
  iterator soo_iterator() {
    return {SooControl(), soo_slot(), common().generation_ptr()};
  }
  const_iterator soo_iterator() const {
    return const_cast<raw_hash_set*>(this)->soo_iterator();
  }
  HashtablezInfoHandle infoz() {
    assert(!is_soo());
    return common().infoz();
  }

  hasher& hash_ref() { return settings_.template get<1>(); }
  const hasher& hash_ref() const { return settings_.template get<1>(); }
  key_equal& eq_ref() { return settings_.template get<2>(); }
  const key_equal& eq_ref() const { return settings_.template get<2>(); }
  allocator_type& alloc_ref() { return settings_.template get<3>(); }
  const allocator_type& alloc_ref() const {
    return settings_.template get<3>();
  }

  static const void* get_hash_ref_fn(const CommonFields& common) {
    auto* h = reinterpret_cast<const raw_hash_set*>(&common);
    return &h->hash_ref();
  }
  static void transfer_slot_fn(void* set, void* dst, void* src) {
    auto* h = static_cast<raw_hash_set*>(set);
    h->transfer(static_cast<slot_type*>(dst), static_cast<slot_type*>(src));
  }
  // Note: dealloc_fn will only be used if we have a non-standard allocator.
  static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
    auto* set = reinterpret_cast<raw_hash_set*>(&common);
    // Unpoison before returning the memory to the allocator.
    SanitizerUnpoisonMemoryRegion(common.slot_array(),
                                  sizeof(slot_type) * common.capacity());
    common.infoz().Unregister();
    Deallocate<BackingArrayAlignment(alignof(slot_type))>(
        &set->alloc_ref(), common.backing_array_start(),
        common.alloc_size(sizeof(slot_type), alignof(slot_type)));
  }

  static const PolicyFunctions& GetPolicyFunctions() {
    static constexpr PolicyFunctions value = {
        sizeof(slot_type),
        // TODO(b/328722020): try to type erase
        // for standard layout and alignof(Hash) <= alignof(CommonFields).
        std::is_empty<hasher>::value ? &GetHashRefForEmptyHasher
                                     : &raw_hash_set::get_hash_ref_fn,
        PolicyTraits::template get_hash_slot_fn<hasher>(),
        PolicyTraits::transfer_uses_memcpy()
            ? TransferRelocatable<sizeof(slot_type)>
            : &raw_hash_set::transfer_slot_fn,
        (std::is_same<SlotAlloc, std::allocator<slot_type>>::value
             ? &DeallocateStandard<alignof(slot_type)>
             : &raw_hash_set::dealloc_fn),
        &raw_hash_set::resize_impl,
    };
    return value;
  }
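
  // The table of function pointers above type-erases the per-instantiation
  // operations (hashing, slot transfer, deallocation, resize) so that helpers
  // operating on `CommonFields` alone, such as `PrepareInsertNonSoo`, can be
  // shared across raw_hash_set instantiations.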

  // Bundle together CommonFields plus other objects which might be empty.
  // CompressedTuple will ensure that sizeof is not affected by any of the
  // empty fields that occur after CommonFields.
  absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
                                            allocator_type>
      settings_{CommonFields::CreateDefault<SooEnabled()>(), hasher{},
                key_equal{}, allocator_type{}};
};

// Friend access for free functions in raw_hash_set.h.
struct HashtableFreeFunctionsAccess {
  template <class Predicate, typename Set>
  static typename Set::size_type EraseIf(Predicate& pred, Set* c) {
    if (c->empty()) {
      return 0;
    }
    if (c->is_soo()) {
      auto it = c->soo_iterator();
      if (!pred(*it)) {
        assert(c->size() == 1 && "hash table was modified unexpectedly");
        return 0;
      }
      c->destroy(it.slot());
      c->common().set_empty_soo();
      return 1;
    }
    ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = c->size();
    size_t num_deleted = 0;
    IterateOverFullSlots(
        c->common(), c->slot_array(), [&](const ctrl_t* ctrl, auto* slot) {
          if (pred(Set::PolicyTraits::element(slot))) {
            c->destroy(slot);
            EraseMetaOnly(c->common(), static_cast<size_t>(ctrl - c->control()),
                          sizeof(*slot));
            ++num_deleted;
          }
        });
    // NOTE: IterateOverFullSlots allows removal of the current element, so we
    // additionally verify the size here.
    assert(original_size_for_assert - num_deleted == c->size() &&
           "hash table was modified unexpectedly");
    return num_deleted;
  }

  template <class Callback, typename Set>
  static void ForEach(Callback& cb, Set* c) {
    if (c->empty()) {
      return;
    }
    if (c->is_soo()) {
      cb(*c->soo_iterator());
      return;
    }
    using ElementTypeWithConstness = decltype(*c->begin());
    IterateOverFullSlots(
        c->common(), c->slot_array(), [&cb](const ctrl_t*, auto* slot) {
          ElementTypeWithConstness& element = Set::PolicyTraits::element(slot);
          cb(element);
        });
  }
};

// Erases all elements that satisfy the predicate `pred` from the container
// `c`.
template <typename P, typename H, typename E, typename A, typename Predicate>
typename raw_hash_set<P, H, E, A>::size_type EraseIf(
    Predicate& pred, raw_hash_set<P, H, E, A>* c) {
  return HashtableFreeFunctionsAccess::EraseIf(pred, c);
}
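
// For example (illustrative; the public `absl::erase_if` overloads for the
// SwissTable containers forward to this function):
//
//   absl::flat_hash_set<int> s = {1, 2, 3, 4};
//   auto is_odd = [](int v) { return v % 2 != 0; };
//   absl::erase_if(s, is_odd);  // s now contains {2, 4}.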

// Calls `cb` for all elements in the container `c`.
template <typename P, typename H, typename E, typename A, typename Callback>
void ForEach(Callback& cb, raw_hash_set<P, H, E, A>* c) {
  return HashtableFreeFunctionsAccess::ForEach(cb, c);
}
template <typename P, typename H, typename E, typename A, typename Callback>
void ForEach(Callback& cb, const raw_hash_set<P, H, E, A>* c) {
  return HashtableFreeFunctionsAccess::ForEach(cb, c);
}

namespace hashtable_debug_internal {
template <typename Set>
struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
  using Traits = typename Set::PolicyTraits;
  using Slot = typename Traits::slot_type;

  static size_t GetNumProbes(const Set& set,
                             const typename Set::key_type& key) {
    if (set.is_soo()) return 0;
    size_t num_probes = 0;
    size_t hash = set.hash_ref()(key);
    auto seq = probe(set.common(), hash);
    const ctrl_t* ctrl = set.control();
    while (true) {
      container_internal::Group g{ctrl + seq.offset()};
      for (uint32_t i : g.Match(container_internal::H2(hash))) {
        if (Traits::apply(
                typename Set::template EqualElement<typename Set::key_type>{
                    key, set.eq_ref()},
                Traits::element(set.slot_array() + seq.offset(i))))
          return num_probes;
        ++num_probes;
      }
      if (g.MaskEmpty()) return num_probes;
      seq.next();
      ++num_probes;
    }
  }

  static size_t AllocatedByteSize(const Set& c) {
    size_t capacity = c.capacity();
    if (capacity == 0) return 0;
    size_t m =
        c.is_soo() ? 0 : c.common().alloc_size(sizeof(Slot), alignof(Slot));

    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
    if (per_slot != ~size_t{}) {
      m += per_slot * c.size();
    } else {
      for (auto it = c.begin(); it != c.end(); ++it) {
        m += Traits::space_used(it.slot());
      }
    }
    return m;
  }
};

}  // namespace hashtable_debug_internal
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl

#undef ABSL_SWISSTABLE_ENABLE_GENERATIONS
#undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED
#undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN

#endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_