Instructions.cpp 182 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995499649974998499950005001
  1. //===- Instructions.cpp - Implement the LLVM instructions -----------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements all of the non-inline methods for the LLVM instruction
  10. // classes.
  11. //
  12. //===----------------------------------------------------------------------===//
#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>
  47. using namespace llvm;
// Debugging escape hatch: when set on the command line, the
// inttoptr(ptrtoint X) -> X folding is disabled.
// NOTE(review): the consumer of this flag is elsewhere in this file (cast
// handling) — confirm when touching that code.
static cl::opt<bool> DisableI2pP2iOpt(
    "disable-i2p-p2i-opt", cl::init(false),
    cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
  51. //===----------------------------------------------------------------------===//
  52. // AllocaInst Class
  53. //===----------------------------------------------------------------------===//
  54. std::optional<TypeSize>
  55. AllocaInst::getAllocationSize(const DataLayout &DL) const {
  56. TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
  57. if (isArrayAllocation()) {
  58. auto *C = dyn_cast<ConstantInt>(getArraySize());
  59. if (!C)
  60. return std::nullopt;
  61. assert(!Size.isScalable() && "Array elements cannot have a scalable size");
  62. Size *= C->getZExtValue();
  63. }
  64. return Size;
  65. }
  66. std::optional<TypeSize>
  67. AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  68. std::optional<TypeSize> Size = getAllocationSize(DL);
  69. if (Size)
  70. return *Size * 8;
  71. return std::nullopt;
  72. }
  73. //===----------------------------------------------------------------------===//
  74. // SelectInst Class
  75. //===----------------------------------------------------------------------===//
  76. /// areInvalidOperands - Return a string if the specified operands are invalid
  77. /// for a select operation, otherwise return null.
  78. const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  79. if (Op1->getType() != Op2->getType())
  80. return "both values to select must have same type";
  81. if (Op1->getType()->isTokenTy())
  82. return "select values cannot have token type";
  83. if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
  84. // Vector select.
  85. if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
  86. return "vector select condition element type must be i1";
  87. VectorType *ET = dyn_cast<VectorType>(Op1->getType());
  88. if (!ET)
  89. return "selected values for vector select must be vectors";
  90. if (ET->getElementCount() != VT->getElementCount())
  91. return "vector select requires selected vectors to have "
  92. "the same vector length as select condition";
  93. } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
  94. return "select condition must be i1 or <n x i1>";
  95. }
  96. return nullptr;
  97. }
  98. //===----------------------------------------------------------------------===//
  99. // PHINode Class
  100. //===----------------------------------------------------------------------===//
// Copy constructor. PHI operands live in hung-off use storage, so they must
// be allocated explicitly before the incoming values and their parallel
// incoming-block list are copied from the source node.
PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  // Incoming blocks are stored alongside the uses; copy them as well.
  copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
  SubclassOptionalData = PN.SubclassOptionalData;
}
// removeIncomingValue - Remove an incoming value. This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase. However,
  // clients might not expect this to happen. The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  // Keep the parallel incoming-block list in sync with the shifted operands.
  copyIncomingBlocks(make_range(block_begin() + Idx + 1, block_end()), Idx);

  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}
  131. /// growOperands - grow operands - This grows the operand list in response
  132. /// to a push_back style of operation. This grows the number of ops by 1.5
  133. /// times.
  134. ///
  135. void PHINode::growOperands() {
  136. unsigned e = getNumOperands();
  137. unsigned NumOps = e + e / 2;
  138. if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
  139. ReservedSpace = NumOps;
  140. growHungoffUses(ReservedSpace, /* IsPhi */ true);
  141. }
/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      // Two distinct non-self incoming values: no unique merged value —
      // unless the current candidate is this PHI itself (see below).
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  // All incoming values were self-references; the PHI's value is arbitrary.
  if (ConstantValue == this)
    return UndefValue::get(getType());
  return ConstantValue;
}
  158. /// hasConstantOrUndefValue - Whether the specified PHI node always merges
  159. /// together the same value, assuming that undefs result in the same value as
  160. /// non-undefs.
  161. /// Unlike \ref hasConstantValue, this does not return a value because the
  162. /// unique non-undef incoming value need not dominate the PHI node.
  163. bool PHINode::hasConstantOrUndefValue() const {
  164. Value *ConstantValue = nullptr;
  165. for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
  166. Value *Incoming = getIncomingValue(i);
  167. if (Incoming != this && !isa<UndefValue>(Incoming)) {
  168. if (ConstantValue && ConstantValue != Incoming)
  169. return false;
  170. ConstantValue = Incoming;
  171. }
  172. }
  173. return true;
  174. }
  175. //===----------------------------------------------------------------------===//
  176. // LandingPadInst Implementation
  177. //===----------------------------------------------------------------------===//
// Construct a landingpad with room reserved for NumReservedValues clauses,
// inserted before an existing instruction. Clause storage is hung off, so the
// base Instruction is created with zero inline operands.
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}
// As above, but appended to the end of a basic block.
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}
  188. LandingPadInst::LandingPadInst(const LandingPadInst &LP)
  189. : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
  190. LP.getNumOperands()),
  191. ReservedSpace(LP.getNumOperands()) {
  192. allocHungoffUses(LP.getNumOperands());
  193. Use *OL = getOperandList();
  194. const Use *InOL = LP.getOperandList();
  195. for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
  196. OL[I] = InOL[I];
  197. setCleanup(LP.isCleanup());
  198. }
// Factory: create a landingpad inserted before an existing instruction.
LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}
// Factory: create a landingpad appended to the end of a basic block.
LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}
// Shared constructor logic: reserve hung-off use slots for the clauses and
// start with zero clauses and the cleanup flag cleared.
void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}
  216. /// growOperands - grow operands - This grows the operand list in response to a
  217. /// push_back style of operation. This grows the number of ops by 2 times.
  218. void LandingPadInst::growOperands(unsigned Size) {
  219. unsigned e = getNumOperands();
  220. if (ReservedSpace >= e + Size) return;
  221. ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  222. growHungoffUses(ReservedSpace);
  223. }
// Append one catch/filter clause. The operand storage is grown first so the
// new slot is guaranteed to exist before the operand count is bumped.
void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}
  231. //===----------------------------------------------------------------------===//
  232. // CallBase Implementation
  233. //===----------------------------------------------------------------------===//
// Clone \p CB with a new set of operand bundles, dispatching on the concrete
// sub-class (call, invoke, or callbr) to the matching Create overload.
CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
                           Instruction *InsertPt) {
  switch (CB->getOpcode()) {
  case Instruction::Call:
    return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
  case Instruction::Invoke:
    return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
  case Instruction::CallBr:
    return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
  default:
    llvm_unreachable("Unknown CallBase sub-class!");
  }
}
  247. CallBase *CallBase::Create(CallBase *CI, OperandBundleDef OpB,
  248. Instruction *InsertPt) {
  249. SmallVector<OperandBundleDef, 2> OpDefs;
  250. for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
  251. auto ChildOB = CI->getOperandBundleAt(i);
  252. if (ChildOB.getTagName() != OpB.getTag())
  253. OpDefs.emplace_back(ChildOB);
  254. }
  255. OpDefs.emplace_back(OpB);
  256. return CallBase::Create(CI, OpDefs, InsertPt);
  257. }
// The caller is the function containing this call's parent basic block.
Function *CallBase::getCaller() { return getParent()->getParent(); }
// Only CallBr has a dynamic number of extra operands: one per indirect
// destination plus one for the default destination.
unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}
  263. bool CallBase::isIndirectCall() const {
  264. const Value *V = getCalledOperand();
  265. if (isa<Function>(V) || isa<Constant>(V))
  266. return false;
  267. return !isInlineAsm();
  268. }
  269. /// Tests if this call site must be tail call optimized. Only a CallInst can
  270. /// be tail call optimized.
  271. bool CallBase::isMustTailCall() const {
  272. if (auto *CI = dyn_cast<CallInst>(this))
  273. return CI->isMustTailCall();
  274. return false;
  275. }
  276. /// Tests if this call site is marked as a tail call.
  277. bool CallBase::isTailCall() const {
  278. if (auto *CI = dyn_cast<CallInst>(this))
  279. return CI->isTailCall();
  280. return false;
  281. }
  282. Intrinsic::ID CallBase::getIntrinsicID() const {
  283. if (auto *F = getCalledFunction())
  284. return F->getIntrinsicID();
  285. return Intrinsic::not_intrinsic;
  286. }
  287. bool CallBase::isReturnNonNull() const {
  288. if (hasRetAttr(Attribute::NonNull))
  289. return true;
  290. if (getRetDereferenceableBytes() > 0 &&
  291. !NullPointerIsDefined(getCaller(), getType()->getPointerAddressSpace()))
  292. return true;
  293. return false;
  294. }
  295. Value *CallBase::getArgOperandWithAttribute(Attribute::AttrKind Kind) const {
  296. unsigned Index;
  297. if (Attrs.hasAttrSomewhere(Kind, &Index))
  298. return getArgOperand(Index - AttributeList::FirstArgIndex);
  299. if (const Function *F = getCalledFunction())
  300. if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
  301. return getArgOperand(Index - AttributeList::FirstArgIndex);
  302. return nullptr;
  303. }
/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < arg_size() && "Param index out of bounds!");

  // A call-site attribute is authoritative on its own.
  if (Attrs.hasParamAttr(ArgNo, Kind))
    return true;

  // Otherwise fall back to the callee's declared parameter attributes.
  const Function *F = getCalledFunction();
  if (!F)
    return false;
  if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
    return false;

  // Take into account mod/ref by operand bundles.  Bundles can imply extra
  // reads or clobbers at this particular call site, so the callee's memory
  // attributes are only forwarded when no bundle contradicts them.
  switch (Kind) {
  case Attribute::ReadNone:
    return !hasReadingOperandBundles() && !hasClobberingOperandBundles();
  case Attribute::ReadOnly:
    return !hasClobberingOperandBundles();
  case Attribute::WriteOnly:
    return !hasReadingOperandBundles();
  default:
    return true;
  }
}
  326. bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  327. Value *V = getCalledOperand();
  328. if (auto *CE = dyn_cast<ConstantExpr>(V))
  329. if (CE->getOpcode() == BitCast)
  330. V = CE->getOperand(0);
  331. if (auto *F = dyn_cast<Function>(V))
  332. return F->getAttributes().hasFnAttr(Kind);
  333. return false;
  334. }
  335. bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  336. Value *V = getCalledOperand();
  337. if (auto *CE = dyn_cast<ConstantExpr>(V))
  338. if (CE->getOpcode() == BitCast)
  339. V = CE->getOperand(0);
  340. if (auto *F = dyn_cast<Function>(V))
  341. return F->getAttributes().hasFnAttr(Kind);
  342. return false;
  343. }
/// Fetch function attribute \p Kind from the callee (looking through a
/// constant bitcast), returning an empty Attribute when the callee is not a
/// known Function or lacks the attribute.
template <typename AK>
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
    // getMemoryEffects() correctly combines memory effects from the call-site,
    // operand bundles and function.
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
  }

  Value *V = getCalledOperand();
  // Peel a constant bitcast off the callee, if present.
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == BitCast)
      V = CE->getOperand(0);

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().getFnAttr(Kind);

  return Attribute();
}

// Explicit instantiations for the two attribute key types (enum and string).
template Attribute
CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
  362. void CallBase::getOperandBundlesAsDefs(
  363. SmallVectorImpl<OperandBundleDef> &Defs) const {
  364. for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
  365. Defs.emplace_back(getOperandBundleAt(i));
  366. }
/// Copy all bundle inputs into the flat operand list starting at
/// \p BeginIndex and fill in the per-bundle BundleOpInfo descriptors (tag
/// plus the [Begin, End) operand range each bundle occupies).  Returns the
/// operand iterator one past the last bundle input written.
CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  // First pass: copy every bundle's inputs into consecutive operand slots.
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  // Second pass: record each bundle's interned tag and the slice of the
  // operand list it covers.  BundleOpInfo slots were pre-allocated to match
  // Bundles one-to-one, which the asserts double-check.
  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;
  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");
    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }
  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}
/// Map a flat operand index \p OpIdx to the BundleOpInfo describing the
/// bundle that contains it.
CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
  /// When there aren't many bundles, we do a simple linear search.
  /// Else fall back to an interpolating binary search that uses the fact
  /// that bundles usually have similar numbers of arguments to converge
  /// faster.
  if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
    for (auto &BOI : bundle_op_infos())
      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
        return BOI;

    llvm_unreachable("Did not find operand bundle for operand!");
  }

  // OpIdx must lie past the call arguments and within the last bundle's range.
  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
  assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
         OpIdx < std::prev(bundle_op_info_end())->End &&
         "The Idx isn't in the operand bundle");

  /// We need a fractional number below, and to avoid floating point we use
  /// an integral value multiplied by this scaling constant.
  constexpr unsigned NumberScaling = 1024;

  bundle_op_iterator Begin = bundle_op_info_begin();
  bundle_op_iterator End = bundle_op_info_end();
  bundle_op_iterator Current = Begin;

  // Interpolation search: guess the bundle from the (scaled) average number
  // of operands per bundle in the remaining range, then narrow the range
  // like a binary search until the guess lands inside a bundle.
  while (Begin != End) {
    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
    if (Current >= End)
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
      break;
    if (OpIdx >= Current->End)
      Begin = Current + 1;
    else
      End = Current;
  }

  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
         "the operand bundle doesn't cover every value in the range");
  return *Current;
}
  427. CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
  428. OperandBundleDef OB,
  429. Instruction *InsertPt) {
  430. if (CB->getOperandBundle(ID))
  431. return CB;
  432. SmallVector<OperandBundleDef, 1> Bundles;
  433. CB->getOperandBundlesAsDefs(Bundles);
  434. Bundles.push_back(OB);
  435. return Create(CB, Bundles, InsertPt);
  436. }
  437. CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
  438. Instruction *InsertPt) {
  439. SmallVector<OperandBundleDef, 1> Bundles;
  440. bool CreateNew = false;
  441. for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
  442. auto Bundle = CB->getOperandBundleAt(I);
  443. if (Bundle.getTagID() == ID) {
  444. CreateNew = true;
  445. continue;
  446. }
  447. Bundles.emplace_back(Bundle);
  448. }
  449. return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
  450. }
bool CallBase::hasReadingOperandBundles() const {
  // Implementation note: this is a conservative implementation of operand
  // bundle semantics, where *any* non-assume operand bundle (other than
  // ptrauth and kcfi, which carry no memory semantics) forces a callsite to
  // be at least readonly.
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}

bool CallBase::hasClobberingOperandBundles() const {
  // Similarly conservative: any bundle other than deopt/funclet/ptrauth/kcfi
  // is treated as potentially clobbering memory, except on llvm.assume.
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
              LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}
/// Compute this call site's memory effects by intersecting the call-site
/// attributes with the callee's, after widening the callee's effects by any
/// memory-touching operand bundles.
MemoryEffects CallBase::getMemoryEffects() const {
  MemoryEffects ME = getAttributes().getMemoryEffects();
  if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
    MemoryEffects FnME = Fn->getMemoryEffects();
    if (hasOperandBundles()) {
      // TODO: Add a method to get memory effects for operand bundles instead.
      if (hasReadingOperandBundles())
        FnME |= MemoryEffects::readOnly();
      if (hasClobberingOperandBundles())
        FnME |= MemoryEffects::writeOnly();
    }
    // The call site can only restrict the callee's effects, never widen them.
    ME &= FnME;
  }
  return ME;
}
  480. void CallBase::setMemoryEffects(MemoryEffects ME) {
  481. addFnAttr(Attribute::getWithMemoryEffects(getContext(), ME));
  482. }
  483. /// Determine if the function does not access memory.
  484. bool CallBase::doesNotAccessMemory() const {
  485. return getMemoryEffects().doesNotAccessMemory();
  486. }
  487. void CallBase::setDoesNotAccessMemory() {
  488. setMemoryEffects(MemoryEffects::none());
  489. }
  490. /// Determine if the function does not access or only reads memory.
  491. bool CallBase::onlyReadsMemory() const {
  492. return getMemoryEffects().onlyReadsMemory();
  493. }
  494. void CallBase::setOnlyReadsMemory() {
  495. setMemoryEffects(getMemoryEffects() & MemoryEffects::readOnly());
  496. }
  497. /// Determine if the function does not access or only writes memory.
  498. bool CallBase::onlyWritesMemory() const {
  499. return getMemoryEffects().onlyWritesMemory();
  500. }
  501. void CallBase::setOnlyWritesMemory() {
  502. setMemoryEffects(getMemoryEffects() & MemoryEffects::writeOnly());
  503. }
  504. /// Determine if the call can access memmory only using pointers based
  505. /// on its arguments.
  506. bool CallBase::onlyAccessesArgMemory() const {
  507. return getMemoryEffects().onlyAccessesArgPointees();
  508. }
  509. void CallBase::setOnlyAccessesArgMemory() {
  510. setMemoryEffects(getMemoryEffects() & MemoryEffects::argMemOnly());
  511. }
  512. /// Determine if the function may only access memory that is
  513. /// inaccessible from the IR.
  514. bool CallBase::onlyAccessesInaccessibleMemory() const {
  515. return getMemoryEffects().onlyAccessesInaccessibleMem();
  516. }
  517. void CallBase::setOnlyAccessesInaccessibleMemory() {
  518. setMemoryEffects(getMemoryEffects() & MemoryEffects::inaccessibleMemOnly());
  519. }
  520. /// Determine if the function may only access memory that is
  521. /// either inaccessible from the IR or pointed to by its arguments.
  522. bool CallBase::onlyAccessesInaccessibleMemOrArgMem() const {
  523. return getMemoryEffects().onlyAccessesInaccessibleOrArgMem();
  524. }
  525. void CallBase::setOnlyAccessesInaccessibleMemOrArgMem() {
  526. setMemoryEffects(getMemoryEffects() &
  527. MemoryEffects::inaccessibleOrArgMemOnly());
  528. }
  529. //===----------------------------------------------------------------------===//
  530. // CallInst Implementation
  531. //===----------------------------------------------------------------------===//
/// Shared initialization for a CallInst: records the callee type, fills in
/// the argument and bundle operands, and sets the instruction name.
/// Operand layout is [args..., bundle inputs..., callee], hence the
/// "It + 1 == op_end()" check below.
void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setCalledOperand(Func);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  // After args and bundle inputs, only the callee slot should remain.
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}
  555. void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  556. this->FTy = FTy;
  557. assert(getNumOperands() == 1 && "NumOperands not set up?");
  558. setCalledOperand(Func);
  559. assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
  560. setName(NameStr);
  561. }
// Zero-argument call inserted before an existing instruction.  The single
// operand slot (op_end() - 1) holds the callee.
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}

// Zero-argument call appended to the end of a basic block.
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Ty, Func, Name);
}
// Copy constructor: allocates the same number of operand slots and copies
// operands, bundle descriptors, attributes and call-specific flags.
CallInst::CallInst(const CallInst &CI)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
               CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}
  585. CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
  586. Instruction *InsertPt) {
  587. std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
  588. auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
  589. Args, OpB, CI->getName(), InsertPt);
  590. NewCI->setTailCallKind(CI->getTailCallKind());
  591. NewCI->setCallingConv(CI->getCallingConv());
  592. NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  593. NewCI->setAttributes(CI->getAttributes());
  594. NewCI->setDebugLoc(CI->getDebugLoc());
  595. return NewCI;
  596. }
  597. // Update profile weight for call instruction by scaling it using the ratio
  598. // of S/T. The meaning of "branch_weights" meta data for call instruction is
  599. // transfered to represent call count.
  600. void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  601. auto *ProfileData = getMetadata(LLVMContext::MD_prof);
  602. if (ProfileData == nullptr)
  603. return;
  604. auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
  605. if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
  606. !ProfDataName->getString().equals("VP")))
  607. return;
  608. if (T == 0) {
  609. LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
  610. "div by 0. Ignoring. Likely the function "
  611. << getParent()->getParent()->getName()
  612. << " has 0 entry count, and contains call instructions "
  613. "with non-zero prof info.");
  614. return;
  615. }
  616. MDBuilder MDB(getContext());
  617. SmallVector<Metadata *, 3> Vals;
  618. Vals.push_back(ProfileData->getOperand(0));
  619. APInt APS(128, S), APT(128, T);
  620. if (ProfDataName->getString().equals("branch_weights") &&
  621. ProfileData->getNumOperands() > 0) {
  622. // Using APInt::div may be expensive, but most cases should fit 64 bits.
  623. APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
  624. ->getValue()
  625. .getZExtValue());
  626. Val *= APS;
  627. Vals.push_back(MDB.createConstant(
  628. ConstantInt::get(Type::getInt32Ty(getContext()),
  629. Val.udiv(APT).getLimitedValue(UINT32_MAX))));
  630. } else if (ProfDataName->getString().equals("VP"))
  631. for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
  632. // The first value is the key of the value profile, which will not change.
  633. Vals.push_back(ProfileData->getOperand(i));
  634. uint64_t Count =
  635. mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
  636. ->getValue()
  637. .getZExtValue();
  638. // Don't scale the magic number.
  639. if (Count == NOMORE_ICP_MAGICNUM) {
  640. Vals.push_back(ProfileData->getOperand(i + 1));
  641. continue;
  642. }
  643. // Using APInt::div may be expensive, but most cases should fit 64 bits.
  644. APInt Val(128, Count);
  645. Val *= APS;
  646. Vals.push_back(MDB.createConstant(
  647. ConstantInt::get(Type::getInt64Ty(getContext()),
  648. Val.udiv(APT).getLimitedValue())));
  649. }
  650. setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
  651. }
  652. /// IsConstantOne - Return true only if val is constant int 1
  653. static bool IsConstantOne(Value *val) {
  654. assert(val && "IsConstantOne does not work with nullptr val");
  655. const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
  656. return CVal && CVal->isOne();
  657. }
/// Build IR that allocates AllocSize * ArraySize bytes through a call to
/// malloc (or \p MallocF when provided) and bitcasts the result to a
/// pointer to \p AllocTy.  Exactly one of \p InsertBefore / \p InsertAtEnd
/// must be non-null.  When inserting at the end of a block, the trailing
/// bitcast is NOT inserted into the block; that is the caller's
/// responsibility.
static Instruction *createMalloc(Instruction *InsertBefore,
                                 BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                 Type *AllocTy, Value *AllocSize,
                                 Value *ArraySize,
                                 ArrayRef<OperandBundleDef> OpB,
                                 Function *MallocF, const Twine &Name) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createMalloc needs either InsertBefore or InsertAtEnd");

  // malloc(type) becomes:
  //       bitcast (i8* malloc(typeSize)) to type*
  // malloc(type, arraySize) becomes:
  //       bitcast (i8* malloc(typeSize*arraySize)) to type*
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy) {
    // Normalize the array size to the pointer-sized integer type.
    if (InsertBefore)
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertBefore);
    else
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertAtEnd);
  }

  if (!IsConstantOne(ArraySize)) {
    if (IsConstantOne(AllocSize)) {
      AllocSize = ArraySize;         // Operand * 1 = Operand
    } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
      Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
                                                     false /*ZExt*/);
      // Malloc arg is constant product of type size and array size
      AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
    } else {
      // Multiply type size by the array size...
      if (InsertBefore)
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertBefore);
      else
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertAtEnd);
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();
  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
  CallInst *MCall = nullptr;
  Instruction *Result = nullptr;
  if (InsertBefore) {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
                             InsertBefore);
    Result = MCall;
    if (Result->getType() != AllocPtrType)
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
  } else {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
    Result = MCall;
    if (Result->getType() != AllocPtrType) {
      // Insert the raw call; the bitcast is left for the caller to place.
      MCall->insertInto(InsertAtEnd, InsertAtEnd->end());
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name);
    }
  }
  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
    MCall->setCallingConv(F->getCallingConv());
    // malloc's result never aliases other pointers; record that on the
    // declaration so later analyses can rely on it.
    if (!F->returnDoesNotAlias())
      F->setReturnDoesNotAlias();
  }
  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return Result;
}
  735. /// CreateMalloc - Generate the IR for a call to malloc:
  736. /// 1. Compute the malloc call's argument as the specified type's size,
  737. /// possibly multiplied by the array size if the array size is not
  738. /// constant 1.
  739. /// 2. Call malloc with that argument.
  740. /// 3. Bitcast the result of the malloc call to the specified type.
  741. Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
  742. Type *IntPtrTy, Type *AllocTy,
  743. Value *AllocSize, Value *ArraySize,
  744. Function *MallocF,
  745. const Twine &Name) {
  746. return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
  747. ArraySize, std::nullopt, MallocF, Name);
  748. }
  749. Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
  750. Type *IntPtrTy, Type *AllocTy,
  751. Value *AllocSize, Value *ArraySize,
  752. ArrayRef<OperandBundleDef> OpB,
  753. Function *MallocF,
  754. const Twine &Name) {
  755. return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
  756. ArraySize, OpB, MallocF, Name);
  757. }
  758. /// CreateMalloc - Generate the IR for a call to malloc:
  759. /// 1. Compute the malloc call's argument as the specified type's size,
  760. /// possibly multiplied by the array size if the array size is not
  761. /// constant 1.
  762. /// 2. Call malloc with that argument.
  763. /// 3. Bitcast the result of the malloc call to the specified type.
  764. /// Note: This function does not add the bitcast to the basic block, that is the
  765. /// responsibility of the caller.
  766. Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
  767. Type *IntPtrTy, Type *AllocTy,
  768. Value *AllocSize, Value *ArraySize,
  769. Function *MallocF, const Twine &Name) {
  770. return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
  771. ArraySize, std::nullopt, MallocF, Name);
  772. }
  773. Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
  774. Type *IntPtrTy, Type *AllocTy,
  775. Value *AllocSize, Value *ArraySize,
  776. ArrayRef<OperandBundleDef> OpB,
  777. Function *MallocF, const Twine &Name) {
  778. return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
  779. ArraySize, OpB, MallocF, Name);
  780. }
/// Build IR that calls "void free(i8*)" on \p Source, inserting a bitcast
/// to i8* first when needed.  Exactly one of \p InsertBefore /
/// \p InsertAtEnd must be non-null.  With InsertAtEnd, the call itself is
/// NOT inserted into the block; that is the caller's responsibility.
static Instruction *createFree(Value *Source,
                               ArrayRef<OperandBundleDef> Bundles,
                               Instruction *InsertBefore,
                               BasicBlock *InsertAtEnd) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createFree needs either InsertBefore or InsertAtEnd");
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");

  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();

  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
  // prototype free as "void free(void*)"
  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
  CallInst *Result = nullptr;
  Value *PtrCast = Source;
  if (InsertBefore) {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
  } else {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
    // Note: the call is created but deliberately not inserted here.
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
  }
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
    Result->setCallingConv(F->getCallingConv());

  return Result;
}
  811. /// CreateFree - Generate the IR for a call to the builtin free function.
  812. Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
  813. return createFree(Source, std::nullopt, InsertBefore, nullptr);
  814. }
  815. Instruction *CallInst::CreateFree(Value *Source,
  816. ArrayRef<OperandBundleDef> Bundles,
  817. Instruction *InsertBefore) {
  818. return createFree(Source, Bundles, InsertBefore, nullptr);
  819. }
  820. /// CreateFree - Generate the IR for a call to the builtin free function.
  821. /// Note: This function does not add the call to the basic block, that is the
  822. /// responsibility of the caller.
  823. Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
  824. Instruction *FreeCall =
  825. createFree(Source, std::nullopt, nullptr, InsertAtEnd);
  826. assert(FreeCall && "CreateFree did not create a CallInst");
  827. return FreeCall;
  828. }
  829. Instruction *CallInst::CreateFree(Value *Source,
  830. ArrayRef<OperandBundleDef> Bundles,
  831. BasicBlock *InsertAtEnd) {
  832. Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
  833. assert(FreeCall && "CreateFree did not create a CallInst");
  834. return FreeCall;
  835. }
  836. //===----------------------------------------------------------------------===//
  837. // InvokeInst Implementation
  838. //===----------------------------------------------------------------------===//
/// Initialize an invoke: record the callee type, fill in the argument and
/// bundle operands, and wire up the normal/unwind successors.  Operand
/// layout is [args..., bundle inputs..., normal dest, unwind dest, callee],
/// hence the "It + 3 == op_end()" check below.
void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}
// Copy constructor: allocates the same number of operand slots and copies
// operands, bundle descriptors and invoke-specific state.
InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
               II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}
  877. InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
  878. Instruction *InsertPt) {
  879. std::vector<Value *> Args(II->arg_begin(), II->arg_end());
  880. auto *NewII = InvokeInst::Create(
  881. II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
  882. II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
  883. NewII->setCallingConv(II->getCallingConv());
  884. NewII->SubclassOptionalData = II->SubclassOptionalData;
  885. NewII->setAttributes(II->getAttributes());
  886. NewII->setDebugLoc(II->getDebugLoc());
  887. return NewII;
  888. }
  889. LandingPadInst *InvokeInst::getLandingPadInst() const {
  890. return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
  891. }
  892. //===----------------------------------------------------------------------===//
  893. // CallBrInst Implementation
  894. //===----------------------------------------------------------------------===//
/// Initialize a callbr: record the callee type, fill in the argument and
/// bundle operands, and wire up the default and indirect destinations.
/// Operand layout is [args..., bundle inputs..., default dest, indirect
/// dests..., callee], hence the "It + 2 + IndirectDests.size()" check below.
void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;
  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), IndirectDests.size(),
                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  std::copy(Args.begin(), Args.end(), op_begin());
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}
// Copy constructor: allocates the same number of operand slots and copies
// operands, bundle descriptors, and the indirect-destination count.
CallBrInst::CallBrInst(const CallBrInst &CBI)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
               CBI.getNumOperands()) {
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}
  938. CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
  939. Instruction *InsertPt) {
  940. std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
  941. auto *NewCBI = CallBrInst::Create(
  942. CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
  943. CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
  944. NewCBI->setCallingConv(CBI->getCallingConv());
  945. NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  946. NewCBI->setAttributes(CBI->getAttributes());
  947. NewCBI->setDebugLoc(CBI->getDebugLoc());
  948. NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  949. return NewCBI;
  950. }
  951. //===----------------------------------------------------------------------===//
  952. // ReturnInst Implementation
  953. //===----------------------------------------------------------------------===//
// Copy constructor: a ret has zero operands (void return) or one (the
// returned value).
ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}
// "!!retVal" is 0 for a void return and 1 otherwise, which sizes the
// operand list to match.
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}

// Void return appended to the end of a block: no operands at all.
ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}
  979. //===----------------------------------------------------------------------===//
  980. // ResumeInst Implementation
  981. //===----------------------------------------------------------------------===//
// Copy constructor: a resume always has exactly one operand, the exception
// value being propagated.
ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

// Create `resume <Exn>` inserted before \p InsertBefore.
ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

// Create `resume <Exn>` appended to the end of \p InsertAtEnd.
ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}
  997. //===----------------------------------------------------------------------===//
  998. // CleanupReturnInst Implementation
  999. //===----------------------------------------------------------------------===//
// Copy constructor.  Operand 0 is the cleanuppad token; operand 1 (present
// only when the source has an unwind destination) is the unwind block.
CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  // Copy the raw subclass-data bits (including the UnwindDest flag) without
  // interpreting them.
  setSubclassData<Instruction::OpaqueField>(
      CRI.getSubclassData<Instruction::OpaqueField>());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

// Shared constructor tail: record whether an unwind destination exists and
// wire up the operands.  The caller must have allocated 2 operands when
// \p UnwindBB is non-null, 1 otherwise.
void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setSubclassData<UnwindDestField>(true);
  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

// \p Values is the operand count (1 or 2) chosen by the Create() wrapper.
CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

// Same as above, but appended to the end of \p InsertAtEnd.
CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}
  1034. //===----------------------------------------------------------------------===//
  1035. // CatchReturnInst Implementation
  1036. //===----------------------------------------------------------------------===//
// Wire up the two fixed operands: the catchpad token and the successor block.
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

// Copy constructor: catchret always has exactly two operands.
CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

// Create `catchret from <CatchPad> to <BB>` inserted before \p InsertBefore.
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

// Same as above, but appended to the end of \p InsertAtEnd.
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}
  1061. //===----------------------------------------------------------------------===//
  1062. // CatchSwitchInst Implementation
  1063. //===----------------------------------------------------------------------===//
// CatchSwitchInst uses hung-off uses (a separately allocated, growable
// operand array), so the base constructor is invoked with 0 inline operands
// and init() allocates the real operand storage.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  // Reserve one extra slot for the unwind destination (if any) and one for
  // the parent pad; NumReservedValues itself counts only handlers.
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

// Same as above, but appended to the end of \p InsertAtEnd.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

// Copy constructor: re-run init() to allocate matching hung-off storage,
// then copy every operand after the parent pad (handlers and, if present,
// the unwind destination) from the source.
CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

// Allocate the hung-off use array and install the fixed operands.
// Operand layout: [0] parent pad, [1] unwind dest (optional), then handlers.
void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);
  Op<0>() = ParentPad;
  if (UnwindDest) {
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}
/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  // Nothing to do if the reserved hung-off storage already fits the request.
  if (ReservedSpace >= NumOperands + Size)
    return;
  // Roughly double the capacity.  Note the precedence: Size is halved before
  // being added, i.e. (NumOperands + Size/2) * 2, matching the doubling
  // policy described above.
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}
  1117. void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  1118. unsigned OpNo = getNumOperands();
  1119. growOperands(1);
  1120. assert(OpNo < ReservedSpace && "Growing didn't work!");
  1121. setNumHungOffUseOperands(getNumOperands() + 1);
  1122. getOperandList()[OpNo] = Handler;
  1123. }
  1124. void CatchSwitchInst::removeHandler(handler_iterator HI) {
  1125. // Move all subsequent handlers up one.
  1126. Use *EndDst = op_end() - 1;
  1127. for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
  1128. *CurDst = *(CurDst + 1);
  1129. // Null out the last handler use.
  1130. *EndDst = nullptr;
  1131. setNumHungOffUseOperands(getNumOperands() - 1);
  1132. }
  1133. //===----------------------------------------------------------------------===//
  1134. // FuncletPadInst Implementation
  1135. //===----------------------------------------------------------------------===//
// Shared constructor tail: copy the pad arguments into the operand array,
// then install the parent pad (stored as the last operand via setParentPad).
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

// Copy constructor: duplicate all operands (args + parent pad) verbatim.
FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

// \p Op is either CleanupPad or CatchPad; \p Values == Args.size() + 1
// (the +1 being the parent pad operand), chosen by the Create() wrapper.
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

// Same as above, but appended to the end of \p InsertAtEnd.
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}
  1167. //===----------------------------------------------------------------------===//
  1168. // UnreachableInst Implementation
  1169. //===----------------------------------------------------------------------===//
// `unreachable` has no operands and void type; the two constructors differ
// only in insertion position.
UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}
  1177. //===----------------------------------------------------------------------===//
  1178. // BranchInst Implementation
  1179. //===----------------------------------------------------------------------===//
  1180. void BranchInst::AssertOK() {
  1181. if (isConditional())
  1182. assert(getCondition()->getType()->isIntegerTy(1) &&
  1183. "May only branch on boolean predicates!");
  1184. }
// Branch operands are co-allocated before the instruction and addressed with
// negative Op<> indices relative to op_end().  Layout for a conditional
// branch is [Cond, IfFalse, IfTrue], so Op<-1> is always the taken/true
// successor and Op<-3> the condition.

// Unconditional `br <IfTrue>` inserted before \p InsertBefore (1 operand).
BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

// Conditional `br <Cond>, <IfTrue>, <IfFalse>` inserted before
// \p InsertBefore (3 operands).
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

// Unconditional branch appended to the end of \p InsertAtEnd.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

// Conditional branch appended to the end of \p InsertAtEnd.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

// Copy constructor: handles both the 1-operand (unconditional) and
// 3-operand (conditional) forms.
BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
  SubclassOptionalData = BI.SubclassOptionalData;
}
// Exchange the true and false successors of a conditional branch, keeping
// any branch-weight profile metadata consistent with the new operand order.
void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());
  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}
  1244. //===----------------------------------------------------------------------===//
  1245. // AllocaInst Implementation
  1246. //===----------------------------------------------------------------------===//
  1247. static Value *getAISize(LLVMContext &Context, Value *Amt) {
  1248. if (!Amt)
  1249. Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  1250. else {
  1251. assert(!isa<BasicBlock>(Amt) &&
  1252. "Passed basic block into allocation size parameter! Use other ctor");
  1253. assert(Amt->getType()->isIntegerTy() &&
  1254. "Allocation array size is not an integer!");
  1255. }
  1256. return Amt;
  1257. }
  1258. static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB) {
  1259. assert(BB && "Insertion BB cannot be null when alignment not provided!");
  1260. assert(BB->getParent() &&
  1261. "BB must be in a Function when alignment not provided!");
  1262. const DataLayout &DL = BB->getModule()->getDataLayout();
  1263. return DL.getPrefTypeAlign(Ty);
  1264. }
  1265. static Align computeAllocaDefaultAlign(Type *Ty, Instruction *I) {
  1266. assert(I && "Insertion position cannot be null when alignment not provided!");
  1267. return computeAllocaDefaultAlign(Ty, I->getParent());
  1268. }
// Convenience constructors delegate down the chain, defaulting the array
// size to null (one element) and the alignment to the data layout's
// preferred alignment for \p Ty.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertAtEnd), Name,
                 InsertAtEnd) {}

// Full constructor: result type is a pointer to \p Ty in \p AddrSpace; the
// single operand is the (normalized) array size.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

// Same as above, but appended to the end of \p InsertAtEnd.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}
  1304. bool AllocaInst::isArrayAllocation() const {
  1305. if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
  1306. return !CI->isOne();
  1307. return true;
  1308. }
  1309. /// isStaticAlloca - Return true if this alloca is in the entry block of the
  1310. /// function and is a constant size. If so, the code generator will fold it
  1311. /// into the prolog/epilog code, so it is basically free.
  1312. bool AllocaInst::isStaticAlloca() const {
  1313. // Must be constant size.
  1314. if (!isa<ConstantInt>(getArraySize())) return false;
  1315. // Must be in the entry block.
  1316. const BasicBlock *Parent = getParent();
  1317. return Parent->isEntryBlock() && !isUsedWithInAlloca();
  1318. }
  1319. //===----------------------------------------------------------------------===//
  1320. // LoadInst Implementation
  1321. //===----------------------------------------------------------------------===//
// Debug-build sanity check: the sole operand of a load must be a pointer.
void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
}
  1326. static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
  1327. assert(BB && "Insertion BB cannot be null when alignment not provided!");
  1328. assert(BB->getParent() &&
  1329. "BB must be in a Function when alignment not provided!");
  1330. const DataLayout &DL = BB->getModule()->getDataLayout();
  1331. return DL.getABITypeAlign(Ty);
  1332. }
  1333. static Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
  1334. assert(I && "Insertion position cannot be null when alignment not provided!");
  1335. return computeLoadStoreDefaultAlign(Ty, I->getParent());
  1336. }
// The LoadInst constructors form a delegation chain, progressively
// defaulting: volatility (false), alignment (ABI default for Ty at the
// insertion point), and atomicity (NotAtomic / system scope).
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

// Full constructor: result type is \p Ty, sole operand is \p Ptr, which must
// point to (or be an opaque pointer compatible with) \p Ty.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(cast<PointerType>(Ptr->getType())->isOpaqueOrPointeeTypeMatches(Ty));
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

// Same as above, but appended to the end of \p InsertAE.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
  assert(cast<PointerType>(Ptr->getType())->isOpaqueOrPointeeTypeMatches(Ty));
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}
  1381. //===----------------------------------------------------------------------===//
  1382. // StoreInst Implementation
  1383. //===----------------------------------------------------------------------===//
// Debug-build sanity check: operand 0 is the stored value, operand 1 is the
// destination pointer, and the pointee type must match the value type.
void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(cast<PointerType>(getOperand(1)->getType())
             ->isOpaqueOrPointeeTypeMatches(getOperand(0)->getType()) &&
         "Ptr must be a pointer to Val type!");
}
// The StoreInst constructors form a delegation chain mirroring LoadInst:
// volatility defaults to false, alignment to the ABI default for the stored
// value's type, atomicity to NotAtomic / system scope.
StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
                InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}

// Full constructor: a store has void type and two operands
// (value, destination pointer).
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

// Same as above, but appended to the end of \p InsertAtEnd.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}
  1440. //===----------------------------------------------------------------------===//
  1441. // AtomicCmpXchgInst Implementation
  1442. //===----------------------------------------------------------------------===//
// Shared constructor tail: install the three operands (pointer, expected
// value, replacement value) and the orderings/scope/alignment, then validate
// the operand types in debug builds.
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             Align Alignment, AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(cast<PointerType>(getOperand(0)->getType())
             ->isOpaqueOrPointeeTypeMatches(getOperand(1)->getType()) &&
         "Ptr must be a pointer to Cmp type!");
  assert(cast<PointerType>(getOperand(0)->getType())
             ->isOpaqueOrPointeeTypeMatches(getOperand(2)->getType()) &&
         "Ptr must be a pointer to NewVal type!");
  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
         "Cmp type and NewVal type must be same!");
}
// A cmpxchg produces a { T, i1 } struct: the loaded value plus a success
// flag; hence the StructType::get() result type.
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}

// Same as above, but appended to the end of \p InsertAtEnd.
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
  1491. //===----------------------------------------------------------------------===//
  1492. // AtomicRMWInst Implementation
  1493. //===----------------------------------------------------------------------===//
  1494. void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
  1495. Align Alignment, AtomicOrdering Ordering,
  1496. SyncScope::ID SSID) {
  1497. assert(Ordering != AtomicOrdering::NotAtomic &&
  1498. "atomicrmw instructions can only be atomic.");
  1499. assert(Ordering != AtomicOrdering::Unordered &&
  1500. "atomicrmw instructions cannot be unordered.");
  1501. Op<0>() = Ptr;
  1502. Op<1>() = Val;
  1503. setOperation(Operation);
  1504. setOrdering(Ordering);
  1505. setSyncScopeID(SSID);
  1506. setAlignment(Alignment);
  1507. assert(getOperand(0) && getOperand(1) &&
  1508. "All operands must be non-null!");
  1509. assert(getOperand(0)->getType()->isPointerTy() &&
  1510. "Ptr must have pointer type!");
  1511. assert(cast<PointerType>(getOperand(0)->getType())
  1512. ->isOpaqueOrPointeeTypeMatches(getOperand(1)->getType()) &&
  1513. "Ptr must be a pointer to Val type!");
  1514. assert(Ordering != AtomicOrdering::NotAtomic &&
  1515. "AtomicRMW instructions must be atomic!");
  1516. }
// An atomicrmw yields the old value at the address, so its result type is
// the value operand's type.
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, Instruction *InsertBefore)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}

// Same as above, but appended to the end of \p InsertAtEnd.
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, BasicBlock *InsertAtEnd)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}
  1533. StringRef AtomicRMWInst::getOperationName(BinOp Op) {
  1534. switch (Op) {
  1535. case AtomicRMWInst::Xchg:
  1536. return "xchg";
  1537. case AtomicRMWInst::Add:
  1538. return "add";
  1539. case AtomicRMWInst::Sub:
  1540. return "sub";
  1541. case AtomicRMWInst::And:
  1542. return "and";
  1543. case AtomicRMWInst::Nand:
  1544. return "nand";
  1545. case AtomicRMWInst::Or:
  1546. return "or";
  1547. case AtomicRMWInst::Xor:
  1548. return "xor";
  1549. case AtomicRMWInst::Max:
  1550. return "max";
  1551. case AtomicRMWInst::Min:
  1552. return "min";
  1553. case AtomicRMWInst::UMax:
  1554. return "umax";
  1555. case AtomicRMWInst::UMin:
  1556. return "umin";
  1557. case AtomicRMWInst::FAdd:
  1558. return "fadd";
  1559. case AtomicRMWInst::FSub:
  1560. return "fsub";
  1561. case AtomicRMWInst::FMax:
  1562. return "fmax";
  1563. case AtomicRMWInst::FMin:
  1564. return "fmin";
  1565. case AtomicRMWInst::UIncWrap:
  1566. return "uinc_wrap";
  1567. case AtomicRMWInst::UDecWrap:
  1568. return "udec_wrap";
  1569. case AtomicRMWInst::BAD_BINOP:
  1570. return "<invalid operation>";
  1571. }
  1572. llvm_unreachable("invalid atomicrmw operation");
  1573. }
  1574. //===----------------------------------------------------------------------===//
  1575. // FenceInst Implementation
  1576. //===----------------------------------------------------------------------===//
// A fence has no operands and void type; it carries only an ordering and a
// synchronization scope in its subclass data.
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

// Same as above, but appended to the end of \p InsertAtEnd.
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}
  1591. //===----------------------------------------------------------------------===//
  1592. // GetElementPtrInst Implementation
  1593. //===----------------------------------------------------------------------===//
// Shared constructor tail: operand 0 is the base pointer, operands 1..N are
// the index list, copied in order.
void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  llvm::copy(IdxList, op_begin() + 1);
  setName(Name);
}
// Copy constructor: duplicates all operands, the cached source/result
// element types, and the optional-data flags (e.g. the inbounds bit).
GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
    : Instruction(GEPI.getType(), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) -
                      GEPI.getNumOperands(),
                  GEPI.getNumOperands()),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
  SubclassOptionalData = GEPI.SubclassOptionalData;
}
  1612. Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
  1613. if (auto *Struct = dyn_cast<StructType>(Ty)) {
  1614. if (!Struct->indexValid(Idx))
  1615. return nullptr;
  1616. return Struct->getTypeAtIndex(Idx);
  1617. }
  1618. if (!Idx->getType()->isIntOrIntVectorTy())
  1619. return nullptr;
  1620. if (auto *Array = dyn_cast<ArrayType>(Ty))
  1621. return Array->getElementType();
  1622. if (auto *Vector = dyn_cast<VectorType>(Ty))
  1623. return Vector->getElementType();
  1624. return nullptr;
  1625. }
  1626. Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
  1627. if (auto *Struct = dyn_cast<StructType>(Ty)) {
  1628. if (Idx >= Struct->getNumElements())
  1629. return nullptr;
  1630. return Struct->getElementType(Idx);
  1631. }
  1632. if (auto *Array = dyn_cast<ArrayType>(Ty))
  1633. return Array->getElementType();
  1634. if (auto *Vector = dyn_cast<VectorType>(Ty))
  1635. return Vector->getElementType();
  1636. return nullptr;
  1637. }
  1638. template <typename IndexTy>
  1639. static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
  1640. if (IdxList.empty())
  1641. return Ty;
  1642. for (IndexTy V : IdxList.slice(1)) {
  1643. Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
  1644. if (!Ty)
  1645. return Ty;
  1646. }
  1647. return Ty;
  1648. }
// Public wrappers over the templated walker for the three index-list kinds.
Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty,
                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}
  1659. /// hasAllZeroIndices - Return true if all of the indices of this GEP are
  1660. /// zeros. If so, the result pointer and the first operand have the same
  1661. /// value, just potentially different types.
  1662. bool GetElementPtrInst::hasAllZeroIndices() const {
  1663. for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
  1664. if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
  1665. if (!CI->isZero()) return false;
  1666. } else {
  1667. return false;
  1668. }
  1669. }
  1670. return true;
  1671. }
  1672. /// hasAllConstantIndices - Return true if all of the indices of this GEP are
  1673. /// constant integers. If so, the result pointer and the first operand have
  1674. /// a constant offset between them.
  1675. bool GetElementPtrInst::hasAllConstantIndices() const {
  1676. for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
  1677. if (!isa<ConstantInt>(getOperand(i)))
  1678. return false;
  1679. }
  1680. return true;
  1681. }
  1682. void GetElementPtrInst::setIsInBounds(bool B) {
  1683. cast<GEPOperator>(this)->setIsInBounds(B);
  1684. }
  1685. bool GetElementPtrInst::isInBounds() const {
  1686. return cast<GEPOperator>(this)->isInBounds();
  1687. }
  1688. bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
  1689. APInt &Offset) const {
  1690. // Delegate to the generic GEPOperator implementation.
  1691. return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
  1692. }
  1693. bool GetElementPtrInst::collectOffset(
  1694. const DataLayout &DL, unsigned BitWidth,
  1695. MapVector<Value *, APInt> &VariableOffsets,
  1696. APInt &ConstantOffset) const {
  1697. // Delegate to the generic GEPOperator implementation.
  1698. return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
  1699. ConstantOffset);
  1700. }
  1701. //===----------------------------------------------------------------------===//
  1702. // ExtractElementInst Implementation
  1703. //===----------------------------------------------------------------------===//
/// Construct an extractelement instruction and insert it before \p InsertBef.
/// The result type is the element type of \p Val's vector type.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       Instruction *InsertBef)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}
/// Construct an extractelement instruction and append it to basic block
/// \p InsertAE. The result type is the element type of \p Val's vector type.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       BasicBlock *InsertAE)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertAE) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}
  1730. bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
  1731. if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
  1732. return false;
  1733. return true;
  1734. }
  1735. //===----------------------------------------------------------------------===//
  1736. // InsertElementInst Implementation
  1737. //===----------------------------------------------------------------------===//
/// Construct an insertelement instruction and insert it before \p InsertBef.
/// The result has the same vector type as \p Vec.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     Instruction *InsertBef)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}
/// Construct an insertelement instruction and append it to basic block
/// \p InsertAE. The result has the same vector type as \p Vec.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     BasicBlock *InsertAE)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertAE) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}
  1764. bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
  1765. const Value *Index) {
  1766. if (!Vec->getType()->isVectorTy())
  1767. return false; // First operand of insertelement must be vector type.
  1768. if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
  1769. return false;// Second operand of insertelement must be vector element type.
  1770. if (!Index->getType()->isIntegerTy())
  1771. return false; // Third operand of insertelement must be i32.
  1772. return true;
  1773. }
  1774. //===----------------------------------------------------------------------===//
  1775. // ShuffleVectorInst Implementation
  1776. //===----------------------------------------------------------------------===//
  1777. static Value *createPlaceholderForShuffleVector(Value *V) {
  1778. assert(V && "Cannot create placeholder of nullptr V");
  1779. return PoisonValue::get(V->getType());
  1780. }
/// Single-input form: delegates to the two-input constructor with a poison
/// placeholder as the second vector.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
                                     Instruction *InsertBefore)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertBefore) {}
/// Single-input form (insert-at-end): delegates to the two-input constructor
/// with a poison placeholder as the second vector.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
                                     BasicBlock *InsertAtEnd)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertAtEnd) {}
/// Single-input form with an integer mask: delegates to the two-input
/// constructor with a poison placeholder as the second vector.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertBefore) {}
/// Single-input form with an integer mask (insert-at-end): delegates to the
/// two-input constructor with a poison placeholder as the second vector.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertAtEnd) {}
/// Construct a shufflevector from a constant mask value. The result vector
/// has V1's element type and the mask's element count.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  // Decode the constant mask into the integer form stored on the instruction.
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}
/// Construct a shufflevector from a constant mask value and append it to
/// \p InsertAtEnd. The result vector has V1's element type and the mask's
/// element count.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  // Decode the constant mask into the integer form stored on the instruction.
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}
/// Construct a shufflevector from an integer mask. The result is fixed or
/// scalable to match V1, with the mask's length as the (min) element count.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}
/// Construct a shufflevector from an integer mask and append it to
/// \p InsertAtEnd. The result is fixed or scalable to match V1, with the
/// mask's length as the (min) element count.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}
  1860. void ShuffleVectorInst::commute() {
  1861. int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  1862. int NumMaskElts = ShuffleMask.size();
  1863. SmallVector<int, 16> NewMask(NumMaskElts);
  1864. for (int i = 0; i != NumMaskElts; ++i) {
  1865. int MaskElt = getMaskValue(i);
  1866. if (MaskElt == UndefMaskElem) {
  1867. NewMask[i] = UndefMaskElem;
  1868. continue;
  1869. }
  1870. assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
  1871. MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
  1872. NewMask[i] = MaskElt;
  1873. }
  1874. setShuffleMask(NewMask);
  1875. Op<0>().swap(Op<1>());
  1876. }
/// Check that V1/V2 are identically-typed vectors and that every mask element
/// is either undef (-1) or a valid lane index into the concatenation of the
/// two inputs; scalable vectors additionally only admit splat-of-lane-0 or
/// all-undef masks.
bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        ArrayRef<int> Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
    return false;
  // Make sure the mask elements make sense.
  int V1Size =
      cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
  for (int Elem : Mask)
    if (Elem != UndefMaskElem && Elem >= V1Size * 2)
      return false;
  // Scalable masks cannot enumerate lanes, so only a uniform zero (splat) or
  // uniform undef mask is expressible.
  if (isa<ScalableVectorType>(V1->getType()))
    if ((Mask[0] != 0 && Mask[0] != UndefMaskElem) || !all_equal(Mask))
      return false;
  return true;
}
/// Check shufflevector validity when the mask is given as a constant Value:
/// the mask must be a vector of i32 of the same (fixed/scalable) kind as the
/// inputs, and each defined element must index into the input concatenation.
bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        const Value *Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
    return false;
  // Mask must be vector of i32, and must be the same kind of vector as the
  // input vectors
  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
      isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
    return false;
  // Check to see if Mask is valid.
  // Undef and zeroinitializer are always valid (and cover the scalable case).
  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
    return true;
  // A ConstantVector mask: every element must be an in-range i32 or undef.
  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
        if (CI->uge(V1Size*2))
          return false;
      } else if (!isa<UndefValue>(Op)) {
        return false;
      }
    }
    return true;
  }
  // A ConstantDataSequential mask: every element must be in range.
  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
    for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
         i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size*2)
        return false;
    return true;
  }
  // Any other constant form is not a valid shuffle mask.
  return false;
}
/// Decode a constant shuffle mask into \p Result as integers, using -1 for
/// undef lanes. Scalable masks must be undef or zeroinitializer.
void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
                                       SmallVectorImpl<int> &Result) {
  ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
  // zeroinitializer decodes to an all-zero mask for both fixed and scalable.
  if (isa<ConstantAggregateZero>(Mask)) {
    Result.resize(EC.getKnownMinValue(), 0);
    return;
  }
  Result.reserve(EC.getKnownMinValue());
  if (EC.isScalable()) {
    assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
           "Scalable vector shuffle mask must be undef or zeroinitializer");
    int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
    for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
      Result.emplace_back(MaskVal);
    return;
  }
  unsigned NumElts = EC.getKnownMinValue();
  // Fast path: packed constant data can be read element-by-element.
  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(CDS->getElementAsInteger(i));
    return;
  }
  // General case: walk aggregate elements, mapping undef lanes to -1.
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(i);
    Result.push_back(isa<UndefValue>(C) ? -1 :
                     cast<ConstantInt>(C)->getZExtValue());
  }
}
/// Replace the stored integer mask and keep its Constant (bitcode) form in
/// sync with it.
void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
  ShuffleMask.assign(Mask.begin(), Mask.end());
  ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
}
  1961. Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
  1962. Type *ResultTy) {
  1963. Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
  1964. if (isa<ScalableVectorType>(ResultTy)) {
  1965. assert(all_equal(Mask) && "Unexpected shuffle");
  1966. Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
  1967. if (Mask[0] == 0)
  1968. return Constant::getNullValue(VecTy);
  1969. return UndefValue::get(VecTy);
  1970. }
  1971. SmallVector<Constant *, 16> MaskConst;
  1972. for (int Elem : Mask) {
  1973. if (Elem == UndefMaskElem)
  1974. MaskConst.push_back(UndefValue::get(Int32Ty));
  1975. else
  1976. MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
  1977. }
  1978. return ConstantVector::get(MaskConst);
  1979. }
  1980. static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
  1981. assert(!Mask.empty() && "Shuffle mask must contain elements");
  1982. bool UsesLHS = false;
  1983. bool UsesRHS = false;
  1984. for (int I : Mask) {
  1985. if (I == -1)
  1986. continue;
  1987. assert(I >= 0 && I < (NumOpElts * 2) &&
  1988. "Out-of-bounds shuffle mask element");
  1989. UsesLHS |= (I < NumOpElts);
  1990. UsesRHS |= (I >= NumOpElts);
  1991. if (UsesLHS && UsesRHS)
  1992. return false;
  1993. }
  1994. // Allow for degenerate case: completely undef mask means neither source is used.
  1995. return UsesLHS || UsesRHS;
  1996. }
/// Return true if \p Mask chooses elements from only one source vector.
bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) {
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isSingleSourceMaskImpl(Mask, Mask.size());
}
  2002. static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
  2003. if (!isSingleSourceMaskImpl(Mask, NumOpElts))
  2004. return false;
  2005. for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
  2006. if (Mask[i] == -1)
  2007. continue;
  2008. if (Mask[i] != i && Mask[i] != (NumOpElts + i))
  2009. return false;
  2010. }
  2011. return true;
  2012. }
/// Return true if \p Mask is an identity shuffle of one source vector.
bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) {
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isIdentityMaskImpl(Mask, Mask.size());
}
  2018. bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) {
  2019. if (!isSingleSourceMask(Mask))
  2020. return false;
  2021. // The number of elements in the mask must be at least 2.
  2022. int NumElts = Mask.size();
  2023. if (NumElts < 2)
  2024. return false;
  2025. for (int i = 0; i < NumElts; ++i) {
  2026. if (Mask[i] == -1)
  2027. continue;
  2028. if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i))
  2029. return false;
  2030. }
  2031. return true;
  2032. }
  2033. bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) {
  2034. if (!isSingleSourceMask(Mask))
  2035. return false;
  2036. for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
  2037. if (Mask[i] == -1)
  2038. continue;
  2039. if (Mask[i] != 0 && Mask[i] != NumElts)
  2040. return false;
  2041. }
  2042. return true;
  2043. }
  2044. bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) {
  2045. // Select is differentiated from identity. It requires using both sources.
  2046. if (isSingleSourceMask(Mask))
  2047. return false;
  2048. for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
  2049. if (Mask[i] == -1)
  2050. continue;
  2051. if (Mask[i] != i && Mask[i] != (NumElts + i))
  2052. return false;
  2053. }
  2054. return true;
  2055. }
/// Return true if \p Mask is a vector-transpose mask (interleaving the even
/// or odd lanes of the two sources).
bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) {
  // Example masks that will return true:
  // v1 = <a, b, c, d>
  // v2 = <e, f, g, h>
  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
  int NumElts = Mask.size();
  if (NumElts < 2 || !isPowerOf2_32(NumElts))
    return false;
  // 2. The first element of the mask must be either a 0 or a 1.
  if (Mask[0] != 0 && Mask[0] != 1)
    return false;
  // 3. The difference between the first 2 elements must be equal to the
  // number of elements in the mask.
  if ((Mask[1] - Mask[0]) != NumElts)
    return false;
  // 4. The difference between consecutive even-numbered and odd-numbered
  // elements must be equal to 2.
  // Note: undef elements are rejected — a transpose must be fully specified.
  for (int i = 2; i < NumElts; ++i) {
    int MaskEltVal = Mask[i];
    if (MaskEltVal == -1)
      return false;
    int MaskEltPrevVal = Mask[i - 2];
    if (MaskEltVal - MaskEltPrevVal != 2)
      return false;
  }
  return true;
}
/// Return true if \p Mask extracts a contiguous run of lanes starting at some
/// offset into the concatenation of the two sources; on success \p Index is
/// set to that starting offset.
bool ShuffleVectorInst::isSpliceMask(ArrayRef<int> Mask, int &Index) {
  // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
  int StartIndex = -1;
  for (int I = 0, E = Mask.size(); I != E; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      continue;
    if (StartIndex == -1) {
      // Don't support a StartIndex that begins in the second input, or if the
      // first non-undef index would access below the StartIndex.
      if (MaskEltVal < I || E <= (MaskEltVal - I))
        return false;
      StartIndex = MaskEltVal - I;
      continue;
    }
    // Splice is sequential starting from StartIndex.
    if (MaskEltVal != (StartIndex + I))
      return false;
  }
  // An all-undef mask determines no start index at all.
  if (StartIndex == -1)
    return false;
  // NOTE: This accepts StartIndex == 0 (COPY).
  Index = StartIndex;
  return true;
}
/// Return true if \p Mask extracts a contiguous subvector from one source of
/// \p NumSrcElts lanes; on success \p Index is the starting lane of the
/// extraction.
bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
                                               int NumSrcElts, int &Index) {
  // Must extract from a single source.
  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;
  // Must be smaller (else this is an Identity shuffle).
  if (NumSrcElts <= (int)Mask.size())
    return false;
  // Find start of extraction, accounting that we may start with an UNDEF.
  int SubIndex = -1;
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    // Normalize references to the second operand via modulo, then check that
    // every defined lane agrees on the same starting offset.
    int Offset = (M % NumSrcElts) - i;
    if (0 <= SubIndex && SubIndex != Offset)
      return false;
    SubIndex = Offset;
  }
  // The whole extracted range must fit inside the source vector.
  if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
    Index = SubIndex;
    return true;
  }
  return false;
}
/// Return true if \p Mask inserts a contiguous subvector taken from one
/// source into an identity copy of the other source (both of \p NumSrcElts
/// lanes); on success \p NumSubElts / \p Index describe the inserted span.
bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
                                              int NumSrcElts, int &NumSubElts,
                                              int &Index) {
  int NumMaskElts = Mask.size();
  // Don't try to match if we're shuffling to a smaller size.
  if (NumMaskElts < NumSrcElts)
    return false;
  // TODO: We don't recognize self-insertion/widening.
  if (isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;
  // Determine which mask elements are attributed to which source.
  APInt UndefElts = APInt::getZero(NumMaskElts);
  APInt Src0Elts = APInt::getZero(NumMaskElts);
  APInt Src1Elts = APInt::getZero(NumMaskElts);
  bool Src0Identity = true;
  bool Src1Identity = true;
  for (int i = 0; i != NumMaskElts; ++i) {
    int M = Mask[i];
    if (M < 0) {
      UndefElts.setBit(i);
      continue;
    }
    if (M < NumSrcElts) {
      Src0Elts.setBit(i);
      Src0Identity &= (M == i);
      continue;
    }
    Src1Elts.setBit(i);
    Src1Identity &= (M == (i + NumSrcElts));
  }
  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
         "unknown shuffle elements");
  assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
         "2-source shuffle not found");
  // Determine lo/hi span ranges.
  // TODO: How should we handle undefs at the start of subvector insertions?
  int Src0Lo = Src0Elts.countTrailingZeros();
  int Src1Lo = Src1Elts.countTrailingZeros();
  int Src0Hi = NumMaskElts - Src0Elts.countLeadingZeros();
  int Src1Hi = NumMaskElts - Src1Elts.countLeadingZeros();
  // If src0 is in place, see if the src1 elements is inplace within its own
  // span.
  if (Src0Identity) {
    int NumSub1Elts = Src1Hi - Src1Lo;
    ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
    if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
      NumSubElts = NumSub1Elts;
      Index = Src1Lo;
      return true;
    }
  }
  // If src1 is in place, see if the src0 elements is inplace within its own
  // span.
  if (Src1Identity) {
    int NumSub0Elts = Src0Hi - Src0Lo;
    ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
    if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
      NumSubElts = NumSub0Elts;
      Index = Src0Lo;
      return true;
    }
  }
  return false;
}
/// Return true if this shuffle lengthens one source operand: an identity of
/// the source lanes followed exclusively by undef padding lanes.
bool ShuffleVectorInst::isIdentityWithPadding() const {
  // An undef mask operand carries no lane information to match against.
  if (isa<UndefValue>(Op<2>()))
    return false;
  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  // Padding requires the result to be strictly wider than the operands.
  if (NumMaskElts <= NumOpElts)
    return false;
  // The first part of the mask must choose elements from exactly 1 source op.
  ArrayRef<int> Mask = getShuffleMask();
  if (!isIdentityMaskImpl(Mask, NumOpElts))
    return false;
  // All extending must be with undef elements.
  for (int i = NumOpElts; i < NumMaskElts; ++i)
    if (Mask[i] != -1)
      return false;
  return true;
}
  2220. bool ShuffleVectorInst::isIdentityWithExtract() const {
  2221. if (isa<UndefValue>(Op<2>()))
  2222. return false;
  2223. // FIXME: Not currently possible to express a shuffle mask for a scalable
  2224. // vector for this case.
  2225. if (isa<ScalableVectorType>(getType()))
  2226. return false;
  2227. int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  2228. int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  2229. if (NumMaskElts >= NumOpElts)
  2230. return false;
  2231. return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
  2232. }
/// Return true if this shuffle concatenates its two (non-undef) operands into
/// a result of exactly twice their length.
bool ShuffleVectorInst::isConcat() const {
  // Vector concatenation is differentiated from identity with padding.
  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()) ||
      isa<UndefValue>(Op<2>()))
    return false;
  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts != NumOpElts * 2)
    return false;
  // Use the mask length rather than the operands' vector lengths here. We
  // already know that the shuffle returns a vector twice as long as the inputs,
  // and neither of the inputs are undef vectors. If the mask picks consecutive
  // elements from both inputs, then this is a concatenation of the inputs.
  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
}
/// Check that \p Mask replicates each of \p VF source lanes
/// \p ReplicationFactor times in order, i.e. it has the shape
/// <0,...,0, 1,...,1, ..., VF-1,...,VF-1> (undef lanes are allowed anywhere).
static bool isReplicationMaskWithParams(ArrayRef<int> Mask,
                                        int ReplicationFactor, int VF) {
  assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
         "Unexpected mask size.");
  for (int CurrElt : seq(0, VF)) {
    // Each consecutive ReplicationFactor-wide chunk must reference only the
    // current source lane (or undef).
    ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
    assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
           "Run out of mask?");
    Mask = Mask.drop_front(ReplicationFactor);
    if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
          return MaskElt == UndefMaskElem || MaskElt == CurrElt;
        }))
      return false;
  }
  assert(Mask.empty() && "Did not consume the whole mask?");
  return true;
}
/// Determine whether \p Mask is a replication mask for some factor/VF pair;
/// on success the discovered \p ReplicationFactor and \p VF are returned.
bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
                                          int &ReplicationFactor, int &VF) {
  // undef-less case is trivial.
  if (!llvm::is_contained(Mask, UndefMaskElem)) {
    // The run of leading zeros fixes the replication factor directly.
    ReplicationFactor =
        Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
    if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
      return false;
    VF = Mask.size() / ReplicationFactor;
    return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
  }
  // However, if the mask contains undef's, we have to enumerate possible tuples
  // and pick one. There are bounds on replication factor: [1, mask size]
  // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
  // Additionally, mask size is a replication factor multiplied by vector size,
  // which further significantly reduces the search space.
  // Before doing that, let's perform basic correctness checking first.
  int Largest = -1;
  for (int MaskElt : Mask) {
    if (MaskElt == UndefMaskElem)
      continue;
    // Elements must be in non-decreasing order.
    if (MaskElt < Largest)
      return false;
    Largest = std::max(Largest, MaskElt);
  }
  // Prefer larger replication factor if all else equal.
  for (int PossibleReplicationFactor :
       reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
    if (Mask.size() % PossibleReplicationFactor != 0)
      continue;
    int PossibleVF = Mask.size() / PossibleReplicationFactor;
    if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
                                     PossibleVF))
      continue;
    ReplicationFactor = PossibleReplicationFactor;
    VF = PossibleVF;
    return true;
  }
  return false;
}
/// Determine whether this shuffle's mask replicates the lanes of its first
/// operand; the operand width fixes \p VF, and \p ReplicationFactor follows
/// from the mask length.
bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
                                          int &VF) const {
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
  if (isa<ScalableVectorType>(getType()))
    return false;
  VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  if (ShuffleMask.size() % VF != 0)
    return false;
  ReplicationFactor = ShuffleMask.size() / VF;
  return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
}
  2322. bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF) {
  2323. if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
  2324. Mask.size() % VF != 0)
  2325. return false;
  2326. for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
  2327. ArrayRef<int> SubMask = Mask.slice(K, VF);
  2328. if (all_of(SubMask, [](int Idx) { return Idx == UndefMaskElem; }))
  2329. continue;
  2330. SmallBitVector Used(VF, false);
  2331. for_each(SubMask, [&Used, VF](int Idx) {
  2332. if (Idx != UndefMaskElem && Idx < VF)
  2333. Used.set(Idx);
  2334. });
  2335. if (!Used.all())
  2336. return false;
  2337. }
  2338. return true;
  2339. }
/// Return true if this shuffle reads from a single source and, chunked by
/// \p VF, each non-undef chunk uses every one of the first VF lanes (see the
/// static overload). (The previous comment here — "is a replication mask" —
/// described a different function and was a copy-paste error.)
bool ShuffleVectorInst::isOneUseSingleSourceMask(int VF) const {
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
  if (isa<ScalableVectorType>(getType()))
    return false;
  if (!isSingleSourceMask(ShuffleMask))
    return false;
  return isOneUseSingleSourceMask(ShuffleMask, VF);
}
  2350. //===----------------------------------------------------------------------===//
  2351. // InsertValueInst Class
  2352. //===----------------------------------------------------------------------===//
/// Shared constructor body: record the aggregate, the value to insert, and
/// the index path, after validating that the indexed slot's type matches.
void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");
  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");
  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;
  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}
/// Copy constructor: clones operands, the index path, and the optional-data
/// flags from \p IVI.
InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
  : Instruction(IVI.getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this), 2),
    Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
  SubclassOptionalData = IVI.SubclassOptionalData;
}
  2376. //===----------------------------------------------------------------------===//
  2377. // ExtractValueInst Class
  2378. //===----------------------------------------------------------------------===//
/// Shared constructor body: record the index path (the aggregate operand is
/// set by the UnaryInstruction base).
void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");
  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}
/// Copy constructor: clones the aggregate operand, the index path, and the
/// optional-data flags from \p EVI.
ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
  : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
    Indices(EVI.Indices) {
  SubclassOptionalData = EVI.SubclassOptionalData;
}
  2392. // getIndexedType - Returns the type of the element that would be extracted
  2393. // with an extractvalue instruction with the specified parameters.
  2394. //
  2395. // A null type is returned if the indices are invalid for the specified
  2396. // pointer type.
  2397. //
  2398. Type *ExtractValueInst::getIndexedType(Type *Agg,
  2399. ArrayRef<unsigned> Idxs) {
  2400. for (unsigned Index : Idxs) {
  2401. // We can't use CompositeType::indexValid(Index) here.
  2402. // indexValid() always returns true for arrays because getelementptr allows
  2403. // out-of-bounds indices. Since we don't allow those for extractvalue and
  2404. // insertvalue we need to check array indexing manually.
  2405. // Since the only other types we can index into are struct types it's just
  2406. // as easy to check those manually as well.
  2407. if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
  2408. if (Index >= AT->getNumElements())
  2409. return nullptr;
  2410. Agg = AT->getElementType();
  2411. } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
  2412. if (Index >= ST->getNumElements())
  2413. return nullptr;
  2414. Agg = ST->getElementType(Index);
  2415. } else {
  2416. // Not a valid type to index into.
  2417. return nullptr;
  2418. }
  2419. }
  2420. return const_cast<Type*>(Agg);
  2421. }
  2422. //===----------------------------------------------------------------------===//
  2423. // UnaryOperator Class
  2424. //===----------------------------------------------------------------------===//
/// Construct a unary operator of the given opcode and result type, inserting
/// it before \p InsertBefore (if non-null). Debug builds validate the
/// opcode/type combination via AssertOK().
UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             Instruction *InsertBefore)
  : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}
/// Construct a unary operator of the given opcode and result type, appending
/// it to \p InsertAtEnd. Debug builds validate the opcode/type combination
/// via AssertOK().
UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             BasicBlock *InsertAtEnd)
  : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}
  2441. UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
  2442. const Twine &Name,
  2443. Instruction *InsertBefore) {
  2444. return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
  2445. }
  2446. UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
  2447. const Twine &Name,
  2448. BasicBlock *InsertAtEnd) {
  2449. UnaryOperator *Res = Create(Op, S, Name);
  2450. Res->insertInto(InsertAtEnd, InsertAtEnd->end());
  2451. return Res;
  2452. }
/// Debug-build validation for a freshly constructed unary operator: checks
/// that the result type matches the operand type and is legal for the opcode
/// (FNeg requires a floating-point or FP-vector type). Compiles to nothing
/// in release (NDEBUG) builds.
void UnaryOperator::AssertOK() {
  Value *LHS = getOperand(0);
  (void)LHS;  // Silence warnings.
#ifndef NDEBUG
  switch (getOpcode()) {
  case FNeg:
    assert(getType() == LHS->getType() &&
           "Unary operation should return same type as operand!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
  2469. //===----------------------------------------------------------------------===//
  2470. // BinaryOperator Class
  2471. //===----------------------------------------------------------------------===//
/// Construct a binary operator of the given opcode and result type, inserting
/// it before \p InsertBefore (if non-null). Debug builds validate the
/// operand/result types via AssertOK().
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               Instruction *InsertBefore)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}
/// Construct a binary operator of the given opcode and result type, appending
/// it to \p InsertAtEnd. Debug builds validate the operand/result types via
/// AssertOK().
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               BasicBlock *InsertAtEnd)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertAtEnd) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}
/// Debug-build validation for a freshly constructed binary operator: checks
/// that both operands share a type, that the result type matches the operand
/// type, and that the types are legal for the opcode (integer ops require
/// integer or integer-vector types, FP ops require FP or FP-vector types).
/// The per-opcode switch compiles to nothing in release (NDEBUG) builds.
void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS;  // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
  2563. BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
  2564. const Twine &Name,
  2565. Instruction *InsertBefore) {
  2566. assert(S1->getType() == S2->getType() &&
  2567. "Cannot create binary operator with two operands of differing type!");
  2568. return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
  2569. }
  2570. BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
  2571. const Twine &Name,
  2572. BasicBlock *InsertAtEnd) {
  2573. BinaryOperator *Res = Create(Op, S1, S2, Name);
  2574. Res->insertInto(InsertAtEnd, InsertAtEnd->end());
  2575. return Res;
  2576. }
  2577. BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
  2578. Instruction *InsertBefore) {
  2579. Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  2580. return new BinaryOperator(Instruction::Sub,
  2581. zero, Op,
  2582. Op->getType(), Name, InsertBefore);
  2583. }
  2584. BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
  2585. BasicBlock *InsertAtEnd) {
  2586. Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  2587. return new BinaryOperator(Instruction::Sub,
  2588. zero, Op,
  2589. Op->getType(), Name, InsertAtEnd);
  2590. }
  2591. BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
  2592. Instruction *InsertBefore) {
  2593. Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  2594. return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore);
  2595. }
  2596. BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
  2597. BasicBlock *InsertAtEnd) {
  2598. Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  2599. return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd);
  2600. }
  2601. BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
  2602. Instruction *InsertBefore) {
  2603. Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  2604. return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore);
  2605. }
  2606. BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
  2607. BasicBlock *InsertAtEnd) {
  2608. Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  2609. return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd);
  2610. }
  2611. BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
  2612. Instruction *InsertBefore) {
  2613. Constant *C = Constant::getAllOnesValue(Op->getType());
  2614. return new BinaryOperator(Instruction::Xor, Op, C,
  2615. Op->getType(), Name, InsertBefore);
  2616. }
  2617. BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
  2618. BasicBlock *InsertAtEnd) {
  2619. Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
  2620. return new BinaryOperator(Instruction::Xor, Op, AllOnes,
  2621. Op->getType(), Name, InsertAtEnd);
  2622. }
  2623. // Exchange the two operands to this instruction. This instruction is safe to
  2624. // use on any binary instruction and does not modify the semantics of the
  2625. // instruction. If the instruction is order-dependent (SetLT f.e.), the opcode
  2626. // is changed.
  2627. bool BinaryOperator::swapOperands() {
  2628. if (!isCommutative())
  2629. return true; // Can't commute operands
  2630. Op<0>().swap(Op<1>());
  2631. return false;
  2632. }
  2633. //===----------------------------------------------------------------------===//
  2634. // FPMathOperator Class
  2635. //===----------------------------------------------------------------------===//
  2636. float FPMathOperator::getFPAccuracy() const {
  2637. const MDNode *MD =
  2638. cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
  2639. if (!MD)
  2640. return 0.0;
  2641. ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
  2642. return Accuracy->getValueAPF().convertToFloat();
  2643. }
  2644. //===----------------------------------------------------------------------===//
  2645. // CastInst Class
  2646. //===----------------------------------------------------------------------===//
  2647. // Just determine if this cast only deals with integral->integral conversion.
  2648. bool CastInst::isIntegerCast() const {
  2649. switch (getOpcode()) {
  2650. default: return false;
  2651. case Instruction::ZExt:
  2652. case Instruction::SExt:
  2653. case Instruction::Trunc:
  2654. return true;
  2655. case Instruction::BitCast:
  2656. return getOperand(0)->getType()->isIntegerTy() &&
  2657. getType()->isIntegerTy();
  2658. }
  2659. }
  2660. bool CastInst::isLosslessCast() const {
  2661. // Only BitCast can be lossless, exit fast if we're not BitCast
  2662. if (getOpcode() != Instruction::BitCast)
  2663. return false;
  2664. // Identity cast is always lossless
  2665. Type *SrcTy = getOperand(0)->getType();
  2666. Type *DstTy = getType();
  2667. if (SrcTy == DstTy)
  2668. return true;
  2669. // Pointer to pointer is always lossless.
  2670. if (SrcTy->isPointerTy())
  2671. return DstTy->isPointerTy();
  2672. return false; // Other types have no identity values
  2673. }
/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast. For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
/// Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
  switch (Opcode) {
  default: llvm_unreachable("Invalid CastOp");
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    // TODO: Target informations may give a more accurate answer here.
    return false;
  case Instruction::BitCast:
    return true;  // BitCast never modifies bits.
  case Instruction::PtrToInt:
    // No-op iff the pointer's integer representation is exactly as wide as
    // the destination integer type.
    return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
           DestTy->getScalarSizeInBits();
  case Instruction::IntToPtr:
    // Symmetric case: the source integer must match the pointer width.
    return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
           SrcTy->getScalarSizeInBits();
  }
}
/// Member form: determine whether this cast changes no bits, given the
/// module's DataLayout.
bool CastInst::isNoopCast(const DataLayout &DL) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}
/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// *  %F = firstOpcode SrcTy %x to MidTy
/// *  %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// *  %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
///
/// The *IntPtrTy parameters carry the DataLayout's pointer-sized integer type
/// for the corresponding pointer type, or null when unknown/not a pointer.
unsigned CastInst::isEliminableCastPair(
  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
  Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below.  The rows correspond to firstOp, the columns
  // correspond to secondOp.  In looking at the table below, keep in mind
  // the following cast properties:
  //
  //          Size Compare       Source               Destination
  // Operator  Src ? Size   Type       Sign         Type       Sign
  // -------- ------------ -------------------   ---------------------
  // TRUNC         >       Integer      Any        Integral     Any
  // ZEXT          <       Integral   Unsigned     Integer      Any
  // SEXT          <       Integral    Signed      Integer      Any
  // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
  // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
  // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
  // SITOFP       n/a      Integral    Signed      FloatPt      n/a
  // FPTRUNC       >       FloatPt      n/a        FloatPt      n/a
  // FPEXT         <       FloatPt      n/a        FloatPt      n/a
  // PTRTOINT     n/a      Pointer      n/a        Integral   Unsigned
  // INTTOPTR     n/a      Integral   Unsigned     Pointer      n/a
  // BITCAST       =       FirstClass   n/a        FirstClass   n/a
  // ADDRSPCST    n/a      Pointer      n/a        Pointer      n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc.  We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
    // N  X  X  U  S  F  F  N  X  N  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
    {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
    {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP         +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
    { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt          |
    {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
    { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
    {  5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast        |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  };

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)
      return 0;

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is floating point.
      if (DstTy->isFloatingPointTy())
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 6:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is a floating point.
      if (SrcTy->isFloatingPointTy())
        return secondOp;
      return 0;
    case 7: {
      // Disable inttoptr/ptrtoint optimization if enabled.
      if (DisableI2pP2iOpt)
        return 0;

      // Cannot simplify if address spaces are different!
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return 0;

      unsigned MidSize = MidTy->getScalarSizeInBits();
      // We can still fold this without knowing the actual sizes as long we
      // know that the intermediate pointer is the largest possible
      // pointer size.
      // FIXME: Is this always true?
      if (MidSize == 64)
        return Instruction::BitCast;

      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
        return 0;
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
      if (MidSize >= PtrSize)
        return Instruction::BitCast;
      return 0;
    }
    case 8: {
      // ext, trunc -> bitcast,    if the SrcTy and DstTy are the same
      // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcTy == DstTy)
        return Instruction::BitCast;
      if (SrcSize < DstSize)
        return firstOp;
      if (SrcSize > DstSize)
        return secondOp;
      return 0;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      if (!MidIntPtrTy)
        return 0;
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
      return 0;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14: {
      // bitcast, addrspacecast -> addrspacecast if the element type of
      // bitcast's source is the same as that of addrspacecast's destination.
      PointerType *SrcPtrTy = cast<PointerType>(SrcTy->getScalarType());
      PointerType *DstPtrTy = cast<PointerType>(DstTy->getScalarType());
      if (SrcPtrTy->hasSameElementTypeAs(DstPtrTy))
        return Instruction::AddrSpaceCast;
      return 0;
    }
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}
/// Factory: construct the CastInst subclass matching \p op and insert it
/// before \p InsertBefore (if non-null). The cast must already be valid.
CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
                           const Twine &Name, Instruction *InsertBefore) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertBefore);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
  default: llvm_unreachable("Invalid opcode provided");
  }
}
/// Factory: construct the CastInst subclass matching \p op and append it to
/// \p InsertAtEnd. The cast must already be valid.
CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
                           const Twine &Name, BasicBlock *InsertAtEnd) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertAtEnd);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertAtEnd);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertAtEnd);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertAtEnd);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertAtEnd);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertAtEnd);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertAtEnd);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertAtEnd);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertAtEnd);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertAtEnd);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertAtEnd);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertAtEnd);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
  default: llvm_unreachable("Invalid opcode provided");
  }
}
  2982. CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
  2983. const Twine &Name,
  2984. Instruction *InsertBefore) {
  2985. if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
  2986. return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  2987. return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
  2988. }
  2989. CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
  2990. const Twine &Name,
  2991. BasicBlock *InsertAtEnd) {
  2992. if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
  2993. return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  2994. return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
  2995. }
  2996. CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
  2997. const Twine &Name,
  2998. Instruction *InsertBefore) {
  2999. if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
  3000. return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  3001. return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
  3002. }
  3003. CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
  3004. const Twine &Name,
  3005. BasicBlock *InsertAtEnd) {
  3006. if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
  3007. return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  3008. return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
  3009. }
  3010. CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
  3011. const Twine &Name,
  3012. Instruction *InsertBefore) {
  3013. if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
  3014. return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  3015. return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
  3016. }
  3017. CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
  3018. const Twine &Name,
  3019. BasicBlock *InsertAtEnd) {
  3020. if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
  3021. return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  3022. return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
  3023. }
/// Create a ptrtoint when the destination is an integer (or integer vector);
/// otherwise a pointer bitcast or addrspacecast. Vector-ness and element
/// counts of source and destination must agree. Appends to \p InsertAtEnd.
CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
                                      const Twine &Name,
                                      BasicBlock *InsertAtEnd) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          cast<VectorType>(Ty)->getElementCount() ==
              cast<VectorType>(S->getType())->getElementCount()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
}
/// Create a BitCast or a PtrToInt cast instruction: a ptrtoint when the
/// destination is an integer (or integer vector), otherwise a pointer
/// bitcast or addrspacecast. Inserts before \p InsertBefore (if non-null).
CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
                                      const Twine &Name,
                                      Instruction *InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          cast<VectorType>(Ty)->getElementCount() ==
              cast<VectorType>(S->getType())->getElementCount()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
}
  3055. CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
  3056. Value *S, Type *Ty,
  3057. const Twine &Name,
  3058. BasicBlock *InsertAtEnd) {
  3059. assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  3060. assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
  3061. if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
  3062. return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
  3063. return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  3064. }
  3065. CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
  3066. Value *S, Type *Ty,
  3067. const Twine &Name,
  3068. Instruction *InsertBefore) {
  3069. assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  3070. assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
  3071. if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
  3072. return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
  3073. return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  3074. }
  3075. CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
  3076. const Twine &Name,
  3077. Instruction *InsertBefore) {
  3078. if (S->getType()->isPointerTy() && Ty->isIntegerTy())
  3079. return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
  3080. if (S->getType()->isIntegerTy() && Ty->isPointerTy())
  3081. return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
  3082. return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  3083. }
  3084. CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
  3085. bool isSigned, const Twine &Name,
  3086. Instruction *InsertBefore) {
  3087. assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
  3088. "Invalid integer cast");
  3089. unsigned SrcBits = C->getType()->getScalarSizeInBits();
  3090. unsigned DstBits = Ty->getScalarSizeInBits();
  3091. Instruction::CastOps opcode =
  3092. (SrcBits == DstBits ? Instruction::BitCast :
  3093. (SrcBits > DstBits ? Instruction::Trunc :
  3094. (isSigned ? Instruction::SExt : Instruction::ZExt)));
  3095. return Create(opcode, C, Ty, Name, InsertBefore);
  3096. }
  3097. CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
  3098. bool isSigned, const Twine &Name,
  3099. BasicBlock *InsertAtEnd) {
  3100. assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
  3101. "Invalid cast");
  3102. unsigned SrcBits = C->getType()->getScalarSizeInBits();
  3103. unsigned DstBits = Ty->getScalarSizeInBits();
  3104. Instruction::CastOps opcode =
  3105. (SrcBits == DstBits ? Instruction::BitCast :
  3106. (SrcBits > DstBits ? Instruction::Trunc :
  3107. (isSigned ? Instruction::SExt : Instruction::ZExt)));
  3108. return Create(opcode, C, Ty, Name, InsertAtEnd);
  3109. }
  3110. CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
  3111. const Twine &Name,
  3112. Instruction *InsertBefore) {
  3113. assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
  3114. "Invalid cast");
  3115. unsigned SrcBits = C->getType()->getScalarSizeInBits();
  3116. unsigned DstBits = Ty->getScalarSizeInBits();
  3117. Instruction::CastOps opcode =
  3118. (SrcBits == DstBits ? Instruction::BitCast :
  3119. (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
  3120. return Create(opcode, C, Ty, Name, InsertBefore);
  3121. }
  3122. CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
  3123. const Twine &Name,
  3124. BasicBlock *InsertAtEnd) {
  3125. assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
  3126. "Invalid cast");
  3127. unsigned SrcBits = C->getType()->getScalarSizeInBits();
  3128. unsigned DstBits = Ty->getScalarSizeInBits();
  3129. Instruction::CastOps opcode =
  3130. (SrcBits == DstBits ? Instruction::BitCast :
  3131. (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
  3132. return Create(opcode, C, Ty, Name, InsertAtEnd);
  3133. }
/// Return true if a 'bitcast' between SrcTy and DestTy would be legal:
/// both first-class, identical bit width, and pointers only within the same
/// address space.
bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
    return false;

  // Identical types are trivially bitcastable.
  if (SrcTy == DestTy)
    return true;

  // For vectors of equal length, legality reduces to legality of the
  // element-to-element cast, so strip the vector wrappers. (If the lengths
  // differ we fall through and compare total bit widths below.)
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Valid if casting the elements is valid.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }
    }
  }

  // Pointer-to-pointer bitcasts are legal only within one address space;
  // crossing address spaces requires an addrspacecast.
  if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
    if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
      return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
    }
  }

  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Could still have vectors of pointers if the number of elements doesn't
  // match
  if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
    return false;

  // Bit widths must match exactly (TypeSize comparison also distinguishes
  // fixed from scalable sizes).
  if (SrcBits != DestBits)
    return false;

  // No bitcasts involving the x86_mmx type.
  if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
    return false;

  return true;
}
  3165. bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
  3166. const DataLayout &DL) {
  3167. // ptrtoint and inttoptr are not allowed on non-integral pointers
  3168. if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
  3169. if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
  3170. return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
  3171. !DL.isNonIntegralPointerType(PtrTy));
  3172. if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
  3173. if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
  3174. return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
  3175. !DL.isNonIntegralPointerType(PtrTy));
  3176. return isBitCastable(SrcTy, DestTy);
  3177. }
// Provide a way to get a "cast" where the cast opcode is inferred from the
// types and size of the operand. This, basically, is a parallel of the
// logic in the castIsValid function below. This axiom should hold:
//   castIsValid(getCastOpcode(Val, Ty), Val, Ty)
// should not assert in castIsValid. In other words, this produces a "correct"
// casting opcode for the arguments passed to it.
Instruction::CastOps
CastInst::getCastOpcode(
  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
  Type *SrcTy = Src->getType();

  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
         "Only first class types are castable!");

  // Identical types: a no-op bitcast.
  if (SrcTy == DestTy)
    return BitCast;

  // FIXME: Check address space sizes here
  // Vectors of equal length cast element-by-element, so pick the opcode from
  // the element types.
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Find the appropriate opcode based on the
        // element types.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }

  // Get the bit sizes, we'll need these
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities ...
  if (DestTy->isIntegerTy()) {                    // Casting to integral
    if (SrcTy->isIntegerTy()) {                   // Casting from integral
      if (DestBits < SrcBits)
        return Trunc;                             // int -> smaller int
      else if (DestBits > SrcBits) {              // its an extension
        if (SrcIsSigned)
          return SExt;                            // signed -> SEXT
        else
          return ZExt;                            // unsigned -> ZEXT
      } else {
        return BitCast;                           // Same size, No-op cast
      }
    } else if (SrcTy->isFloatingPointTy()) {      // Casting from floating pt
      if (DestIsSigned)
        return FPToSI;                            // FP -> sint
      else
        return FPToUI;                            // FP -> uint
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to integer of different width");
      return BitCast;                             // Same size, no-op cast
    } else {
      assert(SrcTy->isPointerTy() &&
             "Casting from a value that is not first-class type");
      return PtrToInt;                            // ptr -> int
    }
  } else if (DestTy->isFloatingPointTy()) {       // Casting to floating pt
    if (SrcTy->isIntegerTy()) {                   // Casting from integral
      if (SrcIsSigned)
        return SIToFP;                            // sint -> FP
      else
        return UIToFP;                            // uint -> FP
    } else if (SrcTy->isFloatingPointTy()) {      // Casting from floating pt
      if (DestBits < SrcBits) {
        return FPTrunc;                           // FP -> smaller FP
      } else if (DestBits > SrcBits) {
        return FPExt;                             // FP -> larger FP
      } else {
        return BitCast;                           // same size, no-op cast
      }
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to floating point of different width");
      return BitCast;                             // same size, no-op cast
    }
    llvm_unreachable("Casting pointer or non-first class to float");
  } else if (DestTy->isVectorTy()) {
    // Scalar/mismatched-length vector to vector: only a same-width bitcast.
    assert(DestBits == SrcBits &&
           "Illegal cast to vector (wrong type or size)");
    return BitCast;
  } else if (DestTy->isPointerTy()) {
    if (SrcTy->isPointerTy()) {
      // Pointer-to-pointer needs addrspacecast across address spaces.
      if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
        return AddrSpaceCast;
      return BitCast;                             // ptr -> ptr
    } else if (SrcTy->isIntegerTy()) {
      return IntToPtr;                            // int -> ptr
    }
    llvm_unreachable("Casting pointer to other than pointer or int");
  } else if (DestTy->isX86_MMXTy()) {
    if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
      return BitCast;                             // 64-bit vector to MMX
    }
    llvm_unreachable("Illegal cast to X86_MMX");
  }
  llvm_unreachable("Casting to type that is not first-class");
}
  3273. //===----------------------------------------------------------------------===//
  3274. // CastInst SubClass Constructors
  3275. //===----------------------------------------------------------------------===//
/// Check that the construction parameters for a CastInst are correct. This
/// could be broken out into the separate constructors but it is useful to have
/// it in one place and to eliminate the redundant code for getting the sizes
/// of the types involved.
bool
CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
  // Only first-class, non-aggregate types participate in casts.
  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
      SrcTy->isAggregateType() || DstTy->isAggregateType())
    return false;

  // Get the size of the types in bits, and whether we are dealing
  // with vector types, we'll need this later.
  bool SrcIsVec = isa<VectorType>(SrcTy);
  bool DstIsVec = isa<VectorType>(DstTy);
  unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();

  // If these are vector types, get the lengths of the vectors (using zero for
  // scalar types means that checking that vector lengths match also checks that
  // scalars are not being converted to vectors or vectors to scalars).
  ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
                                : ElementCount::getFixed(0);
  ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
                                : ElementCount::getFixed(0);

  // Switch on the opcode provided
  switch (op) {
  default: return false; // This is an input error
  case Instruction::Trunc:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::ZExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::SExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::FPTrunc:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::FPExt:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::UIToFP:
  case Instruction::SIToFP:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC;
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC;
  case Instruction::PtrToInt:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
  case Instruction::IntToPtr:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
  case Instruction::BitCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());

    // BitCast implies a no-op cast of type only. No bits change.
    // However, you can't cast pointers to anything but pointers.
    if (!SrcPtrTy != !DstPtrTy)
      return false;

    // For non-pointer cases, the cast is okay if the source and destination bit
    // widths are identical.
    if (!SrcPtrTy)
      return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();

    // If both are pointers then the address spaces must match.
    if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
      return false;

    // A vector of pointers must have the same number of elements.
    if (SrcIsVec && DstIsVec)
      return SrcEC == DstEC;
    // A single-element pointer vector may be bitcast to/from a scalar pointer.
    if (SrcIsVec)
      return SrcEC == ElementCount::getFixed(1);
    if (DstIsVec)
      return DstEC == ElementCount::getFixed(1);

    return true;
  }
  case Instruction::AddrSpaceCast: {
    // Both sides must be pointers (or pointer vectors of matching length) in
    // *different* address spaces.
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    if (!SrcPtrTy)
      return false;
    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
    if (!DstPtrTy)
      return false;
    if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
      return false;
    return SrcEC == DstEC;
  }
  }
}
  3368. TruncInst::TruncInst(
  3369. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  3370. ) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
  3371. assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
  3372. }
  3373. TruncInst::TruncInst(
  3374. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  3375. ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
  3376. assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
  3377. }
  3378. ZExtInst::ZExtInst(
  3379. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  3380. ) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
  3381. assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
  3382. }
  3383. ZExtInst::ZExtInst(
  3384. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  3385. ) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
  3386. assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
  3387. }
  3388. SExtInst::SExtInst(
  3389. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  3390. ) : CastInst(Ty, SExt, S, Name, InsertBefore) {
  3391. assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
  3392. }
  3393. SExtInst::SExtInst(
  3394. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  3395. ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
  3396. assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
  3397. }
  3398. FPTruncInst::FPTruncInst(
  3399. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  3400. ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
  3401. assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
  3402. }
  3403. FPTruncInst::FPTruncInst(
  3404. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  3405. ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
  3406. assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
  3407. }
  3408. FPExtInst::FPExtInst(
  3409. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  3410. ) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
  3411. assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
  3412. }
  3413. FPExtInst::FPExtInst(
  3414. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  3415. ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
  3416. assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
  3417. }
  3418. UIToFPInst::UIToFPInst(
  3419. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  3420. ) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
  3421. assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
  3422. }
  3423. UIToFPInst::UIToFPInst(
  3424. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  3425. ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
  3426. assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
  3427. }
  3428. SIToFPInst::SIToFPInst(
  3429. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  3430. ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
  3431. assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
  3432. }
  3433. SIToFPInst::SIToFPInst(
  3434. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  3435. ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
  3436. assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
  3437. }
  3438. FPToUIInst::FPToUIInst(
  3439. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  3440. ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
  3441. assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
  3442. }
  3443. FPToUIInst::FPToUIInst(
  3444. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  3445. ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
  3446. assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
  3447. }
  3448. FPToSIInst::FPToSIInst(
  3449. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  3450. ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
  3451. assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
  3452. }
  3453. FPToSIInst::FPToSIInst(
  3454. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  3455. ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
  3456. assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
  3457. }
  3458. PtrToIntInst::PtrToIntInst(
  3459. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  3460. ) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
  3461. assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
  3462. }
  3463. PtrToIntInst::PtrToIntInst(
  3464. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  3465. ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
  3466. assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
  3467. }
  3468. IntToPtrInst::IntToPtrInst(
  3469. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  3470. ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
  3471. assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
  3472. }
  3473. IntToPtrInst::IntToPtrInst(
  3474. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  3475. ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
  3476. assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
  3477. }
  3478. BitCastInst::BitCastInst(
  3479. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  3480. ) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
  3481. assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
  3482. }
  3483. BitCastInst::BitCastInst(
  3484. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  3485. ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
  3486. assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
  3487. }
  3488. AddrSpaceCastInst::AddrSpaceCastInst(
  3489. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  3490. ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
  3491. assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
  3492. }
  3493. AddrSpaceCastInst::AddrSpaceCastInst(
  3494. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  3495. ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
  3496. assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
  3497. }
  3498. //===----------------------------------------------------------------------===//
  3499. // CmpInst Classes
  3500. //===----------------------------------------------------------------------===//
  3501. CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
  3502. Value *RHS, const Twine &Name, Instruction *InsertBefore,
  3503. Instruction *FlagsSource)
  3504. : Instruction(ty, op,
  3505. OperandTraits<CmpInst>::op_begin(this),
  3506. OperandTraits<CmpInst>::operands(this),
  3507. InsertBefore) {
  3508. Op<0>() = LHS;
  3509. Op<1>() = RHS;
  3510. setPredicate((Predicate)predicate);
  3511. setName(Name);
  3512. if (FlagsSource)
  3513. copyIRFlags(FlagsSource);
  3514. }
  3515. CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
  3516. Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
  3517. : Instruction(ty, op,
  3518. OperandTraits<CmpInst>::op_begin(this),
  3519. OperandTraits<CmpInst>::operands(this),
  3520. InsertAtEnd) {
  3521. Op<0>() = LHS;
  3522. Op<1>() = RHS;
  3523. setPredicate((Predicate)predicate);
  3524. setName(Name);
  3525. }
  3526. CmpInst *
  3527. CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
  3528. const Twine &Name, Instruction *InsertBefore) {
  3529. if (Op == Instruction::ICmp) {
  3530. if (InsertBefore)
  3531. return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
  3532. S1, S2, Name);
  3533. else
  3534. return new ICmpInst(CmpInst::Predicate(predicate),
  3535. S1, S2, Name);
  3536. }
  3537. if (InsertBefore)
  3538. return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
  3539. S1, S2, Name);
  3540. else
  3541. return new FCmpInst(CmpInst::Predicate(predicate),
  3542. S1, S2, Name);
  3543. }
  3544. CmpInst *
  3545. CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
  3546. const Twine &Name, BasicBlock *InsertAtEnd) {
  3547. if (Op == Instruction::ICmp) {
  3548. return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
  3549. S1, S2, Name);
  3550. }
  3551. return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
  3552. S1, S2, Name);
  3553. }
  3554. void CmpInst::swapOperands() {
  3555. if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
  3556. IC->swapOperands();
  3557. else
  3558. cast<FCmpInst>(this)->swapOperands();
  3559. }
  3560. bool CmpInst::isCommutative() const {
  3561. if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
  3562. return IC->isCommutative();
  3563. return cast<FCmpInst>(this)->isCommutative();
  3564. }
  3565. bool CmpInst::isEquality(Predicate P) {
  3566. if (ICmpInst::isIntPredicate(P))
  3567. return ICmpInst::isEquality(P);
  3568. if (FCmpInst::isFPPredicate(P))
  3569. return FCmpInst::isEquality(P);
  3570. llvm_unreachable("Unsupported predicate kind");
  3571. }
/// Return the predicate that yields the logically negated result for the same
/// operands. For FP predicates this flips both the relation and the
/// ordered/unordered quality (e.g. OGT <-> ULE), so NaN operands are handled
/// correctly.
CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown cmp predicate!");
  case ICMP_EQ: return ICMP_NE;
  case ICMP_NE: return ICMP_EQ;
  case ICMP_UGT: return ICMP_ULE;
  case ICMP_ULT: return ICMP_UGE;
  case ICMP_UGE: return ICMP_ULT;
  case ICMP_ULE: return ICMP_UGT;
  case ICMP_SGT: return ICMP_SLE;
  case ICMP_SLT: return ICMP_SGE;
  case ICMP_SGE: return ICMP_SLT;
  case ICMP_SLE: return ICMP_SGT;
  case FCMP_OEQ: return FCMP_UNE;
  case FCMP_ONE: return FCMP_UEQ;
  case FCMP_OGT: return FCMP_ULE;
  case FCMP_OLT: return FCMP_UGE;
  case FCMP_OGE: return FCMP_ULT;
  case FCMP_OLE: return FCMP_UGT;
  case FCMP_UEQ: return FCMP_ONE;
  case FCMP_UNE: return FCMP_OEQ;
  case FCMP_UGT: return FCMP_OLE;
  case FCMP_ULT: return FCMP_OGE;
  case FCMP_UGE: return FCMP_OLT;
  case FCMP_ULE: return FCMP_OGT;
  case FCMP_ORD: return FCMP_UNO;
  case FCMP_UNO: return FCMP_ORD;
  case FCMP_TRUE: return FCMP_FALSE;
  case FCMP_FALSE: return FCMP_TRUE;
  }
}
/// Return the textual IR mnemonic for \p Pred (e.g. "slt", "oge"), or
/// "unknown" for a value outside the predicate enumerations.
StringRef CmpInst::getPredicateName(Predicate Pred) {
  switch (Pred) {
  default: return "unknown";
  case FCmpInst::FCMP_FALSE: return "false";
  case FCmpInst::FCMP_OEQ: return "oeq";
  case FCmpInst::FCMP_OGT: return "ogt";
  case FCmpInst::FCMP_OGE: return "oge";
  case FCmpInst::FCMP_OLT: return "olt";
  case FCmpInst::FCMP_OLE: return "ole";
  case FCmpInst::FCMP_ONE: return "one";
  case FCmpInst::FCMP_ORD: return "ord";
  case FCmpInst::FCMP_UNO: return "uno";
  case FCmpInst::FCMP_UEQ: return "ueq";
  case FCmpInst::FCMP_UGT: return "ugt";
  case FCmpInst::FCMP_UGE: return "uge";
  case FCmpInst::FCMP_ULT: return "ult";
  case FCmpInst::FCMP_ULE: return "ule";
  case FCmpInst::FCMP_UNE: return "une";
  case FCmpInst::FCMP_TRUE: return "true";
  case ICmpInst::ICMP_EQ: return "eq";
  case ICmpInst::ICMP_NE: return "ne";
  case ICmpInst::ICMP_SGT: return "sgt";
  case ICmpInst::ICMP_SGE: return "sge";
  case ICmpInst::ICMP_SLT: return "slt";
  case ICmpInst::ICMP_SLE: return "sle";
  case ICmpInst::ICMP_UGT: return "ugt";
  case ICmpInst::ICMP_UGE: return "uge";
  case ICmpInst::ICMP_ULT: return "ult";
  case ICmpInst::ICMP_ULE: return "ule";
  }
}
  3634. ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
  3635. switch (pred) {
  3636. default: llvm_unreachable("Unknown icmp predicate!");
  3637. case ICMP_EQ: case ICMP_NE:
  3638. case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
  3639. return pred;
  3640. case ICMP_UGT: return ICMP_SGT;
  3641. case ICMP_ULT: return ICMP_SLT;
  3642. case ICMP_UGE: return ICMP_SGE;
  3643. case ICMP_ULE: return ICMP_SLE;
  3644. }
  3645. }
  3646. ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
  3647. switch (pred) {
  3648. default: llvm_unreachable("Unknown icmp predicate!");
  3649. case ICMP_EQ: case ICMP_NE:
  3650. case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
  3651. return pred;
  3652. case ICMP_SGT: return ICMP_UGT;
  3653. case ICMP_SLT: return ICMP_ULT;
  3654. case ICMP_SGE: return ICMP_UGE;
  3655. case ICMP_SLE: return ICMP_ULE;
  3656. }
  3657. }
/// Return the predicate that gives the same result when the two operands are
/// exchanged (e.g. SGT <-> SLT). Symmetric predicates (equality, ord/uno,
/// true/false) map to themselves.
CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown cmp predicate!");
  case ICMP_EQ: case ICMP_NE:
    return pred;
  case ICMP_SGT: return ICMP_SLT;
  case ICMP_SLT: return ICMP_SGT;
  case ICMP_SGE: return ICMP_SLE;
  case ICMP_SLE: return ICMP_SGE;
  case ICMP_UGT: return ICMP_ULT;
  case ICMP_ULT: return ICMP_UGT;
  case ICMP_UGE: return ICMP_ULE;
  case ICMP_ULE: return ICMP_UGE;
  case FCMP_FALSE: case FCMP_TRUE:
  case FCMP_OEQ: case FCMP_ONE:
  case FCMP_UEQ: case FCMP_UNE:
  case FCMP_ORD: case FCMP_UNO:
    return pred;
  case FCMP_OGT: return FCMP_OLT;
  case FCMP_OLT: return FCMP_OGT;
  case FCMP_OGE: return FCMP_OLE;
  case FCMP_OLE: return FCMP_OGE;
  case FCMP_UGT: return FCMP_ULT;
  case FCMP_ULT: return FCMP_UGT;
  case FCMP_UGE: return FCMP_ULE;
  case FCMP_ULE: return FCMP_UGE;
  }
}
  3686. bool CmpInst::isNonStrictPredicate(Predicate pred) {
  3687. switch (pred) {
  3688. case ICMP_SGE:
  3689. case ICMP_SLE:
  3690. case ICMP_UGE:
  3691. case ICMP_ULE:
  3692. case FCMP_OGE:
  3693. case FCMP_OLE:
  3694. case FCMP_UGE:
  3695. case FCMP_ULE:
  3696. return true;
  3697. default:
  3698. return false;
  3699. }
  3700. }
  3701. bool CmpInst::isStrictPredicate(Predicate pred) {
  3702. switch (pred) {
  3703. case ICMP_SGT:
  3704. case ICMP_SLT:
  3705. case ICMP_UGT:
  3706. case ICMP_ULT:
  3707. case FCMP_OGT:
  3708. case FCMP_OLT:
  3709. case FCMP_UGT:
  3710. case FCMP_ULT:
  3711. return true;
  3712. default:
  3713. return false;
  3714. }
  3715. }
  3716. CmpInst::Predicate CmpInst::getStrictPredicate(Predicate pred) {
  3717. switch (pred) {
  3718. case ICMP_SGE:
  3719. return ICMP_SGT;
  3720. case ICMP_SLE:
  3721. return ICMP_SLT;
  3722. case ICMP_UGE:
  3723. return ICMP_UGT;
  3724. case ICMP_ULE:
  3725. return ICMP_ULT;
  3726. case FCMP_OGE:
  3727. return FCMP_OGT;
  3728. case FCMP_OLE:
  3729. return FCMP_OLT;
  3730. case FCMP_UGE:
  3731. return FCMP_UGT;
  3732. case FCMP_ULE:
  3733. return FCMP_ULT;
  3734. default:
  3735. return pred;
  3736. }
  3737. }
  3738. CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
  3739. switch (pred) {
  3740. case ICMP_SGT:
  3741. return ICMP_SGE;
  3742. case ICMP_SLT:
  3743. return ICMP_SLE;
  3744. case ICMP_UGT:
  3745. return ICMP_UGE;
  3746. case ICMP_ULT:
  3747. return ICMP_ULE;
  3748. case FCMP_OGT:
  3749. return FCMP_OGE;
  3750. case FCMP_OLT:
  3751. return FCMP_OLE;
  3752. case FCMP_UGT:
  3753. return FCMP_UGE;
  3754. case FCMP_ULT:
  3755. return FCMP_ULE;
  3756. default:
  3757. return pred;
  3758. }
  3759. }
  3760. CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
  3761. assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
  3762. if (isStrictPredicate(pred))
  3763. return getNonStrictPredicate(pred);
  3764. if (isNonStrictPredicate(pred))
  3765. return getStrictPredicate(pred);
  3766. llvm_unreachable("Unknown predicate!");
  3767. }
  3768. CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
  3769. assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");
  3770. switch (pred) {
  3771. default:
  3772. llvm_unreachable("Unknown predicate!");
  3773. case CmpInst::ICMP_ULT:
  3774. return CmpInst::ICMP_SLT;
  3775. case CmpInst::ICMP_ULE:
  3776. return CmpInst::ICMP_SLE;
  3777. case CmpInst::ICMP_UGT:
  3778. return CmpInst::ICMP_SGT;
  3779. case CmpInst::ICMP_UGE:
  3780. return CmpInst::ICMP_SGE;
  3781. }
  3782. }
  3783. CmpInst::Predicate CmpInst::getUnsignedPredicate(Predicate pred) {
  3784. assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");
  3785. switch (pred) {
  3786. default:
  3787. llvm_unreachable("Unknown predicate!");
  3788. case CmpInst::ICMP_SLT:
  3789. return CmpInst::ICMP_ULT;
  3790. case CmpInst::ICMP_SLE:
  3791. return CmpInst::ICMP_ULE;
  3792. case CmpInst::ICMP_SGT:
  3793. return CmpInst::ICMP_UGT;
  3794. case CmpInst::ICMP_SGE:
  3795. return CmpInst::ICMP_UGE;
  3796. }
  3797. }
  3798. bool CmpInst::isUnsigned(Predicate predicate) {
  3799. switch (predicate) {
  3800. default: return false;
  3801. case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
  3802. case ICmpInst::ICMP_UGE: return true;
  3803. }
  3804. }
  3805. bool CmpInst::isSigned(Predicate predicate) {
  3806. switch (predicate) {
  3807. default: return false;
  3808. case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
  3809. case ICmpInst::ICMP_SGE: return true;
  3810. }
  3811. }
  3812. bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
  3813. ICmpInst::Predicate Pred) {
  3814. assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
  3815. switch (Pred) {
  3816. case ICmpInst::Predicate::ICMP_EQ:
  3817. return LHS.eq(RHS);
  3818. case ICmpInst::Predicate::ICMP_NE:
  3819. return LHS.ne(RHS);
  3820. case ICmpInst::Predicate::ICMP_UGT:
  3821. return LHS.ugt(RHS);
  3822. case ICmpInst::Predicate::ICMP_UGE:
  3823. return LHS.uge(RHS);
  3824. case ICmpInst::Predicate::ICMP_ULT:
  3825. return LHS.ult(RHS);
  3826. case ICmpInst::Predicate::ICMP_ULE:
  3827. return LHS.ule(RHS);
  3828. case ICmpInst::Predicate::ICMP_SGT:
  3829. return LHS.sgt(RHS);
  3830. case ICmpInst::Predicate::ICMP_SGE:
  3831. return LHS.sge(RHS);
  3832. case ICmpInst::Predicate::ICMP_SLT:
  3833. return LHS.slt(RHS);
  3834. case ICmpInst::Predicate::ICMP_SLE:
  3835. return LHS.sle(RHS);
  3836. default:
  3837. llvm_unreachable("Unexpected non-integer predicate.");
  3838. };
  3839. }
/// Evaluate the floating-point comparison `LHS <Pred> RHS` on constant
/// APFloat values. Ordered (O*) predicates are false when either operand is
/// NaN (cmpUnordered); unordered (U*) predicates are true in that case.
bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
                       FCmpInst::Predicate Pred) {
  // A single IEEE comparison yields one of: less-than, greater-than, equal,
  // or unordered (at least one NaN); every predicate is decided from it.
  APFloat::cmpResult R = LHS.compare(RHS);
  switch (Pred) {
  default:
    llvm_unreachable("Invalid FCmp Predicate");
  case FCmpInst::FCMP_FALSE:
    return false;
  case FCmpInst::FCMP_TRUE:
    return true;
  case FCmpInst::FCMP_UNO:
    return R == APFloat::cmpUnordered;
  case FCmpInst::FCMP_ORD:
    return R != APFloat::cmpUnordered;
  case FCmpInst::FCMP_UEQ:
    return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
  case FCmpInst::FCMP_OEQ:
    return R == APFloat::cmpEqual;
  case FCmpInst::FCMP_UNE:
    // "Not equal" includes the unordered case.
    return R != APFloat::cmpEqual;
  case FCmpInst::FCMP_ONE:
    return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_ULT:
    return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
  case FCmpInst::FCMP_OLT:
    return R == APFloat::cmpLessThan;
  case FCmpInst::FCMP_UGT:
    return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_OGT:
    return R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_ULE:
    // "Not greater" covers less-than, equal, and unordered.
    return R != APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_OLE:
    return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
  case FCmpInst::FCMP_UGE:
    // "Not less" covers greater-than, equal, and unordered.
    return R != APFloat::cmpLessThan;
  case FCmpInst::FCMP_OGE:
    return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
  }
}
  3880. CmpInst::Predicate CmpInst::getFlippedSignednessPredicate(Predicate pred) {
  3881. assert(CmpInst::isRelational(pred) &&
  3882. "Call only with non-equality predicates!");
  3883. if (isSigned(pred))
  3884. return getUnsignedPredicate(pred);
  3885. if (isUnsigned(pred))
  3886. return getSignedPredicate(pred);
  3887. llvm_unreachable("Unknown predicate!");
  3888. }
  3889. bool CmpInst::isOrdered(Predicate predicate) {
  3890. switch (predicate) {
  3891. default: return false;
  3892. case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
  3893. case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
  3894. case FCmpInst::FCMP_ORD: return true;
  3895. }
  3896. }
  3897. bool CmpInst::isUnordered(Predicate predicate) {
  3898. switch (predicate) {
  3899. default: return false;
  3900. case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
  3901. case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
  3902. case FCmpInst::FCMP_UNO: return true;
  3903. }
  3904. }
  3905. bool CmpInst::isTrueWhenEqual(Predicate predicate) {
  3906. switch(predicate) {
  3907. default: return false;
  3908. case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
  3909. case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
  3910. }
  3911. }
  3912. bool CmpInst::isFalseWhenEqual(Predicate predicate) {
  3913. switch(predicate) {
  3914. case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
  3915. case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
  3916. default: return false;
  3917. }
  3918. }
  3919. bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  3920. // If the predicates match, then we know the first condition implies the
  3921. // second is true.
  3922. if (Pred1 == Pred2)
  3923. return true;
  3924. switch (Pred1) {
  3925. default:
  3926. break;
  3927. case ICMP_EQ:
  3928. // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
  3929. return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
  3930. Pred2 == ICMP_SLE;
  3931. case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
  3932. return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
  3933. case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
  3934. return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
  3935. case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
  3936. return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
  3937. case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
  3938. return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
  3939. }
  3940. return false;
  3941. }
/// Returns true iff "A Pred1 B" being true guarantees "A Pred2 B" is false.
/// Pred1 implies Pred2 is false exactly when Pred1 implies the inverse of
/// Pred2 is true.
bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
}
  3945. //===----------------------------------------------------------------------===//
  3946. // SwitchInst Implementation
  3947. //===----------------------------------------------------------------------===//
/// Set up the hung-off operand list for a switch: operand 0 is the condition
/// value, operand 1 the default destination; case (value, successor) pairs
/// are appended later by addCase(). \p NumReserved is the initial operand
/// capacity (must be nonzero).
void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
  assert(Value && Default && NumReserved);
  ReservedSpace = NumReserved;
  // Start with just the two fixed operands; capacity covers future cases.
  setNumHungOffUseOperands(2);
  allocHungoffUses(ReservedSpace);
  Op<0>() = Value;
  Op<1>() = Default;
}
/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination. The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor can also autoinsert before another instruction.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertBefore) {
  // Reserve 2 fixed operands plus 2 per anticipated case.
  init(Value, Default, 2+NumCases*2);
}
/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination. The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor also autoinserts at the end of the specified BasicBlock.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertAtEnd) {
  // Reserve 2 fixed operands plus 2 per anticipated case.
  init(Value, Default, 2+NumCases*2);
}
  3976. SwitchInst::SwitchInst(const SwitchInst &SI)
  3977. : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
  3978. init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
  3979. setNumHungOffUseOperands(SI.getNumOperands());
  3980. Use *OL = getOperandList();
  3981. const Use *InOL = SI.getOperandList();
  3982. for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
  3983. OL[i] = InOL[i];
  3984. OL[i+1] = InOL[i+1];
  3985. }
  3986. SubclassOptionalData = SI.SubclassOptionalData;
  3987. }
/// addCase - Add an entry to the switch instruction...
///
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
  unsigned NewCaseIdx = getNumCases();
  unsigned OpNo = getNumOperands();
  // Each case occupies two operands: the case value and its successor.
  if (OpNo+2 > ReservedSpace)
    growOperands(); // Get more space!
  // Initialize some new operands.
  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+2);
  // Fill the freshly exposed pair through the CaseHandle accessors.
  CaseHandle Case(this, NewCaseIdx);
  Case.setValue(OnVal);
  Case.setSuccessor(Dest);
}
/// removeCase - This method removes the specified case and its successor
/// from the switch instruction.
SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
  unsigned idx = I->getCaseIndex();
  // Operands 0/1 are the condition and default dest; case pairs start at 2.
  assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();
  // Overwrite this case with the end of the list.
  if (2 + (idx + 1) * 2 != NumOps) {
    OL[2 + idx * 2] = OL[NumOps - 2];
    OL[2 + idx * 2 + 1] = OL[NumOps - 1];
  }
  // Nuke the last value.
  OL[NumOps-2].set(nullptr);
  OL[NumOps-2+1].set(nullptr);
  setNumHungOffUseOperands(NumOps-2);
  // Return an iterator at the same slot, which now holds the moved-in case
  // (or is the end iterator if the last case was removed).
  return CaseIt(this, idx);
}
  4020. /// growOperands - grow operands - This grows the operand list in response
  4021. /// to a push_back style of operation. This grows the number of ops by 3 times.
  4022. ///
  4023. void SwitchInst::growOperands() {
  4024. unsigned e = getNumOperands();
  4025. unsigned NumOps = e*3;
  4026. ReservedSpace = NumOps;
  4027. growHungoffUses(ReservedSpace);
  4028. }
  4029. MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
  4030. assert(Changed && "called only if metadata has changed");
  4031. if (!Weights)
  4032. return nullptr;
  4033. assert(SI.getNumSuccessors() == Weights->size() &&
  4034. "num of prof branch_weights must accord with num of successors");
  4035. bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });
  4036. if (AllZeroes || Weights->size() < 2)
  4037. return nullptr;
  4038. return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
  4039. }
  4040. void SwitchInstProfUpdateWrapper::init() {
  4041. MDNode *ProfileData = getBranchWeightMDNode(SI);
  4042. if (!ProfileData)
  4043. return;
  4044. if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
  4045. llvm_unreachable("number of prof branch_weights metadata operands does "
  4046. "not correspond to number of succesors");
  4047. }
  4048. SmallVector<uint32_t, 8> Weights;
  4049. if (!extractBranchWeights(ProfileData, Weights))
  4050. return;
  4051. this->Weights = std::move(Weights);
  4052. }
/// Remove case \p I, keeping the cached branch weights in sync with the
/// swap-with-last removal strategy of SwitchInst::removeCase().
SwitchInst::CaseIt
SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
  if (Weights) {
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
    Changed = true;
    // Copy the last case to the place of the removed one and shrink.
    // This is tightly coupled with the way SwitchInst::removeCase() removes
    // the cases in SwitchInst::removeCase(CaseIt).
    // Case index i maps to weight slot i+1 (slot 0 is the default dest).
    (*Weights)[I->getCaseIndex() + 1] = Weights->back();
    Weights->pop_back();
  }
  return SI.removeCase(I);
}
/// Add a case with optional branch weight \p W, keeping the cached weight
/// vector (if any) in sync with the switch's successor list.
void SwitchInstProfUpdateWrapper::addCase(
    ConstantInt *OnVal, BasicBlock *Dest,
    SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  SI.addCase(OnVal, Dest);
  if (!Weights && W && *W) {
    // First nonzero weight: materialize the vector zero-filled for all
    // existing successors, then record this case's weight in the last slot.
    Changed = true;
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
    (*Weights)[SI.getNumSuccessors() - 1] = *W;
  } else if (Weights) {
    // Vector already exists: append, treating "no weight" as zero.
    Changed = true;
    Weights->push_back(W.value_or(0));
  }
  if (Weights)
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
}
  4083. SymbolTableList<Instruction>::iterator
  4084. SwitchInstProfUpdateWrapper::eraseFromParent() {
  4085. // Instruction is erased. Mark as unchanged to not touch it in the destructor.
  4086. Changed = false;
  4087. if (Weights)
  4088. Weights->resize(0);
  4089. return SI.eraseFromParent();
  4090. }
  4091. SwitchInstProfUpdateWrapper::CaseWeightOpt
  4092. SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
  4093. if (!Weights)
  4094. return std::nullopt;
  4095. return (*Weights)[idx];
  4096. }
  4097. void SwitchInstProfUpdateWrapper::setSuccessorWeight(
  4098. unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  4099. if (!W)
  4100. return;
  4101. if (!Weights && *W)
  4102. Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
  4103. if (Weights) {
  4104. auto &OldW = (*Weights)[idx];
  4105. if (*W != OldW) {
  4106. Changed = true;
  4107. OldW = *W;
  4108. }
  4109. }
  4110. }
/// Static helper: read the weight for successor \p idx straight out of the
/// switch's !prof metadata without constructing a wrapper. Returns
/// std::nullopt when the metadata is absent or malformed.
SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,
                                                unsigned idx) {
  // Metadata operand 0 is the tag; weight for successor idx is at idx + 1.
  if (MDNode *ProfileData = getBranchWeightMDNode(SI))
    if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
      return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
          ->getValue()
          .getZExtValue();
  return std::nullopt;
}
  4121. //===----------------------------------------------------------------------===//
  4122. // IndirectBrInst Implementation
  4123. //===----------------------------------------------------------------------===//
/// Set up the hung-off operand list for an indirectbr: operand 0 is the
/// target address; destination blocks are appended later by
/// addDestination(). \p NumDests sizes the initial capacity.
void IndirectBrInst::init(Value *Address, unsigned NumDests) {
  assert(Address && Address->getType()->isPointerTy() &&
         "Address of indirectbr must be a pointer");
  // One slot for the address plus one per anticipated destination.
  ReservedSpace = 1+NumDests;
  setNumHungOffUseOperands(1);
  allocHungoffUses(ReservedSpace);
  Op<0>() = Address;
}
  4132. /// growOperands - grow operands - This grows the operand list in response
  4133. /// to a push_back style of operation. This grows the number of ops by 2 times.
  4134. ///
  4135. void IndirectBrInst::growOperands() {
  4136. unsigned e = getNumOperands();
  4137. unsigned NumOps = e*2;
  4138. ReservedSpace = NumOps;
  4139. growHungoffUses(ReservedSpace);
  4140. }
/// Create an indirectbr on \p Address with capacity for \p NumCases
/// destinations, inserted before \p InsertBefore.
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertBefore) {
  init(Address, NumCases);
}
/// Create an indirectbr on \p Address with capacity for \p NumCases
/// destinations, appended to \p InsertAtEnd.
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
  init(Address, NumCases);
}
  4153. IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
  4154. : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
  4155. nullptr, IBI.getNumOperands()) {
  4156. allocHungoffUses(IBI.getNumOperands());
  4157. Use *OL = getOperandList();
  4158. const Use *InOL = IBI.getOperandList();
  4159. for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
  4160. OL[i] = InOL[i];
  4161. SubclassOptionalData = IBI.SubclassOptionalData;
  4162. }
/// addDestination - Add a destination.
///
void IndirectBrInst::addDestination(BasicBlock *DestBB) {
  unsigned OpNo = getNumOperands();
  // Each destination takes one operand slot after the address (operand 0).
  if (OpNo+1 > ReservedSpace)
    growOperands(); // Get more space!
  // Initialize some new operands.
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+1);
  getOperandList()[OpNo] = DestBB;
}
/// removeDestination - This method removes the specified successor from the
/// indirectbr instruction.
void IndirectBrInst::removeDestination(unsigned idx) {
  assert(idx < getNumOperands()-1 && "Successor index out of range!");
  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();
  // Replace this value with the last one.
  // Successor idx lives at operand idx+1; operand 0 is the address.
  OL[idx+1] = OL[NumOps-1];
  // Nuke the last value.
  OL[NumOps-1].set(nullptr);
  setNumHungOffUseOperands(NumOps-1);
}
  4186. //===----------------------------------------------------------------------===//
  4187. // FreezeInst Implementation
  4188. //===----------------------------------------------------------------------===//
/// Create a freeze of \p S (result type matches the operand's type),
/// inserted before \p InsertBefore.
FreezeInst::FreezeInst(Value *S,
                       const Twine &Name, Instruction *InsertBefore)
    : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
  setName(Name);
}

/// Create a freeze of \p S (result type matches the operand's type),
/// appended to \p InsertAtEnd.
FreezeInst::FreezeInst(Value *S,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(S->getType(), Freeze, S, InsertAtEnd) {
  setName(Name);
}
  4199. //===----------------------------------------------------------------------===//
  4200. // cloneImpl() implementations
  4201. //===----------------------------------------------------------------------===//
// Define these methods here so vtables don't get emitted into every translation
// unit that uses these classes.

// Variable-operand instructions size their allocation by operand count via
// placement new; fixed-shape ones use a plain constructor.
GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
  return new (getNumOperands()) GetElementPtrInst(*this);
}

UnaryOperator *UnaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>());
}

BinaryOperator *BinaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>(), Op<1>());
}

FCmpInst *FCmpInst::cloneImpl() const {
  return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
}

ICmpInst *ICmpInst::cloneImpl() const {
  return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
}

ExtractValueInst *ExtractValueInst::cloneImpl() const {
  return new ExtractValueInst(*this);
}

InsertValueInst *InsertValueInst::cloneImpl() const {
  return new InsertValueInst(*this);
}

// Memory instructions re-create from their accessors and then copy the flags
// the constructor does not take.
AllocaInst *AllocaInst::cloneImpl() const {
  AllocaInst *Result = new AllocaInst(getAllocatedType(), getAddressSpace(),
                                      getOperand(0), getAlign());
  Result->setUsedWithInAlloca(isUsedWithInAlloca());
  Result->setSwiftError(isSwiftError());
  return Result;
}

LoadInst *LoadInst::cloneImpl() const {
  return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
                      getAlign(), getOrdering(), getSyncScopeID());
}

StoreInst *StoreInst::cloneImpl() const {
  return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
                       getOrdering(), getSyncScopeID());
}

AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
  AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(
      getOperand(0), getOperand(1), getOperand(2), getAlign(),
      getSuccessOrdering(), getFailureOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  Result->setWeak(isWeak());
  return Result;
}

AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
  AtomicRMWInst *Result =
      new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
                        getAlign(), getOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  return Result;
}

FenceInst *FenceInst::cloneImpl() const {
  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}

// Cast instructions all clone as (source operand, destination type).
TruncInst *TruncInst::cloneImpl() const {
  return new TruncInst(getOperand(0), getType());
}

ZExtInst *ZExtInst::cloneImpl() const {
  return new ZExtInst(getOperand(0), getType());
}

SExtInst *SExtInst::cloneImpl() const {
  return new SExtInst(getOperand(0), getType());
}

FPTruncInst *FPTruncInst::cloneImpl() const {
  return new FPTruncInst(getOperand(0), getType());
}

FPExtInst *FPExtInst::cloneImpl() const {
  return new FPExtInst(getOperand(0), getType());
}

UIToFPInst *UIToFPInst::cloneImpl() const {
  return new UIToFPInst(getOperand(0), getType());
}

SIToFPInst *SIToFPInst::cloneImpl() const {
  return new SIToFPInst(getOperand(0), getType());
}

FPToUIInst *FPToUIInst::cloneImpl() const {
  return new FPToUIInst(getOperand(0), getType());
}

FPToSIInst *FPToSIInst::cloneImpl() const {
  return new FPToSIInst(getOperand(0), getType());
}

PtrToIntInst *PtrToIntInst::cloneImpl() const {
  return new PtrToIntInst(getOperand(0), getType());
}

IntToPtrInst *IntToPtrInst::cloneImpl() const {
  return new IntToPtrInst(getOperand(0), getType());
}

BitCastInst *BitCastInst::cloneImpl() const {
  return new BitCastInst(getOperand(0), getType());
}

AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
  return new AddrSpaceCastInst(getOperand(0), getType());
}
// Call-like instructions with operand bundles need extra descriptor bytes
// appended to the operand allocation.
CallInst *CallInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) CallInst(*this);
  }
  return new(getNumOperands()) CallInst(*this);
}

SelectInst *SelectInst::cloneImpl() const {
  return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

VAArgInst *VAArgInst::cloneImpl() const {
  return new VAArgInst(getOperand(0), getType());
}

ExtractElementInst *ExtractElementInst::cloneImpl() const {
  return ExtractElementInst::Create(getOperand(0), getOperand(1));
}

InsertElementInst *InsertElementInst::cloneImpl() const {
  return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
  return new ShuffleVectorInst(getOperand(0), getOperand(1), getShuffleMask());
}

PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }

LandingPadInst *LandingPadInst::cloneImpl() const {
  return new LandingPadInst(*this);
}

// Terminators with variable operand counts size their placement-new
// allocation by operand count.
ReturnInst *ReturnInst::cloneImpl() const {
  return new(getNumOperands()) ReturnInst(*this);
}

BranchInst *BranchInst::cloneImpl() const {
  return new(getNumOperands()) BranchInst(*this);
}

SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }

IndirectBrInst *IndirectBrInst::cloneImpl() const {
  return new IndirectBrInst(*this);
}

InvokeInst *InvokeInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
  }
  return new(getNumOperands()) InvokeInst(*this);
}

CallBrInst *CallBrInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
  }
  return new (getNumOperands()) CallBrInst(*this);
}

ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }

CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
  return new (getNumOperands()) CleanupReturnInst(*this);
}

CatchReturnInst *CatchReturnInst::cloneImpl() const {
  return new (getNumOperands()) CatchReturnInst(*this);
}

CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
  return new CatchSwitchInst(*this);
}

FuncletPadInst *FuncletPadInst::cloneImpl() const {
  return new (getNumOperands()) FuncletPadInst(*this);
}

UnreachableInst *UnreachableInst::cloneImpl() const {
  LLVMContext &Context = getContext();
  return new UnreachableInst(Context);
}

FreezeInst *FreezeInst::cloneImpl() const {
  return new FreezeInst(getOperand(0));
}