- //===- InstructionCombining.cpp - Combine multiple instructions -----------===//
- //
- // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- // See https://llvm.org/LICENSE.txt for license information.
- // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- //
- //===----------------------------------------------------------------------===//
- //
- // InstructionCombining - Combine instructions to form fewer, simple
- // instructions. This pass does not modify the CFG. This pass is where
- // algebraic simplification happens.
- //
- // This pass combines things like:
- // %Y = add i32 %X, 1
- // %Z = add i32 %Y, 1
- // into:
- // %Z = add i32 %X, 2
- //
- // This is a simple worklist driven algorithm.
- //
- // This pass guarantees that the following canonicalizations are performed on
- // the program:
- // 1. If a binary operator has a constant operand, it is moved to the RHS
- // 2. Bitwise operators with constant operands are always grouped so that
- // shifts are performed first, then or's, then and's, then xor's.
- // 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
- // 4. All cmp instructions on boolean values are replaced with logical ops
- // 5. add X, X is represented as (X*2) => (X << 1)
- // 6. Multiplies with a power-of-two constant argument are transformed into
- // shifts.
- // ... etc.
- //
- //===----------------------------------------------------------------------===//
- #include "InstCombineInternal.h"
- #include "llvm-c/Initialization.h"
- #include "llvm-c/Transforms/InstCombine.h"
- #include "llvm/ADT/APInt.h"
- #include "llvm/ADT/ArrayRef.h"
- #include "llvm/ADT/DenseMap.h"
- #include "llvm/ADT/SmallPtrSet.h"
- #include "llvm/ADT/SmallVector.h"
- #include "llvm/ADT/Statistic.h"
- #include "llvm/Analysis/AliasAnalysis.h"
- #include "llvm/Analysis/AssumptionCache.h"
- #include "llvm/Analysis/BasicAliasAnalysis.h"
- #include "llvm/Analysis/BlockFrequencyInfo.h"
- #include "llvm/Analysis/CFG.h"
- #include "llvm/Analysis/ConstantFolding.h"
- #include "llvm/Analysis/EHPersonalities.h"
- #include "llvm/Analysis/GlobalsModRef.h"
- #include "llvm/Analysis/InstructionSimplify.h"
- #include "llvm/Analysis/LazyBlockFrequencyInfo.h"
- #include "llvm/Analysis/LoopInfo.h"
- #include "llvm/Analysis/MemoryBuiltins.h"
- #include "llvm/Analysis/OptimizationRemarkEmitter.h"
- #include "llvm/Analysis/ProfileSummaryInfo.h"
- #include "llvm/Analysis/TargetFolder.h"
- #include "llvm/Analysis/TargetLibraryInfo.h"
- #include "llvm/Analysis/TargetTransformInfo.h"
- #include "llvm/Analysis/Utils/Local.h"
- #include "llvm/Analysis/ValueTracking.h"
- #include "llvm/Analysis/VectorUtils.h"
- #include "llvm/IR/BasicBlock.h"
- #include "llvm/IR/CFG.h"
- #include "llvm/IR/Constant.h"
- #include "llvm/IR/Constants.h"
- #include "llvm/IR/DIBuilder.h"
- #include "llvm/IR/DataLayout.h"
- #include "llvm/IR/DebugInfo.h"
- #include "llvm/IR/DerivedTypes.h"
- #include "llvm/IR/Dominators.h"
- #include "llvm/IR/Function.h"
- #include "llvm/IR/GetElementPtrTypeIterator.h"
- #include "llvm/IR/IRBuilder.h"
- #include "llvm/IR/InstrTypes.h"
- #include "llvm/IR/Instruction.h"
- #include "llvm/IR/Instructions.h"
- #include "llvm/IR/IntrinsicInst.h"
- #include "llvm/IR/Intrinsics.h"
- #include "llvm/IR/LegacyPassManager.h"
- #include "llvm/IR/Metadata.h"
- #include "llvm/IR/Operator.h"
- #include "llvm/IR/PassManager.h"
- #include "llvm/IR/PatternMatch.h"
- #include "llvm/IR/Type.h"
- #include "llvm/IR/Use.h"
- #include "llvm/IR/User.h"
- #include "llvm/IR/Value.h"
- #include "llvm/IR/ValueHandle.h"
- #include "llvm/InitializePasses.h"
- #include "llvm/Support/Casting.h"
- #include "llvm/Support/CommandLine.h"
- #include "llvm/Support/Compiler.h"
- #include "llvm/Support/Debug.h"
- #include "llvm/Support/DebugCounter.h"
- #include "llvm/Support/ErrorHandling.h"
- #include "llvm/Support/KnownBits.h"
- #include "llvm/Support/raw_ostream.h"
- #include "llvm/Transforms/InstCombine/InstCombine.h"
- #include "llvm/Transforms/Utils/BasicBlockUtils.h"
- #include "llvm/Transforms/Utils/Local.h"
- #include <algorithm>
- #include <cassert>
- #include <cstdint>
- #include <memory>
- #include <optional>
- #include <string>
- #include <utility>
- #define DEBUG_TYPE "instcombine"
- #include "llvm/Transforms/Utils/InstructionWorklist.h"
- #include <optional>
- using namespace llvm;
- using namespace llvm::PatternMatch;
- STATISTIC(NumWorklistIterations,
- "Number of instruction combining iterations performed");
- STATISTIC(NumCombined , "Number of insts combined");
- STATISTIC(NumConstProp, "Number of constant folds");
- STATISTIC(NumDeadInst , "Number of dead inst eliminated");
- STATISTIC(NumSunkInst , "Number of instructions sunk");
- STATISTIC(NumExpand, "Number of expansions");
- STATISTIC(NumFactor , "Number of factorizations");
- STATISTIC(NumReassoc , "Number of reassociations");
- DEBUG_COUNTER(VisitCounter, "instcombine-visit",
- "Controls which instructions are visited");
- // FIXME: these limits eventually should be as low as 2.
- static constexpr unsigned InstCombineDefaultMaxIterations = 1000;
- #ifndef NDEBUG
- static constexpr unsigned InstCombineDefaultInfiniteLoopThreshold = 100;
- #else
- static constexpr unsigned InstCombineDefaultInfiniteLoopThreshold = 1000;
- #endif
- static cl::opt<bool>
- EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"),
- cl::init(true));
- static cl::opt<unsigned> MaxSinkNumUsers(
- "instcombine-max-sink-users", cl::init(32),
- cl::desc("Maximum number of undroppable users for instruction sinking"));
- static cl::opt<unsigned> LimitMaxIterations(
- "instcombine-max-iterations",
- cl::desc("Limit the maximum number of instruction combining iterations"),
- cl::init(InstCombineDefaultMaxIterations));
- static cl::opt<unsigned> InfiniteLoopDetectionThreshold(
- "instcombine-infinite-loop-threshold",
- cl::desc("Number of instruction combining iterations considered an "
- "infinite loop"),
- cl::init(InstCombineDefaultInfiniteLoopThreshold), cl::Hidden);
- static cl::opt<unsigned>
- MaxArraySize("instcombine-maxarray-size", cl::init(1024),
- cl::desc("Maximum array size considered when doing a combine"));
- // FIXME: Remove this flag when it is no longer necessary to convert
- // llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
- // increases variable availability at the cost of accuracy. Variables that
- // cannot be promoted by mem2reg or SROA will be described as living in memory
- // for their entire lifetime. However, passes like DSE and instcombine can
- // delete stores to the alloca, leading to misleading and inaccurate debug
- // information. This flag can be removed when those passes are fixed.
- static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
- cl::Hidden, cl::init(true));
- std::optional<Instruction *>
- InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) {
- // Handle target specific intrinsics
- if (II.getCalledFunction()->isTargetIntrinsic()) {
- return TTI.instCombineIntrinsic(*this, II);
- }
- return std::nullopt;
- }
- std::optional<Value *> InstCombiner::targetSimplifyDemandedUseBitsIntrinsic(
- IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
- bool &KnownBitsComputed) {
- // Handle target specific intrinsics
- if (II.getCalledFunction()->isTargetIntrinsic()) {
- return TTI.simplifyDemandedUseBitsIntrinsic(*this, II, DemandedMask, Known,
- KnownBitsComputed);
- }
- return std::nullopt;
- }
- std::optional<Value *> InstCombiner::targetSimplifyDemandedVectorEltsIntrinsic(
- IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2,
- APInt &UndefElts3,
- std::function<void(Instruction *, unsigned, APInt, APInt &)>
- SimplifyAndSetOp) {
- // Handle target specific intrinsics
- if (II.getCalledFunction()->isTargetIntrinsic()) {
- return TTI.simplifyDemandedVectorEltsIntrinsic(
- *this, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
- SimplifyAndSetOp);
- }
- return std::nullopt;
- }
- Value *InstCombinerImpl::EmitGEPOffset(User *GEP) {
- return llvm::emitGEPOffset(&Builder, DL, GEP);
- }
- /// Legal integers and common types are considered desirable. This is used to
- /// avoid creating instructions with types that may not be supported well by
- /// the backend.
- /// NOTE: This treats i8, i16 and i32 specially because they are common
- /// types in frontend languages.
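- /// For example, with a hypothetical DataLayout that only declares i32 and i64
- /// as legal, i8 and i16 are still reported as desirable here, while an
- /// uncommon width such as i24 is not.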
- bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
- switch (BitWidth) {
- case 8:
- case 16:
- case 32:
- return true;
- default:
- return DL.isLegalInteger(BitWidth);
- }
- }
- /// Return true if it is desirable to convert an integer computation from a
- /// given bit width to a new bit width.
- /// We don't want to convert from a legal or desirable type (like i8) to an
- /// illegal type or from a smaller to a larger illegal type. A width of '1'
- /// is always treated as a desirable type because i1 is a fundamental type in
- /// IR, and there are many specialized optimizations for i1 types.
- /// Common/desirable widths are equally treated as legal to convert to, in
- /// order to open up more combining opportunities.
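- /// For example, assuming a typical 64-bit DataLayout (i8/i16/i32/i64 legal),
- /// i17 -> i32 and i160 -> i64 are accepted, while i32 -> i17 and i64 -> i160
- /// are rejected.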
- bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
- unsigned ToWidth) const {
- bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
- bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
- // Convert to desirable widths even if they are not legal types.
- // Only shrink types, to prevent infinite loops.
- if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
- return true;
- // If this is a legal or desirable integer source type, and the result would
- // be an illegal type, don't do the transformation.
- if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
- return false;
- // Otherwise, if both are illegal, do not increase the size of the result. We
- // do allow things like i160 -> i64, but not i64 -> i160.
- if (!FromLegal && !ToLegal && ToWidth > FromWidth)
- return false;
- return true;
- }
- /// Return true if it is desirable to convert a computation from 'From' to 'To'.
- /// We don't want to convert from a legal to an illegal type or from a smaller
- /// to a larger illegal type. i1 is always treated as a legal type because it is
- /// a fundamental type in IR, and there are many specialized optimizations for
- /// i1 types.
- bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
- // TODO: This could be extended to allow vectors. Datalayout changes might be
- // needed to properly support that.
- if (!From->isIntegerTy() || !To->isIntegerTy())
- return false;
- unsigned FromWidth = From->getPrimitiveSizeInBits();
- unsigned ToWidth = To->getPrimitiveSizeInBits();
- return shouldChangeType(FromWidth, ToWidth);
- }
- // Return true if No Signed Wrap should be maintained for I.
- // The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
- // where both B and C should be ConstantInts, results in a constant that does
- // not overflow. This function only handles the Add and Sub opcodes. For
- // all other opcodes, the function conservatively returns false.
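- // Illustrative examples: for "(X +nsw 3) +nsw 4", 3 + 4 does not overflow the
- // operand type, so nsw may be kept on the reassociated add; for i8 operands
- // "(X +nsw 127) +nsw 1", 127 + 1 overflows and nsw must be dropped.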
- static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
- auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
- if (!OBO || !OBO->hasNoSignedWrap())
- return false;
- // We reason about Add and Sub only.
- Instruction::BinaryOps Opcode = I.getOpcode();
- if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
- return false;
- const APInt *BVal, *CVal;
- if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
- return false;
- bool Overflow = false;
- if (Opcode == Instruction::Add)
- (void)BVal->sadd_ov(*CVal, Overflow);
- else
- (void)BVal->ssub_ov(*CVal, Overflow);
- return !Overflow;
- }
- static bool hasNoUnsignedWrap(BinaryOperator &I) {
- auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
- return OBO && OBO->hasNoUnsignedWrap();
- }
- static bool hasNoSignedWrap(BinaryOperator &I) {
- auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
- return OBO && OBO->hasNoSignedWrap();
- }
- /// Conservatively clears subclassOptionalData after a reassociation or
- /// commutation. Fast-math flags are the exception; when applicable they are
- /// preserved.
- static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
- FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
- if (!FPMO) {
- I.clearSubclassOptionalData();
- return;
- }
- FastMathFlags FMF = I.getFastMathFlags();
- I.clearSubclassOptionalData();
- I.setFastMathFlags(FMF);
- }
- /// Combine constant operands of associative operations either before or after a
- /// cast to eliminate one of the associative operations:
- /// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
- /// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
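- /// Illustrative instance with 'or' and zext (constants folded in the wider
- /// destination type): or (zext i8 (or i8 %X, 3) to i32), 5
- /// --> or (zext i8 %X to i32), 7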
- static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
- InstCombinerImpl &IC) {
- auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
- if (!Cast || !Cast->hasOneUse())
- return false;
- // TODO: Enhance logic for other casts and remove this check.
- auto CastOpcode = Cast->getOpcode();
- if (CastOpcode != Instruction::ZExt)
- return false;
- // TODO: Enhance logic for other BinOps and remove this check.
- if (!BinOp1->isBitwiseLogicOp())
- return false;
- auto AssocOpcode = BinOp1->getOpcode();
- auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
- if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
- return false;
- Constant *C1, *C2;
- if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
- !match(BinOp2->getOperand(1), m_Constant(C2)))
- return false;
- // TODO: This assumes a zext cast.
- // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
- // to the destination type might lose bits.
- // Fold the constants together in the destination type:
- // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
- Type *DestTy = C1->getType();
- Constant *CastC2 = ConstantExpr::getCast(CastOpcode, C2, DestTy);
- Constant *FoldedC = ConstantExpr::get(AssocOpcode, C1, CastC2);
- IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
- IC.replaceOperand(*BinOp1, 1, FoldedC);
- return true;
- }
- // Simplifies an IntToPtr/PtrToInt round-trip cast to a bit-or-pointer cast:
- // inttoptr ( ptrtoint (x) ) --> x
- Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
- auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
- if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
- DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
- auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
- Type *CastTy = IntToPtr->getDestTy();
- if (PtrToInt &&
- CastTy->getPointerAddressSpace() ==
- PtrToInt->getSrcTy()->getPointerAddressSpace() &&
- DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
- DL.getTypeSizeInBits(PtrToInt->getDestTy())) {
- return CastInst::CreateBitOrPointerCast(PtrToInt->getOperand(0), CastTy,
- "", PtrToInt);
- }
- }
- return nullptr;
- }
- /// This performs a few simplifications for operators that are associative or
- /// commutative:
- ///
- /// Commutative operators:
- ///
- /// 1. Order operands such that they are listed from right (least complex) to
- /// left (most complex). This puts constants before unary operators before
- /// binary operators.
- ///
- /// Associative operators:
- ///
- /// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
- /// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
- ///
- /// Associative and commutative operators:
- ///
- /// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
- /// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
- /// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
- /// if C1 and C2 are constants.
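- /// For instance, transform #2 applied to integer add turns "(A + 5) + 7"
- /// into "A + (5 + 7)", which constant-folds to "A + 12".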
- bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
- Instruction::BinaryOps Opcode = I.getOpcode();
- bool Changed = false;
- do {
- // Order operands such that they are listed from right (least complex) to
- // left (most complex). This puts constants before unary operators before
- // binary operators.
- if (I.isCommutative() && getComplexity(I.getOperand(0)) <
- getComplexity(I.getOperand(1)))
- Changed = !I.swapOperands();
- BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
- BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
- if (I.isAssociative()) {
- // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
- if (Op0 && Op0->getOpcode() == Opcode) {
- Value *A = Op0->getOperand(0);
- Value *B = Op0->getOperand(1);
- Value *C = I.getOperand(1);
- // Does "B op C" simplify?
- if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
- // It simplifies to V. Form "A op V".
- replaceOperand(I, 0, A);
- replaceOperand(I, 1, V);
- bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
- bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);
- // Conservatively clear all optional flags since they may not be
- // preserved by the reassociation. Reset nsw/nuw based on the above
- // analysis.
- ClearSubclassDataAfterReassociation(I);
- // Note: this is only valid because simplifyBinOp doesn't look at
- // the operands to Op0.
- if (IsNUW)
- I.setHasNoUnsignedWrap(true);
- if (IsNSW)
- I.setHasNoSignedWrap(true);
- Changed = true;
- ++NumReassoc;
- continue;
- }
- }
- // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
- if (Op1 && Op1->getOpcode() == Opcode) {
- Value *A = I.getOperand(0);
- Value *B = Op1->getOperand(0);
- Value *C = Op1->getOperand(1);
- // Does "A op B" simplify?
- if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
- // It simplifies to V. Form "V op C".
- replaceOperand(I, 0, V);
- replaceOperand(I, 1, C);
- // Conservatively clear the optional flags, since they may not be
- // preserved by the reassociation.
- ClearSubclassDataAfterReassociation(I);
- Changed = true;
- ++NumReassoc;
- continue;
- }
- }
- }
- if (I.isAssociative() && I.isCommutative()) {
- if (simplifyAssocCastAssoc(&I, *this)) {
- Changed = true;
- ++NumReassoc;
- continue;
- }
- // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
- if (Op0 && Op0->getOpcode() == Opcode) {
- Value *A = Op0->getOperand(0);
- Value *B = Op0->getOperand(1);
- Value *C = I.getOperand(1);
- // Does "C op A" simplify?
- if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
- // It simplifies to V. Form "V op B".
- replaceOperand(I, 0, V);
- replaceOperand(I, 1, B);
- // Conservatively clear the optional flags, since they may not be
- // preserved by the reassociation.
- ClearSubclassDataAfterReassociation(I);
- Changed = true;
- ++NumReassoc;
- continue;
- }
- }
- // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
- if (Op1 && Op1->getOpcode() == Opcode) {
- Value *A = I.getOperand(0);
- Value *B = Op1->getOperand(0);
- Value *C = Op1->getOperand(1);
- // Does "C op A" simplify?
- if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
- // It simplifies to V. Form "B op V".
- replaceOperand(I, 0, B);
- replaceOperand(I, 1, V);
- // Conservatively clear the optional flags, since they may not be
- // preserved by the reassociation.
- ClearSubclassDataAfterReassociation(I);
- Changed = true;
- ++NumReassoc;
- continue;
- }
- }
- // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
- // if C1 and C2 are constants.
- Value *A, *B;
- Constant *C1, *C2, *CRes;
- if (Op0 && Op1 &&
- Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
- match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
- match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
- (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
- bool IsNUW = hasNoUnsignedWrap(I) &&
- hasNoUnsignedWrap(*Op0) &&
- hasNoUnsignedWrap(*Op1);
- BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
- BinaryOperator::CreateNUW(Opcode, A, B) :
- BinaryOperator::Create(Opcode, A, B);
- if (isa<FPMathOperator>(NewBO)) {
- FastMathFlags Flags = I.getFastMathFlags();
- Flags &= Op0->getFastMathFlags();
- Flags &= Op1->getFastMathFlags();
- NewBO->setFastMathFlags(Flags);
- }
- InsertNewInstWith(NewBO, I);
- NewBO->takeName(Op1);
- replaceOperand(I, 0, NewBO);
- replaceOperand(I, 1, CRes);
- // Conservatively clear the optional flags, since they may not be
- // preserved by the reassociation.
- ClearSubclassDataAfterReassociation(I);
- if (IsNUW)
- I.setHasNoUnsignedWrap(true);
- Changed = true;
- continue;
- }
- }
- // No further simplifications.
- return Changed;
- } while (true);
- }
- /// Return whether "X LOp (Y ROp Z)" is always equal to
- /// "(X LOp Y) ROp (X LOp Z)".
- static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
- Instruction::BinaryOps ROp) {
- // X & (Y | Z) <--> (X & Y) | (X & Z)
- // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
- if (LOp == Instruction::And)
- return ROp == Instruction::Or || ROp == Instruction::Xor;
- // X | (Y & Z) <--> (X | Y) & (X | Z)
- if (LOp == Instruction::Or)
- return ROp == Instruction::And;
- // X * (Y + Z) <--> (X * Y) + (X * Z)
- // X * (Y - Z) <--> (X * Y) - (X * Z)
- if (LOp == Instruction::Mul)
- return ROp == Instruction::Add || ROp == Instruction::Sub;
- return false;
- }
- /// Return whether "(X LOp Y) ROp Z" is always equal to
- /// "(X ROp Z) LOp (Y ROp Z)".
- static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
- Instruction::BinaryOps ROp) {
- if (Instruction::isCommutative(ROp))
- return leftDistributesOverRight(ROp, LOp);
- // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
- return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp);
- // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
- // but this requires knowing that the addition does not overflow and other
- // such subtleties.
- }
- /// This function returns identity value for given opcode, which can be used to
- /// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
- static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
- if (isa<Constant>(V))
- return nullptr;
- return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
- }
- /// This function predicates factorization using distributive laws. By default,
- /// it just returns the 'Op' inputs. But for special-cases like
- /// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
- /// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
- /// allow more factorization opportunities.
- static Instruction::BinaryOps
- getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
- Value *&LHS, Value *&RHS) {
- assert(Op && "Expected a binary operator");
- LHS = Op->getOperand(0);
- RHS = Op->getOperand(1);
- if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
- Constant *C;
- if (match(Op, m_Shl(m_Value(), m_Constant(C)))) {
- // X << C --> X * (1 << C)
- RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), C);
- return Instruction::Mul;
- }
- // TODO: We can add other conversions e.g. shr => div etc.
- }
- return Op->getOpcode();
- }
- /// This tries to simplify binary operations by factorizing out common terms
- /// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
- static Value *tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ,
- InstCombiner::BuilderTy &Builder,
- Instruction::BinaryOps InnerOpcode, Value *A,
- Value *B, Value *C, Value *D) {
- assert(A && B && C && D && "All values must be provided");
- Value *V = nullptr;
- Value *RetVal = nullptr;
- Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
- Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
- // Does "X op' Y" always equal "Y op' X"?
- bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
- // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
- if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
- // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
- // commutative case, "(A op' B) op (C op' A)"?
- if (A == C || (InnerCommutative && A == D)) {
- if (A != C)
- std::swap(C, D);
- // Consider forming "A op' (B op D)".
- // If "B op D" simplifies then it can be formed with no cost.
- V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
- // If "B op D" doesn't simplify then only go on if one of the existing
- // operations "A op' B" and "C op' D" will be zapped as no longer used.
- if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
- V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
- if (V)
- RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
- }
- }
- // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
- if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
- // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
- // commutative case, "(A op' B) op (B op' D)"?
- if (B == D || (InnerCommutative && B == C)) {
- if (B != D)
- std::swap(C, D);
- // Consider forming "(A op C) op' B".
- // If "A op C" simplifies then it can be formed with no cost.
- V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
- // If "A op C" doesn't simplify then only go on if one of the existing
- // operations "A op' B" and "C op' D" will be zapped as no longer used.
- if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
- V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
- if (V)
- RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
- }
- }
- if (!RetVal)
- return nullptr;
- ++NumFactor;
- RetVal->takeName(&I);
- // Try to add no-overflow flags to the final value.
- if (isa<OverflowingBinaryOperator>(RetVal)) {
- bool HasNSW = false;
- bool HasNUW = false;
- if (isa<OverflowingBinaryOperator>(&I)) {
- HasNSW = I.hasNoSignedWrap();
- HasNUW = I.hasNoUnsignedWrap();
- }
- if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
- HasNSW &= LOBO->hasNoSignedWrap();
- HasNUW &= LOBO->hasNoUnsignedWrap();
- }
- if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
- HasNSW &= ROBO->hasNoSignedWrap();
- HasNUW &= ROBO->hasNoUnsignedWrap();
- }
- if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
- // We can propagate 'nsw' if we know that
- // %Y = mul nsw i16 %X, C
- // %Z = add nsw i16 %Y, %X
- // =>
- // %Z = mul nsw i16 %X, C+1
- //
- // iff C+1 isn't INT_MIN
- const APInt *CInt;
- if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
- cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
- // nuw can be propagated with any constant or nuw value.
- cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
- }
- }
- return RetVal;
- }
- Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {
- Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
- BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
- BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
- Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
- Value *A, *B, *C, *D;
- Instruction::BinaryOps LHSOpcode, RHSOpcode;
- if (Op0)
- LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B);
- if (Op1)
- RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D);
- // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
- // a common term.
- if (Op0 && Op1 && LHSOpcode == RHSOpcode)
- if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
- return V;
- // The instruction has the form "(A op' B) op (C)". Try to factorize common
- // term.
- if (Op0)
- if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
- if (Value *V =
- tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
- return V;
- // The instruction has the form "(B) op (C op' D)". Try to factorize common
- // term.
- if (Op1)
- if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
- if (Value *V =
- tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
- return V;
- return nullptr;
- }
- /// This tries to simplify binary operations which some other binary operation
- /// distributes over, either by factorizing out common terms
- /// (e.g. "(A*B)+(A*C)" -> "A*(B+C)") or by expanding out if this results in
- /// simplifications (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win).
- /// Returns the simplified value, or null if it didn't simplify.
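- /// The expansion direction also fires when one half reduces to the inner
- /// opcode's identity; e.g. "(~C & B) | C" expands to "(~C | C) & (B | C)",
- /// and since "~C | C" is -1 (the identity of 'and'), the result is "B | C".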
- Value *InstCombinerImpl::foldUsingDistributiveLaws(BinaryOperator &I) {
- Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
- BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
- BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
- Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
- // Factorization.
- if (Value *R = tryFactorizationFolds(I))
- return R;
- // Expansion.
- if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
- // The instruction has the form "(A op' B) op C". See if expanding it out
- // to "(A op C) op' (B op C)" results in simplifications.
- Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
- Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
- // Disable the use of undef because it's not safe to distribute undef.
- auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
- Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
- Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);
- // Do "A op C" and "B op C" both simplify?
- if (L && R) {
- // They do! Return "L op' R".
- ++NumExpand;
- C = Builder.CreateBinOp(InnerOpcode, L, R);
- C->takeName(&I);
- return C;
- }
- // Does "A op C" simplify to the identity value for the inner opcode?
- if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
- // They do! Return "B op C".
- ++NumExpand;
- C = Builder.CreateBinOp(TopLevelOpcode, B, C);
- C->takeName(&I);
- return C;
- }
- // Does "B op C" simplify to the identity value for the inner opcode?
- if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
- // They do! Return "A op C".
- ++NumExpand;
- C = Builder.CreateBinOp(TopLevelOpcode, A, C);
- C->takeName(&I);
- return C;
- }
- }
- if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
- // The instruction has the form "A op (B op' C)". See if expanding it out
- // to "(A op B) op' (A op C)" results in simplifications.
- Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
- Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
- // Disable the use of undef because it's not safe to distribute undef.
- auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
- Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
- Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
- // Do "A op B" and "A op C" both simplify?
- if (L && R) {
- // They do! Return "L op' R".
- ++NumExpand;
- A = Builder.CreateBinOp(InnerOpcode, L, R);
- A->takeName(&I);
- return A;
- }
- // Does "A op B" simplify to the identity value for the inner opcode?
- if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
- // They do! Return "A op C".
- ++NumExpand;
- A = Builder.CreateBinOp(TopLevelOpcode, A, C);
- A->takeName(&I);
- return A;
- }
- // Does "A op C" simplify to the identity value for the inner opcode?
- if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
- // They do! Return "A op B".
- ++NumExpand;
- A = Builder.CreateBinOp(TopLevelOpcode, A, B);
- A->takeName(&I);
- return A;
- }
- }
- return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
- }
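- /// Try to push a binary operation into the arms of a select operand, e.g.
- /// "(Cond ? 4 : 8) * 2" --> "Cond ? 8 : 16" when both arms constant-fold
- /// (and the select has no other uses).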
- Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
- Value *LHS,
- Value *RHS) {
- Value *A, *B, *C, *D, *E, *F;
- bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
- bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
- if (!LHSIsSelect && !RHSIsSelect)
- return nullptr;
- FastMathFlags FMF;
- BuilderTy::FastMathFlagGuard Guard(Builder);
- if (isa<FPMathOperator>(&I)) {
- FMF = I.getFastMathFlags();
- Builder.setFastMathFlags(FMF);
- }
- Instruction::BinaryOps Opcode = I.getOpcode();
- SimplifyQuery Q = SQ.getWithInstruction(&I);
- Value *Cond, *True = nullptr, *False = nullptr;
- // Special-case for add/negate combination. Replace the zero in the negation
- // with the trailing add operand:
- // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
- // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
- auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
- // We need an 'add' and exactly 1 arm of the select to have been simplified.
- if (Opcode != Instruction::Add || (!True && !False) || (True && False))
- return nullptr;
- Value *N;
- if (True && match(FVal, m_Neg(m_Value(N)))) {
- Value *Sub = Builder.CreateSub(Z, N);
- return Builder.CreateSelect(Cond, True, Sub, I.getName());
- }
- if (False && match(TVal, m_Neg(m_Value(N)))) {
- Value *Sub = Builder.CreateSub(Z, N);
- return Builder.CreateSelect(Cond, Sub, False, I.getName());
- }
- return nullptr;
- };
- if (LHSIsSelect && RHSIsSelect && A == D) {
- // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
- Cond = A;
- True = simplifyBinOp(Opcode, B, E, FMF, Q);
- False = simplifyBinOp(Opcode, C, F, FMF, Q);
- if (LHS->hasOneUse() && RHS->hasOneUse()) {
- if (False && !True)
- True = Builder.CreateBinOp(Opcode, B, E);
- else if (True && !False)
- False = Builder.CreateBinOp(Opcode, C, F);
- }
- } else if (LHSIsSelect && LHS->hasOneUse()) {
- // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
- Cond = A;
- True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
- False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
- if (Value *NewSel = foldAddNegate(B, C, RHS))
- return NewSel;
- } else if (RHSIsSelect && RHS->hasOneUse()) {
- // X op (D ? E : F) -> D ? (X op E) : (X op F)
- Cond = D;
- True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
- False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
- if (Value *NewSel = foldAddNegate(E, F, LHS))
- return NewSel;
- }
- if (!True || !False)
- return nullptr;
- Value *SI = Builder.CreateSelect(Cond, True, False);
- SI->takeName(&I);
- return SI;
- }
- /// Freely adapt every user of V as if V was changed to !V.
- /// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
- void InstCombinerImpl::freelyInvertAllUsersOf(Value *I, Value *IgnoredUser) {
- for (User *U : make_early_inc_range(I->users())) {
- if (U == IgnoredUser)
- continue; // Don't consider this user.
- switch (cast<Instruction>(U)->getOpcode()) {
- case Instruction::Select: {
- auto *SI = cast<SelectInst>(U);
- SI->swapValues();
- SI->swapProfMetadata();
- break;
- }
- case Instruction::Br:
- cast<BranchInst>(U)->swapSuccessors(); // swaps prof metadata too
- break;
- case Instruction::Xor:
- replaceInstUsesWith(cast<Instruction>(*U), I);
- break;
- default:
- llvm_unreachable("Got unexpected user - out of sync with "
- "canFreelyInvertAllUsersOf() ?");
- }
- }
- }
- /// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
- /// constant zero (which is the 'negate' form).
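- /// e.g. for "sub i32 0, %X" this returns %X, and for a constant like
- /// "i32 7" it returns the folded negation "i32 -7".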
- Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
- Value *NegV;
- if (match(V, m_Neg(m_Value(NegV))))
- return NegV;
- // Constants can be considered to be negated values if they can be folded.
- if (ConstantInt *C = dyn_cast<ConstantInt>(V))
- return ConstantExpr::getNeg(C);
- if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
- if (C->getType()->getElementType()->isIntegerTy())
- return ConstantExpr::getNeg(C);
- if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
- for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
- Constant *Elt = CV->getAggregateElement(i);
- if (!Elt)
- return nullptr;
- if (isa<UndefValue>(Elt))
- continue;
- if (!isa<ConstantInt>(Elt))
- return nullptr;
- }
- return ConstantExpr::getNeg(CV);
- }
- // Negate integer vector splats.
- if (auto *CV = dyn_cast<Constant>(V))
- if (CV->getType()->isVectorTy() &&
- CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
- return ConstantExpr::getNeg(CV);
- return nullptr;
- }
- /// A binop with a constant operand and a sign-extended boolean operand may be
- /// converted into a select of constants by applying the binary operation to
- /// the constant with the two possible values of the extended boolean (0 or -1).
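- /// e.g. "add (sext i1 %X to i32), 42" --> "select i1 %X, i32 41, i32 42".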
- Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
- // TODO: Handle non-commutative binop (constant is operand 0).
- // TODO: Handle zext.
- // TODO: Peek through 'not' of cast.
- Value *BO0 = BO.getOperand(0);
- Value *BO1 = BO.getOperand(1);
- Value *X;
- Constant *C;
- if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
- !X->getType()->isIntOrIntVectorTy(1))
- return nullptr;
- // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
- Constant *Ones = ConstantInt::getAllOnesValue(BO.getType());
- Constant *Zero = ConstantInt::getNullValue(BO.getType());
- Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
- Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
- return SelectInst::Create(X, TVal, FVal);
- }
- static Constant *constantFoldOperationIntoSelectOperand(
- Instruction &I, SelectInst *SI, Value *SO) {
- auto *ConstSO = dyn_cast<Constant>(SO);
- if (!ConstSO)
- return nullptr;
- SmallVector<Constant *> ConstOps;
- for (Value *Op : I.operands()) {
- if (Op == SI)
- ConstOps.push_back(ConstSO);
- else if (auto *C = dyn_cast<Constant>(Op))
- ConstOps.push_back(C);
- else
- llvm_unreachable("Operands should be select or constant");
- }
- return ConstantFoldInstOperands(&I, ConstOps, I.getModule()->getDataLayout());
- }
- static Value *foldOperationIntoSelectOperand(Instruction &I, Value *SO,
- InstCombiner::BuilderTy &Builder) {
- if (auto *Cast = dyn_cast<CastInst>(&I))
- return Builder.CreateCast(Cast->getOpcode(), SO, I.getType());
- if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
- assert(canConstantFoldCallTo(II, cast<Function>(II->getCalledOperand())) &&
- "Expected constant-foldable intrinsic");
- Intrinsic::ID IID = II->getIntrinsicID();
- if (II->arg_size() == 1)
- return Builder.CreateUnaryIntrinsic(IID, SO);
- // This works for real binary ops like min/max (where we always expect the
- // constant operand to be canonicalized as op1) and unary ops with a bonus
- // constant argument like ctlz/cttz.
- // TODO: Handle non-commutative binary intrinsics as below for binops.
- assert(II->arg_size() == 2 && "Expected binary intrinsic");
- assert(isa<Constant>(II->getArgOperand(1)) && "Expected constant operand");
- return Builder.CreateBinaryIntrinsic(IID, SO, II->getArgOperand(1));
- }
- if (auto *EI = dyn_cast<ExtractElementInst>(&I))
- return Builder.CreateExtractElement(SO, EI->getIndexOperand());
- assert(I.isBinaryOp() && "Unexpected opcode for select folding");
- // Figure out if the constant is the left or the right argument.
- bool ConstIsRHS = isa<Constant>(I.getOperand(1));
- Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));
- Value *Op0 = SO, *Op1 = ConstOperand;
- if (!ConstIsRHS)
- std::swap(Op0, Op1);
- Value *NewBO = Builder.CreateBinOp(cast<BinaryOperator>(&I)->getOpcode(), Op0,
- Op1, SO->getName() + ".op");
- if (auto *NewBOI = dyn_cast<Instruction>(NewBO))
- NewBOI->copyIRFlags(&I);
- return NewBO;
- }
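- /// Fold an operation into the constant arm(s) of a select operand, e.g.
- /// (illustrative): 'add (select %c, i32 2, i32 10), i32 1' -->
- /// 'select %c, i32 3, i32 11'. At least one arm must constant fold.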
- Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
- bool FoldWithMultiUse) {
- // Don't modify shared select instructions unless FoldWithMultiUse is set.
- if (!SI->hasOneUse() && !FoldWithMultiUse)
- return nullptr;
- Value *TV = SI->getTrueValue();
- Value *FV = SI->getFalseValue();
- if (!(isa<Constant>(TV) || isa<Constant>(FV)))
- return nullptr;
- // Bool selects with constant operands can be folded to logical ops.
- if (SI->getType()->isIntOrIntVectorTy(1))
- return nullptr;
- // If it's a bitcast involving vectors, make sure it has the same number of
- // elements on both sides.
- if (auto *BC = dyn_cast<BitCastInst>(&Op)) {
- VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
- VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
- // Verify that either both or neither are vectors.
- if ((SrcTy == nullptr) != (DestTy == nullptr))
- return nullptr;
- // If vectors, verify that they have the same number of elements.
- if (SrcTy && SrcTy->getElementCount() != DestTy->getElementCount())
- return nullptr;
- }
- // Test if a CmpInst instruction is used exclusively by a select as
- // part of a minimum or maximum operation. If so, refrain from doing
- // any other folding. This helps out other analyses which understand
- // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
- // and CodeGen. And in this case, at least one of the comparison
- // operands has at least one user besides the compare (the select),
- // which would often largely negate the benefit of folding anyway.
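- // e.g. (illustrative): for '%cmp = icmp sgt i32 %a, %b' feeding
- // '%max = select i1 %cmp, i32 %a, i32 %b' (an smax idiom), we bail out here.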
- if (auto *CI = dyn_cast<CmpInst>(SI->getCondition())) {
- if (CI->hasOneUse()) {
- Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
- // FIXME: This is a hack to avoid infinite looping with min/max patterns.
- // We have to ensure that vector constants that only differ with
- // undef elements are treated as equivalent.
- auto areLooselyEqual = [](Value *A, Value *B) {
- if (A == B)
- return true;
- // Test for vector constants.
- Constant *ConstA, *ConstB;
- if (!match(A, m_Constant(ConstA)) || !match(B, m_Constant(ConstB)))
- return false;
- // TODO: Deal with FP constants?
- if (!A->getType()->isIntOrIntVectorTy() || A->getType() != B->getType())
- return false;
- // Compare for equality including undefs as equal.
- auto *Cmp = ConstantExpr::getCompare(ICmpInst::ICMP_EQ, ConstA, ConstB);
- const APInt *C;
- return match(Cmp, m_APIntAllowUndef(C)) && C->isOne();
- };
- if ((areLooselyEqual(TV, Op0) && areLooselyEqual(FV, Op1)) ||
- (areLooselyEqual(FV, Op0) && areLooselyEqual(TV, Op1)))
- return nullptr;
- }
- }
- // Make sure that one of the select arms constant folds successfully.
- Value *NewTV = constantFoldOperationIntoSelectOperand(Op, SI, TV);
- Value *NewFV = constantFoldOperationIntoSelectOperand(Op, SI, FV);
- if (!NewTV && !NewFV)
- return nullptr;
- // Create an instruction for the arm that did not fold.
- if (!NewTV)
- NewTV = foldOperationIntoSelectOperand(Op, TV, Builder);
- if (!NewFV)
- NewFV = foldOperationIntoSelectOperand(Op, FV, Builder);
- return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
- }
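- /// Try to fold instruction I, which has the phi PN as an operand, into the
- /// phi by applying the operation to each incoming value, e.g. (illustrative):
- /// 'add (phi [1, %bb0], [2, %bb1]), i32 4' --> 'phi [5, %bb0], [6, %bb1]'.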
- Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN) {
- unsigned NumPHIValues = PN->getNumIncomingValues();
- if (NumPHIValues == 0)
- return nullptr;
- // We normally only transform phis with a single use. However, if a PHI has
- // multiple uses and they are all the same operation, we can fold *all* of the
- // uses into the PHI.
- if (!PN->hasOneUse()) {
- // Walk the use list for the instruction, comparing them to I.
- for (User *U : PN->users()) {
- Instruction *UI = cast<Instruction>(U);
- if (UI != &I && !I.isIdenticalTo(UI))
- return nullptr;
- }
- // Otherwise, we can replace *all* users with the new PHI we form.
- }
- // Check to see whether the instruction can be folded into each phi operand.
- // If there is one operand that does not fold, remember the BB it is in.
- // If there is more than one or if *it* is a PHI, bail out.
- SmallVector<Value *> NewPhiValues;
- BasicBlock *NonSimplifiedBB = nullptr;
- Value *NonSimplifiedInVal = nullptr;
- for (unsigned i = 0; i != NumPHIValues; ++i) {
- Value *InVal = PN->getIncomingValue(i);
- BasicBlock *InBB = PN->getIncomingBlock(i);
- // NB: It is a precondition of this transform that the operands be
- // phi translatable! This is usually trivially satisfied by limiting it
- // to constant ops, and for selects we do a more sophisticated check.
- SmallVector<Value *> Ops;
- for (Value *Op : I.operands()) {
- if (Op == PN)
- Ops.push_back(InVal);
- else
- Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
- }
- // Don't consider the simplification successful if we get back a constant
- // expression. That's just an instruction in hiding.
- // Also reject the case where we simplify back to the phi node. We wouldn't
- // be able to remove it in that case.
- Value *NewVal = simplifyInstructionWithOperands(
- &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
- if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr())) {
- NewPhiValues.push_back(NewVal);
- continue;
- }
- if (isa<PHINode>(InVal)) return nullptr; // Itself a phi.
- if (NonSimplifiedBB) return nullptr; // More than one non-simplified value.
- NonSimplifiedBB = InBB;
- NonSimplifiedInVal = InVal;
- NewPhiValues.push_back(nullptr);
- // If the InVal is an invoke at the end of the pred block, then we can't
- // insert a computation after it without breaking the edge.
- if (isa<InvokeInst>(InVal))
- if (cast<Instruction>(InVal)->getParent() == NonSimplifiedBB)
- return nullptr;
- // If the incoming non-constant value is reachable from the phis block,
- // we'll push the operation across a loop backedge. This could result in
- // an infinite combine loop, and is generally non-profitable (especially
- // if the operation was originally outside the loop).
- if (isPotentiallyReachable(PN->getParent(), NonSimplifiedBB, nullptr, &DT,
- LI))
- return nullptr;
- }
- // If there is exactly one non-simplified value, we can insert a copy of the
- // operation in that block. However, if this is a critical edge, we would be
- // inserting the computation on some other paths (e.g. inside a loop). Only
- // do this if the pred block is unconditionally branching into the phi block.
- // Also, make sure that the pred block is not dead code.
- if (NonSimplifiedBB != nullptr) {
- BranchInst *BI = dyn_cast<BranchInst>(NonSimplifiedBB->getTerminator());
- if (!BI || !BI->isUnconditional() ||
- !DT.isReachableFromEntry(NonSimplifiedBB))
- return nullptr;
- }
- // Okay, we can do the transformation: create the new PHI node.
- PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
- InsertNewInstBefore(NewPN, *PN);
- NewPN->takeName(PN);
- // If we are going to have to insert a new computation, do so right before the
- // predecessor's terminator.
- Instruction *Clone = nullptr;
- if (NonSimplifiedBB) {
- Clone = I.clone();
- for (Use &U : Clone->operands()) {
- if (U == PN)
- U = NonSimplifiedInVal;
- else
- U = U->DoPHITranslation(PN->getParent(), NonSimplifiedBB);
- }
- InsertNewInstBefore(Clone, *NonSimplifiedBB->getTerminator());
- }
- for (unsigned i = 0; i != NumPHIValues; ++i) {
- if (NewPhiValues[i])
- NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
- else
- NewPN->addIncoming(Clone, PN->getIncomingBlock(i));
- }
- for (User *U : make_early_inc_range(PN->users())) {
- Instruction *User = cast<Instruction>(U);
- if (User == &I) continue;
- replaceInstUsesWith(*User, NewPN);
- eraseInstFromFunction(*User);
- }
- return replaceInstUsesWith(I, NewPN);
- }
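- /// Fold a binop whose operands are two phis that receive constants from the
- /// same predecessor, e.g. (illustrative):
- ///   %p0 = phi [ 7, %constbb ], [ %x, %otherbb ]
- ///   %p1 = phi [ 3, %constbb ], [ %y, %otherbb ]
- ///   add %p0, %p1
- /// --> 'add %x, %y' created in %otherbb, and the result phi'd with 10.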
- Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
- // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
- // we are guarding against replicating the binop in >1 predecessor.
- // This could miss matching a phi with 2 constant incoming values.
- auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
- auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
- if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
- Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
- return nullptr;
- // TODO: Remove the restriction for binop being in the same block as the phis.
- if (BO.getParent() != Phi0->getParent() ||
- BO.getParent() != Phi1->getParent())
- return nullptr;
- // Match a pair of incoming constants for one of the predecessor blocks.
- BasicBlock *ConstBB, *OtherBB;
- Constant *C0, *C1;
- if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
- ConstBB = Phi0->getIncomingBlock(0);
- OtherBB = Phi0->getIncomingBlock(1);
- } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
- ConstBB = Phi0->getIncomingBlock(1);
- OtherBB = Phi0->getIncomingBlock(0);
- } else {
- return nullptr;
- }
- if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
- return nullptr;
- // The block that we are hoisting to must reach here unconditionally.
- // Otherwise, we could be speculatively executing an expensive or
- // non-speculative op.
- auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
- if (!PredBlockBranch || PredBlockBranch->isConditional() ||
- !DT.isReachableFromEntry(OtherBB))
- return nullptr;
- // TODO: This check could be tightened to only apply to binops (div/rem) that
- // are not safe to speculatively execute. But that could allow hoisting
- // potentially expensive instructions (fdiv for example).
- for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
- if (!isGuaranteedToTransferExecutionToSuccessor(&*BBIter))
- return nullptr;
- // Fold constants for the predecessor block with constant incoming values.
- Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
- if (!NewC)
- return nullptr;
- // Make a new binop in the predecessor block with the non-constant incoming
- // values.
- Builder.SetInsertPoint(PredBlockBranch);
- Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
- Phi0->getIncomingValueForBlock(OtherBB),
- Phi1->getIncomingValueForBlock(OtherBB));
- if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
- NotFoldedNewBO->copyIRFlags(&BO);
- // Replace the binop with a phi of the new values. The old phis are dead.
- PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
- NewPhi->addIncoming(NewBO, OtherBB);
- NewPhi->addIncoming(NewC, ConstBB);
- return NewPhi;
- }
- Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
- if (!isa<Constant>(I.getOperand(1)))
- return nullptr;
- if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
- if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
- return NewSel;
- } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
- if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
- return NewPhi;
- }
- return nullptr;
- }
- /// Given a pointer type and a constant offset, determine whether or not there
- /// is a sequence of GEP indices into the pointed-to type that will land us at
- /// specified offset. If so, fill them into NewIndices and return the resultant
- /// element type, otherwise return null.
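- /// For example (illustrative): for a [10 x i32]* and IntOffset 8, NewIndices
- /// becomes {0, 2}, i.e. the GEP addresses the third i32 element.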
- static Type *findElementAtOffset(PointerType *PtrTy, int64_t IntOffset,
- SmallVectorImpl<Value *> &NewIndices,
- const DataLayout &DL) {
- // Only used by visitGEPOfBitcast(), which is skipped for opaque pointers.
- Type *Ty = PtrTy->getNonOpaquePointerElementType();
- if (!Ty->isSized())
- return nullptr;
- APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), IntOffset);
- SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(Ty, Offset);
- if (!Offset.isZero())
- return nullptr;
- for (const APInt &Index : Indices)
- NewIndices.push_back(ConstantInt::get(PtrTy->getContext(), Index));
- return Ty;
- }
- static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
- // If this GEP has only 0 indices, it is the same pointer as
- // Src. If Src is not a trivial GEP too, don't combine
- // the indices.
- if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
- !Src.hasOneUse())
- return false;
- return true;
- }
- /// Return a value X such that Val = X * Scale, or null if none.
- /// If the multiplication is known not to overflow, then NoSignedWrap is set.
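- /// For example (illustrative): descaling 'mul i32 %x, 12' by Scale = 4
- /// rewrites it in place to 'mul i32 %x, 3' and returns it, so that the
- /// returned value times 4 equals the original value.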
- Value *InstCombinerImpl::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
- assert(isa<IntegerType>(Val->getType()) && "Can only descale integers!");
- assert(cast<IntegerType>(Val->getType())->getBitWidth() ==
- Scale.getBitWidth() && "Scale not compatible with value!");
- // If Val is zero or Scale is one then Val = Val * Scale.
- if (match(Val, m_Zero()) || Scale == 1) {
- NoSignedWrap = true;
- return Val;
- }
- // If Scale is zero then it does not divide Val.
- if (Scale.isMinValue())
- return nullptr;
- // Look through chains of multiplications, searching for a constant that is
- // divisible by Scale. For example, descaling X*(Y*(Z*4)) by a factor of 4
- // will find the constant factor 4 and produce X*(Y*Z). Descaling X*(Y*8) by
- // a factor of 4 will produce X*(Y*2). The principle of operation is to bore
- // down from Val:
- //
- // Val = M1 * X || Analysis starts here and works down
- // M1 = M2 * Y || Doesn't descend into terms with more
- // M2 = Z * 4 \/ than one use
- //
- // Then to modify a term at the bottom:
- //
- // Val = M1 * X
- // M1 = Z * Y || Replaced M2 with Z
- //
- // Then to work back up correcting nsw flags.
- // Op - the term we are currently analyzing. Starts at Val then drills down.
- // Replaced with its descaled value before exiting from the drill down loop.
- Value *Op = Val;
- // Parent - initially null, but after drilling down notes where Op came from.
- // In the example above, Parent is (Val, 0) when Op is M1, because M1 is the
- // 0'th operand of Val.
- std::pair<Instruction *, unsigned> Parent;
- // Set if the transform requires a descaling at deeper levels that doesn't
- // overflow.
- bool RequireNoSignedWrap = false;
- // Log base 2 of the scale. Negative if not a power of 2.
- int32_t logScale = Scale.exactLogBase2();
- for (;; Op = Parent.first->getOperand(Parent.second)) { // Drill down
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
- // If Op is a constant divisible by Scale then descale to the quotient.
- APInt Quotient(Scale), Remainder(Scale); // Init ensures right bitwidth.
- APInt::sdivrem(CI->getValue(), Scale, Quotient, Remainder);
- if (!Remainder.isMinValue())
- // Not divisible by Scale.
- return nullptr;
- // Replace with the quotient in the parent.
- Op = ConstantInt::get(CI->getType(), Quotient);
- NoSignedWrap = true;
- break;
- }
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op)) {
- if (BO->getOpcode() == Instruction::Mul) {
- // Multiplication.
- NoSignedWrap = BO->hasNoSignedWrap();
- if (RequireNoSignedWrap && !NoSignedWrap)
- return nullptr;
- // There are three cases for multiplication: multiplication by exactly
- // the scale, multiplication by a constant different from the scale, and
- // multiplication by something else.
- Value *LHS = BO->getOperand(0);
- Value *RHS = BO->getOperand(1);
- if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
- // Multiplication by a constant.
- if (CI->getValue() == Scale) {
- // Multiplication by exactly the scale, replace the multiplication
- // by its left-hand side in the parent.
- Op = LHS;
- break;
- }
- // Otherwise drill down into the constant.
- if (!Op->hasOneUse())
- return nullptr;
- Parent = std::make_pair(BO, 1);
- continue;
- }
- // Multiplication by something else. Drill down into the left-hand side
- // since that's where the reassociate pass puts the good stuff.
- if (!Op->hasOneUse())
- return nullptr;
- Parent = std::make_pair(BO, 0);
- continue;
- }
- if (logScale > 0 && BO->getOpcode() == Instruction::Shl &&
- isa<ConstantInt>(BO->getOperand(1))) {
- // Multiplication by a power of 2.
- NoSignedWrap = BO->hasNoSignedWrap();
- if (RequireNoSignedWrap && !NoSignedWrap)
- return nullptr;
- Value *LHS = BO->getOperand(0);
- int32_t Amt = cast<ConstantInt>(BO->getOperand(1))->
- getLimitedValue(Scale.getBitWidth());
- // Op = LHS << Amt.
- if (Amt == logScale) {
- // Multiplication by exactly the scale, replace the multiplication
- // by its left-hand side in the parent.
- Op = LHS;
- break;
- }
- if (Amt < logScale || !Op->hasOneUse())
- return nullptr;
- // Multiplication by more than the scale. Reduce the multiplying amount
- // by the scale in the parent.
- Parent = std::make_pair(BO, 1);
- Op = ConstantInt::get(BO->getType(), Amt - logScale);
- break;
- }
- }
- if (!Op->hasOneUse())
- return nullptr;
- if (CastInst *Cast = dyn_cast<CastInst>(Op)) {
- if (Cast->getOpcode() == Instruction::SExt) {
- // Op is sign-extended from a smaller type, descale in the smaller type.
- unsigned SmallSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
- APInt SmallScale = Scale.trunc(SmallSize);
- // Suppose Op = sext X, and we descale X as Y * SmallScale. We want to
- // descale Op as (sext Y) * Scale. In order to have
- // sext (Y * SmallScale) = (sext Y) * Scale
- // some conditions need to hold however: SmallScale must sign-extend to
- // Scale and the multiplication Y * SmallScale should not overflow.
- if (SmallScale.sext(Scale.getBitWidth()) != Scale)
- // SmallScale does not sign-extend to Scale.
- return nullptr;
- assert(SmallScale.exactLogBase2() == logScale);
- // Require that Y * SmallScale must not overflow.
- RequireNoSignedWrap = true;
- // Drill down through the cast.
- Parent = std::make_pair(Cast, 0);
- Scale = SmallScale;
- continue;
- }
- if (Cast->getOpcode() == Instruction::Trunc) {
- // Op is truncated from a larger type, descale in the larger type.
- // Suppose Op = trunc X, and we descale X as Y * sext Scale. Then
- // trunc (Y * sext Scale) = (trunc Y) * Scale
- // always holds. However (trunc Y) * Scale may overflow even if
- // trunc (Y * sext Scale) does not, so nsw flags need to be cleared
- // from this point up in the expression (see later).
- if (RequireNoSignedWrap)
- return nullptr;
- // Drill down through the cast.
- unsigned LargeSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
- Parent = std::make_pair(Cast, 0);
- Scale = Scale.sext(LargeSize);
- if (logScale + 1 == (int32_t)Cast->getType()->getPrimitiveSizeInBits())
- logScale = -1;
- assert(Scale.exactLogBase2() == logScale);
- continue;
- }
- }
- // Unsupported expression, bail out.
- return nullptr;
- }
- // If Op is zero then Val = Op * Scale.
- if (match(Op, m_Zero())) {
- NoSignedWrap = true;
- return Op;
- }
- // We know that we can successfully descale, so from here on we can safely
- // modify the IR. Op holds the descaled version of the deepest term in the
- // expression. NoSignedWrap is 'true' if multiplying Op by Scale is known
- // not to overflow.
- if (!Parent.first)
- // The expression only had one term.
- return Op;
- // Rewrite the parent using the descaled version of its operand.
- assert(Parent.first->hasOneUse() && "Drilled down when more than one use!");
- assert(Op != Parent.first->getOperand(Parent.second) &&
- "Descaling was a no-op?");
- replaceOperand(*Parent.first, Parent.second, Op);
- Worklist.push(Parent.first);
- // Now work back up the expression correcting nsw flags. The logic is based
- // on the following observation: if X * Y is known not to overflow as a signed
- // multiplication, and Y is replaced by a value Z with smaller absolute value,
- // then X * Z will not overflow as a signed multiplication either. As we work
- // our way up, having NoSignedWrap 'true' means that the descaled value at the
- // current level has strictly smaller absolute value than the original.
- Instruction *Ancestor = Parent.first;
- do {
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Ancestor)) {
- // If the multiplication wasn't nsw then we can't say anything about the
- // value of the descaled multiplication, and we have to clear nsw flags
- // from this point on up.
- bool OpNoSignedWrap = BO->hasNoSignedWrap();
- NoSignedWrap &= OpNoSignedWrap;
- if (NoSignedWrap != OpNoSignedWrap) {
- BO->setHasNoSignedWrap(NoSignedWrap);
- Worklist.push(Ancestor);
- }
- } else if (Ancestor->getOpcode() == Instruction::Trunc) {
- // The fact that the descaled input to the trunc has smaller absolute
- // value than the original input doesn't tell us anything useful about
- // the absolute values of the truncations.
- NoSignedWrap = false;
- }
- assert((Ancestor->getOpcode() != Instruction::SExt || NoSignedWrap) &&
- "Failed to keep proper track of nsw flags while drilling down?");
- if (Ancestor == Val)
- // Got to the top, all done!
- return Val;
- // Move up one level in the expression.
- assert(Ancestor->hasOneUse() && "Drilled down when more than one use!");
- Ancestor = Ancestor->user_back();
- } while (true);
- }
- Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
- if (!isa<VectorType>(Inst.getType()))
- return nullptr;
- BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
- Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
- assert(cast<VectorType>(LHS->getType())->getElementCount() ==
- cast<VectorType>(Inst.getType())->getElementCount());
- assert(cast<VectorType>(RHS->getType())->getElementCount() ==
- cast<VectorType>(Inst.getType())->getElementCount());
- // If both operands of the binop are vector concatenations, then perform the
- // narrow binop on each pair of the source operands followed by concatenation
- // of the results.
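- // e.g. (illustrative): 'add (concat A, B), (concat C, D)' -->
- // 'concat (add A, C), (add B, D)', where each concat is a shufflevector
- // joining two equal-width source vectors.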
- Value *L0, *L1, *R0, *R1;
- ArrayRef<int> Mask;
- if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
- match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
- LHS->hasOneUse() && RHS->hasOneUse() &&
- cast<ShuffleVectorInst>(LHS)->isConcat() &&
- cast<ShuffleVectorInst>(RHS)->isConcat()) {
- // This transform does not have the speculative execution constraint as
- // below because the shuffle is a concatenation. The new binops are
- // operating on exactly the same elements as the existing binop.
- // TODO: We could ease the mask requirement to allow different undef lanes,
- // but that requires an analysis of the binop-with-undef output value.
- Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
- if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
- BO->copyIRFlags(&Inst);
- Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
- if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
- BO->copyIRFlags(&Inst);
- return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
- }
- auto createBinOpReverse = [&](Value *X, Value *Y) {
- Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
- if (auto *BO = dyn_cast<BinaryOperator>(V))
- BO->copyIRFlags(&Inst);
- Module *M = Inst.getModule();
- Function *F = Intrinsic::getDeclaration(
- M, Intrinsic::experimental_vector_reverse, V->getType());
- return CallInst::Create(F, V);
- };
- // NOTE: Reverse shuffles don't require the speculative execution protection
- // below because they don't affect which lanes take part in the computation.
- Value *V1, *V2;
- if (match(LHS, m_VecReverse(m_Value(V1)))) {
- // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
- if (match(RHS, m_VecReverse(m_Value(V2))) &&
- (LHS->hasOneUse() || RHS->hasOneUse() ||
- (LHS == RHS && LHS->hasNUses(2))))
- return createBinOpReverse(V1, V2);
- // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
- if (LHS->hasOneUse() && isSplatValue(RHS))
- return createBinOpReverse(V1, RHS);
- }
- // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
- else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
- return createBinOpReverse(LHS, V2);
- // It may not be safe to reorder shuffles and things like div, urem, etc.
- // because we may trap when executing those ops on unknown vector elements.
- // See PR20059.
- if (!isSafeToSpeculativelyExecute(&Inst))
- return nullptr;
- auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
- Value *XY = Builder.CreateBinOp(Opcode, X, Y);
- if (auto *BO = dyn_cast<BinaryOperator>(XY))
- BO->copyIRFlags(&Inst);
- return new ShuffleVectorInst(XY, M);
- };
- // If both arguments of the binary operation are shuffles that use the same
- // mask and shuffle within a single vector, move the shuffle after the binop.
- if (match(LHS, m_Shuffle(m_Value(V1), m_Undef(), m_Mask(Mask))) &&
- match(RHS, m_Shuffle(m_Value(V2), m_Undef(), m_SpecificMask(Mask))) &&
- V1->getType() == V2->getType() &&
- (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
- // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
- return createBinOpShuffle(V1, V2, Mask);
- }
- // If both arguments of a commutative binop are select-shuffles that use the
- // same mask with commuted operands, the shuffles are unnecessary.
- if (Inst.isCommutative() &&
- match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
- match(RHS,
- m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
- auto *LShuf = cast<ShuffleVectorInst>(LHS);
- auto *RShuf = cast<ShuffleVectorInst>(RHS);
- // TODO: Allow shuffles that contain undefs in the mask?
- // That is legal, but it reduces undef knowledge.
- // TODO: Allow arbitrary shuffles by shuffling after binop?
- // That might be legal, but we have to deal with poison.
- if (LShuf->isSelect() &&
- !is_contained(LShuf->getShuffleMask(), UndefMaskElem) &&
- RShuf->isSelect() &&
- !is_contained(RShuf->getShuffleMask(), UndefMaskElem)) {
- // Example:
- // LHS = shuffle V1, V2, <0, 5, 6, 3>
- // RHS = shuffle V2, V1, <0, 5, 6, 3>
- // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
- Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
- NewBO->copyIRFlags(&Inst);
- return NewBO;
- }
- }
- // If one argument is a shuffle within one vector and the other is a constant,
- // try moving the shuffle after the binary operation. This canonicalization
- // intends to move shuffles closer to other shuffles and binops closer to
- // other binops, so they can be folded. It may also enable demanded elements
- // transforms.
- Constant *C;
- auto *InstVTy = dyn_cast<FixedVectorType>(Inst.getType());
- if (InstVTy &&
- match(&Inst,
- m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Undef(), m_Mask(Mask))),
- m_ImmConstant(C))) &&
- cast<FixedVectorType>(V1->getType())->getNumElements() <=
- InstVTy->getNumElements()) {
- assert(InstVTy->getScalarType() == V1->getType()->getScalarType() &&
- "Shuffle should not change scalar type");
- // Find constant NewC that has property:
- // shuffle(NewC, ShMask) = C
- // If no such constant exists (example: ShMask=<0,0> and C=<1,2>), the
- // reorder is not possible. A 1-to-1 mapping is not required. Example:
- // ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <undef,5,6,undef>
- bool ConstOp1 = isa<Constant>(RHS);
- ArrayRef<int> ShMask = Mask;
- unsigned SrcVecNumElts =
- cast<FixedVectorType>(V1->getType())->getNumElements();
- UndefValue *UndefScalar = UndefValue::get(C->getType()->getScalarType());
- SmallVector<Constant *, 16> NewVecC(SrcVecNumElts, UndefScalar);
- bool MayChange = true;
- unsigned NumElts = InstVTy->getNumElements();
- for (unsigned I = 0; I < NumElts; ++I) {
- Constant *CElt = C->getAggregateElement(I);
- if (ShMask[I] >= 0) {
- assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
- Constant *NewCElt = NewVecC[ShMask[I]];
- // Bail out if:
- // 1. The constant vector contains a constant expression.
- // 2. The shuffle needs an element of the constant vector that can't
- // be mapped to a new constant vector.
- // 3. This is a widening shuffle that copies elements of V1 into the
- // extended elements (extending with undef is allowed).
- if (!CElt || (!isa<UndefValue>(NewCElt) && NewCElt != CElt) ||
- I >= SrcVecNumElts) {
- MayChange = false;
- break;
- }
- NewVecC[ShMask[I]] = CElt;
- }
- // If this is a widening shuffle, we must be able to extend with undef
- // elements. If the original binop does not produce an undef in the high
- // lanes, then this transform is not safe.
- // Similarly for undef lanes due to the shuffle mask, we can only
- // transform binops that preserve undef.
- // TODO: We could shuffle those non-undef constant values into the
- // result by using a constant vector (rather than an undef vector)
- // as operand 1 of the new binop, but that might be too aggressive
- // for target-independent shuffle creation.
- if (I >= SrcVecNumElts || ShMask[I] < 0) {
- Constant *MaybeUndef =
- ConstOp1
- ? ConstantFoldBinaryOpOperands(Opcode, UndefScalar, CElt, DL)
- : ConstantFoldBinaryOpOperands(Opcode, CElt, UndefScalar, DL);
- if (!MaybeUndef || !match(MaybeUndef, m_Undef())) {
- MayChange = false;
- break;
- }
- }
- }
- if (MayChange) {
- Constant *NewC = ConstantVector::get(NewVecC);
- // It may not be safe to execute a binop on a vector with undef elements
- // because the entire instruction can be folded to undef or create poison
- // that did not exist in the original code.
- if (Inst.isIntDivRem() || (Inst.isShift() && ConstOp1))
- NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
- // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
- // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
- Value *NewLHS = ConstOp1 ? V1 : NewC;
- Value *NewRHS = ConstOp1 ? NewC : V1;
- return createBinOpShuffle(NewLHS, NewRHS, Mask);
- }
- }
- // Try to reassociate to sink a splat shuffle after a binary operation.
- if (Inst.isAssociative() && Inst.isCommutative()) {
- // Canonicalize shuffle operand as LHS.
- if (isa<ShuffleVectorInst>(RHS))
- std::swap(LHS, RHS);
- Value *X;
- ArrayRef<int> MaskC;
- int SplatIndex;
- Value *Y, *OtherOp;
- if (!match(LHS,
- m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
- !match(MaskC, m_SplatOrUndefMask(SplatIndex)) ||
- X->getType() != Inst.getType() ||
- !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
- return nullptr;
- // FIXME: This may not be safe if the analysis allows undef elements. By
- // moving 'Y' before the splat shuffle, we are implicitly assuming
- // that it is not undef/poison at the splat index.
- if (isSplatValue(OtherOp, SplatIndex)) {
- std::swap(Y, OtherOp);
- } else if (!isSplatValue(Y, SplatIndex)) {
- return nullptr;
- }
- // X and Y are splatted values, so perform the binary operation on those
- // values followed by a splat followed by the 2nd binary operation:
- // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
- Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
- SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
- Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
- Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
- // Intersect FMF on both new binops. Other (poison-generating) flags are
- // dropped to be safe.
- if (isa<FPMathOperator>(R)) {
- R->copyFastMathFlags(&Inst);
- R->andIRFlags(RHS);
- }
- if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
- NewInstBO->copyIRFlags(R);
- return R;
- }
- return nullptr;
- }
- /// Try to narrow the width of a binop if at least 1 operand is an extend
- /// of a value. This requires a potentially expensive known bits check to make
- /// sure the narrow op does not overflow.
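- /// For example (illustrative): 'add (zext i8 %x to i32), (zext i8 %y to i32)'
- /// --> 'zext (add nuw i8 %x, %y) to i32' when the i8 addition is known not
- /// to overflow.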
- Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
- // We need at least one extended operand.
- Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
- // If this is a sub, we swap the operands since we always want an extension
- // on the RHS. The LHS can be an extension or a constant.
- if (BO.getOpcode() == Instruction::Sub)
- std::swap(Op0, Op1);
- Value *X;
- bool IsSext = match(Op0, m_SExt(m_Value(X)));
- if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
- return nullptr;
- // If both operands are the same extension from the same source type and we
- // can eliminate at least one (hasOneUse), this might work.
- CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
- Value *Y;
- if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
- cast<Operator>(Op1)->getOpcode() == CastOpc &&
- (Op0->hasOneUse() || Op1->hasOneUse()))) {
- // If that did not match, see if we have a suitable constant operand.
- // Truncating and extending must produce the same constant.
- Constant *WideC;
- if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
- return nullptr;
- Constant *NarrowC = ConstantExpr::getTrunc(WideC, X->getType());
- if (ConstantExpr::getCast(CastOpc, NarrowC, BO.getType()) != WideC)
- return nullptr;
- Y = NarrowC;
- }
- // Swap back now that we found our operands.
- if (BO.getOpcode() == Instruction::Sub)
- std::swap(X, Y);
- // Both operands have narrow versions. Last step: the math must not overflow
- // in the narrow width.
- if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
- return nullptr;
- // bo (ext X), (ext Y) --> ext (bo X, Y)
- // bo (ext X), C --> ext (bo X, C')
- Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
- if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
- if (IsSext)
- NewBinOp->setHasNoSignedWrap();
- else
- NewBinOp->setHasNoUnsignedWrap();
- }
- return CastInst::Create(CastOpc, NarrowBO, BO.getType());
- }
- static bool isMergedGEPInBounds(GEPOperator &GEP1, GEPOperator &GEP2) {
- // At least one GEP must be inbounds.
- if (!GEP1.isInBounds() && !GEP2.isInBounds())
- return false;
- return (GEP1.isInBounds() || GEP1.hasAllZeroIndices()) &&
- (GEP2.isInBounds() || GEP2.hasAllZeroIndices());
- }
- /// Thread a GEP operation with constant indices through the constant true/false
- /// arms of a select.
- static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
- InstCombiner::BuilderTy &Builder) {
- if (!GEP.hasAllConstantIndices())
- return nullptr;
- Instruction *Sel;
- Value *Cond;
- Constant *TrueC, *FalseC;
- if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
- !match(Sel,
- m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
- return nullptr;
- // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
- // Propagate 'inbounds' and metadata from existing instructions.
- // Note: using IRBuilder to create the constants for efficiency.
- SmallVector<Value *, 4> IndexC(GEP.indices());
- bool IsInBounds = GEP.isInBounds();
- Type *Ty = GEP.getSourceElementType();
- Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", IsInBounds);
- Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", IsInBounds);
- return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
- }
- Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
- GEPOperator *Src) {
- // Combine Indices - If the source pointer to this getelementptr instruction
- // is a getelementptr instruction with matching element type, combine the
- // indices of the two getelementptr instructions into a single instruction.
- if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
- return nullptr;
- if (Src->getResultElementType() == GEP.getSourceElementType() &&
- Src->getNumOperands() == 2 && GEP.getNumOperands() == 2 &&
- Src->hasOneUse()) {
- Value *GO1 = GEP.getOperand(1);
- Value *SO1 = Src->getOperand(1);
- if (LI) {
- // Try to reassociate loop invariant GEP chains to enable LICM.
- if (Loop *L = LI->getLoopFor(GEP.getParent())) {
- // Reassociate the two GEPs if SO1 is variant in the loop and GO1 is
- // invariant: this breaks the dependence between GEPs and allows LICM
- // to hoist the invariant part out of the loop.
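- // e.g. (illustrative): 'gep (gep %base, %loopvar), %inv' -->
- // 'gep (gep %base, %inv), %loopvar', so LICM can hoist the inner GEP.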
- if (L->isLoopInvariant(GO1) && !L->isLoopInvariant(SO1)) {
- // The swapped GEPs are inbounds if both original GEPs are inbounds
- // and the sign of the offsets is the same. For simplicity, only
- // handle both offsets being non-negative.
- bool IsInBounds = Src->isInBounds() && GEP.isInBounds() &&
- isKnownNonNegative(SO1, DL, 0, &AC, &GEP, &DT) &&
- isKnownNonNegative(GO1, DL, 0, &AC, &GEP, &DT);
- // Put NewSrc at same location as %src.
- Builder.SetInsertPoint(cast<Instruction>(Src));
- Value *NewSrc = Builder.CreateGEP(GEP.getSourceElementType(),
- Src->getPointerOperand(), GO1,
- Src->getName(), IsInBounds);
- GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
- GEP.getSourceElementType(), NewSrc, {SO1});
- NewGEP->setIsInBounds(IsInBounds);
- return NewGEP;
- }
- }
- }
- }
- // Note that if our source is a gep chain itself then we wait for that
- // chain to be resolved before we perform this transformation. This
- // avoids us creating a TON of code in some cases.
- if (auto *SrcGEP = dyn_cast<GEPOperator>(Src->getOperand(0)))
- if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
- return nullptr; // Wait until our source is folded to completion.
- // For constant GEPs, use a more general offset-based folding approach.
- // Only do this for opaque pointers, as the result element type may change.
- Type *PtrTy = Src->getType()->getScalarType();
- if (PtrTy->isOpaquePointerTy() && GEP.hasAllConstantIndices() &&
- (Src->hasOneUse() || Src->hasAllConstantIndices())) {
- // Split Src into a variable part and a constant suffix.
- gep_type_iterator GTI = gep_type_begin(*Src);
- Type *BaseType = GTI.getIndexedType();
- bool IsFirstType = true;
- unsigned NumVarIndices = 0;
- for (auto Pair : enumerate(Src->indices())) {
- if (!isa<ConstantInt>(Pair.value())) {
- BaseType = GTI.getIndexedType();
- IsFirstType = false;
- NumVarIndices = Pair.index() + 1;
- }
- ++GTI;
- }
- // Determine the offset for the constant suffix of Src.
- APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0);
- if (NumVarIndices != Src->getNumIndices()) {
- // FIXME: getIndexedOffsetInType() does not handle scalable vectors.
- if (isa<ScalableVectorType>(BaseType))
- return nullptr;
- SmallVector<Value *> ConstantIndices;
- if (!IsFirstType)
- ConstantIndices.push_back(
- Constant::getNullValue(Type::getInt32Ty(GEP.getContext())));
- append_range(ConstantIndices, drop_begin(Src->indices(), NumVarIndices));
- Offset += DL.getIndexedOffsetInType(BaseType, ConstantIndices);
- }
- // Add the offset for GEP (which is fully constant).
- if (!GEP.accumulateConstantOffset(DL, Offset))
- return nullptr;
- APInt OffsetOld = Offset;
- // Convert the total offset back into indices.
- SmallVector<APInt> ConstIndices =
- DL.getGEPIndicesForOffset(BaseType, Offset);
- if (!Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero())) {
- // If both GEP are constant-indexed, and cannot be merged in either way,
- // convert them to a GEP of i8.
- if (Src->hasAllConstantIndices())
- return isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP))
- ? GetElementPtrInst::CreateInBounds(
- Builder.getInt8Ty(), Src->getOperand(0),
- Builder.getInt(OffsetOld), GEP.getName())
- : GetElementPtrInst::Create(
- Builder.getInt8Ty(), Src->getOperand(0),
- Builder.getInt(OffsetOld), GEP.getName());
- return nullptr;
- }
- bool IsInBounds = isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP));
- SmallVector<Value *> Indices;
- append_range(Indices, drop_end(Src->indices(),
- Src->getNumIndices() - NumVarIndices));
- for (const APInt &Idx : drop_begin(ConstIndices, !IsFirstType)) {
- Indices.push_back(ConstantInt::get(GEP.getContext(), Idx));
- // Even if the total offset is inbounds, we may end up representing it
- // by first performing a larger negative offset, and then a smaller
- // positive one. The large negative offset might go out of bounds. Only
- // preserve inbounds if all signs are the same.
- IsInBounds &= Idx.isNonNegative() == ConstIndices[0].isNonNegative();
- }
- return IsInBounds
- ? GetElementPtrInst::CreateInBounds(Src->getSourceElementType(),
- Src->getOperand(0), Indices,
- GEP.getName())
- : GetElementPtrInst::Create(Src->getSourceElementType(),
- Src->getOperand(0), Indices,
- GEP.getName());
- }
- if (Src->getResultElementType() != GEP.getSourceElementType())
- return nullptr;
- SmallVector<Value*, 8> Indices;
- // Find out whether the last index in the source GEP is a sequential idx.
- bool EndsWithSequential = false;
- for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
- I != E; ++I)
- EndsWithSequential = I.isSequential();
- // Can we combine the two pointer arithmetics offsets?
- if (EndsWithSequential) {
- // Replace: gep (gep %P, long B), long A, ...
- // With: T = long A+B; gep %P, T, ...
- Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
- Value *GO1 = GEP.getOperand(1);
- // If they aren't the same type, then the input hasn't been processed
- // by the loop above yet (which canonicalizes sequential index types to
- // intptr_t). Just avoid transforming this until the input has been
- // normalized.
- if (SO1->getType() != GO1->getType())
- return nullptr;
- Value *Sum =
- simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
- // Only do the combine when we are sure the cost after the
- // merge is never more than that before the merge.
- if (Sum == nullptr)
- return nullptr;
- // Update the GEP in place if possible.
- if (Src->getNumOperands() == 2) {
- GEP.setIsInBounds(isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP)));
- replaceOperand(GEP, 0, Src->getOperand(0));
- replaceOperand(GEP, 1, Sum);
- return &GEP;
- }
- Indices.append(Src->op_begin()+1, Src->op_end()-1);
- Indices.push_back(Sum);
- Indices.append(GEP.op_begin()+2, GEP.op_end());
- } else if (isa<Constant>(*GEP.idx_begin()) &&
- cast<Constant>(*GEP.idx_begin())->isNullValue() &&
- Src->getNumOperands() != 1) {
- // Otherwise we can do the fold if the first index of the GEP is a zero
- Indices.append(Src->op_begin()+1, Src->op_end());
- Indices.append(GEP.idx_begin()+1, GEP.idx_end());
- }
- if (!Indices.empty())
- return isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP))
- ? GetElementPtrInst::CreateInBounds(
- Src->getSourceElementType(), Src->getOperand(0), Indices,
- GEP.getName())
- : GetElementPtrInst::Create(Src->getSourceElementType(),
- Src->getOperand(0), Indices,
- GEP.getName());
- return nullptr;
- }
- // Note that we may have also stripped an address space cast in between.
- Instruction *InstCombinerImpl::visitGEPOfBitcast(BitCastInst *BCI,
- GetElementPtrInst &GEP) {
- // With opaque pointers, there is no pointer element type we can use to
- // adjust the GEP type.
- PointerType *SrcType = cast<PointerType>(BCI->getSrcTy());
- if (SrcType->isOpaque())
- return nullptr;
- Type *GEPEltType = GEP.getSourceElementType();
- Type *SrcEltType = SrcType->getNonOpaquePointerElementType();
- Value *SrcOp = BCI->getOperand(0);
- // GEP directly using the source operand if this GEP is accessing an element
- // of a bitcasted pointer to vector or array of the same dimensions:
- // gep (bitcast <c x ty>* X to [c x ty]*), Y, Z --> gep X, Y, Z
- // gep (bitcast [c x ty]* X to <c x ty>*), Y, Z --> gep X, Y, Z
- auto areMatchingArrayAndVecTypes = [](Type *ArrTy, Type *VecTy,
- const DataLayout &DL) {
- auto *VecVTy = cast<FixedVectorType>(VecTy);
- return ArrTy->getArrayElementType() == VecVTy->getElementType() &&
- ArrTy->getArrayNumElements() == VecVTy->getNumElements() &&
- DL.getTypeAllocSize(ArrTy) == DL.getTypeAllocSize(VecTy);
- };
- if (GEP.getNumOperands() == 3 &&
- ((GEPEltType->isArrayTy() && isa<FixedVectorType>(SrcEltType) &&
- areMatchingArrayAndVecTypes(GEPEltType, SrcEltType, DL)) ||
- (isa<FixedVectorType>(GEPEltType) && SrcEltType->isArrayTy() &&
- areMatchingArrayAndVecTypes(SrcEltType, GEPEltType, DL)))) {
- // Create a new GEP here, as using `setOperand()` followed by
- // `setSourceElementType()` won't actually update the type of the
- // existing GEP Value, causing issues if this Value is accessed when
- // constructing an AddrSpaceCastInst.
- SmallVector<Value *, 8> Indices(GEP.indices());
- Value *NGEP =
- Builder.CreateGEP(SrcEltType, SrcOp, Indices, "", GEP.isInBounds());
- NGEP->takeName(&GEP);
- // Preserve GEP address space to satisfy users
- if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
- return new AddrSpaceCastInst(NGEP, GEP.getType());
- return replaceInstUsesWith(GEP, NGEP);
- }
- // See if we can simplify:
- // X = bitcast A* to B*
- // Y = gep X, <...constant indices...>
- // into a gep of the original struct. This is important for SROA and alias
- // analysis of unions. If "A" is also a bitcast, wait for A/X to be merged.
- unsigned OffsetBits = DL.getIndexTypeSizeInBits(GEP.getType());
- APInt Offset(OffsetBits, 0);
- // If the bitcast argument is an allocation, the bitcast is a conversion
- // to the actual type of the allocation. Removing such bitcasts results in
- // GEPs with an i8* base and pure byte offsets, which means the GEP is not
- // aware of the struct or array hierarchy.
- // By avoiding such GEPs, phi translation and MemoryDependencyAnalysis have
- // a better chance to succeed.
- if (!isa<BitCastInst>(SrcOp) && GEP.accumulateConstantOffset(DL, Offset) &&
- !isAllocationFn(SrcOp, &TLI)) {
- // If this GEP instruction doesn't move the pointer, just replace the GEP
- // with a bitcast of the real input to the dest type.
- if (!Offset) {
- // If the bitcast is of an allocation, and the allocation will be
- // converted to match the type of the cast, don't touch this.
- if (isa<AllocaInst>(SrcOp)) {
- // See if the bitcast simplifies, if so, don't nuke this GEP yet.
- if (Instruction *I = visitBitCast(*BCI)) {
- if (I != BCI) {
- I->takeName(BCI);
- I->insertInto(BCI->getParent(), BCI->getIterator());
- replaceInstUsesWith(*BCI, I);
- }
- return &GEP;
- }
- }
- if (SrcType->getPointerAddressSpace() != GEP.getAddressSpace())
- return new AddrSpaceCastInst(SrcOp, GEP.getType());
- return new BitCastInst(SrcOp, GEP.getType());
- }
- // Otherwise, if the offset is non-zero, we need to find out if there is a
- // field at Offset in 'A's type. If so, we can pull the cast through the
- // GEP.
- SmallVector<Value *, 8> NewIndices;
- if (findElementAtOffset(SrcType, Offset.getSExtValue(), NewIndices, DL)) {
- Value *NGEP = Builder.CreateGEP(SrcEltType, SrcOp, NewIndices, "",
- GEP.isInBounds());
- if (NGEP->getType() == GEP.getType())
- return replaceInstUsesWith(GEP, NGEP);
- NGEP->takeName(&GEP);
- if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
- return new AddrSpaceCastInst(NGEP, GEP.getType());
- return new BitCastInst(NGEP, GEP.getType());
- }
- }
- return nullptr;
- }
- Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
- Value *PtrOp = GEP.getOperand(0);
- SmallVector<Value *, 8> Indices(GEP.indices());
- Type *GEPType = GEP.getType();
- Type *GEPEltType = GEP.getSourceElementType();
- bool IsGEPSrcEleScalable = isa<ScalableVectorType>(GEPEltType);
- if (Value *V = simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.isInBounds(),
- SQ.getWithInstruction(&GEP)))
- return replaceInstUsesWith(GEP, V);
- // For vector geps, use the generic demanded vector support.
- // Skip if GEP return type is scalable. The number of elements is unknown at
- // compile-time.
- if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
- auto VWidth = GEPFVTy->getNumElements();
- APInt UndefElts(VWidth, 0);
- APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
- if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
- UndefElts)) {
- if (V != &GEP)
- return replaceInstUsesWith(GEP, V);
- return &GEP;
- }
- // TODO: 1) Scalarize splat operands, 2) scalarize entire instruction if
- // possible (decide on canonical form for pointer broadcast), 3) exploit
- // undef elements to decrease demanded bits
- }
- // Eliminate unneeded casts for indices, and replace indices which displace
- // by multiples of a zero size type with zero.
- bool MadeChange = false;
- // Index width may not be the same width as pointer width.
- // Data layout chooses the right type based on supported integer types.
- Type *NewScalarIndexTy =
- DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
- gep_type_iterator GTI = gep_type_begin(GEP);
- for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
- ++I, ++GTI) {
- // Skip indices into struct types.
- if (GTI.isStruct())
- continue;
- Type *IndexTy = (*I)->getType();
- Type *NewIndexType =
- IndexTy->isVectorTy()
- ? VectorType::get(NewScalarIndexTy,
- cast<VectorType>(IndexTy)->getElementCount())
- : NewScalarIndexTy;
- // If the element type has zero size then any index over it is equivalent
- // to an index of zero, so replace it with zero if it is not zero already.
- Type *EltTy = GTI.getIndexedType();
- if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
- if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
- *I = Constant::getNullValue(NewIndexType);
- MadeChange = true;
- }
- if (IndexTy != NewIndexType) {
- // If we are using a wider index than needed for this platform, shrink
- // it to what we need. If narrower, sign-extend it to what we need.
- // This explicit cast can make subsequent optimizations more obvious.
- *I = Builder.CreateIntCast(*I, NewIndexType, true);
- MadeChange = true;
- }
- }
- if (MadeChange)
- return &GEP;
- // Check to see if the inputs to the PHI node are getelementptr instructions.
- if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
- auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
- if (!Op1)
- return nullptr;
- // Don't fold a GEP into itself through a PHI node. This can only happen
- // through the back-edge of a loop. Folding a GEP into itself means that
- // the value of the previous iteration needs to be stored in the meantime,
- // thus requiring an additional register variable to be live, but not
- // actually achieving anything (the GEP still needs to be executed once per
- // loop iteration).
- if (Op1 == &GEP)
- return nullptr;
- int DI = -1;
- for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
- auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
- if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
- Op1->getSourceElementType() != Op2->getSourceElementType())
- return nullptr;
- // As for Op1 above, don't try to fold a GEP into itself.
- if (Op2 == &GEP)
- return nullptr;
- // Keep track of the type as we walk the GEP.
- Type *CurTy = nullptr;
- for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
- if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
- return nullptr;
- if (Op1->getOperand(J) != Op2->getOperand(J)) {
- if (DI == -1) {
- // We have not seen any differences in the GEPs feeding the PHI yet, so
- // we record this one if it is allowed to be a variable.
- // The first two arguments can vary for any GEP; the rest have to be
- // static for struct slots.
- if (J > 1) {
- assert(CurTy && "No current type?");
- if (CurTy->isStructTy())
- return nullptr;
- }
- DI = J;
- } else {
- // The GEP is different by more than one input. While this could be
- // extended to support GEPs that vary by more than one variable it
- // doesn't make sense since it greatly increases the complexity and
- // would result in an R+R+R addressing mode which no backend
- // directly supports and would need to be broken into several
- // simpler instructions anyway.
- return nullptr;
- }
- }
- // Sink down a layer of the type for the next iteration.
- if (J > 0) {
- if (J == 1) {
- CurTy = Op1->getSourceElementType();
- } else {
- CurTy =
- GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
- }
- }
- }
- }
- // If not all GEPs are identical we'll have to create a new PHI node.
- // Check that the old PHI node has only one use so that it will get
- // removed.
- if (DI != -1 && !PN->hasOneUse())
- return nullptr;
- auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
- if (DI == -1) {
- // All the GEPs feeding the PHI are identical. Clone one down into our
- // BB so that it can be merged with the current GEP.
- } else {
- // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
- // into the current block so it can be merged, and create a new PHI to
- // set that index.
- PHINode *NewPN;
- {
- IRBuilderBase::InsertPointGuard Guard(Builder);
- Builder.SetInsertPoint(PN);
- NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
- PN->getNumOperands());
- }
- for (auto &I : PN->operands())
- NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
- PN->getIncomingBlock(I));
- NewGEP->setOperand(DI, NewPN);
- }
- NewGEP->insertInto(GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
- return replaceOperand(GEP, 0, NewGEP);
- }
- if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
- if (Instruction *I = visitGEPOfGEP(GEP, Src))
- return I;
- // Skip if GEP source element type is scalable. The type alloc size is unknown
- // at compile-time.
- if (GEP.getNumIndices() == 1 && !IsGEPSrcEleScalable) {
- unsigned AS = GEP.getPointerAddressSpace();
- if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
- DL.getIndexSizeInBits(AS)) {
- uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
- bool Matched = false;
- uint64_t C;
- Value *V = nullptr;
- if (TyAllocSize == 1) {
- V = GEP.getOperand(1);
- Matched = true;
- } else if (match(GEP.getOperand(1),
- m_AShr(m_Value(V), m_ConstantInt(C)))) {
- if (TyAllocSize == 1ULL << C)
- Matched = true;
- } else if (match(GEP.getOperand(1),
- m_SDiv(m_Value(V), m_ConstantInt(C)))) {
- if (TyAllocSize == C)
- Matched = true;
- }
- // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y), but
- // only if both point to the same underlying object (otherwise provenance
- // is not necessarily retained).
- Value *Y;
- Value *X = GEP.getOperand(0);
- if (Matched &&
- match(V, m_Sub(m_PtrToInt(m_Value(Y)), m_PtrToInt(m_Specific(X)))) &&
- getUnderlyingObject(X) == getUnderlyingObject(Y))
- return CastInst::CreatePointerBitCastOrAddrSpaceCast(Y, GEPType);
- }
- }
- // We do not handle pointer-vector geps here.
- if (GEPType->isVectorTy())
- return nullptr;
- // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
- Value *StrippedPtr = PtrOp->stripPointerCasts();
- PointerType *StrippedPtrTy = cast<PointerType>(StrippedPtr->getType());
- // TODO: The basic approach of these folds is not compatible with opaque
- // pointers, because we can't use bitcasts as a hint for a desirable GEP
- // type. Instead, we should perform canonicalization directly on the GEP
- // type. For now, skip these.
- if (StrippedPtr != PtrOp && !StrippedPtrTy->isOpaque()) {
- bool HasZeroPointerIndex = false;
- Type *StrippedPtrEltTy = StrippedPtrTy->getNonOpaquePointerElementType();
- if (auto *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
- HasZeroPointerIndex = C->isZero();
- // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
- // into : GEP [10 x i8]* X, i32 0, ...
- //
- // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
- // into : GEP i8* X, ...
- //
- // This occurs when the program declares an array extern like "int X[];"
- if (HasZeroPointerIndex) {
- if (auto *CATy = dyn_cast<ArrayType>(GEPEltType)) {
- // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
- if (CATy->getElementType() == StrippedPtrEltTy) {
- // -> GEP i8* X, ...
- SmallVector<Value *, 8> Idx(drop_begin(GEP.indices()));
- GetElementPtrInst *Res = GetElementPtrInst::Create(
- StrippedPtrEltTy, StrippedPtr, Idx, GEP.getName());
- Res->setIsInBounds(GEP.isInBounds());
- if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace())
- return Res;
- // Insert Res, and create an addrspacecast.
- // e.g.,
- // GEP (addrspacecast i8 addrspace(1)* X to [0 x i8]*), i32 0, ...
- // ->
- // %0 = GEP i8 addrspace(1)* X, ...
- // addrspacecast i8 addrspace(1)* %0 to i8*
- return new AddrSpaceCastInst(Builder.Insert(Res), GEPType);
- }
- if (auto *XATy = dyn_cast<ArrayType>(StrippedPtrEltTy)) {
- // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
- if (CATy->getElementType() == XATy->getElementType()) {
- // -> GEP [10 x i8]* X, i32 0, ...
- // At this point, we know that the cast source type is a pointer
- // to an array of the same type as the destination pointer
- // array. Because the array type is never stepped over (there
- // is a leading zero) we can fold the cast into this GEP.
- if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) {
- GEP.setSourceElementType(XATy);
- return replaceOperand(GEP, 0, StrippedPtr);
- }
- // Cannot replace the base pointer directly because StrippedPtr's
- // address space is different. Instead, create a new GEP followed by
- // an addrspacecast.
- // e.g.,
- // GEP (addrspacecast [10 x i8] addrspace(1)* X to [0 x i8]*),
- // i32 0, ...
- // ->
- // %0 = GEP [10 x i8] addrspace(1)* X, ...
- // addrspacecast i8 addrspace(1)* %0 to i8*
- SmallVector<Value *, 8> Idx(GEP.indices());
- Value *NewGEP =
- Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, Idx,
- GEP.getName(), GEP.isInBounds());
- return new AddrSpaceCastInst(NewGEP, GEPType);
- }
- }
- }
- } else if (GEP.getNumOperands() == 2 && !IsGEPSrcEleScalable) {
- // Skip if GEP source element type is scalable. The type alloc size is
- // unknown at compile-time.
- // Transform things like:
- //   %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
- // into:
- //   %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V ; bitcast
- if (StrippedPtrEltTy->isArrayTy() &&
- DL.getTypeAllocSize(StrippedPtrEltTy->getArrayElementType()) ==
- DL.getTypeAllocSize(GEPEltType)) {
- Type *IdxType = DL.getIndexType(GEPType);
- Value *Idx[2] = {Constant::getNullValue(IdxType), GEP.getOperand(1)};
- Value *NewGEP = Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, Idx,
- GEP.getName(), GEP.isInBounds());
- // V and GEP are both pointer types --> BitCast
- return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP, GEPType);
- }
- // Transform things like:
- // %V = mul i64 %N, 4
- // %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V
- // into: %t1 = getelementptr i32* %arr, i32 %N; bitcast
- if (GEPEltType->isSized() && StrippedPtrEltTy->isSized()) {
- // Check that changing the type amounts to dividing the index by a scale
- // factor.
- uint64_t ResSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
- uint64_t SrcSize =
- DL.getTypeAllocSize(StrippedPtrEltTy).getFixedValue();
- if (ResSize && SrcSize % ResSize == 0) {
- Value *Idx = GEP.getOperand(1);
- unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
- uint64_t Scale = SrcSize / ResSize;
- // Earlier transforms ensure that the index has the right type
- // according to Data Layout, which considerably simplifies the
- // logic by eliminating implicit casts.
- assert(Idx->getType() == DL.getIndexType(GEPType) &&
- "Index type does not match the Data Layout preferences");
- bool NSW;
- if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
- // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
- // If the multiplication NewIdx * Scale may overflow then the new
- // GEP may not be "inbounds".
- Value *NewGEP =
- Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, NewIdx,
- GEP.getName(), GEP.isInBounds() && NSW);
- // The NewGEP must be pointer typed, so must the old one -> BitCast
- return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
- GEPType);
- }
- }
- }
- // Similarly, transform things like:
- //   getelementptr i8* bitcast ([100 x double]* %arr to i8*), i32 %tmp
- // (where tmp = 8*tmp2) into:
- //   getelementptr [100 x double]* %arr, i32 0, i32 %tmp2 ; bitcast
- if (GEPEltType->isSized() && StrippedPtrEltTy->isSized() &&
- StrippedPtrEltTy->isArrayTy()) {
- // Check that changing to the array element type amounts to dividing the
- // index by a scale factor.
- uint64_t ResSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
- uint64_t ArrayEltSize =
- DL.getTypeAllocSize(StrippedPtrEltTy->getArrayElementType())
- .getFixedValue();
- if (ResSize && ArrayEltSize % ResSize == 0) {
- Value *Idx = GEP.getOperand(1);
- unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
- uint64_t Scale = ArrayEltSize / ResSize;
- // Earlier transforms ensure that the index has the right type
- // according to the Data Layout, which considerably simplifies
- // the logic by eliminating implicit casts.
- assert(Idx->getType() == DL.getIndexType(GEPType) &&
- "Index type does not match the Data Layout preferences");
- bool NSW;
- if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
- // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
- // If the multiplication NewIdx * Scale may overflow then the new
- // GEP may not be "inbounds".
- Type *IndTy = DL.getIndexType(GEPType);
- Value *Off[2] = {Constant::getNullValue(IndTy), NewIdx};
- Value *NewGEP =
- Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, Off,
- GEP.getName(), GEP.isInBounds() && NSW);
- // The NewGEP must be pointer typed, so must the old one -> BitCast
- return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
- GEPType);
- }
- }
- }
- }
- }
- // addrspacecast between types is canonicalized as a bitcast, then an
- // addrspacecast. To take advantage of the below bitcast + struct GEP, look
- // through the addrspacecast.
- Value *ASCStrippedPtrOp = PtrOp;
- if (auto *ASC = dyn_cast<AddrSpaceCastInst>(PtrOp)) {
- // X = bitcast A addrspace(1)* to B addrspace(1)*
- // Y = addrspacecast X to B addrspace(2)*
- // Z = gep Y, <...constant indices...>
- // Into an addrspacecasted GEP of the struct.
- if (auto *BC = dyn_cast<BitCastInst>(ASC->getOperand(0)))
- ASCStrippedPtrOp = BC;
- }
- if (auto *BCI = dyn_cast<BitCastInst>(ASCStrippedPtrOp))
- if (Instruction *I = visitGEPOfBitcast(BCI, GEP))
- return I;
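- // If this GEP is not marked inbounds but its base is an alloca and the
- // accumulated constant offset provably stays within the allocation
- // (one-past-the-end included), it is safe to rewrite it as an inbounds GEP.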
- if (!GEP.isInBounds()) {
- unsigned IdxWidth =
- DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
- APInt BasePtrOffset(IdxWidth, 0);
- Value *UnderlyingPtrOp =
- PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL,
- BasePtrOffset);
- if (auto *AI = dyn_cast<AllocaInst>(UnderlyingPtrOp)) {
- if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
- BasePtrOffset.isNonNegative()) {
- APInt AllocSize(
- IdxWidth,
- DL.getTypeAllocSize(AI->getAllocatedType()).getKnownMinValue());
- if (BasePtrOffset.ule(AllocSize)) {
- return GetElementPtrInst::CreateInBounds(
- GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
- }
- }
- }
- }
- if (Instruction *R = foldSelectGEP(GEP, Builder))
- return R;
- return nullptr;
- }
- static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
- Instruction *AI) {
- if (isa<ConstantPointerNull>(V))
- return true;
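- // A value loaded directly from a global variable cannot be the address of
- // an unescaped allocation.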
- if (auto *LI = dyn_cast<LoadInst>(V))
- return isa<GlobalVariable>(LI->getPointerOperand());
- // Two distinct allocations will never be equal.
- return isAllocLikeFn(V, &TLI) && V != AI;
- }
- /// Given a call CB which uses an address UsedV, return true if we can prove the
- /// call's only possible effect is storing to UsedV.
- static bool isRemovableWrite(CallBase &CB, Value *UsedV,
- const TargetLibraryInfo &TLI) {
- if (!CB.use_empty())
- // TODO: add recursion if returned attribute is present
- return false;
- if (CB.isTerminator())
- // TODO: remove implementation restriction
- return false;
- if (!CB.willReturn() || !CB.doesNotThrow())
- return false;
- // If the only possible side effect of the call is writing to the alloca,
- // and the result isn't used, we can safely remove any reads implied by the
- // call including those which might read the alloca itself.
- std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
- return Dest && Dest->Ptr == UsedV;
- }
- static bool isAllocSiteRemovable(Instruction *AI,
- SmallVectorImpl<WeakTrackingVH> &Users,
- const TargetLibraryInfo &TLI) {
- SmallVector<Instruction*, 4> Worklist;
- const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
- Worklist.push_back(AI);
- do {
- Instruction *PI = Worklist.pop_back_val();
- for (User *U : PI->users()) {
- Instruction *I = cast<Instruction>(U);
- switch (I->getOpcode()) {
- default:
- // Give up the moment we see something we can't handle.
- return false;
- case Instruction::AddrSpaceCast:
- case Instruction::BitCast:
- case Instruction::GetElementPtr:
- Users.emplace_back(I);
- Worklist.push_back(I);
- continue;
- case Instruction::ICmp: {
- ICmpInst *ICI = cast<ICmpInst>(I);
- // We can fold eq/ne comparisons with null to false/true, respectively.
- // We also fold comparisons in some conditions provided the alloc has
- // not escaped (see isNeverEqualToUnescapedAlloc).
- if (!ICI->isEquality())
- return false;
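- // Fold only if the operand compared against the allocation can never be
- // equal to it (e.g. null or a distinct allocation).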
- unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
- if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
- return false;
- Users.emplace_back(I);
- continue;
- }
- case Instruction::Call:
- // Ignore no-op and store intrinsics.
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
- switch (II->getIntrinsicID()) {
- default:
- return false;
- case Intrinsic::memmove:
- case Intrinsic::memcpy:
- case Intrinsic::memset: {
- MemIntrinsic *MI = cast<MemIntrinsic>(II);
- if (MI->isVolatile() || MI->getRawDest() != PI)
- return false;
- [[fallthrough]];
- }
- case Intrinsic::assume:
- case Intrinsic::invariant_start:
- case Intrinsic::invariant_end:
- case Intrinsic::lifetime_start:
- case Intrinsic::lifetime_end:
- case Intrinsic::objectsize:
- Users.emplace_back(I);
- continue;
- case Intrinsic::launder_invariant_group:
- case Intrinsic::strip_invariant_group:
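- // These intrinsics return a pointer into the same allocation, so their
- // users must be scanned as well.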
- Users.emplace_back(I);
- Worklist.push_back(I);
- continue;
- }
- }
- if (isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
- Users.emplace_back(I);
- continue;
- }
- if (getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
- getAllocationFamily(I, &TLI) == Family) {
- assert(Family);
- Users.emplace_back(I);
- continue;
- }
- if (getReallocatedOperand(cast<CallBase>(I)) == PI &&
- getAllocationFamily(I, &TLI) == Family) {
- assert(Family);
- Users.emplace_back(I);
- Worklist.push_back(I);
- continue;
- }
- return false;
- case Instruction::Store: {
- StoreInst *SI = cast<StoreInst>(I);
- if (SI->isVolatile() || SI->getPointerOperand() != PI)
- return false;
- Users.emplace_back(I);
- continue;
- }
- }
- llvm_unreachable("missing a return?");
- }
- } while (!Worklist.empty());
- return true;
- }
- Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
- assert(isa<AllocaInst>(MI) || isRemovableAlloc(&cast<CallBase>(MI), &TLI));
- // If we have a malloc call which is only used in any amount of comparisons to
- // null and free calls, delete the calls and replace the comparisons with true
- // or false as appropriate.
- // This is based on the principle that we can substitute our own allocation
- // function (which will never return null) rather than knowledge of the
- // specific function being called. In some sense this can change the permitted
- // outputs of a program (when we convert a malloc to an alloca, the fact that
- // the allocation is now on the stack is potentially visible, for example),
- // but we believe it does so in a permissible manner.
- SmallVector<WeakTrackingVH, 64> Users;
- // If we are removing an alloca with a dbg.declare, insert dbg.value calls
- // before each store.
- SmallVector<DbgVariableIntrinsic *, 8> DVIs;
- std::unique_ptr<DIBuilder> DIB;
- if (isa<AllocaInst>(MI)) {
- findDbgUsers(DVIs, &MI);
- DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
- }
- if (isAllocSiteRemovable(&MI, Users, TLI)) {
- for (unsigned i = 0, e = Users.size(); i != e; ++i) {
- // Lower all @llvm.objectsize calls first because they may
- // use a bitcast/GEP of the alloca we are removing.
- if (!Users[i])
- continue;
- Instruction *I = cast<Instruction>(&*Users[i]);
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
- if (II->getIntrinsicID() == Intrinsic::objectsize) {
- Value *Result =
- lowerObjectSizeCall(II, DL, &TLI, AA, /*MustSucceed=*/true);
- replaceInstUsesWith(*I, Result);
- eraseInstFromFunction(*I);
- Users[i] = nullptr; // Skip examining in the next loop.
- }
- }
- }
- for (unsigned i = 0, e = Users.size(); i != e; ++i) {
- if (!Users[i])
- continue;
- Instruction *I = cast<Instruction>(&*Users[i]);
- if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
- replaceInstUsesWith(*C,
- ConstantInt::get(Type::getInt1Ty(C->getContext()),
- C->isFalseWhenEqual()));
- } else if (auto *SI = dyn_cast<StoreInst>(I)) {
- for (auto *DVI : DVIs)
- if (DVI->isAddressOfVariable())
- ConvertDebugDeclareToDebugValue(DVI, SI, *DIB);
- } else {
- // Casts, GEP, or anything else: we're about to delete this instruction,
- // so it can not have any valid uses.
- replaceInstUsesWith(*I, PoisonValue::get(I->getType()));
- }
- eraseInstFromFunction(*I);
- }
- if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
- // Replace invoke with a NOP intrinsic to maintain the original CFG
- Module *M = II->getModule();
- Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing);
- InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(),
- std::nullopt, "", II->getParent());
- }
- // Remove debug intrinsics which describe the value contained within the
- // alloca. In addition to removing dbg.{declare,addr} which simply point to
- // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
- //
- // ```
- // define void @foo(i32 %0) {
- // %a = alloca i32 ; Deleted.
- // store i32 %0, i32* %a
- // dbg.value(i32 %0, "arg0") ; Not deleted.
- // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted.
- // call void @trivially_inlinable_no_op(i32* %a)
- // ret void
- // }
- // ```
- //
- // This may not be required if we stop describing the contents of allocas
- // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
- // the LowerDbgDeclare utility.
- //
- // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
- // "arg0" dbg.value may be stale after the call. However, failing to remove
- // the DW_OP_deref dbg.value causes large gaps in location coverage.
- for (auto *DVI : DVIs)
- if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
- DVI->eraseFromParent();
- return eraseInstFromFunction(MI);
- }
- return nullptr;
- }
- /// Move the call to free before a NULL test.
- ///
- /// Check if this free is executed only after its argument has been tested
- /// against NULL (property 0).
- /// If so, it is legal to move this call into its predecessor block.
- ///
- /// The move is performed only if the block containing the call to free
- /// will be removed, i.e.:
- /// 1. it has only one predecessor P, and P has two successors
- /// 2. it contains the call, noops, and an unconditional branch
- /// 3. its successor is the same as its predecessor's successor
- ///
- /// Profitability is not a concern here; this function should be called only
- /// if the caller knows this transformation would be profitable (e.g., for
- /// code size).
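- ///
- /// Illustrative IR sketch (block and value names are made up):
- ///   entry:
- ///     %cmp = icmp eq i8* %x, null
- ///     br i1 %cmp, label %exit, label %free.bb
- ///   free.bb:
- ///     call void @free(i8* %x)
- ///     br label %exit
- ///   exit:
- ///     ...
- /// Here the call to free (and any noop casts in %free.bb) is moved up into
- /// %entry, before the branch, so that SimplifyCFG can later remove %free.bb
- /// and the now-redundant null test.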
- static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
- const DataLayout &DL) {
- Value *Op = FI.getArgOperand(0);
- BasicBlock *FreeInstrBB = FI.getParent();
- BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
- // Validate part of constraint #1: Only one predecessor
- // FIXME: We could handle more than one predecessor, but in that case we
- // would duplicate the call to free in each predecessor and it may
- // not be profitable even for code size.
- if (!PredBB)
- return nullptr;
- // Validate constraint #2: Does this block contain only the call to
- // free, noops, and an unconditional branch?
- BasicBlock *SuccBB;
- Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
- if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
- return nullptr;
- // If there are only 2 instructions in the block, at this point,
- // they are the call to free and the unconditional branch.
- // If there are more than 2 instructions, check that the extra ones are
- // noops, i.e., they won't hurt the performance of the generated code.
- if (FreeInstrBB->size() != 2) {
- for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
- if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
- continue;
- auto *Cast = dyn_cast<CastInst>(&Inst);
- if (!Cast || !Cast->isNoopCast(DL))
- return nullptr;
- }
- }
- // Validate the rest of constraint #1 by matching on the pred branch.
- Instruction *TI = PredBB->getTerminator();
- BasicBlock *TrueBB, *FalseBB;
- ICmpInst::Predicate Pred;
- if (!match(TI, m_Br(m_ICmp(Pred,
- m_CombineOr(m_Specific(Op),
- m_Specific(Op->stripPointerCasts())),
- m_Zero()),
- TrueBB, FalseBB)))
- return nullptr;
- if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
- return nullptr;
- // Validate constraint #3: Ensure the null case just falls through.
- if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
- return nullptr;
- assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
- "Broken CFG: missing edge from predecessor to successor");
- // At this point, we know that everything in FreeInstrBB can be moved
- // before TI.
- for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
- if (&Instr == FreeInstrBBTerminator)
- break;
- Instr.moveBefore(TI);
- }
- assert(FreeInstrBB->size() == 1 &&
- "Only the branch instruction should remain");
- // Now that we've moved the call to free before the NULL check, we have to
- // remove any attributes on its parameter that imply it's non-null, because
- // those attributes might have only been valid because of the NULL check, and
- // we can get miscompiles if we keep them. This is conservative if non-null is
- // also implied by something other than the NULL check, but it's guaranteed to
- // be correct, and the conservativeness won't matter in practice, since the
- // attributes are irrelevant for the call to free itself and the pointer
- // shouldn't be used after the call.
- AttributeList Attrs = FI.getAttributes();
- Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
- Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
- if (Dereferenceable.isValid()) {
- uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
- Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
- Attribute::Dereferenceable);
- Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
- }
- FI.setAttributes(Attrs);
- return &FI;
- }
- Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
- // free undef -> unreachable.
- if (isa<UndefValue>(Op)) {
- // Leave a marker since we can't modify the CFG here.
- CreateNonTerminatorUnreachable(&FI);
- return eraseInstFromFunction(FI);
- }
- // If we have 'free null', delete the instruction. This can happen in STL code
- // when lots of inlining happens.
- if (isa<ConstantPointerNull>(Op))
- return eraseInstFromFunction(FI);
- // If we had free(realloc(...)) with no intervening uses, then eliminate the
- // realloc() entirely.
- CallInst *CI = dyn_cast<CallInst>(Op);
- if (CI && CI->hasOneUse())
- if (Value *ReallocatedOp = getReallocatedOperand(CI))
- return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
- // If we optimize for code size, try to move the call to free before the null
- // test so that SimplifyCFG can remove the empty block and dead code
- // elimination can remove the branch. I.e., this helps to turn something like:
- // if (foo) free(foo);
- // into
- // free(foo);
- //
- // Note that we can only do this for 'free' and not for any flavor of
- // 'operator delete'; there is no 'operator delete' symbol for which we are
- // permitted to invent a call, even if we're passing in a null pointer.
- if (MinimizeSize) {
- LibFunc Func;
- if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
- if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL))
- return I;
- }
- return nullptr;
- }
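- // The result of a musttail call must be returned untouched by the caller, so
- // visitReturnInst below must not replace such a result.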
- static bool isMustTailCall(Value *V) {
- if (auto *CI = dyn_cast<CallInst>(V))
- return CI->isMustTailCall();
- return false;
- }
- Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
- if (RI.getNumOperands() == 0) // ret void
- return nullptr;
- Value *ResultOp = RI.getOperand(0);
- Type *VTy = ResultOp->getType();
- if (!VTy->isIntegerTy() || isa<Constant>(ResultOp))
- return nullptr;
- // Don't replace result of musttail calls.
- if (isMustTailCall(ResultOp))
- return nullptr;
- // There might be assume intrinsics dominating this return that completely
- // determine the value. If so, constant fold it.
- KnownBits Known = computeKnownBits(ResultOp, 0, &RI);
- if (Known.isConstant())
- return replaceOperand(RI, 0,
- Constant::getIntegerValue(VTy, Known.getConstant()));
- return nullptr;
- }
- // WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
- Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) {
- // Try to remove the previous instruction if it must lead to unreachable.
- // This includes instructions like stores and "llvm.assume" that may not get
- // removed by simple dead code elimination.
- while (Instruction *Prev = I.getPrevNonDebugInstruction()) {
- // While we theoretically can erase EH, that would result in a block that
- // used to start with an EH no longer starting with EH, which is invalid.
- // To make it valid, we'd need to fixup predecessors to no longer refer to
- // this block, but that changes CFG, which is not allowed in InstCombine.
- if (Prev->isEHPad())
- return nullptr; // Can not drop any more instructions. We're done here.
- if (!isGuaranteedToTransferExecutionToSuccessor(Prev))
- return nullptr; // Can not drop any more instructions. We're done here.
- // Otherwise, this instruction can be freely erased,
- // even if it is not side-effect free.
- // A value may still have uses before we process it here (for example, in
- // another unreachable block), so convert those to poison.
- replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
- eraseInstFromFunction(*Prev);
- }
- assert(I.getParent()->sizeWithoutDebug() == 1 && "The block is now empty.");
- // FIXME: recurse into unconditional predecessors?
- return nullptr;
- }
- Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {
- assert(BI.isUnconditional() && "Only for unconditional branches.");
- // If a store is the second-to-last instruction in the basic block
- // (excluding debug info and bitcasts of pointers) and the block ends with
- // an unconditional branch, try to move the store to the successor block.
- auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
- auto IsNoopInstrForStoreMerging = [](BasicBlock::iterator BBI) {
- return BBI->isDebugOrPseudoInst() ||
- (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());
- };
- BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
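- // Walk backwards from BBI, skipping debug/pseudo instructions and no-op
- // pointer bitcasts, and report whether we end up at a store.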
- do {
- if (BBI != FirstInstr)
- --BBI;
- } while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));
- return dyn_cast<StoreInst>(BBI);
- };
- if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
- if (mergeStoreIntoSuccessor(*SI))
- return &BI;
- return nullptr;
- }
- Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {
- if (BI.isUnconditional())
- return visitUnconditionalBranchInst(BI);
- // Change br (not X), label True, label False to: br X, label False, True
- Value *Cond = BI.getCondition();
- Value *X;
- if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
- // Swap Destinations and condition...
- BI.swapSuccessors();
- return replaceOperand(BI, 0, X);
- }
- // Canonicalize logical-and-with-invert as logical-or-with-invert.
- // This is done by inverting the condition and swapping successors:
- // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
- Value *Y;
- if (isa<SelectInst>(Cond) &&
- match(Cond,
- m_OneUse(m_LogicalAnd(m_Value(X), m_OneUse(m_Not(m_Value(Y))))))) {
- Value *NotX = Builder.CreateNot(X, "not." + X->getName());
- Value *Or = Builder.CreateLogicalOr(NotX, Y);
- BI.swapSuccessors();
- return replaceOperand(BI, 0, Or);
- }
- // If the condition is irrelevant, remove the use so that other
- // transforms on the condition become more effective.
- if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
- return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
- // Canonicalize, for example, fcmp_one -> fcmp_oeq.
- CmpInst::Predicate Pred;
- if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
- !isCanonicalPredicate(Pred)) {
- // Swap destinations and condition.
- auto *Cmp = cast<CmpInst>(Cond);
- Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
- BI.swapSuccessors();
- Worklist.push(Cmp);
- return &BI;
- }
- return nullptr;
- }
- Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {
- Value *Cond = SI.getCondition();
- Value *Op0;
- ConstantInt *AddRHS;
- if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
- // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
- for (auto Case : SI.cases()) {
- Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
- assert(isa<ConstantInt>(NewCase) &&
- "Result of expression should be constant");
- Case.setValue(cast<ConstantInt>(NewCase));
- }
- return replaceOperand(SI, 0, Op0);
- }
- KnownBits Known = computeKnownBits(Cond, 0, &SI);
- unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
- unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
- // Compute the number of leading bits we can ignore.
- // TODO: A better way to determine this would use ComputeNumSignBits().
- for (const auto &C : SI.cases()) {
- LeadingKnownZeros = std::min(
- LeadingKnownZeros, C.getCaseValue()->getValue().countLeadingZeros());
- LeadingKnownOnes = std::min(
- LeadingKnownOnes, C.getCaseValue()->getValue().countLeadingOnes());
- }
- unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
- // Shrink the condition operand if the new type is smaller than the old type.
- // But do not shrink to a non-standard type, because the backend can't
- // generate good code for that yet.
- // TODO: We can make it aggressive again after fixing PR39569.
- if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
- shouldChangeType(Known.getBitWidth(), NewWidth)) {
- IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
- Builder.SetInsertPoint(&SI);
- Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
- for (auto Case : SI.cases()) {
- APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
- Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
- }
- return replaceOperand(SI, 0, NewCond);
- }
- return nullptr;
- }
- Instruction *
- InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
- auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
- if (!WO)
- return nullptr;
- Intrinsic::ID OvID = WO->getIntrinsicID();
- const APInt *C = nullptr;
- if (match(WO->getRHS(), m_APIntAllowUndef(C))) {
- if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
- OvID == Intrinsic::umul_with_overflow)) {
- // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
- if (C->isAllOnes())
- return BinaryOperator::CreateNeg(WO->getLHS());
- // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
- if (C->isPowerOf2()) {
- return BinaryOperator::CreateShl(
- WO->getLHS(),
- ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
- }
- }
- }
- // We're extracting from an overflow intrinsic. See if we're the only user.
- // That allows us to simplify multiple result intrinsics to simpler things
- // that just get one value.
- if (!WO->hasOneUse())
- return nullptr;
- // Check if we're grabbing only the result of a 'with overflow' intrinsic
- // and replace it with a traditional binary instruction.
- if (*EV.idx_begin() == 0) {
- Instruction::BinaryOps BinOp = WO->getBinaryOp();
- Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
- // Replace the old instruction's uses with poison.
- replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
- eraseInstFromFunction(*WO);
- return BinaryOperator::Create(BinOp, LHS, RHS);
- }
- assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
- // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
- if (OvID == Intrinsic::usub_with_overflow)
- return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
- // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
- // +1 is not representable in a signed 1-bit value, so that is an overflow.
- if (OvID == Intrinsic::smul_with_overflow &&
- WO->getLHS()->getType()->isIntOrIntVectorTy(1))
- return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
- // If only the overflow result is used, and the right hand side is a
- // constant (or constant splat), we can remove the intrinsic by directly
- // checking for overflow.
- if (C) {
- // Compute the no-wrap range for LHS given RHS=C, then construct an
- // equivalent icmp, potentially using an offset.
- ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
- WO->getBinaryOp(), *C, WO->getNoWrapKind());
- CmpInst::Predicate Pred;
- APInt NewRHSC, Offset;
- NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
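- // The equivalent icmp is true exactly when the operation does not wrap, so
- // the inverted predicate below tests for overflow.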
- auto *OpTy = WO->getRHS()->getType();
- auto *NewLHS = WO->getLHS();
- if (Offset != 0)
- NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
- return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
- ConstantInt::get(OpTy, NewRHSC));
- }
- return nullptr;
- }
- Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
- Value *Agg = EV.getAggregateOperand();
- if (!EV.hasIndices())
- return replaceInstUsesWith(EV, Agg);
- if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
- SQ.getWithInstruction(&EV)))
- return replaceInstUsesWith(EV, V);
- if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
- // We're extracting from an insertvalue instruction, compare the indices
- const unsigned *exti, *exte, *insi, *inse;
- for (exti = EV.idx_begin(), insi = IV->idx_begin(),
- exte = EV.idx_end(), inse = IV->idx_end();
- exti != exte && insi != inse;
- ++exti, ++insi) {
- if (*insi != *exti)
- // The insert and extract both reference distinctly different elements.
- // This means the extract is not influenced by the insert, and we can
- // replace the aggregate operand of the extract with the aggregate
- // operand of the insert. i.e., replace
- // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
- // %E = extractvalue { i32, { i32 } } %I, 0
- // with
- // %E = extractvalue { i32, { i32 } } %A, 0
- return ExtractValueInst::Create(IV->getAggregateOperand(),
- EV.getIndices());
- }
- if (exti == exte && insi == inse)
- // Both iterators are at the end: Index lists are identical. Replace
- // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
- // %C = extractvalue { i32, { i32 } } %B, 1, 0
- // with "i32 42"
- return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
- if (exti == exte) {
- // The extract list is a prefix of the insert list. i.e. replace
- // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
- // %E = extractvalue { i32, { i32 } } %I, 1
- // with
- // %X = extractvalue { i32, { i32 } } %A, 1
- // %E = insertvalue { i32 } %X, i32 42, 0
- // by switching the order of the insert and extract (though the
- // insertvalue should be left in, since it may have other uses).
- Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
- EV.getIndices());
- return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
- ArrayRef(insi, inse));
- }
- if (insi == inse)
- // The insert list is a prefix of the extract list
- // We can simply remove the common indices from the extract and make it
- // operate on the inserted value instead of the insertvalue result.
- // i.e., replace
- // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
- // %E = extractvalue { i32, { i32 } } %I, 1, 0
- // with
- // %E extractvalue { i32 } { i32 42 }, 0
- return ExtractValueInst::Create(IV->getInsertedValueOperand(),
- ArrayRef(exti, exte));
- }
- if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
- return R;
- if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
- // If the (non-volatile) load only has one use, we can rewrite this to a
- // load from a GEP. This reduces the size of the load. If a load is used
- // only by extractvalue instructions then this either must have been
- // optimized before, or it is a struct with padding, in which case we
- // don't want to do the transformation as it loses padding knowledge.
- if (L->isSimple() && L->hasOneUse()) {
- // extractvalue has integer indices, getelementptr has Value*s. Convert.
- SmallVector<Value*, 4> Indices;
- // Prefix an i32 0 since we need the first element.
- Indices.push_back(Builder.getInt32(0));
- for (unsigned Idx : EV.indices())
- Indices.push_back(Builder.getInt32(Idx));
- // We need to insert these at the location of the old load, not at that of
- // the extractvalue.
- Builder.SetInsertPoint(L);
- Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
- L->getPointerOperand(), Indices);
- Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
- // Whatever aliasing information we had for the original load must also
- // hold for the smaller load, so propagate the annotations.
- NL->setAAMetadata(L->getAAMetadata());
- // Returning the load directly will cause the main loop to insert it in
- // the wrong spot, so use replaceInstUsesWith().
- return replaceInstUsesWith(EV, NL);
- }
- }
- if (auto *PN = dyn_cast<PHINode>(Agg))
- if (Instruction *Res = foldOpIntoPhi(EV, PN))
- return Res;
- // We could simplify extracts from other values. Note that nested extracts may
- // already be simplified implicitly by the above: extract (extract (insert) )
- // will be translated into extract ( insert ( extract ) ) first and then just
- // the value inserted, if appropriate. Similarly for extracts from single-use
- // loads: extract (extract (load)) will be translated to extract (load (gep))
- // and if again single-use then via load (gep (gep)) to load (gep).
- // However, double extracts from e.g. function arguments or return values
- // aren't handled yet.
- return nullptr;
- }
- /// Return 'true' if the given typeinfo will match anything.
- static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
- switch (Personality) {
- case EHPersonality::GNU_C:
- case EHPersonality::GNU_C_SjLj:
- case EHPersonality::Rust:
- // The GCC C EH and Rust personalities only exist to support cleanups, so
- // it's not clear what the semantics of catch clauses are.
- return false;
- case EHPersonality::Unknown:
- return false;
- case EHPersonality::GNU_Ada:
- // While __gnat_all_others_value will match any Ada exception, it doesn't
- // match foreign exceptions (or didn't, before gcc-4.7).
- return false;
- case EHPersonality::GNU_CXX:
- case EHPersonality::GNU_CXX_SjLj:
- case EHPersonality::GNU_ObjC:
- case EHPersonality::MSVC_X86SEH:
- case EHPersonality::MSVC_TableSEH:
- case EHPersonality::MSVC_CXX:
- case EHPersonality::CoreCLR:
- case EHPersonality::Wasm_CXX:
- case EHPersonality::XL_CXX:
- return TypeInfo->isNullValue();
- }
- llvm_unreachable("invalid enum");
- }
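- // Comparator used below to order landingpad filter clauses by the number of
- // elements they contain (shortest first).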
- static bool shorter_filter(const Value *LHS, const Value *RHS) {
- return
- cast<ArrayType>(LHS->getType())->getNumElements()
- <
- cast<ArrayType>(RHS->getType())->getNumElements();
- }
- Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) {
- // The logic here should be correct for any real-world personality function.
- // However if that turns out not to be true, the offending logic can always
- // be conditioned on the personality function, like the catch-all logic is.
- EHPersonality Personality =
- classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
- // Simplify the list of clauses, e.g., by removing repeated catch clauses
- // (these are often created by inlining).
- bool MakeNewInstruction = false; // If true, recreate using the following:
- SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
- bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
- SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
- for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
- bool isLastClause = i + 1 == e;
- if (LI.isCatch(i)) {
- // A catch clause.
- Constant *CatchClause = LI.getClause(i);
- Constant *TypeInfo = CatchClause->stripPointerCasts();
- // If we already saw this clause, there is no point in having a second
- // copy of it.
- if (AlreadyCaught.insert(TypeInfo).second) {
- // This catch clause was not already seen.
- NewClauses.push_back(CatchClause);
- } else {
- // Repeated catch clause - drop the redundant copy.
- MakeNewInstruction = true;
- }
- // If this is a catch-all then there is no point in keeping any following
- // clauses or marking the landingpad as having a cleanup.
- if (isCatchAll(Personality, TypeInfo)) {
- if (!isLastClause)
- MakeNewInstruction = true;
- CleanupFlag = false;
- break;
- }
- } else {
- // A filter clause. If any of the filter elements were already caught
- // then they can be dropped from the filter. It is tempting to try to
- // exploit the filter further by saying that any typeinfo that does not
- // occur in the filter can't be caught later (and thus can be dropped).
- // However this would be wrong, since typeinfos can match without being
- // equal (for example if one represents a C++ class, and the other some
- // class derived from it).
- assert(LI.isFilter(i) && "Unsupported landingpad clause!");
- Constant *FilterClause = LI.getClause(i);
- ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
- unsigned NumTypeInfos = FilterType->getNumElements();
- // An empty filter catches everything, so there is no point in keeping any
- // following clauses or marking the landingpad as having a cleanup. By
- // dealing with this case here the following code is made a bit simpler.
- if (!NumTypeInfos) {
- NewClauses.push_back(FilterClause);
- if (!isLastClause)
- MakeNewInstruction = true;
- CleanupFlag = false;
- break;
- }
- bool MakeNewFilter = false; // If true, make a new filter.
- SmallVector<Constant *, 16> NewFilterElts; // New elements.
- if (isa<ConstantAggregateZero>(FilterClause)) {
- // Not an empty filter - it contains at least one null typeinfo.
- assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
- Constant *TypeInfo =
- Constant::getNullValue(FilterType->getElementType());
- // If this typeinfo is a catch-all then the filter can never match.
- if (isCatchAll(Personality, TypeInfo)) {
- // Throw the filter away.
- MakeNewInstruction = true;
- continue;
- }
- // There is no point in having multiple copies of this typeinfo, so
- // discard all but the first copy if there is more than one.
- NewFilterElts.push_back(TypeInfo);
- if (NumTypeInfos > 1)
- MakeNewFilter = true;
- } else {
- ConstantArray *Filter = cast<ConstantArray>(FilterClause);
- SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
- NewFilterElts.reserve(NumTypeInfos);
- // Remove any filter elements that were already caught or that already
- // occurred in the filter. While there, see if any of the elements are
- // catch-alls. If so, the filter can be discarded.
- bool SawCatchAll = false;
- for (unsigned j = 0; j != NumTypeInfos; ++j) {
- Constant *Elt = Filter->getOperand(j);
- Constant *TypeInfo = Elt->stripPointerCasts();
- if (isCatchAll(Personality, TypeInfo)) {
- // This element is a catch-all. Bail out, noting this fact.
- SawCatchAll = true;
- break;
- }
- // Even if we've seen a type in a catch clause, we don't want to
- // remove it from the filter. An unexpected type handler may be
- // set up for a call site which throws an exception of the same
- // type caught. In order for the exception thrown by the unexpected
- // handler to propagate correctly, the filter must be correctly
- // described for the call site.
- //
- // Example:
- //
- // void unexpected() { throw 1;}
- // void foo() throw (int) {
- // std::set_unexpected(unexpected);
- // try {
- // throw 2.0;
- // } catch (int i) {}
- // }
- // There is no point in having multiple copies of the same typeinfo in
- // a filter, so only add it if we didn't already.
- if (SeenInFilter.insert(TypeInfo).second)
- NewFilterElts.push_back(cast<Constant>(Elt));
- }
- // A filter containing a catch-all cannot match anything by definition.
- if (SawCatchAll) {
- // Throw the filter away.
- MakeNewInstruction = true;
- continue;
- }
- // If we dropped something from the filter, make a new one.
- if (NewFilterElts.size() < NumTypeInfos)
- MakeNewFilter = true;
- }
- if (MakeNewFilter) {
- FilterType = ArrayType::get(FilterType->getElementType(),
- NewFilterElts.size());
- FilterClause = ConstantArray::get(FilterType, NewFilterElts);
- MakeNewInstruction = true;
- }
- NewClauses.push_back(FilterClause);
- // If the new filter is empty then it will catch everything so there is
- // no point in keeping any following clauses or marking the landingpad
- // as having a cleanup. The case of the original filter being empty was
- // already handled above.
- if (MakeNewFilter && !NewFilterElts.size()) {
- assert(MakeNewInstruction && "New filter but not a new instruction!");
- CleanupFlag = false;
- break;
- }
- }
- }
- // If several filters occur in a row then reorder them so that the shortest
- // filters come first (those with the smallest number of elements). This is
- // advantageous because shorter filters are more likely to match, speeding up
- // unwinding, but mostly because it increases the effectiveness of the other
- // filter optimizations below.
- for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
- unsigned j;
- // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
- for (j = i; j != e; ++j)
- if (!isa<ArrayType>(NewClauses[j]->getType()))
- break;
- // Check whether the filters are already sorted by length. We need to know
- // if sorting them is actually going to do anything so that we only make a
- // new landingpad instruction if it does.
- for (unsigned k = i; k + 1 < j; ++k)
- if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
- // Not sorted, so sort the filters now. Doing an unstable sort would be
- // correct too but reordering filters pointlessly might confuse users.
- std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
- shorter_filter);
- MakeNewInstruction = true;
- break;
- }
- // Look for the next batch of filters.
- i = j + 1;
- }
- // If typeinfos matched if and only if equal, then the elements of a filter L
- // that occurs later than a filter F could be replaced by the intersection of
- // the elements of F and L. In reality two typeinfos can match without being
- // equal (for example if one represents a C++ class, and the other some class
- // derived from it) so it would be wrong to perform this transform in general.
- // However the transform is correct and useful if F is a subset of L. In that
- // case L can be replaced by F, and thus removed altogether since repeating a
- // filter is pointless. So here we look at all pairs of filters F and L where
- // L follows F in the list of clauses, and remove L if every element of F is
- // an element of L. This can occur when inlining C++ functions with exception
- // specifications.
- for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
- // Examine each filter in turn.
- Value *Filter = NewClauses[i];
- ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
- if (!FTy)
- // Not a filter - skip it.
- continue;
- unsigned FElts = FTy->getNumElements();
- // Examine each filter following this one. Doing this backwards means that
- // we don't have to worry about filters disappearing under us when removed.
- for (unsigned j = NewClauses.size() - 1; j != i; --j) {
- Value *LFilter = NewClauses[j];
- ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
- if (!LTy)
- // Not a filter - skip it.
- continue;
- // If Filter is a subset of LFilter, i.e. every element of Filter is also
- // an element of LFilter, then discard LFilter.
- SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
- // If Filter is empty then it is a subset of LFilter.
- if (!FElts) {
- // Discard LFilter.
- NewClauses.erase(J);
- MakeNewInstruction = true;
- // Move on to the next filter.
- continue;
- }
- unsigned LElts = LTy->getNumElements();
- // If Filter is longer than LFilter then it cannot be a subset of it.
- if (FElts > LElts)
- // Move on to the next filter.
- continue;
- // At this point we know that LFilter has at least one element.
- if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
- // Filter is a subset of LFilter iff Filter contains only zeros (as we
- // already know that Filter is not longer than LFilter).
- if (isa<ConstantAggregateZero>(Filter)) {
- assert(FElts <= LElts && "Should have handled this case earlier!");
- // Discard LFilter.
- NewClauses.erase(J);
- MakeNewInstruction = true;
- }
- // Move on to the next filter.
- continue;
- }
- ConstantArray *LArray = cast<ConstantArray>(LFilter);
- if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
- // Since Filter is non-empty and contains only zeros, it is a subset of
- // LFilter iff LFilter contains a zero.
- assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
- for (unsigned l = 0; l != LElts; ++l)
- if (LArray->getOperand(l)->isNullValue()) {
- // LFilter contains a zero - discard it.
- NewClauses.erase(J);
- MakeNewInstruction = true;
- break;
- }
- // Move on to the next filter.
- continue;
- }
- // At this point we know that both filters are ConstantArrays. Loop over
- // operands to see whether every element of Filter is also an element of
- // LFilter. Since filters tend to be short this is probably faster than
- // using a method that scales nicely.
- ConstantArray *FArray = cast<ConstantArray>(Filter);
- bool AllFound = true;
- for (unsigned f = 0; f != FElts; ++f) {
- Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
- AllFound = false;
- for (unsigned l = 0; l != LElts; ++l) {
- Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
- if (LTypeInfo == FTypeInfo) {
- AllFound = true;
- break;
- }
- }
- if (!AllFound)
- break;
- }
- if (AllFound) {
- // Discard LFilter.
- NewClauses.erase(J);
- MakeNewInstruction = true;
- }
- // Move on to the next filter.
- }
- }
- // If we changed any of the clauses, replace the old landingpad instruction
- // with a new one.
- if (MakeNewInstruction) {
- LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
- NewClauses.size());
- for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
- NLI->addClause(NewClauses[i]);
- // A landing pad with no clauses must have the cleanup flag set. It is
- // theoretically possible, though highly unlikely, that we eliminated all
- // clauses. If so, force the cleanup flag to true.
- if (NewClauses.empty())
- CleanupFlag = true;
- NLI->setCleanup(CleanupFlag);
- return NLI;
- }
- // Even if none of the clauses changed, we may nonetheless have understood
- // that the cleanup flag is pointless. Clear it if so.
- if (LI.isCleanup() != CleanupFlag) {
- assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
- LI.setCleanup(CleanupFlag);
- return &LI;
- }
- return nullptr;
- }
- Value *
- InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
- // Try to push freeze through instructions that propagate but don't produce
- // poison as far as possible. If the operand of the freeze satisfies three
- // conditions: 1) it has one use, 2) it does not produce poison, and 3) all
- // but one of its operands are guaranteed non-poison, then push the freeze
- // through to the one operand that is not guaranteed non-poison. The actual
- // transform is as follows.
- // Op1 = ... ; Op1 can be poison
- // Op0 = Inst(Op1, NonPoisonOps...) ; Op0 has only one use and all of its
- // ; other operands are guaranteed non-poison
- // ... = Freeze(Op0)
- // =>
- // Op1 = ...
- // Op1.fr = Freeze(Op1)
- // ... = Inst(Op1.fr, NonPoisonOps...)
- auto *OrigOp = OrigFI.getOperand(0);
- auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
- // While we could change the other users of OrigOp to use freeze(OrigOp), that
- // potentially reduces their optimization potential, so let's only do this if
- // OrigOp is only used by the freeze.
- if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
- return nullptr;
- // We can't push the freeze through an instruction which can itself create
- // poison. If the only source of new poison is flags, we can simply
- // strip them (since we know the only use is the freeze and nothing can
- // benefit from them.)
- if (canCreateUndefOrPoison(cast<Operator>(OrigOp),
- /*ConsiderFlagsAndMetadata*/ false))
- return nullptr;
- // If operand is guaranteed not to be poison, there is no need to add freeze
- // to the operand. So we first find the operand that is not guaranteed to be
- // poison.
- Use *MaybePoisonOperand = nullptr;
- for (Use &U : OrigOpInst->operands()) {
- if (isa<MetadataAsValue>(U.get()) ||
- isGuaranteedNotToBeUndefOrPoison(U.get()))
- continue;
- if (!MaybePoisonOperand)
- MaybePoisonOperand = &U;
- else
- return nullptr;
- }
- OrigOpInst->dropPoisonGeneratingFlagsAndMetadata();
- // If all operands are guaranteed to be non-poison, we can drop freeze.
- if (!MaybePoisonOperand)
- return OrigOp;
- Builder.SetInsertPoint(OrigOpInst);
- auto *FrozenMaybePoisonOperand = Builder.CreateFreeze(
- MaybePoisonOperand->get(), MaybePoisonOperand->get()->getName() + ".fr");
- replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
- return OrigOp;
- }
- Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
- PHINode *PN) {
- // Detect whether this is a recurrence with a start value and some number of
- // backedge values. We'll check whether we can push the freeze through the
- // backedge values (possibly dropping poison flags along the way) until we
- // reach the phi again. In that case, we can move the freeze to the start
- // value.
- Use *StartU = nullptr;
- SmallVector<Value *> Worklist;
- for (Use &U : PN->incoming_values()) {
- if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
- // Add backedge value to worklist.
- Worklist.push_back(U.get());
- continue;
- }
- // Don't bother handling multiple start values.
- if (StartU)
- return nullptr;
- StartU = &U;
- }
- if (!StartU || Worklist.empty())
- return nullptr; // Not a recurrence.
- Value *StartV = StartU->get();
- BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
- bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
- // We can't insert a freeze if the start value is the result of the
- // terminator (e.g. an invoke).
- if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
- return nullptr;
- SmallPtrSet<Value *, 32> Visited;
- SmallVector<Instruction *> DropFlags;
- while (!Worklist.empty()) {
- Value *V = Worklist.pop_back_val();
- if (!Visited.insert(V).second)
- continue;
- if (Visited.size() > 32)
- return nullptr; // Limit the total number of values we inspect.
- // Assume that PN is non-poison, because it will be after the transform.
- if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
- continue;
- Instruction *I = dyn_cast<Instruction>(V);
- if (!I || canCreateUndefOrPoison(cast<Operator>(I),
- /*ConsiderFlagsAndMetadata*/ false))
- return nullptr;
- DropFlags.push_back(I);
- append_range(Worklist, I->operands());
- }
- for (Instruction *I : DropFlags)
- I->dropPoisonGeneratingFlagsAndMetadata();
- if (StartNeedsFreeze) {
- Builder.SetInsertPoint(StartBB->getTerminator());
- Value *FrozenStartV = Builder.CreateFreeze(StartV,
- StartV->getName() + ".fr");
- replaceUse(*StartU, FrozenStartV);
- }
- return replaceInstUsesWith(FI, PN);
- }
- bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
- Value *Op = FI.getOperand(0);
- if (isa<Constant>(Op) || Op->hasOneUse())
- return false;
- // Move the freeze directly after the definition of its operand, so that
- // it dominates the maximum number of uses. Note that it may not dominate
- // *all* uses if the operand is an invoke/callbr and the use is in a phi on
- // the normal/default destination. This is why the domination check in the
- // replacement below is still necessary.
- Instruction *MoveBefore;
- if (isa<Argument>(Op)) {
- MoveBefore =
- &*FI.getFunction()->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
- } else {
- MoveBefore = cast<Instruction>(Op)->getInsertionPointAfterDef();
- if (!MoveBefore)
- return false;
- }
- bool Changed = false;
- if (&FI != MoveBefore) {
- FI.moveBefore(MoveBefore);
- Changed = true;
- }
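- // Redirect all uses of Op that the freeze dominates to use the frozen value
- // instead.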
- Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
- bool Dominates = DT.dominates(&FI, U);
- Changed |= Dominates;
- return Dominates;
- });
- return Changed;
- }
- Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
- Value *Op0 = I.getOperand(0);
- if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
- return replaceInstUsesWith(I, V);
- // freeze (phi const, x) --> phi const, (freeze x)
- if (auto *PN = dyn_cast<PHINode>(Op0)) {
- if (Instruction *NV = foldOpIntoPhi(I, PN))
- return NV;
- if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
- return NV;
- }
- if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
- return replaceInstUsesWith(I, NI);
- // If I is freeze(undef), check its uses and fold it to a fixed constant.
- // - or: pick -1
- // - select's condition: if the true value is constant, choose it by making
- // the condition true.
- // - default: pick 0
- //
- // Note that this transform is intentionally done here rather than
- // via an analysis in InstSimplify or at individual user sites. That is
- // because we must produce the same value for all uses of the freeze -
- // it's the reason "freeze" exists!
- //
- // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
- // duplicating logic for binops at least.
- auto getUndefReplacement = [&I](Type *Ty) {
- Constant *BestValue = nullptr;
- Constant *NullValue = Constant::getNullValue(Ty);
- for (const auto *U : I.users()) {
- Constant *C = NullValue;
- if (match(U, m_Or(m_Value(), m_Value())))
- C = ConstantInt::getAllOnesValue(Ty);
- else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
- C = ConstantInt::getTrue(Ty);
- if (!BestValue)
- BestValue = C;
- else if (BestValue != C)
- BestValue = NullValue;
- }
- assert(BestValue && "Must have at least one use");
- return BestValue;
- };
- if (match(Op0, m_Undef()))
- return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
- Constant *C;
- if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement()) {
- Constant *ReplaceC = getUndefReplacement(I.getType()->getScalarType());
- return replaceInstUsesWith(I, Constant::replaceUndefsWith(C, ReplaceC));
- }
- // Replace uses of Op with freeze(Op).
- if (freezeOtherUses(I))
- return &I;
- return nullptr;
- }
- /// Check for the case where the call writes to an otherwise dead alloca. This
- /// shows up for unused out-params in idiomatic C/C++ code. Note that this
- /// helper *only* analyzes the write; it doesn't check any other legality
- /// aspect.
- static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
- auto *CB = dyn_cast<CallBase>(I);
- if (!CB)
- // TODO: handle e.g. store to alloca here - only worth doing if we extend
- // to allow reload along used path as described below. Otherwise, this
- // is simply a store to a dead allocation which will be removed.
- return false;
- std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
- if (!Dest)
- return false;
- auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
- if (!AI)
- // TODO: allow malloc?
- return false;
- // TODO: allow memory access dominated by move point? Note that since AI
- // could have a reference to itself captured by the call, we would need to
- // account for cycles in doing so.
- SmallVector<const User *> AllocaUsers;
- SmallPtrSet<const User *, 4> Visited;
- auto pushUsers = [&](const Instruction &I) {
- for (const User *U : I.users()) {
- if (Visited.insert(U).second)
- AllocaUsers.push_back(U);
- }
- };
- pushUsers(*AI);
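- // Walk the transitive users of the alloca; any user other than pointer
- // casts/GEPs and the call itself means we cannot treat the alloca as dead,
- // so bail out.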
- while (!AllocaUsers.empty()) {
- auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
- if (isa<BitCastInst>(UserI) || isa<GetElementPtrInst>(UserI) ||
- isa<AddrSpaceCastInst>(UserI)) {
- pushUsers(*UserI);
- continue;
- }
- if (UserI == CB)
- continue;
- // TODO: support lifetime.start/end here
- return false;
- }
- return true;
- }
- /// Try to move the specified instruction from its current block into the
- /// beginning of DestBlock, which can only happen if it's safe to move the
- /// instruction past all of the instructions between it and the end of its
- /// block.
- static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock,
- TargetLibraryInfo &TLI) {
- BasicBlock *SrcBlock = I->getParent();
- // Cannot move control-flow-involving instructions, volatile loads, vaarg, etc.
- if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
- I->isTerminator())
- return false;
- // Do not sink static or dynamic alloca instructions. Static allocas must
- // remain in the entry block, and dynamic allocas must not be sunk in between
- // a stacksave / stackrestore pair, which would incorrectly shorten their
- // lifetimes.
- if (isa<AllocaInst>(I))
- return false;
- // Do not sink into catchswitch blocks.
- if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
- return false;
- // Do not sink convergent call instructions.
- if (auto *CI = dyn_cast<CallInst>(I)) {
- if (CI->isConvergent())
- return false;
- }
- // Unless we can prove that the memory write isn't visible except on the
- // path we're sinking to, we must bail.
- if (I->mayWriteToMemory()) {
- if (!SoleWriteToDeadLocal(I, TLI))
- return false;
- }
- // We can only sink load instructions if there is nothing between the load and
- // the end of the block that could change the value.
- if (I->mayReadFromMemory()) {
- // We don't want to do any sophisticated alias analysis, so we only check
- // the instructions after I in I's parent block if we try to sink to its
- // successor block.
- if (DestBlock->getUniquePredecessor() != I->getParent())
- return false;
- for (BasicBlock::iterator Scan = std::next(I->getIterator()),
- E = I->getParent()->end();
- Scan != E; ++Scan)
- if (Scan->mayWriteToMemory())
- return false;
- }
- I->dropDroppableUses([DestBlock](const Use *U) {
- if (auto *I = dyn_cast<Instruction>(U->getUser()))
- return I->getParent() != DestBlock;
- return true;
- });
- /// FIXME: We could remove droppable uses that are not dominated by
- /// the new position.
- BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
- I->moveBefore(&*InsertPos);
- ++NumSunkInst;
- // Also sink all related debug uses from the source basic block. Otherwise we
- // get a debug use before the def. Attempt to salvage debug uses first, to
- // maximise the range over which variables have a location. If we cannot
- // salvage, then mark the location undef: we know it was supposed to receive a
- // new location here, but that computation has been sunk.
- SmallVector<DbgVariableIntrinsic *, 2> DbgUsers;
- findDbgUsers(DbgUsers, I);
- // Process the sinking DbgUsers in reverse order, as we only want to clone the
- // last appearing debug intrinsic for each given variable.
- SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSink;
- for (DbgVariableIntrinsic *DVI : DbgUsers)
- if (DVI->getParent() == SrcBlock)
- DbgUsersToSink.push_back(DVI);
- llvm::sort(DbgUsersToSink,
- [](auto *A, auto *B) { return B->comesBefore(A); });
- SmallVector<DbgVariableIntrinsic *, 2> DIIClones;
- SmallSet<DebugVariable, 4> SunkVariables;
- for (auto *User : DbgUsersToSink) {
- // A dbg.declare instruction should not be cloned, since there can only be
- // one per variable fragment. It should be left in the original place
- // because the sunk instruction is not an alloca (otherwise we could not be
- // here).
- if (isa<DbgDeclareInst>(User))
- continue;
- DebugVariable DbgUserVariable =
- DebugVariable(User->getVariable(), User->getExpression(),
- User->getDebugLoc()->getInlinedAt());
- if (!SunkVariables.insert(DbgUserVariable).second)
- continue;
- // Leave dbg.assign intrinsics in their original positions; there should
- // be no need to insert a clone.
- if (isa<DbgAssignIntrinsic>(User))
- continue;
- DIIClones.emplace_back(cast<DbgVariableIntrinsic>(User->clone()));
- if (isa<DbgDeclareInst>(User) && isa<CastInst>(I))
- DIIClones.back()->replaceVariableLocationOp(I, I->getOperand(0));
- LLVM_DEBUG(dbgs() << "CLONE: " << *DIIClones.back() << '\n');
- }
- // Perform salvaging without the clones, then sink the clones.
- if (!DIIClones.empty()) {
- salvageDebugInfoForDbgValues(*I, DbgUsers);
- // The clones are in reverse order of original appearance, reverse again to
- // maintain the original order.
- for (auto &DIIClone : llvm::reverse(DIIClones)) {
- DIIClone->insertBefore(&*InsertPos);
- LLVM_DEBUG(dbgs() << "SINK: " << *DIIClone << '\n');
- }
- }
- return true;
- }
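- // Illustrative sketch (added commentary): sinking moves a computation into
- // the unique block that uses it. Before:
- //   entry:
- //     %v = add i32 %x, 1        ; only used in %use
- //     br i1 %c, label %use, label %exit
- // After sinking, %v is materialized at the first insertion point of %use,
- // and any debug intrinsics for %v left in entry are salvaged or cloned next
- // to the new position so variable locations stay valid.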
- bool InstCombinerImpl::run() {
- while (!Worklist.isEmpty()) {
- // Walk deferred instructions in reverse order, and push them to the
- // worklist, which means they'll end up popped from the worklist in-order.
- while (Instruction *I = Worklist.popDeferred()) {
- // Check to see if we can DCE the instruction. We already do this here to
- // reduce the number of uses and thus allow other folds to trigger.
- // Note that eraseInstFromFunction() may push additional instructions on
- // the deferred worklist, so this will DCE whole instruction chains.
- if (isInstructionTriviallyDead(I, &TLI)) {
- eraseInstFromFunction(*I);
- ++NumDeadInst;
- continue;
- }
- Worklist.push(I);
- }
- Instruction *I = Worklist.removeOne();
- if (I == nullptr) continue; // skip null values.
- // Check to see if we can DCE the instruction.
- if (isInstructionTriviallyDead(I, &TLI)) {
- eraseInstFromFunction(*I);
- ++NumDeadInst;
- continue;
- }
- if (!DebugCounter::shouldExecute(VisitCounter))
- continue;
- // Instruction isn't dead, see if we can constant propagate it.
- if (!I->use_empty() &&
- (I->getNumOperands() == 0 || isa<Constant>(I->getOperand(0)))) {
- if (Constant *C = ConstantFoldInstruction(I, DL, &TLI)) {
- LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I
- << '\n');
- // Add operands to the worklist.
- replaceInstUsesWith(*I, C);
- ++NumConstProp;
- if (isInstructionTriviallyDead(I, &TLI))
- eraseInstFromFunction(*I);
- MadeIRChange = true;
- continue;
- }
- }
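- // Illustrative example (added commentary): a trivially constant instruction
- // such as
- //   %r = add i32 2, 3
- // is folded here to the constant 5, its uses are rewritten to the constant,
- // and the now-dead add is erased.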
- // See if we can trivially sink this instruction to its user if we can
- // prove that the successor is not executed more frequently than our block.
- // Return the UserBlock if successful.
- auto getOptionalSinkBlockForInst =
- [this](Instruction *I) -> std::optional<BasicBlock *> {
- if (!EnableCodeSinking)
- return std::nullopt;
- BasicBlock *BB = I->getParent();
- BasicBlock *UserParent = nullptr;
- unsigned NumUsers = 0;
- for (auto *U : I->users()) {
- if (U->isDroppable())
- continue;
- if (NumUsers > MaxSinkNumUsers)
- return std::nullopt;
- Instruction *UserInst = cast<Instruction>(U);
- // Special handling for Phi nodes - get the block the use occurs in.
- if (PHINode *PN = dyn_cast<PHINode>(UserInst)) {
- for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
- if (PN->getIncomingValue(i) == I) {
- // Bail out if we have uses in different blocks. We don't do any
- // sophisticated analysis (i.e. finding the NearestCommonDominator of
- // these use blocks).
- if (UserParent && UserParent != PN->getIncomingBlock(i))
- return std::nullopt;
- UserParent = PN->getIncomingBlock(i);
- }
- }
- assert(UserParent && "expected to find user block!");
- } else {
- if (UserParent && UserParent != UserInst->getParent())
- return std::nullopt;
- UserParent = UserInst->getParent();
- }
- // Make sure these checks are done only once; naturally, we do them the
- // first time we get the UserParent, which saves compile time.
- if (NumUsers == 0) {
- // Try sinking to another block. If that block is unreachable, then do
- // not bother. SimplifyCFG should handle it.
- if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
- return std::nullopt;
- auto *Term = UserParent->getTerminator();
- // See if the user is one of our successors that has only one
- // predecessor, so that we don't have to split the critical edge.
- // Another option where we can sink is a block that ends with a
- // terminator that does not pass control to another block (such as
- // return, unreachable, or resume). In this case:
- // - I dominates the User (by SSA form);
- // - the User will be executed at most once.
- // So sinking I down to User is always profitable or neutral.
- if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
- return std::nullopt;
- assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
- }
- NumUsers++;
- }
- // No users, or only droppable users.
- if (!UserParent)
- return std::nullopt;
- return UserParent;
- };
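- // Example (added commentary): if %v is only used by a phi whose relevant
- // incoming edge comes from block %bb.then, the candidate sink block is
- // %bb.then, provided it is reachable and either has our block as its unique
- // predecessor or ends in a terminator with no successors (ret, unreachable,
- // resume).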
- auto OptBB = getOptionalSinkBlockForInst(I);
- if (OptBB) {
- auto *UserParent = *OptBB;
- // Okay, the CFG is simple enough, try to sink this instruction.
- if (TryToSinkInstruction(I, UserParent, TLI)) {
- LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
- MadeIRChange = true;
- // We'll add uses of the sunk instruction below, but since
- // sinking can expose opportunities for its *operands*, add
- // them to the worklist.
- for (Use &U : I->operands())
- if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
- Worklist.push(OpI);
- }
- }
- // Now that we have an instruction, try combining it to simplify it.
- Builder.SetInsertPoint(I);
- Builder.CollectMetadataToCopy(
- I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
- #ifndef NDEBUG
- std::string OrigI;
- #endif
- LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
- LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
- if (Instruction *Result = visit(*I)) {
- ++NumCombined;
- // Should we replace the old instruction with a new one?
- if (Result != I) {
- LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
- << " New = " << *Result << '\n');
- Result->copyMetadata(*I,
- {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
- // Everything uses the new instruction now.
- I->replaceAllUsesWith(Result);
- // Move the name to the new instruction first.
- Result->takeName(I);
- // Insert the new instruction into the basic block...
- BasicBlock *InstParent = I->getParent();
- BasicBlock::iterator InsertPos = I->getIterator();
- // Are we replacing a PHI with something that isn't a PHI, or vice versa?
- if (isa<PHINode>(Result) != isa<PHINode>(I)) {
- // We need to fix up the insertion point.
- if (isa<PHINode>(I)) // PHI -> Non-PHI
- InsertPos = InstParent->getFirstInsertionPt();
- else // Non-PHI -> PHI
- InsertPos = InstParent->getFirstNonPHI()->getIterator();
- }
- Result->insertInto(InstParent, InsertPos);
- // Push the new instruction and any users onto the worklist.
- Worklist.pushUsersToWorkList(*Result);
- Worklist.push(Result);
- eraseInstFromFunction(*I);
- } else {
- LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
- << " New = " << *I << '\n');
- // If the instruction was modified, it's possible that it is now dead.
- // If so, remove it.
- if (isInstructionTriviallyDead(I, &TLI)) {
- eraseInstFromFunction(*I);
- } else {
- Worklist.pushUsersToWorkList(*I);
- Worklist.push(I);
- }
- }
- MadeIRChange = true;
- }
- }
- Worklist.zap();
- return MadeIRChange;
- }
- // Track the scopes used by !alias.scope and !noalias. In a function, a
- // @llvm.experimental.noalias.scope.decl is only useful if its scope is used
- // by both sets. If not, the declaration of the scope can be safely omitted.
- // The MDNode of the scope could be omitted as well for the instructions that
- // are part of this function. We do not do that at this point, as it might
- // become too time consuming.
- class AliasScopeTracker {
- SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
- SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
- public:
- void analyse(Instruction *I) {
- // This seems to be faster than checking 'mayReadOrWriteMemory()'.
- if (!I->hasMetadataOtherThanDebugLoc())
- return;
- auto Track = [](Metadata *ScopeList, auto &Container) {
- const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
- if (!MDScopeList || !Container.insert(MDScopeList).second)
- return;
- for (const auto &MDOperand : MDScopeList->operands())
- if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
- Container.insert(MDScope);
- };
- Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
- Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
- }
- bool isNoAliasScopeDeclDead(Instruction *Inst) {
- NoAliasScopeDeclInst *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
- if (!Decl)
- return false;
- assert(Decl->use_empty() &&
- "llvm.experimental.noalias.scope.decl in use?");
- const MDNode *MDSL = Decl->getScopeList();
- assert(MDSL->getNumOperands() == 1 &&
- "llvm.experimental.noalias.scope should refer to a single scope");
- auto &MDOperand = MDSL->getOperand(0);
- if (auto *MD = dyn_cast<MDNode>(MDOperand))
- return !UsedAliasScopesAndLists.contains(MD) ||
- !UsedNoAliasScopesAndLists.contains(MD);
- // Not an MDNode? Throw it away.
- return true;
- }
- };
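- // Illustrative example (added commentary): a declaration such as
- //   call void @llvm.experimental.noalias.scope.decl(metadata !3)
- // is only worth keeping if the scope held by !3 appears in both some
- // !alias.scope list and some !noalias list in the function; otherwise
- // isNoAliasScopeDeclDead() reports it as removable.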
- /// Populate the IC worklist from a function, by walking it in depth-first
- /// order and adding all reachable code to the worklist.
- ///
- /// This has a couple of tricks to make the code faster and more powerful. In
- /// particular, we constant fold and DCE instructions as we go, to avoid adding
- /// them to the worklist (this significantly speeds up instcombine on code where
- /// many instructions are dead or constant). Additionally, if we find a branch
- /// whose condition is a known constant, we only visit the reachable successors.
- static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- InstructionWorklist &ICWorklist) {
- bool MadeIRChange = false;
- SmallPtrSet<BasicBlock *, 32> Visited;
- SmallVector<BasicBlock*, 256> Worklist;
- Worklist.push_back(&F.front());
- SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
- DenseMap<Constant *, Constant *> FoldedConstants;
- AliasScopeTracker SeenAliasScopes;
- do {
- BasicBlock *BB = Worklist.pop_back_val();
- // We have now visited this block! If we've already been here, ignore it.
- if (!Visited.insert(BB).second)
- continue;
- for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
- // ConstantProp instruction if trivially constant.
- if (!Inst.use_empty() &&
- (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
- if (Constant *C = ConstantFoldInstruction(&Inst, DL, TLI)) {
- LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
- << '\n');
- Inst.replaceAllUsesWith(C);
- ++NumConstProp;
- if (isInstructionTriviallyDead(&Inst, TLI))
- Inst.eraseFromParent();
- MadeIRChange = true;
- continue;
- }
- // See if we can constant fold its operands.
- for (Use &U : Inst.operands()) {
- if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
- continue;
- auto *C = cast<Constant>(U);
- Constant *&FoldRes = FoldedConstants[C];
- if (!FoldRes)
- FoldRes = ConstantFoldConstant(C, DL, TLI);
- if (FoldRes != C) {
- LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
- << "\n Old = " << *C
- << "\n New = " << *FoldRes << '\n');
- U = FoldRes;
- MadeIRChange = true;
- }
- }
- // Skip processing debug and pseudo intrinsics in InstCombine. Processing
- // these call instructions consumes a non-trivial amount of time and
- // provides no value for the optimization.
- if (!Inst.isDebugOrPseudoInst()) {
- InstrsForInstructionWorklist.push_back(&Inst);
- SeenAliasScopes.analyse(&Inst);
- }
- }
- // Recursively visit successors. If this is a branch or switch on a
- // constant, only visit the reachable successor.
- Instruction *TI = BB->getTerminator();
- if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
- if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
- bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
- BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
- Worklist.push_back(ReachableBB);
- continue;
- }
- } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
- if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
- Worklist.push_back(SI->findCaseValue(Cond)->getCaseSuccessor());
- continue;
- }
- }
- append_range(Worklist, successors(TI));
- } while (!Worklist.empty());
- // Remove instructions inside unreachable blocks. This prevents the
- // instcombine code from having to deal with some bad special cases, and
- // reduces use counts of instructions.
- for (BasicBlock &BB : F) {
- if (Visited.count(&BB))
- continue;
- unsigned NumDeadInstInBB;
- unsigned NumDeadDbgInstInBB;
- std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =
- removeAllNonTerminatorAndEHPadInstructions(&BB);
- MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
- NumDeadInst += NumDeadInstInBB;
- }
- // Once we've found all of the instructions to add to instcombine's worklist,
- // add them in reverse order. This way instcombine will visit from the top
- // of the function down. This jibes well with the way that it adds all uses
- // of instructions to the worklist after doing a transformation, thus avoiding
- // some N^2 behavior in pathological cases.
- ICWorklist.reserve(InstrsForInstructionWorklist.size());
- for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
- // DCE instruction if trivially dead. As we iterate in reverse program
- // order here, we will clean up whole chains of dead instructions.
- if (isInstructionTriviallyDead(Inst, TLI) ||
- SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
- ++NumDeadInst;
- LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
- salvageDebugInfo(*Inst);
- Inst->eraseFromParent();
- MadeIRChange = true;
- continue;
- }
- ICWorklist.push(Inst);
- }
- return MadeIRChange;
- }
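- // Example of the reachability pruning above (added commentary): given
- //   br i1 true, label %taken, label %dead
- // only %taken is pushed onto the block worklist, so instructions in %dead are
- // never added to the instcombine worklist and are later stripped as
- // unreachable code.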
- static bool combineInstructionsOverFunction(
- Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
- AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
- DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
- ProfileSummaryInfo *PSI, unsigned MaxIterations, LoopInfo *LI) {
- auto &DL = F.getParent()->getDataLayout();
- MaxIterations = std::min(MaxIterations, LimitMaxIterations.getValue());
- /// Builder - This is an IRBuilder that automatically inserts new
- /// instructions into the worklist when they are created.
- IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
- F.getContext(), TargetFolder(DL),
- IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
- Worklist.add(I);
- if (auto *Assume = dyn_cast<AssumeInst>(I))
- AC.registerAssumption(Assume);
- }));
- // Lower dbg.declare intrinsics; otherwise their value may be clobbered
- // by the instcombiner.
- bool MadeIRChange = false;
- if (ShouldLowerDbgDeclare)
- MadeIRChange = LowerDbgDeclare(F);
- // LowerDbgDeclare calls RemoveRedundantDbgInstrs, but LowerDbgDeclare will
- // almost never return true when running an assignment tracking build. Take
- // this opportunity to do some clean up for assignment tracking builds too.
- if (!MadeIRChange && isAssignmentTrackingEnabled(*F.getParent())) {
- for (auto &BB : F)
- RemoveRedundantDbgInstrs(&BB);
- }
- // Iterate while there is work to do.
- unsigned Iteration = 0;
- while (true) {
- ++NumWorklistIterations;
- ++Iteration;
- if (Iteration > InfiniteLoopDetectionThreshold) {
- report_fatal_error(
- "Instruction Combining seems stuck in an infinite loop after " +
- Twine(InfiniteLoopDetectionThreshold) + " iterations.");
- }
- if (Iteration > MaxIterations) {
- LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << MaxIterations
- << " on " << F.getName()
- << " reached; stopping before reaching a fixpoint\n");
- break;
- }
- LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
- << F.getName() << "\n");
- MadeIRChange |= prepareICWorklistFromFunction(F, DL, &TLI, Worklist);
- InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT,
- ORE, BFI, PSI, DL, LI);
- IC.MaxArraySizeForCombine = MaxArraySize;
- if (!IC.run())
- break;
- MadeIRChange = true;
- }
- return MadeIRChange;
- }
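- // Usage note (added commentary): the driver above re-runs the combiner until
- // it reaches a fixpoint or hits MaxIterations; from the command line the pass
- // is typically exercised with something like `opt -passes=instcombine`.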
- InstCombinePass::InstCombinePass() : MaxIterations(LimitMaxIterations) {}
- InstCombinePass::InstCombinePass(unsigned MaxIterations)
- : MaxIterations(MaxIterations) {}
- PreservedAnalyses InstCombinePass::run(Function &F,
- FunctionAnalysisManager &AM) {
- auto &AC = AM.getResult<AssumptionAnalysis>(F);
- auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
- auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
- auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
- auto &TTI = AM.getResult<TargetIRAnalysis>(F);
- auto *LI = AM.getCachedResult<LoopAnalysis>(F);
- auto *AA = &AM.getResult<AAManager>(F);
- auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
- ProfileSummaryInfo *PSI =
- MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
- auto *BFI = (PSI && PSI->hasProfileSummary()) ?
- &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
- if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
- BFI, PSI, MaxIterations, LI))
- // No changes, all analyses are preserved.
- return PreservedAnalyses::all();
- // Mark all the analyses that instcombine updates as preserved.
- PreservedAnalyses PA;
- PA.preserveSet<CFGAnalyses>();
- return PA;
- }
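- // Sketch of new-pass-manager usage (added commentary, assuming a
- // FunctionPassManager `FPM` built elsewhere):
- //   FPM.addPass(InstCombinePass());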
- void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
- AU.addRequired<AAResultsWrapperPass>();
- AU.addRequired<AssumptionCacheTracker>();
- AU.addRequired<TargetLibraryInfoWrapperPass>();
- AU.addRequired<TargetTransformInfoWrapperPass>();
- AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
- AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addPreserved<AAResultsWrapperPass>();
- AU.addPreserved<BasicAAWrapperPass>();
- AU.addPreserved<GlobalsAAWrapperPass>();
- AU.addRequired<ProfileSummaryInfoWrapperPass>();
- LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
- }
- bool InstructionCombiningPass::runOnFunction(Function &F) {
- if (skipFunction(F))
- return false;
- // Required analyses.
- auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
- auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
- auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
- auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
- auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
- // Optional analyses.
- auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
- auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
- ProfileSummaryInfo *PSI =
- &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
- BlockFrequencyInfo *BFI =
- (PSI && PSI->hasProfileSummary()) ?
- &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
- nullptr;
- return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
- BFI, PSI, MaxIterations, LI);
- }
- char InstructionCombiningPass::ID = 0;
- InstructionCombiningPass::InstructionCombiningPass()
- : FunctionPass(ID), MaxIterations(InstCombineDefaultMaxIterations) {
- initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
- }
- InstructionCombiningPass::InstructionCombiningPass(unsigned MaxIterations)
- : FunctionPass(ID), MaxIterations(MaxIterations) {
- initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
- }
- INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
- "Combine redundant instructions", false, false)
- INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
- INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
- INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
- INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
- INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
- INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
- INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
- INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
- INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
- INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
- "Combine redundant instructions", false, false)
- // Initialization Routines
- void llvm::initializeInstCombine(PassRegistry &Registry) {
- initializeInstructionCombiningPassPass(Registry);
- }
- void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
- initializeInstructionCombiningPassPass(*unwrap(R));
- }
- FunctionPass *llvm::createInstructionCombiningPass() {
- return new InstructionCombiningPass();
- }
- FunctionPass *llvm::createInstructionCombiningPass(unsigned MaxIterations) {
- return new InstructionCombiningPass(MaxIterations);
- }
- void LLVMAddInstructionCombiningPass(LLVMPassManagerRef PM) {
- unwrap(PM)->add(createInstructionCombiningPass());
- }
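- // Sketch of C-API usage (added commentary, assuming an LLVMPassManagerRef
- // `PM` created with LLVMCreateFunctionPassManagerForModule or similar):
- //   LLVMInitializeInstCombine(LLVMGetGlobalPassRegistry());
- //   LLVMAddInstructionCombiningPass(PM);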
|