//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interprocedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFold.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cstdint>

#ifdef EXPENSIVE_CHECKS
#include "llvm/IR/Verifier.h"
#endif

#include <cassert>
#include <optional>
#include <string>
- using namespace llvm;
- #define DEBUG_TYPE "attributor"
- #define VERBOSE_DEBUG_TYPE DEBUG_TYPE "-verbose"
- DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
- "Determine what attributes are manifested in the IR");
- STATISTIC(NumFnDeleted, "Number of function deleted");
- STATISTIC(NumFnWithExactDefinition,
- "Number of functions with exact definitions");
- STATISTIC(NumFnWithoutExactDefinition,
- "Number of functions without exact definitions");
- STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
- STATISTIC(NumAttributesTimedOut,
- "Number of abstract attributes timed out before fixpoint");
- STATISTIC(NumAttributesValidFixpoint,
- "Number of abstract attributes in a valid fixpoint state");
- STATISTIC(NumAttributesManifested,
- "Number of abstract attributes manifested in IR");
- // TODO: Determine a good default value.
- //
- // In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
- // (when run with the first 5 abstract attributes). The results also indicate
- // that we never reach 32 iterations but always find a fixpoint sooner.
- //
- // This will become more evolved once we perform two interleaved fixpoint
- // iterations: bottom-up and top-down.
- static cl::opt<unsigned>
- SetFixpointIterations("attributor-max-iterations", cl::Hidden,
- cl::desc("Maximal number of fixpoint iterations."),
- cl::init(32));
- static cl::opt<unsigned, true> MaxInitializationChainLengthX(
- "attributor-max-initialization-chain-length", cl::Hidden,
- cl::desc(
- "Maximal number of chained initializations (to avoid stack overflows)"),
- cl::location(MaxInitializationChainLength), cl::init(1024));
- unsigned llvm::MaxInitializationChainLength;
- static cl::opt<bool> VerifyMaxFixpointIterations(
- "attributor-max-iterations-verify", cl::Hidden,
- cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
- cl::init(false));
- static cl::opt<bool> AnnotateDeclarationCallSites(
- "attributor-annotate-decl-cs", cl::Hidden,
- cl::desc("Annotate call sites of function declarations."), cl::init(false));
- static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
- cl::init(true), cl::Hidden);
- static cl::opt<bool>
- AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
- cl::desc("Allow the Attributor to create shallow "
- "wrappers for non-exact definitions."),
- cl::init(false));
- static cl::opt<bool>
- AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
- cl::desc("Allow the Attributor to use IP information "
- "derived from non-exact functions via cloning"),
- cl::init(false));
- // These options can only used for debug builds.
- #ifndef NDEBUG
- static cl::list<std::string>
- SeedAllowList("attributor-seed-allow-list", cl::Hidden,
- cl::desc("Comma seperated list of attribute names that are "
- "allowed to be seeded."),
- cl::CommaSeparated);
- static cl::list<std::string> FunctionSeedAllowList(
- "attributor-function-seed-allow-list", cl::Hidden,
- cl::desc("Comma seperated list of function names that are "
- "allowed to be seeded."),
- cl::CommaSeparated);
- #endif
- static cl::opt<bool>
- DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
- cl::desc("Dump the dependency graph to dot files."),
- cl::init(false));
- static cl::opt<std::string> DepGraphDotFileNamePrefix(
- "attributor-depgraph-dot-filename-prefix", cl::Hidden,
- cl::desc("The prefix used for the CallGraph dot file names."));
- static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
- cl::desc("View the dependency graph."),
- cl::init(false));
- static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
- cl::desc("Print attribute dependencies"),
- cl::init(false));
- static cl::opt<bool> EnableCallSiteSpecific(
- "attributor-enable-call-site-specific-deduction", cl::Hidden,
- cl::desc("Allow the Attributor to do call site specific analysis"),
- cl::init(false));
- static cl::opt<bool>
- PrintCallGraph("attributor-print-call-graph", cl::Hidden,
- cl::desc("Print Attributor's internal call graph"),
- cl::init(false));
- static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
- cl::Hidden,
- cl::desc("Try to simplify all loads."),
- cl::init(true));
- /// Logic operators for the change status enum class.
- ///
- ///{
- ChangeStatus llvm::operator|(ChangeStatus L, ChangeStatus R) {
- return L == ChangeStatus::CHANGED ? L : R;
- }
- ChangeStatus &llvm::operator|=(ChangeStatus &L, ChangeStatus R) {
- L = L | R;
- return L;
- }
- ChangeStatus llvm::operator&(ChangeStatus L, ChangeStatus R) {
- return L == ChangeStatus::UNCHANGED ? L : R;
- }
- ChangeStatus &llvm::operator&=(ChangeStatus &L, ChangeStatus R) {
- L = L & R;
- return L;
- }
- ///}
- bool AA::isNoSyncInst(Attributor &A, const Instruction &I,
- const AbstractAttribute &QueryingAA) {
- // We are looking for volatile instructions or non-relaxed atomics.
- if (const auto *CB = dyn_cast<CallBase>(&I)) {
- if (CB->hasFnAttr(Attribute::NoSync))
- return true;
- // Non-convergent and readnone imply nosync.
- if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
- return true;
- if (AANoSync::isNoSyncIntrinsic(&I))
- return true;
- const auto &NoSyncAA = A.getAAFor<AANoSync>(
- QueryingAA, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
- return NoSyncAA.isAssumedNoSync();
- }
- if (!I.mayReadOrWriteMemory())
- return true;
- return !I.isVolatile() && !AANoSync::isNonRelaxedAtomic(&I);
- }
- bool AA::isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
- const Value &V, bool ForAnalysisOnly) {
- // TODO: See the AAInstanceInfo class comment.
- if (!ForAnalysisOnly)
- return false;
- auto &InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
- QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
- return InstanceInfoAA.isAssumedUniqueForAnalysis();
- }
- Constant *AA::getInitialValueForObj(Value &Obj, Type &Ty,
- const TargetLibraryInfo *TLI,
- const DataLayout &DL,
- AA::RangeTy *RangePtr) {
- if (isa<AllocaInst>(Obj))
- return UndefValue::get(&Ty);
- if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
- return Init;
- auto *GV = dyn_cast<GlobalVariable>(&Obj);
- if (!GV)
- return nullptr;
- if (!GV->hasLocalLinkage() && !(GV->isConstant() && GV->hasInitializer()))
- return nullptr;
- if (!GV->hasInitializer())
- return UndefValue::get(&Ty);
- if (RangePtr && !RangePtr->offsetOrSizeAreUnknown()) {
- APInt Offset = APInt(64, RangePtr->Offset);
- return ConstantFoldLoadFromConst(GV->getInitializer(), &Ty, Offset, DL);
- }
- return ConstantFoldLoadFromUniformValue(GV->getInitializer(), &Ty);
- }
- bool AA::isValidInScope(const Value &V, const Function *Scope) {
- if (isa<Constant>(V))
- return true;
- if (auto *I = dyn_cast<Instruction>(&V))
- return I->getFunction() == Scope;
- if (auto *A = dyn_cast<Argument>(&V))
- return A->getParent() == Scope;
- return false;
- }
- bool AA::isValidAtPosition(const AA::ValueAndContext &VAC,
- InformationCache &InfoCache) {
- if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
- return true;
- const Function *Scope = nullptr;
- const Instruction *CtxI = VAC.getCtxI();
- if (CtxI)
- Scope = CtxI->getFunction();
- if (auto *A = dyn_cast<Argument>(VAC.getValue()))
- return A->getParent() == Scope;
- if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
- if (I->getFunction() == Scope) {
- if (const DominatorTree *DT =
- InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
- *Scope))
- return DT->dominates(I, CtxI);
- // Local dominance check mostly for the old PM passes.
- if (CtxI && I->getParent() == CtxI->getParent())
- return llvm::any_of(
- make_range(I->getIterator(), I->getParent()->end()),
- [&](const Instruction &AfterI) { return &AfterI == CtxI; });
- }
- }
- return false;
- }
- Value *AA::getWithType(Value &V, Type &Ty) {
- if (V.getType() == &Ty)
- return &V;
- if (isa<PoisonValue>(V))
- return PoisonValue::get(&Ty);
- if (isa<UndefValue>(V))
- return UndefValue::get(&Ty);
- if (auto *C = dyn_cast<Constant>(&V)) {
- if (C->isNullValue())
- return Constant::getNullValue(&Ty);
- if (C->getType()->isPointerTy() && Ty.isPointerTy())
- return ConstantExpr::getPointerCast(C, &Ty);
- if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
- if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
- return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
- if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
- return ConstantExpr::getFPTrunc(C, &Ty, /* OnlyIfReduced */ true);
- }
- }
- return nullptr;
- }
- std::optional<Value *>
- AA::combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
- const std::optional<Value *> &B,
- Type *Ty) {
- if (A == B)
- return A;
- if (!B)
- return A;
- if (*B == nullptr)
- return nullptr;
- if (!A)
- return Ty ? getWithType(**B, *Ty) : nullptr;
- if (*A == nullptr)
- return nullptr;
- if (!Ty)
- Ty = (*A)->getType();
- if (isa_and_nonnull<UndefValue>(*A))
- return getWithType(**B, *Ty);
- if (isa<UndefValue>(*B))
- return A;
- if (*A && *B && *A == getWithType(**B, *Ty))
- return A;
- return nullptr;
- }
- template <bool IsLoad, typename Ty>
- static bool getPotentialCopiesOfMemoryValue(
- Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
- SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
- const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
- bool OnlyExact) {
- LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
- << " (only exact: " << OnlyExact << ")\n";);
- Value &Ptr = *I.getPointerOperand();
- // Containers to remember the pointer infos and new copies while we are not
- // sure that we can find all of them. If we abort we want to avoid spurious
- // dependences and potential copies in the provided container.
- SmallVector<const AAPointerInfo *> PIs;
- SmallVector<Value *> NewCopies;
- SmallVector<Instruction *> NewCopyOrigins;
- const auto *TLI =
- A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());
- auto Pred = [&](Value &Obj) {
- LLVM_DEBUG(dbgs() << "Visit underlying object " << Obj << "\n");
- if (isa<UndefValue>(&Obj))
- return true;
- if (isa<ConstantPointerNull>(&Obj)) {
- // A null pointer access can be undefined but any offset from null may
- // be OK. We do not try to optimize the latter.
- if (!NullPointerIsDefined(I.getFunction(),
- Ptr.getType()->getPointerAddressSpace()) &&
- A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
- AA::Interprocedural) == &Obj)
- return true;
- LLVM_DEBUG(
- dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
- return false;
- }
- // TODO: Use assumed noalias return.
- if (!isa<AllocaInst>(&Obj) && !isa<GlobalVariable>(&Obj) &&
- !(IsLoad ? isAllocationFn(&Obj, TLI) : isNoAliasCall(&Obj))) {
- LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << Obj
- << "\n";);
- return false;
- }
- if (auto *GV = dyn_cast<GlobalVariable>(&Obj))
- if (!GV->hasLocalLinkage() &&
- !(GV->isConstant() && GV->hasInitializer())) {
- LLVM_DEBUG(dbgs() << "Underlying object is global with external "
- "linkage, not supported yet: "
- << Obj << "\n";);
- return false;
- }
- bool NullOnly = true;
- bool NullRequired = false;
- auto CheckForNullOnlyAndUndef = [&](std::optional<Value *> V,
- bool IsExact) {
- if (!V || *V == nullptr)
- NullOnly = false;
- else if (isa<UndefValue>(*V))
- /* No op */;
- else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
- NullRequired = !IsExact;
- else
- NullOnly = false;
- };
- auto AdjustWrittenValueType = [&](const AAPointerInfo::Access &Acc,
- Value &V) {
- Value *AdjV = AA::getWithType(V, *I.getType());
- if (!AdjV) {
- LLVM_DEBUG(dbgs() << "Underlying object written but stored value "
- "cannot be converted to read type: "
- << *Acc.getRemoteInst() << " : " << *I.getType()
- << "\n";);
- }
- return AdjV;
- };
- auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
- if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
- return true;
- if (IsLoad && Acc.isWrittenValueYetUndetermined())
- return true;
- CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
- if (OnlyExact && !IsExact && !NullOnly &&
- !isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
- LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
- << ", abort!\n");
- return false;
- }
- if (NullRequired && !NullOnly) {
- LLVM_DEBUG(dbgs() << "Required all `null` accesses due to non exact "
- "one, however found non-null one: "
- << *Acc.getRemoteInst() << ", abort!\n");
- return false;
- }
- if (IsLoad) {
- assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
- if (!Acc.isWrittenValueUnknown()) {
- Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue());
- if (!V)
- return false;
- NewCopies.push_back(V);
- NewCopyOrigins.push_back(Acc.getRemoteInst());
- return true;
- }
- auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
- if (!SI) {
- LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
- "instruction not supported yet: "
- << *Acc.getRemoteInst() << "\n";);
- return false;
- }
- Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand());
- if (!V)
- return false;
- NewCopies.push_back(V);
- NewCopyOrigins.push_back(SI);
- } else {
- assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
- auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
- if (!LI && OnlyExact) {
- LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
- "instruction not supported yet: "
- << *Acc.getRemoteInst() << "\n";);
- return false;
- }
- NewCopies.push_back(Acc.getRemoteInst());
- }
- return true;
- };
- // If the value has been written to we don't need the initial value of the
- // object.
- bool HasBeenWrittenTo = false;
- AA::RangeTy Range;
- auto &PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(Obj),
- DepClassTy::NONE);
- if (!PI.forallInterferingAccesses(A, QueryingAA, I, CheckAccess,
- HasBeenWrittenTo, Range)) {
- LLVM_DEBUG(
- dbgs()
- << "Failed to verify all interfering accesses for underlying object: "
- << Obj << "\n");
- return false;
- }
- if (IsLoad && !HasBeenWrittenTo && !Range.isUnassigned()) {
- const DataLayout &DL = A.getDataLayout();
- Value *InitialValue =
- AA::getInitialValueForObj(Obj, *I.getType(), TLI, DL, &Range);
- if (!InitialValue) {
- LLVM_DEBUG(dbgs() << "Could not determine required initial value of "
- "underlying object, abort!\n");
- return false;
- }
- CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
- if (NullRequired && !NullOnly) {
- LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
- "null or undef, abort!\n");
- return false;
- }
- NewCopies.push_back(InitialValue);
- NewCopyOrigins.push_back(nullptr);
- }
- PIs.push_back(&PI);
- return true;
- };
- const auto &AAUO = A.getAAFor<AAUnderlyingObjects>(
- QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
- if (!AAUO.forallUnderlyingObjects(Pred)) {
- LLVM_DEBUG(
- dbgs() << "Underlying objects stored into could not be determined\n";);
- return false;
- }
- // Only if we were successful collection all potential copies we record
- // dependences (on non-fix AAPointerInfo AAs). We also only then modify the
- // given PotentialCopies container.
- for (const auto *PI : PIs) {
- if (!PI->getState().isAtFixpoint())
- UsedAssumedInformation = true;
- A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
- }
- PotentialCopies.insert(NewCopies.begin(), NewCopies.end());
- PotentialValueOrigins.insert(NewCopyOrigins.begin(), NewCopyOrigins.end());
- return true;
- }
/// Collect in \p PotentialValues the values that might be loaded by \p LI and
/// in \p PotentialValueOrigins the instructions that wrote them. Forwards to
/// the shared load/store helper with IsLoad = true. \p UsedAssumedInformation
/// is set if the result relies on not-yet-fixed information; \p OnlyExact
/// restricts which interfering accesses are acceptable.
bool AA::getPotentiallyLoadedValues(
    Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
    SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
      A, LI, PotentialValues, PotentialValueOrigins, QueryingAA,
      UsedAssumedInformation, OnlyExact);
}
/// Store-side counterpart of getPotentiallyLoadedValues: collect in
/// \p PotentialCopies the places the value stored by \p SI may end up.
/// The origin instructions are gathered by the shared helper but not exposed
/// to the caller.
bool AA::getPotentialCopiesOfStoredValue(
    Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  // Discarded; only needed to satisfy the shared helper's interface.
  SmallSetVector<Instruction *, 4> PotentialValueOrigins;
  return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
      A, SI, PotentialCopies, PotentialValueOrigins, QueryingAA,
      UsedAssumedInformation, OnlyExact);
}
/// Return true if \p IRP is assumed to only read (or, with \p RequireReadNone,
/// not access) memory. On success \p IsKnown tells whether the property is
/// known rather than merely assumed; for assumed-only answers an OPTIONAL
/// dependence on the used AA is recorded so the querier is re-run if the
/// assumption breaks.
static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP,
                                        const AbstractAttribute &QueryingAA,
                                        bool RequireReadNone, bool &IsKnown) {

  IRPosition::Kind Kind = IRP.getPositionKind();
  if (Kind == IRPosition::IRP_FUNCTION || Kind == IRPosition::IRP_CALL_SITE) {
    // For function and call site positions AAMemoryLocation can also answer
    // the readnone question via "no accessed locations".
    const auto &MemLocAA =
        A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
    if (MemLocAA.isAssumedReadNone()) {
      IsKnown = MemLocAA.isKnownReadNone();
      if (!IsKnown)
        A.recordDependence(MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
      return true;
    }
  }

  // Fall back to the generic memory behavior AA which also covers arguments
  // and floating values.
  const auto &MemBehaviorAA =
      A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
  if (MemBehaviorAA.isAssumedReadNone() ||
      (!RequireReadNone && MemBehaviorAA.isAssumedReadOnly())) {
    IsKnown = RequireReadNone ? MemBehaviorAA.isKnownReadNone()
                              : MemBehaviorAA.isKnownReadOnly();
    if (!IsKnown)
      A.recordDependence(MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
    return true;
  }

  return false;
}
/// Convenience wrapper: is \p IRP assumed to at most read memory? See
/// isAssumedReadOnlyOrReadNone for the \p IsKnown and dependence semantics.
bool AA::isAssumedReadOnly(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ false, IsKnown);
}
/// Convenience wrapper: is \p IRP assumed to not access memory at all? See
/// isAssumedReadOnlyOrReadNone for the \p IsKnown and dependence semantics.
bool AA::isAssumedReadNone(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ true, IsKnown);
}
/// Return true if \p ToI (or, when \p ToI is null, any point in \p ToFn) might
/// be reachable from \p FromI. Reachability is first checked
/// intra-procedurally; if that fails, the search steps backwards over the call
/// sites of the current function — but only when \p GoBackwardsCB permits it —
/// and continues from the instructions following those call sites.
/// \p ExclusionSet lists instructions a path must not pass through. All
/// "cannot decide" situations conservatively answer true (reachable).
static bool
isPotentiallyReachable(Attributor &A, const Instruction &FromI,
                       const Instruction *ToI, const Function &ToFn,
                       const AbstractAttribute &QueryingAA,
                       const AA::InstExclusionSetTy *ExclusionSet,
                       std::function<bool(const Function &F)> GoBackwardsCB) {
  LLVM_DEBUG({
    dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName() << " from "
           << FromI << " [GBCB: " << bool(GoBackwardsCB) << "][#ExS: "
           << (ExclusionSet ? std::to_string(ExclusionSet->size()) : "none")
           << "]\n";
    if (ExclusionSet)
      for (auto *ES : *ExclusionSet)
        dbgs() << *ES << "\n";
  });

  // If we can go arbitrarily backwards we will eventually reach an entry point
  // that can reach ToI. Only if a set of blocks through which we cannot go is
  // provided, or once we track internal functions not accessible from the
  // outside, it makes sense to perform backwards analysis in the absence of a
  // GoBackwardsCB.
  if (!GoBackwardsCB && !ExclusionSet) {
    LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
                      << " is not checked backwards and does not have an "
                         "exclusion set, abort\n");
    return true;
  }

  // Worklist of starting points; entries are de-duplicated via Visited.
  SmallPtrSet<const Instruction *, 8> Visited;
  SmallVector<const Instruction *> Worklist;
  Worklist.push_back(&FromI);

  while (!Worklist.empty()) {
    const Instruction *CurFromI = Worklist.pop_back_val();
    if (!Visited.insert(CurFromI).second)
      continue;

    const Function *FromFn = CurFromI->getFunction();
    // Same function: try to answer the query intra-procedurally first.
    if (FromFn == &ToFn) {
      if (!ToI)
        return true;
      LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
                        << " intraprocedurally\n");
      const auto &ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
          QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
      bool Result =
          ReachabilityAA.isAssumedReachable(A, *CurFromI, *ToI, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
                        << (Result ? "can potentially " : "cannot ") << "reach "
                        << *ToI << " [Intra]\n");
      if (Result)
        return true;
    }

    bool Result = true;
    // If ToFn has a body and a concrete target instruction, check whether the
    // entry of ToFn can even reach ToI; if not, no inter-procedural path can
    // end at ToI either.
    if (!ToFn.isDeclaration() && ToI) {
      const auto &ToReachabilityAA = A.getAAFor<AAIntraFnReachability>(
          QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
      const Instruction &EntryI = ToFn.getEntryBlock().front();
      Result =
          ToReachabilityAA.isAssumedReachable(A, EntryI, *ToI, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] Entry " << EntryI << " of @" << ToFn.getName()
                        << " " << (Result ? "can potentially " : "cannot ")
                        << "reach @" << *ToI << " [ToFn]\n");
    }

    if (Result) {
      // The entry of the ToFn can reach the instruction ToI. If the current
      // instruction is already known to reach the ToFn.
      const auto &FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
          QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
      Result = FnReachabilityAA.instructionCanReach(A, *CurFromI, ToFn,
                                                    ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
                        << " " << (Result ? "can potentially " : "cannot ")
                        << "reach @" << ToFn.getName() << " [FromFn]\n");
      if (Result)
        return true;
    }

    // TODO: Check assumed nounwind.
    const auto &ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
        QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
    auto ReturnInstCB = [&](Instruction &Ret) {
      bool Result =
          ReachabilityAA.isAssumedReachable(A, *CurFromI, Ret, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA][Ret] " << *CurFromI << " "
                        << (Result ? "can potentially " : "cannot ") << "reach "
                        << Ret << " [Intra]\n");
      return !Result;
    };

    // Check if we can reach returns. If no return is reachable from CurFromI
    // the caller of FromFn cannot be reached either; drop this worklist item.
    bool UsedAssumedInformation = false;
    if (A.checkForAllInstructions(ReturnInstCB, FromFn, QueryingAA,
                                  {Instruction::Ret}, UsedAssumedInformation)) {
      LLVM_DEBUG(dbgs() << "[AA] No return is reachable, done\n");
      continue;
    }

    if (!GoBackwardsCB) {
      LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
                        << " is not checked backwards, abort\n");
      return true;
    }

    // If we do not go backwards from the FromFn we are done here and so far we
    // could not find a way to reach ToFn/ToI.
    if (!GoBackwardsCB(*FromFn))
      continue;

    LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
                      << FromFn->getName() << "\n");

    auto CheckCallSite = [&](AbstractCallSite ACS) {
      CallBase *CB = ACS.getInstruction();
      if (!CB)
        return false;

      // Invoke edges are not followed here; bail so the caller gives a
      // conservative answer.
      if (isa<InvokeInst>(CB))
        return false;

      // Continue the search right after the call site in the caller.
      Instruction *Inst = CB->getNextNonDebugInstruction();
      Worklist.push_back(Inst);
      return true;
    };

    // If not all call sites are known (or one was rejected) we must assume
    // reachability.
    Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
                                     /* RequireAllCallSites */ true,
                                     &QueryingAA, UsedAssumedInformation);
    if (Result) {
      LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
                        << " in @" << FromFn->getName()
                        << " failed, give up\n");
      return true;
    }

    LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
                      << " in @" << FromFn->getName()
                      << " worklist size is: " << Worklist.size() << "\n");
  }
  return false;
}
- bool AA::isPotentiallyReachable(
- Attributor &A, const Instruction &FromI, const Instruction &ToI,
- const AbstractAttribute &QueryingAA,
- const AA::InstExclusionSetTy *ExclusionSet,
- std::function<bool(const Function &F)> GoBackwardsCB) {
- const Function *ToFn = ToI.getFunction();
- return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
- ExclusionSet, GoBackwardsCB);
- }
/// Public entry point: may any point in \p ToFn be reachable from \p FromI?
/// Passes a null target instruction so the helper accepts reaching the
/// function at all.
bool AA::isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Function &ToFn,
    const AbstractAttribute &QueryingAA,
    const AA::InstExclusionSetTy *ExclusionSet,
    std::function<bool(const Function &F)> GoBackwardsCB) {
  return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
                                  ExclusionSet, GoBackwardsCB);
}
/// Return true if the object \p Obj is assumed to be accessed by a single
/// thread only: undef values, non-captured stack objects (when the stack is
/// not accessible by other threads), constant or thread-local globals, and —
/// on GPU targets — objects in the GPU-local or constant address spaces.
bool AA::isAssumedThreadLocalObject(Attributor &A, Value &Obj,
                                    const AbstractAttribute &QueryingAA) {
  if (isa<UndefValue>(Obj))
    return true;
  if (isa<AllocaInst>(Obj)) {
    InformationCache &InfoCache = A.getInfoCache();
    if (!InfoCache.stackIsAccessibleByOtherThreads()) {
      LLVM_DEBUG(
          dbgs() << "[AA] Object '" << Obj
                 << "' is thread local; stack objects are thread local.\n");
      return true;
    }
    // The stack may be shared; an alloca is only thread local if it does not
    // escape (is not captured).
    const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
        QueryingAA, IRPosition::value(Obj), DepClassTy::OPTIONAL);
    LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is "
                      << (NoCaptureAA.isAssumedNoCapture() ? "" : "not")
                      << " thread local; "
                      << (NoCaptureAA.isAssumedNoCapture() ? "non-" : "")
                      << "captured stack object.\n");
    return NoCaptureAA.isAssumedNoCapture();
  }
  if (auto *GV = dyn_cast<GlobalVariable>(&Obj)) {
    if (GV->isConstant()) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; constant global\n");
      return true;
    }
    if (GV->isThreadLocal()) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; thread local global\n");
      return true;
    }
  }

  if (A.getInfoCache().targetIsGPU()) {
    // GPU "local" and "constant" address spaces are private to a thread
    // (resp. read-only), hence thread local.
    if (Obj.getType()->getPointerAddressSpace() ==
        (int)AA::GPUAddressSpace::Local) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; GPU local memory\n");
      return true;
    }
    if (Obj.getType()->getPointerAddressSpace() ==
        (int)AA::GPUAddressSpace::Constant) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; GPU constant memory\n");
      return true;
    }
  }

  LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is not thread local\n");
  return false;
}
/// Return true if the effects of instruction \p I could be affected by a
/// synchronization barrier. Pure instructions are unaffected; otherwise we
/// collect the pointers \p I accesses (both ends of mem intrinsics) and defer
/// to the pointer-based overload. Unknown locations conservatively answer
/// true.
bool AA::isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I,
                                        const AbstractAttribute &QueryingAA) {
  if (!I.mayHaveSideEffects() && !I.mayReadFromMemory())
    return false;

  SmallSetVector<const Value *, 8> Ptrs;

  auto AddLocationPtr = [&](std::optional<MemoryLocation> Loc) {
    if (!Loc || !Loc->Ptr) {
      LLVM_DEBUG(
          dbgs() << "[AA] Access to unknown location; -> requires barriers\n");
      return false;
    }
    Ptrs.insert(Loc->Ptr);
    return true;
  };

  if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&I)) {
    if (!AddLocationPtr(MemoryLocation::getForDest(MI)))
      return true;
    // Transfers (memcpy/memmove) additionally read from a source location.
    if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(&I))
      if (!AddLocationPtr(MemoryLocation::getForSource(MTI)))
        return true;
  } else if (!AddLocationPtr(MemoryLocation::getOrNone(&I)))
    return true;

  return isPotentiallyAffectedByBarrier(A, Ptrs.getArrayRef(), QueryingAA, &I);
}
/// Return true if an access through any pointer in \p Ptrs could be affected
/// by a barrier, i.e. if any underlying object of a pointer is not assumed to
/// be thread local. Null pointers and undetermined underlying objects answer
/// true conservatively.
bool AA::isPotentiallyAffectedByBarrier(Attributor &A,
                                        ArrayRef<const Value *> Ptrs,
                                        const AbstractAttribute &QueryingAA,
                                        const Instruction *CtxI) {
  for (const Value *Ptr : Ptrs) {
    if (!Ptr) {
      LLVM_DEBUG(dbgs() << "[AA] nullptr; -> requires barriers\n");
      return true;
    }

    auto Pred = [&](Value &Obj) {
      // Thread-local objects cannot be observed by other threads, so a
      // barrier cannot change what is visible through them.
      if (AA::isAssumedThreadLocalObject(A, Obj, QueryingAA))
        return true;
      LLVM_DEBUG(dbgs() << "[AA] Access to '" << Obj << "' via '" << *Ptr
                        << "'; -> requires barrier\n");
      return false;
    };

    const auto &UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
        QueryingAA, IRPosition::value(*Ptr), DepClassTy::OPTIONAL);
    if (!UnderlyingObjsAA.forallUnderlyingObjects(Pred))
      return true;
  }
  return false;
}
- /// Return true if \p New is equal or worse than \p Old.
- static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
- if (!Old.isIntAttribute())
- return true;
- return Old.getValueAsInt() >= New.getValueAsInt();
- }
/// Return true if the information provided by \p Attr was added to the
/// attribute list \p Attrs. This is only the case if it was not already present
/// in \p Attrs at the position describe by \p PK and \p AttrIdx.
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
                             AttributeList &Attrs, int AttrIdx,
                             bool ForceReplace = false) {

  if (Attr.isEnumAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
      // Keep an existing attribute unless the new one is strictly better or
      // replacement is forced.
      if (!ForceReplace &&
          isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isStringAttribute()) {
    StringRef Kind = Attr.getKindAsString();
    if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
      // NOTE(review): isEqualOrWorse only compares integer payloads, so an
      // existing string attribute is never replaced here unless ForceReplace
      // is set — confirm string values cannot meaningfully differ.
      if (!ForceReplace &&
          isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isIntAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
      if (!ForceReplace &&
          isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
        return false;
    // Remove the old entry first so the new integer payload takes effect.
    Attrs = Attrs.removeAttributeAtIndex(Ctx, AttrIdx, Kind);
    Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
    return true;
  }

  llvm_unreachable("Expected enum or string attribute!");
}
/// Return the argument this position is associated with: the anchor itself for
/// argument positions, otherwise the (unique callback or direct) callee
/// argument that corresponds to this call site argument. Returns nullptr if no
/// argument can be identified.
Argument *IRPosition::getAssociatedArgument() const {
  if (getPositionKind() == IRP_ARGUMENT)
    return cast<Argument>(&getAnchorValue());

  // Not an Argument and no argument number means this is not a call site
  // argument, thus we cannot find a callback argument to return.
  int ArgNo = getCallSiteArgNo();
  if (ArgNo < 0)
    return nullptr;

  // Use abstract call sites to make the connection between the call site
  // values and the ones in callbacks. If a callback was found that makes use
  // of the underlying call site operand, we want the corresponding callback
  // callee argument and not the direct callee argument.
  std::optional<Argument *> CBCandidateArg;
  SmallVector<const Use *, 4> CallbackUses;
  const auto &CB = cast<CallBase>(getAnchorValue());
  AbstractCallSite::getCallbackUses(CB, CallbackUses);
  for (const Use *U : CallbackUses) {
    AbstractCallSite ACS(U);
    assert(ACS && ACS.isCallbackCall());
    if (!ACS.getCalledFunction())
      continue;

    for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {

      // Test if the underlying call site operand is argument number u of the
      // callback callee.
      if (ACS.getCallArgOperandNo(u) != ArgNo)
        continue;

      assert(ACS.getCalledFunction()->arg_size() > u &&
             "ACS mapped into var-args arguments!");
      if (CBCandidateArg) {
        // More than one candidate: the mapping is ambiguous, give up.
        CBCandidateArg = nullptr;
        break;
      }
      CBCandidateArg = ACS.getCalledFunction()->getArg(u);
    }
  }

  // If we found a unique callback candidate argument, return it.
  if (CBCandidateArg && *CBCandidateArg)
    return *CBCandidateArg;

  // If no callbacks were found, or none used the underlying call site operand
  // exclusively, use the direct callee argument if available.
  const Function *Callee = CB.getCalledFunction();
  if (Callee && Callee->arg_size() > unsigned(ArgNo))
    return Callee->getArg(ArgNo);

  return nullptr;
}
- ChangeStatus AbstractAttribute::update(Attributor &A) {
- ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
- if (getState().isAtFixpoint())
- return HasChanged;
- LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
- HasChanged = updateImpl(A);
- LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
- << "\n");
- return HasChanged;
- }
/// Materialize the attributes in \p DeducedAttrs at position \p IRP in the IR.
/// Picks the attribute list of the scope function or the anchoring call base
/// depending on the position kind, merges each deduced attribute via
/// addIfNotExistent, and writes the list back only if something changed.
/// Returns CHANGED iff at least one attribute was added/updated.
ChangeStatus
IRAttributeManifest::manifestAttrs(Attributor &A, const IRPosition &IRP,
                                   const ArrayRef<Attribute> &DeducedAttrs,
                                   bool ForceReplace) {
  Function *ScopeFn = IRP.getAnchorScope();
  IRPosition::Kind PK = IRP.getPositionKind();

  // In the following some generic code that will manifest attributes in
  // DeducedAttrs if they improve the current IR. Due to the different
  // annotation positions we use the underlying AttributeList interface.

  AttributeList Attrs;
  switch (PK) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    // Floating/invalid positions have no IR location to attach attributes to.
    return ChangeStatus::UNCHANGED;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    Attrs = ScopeFn->getAttributes();
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    Attrs = cast<CallBase>(IRP.getAnchorValue()).getAttributes();
    break;
  }

  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  for (const Attribute &Attr : DeducedAttrs) {
    if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx(), ForceReplace))
      continue;

    HasChanged = ChangeStatus::CHANGED;
  }

  if (HasChanged == ChangeStatus::UNCHANGED)
    return HasChanged;

  // Write the (modified) attribute list back to the IR.
  switch (PK) {
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    ScopeFn->setAttributes(Attrs);
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    cast<CallBase>(IRP.getAnchorValue()).setAttributes(Attrs);
    break;
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    break;
  }

  return HasChanged;
}
// Sentinel positions used as the DenseMap empty/tombstone keys for IRPosition.
const IRPosition IRPosition::EmptyKey(DenseMapInfo<void *>::getEmptyKey());
const IRPosition
    IRPosition::TombstoneKey(DenseMapInfo<void *>::getTombstoneKey());
/// Build the list of positions subsuming \p IRP, i.e., the related positions
/// (callee function/argument/returned positions, the anchoring call site,
/// the generic value position) whose attributes also apply to \p IRP. The
/// first entry is always \p IRP itself.
SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);

  // Helper to determine if operand bundles on a call site are benign or
  // potentially problematic. We handle only llvm.assume for now.
  auto CanIgnoreOperandBundles = [](const CallBase &CB) {
    return (isa<IntrinsicInst>(CB) &&
            cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic ::assume);
  };

  const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    // Arguments and return values are subsumed by their function.
    IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
    return;
  case IRPosition::IRP_CALL_SITE:
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
      if (const Function *Callee = CB->getCalledFunction())
        IRPositions.emplace_back(IRPosition::function(*Callee));
    return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
      if (const Function *Callee = CB->getCalledFunction()) {
        IRPositions.emplace_back(IRPosition::returned(*Callee));
        IRPositions.emplace_back(IRPosition::function(*Callee));
        // Arguments with a "returned" attribute forward their value; include
        // the matching call site argument, its value, and the argument itself.
        for (const Argument &Arg : Callee->args())
          if (Arg.hasReturnedAttr()) {
            IRPositions.emplace_back(
                IRPosition::callsite_argument(*CB, Arg.getArgNo()));
            IRPositions.emplace_back(
                IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
            IRPositions.emplace_back(IRPosition::argument(Arg));
          }
      }
    }
    IRPositions.emplace_back(IRPosition::callsite_function(*CB));
    return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
      const Function *Callee = CB->getCalledFunction();
      if (Callee) {
        if (Argument *Arg = IRP.getAssociatedArgument())
          IRPositions.emplace_back(IRPosition::argument(*Arg));
        IRPositions.emplace_back(IRPosition::function(*Callee));
      }
    }
    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
    return;
  }
  }
}
/// Return true if any attribute kind in \p AKs is present at this position or,
/// unless \p IgnoreSubsumingPositions is set, at a subsuming position. If an
/// Attributor \p A is given, attributes implied by llvm.assume knowledge are
/// considered as well.
bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
                         bool IgnoreSubsumingPositions, Attributor *A) const {
  SmallVector<Attribute, 4> Attrs;
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
    for (Attribute::AttrKind AK : AKs)
      if (EquivIRP.getAttrsFromIRAttr(AK, Attrs))
        return true;
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
  }
  if (A)
    for (Attribute::AttrKind AK : AKs)
      if (getAttrsFromAssumes(AK, Attrs, *A))
        return true;
  return false;
}
/// Collect into \p Attrs all attributes of the kinds in \p AKs found at this
/// position (and subsuming positions unless \p IgnoreSubsumingPositions).
/// With an Attributor \p A, assume-derived attributes are collected too.
void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
                          SmallVectorImpl<Attribute> &Attrs,
                          bool IgnoreSubsumingPositions, Attributor *A) const {
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
    for (Attribute::AttrKind AK : AKs)
      EquivIRP.getAttrsFromIRAttr(AK, Attrs);
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
  }
  if (A)
    for (Attribute::AttrKind AK : AKs)
      getAttrsFromAssumes(AK, Attrs, *A);
}
/// Look up attribute kind \p AK in the IR attribute list of this position
/// (call base attributes for call site positions, function attributes
/// otherwise). Appends the attribute to \p Attrs and returns true if present.
bool IRPosition::getAttrsFromIRAttr(Attribute::AttrKind AK,
                                    SmallVectorImpl<Attribute> &Attrs) const {
  // Floating and invalid positions have no attribute list to inspect.
  if (getPositionKind() == IRP_INVALID || getPositionKind() == IRP_FLOAT)
    return false;

  AttributeList AttrList;
  if (const auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
    AttrList = CB->getAttributes();
  else
    AttrList = getAssociatedFunction()->getAttributes();

  bool HasAttr = AttrList.hasAttributeAtIndex(getAttrIdx(), AK);
  if (HasAttr)
    Attrs.push_back(AttrList.getAttributeAtIndex(getAttrIdx(), AK));
  return HasAttr;
}
/// Derive attributes of kind \p AK for the associated value from llvm.assume
/// knowledge: any assume recorded for (value, AK) that must be executed in the
/// context of this position contributes an attribute with its Max payload.
/// Returns true if at least one attribute was appended to \p Attrs.
bool IRPosition::getAttrsFromAssumes(Attribute::AttrKind AK,
                                     SmallVectorImpl<Attribute> &Attrs,
                                     Attributor &A) const {
  assert(getPositionKind() != IRP_INVALID && "Did expect a valid position!");
  Value &AssociatedValue = getAssociatedValue();

  const Assume2KnowledgeMap &A2K =
      A.getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});

  // Check if we found any potential assume use, if not we don't need to create
  // explorer iterators.
  if (A2K.empty())
    return false;

  LLVMContext &Ctx = AssociatedValue.getContext();
  unsigned AttrsSize = Attrs.size();
  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();
  auto EIt = Explorer.begin(getCtxI()), EEnd = Explorer.end(getCtxI());
  for (const auto &It : A2K)
    // Only assumes that must execute in our context actually guarantee the
    // property at this position.
    if (Explorer.findInContextOf(It.first, EIt, EEnd))
      Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max));
  return AttrsSize != Attrs.size();
}
/// Sanity-check the internal encoding of this position against its kind.
/// Compiled to a no-op unless EXPENSIVE_CHECKS is enabled.
void IRPosition::verify() {
#ifdef EXPENSIVE_CHECKS
  switch (getPositionKind()) {
  case IRP_INVALID:
    assert((CBContext == nullptr) &&
           "Invalid position must not have CallBaseContext!");
    assert(!Enc.getOpaqueValue() &&
           "Expected a nullptr for an invalid position!");
    return;
  case IRP_FLOAT:
    assert((!isa<Argument>(&getAssociatedValue())) &&
           "Expected specialized kind for argument values!");
    return;
  case IRP_RETURNED:
    assert(isa<Function>(getAsValuePtr()) &&
           "Expected function for a 'returned' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE_RETURNED:
    assert((CBContext == nullptr) &&
           "'call site returned' position must not have CallBaseContext!");
    assert((isa<CallBase>(getAsValuePtr())) &&
           "Expected call base for 'call site returned' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE:
    assert((CBContext == nullptr) &&
           "'call site function' position must not have CallBaseContext!");
    assert((isa<CallBase>(getAsValuePtr())) &&
           "Expected call base for 'call site function' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_FUNCTION:
    assert(isa<Function>(getAsValuePtr()) &&
           "Expected function for a 'function' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_ARGUMENT:
    assert(isa<Argument>(getAsValuePtr()) &&
           "Expected argument for a 'argument' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE_ARGUMENT: {
    assert((CBContext == nullptr) &&
           "'call site argument' position must not have CallBaseContext!");
    // Call site arguments are encoded as a Use, not a Value pointer.
    Use *U = getAsUsePtr();
    (void)U; // Silence unused variable warning.
    assert(U && "Expected use for a 'call site argument' position!");
    assert(isa<CallBase>(U->getUser()) &&
           "Expected call base user for a 'call site argument' position!");
    assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
           "Expected call base argument operand for a 'call site argument' "
           "position");
    assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
               unsigned(getCallSiteArgNo()) &&
           "Argument number mismatch!");
    assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
    return;
  }
  }
#endif
}
/// Return the constant the value at \p IRP is assumed to simplify to:
/// std::nullopt means "not known yet / dead", nullptr means "no constant",
/// otherwise the constant itself. Outside simplification callbacks take
/// precedence over AA-based simplification.
std::optional<Constant *>
Attributor::getAssumedConstant(const IRPosition &IRP,
                               const AbstractAttribute &AA,
                               bool &UsedAssumedInformation) {
  // First check all callbacks provided by outside AAs. If any of them returns
  // a non-null value that is different from the associated value, or
  // std::nullopt, we assume it's simplified.
  for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
    std::optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
    if (!SimplifiedV)
      return std::nullopt;
    if (isa_and_nonnull<Constant>(*SimplifiedV))
      return cast<Constant>(*SimplifiedV);
    // Only the first registered callback is consulted.
    return nullptr;
  }
  if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
    return C;
  SmallVector<AA::ValueAndContext> Values;
  if (getAssumedSimplifiedValues(IRP, &AA, Values,
                                 AA::ValueScope::Interprocedural,
                                 UsedAssumedInformation)) {
    // No potential values means the position is assumed dead/unreachable.
    if (Values.empty())
      return std::nullopt;
    if (auto *C = dyn_cast_or_null<Constant>(
            AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
      return C;
  }
  return nullptr;
}
/// Return the single value \p IRP is assumed to simplify to in scope \p S:
/// std::nullopt for "dead/no value", nullptr for "unknown", the associated
/// value if no simplification applies. Outside simplification callbacks are
/// consulted first and short-circuit the AA-based path.
std::optional<Value *> Attributor::getAssumedSimplified(
    const IRPosition &IRP, const AbstractAttribute *AA,
    bool &UsedAssumedInformation, AA::ValueScope S) {
  // First check all callbacks provided by outside AAs. If any of them returns
  // a non-null value that is different from the associated value, or
  // std::nullopt, we assume it's simplified.
  for (auto &CB : SimplificationCallbacks.lookup(IRP))
    return CB(IRP, AA, UsedAssumedInformation);

  SmallVector<AA::ValueAndContext> Values;
  // If simplification fails, conservatively return the associated value.
  if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
    return &IRP.getAssociatedValue();
  if (Values.empty())
    return std::nullopt;
  if (AA)
    if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
      return V;
  // For returned positions there is no single IR value to fall back to.
  if (IRP.getPositionKind() == IRPosition::IRP_RETURNED ||
      IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED)
    return nullptr;
  return &IRP.getAssociatedValue();
}
/// Collect in \p Values all values \p IRP may simplify to in scope \p S.
/// Registered outside callbacks take precedence; otherwise AAPotentialValues
/// is queried. Returns false if simplification failed and the caller must be
/// conservative.
bool Attributor::getAssumedSimplifiedValues(
    const IRPosition &IRP, const AbstractAttribute *AA,
    SmallVectorImpl<AA::ValueAndContext> &Values, AA::ValueScope S,
    bool &UsedAssumedInformation) {
  // First check all callbacks provided by outside AAs. If any of them returns
  // a non-null value that is different from the associated value, or
  // std::nullopt, we assume it's simplified.
  const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
  for (const auto &CB : SimplificationCBs) {
    std::optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
    if (!CBResult.has_value())
      continue;
    Value *V = *CBResult;
    if (!V)
      return false;
    // Intraprocedural queries must not leak values from other scopes.
    if ((S & AA::ValueScope::Interprocedural) ||
        AA::isValidInScope(*V, IRP.getAnchorScope()))
      Values.push_back(AA::ValueAndContext{*V, nullptr});
    else
      return false;
  }
  if (!SimplificationCBs.empty())
    return true;

  // If no high-level/outside simplification occurred, use AAPotentialValues.
  const auto &PotentialValuesAA =
      getOrCreateAAFor<AAPotentialValues>(IRP, AA, DepClassTy::OPTIONAL);
  if (!PotentialValuesAA.getAssumedSimplifiedValues(*this, Values, S))
    return false;
  UsedAssumedInformation |= !PotentialValuesAA.isAtFixpoint();
  return true;
}
/// Translate \p V, if it is an argument of the callee of \p CB, into the
/// simplified value seen at the call site. std::nullopt and constants pass
/// through unchanged; non-translatable values yield nullptr (unknown).
std::optional<Value *> Attributor::translateArgumentToCallSiteContent(
    std::optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
    bool &UsedAssumedInformation) {
  if (!V)
    return V;
  if (*V == nullptr || isa<Constant>(*V))
    return V;
  // Only direct-call arguments without in-memory semantics (byval & friends)
  // can be replaced by the call site operand's simplified value.
  if (auto *Arg = dyn_cast<Argument>(*V))
    if (CB.getCalledFunction() == Arg->getParent())
      if (!Arg->hasPointeeInMemoryValueAttr())
        return getAssumedSimplified(
            IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
            UsedAssumedInformation, AA::Intraprocedural);
  return nullptr;
}
- Attributor::~Attributor() {
- // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
- // thus we cannot delete them. We can, and want to, destruct them though.
- for (auto &It : AAMap) {
- AbstractAttribute *AA = It.getSecond();
- AA->~AbstractAttribute();
- }
- }
/// Return true if the position of \p AA is assumed dead. Positions outside the
/// set of functions this Attributor instance covers are never considered dead.
bool Attributor::isAssumedDead(const AbstractAttribute &AA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass) {
  const IRPosition &IRP = AA.getIRPosition();
  if (!Functions.count(IRP.getAnchorScope()))
    return false;
  return isAssumedDead(IRP, &AA, FnLivenessAA, UsedAssumedInformation,
                       CheckBBLivenessOnly, DepClass);
}
/// Return true if use \p U is assumed dead. The use is translated into the
/// most precise position we can query: call site argument, return position,
/// incoming-edge terminator for PHIs, removable stores, or — as fallback —
/// the user instruction itself.
bool Attributor::isAssumedDead(const Use &U,
                               const AbstractAttribute *QueryingAA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass) {
  Instruction *UserI = dyn_cast<Instruction>(U.getUser());
  if (!UserI)
    // Non-instruction users: fall back to the liveness of the used value.
    return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);

  if (auto *CB = dyn_cast<CallBase>(UserI)) {
    // For call site argument uses we can check if the argument is
    // unused/dead.
    if (CB->isArgOperand(&U)) {
      const IRPosition &CSArgPos =
          IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
      return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
                           UsedAssumedInformation, CheckBBLivenessOnly,
                           DepClass);
    }
  } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
    // A returned value is dead iff the function's return position is dead.
    const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
    return isAssumedDead(RetPos, QueryingAA, FnLivenessAA,
                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
  } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
    // An incoming PHI value is dead if the corresponding edge (its
    // predecessor's terminator) is dead.
    BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
    return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
  } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
    // The stored-value use of a removable store is dead.
    if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
      const IRPosition IRP = IRPosition::inst(*SI);
      const AAIsDead &IsDeadAA =
          getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
      if (IsDeadAA.isRemovableStore()) {
        if (QueryingAA)
          recordDependence(IsDeadAA, *QueryingAA, DepClass);
        if (!IsDeadAA.isKnown(AAIsDead::IS_REMOVABLE))
          UsedAssumedInformation = true;
        return true;
      }
    }
  }

  return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
                       UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
}
/// Return true if instruction \p I is assumed dead. Function-level liveness
/// (\p FnLivenessAA, created on demand) is consulted first; unless
/// \p CheckBBLivenessOnly, instruction-level AAIsDead is queried as well, and
/// with \p CheckForDeadStore removable stores also count as dead.
bool Attributor::isAssumedDead(const Instruction &I,
                               const AbstractAttribute *QueryingAA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass,
                               bool CheckForDeadStore) {
  const IRPosition::CallBaseContext *CBCtx =
      QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;

  // Blocks created during manifestation are not tracked as dead.
  if (ManifestAddedBlocks.contains(I.getParent()))
    return false;

  const Function &F = *I.getFunction();
  if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
    FnLivenessAA = &getOrCreateAAFor<AAIsDead>(IRPosition::function(F, CBCtx),
                                               QueryingAA, DepClassTy::NONE);

  // Don't use recursive reasoning.
  if (QueryingAA == FnLivenessAA)
    return false;

  // If we have a context instruction and a liveness AA we use it.
  if (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(I.getParent())
                          : FnLivenessAA->isAssumedDead(&I)) {
    if (QueryingAA)
      recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
    if (!FnLivenessAA->isKnownDead(&I))
      UsedAssumedInformation = true;
    return true;
  }

  if (CheckBBLivenessOnly)
    return false;

  const IRPosition IRP = IRPosition::inst(I, CBCtx);
  const AAIsDead &IsDeadAA =
      getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);

  // Don't use recursive reasoning.
  if (QueryingAA == &IsDeadAA)
    return false;

  if (IsDeadAA.isAssumedDead()) {
    if (QueryingAA)
      recordDependence(IsDeadAA, *QueryingAA, DepClass);
    if (!IsDeadAA.isKnownDead())
      UsedAssumedInformation = true;
    return true;
  }

  if (CheckForDeadStore && isa<StoreInst>(I) && IsDeadAA.isRemovableStore()) {
    if (QueryingAA)
      recordDependence(IsDeadAA, *QueryingAA, DepClass);
    if (!IsDeadAA.isKnownDead())
      UsedAssumedInformation = true;
    return true;
  }

  return false;
}
/// Determine whether position \p IRP is assumed dead. First checks
/// block-level liveness of the context instruction (if any), then queries a
/// position-specific AAIsDead; call-site positions are redirected to the
/// call-site-returned position.
bool Attributor::isAssumedDead(const IRPosition &IRP,
                               const AbstractAttribute *QueryingAA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass) {
  // Don't check liveness for constants, e.g. functions, used as (floating)
  // values since the context instruction and such is here meaningless.
  if (IRP.getPositionKind() == IRPosition::IRP_FLOAT &&
      isa<Constant>(IRP.getAssociatedValue())) {
    return false;
  }
  Instruction *CtxI = IRP.getCtxI();
  // Block-level check only here; the DepClass is weakened to OPTIONAL unless
  // the caller itself only asked for block-level liveness.
  if (CtxI &&
      isAssumedDead(*CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
                    /* CheckBBLivenessOnly */ true,
                    CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
    return true;
  if (CheckBBLivenessOnly)
    return false;
  // If we haven't succeeded we query the specific liveness info for the IRP.
  const AAIsDead *IsDeadAA;
  if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE)
    IsDeadAA = &getOrCreateAAFor<AAIsDead>(
        IRPosition::callsite_returned(cast<CallBase>(IRP.getAssociatedValue())),
        QueryingAA, DepClassTy::NONE);
  else
    IsDeadAA = &getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
  // Don't use recursive reasoning.
  if (QueryingAA == IsDeadAA)
    return false;
  if (IsDeadAA->isAssumedDead()) {
    if (QueryingAA)
      recordDependence(*IsDeadAA, *QueryingAA, DepClass);
    // Assumed (not known) deadness means the result may change later.
    if (!IsDeadAA->isKnownDead())
      UsedAssumedInformation = true;
    return true;
  }
  return false;
}
- bool Attributor::isAssumedDead(const BasicBlock &BB,
- const AbstractAttribute *QueryingAA,
- const AAIsDead *FnLivenessAA,
- DepClassTy DepClass) {
- const Function &F = *BB.getParent();
- if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
- FnLivenessAA = &getOrCreateAAFor<AAIsDead>(IRPosition::function(F),
- QueryingAA, DepClassTy::NONE);
- // Don't use recursive reasoning.
- if (QueryingAA == FnLivenessAA)
- return false;
- if (FnLivenessAA->isAssumedDead(&BB)) {
- if (QueryingAA)
- recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
- return true;
- }
- return false;
- }
/// Apply \p Pred to all (transitive) uses of \p V, skipping dead and
/// (optionally) droppable uses. Stored values are followed into their
/// potential copies, and returned values are followed into call sites of the
/// returning function. Returns false as soon as \p Pred (or a follow-up
/// expansion) fails; true if all uses were processed successfully.
bool Attributor::checkForAllUses(
    function_ref<bool(const Use &, bool &)> Pred,
    const AbstractAttribute &QueryingAA, const Value &V,
    bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
    bool IgnoreDroppableUses,
    function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {
  // Check virtual uses first.
  for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&V))
    if (!CB(*this, &QueryingAA))
      return false;
  // Check the trivial case first as it catches void values.
  if (V.use_empty())
    return true;
  const IRPosition &IRP = QueryingAA.getIRPosition();
  SmallVector<const Use *, 16> Worklist;
  SmallPtrSet<const Use *, 16> Visited;
  // Enqueue all uses of \p V; when following a copy (OldUse set), give the
  // equivalence callback a chance to reject the transfer.
  auto AddUsers = [&](const Value &V, const Use *OldUse) {
    for (const Use &UU : V.uses()) {
      if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
        LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
                             "rejected by the equivalence call back: "
                          << *UU << "!\n");
        return false;
      }
      Worklist.push_back(&UU);
    }
    return true;
  };
  AddUsers(V, /* OldUse */ nullptr);
  LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
                    << " initial uses to check\n");
  const Function *ScopeFn = IRP.getAnchorScope();
  const auto *LivenessAA =
      ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
                                    DepClassTy::NONE)
              : nullptr;
  while (!Worklist.empty()) {
    const Use *U = Worklist.pop_back_val();
    // PHIs can create cycles; only visit each PHI use once.
    if (isa<PHINode>(U->getUser()) && !Visited.insert(U).second)
      continue;
    DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
      if (auto *Fn = dyn_cast<Function>(U->getUser()))
        dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
               << "\n";
      else
        dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
               << "\n";
    });
    bool UsedAssumedInformation = false;
    if (isAssumedDead(*U, &QueryingAA, LivenessAA, UsedAssumedInformation,
                      CheckBBLivenessOnly, LivenessDepClass)) {
      DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
                      dbgs() << "[Attributor] Dead use, skip!\n");
      continue;
    }
    if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
      DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
                      dbgs() << "[Attributor] Droppable user, skip!\n");
      continue;
    }
    // If the value is stored (as the value operand), continue with the
    // potential copies of the stored value instead of the store itself,
    // provided all copies can be identified exactly.
    if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
      if (&SI->getOperandUse(0) == U) {
        if (!Visited.insert(U).second)
          continue;
        SmallSetVector<Value *, 4> PotentialCopies;
        if (AA::getPotentialCopiesOfStoredValue(
                *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
                /* OnlyExact */ true)) {
          DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
                          dbgs()
                              << "[Attributor] Value is stored, continue with "
                              << PotentialCopies.size()
                              << " potential copies instead!\n");
          for (Value *PotentialCopy : PotentialCopies)
            if (!AddUsers(*PotentialCopy, U))
              return false;
          continue;
        }
      }
    }
    bool Follow = false;
    if (!Pred(*U, Follow))
      return false;
    if (!Follow)
      continue;
    User &Usr = *U->getUser();
    AddUsers(Usr, /* OldUse */ nullptr);
    // Returned values escape through call sites; follow them into every
    // caller if all call sites are known.
    auto *RI = dyn_cast<ReturnInst>(&Usr);
    if (!RI)
      continue;
    Function &F = *RI->getFunction();
    auto CallSitePred = [&](AbstractCallSite ACS) {
      return AddUsers(*ACS.getInstruction(), U);
    };
    if (!checkForAllCallSites(CallSitePred, F, /* RequireAllCallSites */ true,
                              &QueryingAA, UsedAssumedInformation)) {
      LLVM_DEBUG(dbgs() << "[Attributor] Could not follow return instruction "
                           "to all call sites: "
                        << *RI << "\n");
      return false;
    }
  }
  return true;
}
- bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
- const AbstractAttribute &QueryingAA,
- bool RequireAllCallSites,
- bool &UsedAssumedInformation) {
- // We can try to determine information from
- // the call sites. However, this is only possible all call sites are known,
- // hence the function has internal linkage.
- const IRPosition &IRP = QueryingAA.getIRPosition();
- const Function *AssociatedFunction = IRP.getAssociatedFunction();
- if (!AssociatedFunction) {
- LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
- << "\n");
- return false;
- }
- return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
- &QueryingAA, UsedAssumedInformation);
- }
/// Apply \p Pred to all call sites of \p Fn. Walks the function's uses,
/// looking through constant cast expressions, skipping dead uses (unless
/// \p CheckPotentiallyDead), and rejecting non-call-site uses when
/// \p RequireAllCallSites demands that every call site is known.
bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
                                      const Function &Fn,
                                      bool RequireAllCallSites,
                                      const AbstractAttribute *QueryingAA,
                                      bool &UsedAssumedInformation,
                                      bool CheckPotentiallyDead) {
  // We can try to determine information from
  // the call sites. However, this is only possible if all call sites are
  // known, hence the function has internal linkage.
  if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
    LLVM_DEBUG(
        dbgs()
        << "[Attributor] Function " << Fn.getName()
        << " has no internal linkage, hence not all call sites are known\n");
    return false;
  }
  // Check virtual uses first.
  for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&Fn))
    if (!CB(*this, QueryingAA))
      return false;
  // Index-based loop because the worklist grows when we expand constant
  // expressions below.
  SmallVector<const Use *, 8> Uses(make_pointer_range(Fn.uses()));
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use &U = *Uses[u];
    DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
      if (auto *Fn = dyn_cast<Function>(U))
        dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
               << *U.getUser() << "\n";
      else
        dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
               << "\n";
    });
    if (!CheckPotentiallyDead &&
        isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
                      /* CheckBBLivenessOnly */ true)) {
      DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
                      dbgs() << "[Attributor] Dead use, skip!\n");
      continue;
    }
    // Pointer casts of the function are transparent; inspect the uses of the
    // cast expression instead.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
      if (CE->isCast() && CE->getType()->isPointerTy()) {
        DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
          dbgs() << "[Attributor] Use, is constant cast expression, add "
                 << CE->getNumUses() << " uses of that expression instead!\n";
        });
        for (const Use &CEU : CE->uses())
          Uses.push_back(&CEU);
        continue;
      }
    }
    AbstractCallSite ACS(&U);
    if (!ACS) {
      LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
                        << " has non call site use " << *U.get() << " in "
                        << *U.getUser() << "\n");
      // BlockAddress users are allowed.
      if (isa<BlockAddress>(U.getUser()))
        continue;
      return false;
    }
    // For callback calls the relevant use is the callee use inside the
    // broker call, not the direct use we found.
    const Use *EffectiveUse =
        ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
    if (!ACS.isCallee(EffectiveUse)) {
      if (!RequireAllCallSites) {
        LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
                          << " is not a call of " << Fn.getName()
                          << ", skip use\n");
        continue;
      }
      LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
                        << " is an invalid use of " << Fn.getName() << "\n");
      return false;
    }
    // Make sure the arguments that can be matched between the call site and the
    // callee agree on their type. It is unlikely they do not and it doesn't
    // make sense for all attributes to know/care about this.
    assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
    unsigned MinArgsParams =
        std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
    for (unsigned u = 0; u < MinArgsParams; ++u) {
      Value *CSArgOp = ACS.getCallArgOperand(u);
      if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
        LLVM_DEBUG(
            dbgs() << "[Attributor] Call site / callee argument type mismatch ["
                   << u << "@" << Fn.getName() << ": "
                   << *Fn.getArg(u)->getType() << " vs. "
                   << *ACS.getCallArgOperand(u)->getType() << "\n");
        return false;
      }
    }
    if (Pred(ACS))
      continue;
    LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
                      << *ACS.getInstruction() << "\n");
    return false;
  }
  return true;
}
- bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) {
- // TODO: Maintain a cache of Values that are
- // on the pathway from a Argument to a Instruction that would effect the
- // liveness/return state etc.
- return EnableCallSiteSpecific;
- }
- bool Attributor::checkForAllReturnedValuesAndReturnInsts(
- function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred,
- const AbstractAttribute &QueryingAA) {
- const IRPosition &IRP = QueryingAA.getIRPosition();
- // Since we need to provide return instructions we have to have an exact
- // definition.
- const Function *AssociatedFunction = IRP.getAssociatedFunction();
- if (!AssociatedFunction)
- return false;
- // If this is a call site query we use the call site specific return values
- // and liveness information.
- // TODO: use the function scope once we have call site AAReturnedValues.
- const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
- const auto &AARetVal =
- getAAFor<AAReturnedValues>(QueryingAA, QueryIRP, DepClassTy::REQUIRED);
- if (!AARetVal.getState().isValidState())
- return false;
- return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
- }
- bool Attributor::checkForAllReturnedValues(
- function_ref<bool(Value &)> Pred, const AbstractAttribute &QueryingAA) {
- const IRPosition &IRP = QueryingAA.getIRPosition();
- const Function *AssociatedFunction = IRP.getAssociatedFunction();
- if (!AssociatedFunction)
- return false;
- // TODO: use the function scope once we have call site AAReturnedValues.
- const IRPosition &QueryIRP = IRPosition::function(
- *AssociatedFunction, QueryingAA.getCallBaseContext());
- const auto &AARetVal =
- getAAFor<AAReturnedValues>(QueryingAA, QueryIRP, DepClassTy::REQUIRED);
- if (!AARetVal.getState().isValidState())
- return false;
- return AARetVal.checkForAllReturnedValuesAndReturnInsts(
- [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
- return Pred(RV);
- });
- }
- static bool checkForAllInstructionsImpl(
- Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap,
- function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA,
- const AAIsDead *LivenessAA, const ArrayRef<unsigned> &Opcodes,
- bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false,
- bool CheckPotentiallyDead = false) {
- for (unsigned Opcode : Opcodes) {
- // Check if we have instructions with this opcode at all first.
- auto *Insts = OpcodeInstMap.lookup(Opcode);
- if (!Insts)
- continue;
- for (Instruction *I : *Insts) {
- // Skip dead instructions.
- if (A && !CheckPotentiallyDead &&
- A->isAssumedDead(IRPosition::inst(*I), QueryingAA, LivenessAA,
- UsedAssumedInformation, CheckBBLivenessOnly)) {
- DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
- dbgs() << "[Attributor] Instruction " << *I
- << " is potentially dead, skip!\n";);
- continue;
- }
- if (!Pred(*I))
- return false;
- }
- }
- return true;
- }
- bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
- const Function *Fn,
- const AbstractAttribute &QueryingAA,
- const ArrayRef<unsigned> &Opcodes,
- bool &UsedAssumedInformation,
- bool CheckBBLivenessOnly,
- bool CheckPotentiallyDead) {
- // Since we need to provide instructions we have to have an exact definition.
- if (!Fn || Fn->isDeclaration())
- return false;
- // TODO: use the function scope once we have call site AAReturnedValues.
- const IRPosition &QueryIRP = IRPosition::function(*Fn);
- const auto *LivenessAA =
- (CheckBBLivenessOnly || CheckPotentiallyDead)
- ? nullptr
- : &(getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE));
- auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
- if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, &QueryingAA,
- LivenessAA, Opcodes, UsedAssumedInformation,
- CheckBBLivenessOnly, CheckPotentiallyDead))
- return false;
- return true;
- }
- bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
- const AbstractAttribute &QueryingAA,
- const ArrayRef<unsigned> &Opcodes,
- bool &UsedAssumedInformation,
- bool CheckBBLivenessOnly,
- bool CheckPotentiallyDead) {
- const IRPosition &IRP = QueryingAA.getIRPosition();
- const Function *AssociatedFunction = IRP.getAssociatedFunction();
- return checkForAllInstructions(Pred, AssociatedFunction, QueryingAA, Opcodes,
- UsedAssumedInformation, CheckBBLivenessOnly,
- CheckPotentiallyDead);
- }
- bool Attributor::checkForAllReadWriteInstructions(
- function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
- bool &UsedAssumedInformation) {
- const Function *AssociatedFunction =
- QueryingAA.getIRPosition().getAssociatedFunction();
- if (!AssociatedFunction)
- return false;
- // TODO: use the function scope once we have call site AAReturnedValues.
- const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
- const auto &LivenessAA =
- getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE);
- for (Instruction *I :
- InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
- // Skip dead instructions.
- if (isAssumedDead(IRPosition::inst(*I), &QueryingAA, &LivenessAA,
- UsedAssumedInformation))
- continue;
- if (!Pred(*I))
- return false;
- }
- return true;
- }
/// Drive all abstract attributes to a fixpoint (or the iteration limit).
/// Each round updates the worklist AAs, fast-propagates invalid states along
/// required dependences, and re-enqueues dependents of changed AAs. AAs still
/// unsettled when iteration stops are forced to a pessimistic fixpoint.
void Attributor::runTillFixpoint() {
  TimeTraceScope TimeScope("Attributor::runTillFixpoint");
  LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
                    << DG.SyntheticRoot.Deps.size()
                    << " abstract attributes.\n");
  // Now that all abstract attributes are collected and initialized we start
  // the abstract analysis.
  unsigned IterationCounter = 1;
  unsigned MaxIterations =
      Configuration.MaxFixpointIterations.value_or(SetFixpointIterations);
  SmallVector<AbstractAttribute *, 32> ChangedAAs;
  SetVector<AbstractAttribute *> Worklist, InvalidAAs;
  Worklist.insert(DG.SyntheticRoot.begin(), DG.SyntheticRoot.end());
  do {
    // Remember the size to determine new attributes.
    size_t NumAAs = DG.SyntheticRoot.Deps.size();
    LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
                      << ", Worklist size: " << Worklist.size() << "\n");
    // For invalid AAs we can fix dependent AAs that have a required dependence,
    // thereby folding long dependence chains in a single step without the need
    // to run updates.
    for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
      AbstractAttribute *InvalidAA = InvalidAAs[u];
      // Check the dependences to fast track invalidation.
      DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
                      dbgs() << "[Attributor] InvalidAA: " << *InvalidAA
                             << " has " << InvalidAA->Deps.size()
                             << " required & optional dependences\n");
      while (!InvalidAA->Deps.empty()) {
        const auto &Dep = InvalidAA->Deps.back();
        InvalidAA->Deps.pop_back();
        AbstractAttribute *DepAA = cast<AbstractAttribute>(Dep.getPointer());
        // Optional dependences only require a recompute, not invalidation.
        if (Dep.getInt() == unsigned(DepClassTy::OPTIONAL)) {
          DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
                          dbgs() << " - recompute: " << *DepAA);
          Worklist.insert(DepAA);
          continue;
        }
        // Required dependences on an invalid AA force a pessimistic fixpoint.
        DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, dbgs()
                                                << " - invalidate: " << *DepAA);
        DepAA->getState().indicatePessimisticFixpoint();
        assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
        if (!DepAA->getState().isValidState())
          InvalidAAs.insert(DepAA);
        else
          ChangedAAs.push_back(DepAA);
      }
    }
    // Add all abstract attributes that are potentially dependent on one that
    // changed to the work list.
    for (AbstractAttribute *ChangedAA : ChangedAAs)
      while (!ChangedAA->Deps.empty()) {
        Worklist.insert(
            cast<AbstractAttribute>(ChangedAA->Deps.back().getPointer()));
        ChangedAA->Deps.pop_back();
      }
    LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
                      << ", Worklist+Dependent size: " << Worklist.size()
                      << "\n");
    // Reset the changed and invalid set.
    ChangedAAs.clear();
    InvalidAAs.clear();
    // Update all abstract attribute in the work list and record the ones that
    // changed.
    for (AbstractAttribute *AA : Worklist) {
      const auto &AAState = AA->getState();
      if (!AAState.isAtFixpoint())
        if (updateAA(*AA) == ChangeStatus::CHANGED)
          ChangedAAs.push_back(AA);
      // Use the InvalidAAs vector to propagate invalid states fast transitively
      // without requiring updates.
      if (!AAState.isValidState())
        InvalidAAs.insert(AA);
    }
    // Add attributes to the changed set if they have been created in the last
    // iteration.
    ChangedAAs.append(DG.SyntheticRoot.begin() + NumAAs,
                      DG.SyntheticRoot.end());
    // Reset the work list and repopulate with the changed abstract attributes.
    // Note that dependent ones are added above.
    Worklist.clear();
    Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
    Worklist.insert(QueryAAsAwaitingUpdate.begin(),
                    QueryAAsAwaitingUpdate.end());
    QueryAAsAwaitingUpdate.clear();
  } while (!Worklist.empty() &&
           (IterationCounter++ < MaxIterations || VerifyMaxFixpointIterations));
  // Emit a remark if we stopped before reaching a fixpoint.
  if (IterationCounter > MaxIterations && !Functions.empty()) {
    auto Remark = [&](OptimizationRemarkMissed ORM) {
      return ORM << "Attributor did not reach a fixpoint after "
                 << ore::NV("Iterations", MaxIterations) << " iterations.";
    };
    Function *F = Functions.front();
    emitRemark<OptimizationRemarkMissed>(F, "FixedPoint", Remark);
  }
  LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
                    << IterationCounter << "/" << MaxIterations
                    << " iterations\n");
  // Reset abstract arguments not settled in a sound fixpoint by now. This
  // happens when we stopped the fixpoint iteration early. Note that only the
  // ones marked as "changed" *and* the ones transitively depending on them
  // need to be reverted to a pessimistic state. Others might not be in a
  // fixpoint state but we can use the optimistic results for them anyway.
  SmallPtrSet<AbstractAttribute *, 32> Visited;
  for (unsigned u = 0; u < ChangedAAs.size(); u++) {
    AbstractAttribute *ChangedAA = ChangedAAs[u];
    if (!Visited.insert(ChangedAA).second)
      continue;
    AbstractState &State = ChangedAA->getState();
    if (!State.isAtFixpoint()) {
      State.indicatePessimisticFixpoint();
      NumAttributesTimedOut++;
    }
    // Transitively revert dependents as well (appended to the same vector).
    while (!ChangedAA->Deps.empty()) {
      ChangedAAs.push_back(
          cast<AbstractAttribute>(ChangedAA->Deps.back().getPointer()));
      ChangedAA->Deps.pop_back();
    }
  }
  LLVM_DEBUG({
    if (!Visited.empty())
      dbgs() << "\n[Attributor] Finalized " << Visited.size()
             << " abstract attributes.\n";
  });
  if (VerifyMaxFixpointIterations && IterationCounter != MaxIterations) {
    errs() << "\n[Attributor] Fixpoint iteration done after: "
           << IterationCounter << "/" << MaxIterations << " iterations\n";
    llvm_unreachable("The fixpoint was not reached with exactly the number of "
                     "specified iterations!");
  }
}
- void Attributor::registerForUpdate(AbstractAttribute &AA) {
- assert(AA.isQueryAA() &&
- "Non-query AAs should not be required to register for updates!");
- QueryAAsAwaitingUpdate.insert(&AA);
- }
/// Manifest all valid abstract attribute states into the IR. States not yet
/// at a fixpoint are promoted to an optimistic fixpoint first (sound because
/// unsettled ones were pessimized in runTillFixpoint). Returns whether any
/// IR was changed.
ChangeStatus Attributor::manifestAttributes() {
  TimeTraceScope TimeScope("Attributor::manifestAttributes");
  size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
  unsigned NumManifested = 0;
  unsigned NumAtFixpoint = 0;
  ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
  for (auto &DepAA : DG.SyntheticRoot.Deps) {
    AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
    AbstractState &State = AA->getState();
    // If there is not already a fixpoint reached, we can now take the
    // optimistic state. This is correct because we enforced a pessimistic one
    // on abstract attributes that were transitively dependent on a changed one
    // already above.
    if (!State.isAtFixpoint())
      State.indicateOptimisticFixpoint();
    // We must not manifest Attributes that use Callbase info.
    if (AA->hasCallBaseContext())
      continue;
    // If the state is invalid, we do not try to manifest it.
    if (!State.isValidState())
      continue;
    // Skip AAs anchored outside the functions this Attributor runs on.
    if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
      continue;
    // Skip dead code.
    bool UsedAssumedInformation = false;
    if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
                      /* CheckBBLivenessOnly */ true))
      continue;
    // Check if the manifest debug counter that allows skipping manifestation of
    // AAs
    if (!DebugCounter::shouldExecute(ManifestDBGCounter))
      continue;
    // Manifest the state and record if we changed the IR.
    ChangeStatus LocalChange = AA->manifest(*this);
    if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
      AA->trackStatistics();
    LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
                      << "\n");
    ManifestChange = ManifestChange | LocalChange;
    NumAtFixpoint++;
    NumManifested += (LocalChange == ChangeStatus::CHANGED);
  }
  (void)NumManifested;
  (void)NumAtFixpoint;
  LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
                    << " arguments while " << NumAtFixpoint
                    << " were in a valid fixpoint state\n");
  NumAttributesManifested += NumManifested;
  NumAttributesValidFixpoint += NumAtFixpoint;
  (void)NumFinalAAs;
  // Manifesting must not create new abstract attributes; verify that here.
  if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
    for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size(); ++u)
      errs() << "Unexpected abstract attribute: "
             << cast<AbstractAttribute>(DG.SyntheticRoot.Deps[u].getPointer())
             << " :: "
             << cast<AbstractAttribute>(DG.SyntheticRoot.Deps[u].getPointer())
                    ->getIRPosition()
                    .getAssociatedValue()
             << "\n";
    llvm_unreachable("Expected the final number of abstract attributes to "
                     "remain unchanged!");
  }
  return ManifestChange;
}
/// Mark internal functions whose every call site is itself (transitively)
/// dead for deletion. Uses a fixpoint over the set of internal functions:
/// a function is live if any call site is in a live (or external) caller.
void Attributor::identifyDeadInternalFunctions() {
  // Early exit if we don't intend to delete functions.
  if (!Configuration.DeleteFns)
    return;
  // To avoid triggering an assertion in the lazy call graph we will not delete
  // any internal library functions. We should modify the assertion though and
  // allow internals to be deleted.
  const auto *TLI =
      isModulePass()
          ? nullptr
          : getInfoCache().getTargetLibraryInfoForFunction(*Functions.back());
  LibFunc LF;
  // Identify dead internal functions and delete them. This happens outside
  // the other fixpoint analysis as we might treat potentially dead functions
  // as live to lower the number of iterations. If they happen to be dead, the
  // below fixpoint loop will identify and eliminate them.
  SmallVector<Function *, 8> InternalFns;
  for (Function *F : Functions)
    if (F->hasLocalLinkage() && (isModulePass() || !TLI->getLibFunc(*F, LF)))
      InternalFns.push_back(F);
  SmallPtrSet<Function *, 8> LiveInternalFns;
  bool FoundLiveInternal = true;
  // Iterate until no additional live internal function is discovered.
  while (FoundLiveInternal) {
    FoundLiveInternal = false;
    for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
      Function *F = InternalFns[u];
      if (!F)
        continue;
      bool UsedAssumedInformation = false;
      // The predicate holds for call sites in callers that are themselves
      // dead candidates; if all call sites satisfy it, F stays a candidate.
      if (checkForAllCallSites(
              [&](AbstractCallSite ACS) {
                Function *Callee = ACS.getInstruction()->getFunction();
                return ToBeDeletedFunctions.count(Callee) ||
                       (Functions.count(Callee) && Callee->hasLocalLinkage() &&
                        !LiveInternalFns.count(Callee));
              },
              *F, true, nullptr, UsedAssumedInformation)) {
        continue;
      }
      // Some call site is live (or unknown): F is live; remove it from the
      // candidate list and rerun the fixpoint.
      LiveInternalFns.insert(F);
      InternalFns[u] = nullptr;
      FoundLiveInternal = true;
    }
  }
  // Whatever remains a candidate is dead.
  for (unsigned u = 0, e = InternalFns.size(); u < e; ++u)
    if (Function *F = InternalFns[u])
      ToBeDeletedFunctions.insert(F);
}
/// Apply all deferred IR mutations recorded during manifestation: replace
/// uses/values, convert invokes with dead successors, fold terminators,
/// insert unreachables, and delete instructions, blocks, and functions — in
/// that order, so later steps never see references invalidated by earlier
/// ones. Returns whether the IR changed.
ChangeStatus Attributor::cleanupIR() {
  TimeTraceScope TimeScope("Attributor::cleanupIR");
  // Delete stuff at the end to avoid invalid references and a nice order.
  LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
                    << ToBeDeletedFunctions.size() << " functions and "
                    << ToBeDeletedBlocks.size() << " blocks and "
                    << ToBeDeletedInsts.size() << " instructions and "
                    << ToBeChangedValues.size() << " values and "
                    << ToBeChangedUses.size() << " uses. To insert "
                    << ToBeChangedToUnreachableInsts.size()
                    << " unreachables.\n"
                    << "Preserve manifest added " << ManifestAddedBlocks.size()
                    << " blocks\n");
  SmallVector<WeakTrackingVH, 32> DeadInsts;
  SmallVector<Instruction *, 32> TerminatorsToFold;
  // Replace a single use with \p NewV, honoring replacement chains and
  // special cases (must-tail returns, `returned` attributes, noundef args,
  // constant branch conditions).
  auto ReplaceUse = [&](Use *U, Value *NewV) {
    Value *OldV = U->get();
    // If we plan to replace NewV we need to update it at this point.
    do {
      const auto &Entry = ToBeChangedValues.lookup(NewV);
      if (!get<0>(Entry))
        break;
      NewV = get<0>(Entry);
    } while (true);
    Instruction *I = dyn_cast<Instruction>(U->getUser());
    assert((!I || isRunOn(*I->getFunction())) &&
           "Cannot replace an instruction outside the current SCC!");
    // Do not replace uses in returns if the value is a must-tail call we will
    // not delete.
    if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
      if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
        if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
          return;
      // If we rewrite a return and the new value is not an argument, strip the
      // `returned` attribute as it is wrong now.
      if (!isa<Argument>(NewV))
        for (auto &Arg : RI->getFunction()->args())
          Arg.removeAttr(Attribute::Returned);
    }
    LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
                      << " instead of " << *OldV << "\n");
    U->set(NewV);
    // The old value may have become trivially dead; queue it for deletion.
    if (Instruction *I = dyn_cast<Instruction>(OldV)) {
      CGModifiedFunctions.insert(I->getFunction());
      if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
          isInstructionTriviallyDead(I))
        DeadInsts.push_back(I);
    }
    // Passing undef where noundef was promised would be immediate UB; drop
    // the attribute on both call site and callee.
    if (isa<UndefValue>(NewV) && isa<CallBase>(U->getUser())) {
      auto *CB = cast<CallBase>(U->getUser());
      if (CB->isArgOperand(U)) {
        unsigned Idx = CB->getArgOperandNo(U);
        CB->removeParamAttr(Idx, Attribute::NoUndef);
        Function *Fn = CB->getCalledFunction();
        if (Fn && Fn->arg_size() > Idx)
          Fn->removeParamAttr(Idx, Attribute::NoUndef);
      }
    }
    // Constant branch conditions can be folded; undef conditions make the
    // branch unreachable.
    if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
      Instruction *UserI = cast<Instruction>(U->getUser());
      if (isa<UndefValue>(NewV)) {
        ToBeChangedToUnreachableInsts.insert(UserI);
      } else {
        TerminatorsToFold.push_back(UserI);
      }
    }
  };
  for (auto &It : ToBeChangedUses) {
    Use *U = It.first;
    Value *NewV = It.second;
    ReplaceUse(U, NewV);
  }
  SmallVector<Use *, 4> Uses;
  for (auto &It : ToBeChangedValues) {
    Value *OldV = It.first;
    auto [NewV, Done] = It.second;
    Uses.clear();
    // Collect uses first since replacement mutates the use list.
    for (auto &U : OldV->uses())
      if (Done || !U.getUser()->isDroppable())
        Uses.push_back(&U);
    for (Use *U : Uses) {
      if (auto *I = dyn_cast<Instruction>(U->getUser()))
        if (!isRunOn(*I->getFunction()))
          continue;
      ReplaceUse(U, NewV);
    }
  }
  // Rewrite invokes whose normal and/or unwind successor is dead: turn them
  // into calls or mark the dead destination unreachable.
  for (const auto &V : InvokeWithDeadSuccessor)
    if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
      assert(isRunOn(*II->getFunction()) &&
             "Cannot replace an invoke outside the current SCC!");
      bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
      bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
      bool Invoke2CallAllowed =
          !AAIsDead::mayCatchAsynchronousExceptions(*II->getFunction());
      assert((UnwindBBIsDead || NormalBBIsDead) &&
             "Invoke does not have dead successors!");
      BasicBlock *BB = II->getParent();
      BasicBlock *NormalDestBB = II->getNormalDest();
      if (UnwindBBIsDead) {
        Instruction *NormalNextIP = &NormalDestBB->front();
        if (Invoke2CallAllowed) {
          changeToCall(II);
          NormalNextIP = BB->getTerminator();
        }
        if (NormalBBIsDead)
          ToBeChangedToUnreachableInsts.insert(NormalNextIP);
      } else {
        assert(NormalBBIsDead && "Broken invariant!");
        // Isolate the dead normal destination so only this invoke reaches it.
        if (!NormalDestBB->getUniquePredecessor())
          NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
        ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
      }
    }
  for (Instruction *I : TerminatorsToFold) {
    assert(isRunOn(*I->getFunction()) &&
           "Cannot replace a terminator outside the current SCC!");
    CGModifiedFunctions.insert(I->getFunction());
    ConstantFoldTerminator(I->getParent());
  }
  for (const auto &V : ToBeChangedToUnreachableInsts)
    if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
      LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
                        << "\n");
      assert(isRunOn(*I->getFunction()) &&
             "Cannot replace an instruction outside the current SCC!");
      CGModifiedFunctions.insert(I->getFunction());
      changeToUnreachable(I);
    }
  for (const auto &V : ToBeDeletedInsts) {
    if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
      if (auto *CB = dyn_cast<CallBase>(I)) {
        assert((isa<IntrinsicInst>(CB) || isRunOn(*I->getFunction())) &&
               "Cannot delete an instruction outside the current SCC!");
        if (!isa<IntrinsicInst>(CB))
          Configuration.CGUpdater.removeCallSite(*CB);
      }
      I->dropDroppableUses();
      CGModifiedFunctions.insert(I->getFunction());
      if (!I->getType()->isVoidTy())
        I->replaceAllUsesWith(UndefValue::get(I->getType()));
      // Trivially dead non-PHIs go to the recursive deleter; everything else
      // is erased right away.
      if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
        DeadInsts.push_back(I);
      else
        I->eraseFromParent();
    }
  }
  // Weak handles may have been nulled by earlier deletions.
  llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; });
  LLVM_DEBUG({
    dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
    for (auto &I : DeadInsts)
      if (I)
        dbgs() << " - " << *I << "\n";
  });
  RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
  if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
    SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
    ToBeDeletedBBs.reserve(NumDeadBlocks);
    for (BasicBlock *BB : ToBeDeletedBlocks) {
      assert(isRunOn(*BB->getParent()) &&
             "Cannot delete a block outside the current SCC!");
      CGModifiedFunctions.insert(BB->getParent());
      // Do not delete BBs added during manifests of AAs.
      if (ManifestAddedBlocks.contains(BB))
        continue;
      ToBeDeletedBBs.push_back(BB);
    }
    // Actually we do not delete the blocks but squash them into a single
    // unreachable but untangling branches that jump here is something we need
    // to do in a more generic way.
    detachDeadBlocks(ToBeDeletedBBs, nullptr);
  }
  identifyDeadInternalFunctions();
  // Rewrite the functions as requested during manifest.
  ChangeStatus ManifestChange = rewriteFunctionSignatures(CGModifiedFunctions);
  for (Function *Fn : CGModifiedFunctions)
    if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn))
      Configuration.CGUpdater.reanalyzeFunction(*Fn);
  for (Function *Fn : ToBeDeletedFunctions) {
    if (!Functions.count(Fn))
      continue;
    Configuration.CGUpdater.removeFunction(*Fn);
  }
  // Any non-empty mutation set implies the IR changed.
  if (!ToBeChangedUses.empty())
    ManifestChange = ChangeStatus::CHANGED;
  if (!ToBeChangedToUnreachableInsts.empty())
    ManifestChange = ChangeStatus::CHANGED;
  if (!ToBeDeletedFunctions.empty())
    ManifestChange = ChangeStatus::CHANGED;
  if (!ToBeDeletedBlocks.empty())
    ManifestChange = ChangeStatus::CHANGED;
  if (!ToBeDeletedInsts.empty())
    ManifestChange = ChangeStatus::CHANGED;
  if (!InvokeWithDeadSuccessor.empty())
    ManifestChange = ChangeStatus::CHANGED;
  if (!DeadInsts.empty())
    ManifestChange = ChangeStatus::CHANGED;
  NumFnDeleted += ToBeDeletedFunctions.size();
  LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size()
                    << " functions after manifest.\n");
#ifdef EXPENSIVE_CHECKS
  for (Function *F : Functions) {
    if (ToBeDeletedFunctions.count(F))
      continue;
    assert(!verifyFunction(*F, &errs()) && "Module verification failed!");
  }
#endif
  return ManifestChange;
}
/// Drive the whole Attributor pipeline: run the abstract-attribute fixpoint
/// iteration, manifest the deduced attributes into the IR, and clean up dead
/// IR afterwards. The phases are strictly ordered via the Phase member so
/// that phase-sensitive asserts (e.g. in updateAA) hold.
///
/// \returns CHANGED if either manifestation or cleanup changed the IR.
ChangeStatus Attributor::run() {
  TimeTraceScope TimeScope("Attributor::run");
  AttributorCallGraph ACallGraph(*this);

  if (PrintCallGraph)
    ACallGraph.populateAll();

  Phase = AttributorPhase::UPDATE;
  runTillFixpoint();

  // dump graphs on demand
  if (DumpDepGraph)
    DG.dumpGraph();

  if (ViewDepGraph)
    DG.viewGraph();

  if (PrintDependencies)
    DG.print();

  Phase = AttributorPhase::MANIFEST;
  ChangeStatus ManifestChange = manifestAttributes();

  Phase = AttributorPhase::CLEANUP;
  ChangeStatus CleanupChange = cleanupIR();

  if (PrintCallGraph)
    ACallGraph.print();

  // Combine the two change states; either one being CHANGED means CHANGED.
  return ManifestChange | CleanupChange;
}
/// Perform one update step for \p AA and try to detect an (optimistic)
/// fixpoint early. All dependences the update queries are collected in a
/// fresh vector on the dependence stack; they are only committed (via
/// rememberDependences) if the state did not reach a fixpoint, since
/// fixpointed AAs never need to be re-triggered.
ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
  TimeTraceScope TimeScope(
      AA.getName() + std::to_string(AA.getIRPosition().getPositionKind()) +
      "::updateAA");
  assert(Phase == AttributorPhase::UPDATE &&
         "We can update AA only in the update stage!");

  // Use a new dependence vector for this update.
  DependenceVector DV;
  DependenceStack.push_back(&DV);

  auto &AAState = AA.getState();
  ChangeStatus CS = ChangeStatus::UNCHANGED;
  // Do not update AAs whose position is assumed dead (basic-block liveness
  // check only); their state is irrelevant.
  bool UsedAssumedInformation = false;
  if (!isAssumedDead(AA, nullptr, UsedAssumedInformation,
                     /* CheckBBLivenessOnly */ true))
    CS = AA.update(*this);

  if (!AA.isQueryAA() && DV.empty() && !AA.getState().isAtFixpoint()) {
    // If the AA did not rely on outside information but changed, we run it
    // again to see if it found a fixpoint. Most AAs do but we don't require
    // them to. Hence, it might take the AA multiple iterations to get to a
    // fixpoint even if it does not rely on outside information, which is fine.
    ChangeStatus RerunCS = ChangeStatus::UNCHANGED;
    if (CS == ChangeStatus::CHANGED)
      RerunCS = AA.update(*this);

    // If the attribute did not change during the run or rerun, and it still did
    // not query any non-fix information, the state will not change and we can
    // indicate that right at this point.
    if (RerunCS == ChangeStatus::UNCHANGED && !AA.isQueryAA() && DV.empty())
      AAState.indicateOptimisticFixpoint();
  }

  // Only non-fixpointed states need their dependences recorded; a fixed AA
  // is never updated again.
  if (!AAState.isAtFixpoint())
    rememberDependences();

  // Verify the stack was used properly, that is we pop the dependence vector we
  // put there earlier.
  DependenceVector *PoppedDV = DependenceStack.pop_back_val();
  (void)PoppedDV;
  assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");

  return CS;
}
/// Create a "shallow wrapper" for \p F: a new function that takes over F's
/// name, linkage, and attributes, and whose body is a single (noinline) tail
/// call to the now-internal, anonymous F. This preserves the external ABI
/// while allowing aggressive IPO on F's body.
void Attributor::createShallowWrapper(Function &F) {
  assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");

  Module &M = *F.getParent();
  LLVMContext &Ctx = M.getContext();
  FunctionType *FnTy = F.getFunctionType();

  // The wrapper assumes F's identity; F itself becomes anonymous and internal.
  Function *Wrapper =
      Function::Create(FnTy, F.getLinkage(), F.getAddressSpace(), F.getName());
  F.setName(""); // set the inside function anonymous
  M.getFunctionList().insert(F.getIterator(), Wrapper);

  F.setLinkage(GlobalValue::InternalLinkage);

  F.replaceAllUsesWith(Wrapper);
  assert(F.use_empty() && "Uses remained after wrapper was created!");

  // Move the COMDAT section to the wrapper.
  // TODO: Check if we need to keep it for F as well.
  Wrapper->setComdat(F.getComdat());
  F.setComdat(nullptr);

  // Copy all metadata and attributes but keep them on F as well.
  SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
  F.getAllMetadata(MDs);
  for (auto MDIt : MDs)
    Wrapper->addMetadata(MDIt.first, *MDIt.second);
  Wrapper->setAttributes(F.getAttributes());

  // Create the call in the wrapper.
  BasicBlock *EntryBB = BasicBlock::Create(Ctx, "entry", Wrapper);

  // Forward the wrapper's arguments 1:1 and reuse F's argument names for
  // readability of the generated IR.
  SmallVector<Value *, 8> Args;
  Argument *FArgIt = F.arg_begin();
  for (Argument &Arg : Wrapper->args()) {
    Args.push_back(&Arg);
    Arg.setName((FArgIt++)->getName());
  }

  CallInst *CI = CallInst::Create(&F, Args, "", EntryBB);
  CI->setTailCall(true);
  // Keep the call outlined; inlining it back would defeat the wrapper.
  CI->addFnAttr(Attribute::NoInline);
  ReturnInst::Create(Ctx, CI->getType()->isVoidTy() ? nullptr : CI, EntryBB);

  NumFnShallowWrappersCreated++;
}
- bool Attributor::isInternalizable(Function &F) {
- if (F.isDeclaration() || F.hasLocalLinkage() ||
- GlobalValue::isInterposableLinkage(F.getLinkage()))
- return false;
- return true;
- }
- Function *Attributor::internalizeFunction(Function &F, bool Force) {
- if (!AllowDeepWrapper && !Force)
- return nullptr;
- if (!isInternalizable(F))
- return nullptr;
- SmallPtrSet<Function *, 2> FnSet = {&F};
- DenseMap<Function *, Function *> InternalizedFns;
- internalizeFunctions(FnSet, InternalizedFns);
- return InternalizedFns[&F];
- }
/// Internalize all functions in \p FnSet by cloning each into a private
/// ".internalized" copy; \p FnMap maps each original to its copy on success.
/// Calls between members of the set keep targeting the originals; all other
/// call sites are redirected to the internalized copies.
///
/// \returns false (leaving FnMap untouched) if any member of the set is not
/// internalizable; true otherwise.
bool Attributor::internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
                                      DenseMap<Function *, Function *> &FnMap) {
  for (Function *F : FnSet)
    if (!Attributor::isInternalizable(*F))
      return false;

  FnMap.clear();
  // Generate the internalized version of each function.
  for (Function *F : FnSet) {
    Module &M = *F->getParent();
    FunctionType *FnTy = F->getFunctionType();

    // Create a copy of the current function
    Function *Copied =
        Function::Create(FnTy, F->getLinkage(), F->getAddressSpace(),
                         F->getName() + ".internalized");
    ValueToValueMapTy VMap;
    auto *NewFArgIt = Copied->arg_begin();
    // Keep the original argument names and build the value map needed by
    // CloneFunctionInto.
    for (auto &Arg : F->args()) {
      auto ArgName = Arg.getName();
      NewFArgIt->setName(ArgName);
      VMap[&Arg] = &(*NewFArgIt++);
    }
    SmallVector<ReturnInst *, 8> Returns;

    // Copy the body of the original function to the new one
    CloneFunctionInto(Copied, F, VMap,
                      CloneFunctionChangeType::LocalChangesOnly, Returns);

    // Set the linkage and visibility late as CloneFunctionInto has some
    // implicit requirements.
    Copied->setVisibility(GlobalValue::DefaultVisibility);
    Copied->setLinkage(GlobalValue::PrivateLinkage);

    // Copy metadata
    SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
    F->getAllMetadata(MDs);
    for (auto MDIt : MDs)
      // NOTE(review): the guard stops appending as soon as the clone carries
      // any metadata (CloneFunctionInto may already have copied some), so at
      // most one entry is ever added here — confirm this is intended and not
      // meant to be a per-kind check.
      if (!Copied->hasMetadata())
        Copied->addMetadata(MDIt.first, *MDIt.second);

    M.getFunctionList().insert(F->getIterator(), Copied);
    Copied->setDSOLocal(true);
    FnMap[F] = Copied;
  }

  // Replace all uses of the old function with the new internalized function
  // unless the caller is a function that was just internalized.
  for (Function *F : FnSet) {
    auto &InternalizedFn = FnMap[F];
    auto IsNotInternalized = [&](Use &U) -> bool {
      if (auto *CB = dyn_cast<CallBase>(U.getUser()))
        return !FnMap.lookup(CB->getCaller());
      return false;
    };
    F->replaceUsesWithIf(InternalizedFn, IsNotInternalized);
  }

  return true;
}
/// Check whether rewriting the signature of \p Arg's parent function with
/// \p ReplacementTypes is currently supported. Rejects var-arg functions,
/// functions with complex argument-passing attributes, call sites we cannot
/// rewrite (callbacks, must-tail, casted callees), and functions containing
/// must-tail calls.
bool Attributor::isValidFunctionSignatureRewrite(
    Argument &Arg, ArrayRef<Type *> ReplacementTypes) {

  if (!Configuration.RewriteSignatures)
    return false;

  Function *Fn = Arg.getParent();
  auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
    // Forbid the call site to cast the function return type. If we need to
    // rewrite these functions we need to re-create a cast for the new call site
    // (if the old had uses).
    if (!ACS.getCalledFunction() ||
        ACS.getInstruction()->getType() !=
            ACS.getCalledFunction()->getReturnType())
      return false;
    if (ACS.getCalledOperand()->getType() != Fn->getType())
      return false;
    // Forbid must-tail calls for now.
    return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
  };

  // Avoid var-arg functions for now.
  if (Fn->isVarArg()) {
    LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
    return false;
  }

  // Avoid functions with complicated argument passing semantics.
  AttributeList FnAttributeList = Fn->getAttributes();
  if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
      FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
      FnAttributeList.hasAttrSomewhere(Attribute::InAlloca) ||
      FnAttributeList.hasAttrSomewhere(Attribute::Preallocated)) {
    LLVM_DEBUG(
        dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
    return false;
  }

  // Avoid callbacks for now.
  bool UsedAssumedInformation = false;
  if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
                            UsedAssumedInformation)) {
    LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
    return false;
  }

  auto InstPred = [](Instruction &I) {
    if (auto *CI = dyn_cast<CallInst>(&I))
      return !CI->isMustTailCall();
    return true;
  };

  // Forbid must-tail calls for now.
  // TODO:
  auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
  if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
                                   nullptr, {Instruction::Call},
                                   UsedAssumedInformation)) {
    LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
    return false;
  }

  return true;
}
- bool Attributor::registerFunctionSignatureRewrite(
- Argument &Arg, ArrayRef<Type *> ReplacementTypes,
- ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
- ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
- LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
- << Arg.getParent()->getName() << " with "
- << ReplacementTypes.size() << " replacements\n");
- assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
- "Cannot register an invalid rewrite");
- Function *Fn = Arg.getParent();
- SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
- ArgumentReplacementMap[Fn];
- if (ARIs.empty())
- ARIs.resize(Fn->arg_size());
- // If we have a replacement already with less than or equal new arguments,
- // ignore this request.
- std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
- if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
- LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
- return false;
- }
- // If we have a replacement already but we like the new one better, delete
- // the old.
- ARI.reset();
- LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
- << Arg.getParent()->getName() << " with "
- << ReplacementTypes.size() << " replacements\n");
- // Remember the replacement.
- ARI.reset(new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
- std::move(CalleeRepairCB),
- std::move(ACSRepairCB)));
- return true;
- }
- bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
- bool Result = true;
- #ifndef NDEBUG
- if (SeedAllowList.size() != 0)
- Result = llvm::is_contained(SeedAllowList, AA.getName());
- Function *Fn = AA.getAnchorScope();
- if (FunctionSeedAllowList.size() != 0 && Fn)
- Result &= llvm::is_contained(FunctionSeedAllowList, Fn->getName());
- #endif
- return Result;
- }
/// Apply all registered argument replacements: for every function with a
/// pending rewrite, create a new function with the replacement argument
/// types, splice the old body over, repair arguments and call sites via the
/// registered callbacks, and retire the old function. Functions whose
/// analysis state is affected are added to \p ModifiedFns.
ChangeStatus Attributor::rewriteFunctionSignatures(
    SmallSetVector<Function *, 8> &ModifiedFns) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  for (auto &It : ArgumentReplacementMap) {
    Function *OldFn = It.getFirst();

    // Deleted functions do not require rewrites.
    if (!Functions.count(OldFn) || ToBeDeletedFunctions.count(OldFn))
      continue;

    const SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
        It.getSecond();
    assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");

    SmallVector<Type *, 16> NewArgumentTypes;
    SmallVector<AttributeSet, 16> NewArgumentAttributes;

    // Collect replacement argument types and copy over existing attributes.
    // Arguments with a registered ARI expand to their replacement types with
    // empty attribute sets; all others are passed through unchanged.
    AttributeList OldFnAttributeList = OldFn->getAttributes();
    for (Argument &Arg : OldFn->args()) {
      if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
              ARIs[Arg.getArgNo()]) {
        NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
                                ARI->ReplacementTypes.end());
        NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
                                     AttributeSet());
      } else {
        NewArgumentTypes.push_back(Arg.getType());
        NewArgumentAttributes.push_back(
            OldFnAttributeList.getParamAttrs(Arg.getArgNo()));
      }
    }

    // Track the widest vector argument so the min-legal-vector-width
    // attribute can be kept accurate on the new function and its callers.
    uint64_t LargestVectorWidth = 0;
    for (auto *I : NewArgumentTypes)
      if (auto *VT = dyn_cast<llvm::VectorType>(I))
        LargestVectorWidth =
            std::max(LargestVectorWidth,
                     VT->getPrimitiveSizeInBits().getKnownMinValue());

    FunctionType *OldFnTy = OldFn->getFunctionType();
    Type *RetTy = OldFnTy->getReturnType();

    // Construct the new function type using the new arguments types.
    FunctionType *NewFnTy =
        FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());

    LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
                      << "' from " << *OldFn->getFunctionType() << " to "
                      << *NewFnTy << "\n");

    // Create the new function body and insert it into the module.
    Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
                                       OldFn->getAddressSpace(), "");
    Functions.insert(NewFn);
    OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
    NewFn->takeName(OldFn);
    NewFn->copyAttributesFrom(OldFn);

    // Patch the pointer to LLVM function in debug info descriptor.
    NewFn->setSubprogram(OldFn->getSubprogram());
    OldFn->setSubprogram(nullptr);

    // Recompute the parameter attributes list based on the new arguments for
    // the function.
    LLVMContext &Ctx = OldFn->getContext();
    NewFn->setAttributes(AttributeList::get(
        Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
        NewArgumentAttributes));
    AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);

    // Since we have now created the new function, splice the body of the old
    // function right into the new function, leaving the old rotting hulk of the
    // function empty.
    NewFn->splice(NewFn->begin(), OldFn);

    // Fixup block addresses to reference new function.
    SmallVector<BlockAddress *, 8u> BlockAddresses;
    for (User *U : OldFn->users())
      if (auto *BA = dyn_cast<BlockAddress>(U))
        BlockAddresses.push_back(BA);
    for (auto *BA : BlockAddresses)
      BA->replaceAllUsesWith(BlockAddress::get(NewFn, BA->getBasicBlock()));

    // Set of all "call-like" instructions that invoke the old function mapped
    // to their new replacements.
    SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;

    // Callback to create a new "call-like" instruction for a given one.
    auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
      CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
      const AttributeList &OldCallAttributeList = OldCB->getAttributes();

      // Collect the new argument operands for the replacement call site.
      SmallVector<Value *, 16> NewArgOperands;
      SmallVector<AttributeSet, 16> NewArgOperandAttributes;
      for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
        unsigned NewFirstArgNum = NewArgOperands.size();
        (void)NewFirstArgNum; // only used inside assert.
        if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
                ARIs[OldArgNum]) {
          // The ACS repair callback is responsible for providing exactly
          // getNumReplacementArgs() new operands for this argument.
          if (ARI->ACSRepairCB)
            ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
          assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
                     NewArgOperands.size() &&
                 "ACS repair callback did not provide as many operand as new "
                 "types were registered!");
          // TODO: Expose the attribute set to the ACS repair callback
          NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
                                         AttributeSet());
        } else {
          NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
          NewArgOperandAttributes.push_back(
              OldCallAttributeList.getParamAttrs(OldArgNum));
        }
      }

      assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
             "Mismatch # argument operands vs. # argument operand attributes!");
      assert(NewArgOperands.size() == NewFn->arg_size() &&
             "Mismatch # argument operands vs. # function arguments!");

      SmallVector<OperandBundleDef, 4> OperandBundleDefs;
      OldCB->getOperandBundlesAsDefs(OperandBundleDefs);

      // Create a new call or invoke instruction to replace the old one.
      CallBase *NewCB;
      if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
        NewCB =
            InvokeInst::Create(NewFn, II->getNormalDest(), II->getUnwindDest(),
                               NewArgOperands, OperandBundleDefs, "", OldCB);
      } else {
        auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
                                       "", OldCB);
        NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
        NewCB = NewCI;
      }

      // Copy over various properties and the new attributes.
      NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
      NewCB->setCallingConv(OldCB->getCallingConv());
      NewCB->takeName(OldCB);
      NewCB->setAttributes(AttributeList::get(
          Ctx, OldCallAttributeList.getFnAttrs(),
          OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));

      AttributeFuncs::updateMinLegalVectorWidthAttr(*NewCB->getCaller(),
                                                    LargestVectorWidth);

      // Defer erasing old call sites until all of them were visited.
      CallSitePairs.push_back({OldCB, NewCB});
      return true;
    };

    // Use the CallSiteReplacementCreator to create replacement call sites.
    bool UsedAssumedInformation = false;
    bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
                                        true, nullptr, UsedAssumedInformation,
                                        /* CheckPotentiallyDead */ true);
    (void)Success;
    assert(Success && "Assumed call site replacement to succeed!");

    // Rewire the arguments.
    Argument *OldFnArgIt = OldFn->arg_begin();
    Argument *NewFnArgIt = NewFn->arg_begin();
    for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
         ++OldArgNum, ++OldFnArgIt) {
      if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
              ARIs[OldArgNum]) {
        if (ARI->CalleeRepairCB)
          ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
        // An argument replaced by zero types simply disappears; remaining
        // uses (if any) become poison.
        if (ARI->ReplacementTypes.empty())
          OldFnArgIt->replaceAllUsesWith(
              PoisonValue::get(OldFnArgIt->getType()));
        NewFnArgIt += ARI->ReplacementTypes.size();
      } else {
        NewFnArgIt->takeName(&*OldFnArgIt);
        OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
        ++NewFnArgIt;
      }
    }

    // Eliminate the instructions *after* we visited all of them.
    for (auto &CallSitePair : CallSitePairs) {
      CallBase &OldCB = *CallSitePair.first;
      CallBase &NewCB = *CallSitePair.second;
      assert(OldCB.getType() == NewCB.getType() &&
             "Cannot handle call sites with different types!");
      ModifiedFns.insert(OldCB.getFunction());
      Configuration.CGUpdater.replaceCallSite(OldCB, NewCB);
      OldCB.replaceAllUsesWith(&NewCB);
      OldCB.eraseFromParent();
    }

    // Replace the function in the call graph (if any).
    Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);

    // If the old function was modified and needed to be reanalyzed, the new one
    // does now.
    if (ModifiedFns.remove(OldFn))
      ModifiedFns.insert(NewFn);

    Changed = ChangeStatus::CHANGED;
  }

  return Changed;
}
/// Populate the per-function cache \p FI for \p CF: bucket "interesting"
/// instructions by opcode, collect memory-touching instructions, fill the
/// assume knowledge map, track must-tail call relationships, and record
/// values used only by assumes.
void InformationCache::initializeInformationCache(const Function &CF,
                                                  FunctionInfo &FI) {
  // As we do not modify the function here we can remove the const
  // without breaking implicit assumptions. At the end of the day, we could
  // initialize the cache eagerly which would look the same to the users.
  Function &F = const_cast<Function &>(CF);

  // Walk all instructions to find interesting instructions that might be
  // queried by abstract attributes during their initialization or update.
  // This has to happen before we create attributes.

  // Per-value counter of uses not yet accounted for by visited assumes;
  // when it reaches zero the value is only used by assumes.
  DenseMap<const Value *, std::optional<short>> AssumeUsesMap;

  // Add \p V to the assume uses map which track the number of uses outside of
  // "visited" assumes. If no outside uses are left the value is added to the
  // assume only use vector.
  auto AddToAssumeUsesMap = [&](const Value &V) -> void {
    SmallVector<const Instruction *> Worklist;
    if (auto *I = dyn_cast<Instruction>(&V))
      Worklist.push_back(I);
    while (!Worklist.empty()) {
      const Instruction *I = Worklist.pop_back_val();
      std::optional<short> &NumUses = AssumeUsesMap[I];
      if (!NumUses)
        NumUses = I->getNumUses();
      NumUses = *NumUses - /* this assume */ 1;
      if (*NumUses != 0)
        continue;
      AssumeOnlyValues.insert(I);
      // All uses are assumes; propagate transitively through the operands.
      for (const Value *Op : I->operands())
        if (auto *OpI = dyn_cast<Instruction>(Op))
          Worklist.push_back(OpI);
    }
  };

  for (Instruction &I : instructions(&F)) {
    bool IsInterestingOpcode = false;

    // To allow easy access to all instructions in a function with a given
    // opcode we store them in the InfoCache. As not all opcodes are interesting
    // to concrete attributes we only cache the ones that are as identified in
    // the following switch.
    // Note: There are no concrete attributes now so this is initially empty.
    switch (I.getOpcode()) {
    default:
      assert(!isa<CallBase>(&I) &&
             "New call base instruction type needs to be known in the "
             "Attributor.");
      break;
    case Instruction::Call:
      // Calls are interesting on their own, additionally:
      // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
      // For `must-tail` calls we remember the caller and callee.
      if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
        AssumeOnlyValues.insert(Assume);
        fillMapFromAssume(*Assume, KnowledgeMap);
        AddToAssumeUsesMap(*Assume->getArgOperand(0));
      } else if (cast<CallInst>(I).isMustTailCall()) {
        FI.ContainsMustTailCall = true;
        if (const Function *Callee = cast<CallInst>(I).getCalledFunction())
          getFunctionInfo(*Callee).CalledViaMustTail = true;
      }
      [[fallthrough]];
    case Instruction::CallBr:
    case Instruction::Invoke:
    case Instruction::CleanupRet:
    case Instruction::CatchSwitch:
    case Instruction::AtomicRMW:
    case Instruction::AtomicCmpXchg:
    case Instruction::Br:
    case Instruction::Resume:
    case Instruction::Ret:
    case Instruction::Load:
      // The alignment of a pointer is interesting for loads.
    case Instruction::Store:
      // The alignment of a pointer is interesting for stores.
    case Instruction::Alloca:
    case Instruction::AddrSpaceCast:
      IsInterestingOpcode = true;
    }
    if (IsInterestingOpcode) {
      // Opcode buckets are bump-allocated; FunctionInfo::~FunctionInfo runs
      // the destructors manually.
      auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
      if (!Insts)
        Insts = new (Allocator) InstructionVectorTy();
      Insts->push_back(&I);
    }
    if (I.mayReadOrWriteMemory())
      FI.RWInsts.push_back(&I);
  }

  if (F.hasFnAttribute(Attribute::AlwaysInline) &&
      isInlineViable(F).isSuccess())
    InlineableFunctions.insert(&F);
}
- AAResults *InformationCache::getAAResultsForFunction(const Function &F) {
- return AG.getAnalysis<AAManager>(F);
- }
- InformationCache::FunctionInfo::~FunctionInfo() {
- // The instruction vectors are allocated using a BumpPtrAllocator, we need to
- // manually destroy them.
- for (auto &It : OpcodeInstMap)
- It.getSecond()->~InstructionVectorTy();
- }
- void Attributor::recordDependence(const AbstractAttribute &FromAA,
- const AbstractAttribute &ToAA,
- DepClassTy DepClass) {
- if (DepClass == DepClassTy::NONE)
- return;
- // If we are outside of an update, thus before the actual fixpoint iteration
- // started (= when we create AAs), we do not track dependences because we will
- // put all AAs into the initial worklist anyway.
- if (DependenceStack.empty())
- return;
- if (FromAA.getState().isAtFixpoint())
- return;
- DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass});
- }
- void Attributor::rememberDependences() {
- assert(!DependenceStack.empty() && "No dependences to remember!");
- for (DepInfo &DI : *DependenceStack.back()) {
- assert((DI.DepClass == DepClassTy::REQUIRED ||
- DI.DepClass == DepClassTy::OPTIONAL) &&
- "Expected required or optional dependence (1 bit)!");
- auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps;
- DepAAs.push_back(AbstractAttribute::DepTy(
- const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass)));
- }
- }
/// Seed the default set of abstract attributes for \p F: function-level AAs,
/// return-value AAs, argument AAs, call-site AAs (instruction, return, and
/// argument positions), and alignment/simplification AAs for loads and
/// stores. Each function is seeded at most once.
void Attributor::identifyDefaultAbstractAttributes(Function &F) {
  if (!VisitedFunctions.insert(&F).second)
    return;
  if (F.isDeclaration())
    return;

  // In non-module runs we need to look at the call sites of a function to
  // determine if it is part of a must-tail call edge. This will influence what
  // attributes we can derive.
  InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
  if (!isModulePass() && !FI.CalledViaMustTail) {
    for (const Use &U : F.uses())
      if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
        if (CB->isCallee(&U) && CB->isMustTailCall())
          FI.CalledViaMustTail = true;
  }

  IRPosition FPos = IRPosition::function(F);

  // Check for dead BasicBlocks in every function.
  // We need dead instruction detection because we do not want to deal with
  // broken IR in which SSA rules do not apply.
  getOrCreateAAFor<AAIsDead>(FPos);

  // Every function might be "will-return".
  getOrCreateAAFor<AAWillReturn>(FPos);

  // Every function might contain instructions that cause "undefined behavior".
  getOrCreateAAFor<AAUndefinedBehavior>(FPos);

  // Every function can be nounwind.
  getOrCreateAAFor<AANoUnwind>(FPos);

  // Every function might be marked "nosync"
  getOrCreateAAFor<AANoSync>(FPos);

  // Every function might be "no-free".
  getOrCreateAAFor<AANoFree>(FPos);

  // Every function might be "no-return".
  getOrCreateAAFor<AANoReturn>(FPos);

  // Every function might be "no-recurse".
  getOrCreateAAFor<AANoRecurse>(FPos);

  // Every function might be "readnone/readonly/writeonly/...".
  getOrCreateAAFor<AAMemoryBehavior>(FPos);

  // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
  getOrCreateAAFor<AAMemoryLocation>(FPos);

  // Every function can track active assumptions.
  getOrCreateAAFor<AAAssumptionInfo>(FPos);

  // Every function might be applicable for Heap-To-Stack conversion.
  if (EnableHeapToStack)
    getOrCreateAAFor<AAHeapToStack>(FPos);

  // Return attributes are only appropriate if the return type is non void.
  Type *ReturnType = F.getReturnType();
  if (!ReturnType->isVoidTy()) {
    // Argument attribute "returned" --- Create only one per function even
    // though it is an argument attribute.
    getOrCreateAAFor<AAReturnedValues>(FPos);

    IRPosition RetPos = IRPosition::returned(F);

    // Every returned value might be dead.
    getOrCreateAAFor<AAIsDead>(RetPos);

    // Every function might be simplified.
    bool UsedAssumedInformation = false;
    getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
                         AA::Intraprocedural);

    // Every returned value might be marked noundef.
    getOrCreateAAFor<AANoUndef>(RetPos);

    if (ReturnType->isPointerTy()) {

      // Every function with pointer return type might be marked align.
      getOrCreateAAFor<AAAlign>(RetPos);

      // Every function with pointer return type might be marked nonnull.
      getOrCreateAAFor<AANonNull>(RetPos);

      // Every function with pointer return type might be marked noalias.
      getOrCreateAAFor<AANoAlias>(RetPos);

      // Every function with pointer return type might be marked
      // dereferenceable.
      getOrCreateAAFor<AADereferenceable>(RetPos);
    }
  }

  for (Argument &Arg : F.args()) {
    IRPosition ArgPos = IRPosition::argument(Arg);

    // Every argument might be simplified. We have to go through the Attributor
    // interface though as outside AAs can register custom simplification
    // callbacks.
    bool UsedAssumedInformation = false;
    getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
                         AA::Intraprocedural);

    // Every argument might be dead.
    getOrCreateAAFor<AAIsDead>(ArgPos);

    // Every argument might be marked noundef.
    getOrCreateAAFor<AANoUndef>(ArgPos);

    if (Arg.getType()->isPointerTy()) {
      // Every argument with pointer type might be marked nonnull.
      getOrCreateAAFor<AANonNull>(ArgPos);

      // Every argument with pointer type might be marked noalias.
      getOrCreateAAFor<AANoAlias>(ArgPos);

      // Every argument with pointer type might be marked dereferenceable.
      getOrCreateAAFor<AADereferenceable>(ArgPos);

      // Every argument with pointer type might be marked align.
      getOrCreateAAFor<AAAlign>(ArgPos);

      // Every argument with pointer type might be marked nocapture.
      getOrCreateAAFor<AANoCapture>(ArgPos);

      // Every argument with pointer type might be marked
      // "readnone/readonly/writeonly/..."
      getOrCreateAAFor<AAMemoryBehavior>(ArgPos);

      // Every argument with pointer type might be marked nofree.
      getOrCreateAAFor<AANoFree>(ArgPos);

      // Every argument with pointer type might be privatizable (or promotable)
      getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
    }
  }

  auto CallSitePred = [&](Instruction &I) -> bool {
    auto &CB = cast<CallBase>(I);
    IRPosition CBInstPos = IRPosition::inst(CB);
    IRPosition CBFnPos = IRPosition::callsite_function(CB);

    // Call sites might be dead if they do not have side effects and no live
    // users. The return value might be dead if there are no live users.
    getOrCreateAAFor<AAIsDead>(CBInstPos);

    Function *Callee = CB.getCalledFunction();
    // TODO: Even if the callee is not known now we might be able to simplify
    //       the call/callee.
    if (!Callee)
      return true;

    // Every call site can track active assumptions.
    getOrCreateAAFor<AAAssumptionInfo>(CBFnPos);

    // Skip declarations except if annotations on their call sites were
    // explicitly requested.
    if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
        !Callee->hasMetadata(LLVMContext::MD_callback))
      return true;

    if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {

      IRPosition CBRetPos = IRPosition::callsite_returned(CB);
      bool UsedAssumedInformation = false;
      getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
                           AA::Intraprocedural);
    }

    for (int I = 0, E = CB.arg_size(); I < E; ++I) {

      IRPosition CBArgPos = IRPosition::callsite_argument(CB, I);

      // Every call site argument might be dead.
      getOrCreateAAFor<AAIsDead>(CBArgPos);

      // Call site argument might be simplified. We have to go through the
      // Attributor interface though as outside AAs can register custom
      // simplification callbacks.
      bool UsedAssumedInformation = false;
      getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
                           AA::Intraprocedural);

      // Every call site argument might be marked "noundef".
      getOrCreateAAFor<AANoUndef>(CBArgPos);

      // The remaining attributes only make sense for pointer arguments.
      if (!CB.getArgOperand(I)->getType()->isPointerTy())
        continue;

      // Call site argument attribute "non-null".
      getOrCreateAAFor<AANonNull>(CBArgPos);

      // Call site argument attribute "nocapture".
      getOrCreateAAFor<AANoCapture>(CBArgPos);

      // Call site argument attribute "no-alias".
      getOrCreateAAFor<AANoAlias>(CBArgPos);

      // Call site argument attribute "dereferenceable".
      getOrCreateAAFor<AADereferenceable>(CBArgPos);

      // Call site argument attribute "align".
      getOrCreateAAFor<AAAlign>(CBArgPos);

      // Call site argument attribute
      // "readnone/readonly/writeonly/..."
      getOrCreateAAFor<AAMemoryBehavior>(CBArgPos);

      // Call site argument attribute "nofree".
      getOrCreateAAFor<AANoFree>(CBArgPos);
    }
    return true;
  };

  auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
  bool Success;
  bool UsedAssumedInformation = false;
  // Seed call-site AAs for all call-like instructions.
  Success = checkForAllInstructionsImpl(
      nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
      {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
       (unsigned)Instruction::Call},
      UsedAssumedInformation);
  (void)Success;
  assert(Success && "Expected the check call to be successful!");

  // Seed alignment (and optionally simplification) AAs for memory accesses.
  auto LoadStorePred = [&](Instruction &I) -> bool {
    if (isa<LoadInst>(I)) {
      getOrCreateAAFor<AAAlign>(
          IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
      if (SimplifyAllLoads)
        getAssumedSimplified(IRPosition::value(I), nullptr,
                             UsedAssumedInformation, AA::Intraprocedural);
    } else {
      auto &SI = cast<StoreInst>(I);
      getOrCreateAAFor<AAIsDead>(IRPosition::inst(I));
      getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
                           UsedAssumedInformation, AA::Intraprocedural);
      getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
    }
    return true;
  };
  Success = checkForAllInstructionsImpl(
      nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
      {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
      UsedAssumedInformation);
  (void)Success;
  assert(Success && "Expected the check call to be successful!");
}
- /// Helpers to ease debugging through output streams and print calls.
- ///
- ///{
- raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
- return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
- }
- raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
- switch (AP) {
- case IRPosition::IRP_INVALID:
- return OS << "inv";
- case IRPosition::IRP_FLOAT:
- return OS << "flt";
- case IRPosition::IRP_RETURNED:
- return OS << "fn_ret";
- case IRPosition::IRP_CALL_SITE_RETURNED:
- return OS << "cs_ret";
- case IRPosition::IRP_FUNCTION:
- return OS << "fn";
- case IRPosition::IRP_CALL_SITE:
- return OS << "cs";
- case IRPosition::IRP_ARGUMENT:
- return OS << "arg";
- case IRPosition::IRP_CALL_SITE_ARGUMENT:
- return OS << "cs_arg";
- }
- llvm_unreachable("Unknown attribute position!");
- }
- raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
- const Value &AV = Pos.getAssociatedValue();
- OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
- << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]";
- if (Pos.hasCallBaseContext())
- OS << "[cb_context:" << *Pos.getCallBaseContext() << "]";
- return OS << "}";
- }
- raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
- OS << "range-state(" << S.getBitWidth() << ")<";
- S.getKnown().print(OS);
- OS << " / ";
- S.getAssumed().print(OS);
- OS << ">";
- return OS << static_cast<const AbstractState &>(S);
- }
- raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
- return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
- }
- raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
- AA.print(OS);
- return OS;
- }
- raw_ostream &llvm::operator<<(raw_ostream &OS,
- const PotentialConstantIntValuesState &S) {
- OS << "set-state(< {";
- if (!S.isValidState())
- OS << "full-set";
- else {
- for (const auto &It : S.getAssumedSet())
- OS << It << ", ";
- if (S.undefIsContained())
- OS << "undef ";
- }
- OS << "} >)";
- return OS;
- }
- raw_ostream &llvm::operator<<(raw_ostream &OS,
- const PotentialLLVMValuesState &S) {
- OS << "set-state(< {";
- if (!S.isValidState())
- OS << "full-set";
- else {
- for (const auto &It : S.getAssumedSet()) {
- if (auto *F = dyn_cast<Function>(It.first.getValue()))
- OS << "@" << F->getName() << "[" << int(It.second) << "], ";
- else
- OS << *It.first.getValue() << "[" << int(It.second) << "], ";
- }
- if (S.undefIsContained())
- OS << "undef ";
- }
- OS << "} >)";
- return OS;
- }
- void AbstractAttribute::print(raw_ostream &OS) const {
- OS << "[";
- OS << getName();
- OS << "] for CtxI ";
- if (auto *I = getCtxI()) {
- OS << "'";
- I->print(OS);
- OS << "'";
- } else
- OS << "<<null inst>>";
- OS << " at position " << getIRPosition() << " with state " << getAsStr()
- << '\n';
- }
- void AbstractAttribute::printWithDeps(raw_ostream &OS) const {
- print(OS);
- for (const auto &DepAA : Deps) {
- auto *AA = DepAA.getPointer();
- OS << " updates ";
- AA->print(OS);
- }
- OS << '\n';
- }
- raw_ostream &llvm::operator<<(raw_ostream &OS,
- const AAPointerInfo::Access &Acc) {
- OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
- if (Acc.getLocalInst() != Acc.getRemoteInst())
- OS << " via " << *Acc.getLocalInst();
- if (Acc.getContent()) {
- if (*Acc.getContent())
- OS << " [" << **Acc.getContent() << "]";
- else
- OS << " [ <unknown> ]";
- }
- return OS;
- }
- ///}
- /// ----------------------------------------------------------------------------
- /// Pass (Manager) Boilerplate
- /// ----------------------------------------------------------------------------
/// Run the Attributor over \p Functions and return true if anything changed.
///
/// \param InfoCache    Shared IR information cache for this run.
/// \param Functions    The set of functions to analyze; may grow when
///                     internalized copies are added below.
/// \param AG           Analysis getter handed to the pipeline (unused here
///                     directly but part of the established signature).
/// \param CGUpdater    Call-graph updater kept in sync with IR changes.
/// \param DeleteFns    Whether the Attributor may delete dead functions.
/// \param IsModulePass Whether we run as a module pass (vs. CGSCC).
static bool runAttributorOnFunctions(InformationCache &InfoCache,
                                     SetVector<Function *> &Functions,
                                     AnalysisGetter &AG,
                                     CallGraphUpdater &CGUpdater,
                                     bool DeleteFns, bool IsModulePass) {
  if (Functions.empty())
    return false;

  LLVM_DEBUG({
    dbgs() << "[Attributor] Run on module with " << Functions.size()
           << " functions:\n";
    for (Function *Fn : Functions)
      dbgs() << "  - " << Fn->getName() << "\n";
  });

  // Create an Attributor and initially empty information cache that is filled
  // while we identify default attribute opportunities.
  AttributorConfig AC(CGUpdater);
  AC.IsModulePass = IsModulePass;
  AC.DeleteFns = DeleteFns;
  Attributor A(Functions, InfoCache, AC);

  // Create shallow wrappers for all functions that are not IPO amendable
  if (AllowShallowWrappers)
    for (Function *F : Functions)
      if (!A.isFunctionIPOAmendable(*F))
        Attributor::createShallowWrapper(*F);

  // Internalize non-exact functions
  // TODO: for now we eagerly internalize functions without calculating the
  //       cost, we need a cost interface to determine whether internalizing
  //       a function is "beneficial"
  if (AllowDeepWrapper) {
    // Snapshot the size: internalization appends new functions to the set and
    // those copies must not be internalized again.
    unsigned FunSize = Functions.size();
    for (unsigned u = 0; u < FunSize; u++) {
      Function *F = Functions[u];
      if (!F->isDeclaration() && !F->isDefinitionExact() && F->getNumUses() &&
          !GlobalValue::isInterposableLinkage(F->getLinkage())) {
        Function *NewF = Attributor::internalizeFunction(*F);
        assert(NewF && "Could not internalize function.");
        Functions.insert(NewF);

        // Update call graph
        CGUpdater.replaceFunctionWith(*F, *NewF);
        // Callers of the internalized copy need re-analysis since their call
        // edges changed.
        for (const Use &U : NewF->uses())
          if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
            auto *CallerF = CB->getCaller();
            CGUpdater.reanalyzeFunction(*CallerF);
          }
      }
    }
  }

  for (Function *F : Functions) {
    if (F->hasExactDefinition())
      NumFnWithExactDefinition++;
    else
      NumFnWithoutExactDefinition++;

    // We look at internal functions only on-demand but if any use is not a
    // direct call or outside the current set of analyzed functions, we have
    // to do it eagerly.
    if (F->hasLocalLinkage()) {
      if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
            const auto *CB = dyn_cast<CallBase>(U.getUser());
            return CB && CB->isCallee(&U) &&
                   Functions.count(const_cast<Function *>(CB->getCaller()));
          }))
        continue;
    }

    // Populate the Attributor with abstract attribute opportunities in the
    // function and the information cache with IR information.
    A.identifyDefaultAbstractAttributes(*F);
  }

  ChangeStatus Changed = A.run();

  LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
                    << " functions, result: " << Changed << ".\n");
  return Changed == ChangeStatus::CHANGED;
}
- void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }
- void AADepGraph::dumpGraph() {
- static std::atomic<int> CallTimes;
- std::string Prefix;
- if (!DepGraphDotFileNamePrefix.empty())
- Prefix = DepGraphDotFileNamePrefix;
- else
- Prefix = "dep_graph";
- std::string Filename =
- Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";
- outs() << "Dependency graph dump to " << Filename << ".\n";
- std::error_code EC;
- raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
- if (!EC)
- llvm::WriteGraph(File, this);
- CallTimes++;
- }
- void AADepGraph::print() {
- for (auto DepAA : SyntheticRoot.Deps)
- cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
- }
- PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
- FunctionAnalysisManager &FAM =
- AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
- AnalysisGetter AG(FAM);
- SetVector<Function *> Functions;
- for (Function &F : M)
- Functions.insert(&F);
- CallGraphUpdater CGUpdater;
- BumpPtrAllocator Allocator;
- InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
- if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
- /* DeleteFns */ true, /* IsModulePass */ true)) {
- // FIXME: Think about passes we will preserve and add them here.
- return PreservedAnalyses::none();
- }
- return PreservedAnalyses::all();
- }
- PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
- CGSCCAnalysisManager &AM,
- LazyCallGraph &CG,
- CGSCCUpdateResult &UR) {
- FunctionAnalysisManager &FAM =
- AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
- AnalysisGetter AG(FAM);
- SetVector<Function *> Functions;
- for (LazyCallGraph::Node &N : C)
- Functions.insert(&N.getFunction());
- if (Functions.empty())
- return PreservedAnalyses::all();
- Module &M = *Functions.back()->getParent();
- CallGraphUpdater CGUpdater;
- CGUpdater.initialize(CG, C, AM, UR);
- BumpPtrAllocator Allocator;
- InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
- if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
- /* DeleteFns */ false,
- /* IsModulePass */ false)) {
- // FIXME: Think about passes we will preserve and add them here.
- PreservedAnalyses PA;
- PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
- return PA;
- }
- return PreservedAnalyses::all();
- }
namespace llvm {

/// GraphTraits specialization so generic LLVM graph algorithms (ViewGraph,
/// WriteGraph, iterators) can walk a single AA dependency node and its
/// children.
template <> struct GraphTraits<AADepGraphNode *> {
  using NodeRef = AADepGraphNode *;
  // Edges are PointerIntPair<node, 1 bit>; the bit's meaning is defined by
  // AADepGraphNode (not visible here — presumably the dependence class).
  using DepTy = PointerIntPair<AADepGraphNode *, 1>;
  using EdgeRef = PointerIntPair<AADepGraphNode *, 1>;

  static NodeRef getEntryNode(AADepGraphNode *DGN) { return DGN; }
  // Strip the int bit when iterating children: expose only the node pointer.
  static NodeRef DepGetVal(DepTy &DT) { return DT.getPointer(); }

  using ChildIteratorType =
      mapped_iterator<TinyPtrVector<DepTy>::iterator, decltype(&DepGetVal)>;
  using ChildEdgeIteratorType = TinyPtrVector<DepTy>::iterator;

  static ChildIteratorType child_begin(NodeRef N) { return N->child_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->child_end(); }
};

/// GraphTraits for the whole dependency graph: inherits node/child access
/// from the node specialization and adds whole-graph node iteration.
template <>
struct GraphTraits<AADepGraph *> : public GraphTraits<AADepGraphNode *> {
  static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }

  using nodes_iterator =
      mapped_iterator<TinyPtrVector<DepTy>::iterator, decltype(&DepGetVal)>;

  static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }
  static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
};

/// DOT printing support: each node's label is whatever its print() emits.
template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getNodeLabel(const AADepGraphNode *Node,
                                  const AADepGraph *DG) {
    std::string AAString;
    raw_string_ostream O(AAString);
    Node->print(O);
    return AAString;
  }
};

} // end namespace llvm
namespace {

/// Legacy pass manager module-pass wrapper around runAttributorOnFunctions.
/// Runs over all functions of the module and may delete dead ones.
struct AttributorLegacyPass : public ModulePass {
  static char ID;

  AttributorLegacyPass() : ModulePass(ID) {
    initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    if (skipModule(M))
      return false;

    AnalysisGetter AG;
    SetVector<Function *> Functions;
    for (Function &F : M)
      Functions.insert(&F);

    CallGraphUpdater CGUpdater;
    BumpPtrAllocator Allocator;
    // No CGSCC set: module mode analyzes everything.
    InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
    return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                                    /* DeleteFns*/ true,
                                    /* IsModulePass */ true);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: Think about passes we will preserve and add them here.
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }
};

/// Legacy pass manager CGSCC-pass wrapper. Only analyzes defined functions
/// of the SCC and never deletes functions.
struct AttributorCGSCCLegacyPass : public CallGraphSCCPass {
  static char ID;

  AttributorCGSCCLegacyPass() : CallGraphSCCPass(ID) {
    initializeAttributorCGSCCLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnSCC(CallGraphSCC &SCC) override {
    if (skipSCC(SCC))
      return false;

    // Collect only functions with bodies; declarations carry no IR to
    // analyze here.
    SetVector<Function *> Functions;
    for (CallGraphNode *CGN : SCC)
      if (Function *Fn = CGN->getFunction())
        if (!Fn->isDeclaration())
          Functions.insert(Fn);

    if (Functions.empty())
      return false;

    AnalysisGetter AG;
    CallGraph &CG = const_cast<CallGraph &>(SCC.getCallGraph());
    CallGraphUpdater CGUpdater;
    CGUpdater.initialize(CG, SCC);
    Module &M = *Functions.back()->getParent();
    BumpPtrAllocator Allocator;
    InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
    return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                                    /* DeleteFns */ false,
                                    /* IsModulePass */ false);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: Think about passes we will preserve and add them here.
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    CallGraphSCCPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace
- Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
- Pass *llvm::createAttributorCGSCCLegacyPass() {
- return new AttributorCGSCCLegacyPass();
- }
// Unique addresses used by the legacy pass manager to identify each pass.
char AttributorLegacyPass::ID = 0;
char AttributorCGSCCLegacyPass::ID = 0;

// Register the module pass as "attributor". The trailing flags mark it as
// neither CFG-only nor an analysis.
INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
                      "Deduce and propagate attributes", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
                    "Deduce and propagate attributes", false, false)

// Register the CGSCC variant as "attributor-cgscc"; it additionally depends
// on the call graph analysis.
INITIALIZE_PASS_BEGIN(AttributorCGSCCLegacyPass, "attributor-cgscc",
                      "Deduce and propagate attributes (CGSCC pass)", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(AttributorCGSCCLegacyPass, "attributor-cgscc",
                    "Deduce and propagate attributes (CGSCC pass)", false,
                    false)
|