- //===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
- //
- // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- // See https://llvm.org/LICENSE.txt for license information.
- // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- //
- //===----------------------------------------------------------------------===//
- //
- // This file defines the X86-specific support for the FastISel class. Much
- // of the target-specific code is generated by tablegen in the file
- // X86GenFastISel.inc, which is #included here.
- //
- //===----------------------------------------------------------------------===//
- #include "X86.h"
- #include "X86CallingConv.h"
- #include "X86InstrBuilder.h"
- #include "X86InstrInfo.h"
- #include "X86MachineFunctionInfo.h"
- #include "X86RegisterInfo.h"
- #include "X86Subtarget.h"
- #include "X86TargetMachine.h"
- #include "llvm/Analysis/BranchProbabilityInfo.h"
- #include "llvm/CodeGen/FastISel.h"
- #include "llvm/CodeGen/FunctionLoweringInfo.h"
- #include "llvm/CodeGen/MachineConstantPool.h"
- #include "llvm/CodeGen/MachineFrameInfo.h"
- #include "llvm/CodeGen/MachineRegisterInfo.h"
- #include "llvm/IR/CallingConv.h"
- #include "llvm/IR/DebugInfo.h"
- #include "llvm/IR/DerivedTypes.h"
- #include "llvm/IR/GetElementPtrTypeIterator.h"
- #include "llvm/IR/GlobalAlias.h"
- #include "llvm/IR/GlobalVariable.h"
- #include "llvm/IR/Instructions.h"
- #include "llvm/IR/IntrinsicInst.h"
- #include "llvm/IR/IntrinsicsX86.h"
- #include "llvm/IR/Operator.h"
- #include "llvm/MC/MCAsmInfo.h"
- #include "llvm/MC/MCSymbol.h"
- #include "llvm/Support/ErrorHandling.h"
- #include "llvm/Target/TargetOptions.h"
- using namespace llvm;
- namespace {
- class X86FastISel final : public FastISel {
- /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
- /// make the right decision when generating code for different targets.
- const X86Subtarget *Subtarget;
- public:
- explicit X86FastISel(FunctionLoweringInfo &funcInfo,
- const TargetLibraryInfo *libInfo)
- : FastISel(funcInfo, libInfo) {
- Subtarget = &funcInfo.MF->getSubtarget<X86Subtarget>();
- }
- bool fastSelectInstruction(const Instruction *I) override;
- /// The specified machine instr operand is a vreg, and that
- /// vreg is being provided by the specified load instruction. If possible,
- /// try to fold the load as an operand to the instruction, returning true on
- /// success.
- bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
- const LoadInst *LI) override;
- bool fastLowerArguments() override;
- bool fastLowerCall(CallLoweringInfo &CLI) override;
- bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
- #include "X86GenFastISel.inc"
- private:
- bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT,
- const DebugLoc &DL);
- bool X86FastEmitLoad(MVT VT, X86AddressMode &AM, MachineMemOperand *MMO,
- unsigned &ResultReg, unsigned Alignment = 1);
- bool X86FastEmitStore(EVT VT, const Value *Val, X86AddressMode &AM,
- MachineMemOperand *MMO = nullptr, bool Aligned = false);
- bool X86FastEmitStore(EVT VT, unsigned ValReg, X86AddressMode &AM,
- MachineMemOperand *MMO = nullptr, bool Aligned = false);
- bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
- unsigned &ResultReg);
- bool X86SelectAddress(const Value *V, X86AddressMode &AM);
- bool X86SelectCallAddress(const Value *V, X86AddressMode &AM);
- bool X86SelectLoad(const Instruction *I);
- bool X86SelectStore(const Instruction *I);
- bool X86SelectRet(const Instruction *I);
- bool X86SelectCmp(const Instruction *I);
- bool X86SelectZExt(const Instruction *I);
- bool X86SelectSExt(const Instruction *I);
- bool X86SelectBranch(const Instruction *I);
- bool X86SelectShift(const Instruction *I);
- bool X86SelectDivRem(const Instruction *I);
- bool X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I);
- bool X86FastEmitSSESelect(MVT RetVT, const Instruction *I);
- bool X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I);
- bool X86SelectSelect(const Instruction *I);
- bool X86SelectTrunc(const Instruction *I);
- bool X86SelectFPExtOrFPTrunc(const Instruction *I, unsigned Opc,
- const TargetRegisterClass *RC);
- bool X86SelectFPExt(const Instruction *I);
- bool X86SelectFPTrunc(const Instruction *I);
- bool X86SelectSIToFP(const Instruction *I);
- bool X86SelectUIToFP(const Instruction *I);
- bool X86SelectIntToFP(const Instruction *I, bool IsSigned);
- const X86InstrInfo *getInstrInfo() const {
- return Subtarget->getInstrInfo();
- }
- const X86TargetMachine *getTargetMachine() const {
- return static_cast<const X86TargetMachine *>(&TM);
- }
- bool handleConstantAddresses(const Value *V, X86AddressMode &AM);
- unsigned X86MaterializeInt(const ConstantInt *CI, MVT VT);
- unsigned X86MaterializeFP(const ConstantFP *CFP, MVT VT);
- unsigned X86MaterializeGV(const GlobalValue *GV, MVT VT);
- unsigned fastMaterializeConstant(const Constant *C) override;
- unsigned fastMaterializeAlloca(const AllocaInst *C) override;
- unsigned fastMaterializeFloatZero(const ConstantFP *CF) override;
- /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
- /// computed in an SSE register, not on the X87 floating point stack.
- bool isScalarFPTypeInSSEReg(EVT VT) const {
- return (VT == MVT::f64 && Subtarget->hasSSE2()) ||
- (VT == MVT::f32 && Subtarget->hasSSE1()) || VT == MVT::f16;
- }
- bool isTypeLegal(Type *Ty, MVT &VT, bool AllowI1 = false);
- bool IsMemcpySmall(uint64_t Len);
- bool TryEmitSmallMemcpy(X86AddressMode DestAM,
- X86AddressMode SrcAM, uint64_t Len);
- bool foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
- const Value *Cond);
- const MachineInstrBuilder &addFullAddress(const MachineInstrBuilder &MIB,
- X86AddressMode &AM);
- unsigned fastEmitInst_rrrr(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC, unsigned Op0,
- unsigned Op1, unsigned Op2, unsigned Op3);
- };
- } // end anonymous namespace.
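- /// getX86SSEConditionCode - Translate an IR floating-point compare predicate
- /// into the immediate condition code expected by the SSE/AVX CMPcc family of
- /// instructions. Predicates with no direct encoding are handled by swapping
- /// the compare operands; NeedSwap tells the caller to do so.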
- static std::pair<unsigned, bool>
- getX86SSEConditionCode(CmpInst::Predicate Predicate) {
- unsigned CC;
- bool NeedSwap = false;
- // SSE Condition code mapping:
- // 0 - EQ
- // 1 - LT
- // 2 - LE
- // 3 - UNORD
- // 4 - NEQ
- // 5 - NLT
- // 6 - NLE
- // 7 - ORD
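- //  8 - EQ_UQ  (AVX-only extended predicate)
- // 12 - NEQ_OQ (AVX-only extended predicate)
- // Predicates with no direct encoding (e.g. FCMP_OGT) are lowered below by
- // swapping the operands and using the mirrored code (for OGT, LT).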
- switch (Predicate) {
- default: llvm_unreachable("Unexpected predicate");
- case CmpInst::FCMP_OEQ: CC = 0; break;
- case CmpInst::FCMP_OGT: NeedSwap = true; [[fallthrough]];
- case CmpInst::FCMP_OLT: CC = 1; break;
- case CmpInst::FCMP_OGE: NeedSwap = true; [[fallthrough]];
- case CmpInst::FCMP_OLE: CC = 2; break;
- case CmpInst::FCMP_UNO: CC = 3; break;
- case CmpInst::FCMP_UNE: CC = 4; break;
- case CmpInst::FCMP_ULE: NeedSwap = true; [[fallthrough]];
- case CmpInst::FCMP_UGE: CC = 5; break;
- case CmpInst::FCMP_ULT: NeedSwap = true; [[fallthrough]];
- case CmpInst::FCMP_UGT: CC = 6; break;
- case CmpInst::FCMP_ORD: CC = 7; break;
- case CmpInst::FCMP_UEQ: CC = 8; break;
- case CmpInst::FCMP_ONE: CC = 12; break;
- }
- return std::make_pair(CC, NeedSwap);
- }
- /// Adds a complex addressing mode to the given machine instr builder.
- /// Note that this will constrain the index register. If it is not possible to
- /// constrain the given index register, then a new one will be created. The
- /// IndexReg field of the addressing mode will be updated to match in this case.
- const MachineInstrBuilder &
- X86FastISel::addFullAddress(const MachineInstrBuilder &MIB,
- X86AddressMode &AM) {
- // First constrain the index register. It needs to be a GR64_NOSP.
- AM.IndexReg = constrainOperandRegClass(MIB->getDesc(), AM.IndexReg,
- MIB->getNumOperands() +
- X86::AddrIndexReg);
- return ::addFullAddress(MIB, AM);
- }
- /// Check if it is possible to fold the condition from the XALU intrinsic
- /// into the user. The condition code will only be updated on success.
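- /// In IR terms, the pattern being folded looks like, e.g.:
- ///   %res  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
- ///   %obit = extractvalue { i32, i1 } %res, 1
- ///   br i1 %obit, label %overflow, label %normal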
- bool X86FastISel::foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
- const Value *Cond) {
- if (!isa<ExtractValueInst>(Cond))
- return false;
- const auto *EV = cast<ExtractValueInst>(Cond);
- if (!isa<IntrinsicInst>(EV->getAggregateOperand()))
- return false;
- const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand());
- MVT RetVT;
- const Function *Callee = II->getCalledFunction();
- Type *RetTy =
- cast<StructType>(Callee->getReturnType())->getTypeAtIndex(0U);
- if (!isTypeLegal(RetTy, RetVT))
- return false;
- if (RetVT != MVT::i32 && RetVT != MVT::i64)
- return false;
- X86::CondCode TmpCC;
- switch (II->getIntrinsicID()) {
- default: return false;
- case Intrinsic::sadd_with_overflow:
- case Intrinsic::ssub_with_overflow:
- case Intrinsic::smul_with_overflow:
- case Intrinsic::umul_with_overflow: TmpCC = X86::COND_O; break;
- case Intrinsic::uadd_with_overflow:
- case Intrinsic::usub_with_overflow: TmpCC = X86::COND_B; break;
- }
- // Check if both instructions are in the same basic block.
- if (II->getParent() != I->getParent())
- return false;
- // Make sure nothing is in the way between the intrinsic and its use.
- BasicBlock::const_iterator Start(I);
- BasicBlock::const_iterator End(II);
- for (auto Itr = std::prev(Start); Itr != End; --Itr) {
- // We only expect extractvalue instructions between the intrinsic and the
- // instruction to be selected.
- if (!isa<ExtractValueInst>(Itr))
- return false;
- // Check that the extractvalue operand comes from the intrinsic.
- const auto *EVI = cast<ExtractValueInst>(Itr);
- if (EVI->getAggregateOperand() != II)
- return false;
- }
- // Make sure no potentially eflags clobbering phi moves can be inserted in
- // between.
- auto HasPhis = [](const BasicBlock *Succ) { return !Succ->phis().empty(); };
- if (I->isTerminator() && llvm::any_of(successors(I), HasPhis))
- return false;
- // Make sure there are no potentially eflags clobbering constant
- // materializations in between.
- if (llvm::any_of(I->operands(), [](Value *V) { return isa<Constant>(V); }))
- return false;
- CC = TmpCC;
- return true;
- }
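- /// isTypeLegal - Return true if Ty maps to a simple MVT that fast-isel can
- /// handle on this subtarget, storing the result in VT. i1 is accepted only
- /// when AllowI1 is set; f80, and FP types lacking the required SSE support,
- /// are rejected.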
- bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
- EVT evt = TLI.getValueType(DL, Ty, /*AllowUnknown=*/true);
- if (evt == MVT::Other || !evt.isSimple())
- // Unhandled type. Halt "fast" selection and bail.
- return false;
- VT = evt.getSimpleVT();
- // For now, require SSE/SSE2 for performing floating-point operations,
- // since x87 requires additional work.
- if (VT == MVT::f64 && !Subtarget->hasSSE2())
- return false;
- if (VT == MVT::f32 && !Subtarget->hasSSE1())
- return false;
- // Similarly, no f80 support yet.
- if (VT == MVT::f80)
- return false;
- // We only handle legal types. For example, on x86-32 the instruction
- // selector contains all of the 64-bit instructions from x86-64,
- // under the assumption that i64 won't be used if the target doesn't
- // support it.
- return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
- }
- /// X86FastEmitLoad - Emit a machine instruction to load a value of type VT
- /// from the address described by AM. On success, return true and pass the
- /// result register back by reference in ResultReg.
- bool X86FastISel::X86FastEmitLoad(MVT VT, X86AddressMode &AM,
- MachineMemOperand *MMO, unsigned &ResultReg,
- unsigned Alignment) {
- bool HasSSE1 = Subtarget->hasSSE1();
- bool HasSSE2 = Subtarget->hasSSE2();
- bool HasSSE41 = Subtarget->hasSSE41();
- bool HasAVX = Subtarget->hasAVX();
- bool HasAVX2 = Subtarget->hasAVX2();
- bool HasAVX512 = Subtarget->hasAVX512();
- bool HasVLX = Subtarget->hasVLX();
- bool IsNonTemporal = MMO && MMO->isNonTemporal();
- // Treat i1 loads the same as i8 loads. Masking will be done when storing.
- if (VT == MVT::i1)
- VT = MVT::i8;
- // Get opcode and regclass of the output for the given load instruction.
- unsigned Opc = 0;
- switch (VT.SimpleTy) {
- default: return false;
- case MVT::i8:
- Opc = X86::MOV8rm;
- break;
- case MVT::i16:
- Opc = X86::MOV16rm;
- break;
- case MVT::i32:
- Opc = X86::MOV32rm;
- break;
- case MVT::i64:
- // Must be in x86-64 mode.
- Opc = X86::MOV64rm;
- break;
- case MVT::f32:
- Opc = HasAVX512 ? X86::VMOVSSZrm_alt
- : HasAVX ? X86::VMOVSSrm_alt
- : HasSSE1 ? X86::MOVSSrm_alt
- : X86::LD_Fp32m;
- break;
- case MVT::f64:
- Opc = HasAVX512 ? X86::VMOVSDZrm_alt
- : HasAVX ? X86::VMOVSDrm_alt
- : HasSSE2 ? X86::MOVSDrm_alt
- : X86::LD_Fp64m;
- break;
- case MVT::f80:
- // No f80 support yet.
- return false;
- case MVT::v4f32:
- if (IsNonTemporal && Alignment >= 16 && HasSSE41)
- Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
- HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
- else if (Alignment >= 16)
- Opc = HasVLX ? X86::VMOVAPSZ128rm :
- HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm;
- else
- Opc = HasVLX ? X86::VMOVUPSZ128rm :
- HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm;
- break;
- case MVT::v2f64:
- if (IsNonTemporal && Alignment >= 16 && HasSSE41)
- Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
- HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
- else if (Alignment >= 16)
- Opc = HasVLX ? X86::VMOVAPDZ128rm :
- HasAVX ? X86::VMOVAPDrm : X86::MOVAPDrm;
- else
- Opc = HasVLX ? X86::VMOVUPDZ128rm :
- HasAVX ? X86::VMOVUPDrm : X86::MOVUPDrm;
- break;
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v8i16:
- case MVT::v16i8:
- if (IsNonTemporal && Alignment >= 16 && HasSSE41)
- Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
- HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
- else if (Alignment >= 16)
- Opc = HasVLX ? X86::VMOVDQA64Z128rm :
- HasAVX ? X86::VMOVDQArm : X86::MOVDQArm;
- else
- Opc = HasVLX ? X86::VMOVDQU64Z128rm :
- HasAVX ? X86::VMOVDQUrm : X86::MOVDQUrm;
- break;
- case MVT::v8f32:
- assert(HasAVX);
- if (IsNonTemporal && Alignment >= 32 && HasAVX2)
- Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
- else if (IsNonTemporal && Alignment >= 16)
- return false; // Force split for X86::VMOVNTDQArm
- else if (Alignment >= 32)
- Opc = HasVLX ? X86::VMOVAPSZ256rm : X86::VMOVAPSYrm;
- else
- Opc = HasVLX ? X86::VMOVUPSZ256rm : X86::VMOVUPSYrm;
- break;
- case MVT::v4f64:
- assert(HasAVX);
- if (IsNonTemporal && Alignment >= 32 && HasAVX2)
- Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
- else if (IsNonTemporal && Alignment >= 16)
- return false; // Force split for X86::VMOVNTDQArm
- else if (Alignment >= 32)
- Opc = HasVLX ? X86::VMOVAPDZ256rm : X86::VMOVAPDYrm;
- else
- Opc = HasVLX ? X86::VMOVUPDZ256rm : X86::VMOVUPDYrm;
- break;
- case MVT::v8i32:
- case MVT::v4i64:
- case MVT::v16i16:
- case MVT::v32i8:
- assert(HasAVX);
- if (IsNonTemporal && Alignment >= 32 && HasAVX2)
- Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
- else if (IsNonTemporal && Alignment >= 16)
- return false; // Force split for X86::VMOVNTDQArm
- else if (Alignment >= 32)
- Opc = HasVLX ? X86::VMOVDQA64Z256rm : X86::VMOVDQAYrm;
- else
- Opc = HasVLX ? X86::VMOVDQU64Z256rm : X86::VMOVDQUYrm;
- break;
- case MVT::v16f32:
- assert(HasAVX512);
- if (IsNonTemporal && Alignment >= 64)
- Opc = X86::VMOVNTDQAZrm;
- else
- Opc = (Alignment >= 64) ? X86::VMOVAPSZrm : X86::VMOVUPSZrm;
- break;
- case MVT::v8f64:
- assert(HasAVX512);
- if (IsNonTemporal && Alignment >= 64)
- Opc = X86::VMOVNTDQAZrm;
- else
- Opc = (Alignment >= 64) ? X86::VMOVAPDZrm : X86::VMOVUPDZrm;
- break;
- case MVT::v8i64:
- case MVT::v16i32:
- case MVT::v32i16:
- case MVT::v64i8:
- assert(HasAVX512);
- // Note: There are a lot more choices based on type with AVX-512, but
- // there's really no advantage when the load isn't masked.
- if (IsNonTemporal && Alignment >= 64)
- Opc = X86::VMOVNTDQAZrm;
- else
- Opc = (Alignment >= 64) ? X86::VMOVDQA64Zrm : X86::VMOVDQU64Zrm;
- break;
- }
- const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
- ResultReg = createResultReg(RC);
- MachineInstrBuilder MIB =
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg);
- addFullAddress(MIB, AM);
- if (MMO)
- MIB->addMemOperand(*FuncInfo.MF, MMO);
- return true;
- }
- /// X86FastEmitStore - Emit a machine instruction to store the value in
- /// ValReg, of type VT, to the address described by AM. Return true if it is
- /// possible.
- bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, X86AddressMode &AM,
- MachineMemOperand *MMO, bool Aligned) {
- bool HasSSE1 = Subtarget->hasSSE1();
- bool HasSSE2 = Subtarget->hasSSE2();
- bool HasSSE4A = Subtarget->hasSSE4A();
- bool HasAVX = Subtarget->hasAVX();
- bool HasAVX512 = Subtarget->hasAVX512();
- bool HasVLX = Subtarget->hasVLX();
- bool IsNonTemporal = MMO && MMO->isNonTemporal();
- // Get opcode and regclass of the output for the given store instruction.
- unsigned Opc = 0;
- switch (VT.getSimpleVT().SimpleTy) {
- case MVT::f80: // No f80 support yet.
- default: return false;
- case MVT::i1: {
- // Mask out all but lowest bit.
- Register AndResult = createResultReg(&X86::GR8RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(X86::AND8ri), AndResult)
- .addReg(ValReg).addImm(1);
- ValReg = AndResult;
- [[fallthrough]]; // handle i1 as i8.
- }
- case MVT::i8: Opc = X86::MOV8mr; break;
- case MVT::i16: Opc = X86::MOV16mr; break;
- case MVT::i32:
- Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTImr : X86::MOV32mr;
- break;
- case MVT::i64:
- // Must be in x86-64 mode.
- Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTI_64mr : X86::MOV64mr;
- break;
- case MVT::f32:
- if (HasSSE1) {
- if (IsNonTemporal && HasSSE4A)
- Opc = X86::MOVNTSS;
- else
- Opc = HasAVX512 ? X86::VMOVSSZmr :
- HasAVX ? X86::VMOVSSmr : X86::MOVSSmr;
- } else
- Opc = X86::ST_Fp32m;
- break;
- case MVT::f64:
- if (HasSSE2) {
- if (IsNonTemporal && HasSSE4A)
- Opc = X86::MOVNTSD;
- else
- Opc = HasAVX512 ? X86::VMOVSDZmr :
- HasAVX ? X86::VMOVSDmr : X86::MOVSDmr;
- } else
- Opc = X86::ST_Fp64m;
- break;
- case MVT::x86mmx:
- Opc = (IsNonTemporal && HasSSE1) ? X86::MMX_MOVNTQmr : X86::MMX_MOVQ64mr;
- break;
- case MVT::v4f32:
- if (Aligned) {
- if (IsNonTemporal)
- Opc = HasVLX ? X86::VMOVNTPSZ128mr :
- HasAVX ? X86::VMOVNTPSmr : X86::MOVNTPSmr;
- else
- Opc = HasVLX ? X86::VMOVAPSZ128mr :
- HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr;
- } else
- Opc = HasVLX ? X86::VMOVUPSZ128mr :
- HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr;
- break;
- case MVT::v2f64:
- if (Aligned) {
- if (IsNonTemporal)
- Opc = HasVLX ? X86::VMOVNTPDZ128mr :
- HasAVX ? X86::VMOVNTPDmr : X86::MOVNTPDmr;
- else
- Opc = HasVLX ? X86::VMOVAPDZ128mr :
- HasAVX ? X86::VMOVAPDmr : X86::MOVAPDmr;
- } else
- Opc = HasVLX ? X86::VMOVUPDZ128mr :
- HasAVX ? X86::VMOVUPDmr : X86::MOVUPDmr;
- break;
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v8i16:
- case MVT::v16i8:
- if (Aligned) {
- if (IsNonTemporal)
- Opc = HasVLX ? X86::VMOVNTDQZ128mr :
- HasAVX ? X86::VMOVNTDQmr : X86::MOVNTDQmr;
- else
- Opc = HasVLX ? X86::VMOVDQA64Z128mr :
- HasAVX ? X86::VMOVDQAmr : X86::MOVDQAmr;
- } else
- Opc = HasVLX ? X86::VMOVDQU64Z128mr :
- HasAVX ? X86::VMOVDQUmr : X86::MOVDQUmr;
- break;
- case MVT::v8f32:
- assert(HasAVX);
- if (Aligned) {
- if (IsNonTemporal)
- Opc = HasVLX ? X86::VMOVNTPSZ256mr : X86::VMOVNTPSYmr;
- else
- Opc = HasVLX ? X86::VMOVAPSZ256mr : X86::VMOVAPSYmr;
- } else
- Opc = HasVLX ? X86::VMOVUPSZ256mr : X86::VMOVUPSYmr;
- break;
- case MVT::v4f64:
- assert(HasAVX);
- if (Aligned) {
- if (IsNonTemporal)
- Opc = HasVLX ? X86::VMOVNTPDZ256mr : X86::VMOVNTPDYmr;
- else
- Opc = HasVLX ? X86::VMOVAPDZ256mr : X86::VMOVAPDYmr;
- } else
- Opc = HasVLX ? X86::VMOVUPDZ256mr : X86::VMOVUPDYmr;
- break;
- case MVT::v8i32:
- case MVT::v4i64:
- case MVT::v16i16:
- case MVT::v32i8:
- assert(HasAVX);
- if (Aligned) {
- if (IsNonTemporal)
- Opc = HasVLX ? X86::VMOVNTDQZ256mr : X86::VMOVNTDQYmr;
- else
- Opc = HasVLX ? X86::VMOVDQA64Z256mr : X86::VMOVDQAYmr;
- } else
- Opc = HasVLX ? X86::VMOVDQU64Z256mr : X86::VMOVDQUYmr;
- break;
- case MVT::v16f32:
- assert(HasAVX512);
- if (Aligned)
- Opc = IsNonTemporal ? X86::VMOVNTPSZmr : X86::VMOVAPSZmr;
- else
- Opc = X86::VMOVUPSZmr;
- break;
- case MVT::v8f64:
- assert(HasAVX512);
- if (Aligned) {
- Opc = IsNonTemporal ? X86::VMOVNTPDZmr : X86::VMOVAPDZmr;
- } else
- Opc = X86::VMOVUPDZmr;
- break;
- case MVT::v8i64:
- case MVT::v16i32:
- case MVT::v32i16:
- case MVT::v64i8:
- assert(HasAVX512);
- // Note: There are a lot more choices based on type with AVX-512, but
- // there's really no advantage when the store isn't masked.
- if (Aligned)
- Opc = IsNonTemporal ? X86::VMOVNTDQZmr : X86::VMOVDQA64Zmr;
- else
- Opc = X86::VMOVDQU64Zmr;
- break;
- }
- const MCInstrDesc &Desc = TII.get(Opc);
- // Some of the instructions in the previous switch use FR128 instead
- // of FR32 for ValReg. Make sure the register we feed the instruction
- // matches its register class constraints.
- // Note: it is fine to copy from FR32 to FR128; they are the same
- // registers behind the scenes, which is why this never triggered any
- // bugs before.
- ValReg = constrainOperandRegClass(Desc, ValReg, Desc.getNumOperands() - 1);
- MachineInstrBuilder MIB =
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, Desc);
- addFullAddress(MIB, AM).addReg(ValReg);
- if (MMO)
- MIB->addMemOperand(*FuncInfo.MF, MMO);
- return true;
- }
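- /// X86FastEmitStore - Store the IR value Val to the address described by AM,
- /// folding small integer constants directly into the store as immediates
- /// when possible.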
- bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
- X86AddressMode &AM,
- MachineMemOperand *MMO, bool Aligned) {
- // Handle 'null' like i32/i64 0.
- if (isa<ConstantPointerNull>(Val))
- Val = Constant::getNullValue(DL.getIntPtrType(Val->getContext()));
- // If this is a store of a simple constant, fold the constant into the store.
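- // For example, 'store i32 42, ptr %p' becomes a single MOV32mi with an
- // immediate operand of 42, rather than materializing 42 in a register first.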
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
- unsigned Opc = 0;
- bool Signed = true;
- switch (VT.getSimpleVT().SimpleTy) {
- default: break;
- case MVT::i1:
- Signed = false;
- [[fallthrough]]; // Handle as i8.
- case MVT::i8: Opc = X86::MOV8mi; break;
- case MVT::i16: Opc = X86::MOV16mi; break;
- case MVT::i32: Opc = X86::MOV32mi; break;
- case MVT::i64:
- // Must be a 32-bit sign extended value.
- if (isInt<32>(CI->getSExtValue()))
- Opc = X86::MOV64mi32;
- break;
- }
- if (Opc) {
- MachineInstrBuilder MIB =
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc));
- addFullAddress(MIB, AM).addImm(Signed ? (uint64_t) CI->getSExtValue()
- : CI->getZExtValue());
- if (MMO)
- MIB->addMemOperand(*FuncInfo.MF, MMO);
- return true;
- }
- }
- Register ValReg = getRegForValue(Val);
- if (ValReg == 0)
- return false;
- return X86FastEmitStore(VT, ValReg, AM, MMO, Aligned);
- }
- /// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
- /// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
- /// ISD::SIGN_EXTEND).
- bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
- unsigned Src, EVT SrcVT,
- unsigned &ResultReg) {
- unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src);
- if (RR == 0)
- return false;
- ResultReg = RR;
- return true;
- }
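- /// handleConstantAddresses - Try to fold the address of a global value V
- /// into the addressing mode AM, emitting a load from the GOT stub first when
- /// the ABI requires indirect access. Failing that, materialize V into the
- /// base or index register.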
- bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
- // Handle constant address.
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
- // Can't handle alternate code models yet.
- if (TM.getCodeModel() != CodeModel::Small)
- return false;
- // Can't handle TLS yet.
- if (GV->isThreadLocal())
- return false;
- // Can't handle !absolute_symbol references yet.
- if (GV->isAbsoluteSymbolRef())
- return false;
- // RIP-relative addresses can't have additional register operands, so if
- // we've already folded stuff into the addressing mode, just force the
- // global value into its own register, which we can use as the basereg.
- if (!Subtarget->isPICStyleRIPRel() ||
- (AM.Base.Reg == 0 && AM.IndexReg == 0)) {
- // Okay, we've committed to selecting this global. Set up the address.
- AM.GV = GV;
- // Allow the subtarget to classify the global.
- unsigned char GVFlags = Subtarget->classifyGlobalReference(GV);
- // If this reference is relative to the pic base, set it now.
- if (isGlobalRelativeToPICBase(GVFlags)) {
- // FIXME: How do we know Base.Reg is free??
- AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
- }
- // Unless the ABI requires an extra load, return a direct reference to
- // the global.
- if (!isGlobalStubReference(GVFlags)) {
- if (Subtarget->isPICStyleRIPRel()) {
- // Use rip-relative addressing if we can. Above we verified that the
- // base and index registers are unused.
- assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
- AM.Base.Reg = X86::RIP;
- }
- AM.GVOpFlags = GVFlags;
- return true;
- }
- // Ok, we need to do a load from a stub. If we've already loaded from
- // this stub, reuse the loaded pointer, otherwise emit the load now.
- DenseMap<const Value *, Register>::iterator I = LocalValueMap.find(V);
- Register LoadReg;
- if (I != LocalValueMap.end() && I->second) {
- LoadReg = I->second;
- } else {
- // Issue load from stub.
- unsigned Opc = 0;
- const TargetRegisterClass *RC = nullptr;
- X86AddressMode StubAM;
- StubAM.Base.Reg = AM.Base.Reg;
- StubAM.GV = GV;
- StubAM.GVOpFlags = GVFlags;
- // Prepare for inserting code in the local-value area.
- SavePoint SaveInsertPt = enterLocalValueArea();
- if (TLI.getPointerTy(DL) == MVT::i64) {
- Opc = X86::MOV64rm;
- RC = &X86::GR64RegClass;
- } else {
- Opc = X86::MOV32rm;
- RC = &X86::GR32RegClass;
- }
- if (Subtarget->isPICStyleRIPRel() || GVFlags == X86II::MO_GOTPCREL ||
- GVFlags == X86II::MO_GOTPCREL_NORELAX)
- StubAM.Base.Reg = X86::RIP;
- LoadReg = createResultReg(RC);
- MachineInstrBuilder LoadMI =
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), LoadReg);
- addFullAddress(LoadMI, StubAM);
- // Ok, back to normal mode.
- leaveLocalValueArea(SaveInsertPt);
- // Prevent loading GV stub multiple times in same MBB.
- LocalValueMap[V] = LoadReg;
- }
- // Now construct the final address. Note that the Disp, Scale,
- // and Index values may already be set here.
- AM.Base.Reg = LoadReg;
- AM.GV = nullptr;
- return true;
- }
- }
- // If all else fails, try to materialize the value in a register.
- if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
- if (AM.Base.Reg == 0) {
- AM.Base.Reg = getRegForValue(V);
- return AM.Base.Reg != 0;
- }
- if (AM.IndexReg == 0) {
- assert(AM.Scale == 1 && "Scale with no index!");
- AM.IndexReg = getRegForValue(V);
- return AM.IndexReg != 0;
- }
- }
- return false;
- }
- /// X86SelectAddress - Attempt to fill in an address from the given value.
- ///
- bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
- SmallVector<const Value *, 32> GEPs;
- redo_gep:
- const User *U = nullptr;
- unsigned Opcode = Instruction::UserOp1;
- if (const Instruction *I = dyn_cast<Instruction>(V)) {
- // Don't walk into other basic blocks; it's possible we haven't
- // visited them yet, so the instructions may not yet be assigned
- // virtual registers.
- if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(V)) ||
- FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
- Opcode = I->getOpcode();
- U = I;
- }
- } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
- Opcode = C->getOpcode();
- U = C;
- }
- if (PointerType *Ty = dyn_cast<PointerType>(V->getType()))
- if (Ty->getAddressSpace() > 255)
- // Fast instruction selection doesn't support the special
- // address spaces.
- return false;
- switch (Opcode) {
- default: break;
- case Instruction::BitCast:
- // Look past bitcasts.
- return X86SelectAddress(U->getOperand(0), AM);
- case Instruction::IntToPtr:
- // Look past no-op inttoptrs.
- if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
- TLI.getPointerTy(DL))
- return X86SelectAddress(U->getOperand(0), AM);
- break;
- case Instruction::PtrToInt:
- // Look past no-op ptrtoints.
- if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
- return X86SelectAddress(U->getOperand(0), AM);
- break;
- case Instruction::Alloca: {
- // Do static allocas.
- const AllocaInst *A = cast<AllocaInst>(V);
- DenseMap<const AllocaInst *, int>::iterator SI =
- FuncInfo.StaticAllocaMap.find(A);
- if (SI != FuncInfo.StaticAllocaMap.end()) {
- AM.BaseType = X86AddressMode::FrameIndexBase;
- AM.Base.FrameIndex = SI->second;
- return true;
- }
- break;
- }
- case Instruction::Add: {
- // Adds of constants are common and easy enough.
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
- uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
- // They have to fit in the 32-bit signed displacement field though.
- if (isInt<32>(Disp)) {
- AM.Disp = (uint32_t)Disp;
- return X86SelectAddress(U->getOperand(0), AM);
- }
- }
- break;
- }
- case Instruction::GetElementPtr: {
- X86AddressMode SavedAM = AM;
- // Pattern-match simple GEPs.
- uint64_t Disp = (int32_t)AM.Disp;
- unsigned IndexReg = AM.IndexReg;
- unsigned Scale = AM.Scale;
- gep_type_iterator GTI = gep_type_begin(U);
- // Iterate through the indices, folding what we can. Constants can be
- // folded, and one dynamic index can be handled, if the scale is supported.
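- // For example, 'getelementptr i32, ptr %p, i64 %i' folds into
- // base = %p, index = %i, scale = 4, disp = 0.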
- for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
- i != e; ++i, ++GTI) {
- const Value *Op = *i;
- if (StructType *STy = GTI.getStructTypeOrNull()) {
- const StructLayout *SL = DL.getStructLayout(STy);
- Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
- continue;
- }
- // An array/variable index is always of the form i*S where S is the
- // constant scale size. See if we can push the scale into immediates.
- uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
- for (;;) {
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
- // Constant-offset addressing.
- Disp += CI->getSExtValue() * S;
- break;
- }
- if (canFoldAddIntoGEP(U, Op)) {
- // A compatible add with a constant operand. Fold the constant.
- ConstantInt *CI =
- cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
- Disp += CI->getSExtValue() * S;
- // Iterate on the other operand.
- Op = cast<AddOperator>(Op)->getOperand(0);
- continue;
- }
- if (IndexReg == 0 &&
- (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
- (S == 1 || S == 2 || S == 4 || S == 8)) {
- // Scaled-index addressing.
- Scale = S;
- IndexReg = getRegForGEPIndex(Op);
- if (IndexReg == 0)
- return false;
- break;
- }
- // Unsupported.
- goto unsupported_gep;
- }
- }
- // Check for displacement overflow.
- if (!isInt<32>(Disp))
- break;
- AM.IndexReg = IndexReg;
- AM.Scale = Scale;
- AM.Disp = (uint32_t)Disp;
- GEPs.push_back(V);
- if (const GetElementPtrInst *GEP =
- dyn_cast<GetElementPtrInst>(U->getOperand(0))) {
- // Ok, the GEP indices were covered by constant-offset and scaled-index
- // addressing. Update the address state and move on to examining the base.
- V = GEP;
- goto redo_gep;
- } else if (X86SelectAddress(U->getOperand(0), AM)) {
- return true;
- }
- // If we couldn't merge the gep value into this addr mode, revert to our
- // saved address and just match the value instead of completely failing.
- AM = SavedAM;
- for (const Value *I : reverse(GEPs))
- if (handleConstantAddresses(I, AM))
- return true;
- return false;
- unsupported_gep:
- // Ok, the GEP indices weren't all covered.
- break;
- }
- }
- return handleConstantAddresses(V, AM);
- }
- /// X86SelectCallAddress - Attempt to fill in an address from the given value.
- ///
- bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
- const User *U = nullptr;
- unsigned Opcode = Instruction::UserOp1;
- const Instruction *I = dyn_cast<Instruction>(V);
- // Record if the value is defined in the same basic block.
- //
- // This information is crucial for knowing whether folding an
- // operand is valid.
- // Indeed, FastISel generates or reuses a virtual register for all
- // operands of all instructions it selects. Obviously, the definition and
- // its uses must use the same virtual register, otherwise the produced
- // code is incorrect.
- // Before instruction selection, FunctionLoweringInfo::set sets the virtual
- // registers for values that are alive across basic blocks. This ensures
- // that the values are set consistently across basic blocks, even
- // if different instruction selection mechanisms are used (e.g., a mix of
- // SDISel and FastISel).
- // For values local to a basic block, the instruction selection process
- // generates these virtual registers with whatever method is appropriate
- // for its needs. In particular, FastISel and SDISel do not share the way
- // local virtual registers are set.
- // Therefore, it is impossible (or at least unsafe) to share values
- // between basic blocks unless they use the same instruction selection
- // method, which is not guaranteed for X86.
- // Moreover, things like hasOneUse could not be used accurately if we
- // allowed references to values across basic blocks when they are not
- // actually alive across basic blocks.
- bool InMBB = true;
- if (I) {
- Opcode = I->getOpcode();
- U = I;
- InMBB = I->getParent() == FuncInfo.MBB->getBasicBlock();
- } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
- Opcode = C->getOpcode();
- U = C;
- }
- switch (Opcode) {
- default: break;
- case Instruction::BitCast:
- // Look past bitcasts if its operand is in the same BB.
- if (InMBB)
- return X86SelectCallAddress(U->getOperand(0), AM);
- break;
- case Instruction::IntToPtr:
- // Look past no-op inttoptrs if its operand is in the same BB.
- if (InMBB &&
- TLI.getValueType(DL, U->getOperand(0)->getType()) ==
- TLI.getPointerTy(DL))
- return X86SelectCallAddress(U->getOperand(0), AM);
- break;
- case Instruction::PtrToInt:
- // Look past no-op ptrtoints if its operand is in the same BB.
- if (InMBB && TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
- return X86SelectCallAddress(U->getOperand(0), AM);
- break;
- }
- // Handle constant address.
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
- // Can't handle alternate code models yet.
- if (TM.getCodeModel() != CodeModel::Small)
- return false;
- // RIP-relative addresses can't have additional register operands.
- if (Subtarget->isPICStyleRIPRel() &&
- (AM.Base.Reg != 0 || AM.IndexReg != 0))
- return false;
- // Can't handle TLS.
- if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
- if (GVar->isThreadLocal())
- return false;
- // Okay, we've committed to selecting this global. Set up the basic address.
- AM.GV = GV;
- // Return a direct reference to the global. FastISel can handle calls to
- // functions that require loads, such as dllimport and nonlazybind
- // functions.
- if (Subtarget->isPICStyleRIPRel()) {
- // Use rip-relative addressing if we can. Above we verified that the
- // base and index registers are unused.
- assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
- AM.Base.Reg = X86::RIP;
- } else {
- AM.GVOpFlags = Subtarget->classifyLocalReference(nullptr);
- }
- return true;
- }
- // If all else fails, try to materialize the value in a register.
- if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
- auto GetCallRegForValue = [this](const Value *V) {
- Register Reg = getRegForValue(V);
- // In 64-bit mode, we need a 64-bit register even if pointers are 32 bits.
- if (Reg && Subtarget->isTarget64BitILP32()) {
- Register CopyReg = createResultReg(&X86::GR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV32rr),
- CopyReg)
- .addReg(Reg);
- Register ExtReg = createResultReg(&X86::GR64RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::SUBREG_TO_REG), ExtReg)
- .addImm(0)
- .addReg(CopyReg)
- .addImm(X86::sub_32bit);
- Reg = ExtReg;
- }
- return Reg;
- };
- if (AM.Base.Reg == 0) {
- AM.Base.Reg = GetCallRegForValue(V);
- return AM.Base.Reg != 0;
- }
- if (AM.IndexReg == 0) {
- assert(AM.Scale == 1 && "Scale with no index!");
- AM.IndexReg = GetCallRegForValue(V);
- return AM.IndexReg != 0;
- }
- }
- return false;
- }
- /// X86SelectStore - Select and emit code to implement store instructions.
- bool X86FastISel::X86SelectStore(const Instruction *I) {
- // Atomic stores need special handling.
- const StoreInst *S = cast<StoreInst>(I);
- if (S->isAtomic())
- return false;
- const Value *PtrV = I->getOperand(1);
- if (TLI.supportSwiftError()) {
- // Swifterror values can come from either a function parameter with
- // swifterror attribute or an alloca with swifterror attribute.
- if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
- if (Arg->hasSwiftErrorAttr())
- return false;
- }
- if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
- if (Alloca->isSwiftError())
- return false;
- }
- }
- const Value *Val = S->getValueOperand();
- const Value *Ptr = S->getPointerOperand();
- MVT VT;
- if (!isTypeLegal(Val->getType(), VT, /*AllowI1=*/true))
- return false;
- Align Alignment = S->getAlign();
- Align ABIAlignment = DL.getABITypeAlign(Val->getType());
- bool Aligned = Alignment >= ABIAlignment;
- X86AddressMode AM;
- if (!X86SelectAddress(Ptr, AM))
- return false;
- return X86FastEmitStore(VT, Val, AM, createMachineMemOperandFor(I), Aligned);
- }
- /// X86SelectRet - Select and emit code to implement ret instructions.
- bool X86FastISel::X86SelectRet(const Instruction *I) {
- const ReturnInst *Ret = cast<ReturnInst>(I);
- const Function &F = *I->getParent()->getParent();
- const X86MachineFunctionInfo *X86MFInfo =
- FuncInfo.MF->getInfo<X86MachineFunctionInfo>();
- if (!FuncInfo.CanLowerReturn)
- return false;
- if (TLI.supportSwiftError() &&
- F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
- return false;
- if (TLI.supportSplitCSR(FuncInfo.MF))
- return false;
- CallingConv::ID CC = F.getCallingConv();
- if (CC != CallingConv::C &&
- CC != CallingConv::Fast &&
- CC != CallingConv::Tail &&
- CC != CallingConv::SwiftTail &&
- CC != CallingConv::X86_FastCall &&
- CC != CallingConv::X86_StdCall &&
- CC != CallingConv::X86_ThisCall &&
- CC != CallingConv::X86_64_SysV &&
- CC != CallingConv::Win64)
- return false;
- // Don't handle popping bytes if they don't fit the ret's immediate.
- if (!isUInt<16>(X86MFInfo->getBytesToPopOnReturn()))
- return false;
- // fastcc with -tailcallopt is intended to provide a guaranteed
- // tail call optimization. FastISel doesn't know how to do that.
- if ((CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt) ||
- CC == CallingConv::Tail || CC == CallingConv::SwiftTail)
- return false;
- // Let SDISel handle vararg functions.
- if (F.isVarArg())
- return false;
- // Build a list of return value registers.
- SmallVector<unsigned, 4> RetRegs;
- if (Ret->getNumOperands() > 0) {
- SmallVector<ISD::OutputArg, 4> Outs;
- GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
- // Analyze operands of the call, assigning locations to each operand.
- SmallVector<CCValAssign, 16> ValLocs;
- CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
- CCInfo.AnalyzeReturn(Outs, RetCC_X86);
- const Value *RV = Ret->getOperand(0);
- Register Reg = getRegForValue(RV);
- if (Reg == 0)
- return false;
- // Only handle a single return value for now.
- if (ValLocs.size() != 1)
- return false;
- CCValAssign &VA = ValLocs[0];
- // Don't bother handling odd stuff for now.
- if (VA.getLocInfo() != CCValAssign::Full)
- return false;
- // Only handle register returns for now.
- if (!VA.isRegLoc())
- return false;
- // The calling-convention tables for x87 returns don't tell
- // the whole story.
- if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
- return false;
- unsigned SrcReg = Reg + VA.getValNo();
- EVT SrcVT = TLI.getValueType(DL, RV->getType());
- EVT DstVT = VA.getValVT();
- // Special handling for extended integers.
- if (SrcVT != DstVT) {
- if (SrcVT != MVT::i1 && SrcVT != MVT::i8 && SrcVT != MVT::i16)
- return false;
- if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
- return false;
- assert(DstVT == MVT::i32 && "X86 should always ext to i32");
- if (SrcVT == MVT::i1) {
- if (Outs[0].Flags.isSExt())
- return false;
- // TODO
- SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg);
- SrcVT = MVT::i8;
- }
- unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
- ISD::SIGN_EXTEND;
- // TODO
- SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op, SrcReg);
- }
- // Make the copy.
- Register DstReg = VA.getLocReg();
- const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
- // Avoid a cross-class copy. This is very unlikely.
- if (!SrcRC->contains(DstReg))
- return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);
- // Add register to return instruction.
- RetRegs.push_back(VA.getLocReg());
- }
- // The Swift calling convention does not require that we copy the sret
- // argument into %rax/%eax for the return, and SRetReturnReg is not set for
- // Swift.
- // All x86 ABIs require that for returning structs by value we copy
- // the sret argument into %rax/%eax (depending on ABI) for the return.
- // We saved the argument into a virtual register in the entry block,
- // so now we copy the value out and into %rax/%eax.
- if (F.hasStructRetAttr() && CC != CallingConv::Swift &&
- CC != CallingConv::SwiftTail) {
- Register Reg = X86MFInfo->getSRetReturnReg();
- assert(Reg &&
- "SRetReturnReg should have been set in LowerFormalArguments()!");
- unsigned RetReg = Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), RetReg).addReg(Reg);
- RetRegs.push_back(RetReg);
- }
- // Now emit the RET.
- MachineInstrBuilder MIB;
- if (X86MFInfo->getBytesToPopOnReturn()) {
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(Subtarget->is64Bit() ? X86::RETI64 : X86::RETI32))
- .addImm(X86MFInfo->getBytesToPopOnReturn());
- } else {
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(Subtarget->is64Bit() ? X86::RET64 : X86::RET32));
- }
- for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
- MIB.addReg(RetRegs[i], RegState::Implicit);
- return true;
- }
- /// X86SelectLoad - Select and emit code to implement load instructions.
- ///
- bool X86FastISel::X86SelectLoad(const Instruction *I) {
- const LoadInst *LI = cast<LoadInst>(I);
- // Atomic loads need special handling.
- if (LI->isAtomic())
- return false;
- const Value *SV = I->getOperand(0);
- if (TLI.supportSwiftError()) {
- // Swifterror values can come from either a function parameter with
- // swifterror attribute or an alloca with swifterror attribute.
- if (const Argument *Arg = dyn_cast<Argument>(SV)) {
- if (Arg->hasSwiftErrorAttr())
- return false;
- }
- if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
- if (Alloca->isSwiftError())
- return false;
- }
- }
- MVT VT;
- if (!isTypeLegal(LI->getType(), VT, /*AllowI1=*/true))
- return false;
- const Value *Ptr = LI->getPointerOperand();
- X86AddressMode AM;
- if (!X86SelectAddress(Ptr, AM))
- return false;
- unsigned ResultReg = 0;
- if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg,
- LI->getAlign().value()))
- return false;
- updateValueMap(I, ResultReg);
- return true;
- }
- static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
- bool HasAVX512 = Subtarget->hasAVX512();
- bool HasAVX = Subtarget->hasAVX();
- bool HasSSE1 = Subtarget->hasSSE1();
- bool HasSSE2 = Subtarget->hasSSE2();
- switch (VT.getSimpleVT().SimpleTy) {
- default: return 0;
- case MVT::i8: return X86::CMP8rr;
- case MVT::i16: return X86::CMP16rr;
- case MVT::i32: return X86::CMP32rr;
- case MVT::i64: return X86::CMP64rr;
- case MVT::f32:
- return HasAVX512 ? X86::VUCOMISSZrr
- : HasAVX ? X86::VUCOMISSrr
- : HasSSE1 ? X86::UCOMISSrr
- : 0;
- case MVT::f64:
- return HasAVX512 ? X86::VUCOMISDZrr
- : HasAVX ? X86::VUCOMISDrr
- : HasSSE2 ? X86::UCOMISDrr
- : 0;
- }
- }
- /// If the RHS of the comparison is a constant that can be encoded as an
- /// immediate operand, return an opcode that compares against it directly
- /// (e.g. CMP32ri); otherwise return 0.
- static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
- int64_t Val = RHSC->getSExtValue();
- switch (VT.getSimpleVT().SimpleTy) {
- // Otherwise, we can't fold the immediate into this comparison.
- default:
- return 0;
- case MVT::i8:
- return X86::CMP8ri;
- case MVT::i16:
- if (isInt<8>(Val))
- return X86::CMP16ri8;
- return X86::CMP16ri;
- case MVT::i32:
- if (isInt<8>(Val))
- return X86::CMP32ri8;
- return X86::CMP32ri;
- case MVT::i64:
- if (isInt<8>(Val))
- return X86::CMP64ri8;
- // 64-bit comparisons are only valid if the immediate fits in a 32-bit sext
- // field.
- if (isInt<32>(Val))
- return X86::CMP64ri32;
- return 0;
- }
- }
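- // For example, with i64 operands: a compare against 42 fits the
- // sign-extended 8-bit field and yields CMP64ri8, a compare against 100000
- // yields CMP64ri32, and a compare against (1 << 40) yields 0 and must go
- // through the register-register form instead.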
- bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT,
- const DebugLoc &CurMIMD) {
- Register Op0Reg = getRegForValue(Op0);
- if (Op0Reg == 0) return false;
- // Handle 'null' like i32/i64 0.
- if (isa<ConstantPointerNull>(Op1))
- Op1 = Constant::getNullValue(DL.getIntPtrType(Op0->getContext()));
- // We have two options: compare with register or immediate. If the RHS of
- // the compare is an immediate that we can fold into this compare, use
- // CMPri, otherwise use CMPrr.
- if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
- if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurMIMD, TII.get(CompareImmOpc))
- .addReg(Op0Reg)
- .addImm(Op1C->getSExtValue());
- return true;
- }
- }
- unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget);
- if (CompareOpc == 0) return false;
- Register Op1Reg = getRegForValue(Op1);
- if (Op1Reg == 0) return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurMIMD, TII.get(CompareOpc))
- .addReg(Op0Reg)
- .addReg(Op1Reg);
- return true;
- }
- bool X86FastISel::X86SelectCmp(const Instruction *I) {
- const CmpInst *CI = cast<CmpInst>(I);
- MVT VT;
- if (!isTypeLegal(I->getOperand(0)->getType(), VT))
- return false;
- // Below code only works for scalars.
- if (VT.isVector())
- return false;
- // Try to optimize or fold the cmp.
- CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
- unsigned ResultReg = 0;
- switch (Predicate) {
- default: break;
- case CmpInst::FCMP_FALSE: {
- ResultReg = createResultReg(&X86::GR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV32r0),
- ResultReg);
- ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, X86::sub_8bit);
- if (!ResultReg)
- return false;
- break;
- }
- case CmpInst::FCMP_TRUE: {
- ResultReg = createResultReg(&X86::GR8RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV8ri),
- ResultReg).addImm(1);
- break;
- }
- }
- if (ResultReg) {
- updateValueMap(I, ResultReg);
- return true;
- }
- const Value *LHS = CI->getOperand(0);
- const Value *RHS = CI->getOperand(1);
- // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
- // We don't have to materialize a zero constant for this case and can just use
- // %x again on the RHS.
- if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
- const auto *RHSC = dyn_cast<ConstantFP>(RHS);
- if (RHSC && RHSC->isNullValue())
- RHS = LHS;
- }
- // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
- static const uint16_t SETFOpcTable[2][3] = {
- { X86::COND_E, X86::COND_NP, X86::AND8rr },
- { X86::COND_NE, X86::COND_P, X86::OR8rr }
- };
- const uint16_t *SETFOpc = nullptr;
- switch (Predicate) {
- default: break;
- case CmpInst::FCMP_OEQ: SETFOpc = &SETFOpcTable[0][0]; break;
- case CmpInst::FCMP_UNE: SETFOpc = &SETFOpcTable[1][0]; break;
- }
- ResultReg = createResultReg(&X86::GR8RegClass);
- if (SETFOpc) {
- if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
- return false;
- Register FlagReg1 = createResultReg(&X86::GR8RegClass);
- Register FlagReg2 = createResultReg(&X86::GR8RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
- FlagReg1).addImm(SETFOpc[0]);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
- FlagReg2).addImm(SETFOpc[1]);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(SETFOpc[2]),
- ResultReg).addReg(FlagReg1).addReg(FlagReg2);
- updateValueMap(I, ResultReg);
- return true;
- }
- X86::CondCode CC;
- bool SwapArgs;
- std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
- assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
- if (SwapArgs)
- std::swap(LHS, RHS);
- // Emit a compare of LHS/RHS.
- if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
- return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
- ResultReg).addImm(CC);
- updateValueMap(I, ResultReg);
- return true;
- }
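- // Illustrative sketch: `fcmp oeq float %a, %b` cannot be decided by a single
- // flag, so per the SETFOpcTable above it emits roughly (plain SSE1 assumed):
- //   UCOMISSrr %a, %b
- //   %e:gr8  = SETCCr COND_E        ; equal
- //   %p:gr8  = SETCCr COND_NP       ; and not unordered
- //   %r:gr8  = AND8rr %e, %p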
- bool X86FastISel::X86SelectZExt(const Instruction *I) {
- EVT DstVT = TLI.getValueType(DL, I->getType());
- if (!TLI.isTypeLegal(DstVT))
- return false;
- Register ResultReg = getRegForValue(I->getOperand(0));
- if (ResultReg == 0)
- return false;
- // Handle zero-extension from i1 to i8, which is common.
- MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
- if (SrcVT == MVT::i1) {
- // Set the high bits to zero.
- ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg);
- SrcVT = MVT::i8;
- if (ResultReg == 0)
- return false;
- }
- if (DstVT == MVT::i64) {
- // Handle extension to 64-bits via sub-register shenanigans.
- unsigned MovInst;
- switch (SrcVT.SimpleTy) {
- case MVT::i8: MovInst = X86::MOVZX32rr8; break;
- case MVT::i16: MovInst = X86::MOVZX32rr16; break;
- case MVT::i32: MovInst = X86::MOV32rr; break;
- default: llvm_unreachable("Unexpected zext to i64 source type");
- }
- Register Result32 = createResultReg(&X86::GR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovInst), Result32)
- .addReg(ResultReg);
- ResultReg = createResultReg(&X86::GR64RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::SUBREG_TO_REG),
- ResultReg)
- .addImm(0).addReg(Result32).addImm(X86::sub_32bit);
- } else if (DstVT == MVT::i16) {
- // i8->i16 doesn't exist in the autogenerated isel table. Need to zero
- // extend to 32-bits and then extract down to 16-bits.
- Register Result32 = createResultReg(&X86::GR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOVZX32rr8),
- Result32).addReg(ResultReg);
- ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit);
- } else if (DstVT != MVT::i8) {
- ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
- ResultReg);
- if (ResultReg == 0)
- return false;
- }
- updateValueMap(I, ResultReg);
- return true;
- }
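- // Illustrative sketch: `zext i1 %b to i64` walks through both steps above,
- // roughly (assuming fastEmitZExtFromI1 masks with an AND against 1):
- //   %1:gr8  = AND8ri %0, 1
- //   %2:gr32 = MOVZX32rr8 %1
- //   %3:gr64 = SUBREG_TO_REG 0, %2, %subreg.sub_32bit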
- bool X86FastISel::X86SelectSExt(const Instruction *I) {
- EVT DstVT = TLI.getValueType(DL, I->getType());
- if (!TLI.isTypeLegal(DstVT))
- return false;
- Register ResultReg = getRegForValue(I->getOperand(0));
- if (ResultReg == 0)
- return false;
- // Handle sign-extension from i1 to i8.
- MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
- if (SrcVT == MVT::i1) {
- // Set the high bits to zero.
- Register ZExtReg = fastEmitZExtFromI1(MVT::i8, ResultReg);
- if (ZExtReg == 0)
- return false;
- // Negate the result to make an 8-bit sign extended value.
- ResultReg = createResultReg(&X86::GR8RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::NEG8r),
- ResultReg).addReg(ZExtReg);
- SrcVT = MVT::i8;
- }
- if (DstVT == MVT::i16) {
- // i8->i16 doesn't exist in the autogenerated isel table. Need to sign
- // extend to 32-bits and then extract down to 16-bits.
- Register Result32 = createResultReg(&X86::GR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOVSX32rr8),
- Result32).addReg(ResultReg);
- ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit);
- } else if (DstVT != MVT::i8) {
- ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::SIGN_EXTEND,
- ResultReg);
- if (ResultReg == 0)
- return false;
- }
- updateValueMap(I, ResultReg);
- return true;
- }
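- // Illustrative sketch: `sext i1 %b to i32` isolates the bit, negates it to
- // produce 0 or 0xFF, then sign-extends, roughly:
- //   %1:gr8  = AND8ri %0, 1         ; assumed form of fastEmitZExtFromI1
- //   %2:gr8  = NEG8r %1             ; 0 -> 0, 1 -> 0xFF
- //   %3:gr32 = MOVSX32rr8 %2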
- bool X86FastISel::X86SelectBranch(const Instruction *I) {
- // Unconditional branches are selected by tablegen-generated code.
- // Handle a conditional branch.
- const BranchInst *BI = cast<BranchInst>(I);
- MachineBasicBlock *TrueMBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
- MachineBasicBlock *FalseMBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
- // Fold the common case of a conditional branch with a comparison
- // in the same block (values defined on other blocks may not have
- // initialized registers).
- X86::CondCode CC;
- if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
- if (CI->hasOneUse() && CI->getParent() == I->getParent()) {
- EVT VT = TLI.getValueType(DL, CI->getOperand(0)->getType());
- // Try to optimize or fold the cmp.
- CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
- switch (Predicate) {
- default: break;
- case CmpInst::FCMP_FALSE: fastEmitBranch(FalseMBB, MIMD.getDL()); return true;
- case CmpInst::FCMP_TRUE: fastEmitBranch(TrueMBB, MIMD.getDL()); return true;
- }
- const Value *CmpLHS = CI->getOperand(0);
- const Value *CmpRHS = CI->getOperand(1);
- // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x,
- // 0.0.
- // We don't have to materialize a zero constant for this case and can just
- // use %x again on the RHS.
- if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
- const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
- if (CmpRHSC && CmpRHSC->isNullValue())
- CmpRHS = CmpLHS;
- }
- // Try to take advantage of fallthrough opportunities.
- if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
- std::swap(TrueMBB, FalseMBB);
- Predicate = CmpInst::getInversePredicate(Predicate);
- }
- // FCMP_OEQ and FCMP_UNE cannot be expressed with a single flag/condition
- // code check. Instead two branch instructions are required to check all
- // the flags. First we change the predicate to a supported condition code,
- // which will be the first branch. Later on we will emit the second
- // branch.
- bool NeedExtraBranch = false;
- switch (Predicate) {
- default: break;
- case CmpInst::FCMP_OEQ:
- std::swap(TrueMBB, FalseMBB);
- [[fallthrough]];
- case CmpInst::FCMP_UNE:
- NeedExtraBranch = true;
- Predicate = CmpInst::FCMP_ONE;
- break;
- }
- bool SwapArgs;
- std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
- assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
- if (SwapArgs)
- std::swap(CmpLHS, CmpRHS);
- // Emit a compare of the LHS and RHS, setting the flags.
- if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT, CI->getDebugLoc()))
- return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))
- .addMBB(TrueMBB).addImm(CC);
- // X86 requires a second branch to handle UNE (and OEQ, which is mapped
- // to UNE above).
- if (NeedExtraBranch) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))
- .addMBB(TrueMBB).addImm(X86::COND_P);
- }
- finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
- return true;
- }
- } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
- // Handle things like "%cond = trunc i32 %X to i1 / br i1 %cond", which
- // typically happen for _Bool and C++ bools.
- MVT SourceVT;
- if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
- isTypeLegal(TI->getOperand(0)->getType(), SourceVT)) {
- unsigned TestOpc = 0;
- switch (SourceVT.SimpleTy) {
- default: break;
- case MVT::i8: TestOpc = X86::TEST8ri; break;
- case MVT::i16: TestOpc = X86::TEST16ri; break;
- case MVT::i32: TestOpc = X86::TEST32ri; break;
- case MVT::i64: TestOpc = X86::TEST64ri32; break;
- }
- if (TestOpc) {
- Register OpReg = getRegForValue(TI->getOperand(0));
- if (OpReg == 0) return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TestOpc))
- .addReg(OpReg).addImm(1);
- unsigned JmpCond = X86::COND_NE;
- if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
- std::swap(TrueMBB, FalseMBB);
- JmpCond = X86::COND_E;
- }
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))
- .addMBB(TrueMBB).addImm(JmpCond);
- finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
- return true;
- }
- }
- } else if (foldX86XALUIntrinsic(CC, BI, BI->getCondition())) {
- // Request a register for the condition even though only EFLAGS is used;
- // otherwise the intrinsic might be completely optimized away.
- Register TmpReg = getRegForValue(BI->getCondition());
- if (TmpReg == 0)
- return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))
- .addMBB(TrueMBB).addImm(CC);
- finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
- return true;
- }
- // Otherwise do a clumsy setcc and re-test it.
- // Note that i1 essentially gets ANY_EXTEND'ed to i8 where it isn't used
- // in an explicit cast, so make sure to handle that correctly.
- Register OpReg = getRegForValue(BI->getCondition());
- if (OpReg == 0) return false;
- // In case OpReg is a K register, COPY to a GPR
- if (MRI.getRegClass(OpReg) == &X86::VK1RegClass) {
- unsigned KOpReg = OpReg;
- OpReg = createResultReg(&X86::GR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), OpReg)
- .addReg(KOpReg);
- OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, X86::sub_8bit);
- }
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri))
- .addReg(OpReg)
- .addImm(1);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))
- .addMBB(TrueMBB).addImm(X86::COND_NE);
- finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
- return true;
- }
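- // Illustrative sketch of the TruncInst path above: for
- //   %c = trunc i32 %x to i1
- //   br i1 %c, label %t, label %f
- // we emit roughly
- //   TEST32ri %x, 1
- //   JCC_1 %bb.t, COND_NE           ; or COND_E with swapped targets when
- //                                  ; %bb.t is the layout successor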
- bool X86FastISel::X86SelectShift(const Instruction *I) {
- unsigned CReg = 0, OpReg = 0;
- const TargetRegisterClass *RC = nullptr;
- if (I->getType()->isIntegerTy(8)) {
- CReg = X86::CL;
- RC = &X86::GR8RegClass;
- switch (I->getOpcode()) {
- case Instruction::LShr: OpReg = X86::SHR8rCL; break;
- case Instruction::AShr: OpReg = X86::SAR8rCL; break;
- case Instruction::Shl: OpReg = X86::SHL8rCL; break;
- default: return false;
- }
- } else if (I->getType()->isIntegerTy(16)) {
- CReg = X86::CX;
- RC = &X86::GR16RegClass;
- switch (I->getOpcode()) {
- default: llvm_unreachable("Unexpected shift opcode");
- case Instruction::LShr: OpReg = X86::SHR16rCL; break;
- case Instruction::AShr: OpReg = X86::SAR16rCL; break;
- case Instruction::Shl: OpReg = X86::SHL16rCL; break;
- }
- } else if (I->getType()->isIntegerTy(32)) {
- CReg = X86::ECX;
- RC = &X86::GR32RegClass;
- switch (I->getOpcode()) {
- default: llvm_unreachable("Unexpected shift opcode");
- case Instruction::LShr: OpReg = X86::SHR32rCL; break;
- case Instruction::AShr: OpReg = X86::SAR32rCL; break;
- case Instruction::Shl: OpReg = X86::SHL32rCL; break;
- }
- } else if (I->getType()->isIntegerTy(64)) {
- CReg = X86::RCX;
- RC = &X86::GR64RegClass;
- switch (I->getOpcode()) {
- default: llvm_unreachable("Unexpected shift opcode");
- case Instruction::LShr: OpReg = X86::SHR64rCL; break;
- case Instruction::AShr: OpReg = X86::SAR64rCL; break;
- case Instruction::Shl: OpReg = X86::SHL64rCL; break;
- }
- } else {
- return false;
- }
- MVT VT;
- if (!isTypeLegal(I->getType(), VT))
- return false;
- Register Op0Reg = getRegForValue(I->getOperand(0));
- if (Op0Reg == 0) return false;
- Register Op1Reg = getRegForValue(I->getOperand(1));
- if (Op1Reg == 0) return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
- CReg).addReg(Op1Reg);
- // The shift instruction uses X86::CL. If we defined a super-register
- // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
- if (CReg != X86::CL)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::KILL), X86::CL)
- .addReg(CReg, RegState::Kill);
- Register ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(OpReg), ResultReg)
- .addReg(Op0Reg);
- updateValueMap(I, ResultReg);
- return true;
- }
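- // Illustrative sketch: `shl i32 %a, %b` becomes roughly
- //   $ecx = COPY %b
- //   $cl = KILL killed $ecx         ; precisely describe the CL use
- //   %r:gr32 = SHL32rCL %a          ; shift amount implicitly in CL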
- bool X86FastISel::X86SelectDivRem(const Instruction *I) {
- const static unsigned NumTypes = 4; // i8, i16, i32, i64
- const static unsigned NumOps = 4; // SDiv, SRem, UDiv, URem
- const static bool S = true; // IsSigned
- const static bool U = false; // !IsSigned
- const static unsigned Copy = TargetOpcode::COPY;
- // For the X86 DIV/IDIV instruction, in most cases the dividend
- // (numerator) must be in a specific register pair highreg:lowreg,
- // producing the quotient in lowreg and the remainder in highreg.
- // For most data types, to set up the instruction, the dividend is
- // copied into lowreg, and lowreg is sign-extended or zero-extended
- // into highreg. The exception is i8, where the dividend is defined
- // as a single register rather than a register pair, and we
- // therefore directly sign-extend or zero-extend the dividend into
- // lowreg, instead of copying, and ignore the highreg.
- const static struct DivRemEntry {
- // The following portion depends only on the data type.
- const TargetRegisterClass *RC;
- unsigned LowInReg; // low part of the register pair
- unsigned HighInReg; // high part of the register pair
- // The following portion depends on both the data type and the operation.
- struct DivRemResult {
- unsigned OpDivRem; // The specific DIV/IDIV opcode to use.
- unsigned OpSignExtend; // Opcode for sign-extending lowreg into
- // highreg, or copying a zero into highreg.
- unsigned OpCopy; // Opcode for copying dividend into lowreg, or
- // zero/sign-extending into lowreg for i8.
- unsigned DivRemResultReg; // Register containing the desired result.
- bool IsOpSigned; // Whether to use signed or unsigned form.
- } ResultTable[NumOps];
- } OpTable[NumTypes] = {
- { &X86::GR8RegClass, X86::AX, 0, {
- { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S }, // SDiv
- { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S }, // SRem
- { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U }, // UDiv
- { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U }, // URem
- }
- }, // i8
- { &X86::GR16RegClass, X86::AX, X86::DX, {
- { X86::IDIV16r, X86::CWD, Copy, X86::AX, S }, // SDiv
- { X86::IDIV16r, X86::CWD, Copy, X86::DX, S }, // SRem
- { X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U }, // UDiv
- { X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U }, // URem
- }
- }, // i16
- { &X86::GR32RegClass, X86::EAX, X86::EDX, {
- { X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S }, // SDiv
- { X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S }, // SRem
- { X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U }, // UDiv
- { X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U }, // URem
- }
- }, // i32
- { &X86::GR64RegClass, X86::RAX, X86::RDX, {
- { X86::IDIV64r, X86::CQO, Copy, X86::RAX, S }, // SDiv
- { X86::IDIV64r, X86::CQO, Copy, X86::RDX, S }, // SRem
- { X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U }, // UDiv
- { X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U }, // URem
- }
- }, // i64
- };
- MVT VT;
- if (!isTypeLegal(I->getType(), VT))
- return false;
- unsigned TypeIndex, OpIndex;
- switch (VT.SimpleTy) {
- default: return false;
- case MVT::i8: TypeIndex = 0; break;
- case MVT::i16: TypeIndex = 1; break;
- case MVT::i32: TypeIndex = 2; break;
- case MVT::i64: TypeIndex = 3;
- if (!Subtarget->is64Bit())
- return false;
- break;
- }
- switch (I->getOpcode()) {
- default: llvm_unreachable("Unexpected div/rem opcode");
- case Instruction::SDiv: OpIndex = 0; break;
- case Instruction::SRem: OpIndex = 1; break;
- case Instruction::UDiv: OpIndex = 2; break;
- case Instruction::URem: OpIndex = 3; break;
- }
- const DivRemEntry &TypeEntry = OpTable[TypeIndex];
- const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];
- Register Op0Reg = getRegForValue(I->getOperand(0));
- if (Op0Reg == 0)
- return false;
- Register Op1Reg = getRegForValue(I->getOperand(1));
- if (Op1Reg == 0)
- return false;
- // Move op0 into low-order input register.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg);
- // Zero-extend or sign-extend into high-order input register.
- if (OpEntry.OpSignExtend) {
- if (OpEntry.IsOpSigned)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(OpEntry.OpSignExtend));
- else {
- Register Zero32 = createResultReg(&X86::GR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(X86::MOV32r0), Zero32);
- // Copy the zero into the appropriate sub/super/identical physical
- // register. Unfortunately the operations needed are not uniform enough
- // to fit neatly into the table above.
- if (VT == MVT::i16) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(Copy), TypeEntry.HighInReg)
- .addReg(Zero32, 0, X86::sub_16bit);
- } else if (VT == MVT::i32) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(Copy), TypeEntry.HighInReg)
- .addReg(Zero32);
- } else if (VT == MVT::i64) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
- .addImm(0).addReg(Zero32).addImm(X86::sub_32bit);
- }
- }
- }
- // Generate the DIV/IDIV instruction.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);
- // For i8 remainder, we can't reference ah directly, as we'll end
- // up with bogus copies like %r9b = COPY %ah. Reference ax
- // instead to prevent ah references in a rex instruction.
- //
- // The current assumption of the fast register allocator is that isel
- // won't generate explicit references to the GR8_NOREX registers. If
- // the allocator and/or the backend get enhanced to be more robust in
- // that regard, this can be, and should be, removed.
- unsigned ResultReg = 0;
- if ((I->getOpcode() == Instruction::SRem ||
- I->getOpcode() == Instruction::URem) &&
- OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {
- Register SourceSuperReg = createResultReg(&X86::GR16RegClass);
- Register ResultSuperReg = createResultReg(&X86::GR16RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(Copy), SourceSuperReg).addReg(X86::AX);
- // Shift AX right by 8 bits instead of using AH.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SHR16ri),
- ResultSuperReg).addReg(SourceSuperReg).addImm(8);
- // Now reference the 8-bit subreg of the result.
- ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
- X86::sub_8bit);
- }
- // Copy the result out of the physreg if we haven't already.
- if (!ResultReg) {
- ResultReg = createResultReg(TypeEntry.RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Copy), ResultReg)
- .addReg(OpEntry.DivRemResultReg);
- }
- updateValueMap(I, ResultReg);
- return true;
- }
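- // Illustrative sketch: `srem i32 %a, %b` uses the i32 row of OpTable above:
- //   $eax = COPY %a
- //   CDQ                            ; sign-extend EAX into EDX
- //   IDIV32r %b                     ; quotient in EAX, remainder in EDX
- //   %r:gr32 = COPY $edx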
- /// Emit a conditional move instruction (if they are supported) to lower
- /// the select.
- bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
- // Check if the subtarget supports these instructions.
- if (!Subtarget->canUseCMOV())
- return false;
- // FIXME: Add support for i8.
- if (RetVT < MVT::i16 || RetVT > MVT::i64)
- return false;
- const Value *Cond = I->getOperand(0);
- const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
- bool NeedTest = true;
- X86::CondCode CC = X86::COND_NE;
- // Optimize conditions coming from a compare if both instructions are in the
- // same basic block (values defined in other basic blocks may not have
- // initialized registers).
- const auto *CI = dyn_cast<CmpInst>(Cond);
- if (CI && (CI->getParent() == I->getParent())) {
- CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
- // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
- static const uint16_t SETFOpcTable[2][3] = {
- { X86::COND_NP, X86::COND_E, X86::TEST8rr },
- { X86::COND_P, X86::COND_NE, X86::OR8rr }
- };
- const uint16_t *SETFOpc = nullptr;
- switch (Predicate) {
- default: break;
- case CmpInst::FCMP_OEQ:
- SETFOpc = &SETFOpcTable[0][0];
- Predicate = CmpInst::ICMP_NE;
- break;
- case CmpInst::FCMP_UNE:
- SETFOpc = &SETFOpcTable[1][0];
- Predicate = CmpInst::ICMP_NE;
- break;
- }
- bool NeedSwap;
- std::tie(CC, NeedSwap) = X86::getX86ConditionCode(Predicate);
- assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
- const Value *CmpLHS = CI->getOperand(0);
- const Value *CmpRHS = CI->getOperand(1);
- if (NeedSwap)
- std::swap(CmpLHS, CmpRHS);
- EVT CmpVT = TLI.getValueType(DL, CmpLHS->getType());
- // Emit a compare of the LHS and RHS, setting the flags.
- if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
- return false;
- if (SETFOpc) {
- Register FlagReg1 = createResultReg(&X86::GR8RegClass);
- Register FlagReg2 = createResultReg(&X86::GR8RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
- FlagReg1).addImm(SETFOpc[0]);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
- FlagReg2).addImm(SETFOpc[1]);
- auto const &II = TII.get(SETFOpc[2]);
- if (II.getNumDefs()) {
- Register TmpReg = createResultReg(&X86::GR8RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, TmpReg)
- .addReg(FlagReg2).addReg(FlagReg1);
- } else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
- .addReg(FlagReg2).addReg(FlagReg1);
- }
- }
- NeedTest = false;
- } else if (foldX86XALUIntrinsic(CC, I, Cond)) {
- // Request a register for the condition even though only EFLAGS is used;
- // otherwise the intrinsic might be completely optimized away.
- Register TmpReg = getRegForValue(Cond);
- if (TmpReg == 0)
- return false;
- NeedTest = false;
- }
- if (NeedTest) {
- // Selects operate on i1, but CondReg is 8 bits wide and may contain
- // garbage; only its least significant bit is guaranteed to be accurate.
- // Reading more than the LSB may yield non-zero values even when the LSB is
- // zero. Therefore, we have to truncate CondReg to i1 for the select. This
- // is achieved by performing a TEST against 1.
- Register CondReg = getRegForValue(Cond);
- if (CondReg == 0)
- return false;
- // In case OpReg is a K register, COPY to a GPR
- if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
- unsigned KCondReg = CondReg;
- CondReg = createResultReg(&X86::GR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), CondReg)
- .addReg(KCondReg);
- CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit);
- }
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri))
- .addReg(CondReg)
- .addImm(1);
- }
- const Value *LHS = I->getOperand(1);
- const Value *RHS = I->getOperand(2);
- Register RHSReg = getRegForValue(RHS);
- Register LHSReg = getRegForValue(LHS);
- if (!LHSReg || !RHSReg)
- return false;
- const TargetRegisterInfo &TRI = *Subtarget->getRegisterInfo();
- unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(*RC)/8);
- Register ResultReg = fastEmitInst_rri(Opc, RC, RHSReg, LHSReg, CC);
- updateValueMap(I, ResultReg);
- return true;
- }
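- // Illustrative sketch: `select i1 %c, i32 %a, i32 %b` with a condition that
- // cannot be folded into a compare becomes roughly
- //   TEST8ri %c, 1
- //   %r:gr32 = CMOV32rr %b, %a, COND_NE   ; picks %a when the bit is set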
- /// Emit SSE or AVX instructions to lower the select.
- ///
- /// Try to use SSE1/SSE2 instructions to simulate a select without branches.
- /// This lowers fp selects into a CMP/AND/ANDN/OR sequence when the necessary
- /// SSE instructions are available. If AVX is available, try to use a VBLENDV.
- bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
- // Optimize conditions coming from a compare if both instructions are in the
- // same basic block (values defined in other basic blocks may not have
- // initialized registers).
- const auto *CI = dyn_cast<FCmpInst>(I->getOperand(0));
- if (!CI || (CI->getParent() != I->getParent()))
- return false;
- if (I->getType() != CI->getOperand(0)->getType() ||
- !((Subtarget->hasSSE1() && RetVT == MVT::f32) ||
- (Subtarget->hasSSE2() && RetVT == MVT::f64)))
- return false;
- const Value *CmpLHS = CI->getOperand(0);
- const Value *CmpRHS = CI->getOperand(1);
- CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
- // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
- // We don't have to materialize a zero constant for this case and can just use
- // %x again on the RHS.
- if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
- const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
- if (CmpRHSC && CmpRHSC->isNullValue())
- CmpRHS = CmpLHS;
- }
- unsigned CC;
- bool NeedSwap;
- std::tie(CC, NeedSwap) = getX86SSEConditionCode(Predicate);
- if (CC > 7 && !Subtarget->hasAVX())
- return false;
- if (NeedSwap)
- std::swap(CmpLHS, CmpRHS);
- const Value *LHS = I->getOperand(1);
- const Value *RHS = I->getOperand(2);
- Register LHSReg = getRegForValue(LHS);
- Register RHSReg = getRegForValue(RHS);
- Register CmpLHSReg = getRegForValue(CmpLHS);
- Register CmpRHSReg = getRegForValue(CmpRHS);
- if (!LHSReg || !RHSReg || !CmpLHSReg || !CmpRHSReg)
- return false;
- const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
- unsigned ResultReg;
- if (Subtarget->hasAVX512()) {
- // If we have AVX512 we can use a mask compare and masked movss/sd.
- const TargetRegisterClass *VR128X = &X86::VR128XRegClass;
- const TargetRegisterClass *VK1 = &X86::VK1RegClass;
- unsigned CmpOpcode =
- (RetVT == MVT::f32) ? X86::VCMPSSZrr : X86::VCMPSDZrr;
- Register CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpRHSReg,
- CC);
- // Need an IMPLICIT_DEF for the input that is used to generate the upper
- // bits of the result register since it's not based on any of the inputs.
- Register ImplicitDefReg = createResultReg(VR128X);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
- // Place RHSReg in the passthru operand of the masked movss/sd and put
- // LHSReg in the input. The mask input comes from the compare.
- unsigned MovOpcode =
- (RetVT == MVT::f32) ? X86::VMOVSSZrrk : X86::VMOVSDZrrk;
- unsigned MovReg = fastEmitInst_rrrr(MovOpcode, VR128X, RHSReg, CmpReg,
- ImplicitDefReg, LHSReg);
- ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), ResultReg).addReg(MovReg);
- } else if (Subtarget->hasAVX()) {
- const TargetRegisterClass *VR128 = &X86::VR128RegClass;
- // If we have AVX, create 1 blendv instead of 3 logic instructions.
- // Blendv was introduced with SSE 4.1, but the 2 register form implicitly
- // uses XMM0 as the selection register. That may need just as many
- // instructions as the AND/ANDN/OR sequence due to register moves, so
- // don't bother.
- unsigned CmpOpcode =
- (RetVT == MVT::f32) ? X86::VCMPSSrr : X86::VCMPSDrr;
- unsigned BlendOpcode =
- (RetVT == MVT::f32) ? X86::VBLENDVPSrr : X86::VBLENDVPDrr;
- Register CmpReg = fastEmitInst_rri(CmpOpcode, RC, CmpLHSReg, CmpRHSReg,
- CC);
- Register VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, LHSReg,
- CmpReg);
- ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), ResultReg).addReg(VBlendReg);
- } else {
- // Choose the SSE instruction sequence based on data type (float or double).
- static const uint16_t OpcTable[2][4] = {
- { X86::CMPSSrr, X86::ANDPSrr, X86::ANDNPSrr, X86::ORPSrr },
- { X86::CMPSDrr, X86::ANDPDrr, X86::ANDNPDrr, X86::ORPDrr }
- };
- const uint16_t *Opc = nullptr;
- switch (RetVT.SimpleTy) {
- default: return false;
- case MVT::f32: Opc = &OpcTable[0][0]; break;
- case MVT::f64: Opc = &OpcTable[1][0]; break;
- }
- const TargetRegisterClass *VR128 = &X86::VR128RegClass;
- Register CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpRHSReg, CC);
- Register AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg, LHSReg);
- Register AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg, RHSReg);
- Register OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, AndReg);
- ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), ResultReg).addReg(OrReg);
- }
- updateValueMap(I, ResultReg);
- return true;
- }
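- // Illustrative sketch of the plain-SSE path above for
- // `select (fcmp olt float %x, %y), float %a, float %b` (CC 1 = LT):
- //   %m = CMPSSrr %x, %y, 1         ; all-ones where %x < %y, else zero
- //   %t = ANDPSrr %m, %a            ; %a where the mask is set
- //   %f = ANDNPSrr %m, %b           ; %b where the mask is clear
- //   %r = ORPSrr %f, %t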
- bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
- // These are pseudo CMOV instructions that will later be expanded into
- // control flow.
- unsigned Opc;
- switch (RetVT.SimpleTy) {
- default: return false;
- case MVT::i8: Opc = X86::CMOV_GR8; break;
- case MVT::i16: Opc = X86::CMOV_GR16; break;
- case MVT::i32: Opc = X86::CMOV_GR32; break;
- case MVT::f16:
- Opc = Subtarget->hasAVX512() ? X86::CMOV_FR16X : X86::CMOV_FR16; break;
- case MVT::f32:
- Opc = Subtarget->hasAVX512() ? X86::CMOV_FR32X : X86::CMOV_FR32; break;
- case MVT::f64:
- Opc = Subtarget->hasAVX512() ? X86::CMOV_FR64X : X86::CMOV_FR64; break;
- }
- const Value *Cond = I->getOperand(0);
- X86::CondCode CC = X86::COND_NE;
- // Optimize conditions coming from a compare if both instructions are in the
- // same basic block (values defined in other basic blocks may not have
- // initialized registers).
- const auto *CI = dyn_cast<CmpInst>(Cond);
- if (CI && (CI->getParent() == I->getParent())) {
- bool NeedSwap;
- std::tie(CC, NeedSwap) = X86::getX86ConditionCode(CI->getPredicate());
- if (CC > X86::LAST_VALID_COND)
- return false;
- const Value *CmpLHS = CI->getOperand(0);
- const Value *CmpRHS = CI->getOperand(1);
- if (NeedSwap)
- std::swap(CmpLHS, CmpRHS);
- EVT CmpVT = TLI.getValueType(DL, CmpLHS->getType());
- if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
- return false;
- } else {
- Register CondReg = getRegForValue(Cond);
- if (CondReg == 0)
- return false;
- // In case OpReg is a K register, COPY to a GPR
- if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
- unsigned KCondReg = CondReg;
- CondReg = createResultReg(&X86::GR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), CondReg)
- .addReg(KCondReg);
- CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit);
- }
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri))
- .addReg(CondReg)
- .addImm(1);
- }
- const Value *LHS = I->getOperand(1);
- const Value *RHS = I->getOperand(2);
- Register LHSReg = getRegForValue(LHS);
- Register RHSReg = getRegForValue(RHS);
- if (!LHSReg || !RHSReg)
- return false;
- const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
- Register ResultReg =
- fastEmitInst_rri(Opc, RC, RHSReg, LHSReg, CC);
- updateValueMap(I, ResultReg);
- return true;
- }
- bool X86FastISel::X86SelectSelect(const Instruction *I) {
- MVT RetVT;
- if (!isTypeLegal(I->getType(), RetVT))
- return false;
- // Check if we can fold the select.
- if (const auto *CI = dyn_cast<CmpInst>(I->getOperand(0))) {
- CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
- const Value *Opnd = nullptr;
- switch (Predicate) {
- default: break;
- case CmpInst::FCMP_FALSE: Opnd = I->getOperand(2); break;
- case CmpInst::FCMP_TRUE: Opnd = I->getOperand(1); break;
- }
- // No need for a select anymore - this is an unconditional move.
- if (Opnd) {
- Register OpReg = getRegForValue(Opnd);
- if (OpReg == 0)
- return false;
- const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
- Register ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), ResultReg)
- .addReg(OpReg);
- updateValueMap(I, ResultReg);
- return true;
- }
- }
- // First try to use real conditional move instructions.
- if (X86FastEmitCMoveSelect(RetVT, I))
- return true;
- // Try to use a sequence of SSE instructions to simulate a conditional move.
- if (X86FastEmitSSESelect(RetVT, I))
- return true;
- // Fall back to pseudo conditional move instructions, which will later be
- // converted to control flow.
- if (X86FastEmitPseudoSelect(RetVT, I))
- return true;
- return false;
- }
- // Common code for X86SelectSIToFP and X86SelectUIToFP.
- bool X86FastISel::X86SelectIntToFP(const Instruction *I, bool IsSigned) {
- // The target-independent selection algorithm in FastISel already knows how
- // to select a SINT_TO_FP if the target is SSE but not AVX.
- // Early exit if the subtarget doesn't have AVX.
- // Unsigned conversions require AVX512.
- bool HasAVX512 = Subtarget->hasAVX512();
- if (!Subtarget->hasAVX() || (!IsSigned && !HasAVX512))
- return false;
- // TODO: We could sign extend narrower types.
- MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
- if (SrcVT != MVT::i32 && SrcVT != MVT::i64)
- return false;
- // Select integer to float/double conversion.
- Register OpReg = getRegForValue(I->getOperand(0));
- if (OpReg == 0)
- return false;
- unsigned Opcode;
- static const uint16_t SCvtOpc[2][2][2] = {
- { { X86::VCVTSI2SSrr, X86::VCVTSI642SSrr },
- { X86::VCVTSI2SDrr, X86::VCVTSI642SDrr } },
- { { X86::VCVTSI2SSZrr, X86::VCVTSI642SSZrr },
- { X86::VCVTSI2SDZrr, X86::VCVTSI642SDZrr } },
- };
- static const uint16_t UCvtOpc[2][2] = {
- { X86::VCVTUSI2SSZrr, X86::VCVTUSI642SSZrr },
- { X86::VCVTUSI2SDZrr, X86::VCVTUSI642SDZrr },
- };
- bool Is64Bit = SrcVT == MVT::i64;
- if (I->getType()->isDoubleTy()) {
- // s/uitofp int -> double
- Opcode = IsSigned ? SCvtOpc[HasAVX512][1][Is64Bit] : UCvtOpc[1][Is64Bit];
- } else if (I->getType()->isFloatTy()) {
- // s/uitofp int -> float
- Opcode = IsSigned ? SCvtOpc[HasAVX512][0][Is64Bit] : UCvtOpc[0][Is64Bit];
- } else
- return false;
- MVT DstVT = TLI.getValueType(DL, I->getType()).getSimpleVT();
- const TargetRegisterClass *RC = TLI.getRegClassFor(DstVT);
- Register ImplicitDefReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
- Register ResultReg = fastEmitInst_rr(Opcode, RC, ImplicitDefReg, OpReg);
- updateValueMap(I, ResultReg);
- return true;
- }
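- // Illustrative sketch: on an AVX (non-AVX512) target, `sitofp i32 %i to
- // double` selects SCvtOpc[0][1][0] above and emits roughly
- //   %d:fr64 = IMPLICIT_DEF         ; upper bits are not based on any input
- //   %r:fr64 = VCVTSI2SDrr %d, %i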
- bool X86FastISel::X86SelectSIToFP(const Instruction *I) {
- return X86SelectIntToFP(I, /*IsSigned*/true);
- }
- bool X86FastISel::X86SelectUIToFP(const Instruction *I) {
- return X86SelectIntToFP(I, /*IsSigned*/false);
- }
- // Helper method used by X86SelectFPExt and X86SelectFPTrunc.
- bool X86FastISel::X86SelectFPExtOrFPTrunc(const Instruction *I,
- unsigned TargetOpc,
- const TargetRegisterClass *RC) {
- assert((I->getOpcode() == Instruction::FPExt ||
- I->getOpcode() == Instruction::FPTrunc) &&
- "Instruction must be an FPExt or FPTrunc!");
- bool HasAVX = Subtarget->hasAVX();
- Register OpReg = getRegForValue(I->getOperand(0));
- if (OpReg == 0)
- return false;
- unsigned ImplicitDefReg = 0;
- if (HasAVX) {
- ImplicitDefReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
- }
- Register ResultReg = createResultReg(RC);
- MachineInstrBuilder MIB;
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpc),
- ResultReg);
- if (HasAVX)
- MIB.addReg(ImplicitDefReg);
- MIB.addReg(OpReg);
- updateValueMap(I, ResultReg);
- return true;
- }
- bool X86FastISel::X86SelectFPExt(const Instruction *I) {
- if (Subtarget->hasSSE2() && I->getType()->isDoubleTy() &&
- I->getOperand(0)->getType()->isFloatTy()) {
- bool HasAVX512 = Subtarget->hasAVX512();
- // fpext from float to double.
- unsigned Opc =
- HasAVX512 ? X86::VCVTSS2SDZrr
- : Subtarget->hasAVX() ? X86::VCVTSS2SDrr : X86::CVTSS2SDrr;
- return X86SelectFPExtOrFPTrunc(I, Opc, TLI.getRegClassFor(MVT::f64));
- }
- return false;
- }
- bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
- if (Subtarget->hasSSE2() && I->getType()->isFloatTy() &&
- I->getOperand(0)->getType()->isDoubleTy()) {
- bool HasAVX512 = Subtarget->hasAVX512();
- // fptrunc from double to float.
- unsigned Opc =
- HasAVX512 ? X86::VCVTSD2SSZrr
- : Subtarget->hasAVX() ? X86::VCVTSD2SSrr : X86::CVTSD2SSrr;
- return X86SelectFPExtOrFPTrunc(I, Opc, TLI.getRegClassFor(MVT::f32));
- }
- return false;
- }
- bool X86FastISel::X86SelectTrunc(const Instruction *I) {
- EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
- EVT DstVT = TLI.getValueType(DL, I->getType());
- // This code only handles truncation to byte.
- if (DstVT != MVT::i8 && DstVT != MVT::i1)
- return false;
- if (!TLI.isTypeLegal(SrcVT))
- return false;
- Register InputReg = getRegForValue(I->getOperand(0));
- if (!InputReg)
- // Unhandled operand. Halt "fast" selection and bail.
- return false;
- if (SrcVT == MVT::i8) {
- // Truncate from i8 to i1; no code needed.
- updateValueMap(I, InputReg);
- return true;
- }
- // Issue an extract_subreg.
- Register ResultReg = fastEmitInst_extractsubreg(MVT::i8, InputReg,
- X86::sub_8bit);
- if (!ResultReg)
- return false;
- updateValueMap(I, ResultReg);
- return true;
- }
- bool X86FastISel::IsMemcpySmall(uint64_t Len) {
- return Len <= (Subtarget->is64Bit() ? 32 : 16);
- }
- bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
- X86AddressMode SrcAM, uint64_t Len) {
- // Make sure we don't bloat code by inlining very large memcpy's.
- if (!IsMemcpySmall(Len))
- return false;
- bool i64Legal = Subtarget->is64Bit();
- // We don't care about alignment here since we just emit integer accesses.
- while (Len) {
- MVT VT;
- if (Len >= 8 && i64Legal)
- VT = MVT::i64;
- else if (Len >= 4)
- VT = MVT::i32;
- else if (Len >= 2)
- VT = MVT::i16;
- else
- VT = MVT::i8;
- unsigned Reg;
- bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg);
- RV &= X86FastEmitStore(VT, Reg, DestAM);
- assert(RV && "Failed to emit load or store??");
- (void)RV;
- unsigned Size = VT.getSizeInBits()/8;
- Len -= Size;
- DestAM.Disp += Size;
- SrcAM.Disp += Size;
- }
- return true;
- }
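- // Illustrative sketch: a 13-byte copy on x86-64 is split by the loop above
- // into one i64, one i32, and one i8 load/store pair, advancing both
- // displacements by 8, 4, and 1 in turn.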
- bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
- // FIXME: Handle more intrinsics.
- switch (II->getIntrinsicID()) {
- default: return false;
- case Intrinsic::convert_from_fp16:
- case Intrinsic::convert_to_fp16: {
- if (Subtarget->useSoftFloat() || !Subtarget->hasF16C())
- return false;
- const Value *Op = II->getArgOperand(0);
- Register InputReg = getRegForValue(Op);
- if (InputReg == 0)
- return false;
- // F16C only allows converting from float to half and from half to float.
- bool IsFloatToHalf = II->getIntrinsicID() == Intrinsic::convert_to_fp16;
- if (IsFloatToHalf) {
- if (!Op->getType()->isFloatTy())
- return false;
- } else {
- if (!II->getType()->isFloatTy())
- return false;
- }
- unsigned ResultReg = 0;
- const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::v8i16);
- if (IsFloatToHalf) {
- // 'InputReg' is implicitly promoted from register class FR32 to
- // register class VR128 by method 'constrainOperandRegClass' which is
- // directly called by 'fastEmitInst_ri'.
- // Instruction VCVTPS2PHrr takes an extra immediate operand which is
- // used to provide rounding control: use MXCSR.RC, encoded as 0b100.
- // It's consistent with the other FP instructions, which are usually
- // controlled by MXCSR.
- unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPS2PHZ128rr
- : X86::VCVTPS2PHrr;
- InputReg = fastEmitInst_ri(Opc, RC, InputReg, 4);
- // Move the lower 32-bits of ResultReg to another register of class GR32.
- Opc = Subtarget->hasAVX512() ? X86::VMOVPDI2DIZrr
- : X86::VMOVPDI2DIrr;
- ResultReg = createResultReg(&X86::GR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
- .addReg(InputReg, RegState::Kill);
- // The result value is in the lower 16-bits of ResultReg.
- unsigned RegIdx = X86::sub_16bit;
- ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, RegIdx);
- } else {
- assert(Op->getType()->isIntegerTy(16) && "Expected a 16-bit integer!");
- // Explicitly zero-extend the input to 32-bit.
- InputReg = fastEmit_r(MVT::i16, MVT::i32, ISD::ZERO_EXTEND, InputReg);
- // The following SCALAR_TO_VECTOR will be expanded into a VMOVDI2PDIrr.
- InputReg = fastEmit_r(MVT::i32, MVT::v4i32, ISD::SCALAR_TO_VECTOR,
- InputReg);
- unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPH2PSZ128rr
- : X86::VCVTPH2PSrr;
- InputReg = fastEmitInst_r(Opc, RC, InputReg);
- // The result value is in the lower 32-bits of ResultReg.
- // Emit an explicit copy from register class VR128 to register class FR32.
- ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), ResultReg)
- .addReg(InputReg, RegState::Kill);
- }
- updateValueMap(II, ResultReg);
- return true;
- }
- case Intrinsic::frameaddress: {
- MachineFunction *MF = FuncInfo.MF;
- if (MF->getTarget().getMCAsmInfo()->usesWindowsCFI())
- return false;
- Type *RetTy = II->getCalledFunction()->getReturnType();
- MVT VT;
- if (!isTypeLegal(RetTy, VT))
- return false;
- unsigned Opc;
- const TargetRegisterClass *RC = nullptr;
- switch (VT.SimpleTy) {
- default: llvm_unreachable("Invalid result type for frameaddress.");
- case MVT::i32: Opc = X86::MOV32rm; RC = &X86::GR32RegClass; break;
- case MVT::i64: Opc = X86::MOV64rm; RC = &X86::GR64RegClass; break;
- }
- // This needs to be set before we call getPtrSizedFrameRegister, otherwise
- // we get the wrong frame register.
- MachineFrameInfo &MFI = MF->getFrameInfo();
- MFI.setFrameAddressIsTaken(true);
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
- unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(*MF);
- assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
- (FrameReg == X86::EBP && VT == MVT::i32)) &&
- "Invalid Frame Register!");
- // Always make a copy of the frame register to a vreg first, so that we
- // never directly reference the frame register (the
- // TwoAddressInstructionPass doesn't like that).
- Register SrcReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), SrcReg).addReg(FrameReg);
- // Now recursively load from the frame address.
- // movq (%rbp), %rax
- // movq (%rax), %rax
- // movq (%rax), %rax
- // ...
- unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
- while (Depth--) {
- Register DestReg = createResultReg(RC);
- addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(Opc), DestReg), SrcReg);
- SrcReg = DestReg;
- }
- updateValueMap(II, SrcReg);
- return true;
- }
- case Intrinsic::memcpy: {
- const MemCpyInst *MCI = cast<MemCpyInst>(II);
- // Don't handle volatile or variable length memcpys.
- if (MCI->isVolatile())
- return false;
- if (isa<ConstantInt>(MCI->getLength())) {
- // Small memcpy's are common enough that we want to do them
- // without a call if possible.
- uint64_t Len = cast<ConstantInt>(MCI->getLength())->getZExtValue();
- if (IsMemcpySmall(Len)) {
- X86AddressMode DestAM, SrcAM;
- if (!X86SelectAddress(MCI->getRawDest(), DestAM) ||
- !X86SelectAddress(MCI->getRawSource(), SrcAM))
- return false;
- TryEmitSmallMemcpy(DestAM, SrcAM, Len);
- return true;
- }
- }
- unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
- if (!MCI->getLength()->getType()->isIntegerTy(SizeWidth))
- return false;
- if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
- return false;
- return lowerCallTo(II, "memcpy", II->arg_size() - 1);
- }
- case Intrinsic::memset: {
- const MemSetInst *MSI = cast<MemSetInst>(II);
- if (MSI->isVolatile())
- return false;
- unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
- if (!MSI->getLength()->getType()->isIntegerTy(SizeWidth))
- return false;
- if (MSI->getDestAddressSpace() > 255)
- return false;
- return lowerCallTo(II, "memset", II->arg_size() - 1);
- }
- case Intrinsic::stackprotector: {
- // Emit code to store the stack guard onto the stack.
- EVT PtrTy = TLI.getPointerTy(DL);
- const Value *Op1 = II->getArgOperand(0); // The guard's value.
- const AllocaInst *Slot = cast<AllocaInst>(II->getArgOperand(1));
- MFI.setStackProtectorIndex(FuncInfo.StaticAllocaMap[Slot]);
- // Grab the frame index.
- X86AddressMode AM;
- if (!X86SelectAddress(Slot, AM)) return false;
- if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;
- return true;
- }
- case Intrinsic::dbg_declare: {
- const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
- X86AddressMode AM;
- assert(DI->getAddress() && "Null address should be checked earlier!");
- if (!X86SelectAddress(DI->getAddress(), AM))
- return false;
- const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
- assert(DI->getVariable()->isValidLocationForIntrinsic(MIMD.getDL()) &&
- "Expected inlined-at fields to agree");
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II), AM)
- .addImm(0)
- .addMetadata(DI->getVariable())
- .addMetadata(DI->getExpression());
- return true;
- }
- case Intrinsic::trap: {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TRAP));
- return true;
- }
- case Intrinsic::sqrt: {
- if (!Subtarget->hasSSE1())
- return false;
- Type *RetTy = II->getCalledFunction()->getReturnType();
- MVT VT;
- if (!isTypeLegal(RetTy, VT))
- return false;
- // Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT
- // is not generated by FastISel yet.
- // FIXME: Update this code once tablegen can handle it.
- static const uint16_t SqrtOpc[3][2] = {
- { X86::SQRTSSr, X86::SQRTSDr },
- { X86::VSQRTSSr, X86::VSQRTSDr },
- { X86::VSQRTSSZr, X86::VSQRTSDZr },
- };
- unsigned AVXLevel = Subtarget->hasAVX512() ? 2 :
- Subtarget->hasAVX() ? 1 :
- 0;
- unsigned Opc;
- switch (VT.SimpleTy) {
- default: return false;
- case MVT::f32: Opc = SqrtOpc[AVXLevel][0]; break;
- case MVT::f64: Opc = SqrtOpc[AVXLevel][1]; break;
- }
- const Value *SrcVal = II->getArgOperand(0);
- Register SrcReg = getRegForValue(SrcVal);
- if (SrcReg == 0)
- return false;
- const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
- unsigned ImplicitDefReg = 0;
- if (AVXLevel > 0) {
- ImplicitDefReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
- }
- Register ResultReg = createResultReg(RC);
- MachineInstrBuilder MIB;
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
- ResultReg);
- if (ImplicitDefReg)
- MIB.addReg(ImplicitDefReg);
- MIB.addReg(SrcReg);
- updateValueMap(II, ResultReg);
- return true;
- }
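- // Illustrative sketch: with AVX but not AVX512 (level 1 above),
- // `llvm.sqrt.f32` emits roughly
- //   %d:fr32 = IMPLICIT_DEF
- //   %r:fr32 = VSQRTSSr %d, %src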
- case Intrinsic::sadd_with_overflow:
- case Intrinsic::uadd_with_overflow:
- case Intrinsic::ssub_with_overflow:
- case Intrinsic::usub_with_overflow:
- case Intrinsic::smul_with_overflow:
- case Intrinsic::umul_with_overflow: {
- // This implements the basic lowering of the xalu with overflow intrinsics
- // into add/sub/mul followed by either seto or setb.
- const Function *Callee = II->getCalledFunction();
- auto *Ty = cast<StructType>(Callee->getReturnType());
- Type *RetTy = Ty->getTypeAtIndex(0U);
- assert(Ty->getTypeAtIndex(1)->isIntegerTy() &&
- Ty->getTypeAtIndex(1)->getScalarSizeInBits() == 1 &&
- "Overflow value expected to be an i1");
- MVT VT;
- if (!isTypeLegal(RetTy, VT))
- return false;
- if (VT < MVT::i8 || VT > MVT::i64)
- return false;
- const Value *LHS = II->getArgOperand(0);
- const Value *RHS = II->getArgOperand(1);
- // Canonicalize immediate to the RHS.
- if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) && II->isCommutative())
- std::swap(LHS, RHS);
- unsigned BaseOpc, CondCode;
- switch (II->getIntrinsicID()) {
- default: llvm_unreachable("Unexpected intrinsic!");
- case Intrinsic::sadd_with_overflow:
- BaseOpc = ISD::ADD; CondCode = X86::COND_O; break;
- case Intrinsic::uadd_with_overflow:
- BaseOpc = ISD::ADD; CondCode = X86::COND_B; break;
- case Intrinsic::ssub_with_overflow:
- BaseOpc = ISD::SUB; CondCode = X86::COND_O; break;
- case Intrinsic::usub_with_overflow:
- BaseOpc = ISD::SUB; CondCode = X86::COND_B; break;
- case Intrinsic::smul_with_overflow:
- BaseOpc = X86ISD::SMUL; CondCode = X86::COND_O; break;
- case Intrinsic::umul_with_overflow:
- BaseOpc = X86ISD::UMUL; CondCode = X86::COND_O; break;
- }
- Register LHSReg = getRegForValue(LHS);
- if (LHSReg == 0)
- return false;
- unsigned ResultReg = 0;
- // Check if we have an immediate version.
- if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
- static const uint16_t Opc[2][4] = {
- { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r },
- { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r }
- };
- if (CI->isOne() && (BaseOpc == ISD::ADD || BaseOpc == ISD::SUB) &&
- CondCode == X86::COND_O) {
- // We can use INC/DEC.
- ResultReg = createResultReg(TLI.getRegClassFor(VT));
- bool IsDec = BaseOpc == ISD::SUB;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
- .addReg(LHSReg);
- } else
- ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, CI->getZExtValue());
- }
- unsigned RHSReg;
- if (!ResultReg) {
- RHSReg = getRegForValue(RHS);
- if (RHSReg == 0)
- return false;
- ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, RHSReg);
- }
- // FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit
- // it manually.
- if (BaseOpc == X86ISD::UMUL && !ResultReg) {
- static const uint16_t MULOpc[] =
- { X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r };
- static const MCPhysReg Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX };
- // First copy the first operand into AL/AX/EAX/RAX (as selected by VT),
- // which is an implicit input to the X86::MUL*r instruction.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])
- .addReg(LHSReg);
- ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
- TLI.getRegClassFor(VT), RHSReg);
- } else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
- static const uint16_t MULOpc[] =
- { X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr };
- if (VT == MVT::i8) {
- // Copy the first operand into AL, which is an implicit input to the
- // X86::IMUL8r instruction.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), X86::AL)
- .addReg(LHSReg);
- ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg);
- } else
- ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
- TLI.getRegClassFor(VT), LHSReg, RHSReg);
- }
- if (!ResultReg)
- return false;
- // Assign to a GPR since the overflow return value is lowered to a SETcc.
- Register ResultReg2 = createResultReg(&X86::GR8RegClass);
- assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers.");
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
- ResultReg2).addImm(CondCode);
- updateValueMap(II, ResultReg, 2);
- return true;
- }
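- // Illustrative sketch: `llvm.uadd.with.overflow.i32(%a, %b)` becomes roughly
- //   %s:gr32 = ADD32rr %a, %b, implicit-def $eflags
- //   %o:gr8  = SETCCr COND_B        ; carry set means unsigned overflow
- // with %s and %o occupying consecutive result registers.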
- case Intrinsic::x86_sse_cvttss2si:
- case Intrinsic::x86_sse_cvttss2si64:
- case Intrinsic::x86_sse2_cvttsd2si:
- case Intrinsic::x86_sse2_cvttsd2si64: {
- bool IsInputDouble;
- switch (II->getIntrinsicID()) {
- default: llvm_unreachable("Unexpected intrinsic.");
- case Intrinsic::x86_sse_cvttss2si:
- case Intrinsic::x86_sse_cvttss2si64:
- if (!Subtarget->hasSSE1())
- return false;
- IsInputDouble = false;
- break;
- case Intrinsic::x86_sse2_cvttsd2si:
- case Intrinsic::x86_sse2_cvttsd2si64:
- if (!Subtarget->hasSSE2())
- return false;
- IsInputDouble = true;
- break;
- }
- Type *RetTy = II->getCalledFunction()->getReturnType();
- MVT VT;
- if (!isTypeLegal(RetTy, VT))
- return false;
- static const uint16_t CvtOpc[3][2][2] = {
- { { X86::CVTTSS2SIrr, X86::CVTTSS2SI64rr },
- { X86::CVTTSD2SIrr, X86::CVTTSD2SI64rr } },
- { { X86::VCVTTSS2SIrr, X86::VCVTTSS2SI64rr },
- { X86::VCVTTSD2SIrr, X86::VCVTTSD2SI64rr } },
- { { X86::VCVTTSS2SIZrr, X86::VCVTTSS2SI64Zrr },
- { X86::VCVTTSD2SIZrr, X86::VCVTTSD2SI64Zrr } },
- };
- unsigned AVXLevel = Subtarget->hasAVX512() ? 2 :
- Subtarget->hasAVX() ? 1 :
- 0;
- unsigned Opc;
- switch (VT.SimpleTy) {
- default: llvm_unreachable("Unexpected result type.");
- case MVT::i32: Opc = CvtOpc[AVXLevel][IsInputDouble][0]; break;
- case MVT::i64: Opc = CvtOpc[AVXLevel][IsInputDouble][1]; break;
- }
- // Check if we can fold insertelement instructions into the convert.
- const Value *Op = II->getArgOperand(0);
- while (auto *IE = dyn_cast<InsertElementInst>(Op)) {
- const Value *Index = IE->getOperand(2);
- if (!isa<ConstantInt>(Index))
- break;
- unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
- if (Idx == 0) {
- Op = IE->getOperand(1);
- break;
- }
- Op = IE->getOperand(0);
- }
- Register Reg = getRegForValue(Op);
- if (Reg == 0)
- return false;
- Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
- .addReg(Reg);
- updateValueMap(II, ResultReg);
- return true;
- }
- }
- }
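- // Fast path for lowering incoming arguments without SelectionDAG. It only
- // fires for plain 64-bit SysV signatures with scalar i32/i64 and f32/f64
- // parameters. As a hypothetical example, for
- //   define i64 @f(i64 %a, double %b)
- // %a arrives in RDI and %b in XMM0 under the SysV x86-64 ABI.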
- bool X86FastISel::fastLowerArguments() {
- if (!FuncInfo.CanLowerReturn)
- return false;
- const Function *F = FuncInfo.Fn;
- if (F->isVarArg())
- return false;
- CallingConv::ID CC = F->getCallingConv();
- if (CC != CallingConv::C)
- return false;
- if (Subtarget->isCallingConvWin64(CC))
- return false;
- if (!Subtarget->is64Bit())
- return false;
- if (Subtarget->useSoftFloat())
- return false;
- // Only handle simple cases, i.e. up to 6 i32/i64 scalar arguments in GPRs
- // and up to 8 f32/f64 arguments in XMM registers.
- unsigned GPRCnt = 0;
- unsigned FPRCnt = 0;
- for (auto const &Arg : F->args()) {
- if (Arg.hasAttribute(Attribute::ByVal) ||
- Arg.hasAttribute(Attribute::InReg) ||
- Arg.hasAttribute(Attribute::StructRet) ||
- Arg.hasAttribute(Attribute::SwiftSelf) ||
- Arg.hasAttribute(Attribute::SwiftAsync) ||
- Arg.hasAttribute(Attribute::SwiftError) ||
- Arg.hasAttribute(Attribute::Nest))
- return false;
- Type *ArgTy = Arg.getType();
- if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
- return false;
- EVT ArgVT = TLI.getValueType(DL, ArgTy);
- if (!ArgVT.isSimple()) return false;
- switch (ArgVT.getSimpleVT().SimpleTy) {
- default: return false;
- case MVT::i32:
- case MVT::i64:
- ++GPRCnt;
- break;
- case MVT::f32:
- case MVT::f64:
- if (!Subtarget->hasSSE1())
- return false;
- ++FPRCnt;
- break;
- }
- if (GPRCnt > 6)
- return false;
- if (FPRCnt > 8)
- return false;
- }
- static const MCPhysReg GPR32ArgRegs[] = {
- X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
- };
- static const MCPhysReg GPR64ArgRegs[] = {
- X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9
- };
- static const MCPhysReg XMMArgRegs[] = {
- X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
- X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
- };
- unsigned GPRIdx = 0;
- unsigned FPRIdx = 0;
- for (auto const &Arg : F->args()) {
- MVT VT = TLI.getSimpleValueType(DL, Arg.getType());
- const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
- unsigned SrcReg;
- switch (VT.SimpleTy) {
- default: llvm_unreachable("Unexpected value type.");
- case MVT::i32: SrcReg = GPR32ArgRegs[GPRIdx++]; break;
- case MVT::i64: SrcReg = GPR64ArgRegs[GPRIdx++]; break;
- case MVT::f32: [[fallthrough]];
- case MVT::f64: SrcReg = XMMArgRegs[FPRIdx++]; break;
- }
- Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
- // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
- // Without this, EmitLiveInCopies may eliminate the livein if its only
- // use is a bitcast (which isn't turned into an instruction).
- Register ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), ResultReg)
- .addReg(DstReg, getKillRegState(true));
- updateValueMap(&Arg, ResultReg);
- }
- return true;
- }
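- // On 32-bit targets (outside MSVC and MCU environments), a callee taking
- // an sret pointer pops that hidden argument itself, i.e. it returns with
- // `ret $4`, so the caller must account for those 4 bytes when tearing the
- // call frame down. That is the quantity computed below.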
- static unsigned computeBytesPoppedByCalleeForSRet(const X86Subtarget *Subtarget,
- CallingConv::ID CC,
- const CallBase *CB) {
- if (Subtarget->is64Bit())
- return 0;
- if (Subtarget->getTargetTriple().isOSMSVCRT())
- return 0;
- if (CC == CallingConv::Fast || CC == CallingConv::GHC ||
- CC == CallingConv::HiPE || CC == CallingConv::Tail ||
- CC == CallingConv::SwiftTail)
- return 0;
- if (CB)
- if (CB->arg_empty() || !CB->paramHasAttr(0, Attribute::StructRet) ||
- CB->paramHasAttr(0, Attribute::InReg) || Subtarget->isTargetMCU())
- return 0;
- return 4;
- }
- bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
- auto &OutVals = CLI.OutVals;
- auto &OutFlags = CLI.OutFlags;
- auto &OutRegs = CLI.OutRegs;
- auto &Ins = CLI.Ins;
- auto &InRegs = CLI.InRegs;
- CallingConv::ID CC = CLI.CallConv;
- bool &IsTailCall = CLI.IsTailCall;
- bool IsVarArg = CLI.IsVarArg;
- const Value *Callee = CLI.Callee;
- MCSymbol *Symbol = CLI.Symbol;
- const auto *CB = CLI.CB;
- bool Is64Bit = Subtarget->is64Bit();
- bool IsWin64 = Subtarget->isCallingConvWin64(CC);
- // Call / invoke instructions with NoCfCheck attribute require special
- // handling.
- if (CB && CB->doesNoCfCheck())
- return false;
- // Functions with the no_caller_saved_registers attribute need special
- // handling.
- if ((CB && isa<CallInst>(CB) && CB->hasFnAttr("no_caller_saved_registers")))
- return false;
- // Functions with the no_callee_saved_registers attribute need special
- // handling.
- if ((CB && CB->hasFnAttr("no_callee_saved_registers")))
- return false;
- // Indirect calls with CFI checks need special handling.
- if (CB && CB->isIndirectCall() && CB->getOperandBundle(LLVMContext::OB_kcfi))
- return false;
- // Functions using thunks for indirect calls need to use SDISel.
- if (Subtarget->useIndirectThunkCalls())
- return false;
- // Handle only C, fastcc, webkit_js, swift, and a few x86-specific calling
- // conventions for now (see the switch below).
- switch (CC) {
- default: return false;
- case CallingConv::C:
- case CallingConv::Fast:
- case CallingConv::Tail:
- case CallingConv::WebKit_JS:
- case CallingConv::Swift:
- case CallingConv::SwiftTail:
- case CallingConv::X86_FastCall:
- case CallingConv::X86_StdCall:
- case CallingConv::X86_ThisCall:
- case CallingConv::Win64:
- case CallingConv::X86_64_SysV:
- case CallingConv::CFGuard_Check:
- break;
- }
- // Allow SelectionDAG isel to handle tail calls.
- if (IsTailCall)
- return false;
- // fastcc with -tailcallopt is intended to provide a guaranteed
- // tail call optimization. FastISel doesn't know how to do that.
- if ((CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt) ||
- CC == CallingConv::Tail || CC == CallingConv::SwiftTail)
- return false;
- // Don't know how to handle Win64 varargs yet. Nothing special is needed
- // for x86-32; the x86-64 (SysV) case is handled below.
- if (IsVarArg && IsWin64)
- return false;
- // Don't know about inalloca yet.
- if (CLI.CB && CLI.CB->hasInAllocaArgument())
- return false;
- for (auto Flag : CLI.OutFlags)
- if (Flag.isSwiftError() || Flag.isPreallocated())
- return false;
- SmallVector<MVT, 16> OutVTs;
- SmallVector<unsigned, 16> ArgRegs;
- // If this is a constant i1/i8/i16 argument, promote to i32 to avoid an extra
- // instruction. This is safe because it is common to all the FastISel-supported
- // calling conventions on x86.
- for (int i = 0, e = OutVals.size(); i != e; ++i) {
- Value *&Val = OutVals[i];
- ISD::ArgFlagsTy Flags = OutFlags[i];
- if (auto *CI = dyn_cast<ConstantInt>(Val)) {
- if (CI->getBitWidth() < 32) {
- if (Flags.isSExt())
- Val = ConstantExpr::getSExt(CI, Type::getInt32Ty(CI->getContext()));
- else
- Val = ConstantExpr::getZExt(CI, Type::getInt32Ty(CI->getContext()));
- }
- }
- // Passing bools around ends up doing a trunc to i1 and passing it.
- // Codegen this as an argument + "and 1".
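- // Illustrative (hypothetical) IR for this pattern:
- //   %t = trunc i32 %x to i1
- //   call void @g(i1 zeroext %t)
- // Instead of materializing the trunc, we emit `and <reg>, 1` on %x itself.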
- MVT VT;
- auto *TI = dyn_cast<TruncInst>(Val);
- unsigned ResultReg;
- if (TI && TI->getType()->isIntegerTy(1) && CLI.CB &&
- (TI->getParent() == CLI.CB->getParent()) && TI->hasOneUse()) {
- Value *PrevVal = TI->getOperand(0);
- ResultReg = getRegForValue(PrevVal);
- if (!ResultReg)
- return false;
- if (!isTypeLegal(PrevVal->getType(), VT))
- return false;
- ResultReg = fastEmit_ri(VT, VT, ISD::AND, ResultReg, 1);
- } else {
- if (!isTypeLegal(Val->getType(), VT) ||
- (VT.isVector() && VT.getVectorElementType() == MVT::i1))
- return false;
- ResultReg = getRegForValue(Val);
- }
- if (!ResultReg)
- return false;
- ArgRegs.push_back(ResultReg);
- OutVTs.push_back(VT);
- }
- // Analyze operands of the call, assigning locations to each operand.
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, CLI.RetTy->getContext());
- // Allocate shadow area for Win64
- if (IsWin64)
- CCInfo.AllocateStack(32, Align(8));
- CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86);
- // Get a count of how many bytes are to be pushed on the stack.
- unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
- // Issue CALLSEQ_START
- unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown))
- .addImm(NumBytes).addImm(0).addImm(0);
- // Walk the register/memloc assignments, inserting copies/loads.
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign const &VA = ArgLocs[i];
- const Value *ArgVal = OutVals[VA.getValNo()];
- MVT ArgVT = OutVTs[VA.getValNo()];
- if (ArgVT == MVT::x86mmx)
- return false;
- unsigned ArgReg = ArgRegs[VA.getValNo()];
- // Promote the value if needed.
- switch (VA.getLocInfo()) {
- case CCValAssign::Full: break;
- case CCValAssign::SExt: {
- assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
- "Unexpected extend");
- if (ArgVT == MVT::i1)
- return false;
- bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
- ArgVT, ArgReg);
- assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
- ArgVT = VA.getLocVT();
- break;
- }
- case CCValAssign::ZExt: {
- assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
- "Unexpected extend");
- // Handle zero-extension from i1 to i8, which is common.
- if (ArgVT == MVT::i1) {
- // Set the high bits to zero.
- ArgReg = fastEmitZExtFromI1(MVT::i8, ArgReg);
- ArgVT = MVT::i8;
- if (ArgReg == 0)
- return false;
- }
- bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
- ArgVT, ArgReg);
- assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
- ArgVT = VA.getLocVT();
- break;
- }
- case CCValAssign::AExt: {
- assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
- "Unexpected extend");
- bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(), ArgReg,
- ArgVT, ArgReg);
- if (!Emitted)
- Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
- ArgVT, ArgReg);
- if (!Emitted)
- Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
- ArgVT, ArgReg);
- assert(Emitted && "Failed to emit an aext!"); (void)Emitted;
- ArgVT = VA.getLocVT();
- break;
- }
- case CCValAssign::BCvt: {
- ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg);
- assert(ArgReg && "Failed to emit a bitcast!");
- ArgVT = VA.getLocVT();
- break;
- }
- case CCValAssign::VExt:
- // VExt has not been implemented, so this should be impossible to reach
- // for now. However, fall back to SelectionDAG isel once it is implemented.
- return false;
- case CCValAssign::AExtUpper:
- case CCValAssign::SExtUpper:
- case CCValAssign::ZExtUpper:
- case CCValAssign::FPExt:
- case CCValAssign::Trunc:
- llvm_unreachable("Unexpected loc info!");
- case CCValAssign::Indirect:
- // FIXME: Indirect doesn't need extending, but fast-isel doesn't fully
- // support this.
- return false;
- }
- if (VA.isRegLoc()) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
- OutRegs.push_back(VA.getLocReg());
- } else {
- assert(VA.isMemLoc() && "Unknown value location!");
- // Don't emit stores for undef values.
- if (isa<UndefValue>(ArgVal))
- continue;
- unsigned LocMemOffset = VA.getLocMemOffset();
- X86AddressMode AM;
- AM.Base.Reg = RegInfo->getStackRegister();
- AM.Disp = LocMemOffset;
- ISD::ArgFlagsTy Flags = OutFlags[VA.getValNo()];
- Align Alignment = DL.getABITypeAlign(ArgVal->getType());
- MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
- MachinePointerInfo::getStack(*FuncInfo.MF, LocMemOffset),
- MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
- if (Flags.isByVal()) {
- X86AddressMode SrcAM;
- SrcAM.Base.Reg = ArgReg;
- if (!TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize()))
- return false;
- } else if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal)) {
- // If this is a really simple value, emit this with the Value* version
- // of X86FastEmitStore. If it isn't simple, we don't want to do this,
- // as it can cause us to reevaluate the argument.
- if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO))
- return false;
- } else {
- if (!X86FastEmitStore(ArgVT, ArgReg, AM, MMO))
- return false;
- }
- }
- }
- // ELF / PIC requires the GOT pointer in the EBX register before function
- // calls made via the PLT.
- if (Subtarget->isPICStyleGOT()) {
- unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base);
- }
- if (Is64Bit && IsVarArg && !IsWin64) {
- // From AMD64 ABI document:
- // For calls that may call functions that use varargs or stdargs
- // (prototype-less calls or calls to functions containing ellipsis (...) in
- // the declaration) %al is used as hidden argument to specify the number
- // of SSE registers used. The contents of %al do not need to match exactly
- // the number of registers, but must be an upper bound on the number of SSE
- // registers used and is in the range 0 - 8 inclusive.
- // Count the number of XMM registers allocated.
- static const MCPhysReg XMMArgRegs[] = {
- X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
- X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
- };
- unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
- assert((Subtarget->hasSSE1() || !NumXMMRegs)
- && "SSE registers cannot be used when SSE is disabled");
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV8ri),
- X86::AL).addImm(NumXMMRegs);
- }
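- // For example, a vararg call passing a single double in XMM0 emits
- // `mov al, 1` here. Since AL only has to be an upper bound, over-reporting
- // the count is ABI-conformant.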
- // Materialize callee address in a register. FIXME: GV address can be
- // handled with a CALLpcrel32 instead.
- X86AddressMode CalleeAM;
- if (!X86SelectCallAddress(Callee, CalleeAM))
- return false;
- unsigned CalleeOp = 0;
- const GlobalValue *GV = nullptr;
- if (CalleeAM.GV != nullptr) {
- GV = CalleeAM.GV;
- } else if (CalleeAM.Base.Reg != 0) {
- CalleeOp = CalleeAM.Base.Reg;
- } else
- return false;
- // Issue the call.
- MachineInstrBuilder MIB;
- if (CalleeOp) {
- // Register-indirect call.
- unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r;
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CallOpc))
- .addReg(CalleeOp);
- } else {
- // Direct call.
- assert(GV && "Not a direct call");
- // See if we need any target-specific flags on the GV operand.
- unsigned char OpFlags = Subtarget->classifyGlobalFunctionReference(GV);
- // This will be a direct call, or an indirect call through memory for
- // NonLazyBind calls or dllimport calls.
- bool NeedLoad = OpFlags == X86II::MO_DLLIMPORT ||
- OpFlags == X86II::MO_GOTPCREL ||
- OpFlags == X86II::MO_GOTPCREL_NORELAX ||
- OpFlags == X86II::MO_COFFSTUB;
- unsigned CallOpc = NeedLoad
- ? (Is64Bit ? X86::CALL64m : X86::CALL32m)
- : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CallOpc));
- if (NeedLoad)
- MIB.addReg(Is64Bit ? X86::RIP : 0).addImm(1).addReg(0);
- if (Symbol)
- MIB.addSym(Symbol, OpFlags);
- else
- MIB.addGlobalAddress(GV, 0, OpFlags);
- if (NeedLoad)
- MIB.addReg(0);
- }
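- // For the memory-form calls, the operands added above encode the usual
- // five-part X86 memory reference: base (RIP on x86-64), scale 1, no index,
- // the displacement (global or symbol), and no segment.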
- // Add a register mask operand representing the call-preserved registers.
- // Proper defs for return values will be added by setPhysRegsDeadExcept().
- MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
- // Add an implicit use GOT pointer in EBX.
- if (Subtarget->isPICStyleGOT())
- MIB.addReg(X86::EBX, RegState::Implicit);
- if (Is64Bit && IsVarArg && !IsWin64)
- MIB.addReg(X86::AL, RegState::Implicit);
- // Add implicit physical register uses to the call.
- for (auto Reg : OutRegs)
- MIB.addReg(Reg, RegState::Implicit);
- // Issue CALLSEQ_END
- unsigned NumBytesForCalleeToPop =
- X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg,
- TM.Options.GuaranteedTailCallOpt)
- ? NumBytes // Callee pops everything.
- : computeBytesPoppedByCalleeForSRet(Subtarget, CC, CLI.CB);
- unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))
- .addImm(NumBytes).addImm(NumBytesForCalleeToPop);
- // Now handle call return values.
- SmallVector<CCValAssign, 16> RVLocs;
- CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs,
- CLI.RetTy->getContext());
- CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
- // Copy all of the result registers out of their specified physreg.
- Register ResultReg = FuncInfo.CreateRegs(CLI.RetTy);
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
- CCValAssign &VA = RVLocs[i];
- EVT CopyVT = VA.getValVT();
- unsigned CopyReg = ResultReg + i;
- Register SrcReg = VA.getLocReg();
- // If this is x86-64 and SSE is disabled, we can't return FP values.
- if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
- ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
- report_fatal_error("SSE register return with SSE disabled");
- }
- // If we prefer to use the value in xmm registers, copy it out as f80 and
- // use a truncate to move it from fp stack reg to xmm reg.
- if ((SrcReg == X86::FP0 || SrcReg == X86::FP1) &&
- isScalarFPTypeInSSEReg(VA.getValVT())) {
- CopyVT = MVT::f80;
- CopyReg = createResultReg(&X86::RFP80RegClass);
- }
- // Copy out the result.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), CopyReg).addReg(SrcReg);
- InRegs.push_back(VA.getLocReg());
- // Round the f80 to the right size, which also moves it to the appropriate
- // xmm register. This is accomplished by storing the f80 value in memory
- // and then loading it back.
- if (CopyVT != VA.getValVT()) {
- EVT ResVT = VA.getValVT();
- unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
- unsigned MemSize = ResVT.getSizeInBits()/8;
- int FI = MFI.CreateStackObject(MemSize, Align(MemSize), false);
- addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(Opc)), FI)
- .addReg(CopyReg);
- Opc = ResVT == MVT::f32 ? X86::MOVSSrm_alt : X86::MOVSDrm_alt;
- addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(Opc), ResultReg + i), FI);
- }
- }
- CLI.ResultReg = ResultReg;
- CLI.NumResultRegs = RVLocs.size();
- CLI.Call = MIB;
- return true;
- }
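- // Top-level FastISel dispatcher. Returning false from any of the handlers
- // below hands the instruction over to the slower SelectionDAG path.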
- bool
- X86FastISel::fastSelectInstruction(const Instruction *I) {
- switch (I->getOpcode()) {
- default: break;
- case Instruction::Load:
- return X86SelectLoad(I);
- case Instruction::Store:
- return X86SelectStore(I);
- case Instruction::Ret:
- return X86SelectRet(I);
- case Instruction::ICmp:
- case Instruction::FCmp:
- return X86SelectCmp(I);
- case Instruction::ZExt:
- return X86SelectZExt(I);
- case Instruction::SExt:
- return X86SelectSExt(I);
- case Instruction::Br:
- return X86SelectBranch(I);
- case Instruction::LShr:
- case Instruction::AShr:
- case Instruction::Shl:
- return X86SelectShift(I);
- case Instruction::SDiv:
- case Instruction::UDiv:
- case Instruction::SRem:
- case Instruction::URem:
- return X86SelectDivRem(I);
- case Instruction::Select:
- return X86SelectSelect(I);
- case Instruction::Trunc:
- return X86SelectTrunc(I);
- case Instruction::FPExt:
- return X86SelectFPExt(I);
- case Instruction::FPTrunc:
- return X86SelectFPTrunc(I);
- case Instruction::SIToFP:
- return X86SelectSIToFP(I);
- case Instruction::UIToFP:
- return X86SelectUIToFP(I);
- case Instruction::IntToPtr: // Deliberate fall-through.
- case Instruction::PtrToInt: {
- EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
- EVT DstVT = TLI.getValueType(DL, I->getType());
- if (DstVT.bitsGT(SrcVT))
- return X86SelectZExt(I);
- if (DstVT.bitsLT(SrcVT))
- return X86SelectTrunc(I);
- Register Reg = getRegForValue(I->getOperand(0));
- if (Reg == 0) return false;
- updateValueMap(I, Reg);
- return true;
- }
- case Instruction::BitCast: {
- // Select SSE2/AVX bitcasts between 128/256/512 bit vector types.
- if (!Subtarget->hasSSE2())
- return false;
- MVT SrcVT, DstVT;
- if (!isTypeLegal(I->getOperand(0)->getType(), SrcVT) ||
- !isTypeLegal(I->getType(), DstVT))
- return false;
- // Only allow vectors that use xmm/ymm/zmm.
- if (!SrcVT.isVector() || !DstVT.isVector() ||
- SrcVT.getVectorElementType() == MVT::i1 ||
- DstVT.getVectorElementType() == MVT::i1)
- return false;
- Register Reg = getRegForValue(I->getOperand(0));
- if (!Reg)
- return false;
- // Emit a reg-reg copy so we don't propagate cached known bits information
- // with the wrong VT if we fall out of fast isel after selecting this.
- const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
- Register ResultReg = createResultReg(DstClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);
- updateValueMap(I, ResultReg);
- return true;
- }
- }
- return false;
- }
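- // Constant materialization helpers follow. Note that zero is emitted via
- // MOV32r0, a pseudo that later expands to a self-XOR; because 32-bit
- // writes implicitly clear the upper half of the 64-bit register, the i64
- // case only needs a SUBREG_TO_REG wrapper rather than a 64-bit move.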
- unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
- if (VT > MVT::i64)
- return 0;
- uint64_t Imm = CI->getZExtValue();
- if (Imm == 0) {
- Register SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
- switch (VT.SimpleTy) {
- default: llvm_unreachable("Unexpected value type");
- case MVT::i1:
- case MVT::i8:
- return fastEmitInst_extractsubreg(MVT::i8, SrcReg, X86::sub_8bit);
- case MVT::i16:
- return fastEmitInst_extractsubreg(MVT::i16, SrcReg, X86::sub_16bit);
- case MVT::i32:
- return SrcReg;
- case MVT::i64: {
- Register ResultReg = createResultReg(&X86::GR64RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
- .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
- return ResultReg;
- }
- }
- }
- unsigned Opc = 0;
- switch (VT.SimpleTy) {
- default: llvm_unreachable("Unexpected value type");
- case MVT::i1:
- VT = MVT::i8;
- [[fallthrough]];
- case MVT::i8: Opc = X86::MOV8ri; break;
- case MVT::i16: Opc = X86::MOV16ri; break;
- case MVT::i32: Opc = X86::MOV32ri; break;
- case MVT::i64: {
- if (isUInt<32>(Imm))
- Opc = X86::MOV32ri64;
- else if (isInt<32>(Imm))
- Opc = X86::MOV64ri32;
- else
- Opc = X86::MOV64ri;
- break;
- }
- }
- return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
- }
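- // Nonzero FP constants are loaded from the constant pool; the opcode
- // selection below merely picks the SSE/AVX/AVX-512 flavor of the same
- // scalar load.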
- unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
- if (CFP->isNullValue())
- return fastMaterializeFloatZero(CFP);
- // Can't handle alternate code models yet.
- CodeModel::Model CM = TM.getCodeModel();
- if (CM != CodeModel::Small && CM != CodeModel::Large)
- return 0;
- // Get opcode and regclass of the output for the given load instruction.
- unsigned Opc = 0;
- bool HasSSE1 = Subtarget->hasSSE1();
- bool HasSSE2 = Subtarget->hasSSE2();
- bool HasAVX = Subtarget->hasAVX();
- bool HasAVX512 = Subtarget->hasAVX512();
- switch (VT.SimpleTy) {
- default: return 0;
- case MVT::f32:
- Opc = HasAVX512 ? X86::VMOVSSZrm_alt
- : HasAVX ? X86::VMOVSSrm_alt
- : HasSSE1 ? X86::MOVSSrm_alt
- : X86::LD_Fp32m;
- break;
- case MVT::f64:
- Opc = HasAVX512 ? X86::VMOVSDZrm_alt
- : HasAVX ? X86::VMOVSDrm_alt
- : HasSSE2 ? X86::MOVSDrm_alt
- : X86::LD_Fp64m;
- break;
- case MVT::f80:
- // No f80 support yet.
- return 0;
- }
- // MachineConstantPool wants an explicit alignment.
- Align Alignment = DL.getPrefTypeAlign(CFP->getType());
- // x86-32 PIC requires a PIC base register for constant pools.
- unsigned PICBase = 0;
- unsigned char OpFlag = Subtarget->classifyLocalReference(nullptr);
- if (OpFlag == X86II::MO_PIC_BASE_OFFSET)
- PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
- else if (OpFlag == X86II::MO_GOTOFF)
- PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
- else if (Subtarget->is64Bit() && TM.getCodeModel() == CodeModel::Small)
- PICBase = X86::RIP;
- // Create the load from the constant pool.
- unsigned CPI = MCP.getConstantPoolIndex(CFP, Alignment);
- Register ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
- // Large code model only applies to 64-bit mode.
- if (Subtarget->is64Bit() && CM == CodeModel::Large) {
- Register AddrReg = createResultReg(&X86::GR64RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV64ri),
- AddrReg)
- .addConstantPoolIndex(CPI, 0, OpFlag);
- MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(Opc), ResultReg);
- addRegReg(MIB, AddrReg, false, PICBase, false);
- MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
- MachinePointerInfo::getConstantPool(*FuncInfo.MF),
- MachineMemOperand::MOLoad, DL.getPointerSize(), Alignment);
- MIB->addMemOperand(*FuncInfo.MF, MMO);
- return ResultReg;
- }
- addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(Opc), ResultReg),
- CPI, PICBase, OpFlag);
- return ResultReg;
- }
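- // Global addresses are normally materialized with an LEA, e.g.
- //   lea rax, [rip + foo]
- // in small-code-model PIC, falling back to MOV64ri when static relocation
- // allows an absolute 64-bit immediate.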
- unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
- // Can't handle alternate code models yet.
- if (TM.getCodeModel() != CodeModel::Small)
- return 0;
- // Materialize addresses with LEA/MOV instructions.
- X86AddressMode AM;
- if (X86SelectAddress(GV, AM)) {
- // If the expression is just a basereg, then we're done, otherwise we need
- // to emit an LEA.
- if (AM.BaseType == X86AddressMode::RegBase &&
- AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr)
- return AM.Base.Reg;
- Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
- if (TM.getRelocationModel() == Reloc::Static &&
- TLI.getPointerTy(DL) == MVT::i64) {
- // The displacement could be more than 32 bits away, so we need to use
- // an instruction with a 64-bit immediate.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV64ri),
- ResultReg)
- .addGlobalAddress(GV);
- } else {
- unsigned Opc =
- TLI.getPointerTy(DL) == MVT::i32
- ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
- : X86::LEA64r;
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(Opc), ResultReg), AM);
- }
- return ResultReg;
- }
- return 0;
- }
- unsigned X86FastISel::fastMaterializeConstant(const Constant *C) {
- EVT CEVT = TLI.getValueType(DL, C->getType(), true);
- // Only handle simple types.
- if (!CEVT.isSimple())
- return 0;
- MVT VT = CEVT.getSimpleVT();
- if (const auto *CI = dyn_cast<ConstantInt>(C))
- return X86MaterializeInt(CI, VT);
- if (const auto *CFP = dyn_cast<ConstantFP>(C))
- return X86MaterializeFP(CFP, VT);
- if (const auto *GV = dyn_cast<GlobalValue>(C))
- return X86MaterializeGV(GV, VT);
- if (isa<UndefValue>(C)) {
- unsigned Opc = 0;
- switch (VT.SimpleTy) {
- default:
- break;
- case MVT::f32:
- if (!Subtarget->hasSSE1())
- Opc = X86::LD_Fp032;
- break;
- case MVT::f64:
- if (!Subtarget->hasSSE2())
- Opc = X86::LD_Fp064;
- break;
- case MVT::f80:
- Opc = X86::LD_Fp080;
- break;
- }
- if (Opc) {
- Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
- ResultReg);
- return ResultReg;
- }
- }
- return 0;
- }
- unsigned X86FastISel::fastMaterializeAlloca(const AllocaInst *C) {
- // Fail on dynamic allocas. At this point, getRegForValue has already
- // checked its CSE maps, so if we're here trying to handle a dynamic
- // alloca, we're not going to succeed. X86SelectAddress has a
- // check for dynamic allocas, because it's called directly from
- // various places, but targetMaterializeAlloca also needs a check
- // in order to avoid recursion between getRegForValue,
- // X86SelectAddress, and targetMaterializeAlloca.
- if (!FuncInfo.StaticAllocaMap.count(C))
- return 0;
- assert(C->isStaticAlloca() && "dynamic alloca in the static alloca map?");
- X86AddressMode AM;
- if (!X86SelectAddress(C, AM))
- return 0;
- unsigned Opc =
- TLI.getPointerTy(DL) == MVT::i32
- ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
- : X86::LEA64r;
- const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL));
- Register ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(Opc), ResultReg), AM);
- return ResultReg;
- }
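- // Floating-point zero needs no constant-pool load: the FsFLD0* opcodes
- // used below are pseudos that later expand to a dependency-breaking
- // self-XOR (e.g. `xorps xmm0, xmm0`).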
- unsigned X86FastISel::fastMaterializeFloatZero(const ConstantFP *CF) {
- MVT VT;
- if (!isTypeLegal(CF->getType(), VT))
- return 0;
- // Get opcode and regclass for the given zero.
- bool HasSSE1 = Subtarget->hasSSE1();
- bool HasSSE2 = Subtarget->hasSSE2();
- bool HasAVX512 = Subtarget->hasAVX512();
- unsigned Opc = 0;
- switch (VT.SimpleTy) {
- default: return 0;
- case MVT::f16:
- Opc = HasAVX512 ? X86::AVX512_FsFLD0SH : X86::FsFLD0SH;
- break;
- case MVT::f32:
- Opc = HasAVX512 ? X86::AVX512_FsFLD0SS
- : HasSSE1 ? X86::FsFLD0SS
- : X86::LD_Fp032;
- break;
- case MVT::f64:
- Opc = HasAVX512 ? X86::AVX512_FsFLD0SD
- : HasSSE2 ? X86::FsFLD0SD
- : X86::LD_Fp064;
- break;
- case MVT::f80:
- // No f80 support yet.
- return 0;
- }
- Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg);
- return ResultReg;
- }
- bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
- const LoadInst *LI) {
- const Value *Ptr = LI->getPointerOperand();
- X86AddressMode AM;
- if (!X86SelectAddress(Ptr, AM))
- return false;
- const X86InstrInfo &XII = (const X86InstrInfo &)TII;
- unsigned Size = DL.getTypeAllocSize(LI->getType());
- SmallVector<MachineOperand, 8> AddrOps;
- AM.getFullAddress(AddrOps);
- MachineInstr *Result = XII.foldMemoryOperandImpl(
- *FuncInfo.MF, *MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, LI->getAlign(),
- /*AllowCommute=*/true);
- if (!Result)
- return false;
- // The index register could be in the wrong register class. Unfortunately,
- // foldMemoryOperandImpl could have commuted the instruction, so it's not
- // enough to just look at OpNo + the offset to the index reg. We actually
- // need to scan the instruction to find the index reg and check that it has
- // the correct reg class.
- unsigned OperandNo = 0;
- for (MachineInstr::mop_iterator I = Result->operands_begin(),
- E = Result->operands_end(); I != E; ++I, ++OperandNo) {
- MachineOperand &MO = *I;
- if (!MO.isReg() || MO.isDef() || MO.getReg() != AM.IndexReg)
- continue;
- // Found the index reg, now try to rewrite it.
- Register IndexReg = constrainOperandRegClass(Result->getDesc(),
- MO.getReg(), OperandNo);
- if (IndexReg == MO.getReg())
- continue;
- MO.setReg(IndexReg);
- }
- Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
- Result->cloneInstrSymbols(*FuncInfo.MF, *MI);
- MachineBasicBlock::iterator I(MI);
- removeDeadCode(I, std::next(I));
- return true;
- }
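- // Generic emitter for instructions that take four register operands, which
- // the common FastISel fastEmitInst_* helpers (up to three register
- // operands) don't cover.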
- unsigned X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- unsigned Op0, unsigned Op1,
- unsigned Op2, unsigned Op3) {
- const MCInstrDesc &II = TII.get(MachineInstOpcode);
- Register ResultReg = createResultReg(RC);
- Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
- Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
- Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
- Op3 = constrainOperandRegClass(II, Op3, II.getNumDefs() + 3);
- if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
- .addReg(Op0)
- .addReg(Op1)
- .addReg(Op2)
- .addReg(Op3);
- else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
- .addReg(Op0)
- .addReg(Op1)
- .addReg(Op2)
- .addReg(Op3);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
- ResultReg)
- .addReg(II.implicit_defs()[0]);
- }
- return ResultReg;
- }
- namespace llvm {
- FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo,
- const TargetLibraryInfo *libInfo) {
- return new X86FastISel(funcInfo, libInfo);
- }
- }
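- // The hook that wires this in lives in X86TargetLowering; its call site
- // (in X86ISelLowering.cpp) is roughly:
- //   FastISel *X86TargetLowering::createFastISel(FunctionLoweringInfo &FI,
- //                                               const TargetLibraryInfo *LI) const {
- //     return X86::createFastISel(FI, LI);
- //   }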