- //===- X86InstructionSelector.cpp -----------------------------------------===//
- //
- // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- // See https://llvm.org/LICENSE.txt for license information.
- // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- //
- //===----------------------------------------------------------------------===//
- /// \file
- /// This file implements the targeting of the InstructionSelector class for
- /// X86.
- /// \todo This should be generated by TableGen.
- //===----------------------------------------------------------------------===//
- #include "MCTargetDesc/X86BaseInfo.h"
- #include "X86.h"
- #include "X86InstrBuilder.h"
- #include "X86InstrInfo.h"
- #include "X86RegisterBankInfo.h"
- #include "X86RegisterInfo.h"
- #include "X86Subtarget.h"
- #include "X86TargetMachine.h"
- #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
- #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
- #include "llvm/CodeGen/GlobalISel/RegisterBank.h"
- #include "llvm/CodeGen/GlobalISel/Utils.h"
- #include "llvm/CodeGen/MachineBasicBlock.h"
- #include "llvm/CodeGen/MachineConstantPool.h"
- #include "llvm/CodeGen/MachineFunction.h"
- #include "llvm/CodeGen/MachineInstr.h"
- #include "llvm/CodeGen/MachineInstrBuilder.h"
- #include "llvm/CodeGen/MachineMemOperand.h"
- #include "llvm/CodeGen/MachineOperand.h"
- #include "llvm/CodeGen/MachineRegisterInfo.h"
- #include "llvm/CodeGen/TargetOpcodes.h"
- #include "llvm/CodeGen/TargetRegisterInfo.h"
- #include "llvm/IR/DataLayout.h"
- #include "llvm/IR/InstrTypes.h"
- #include "llvm/IR/IntrinsicsX86.h"
- #include "llvm/Support/AtomicOrdering.h"
- #include "llvm/Support/CodeGen.h"
- #include "llvm/Support/Debug.h"
- #include "llvm/Support/ErrorHandling.h"
- #include "llvm/Support/LowLevelTypeImpl.h"
- #include "llvm/Support/MathExtras.h"
- #include "llvm/Support/raw_ostream.h"
- #include <cassert>
- #include <cstdint>
- #include <tuple>
- #define DEBUG_TYPE "X86-isel"
- using namespace llvm;
- namespace {
- #define GET_GLOBALISEL_PREDICATE_BITSET
- #include "X86GenGlobalISel.inc"
- #undef GET_GLOBALISEL_PREDICATE_BITSET
- class X86InstructionSelector : public InstructionSelector {
- public:
- X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
- const X86RegisterBankInfo &RBI);
- bool select(MachineInstr &I) override;
- static const char *getName() { return DEBUG_TYPE; }
- private:
- /// tblgen-erated 'select' implementation, used as the initial selector for
- /// the patterns that don't require complex C++.
- bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
- // TODO: remove once these are supported by TableGen-erated instruction
- // selection.
- unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
- Align Alignment) const;
- bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
- bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
- bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
- bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
- bool selectTruncOrPtrToInt(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
- bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
- bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
- bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
- bool selectFCmp(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
- bool selectUadde(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
- bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
- bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF);
- bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF);
- bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
- bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
- bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
- bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
- const unsigned DstReg,
- const TargetRegisterClass *DstRC,
- const unsigned SrcReg,
- const TargetRegisterClass *SrcRC) const;
- bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
- bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
- bool selectDivRem(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
- bool selectIntrinsicWSideEffects(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
- // Emit an insert-subreg instruction and insert it before MachineInstr &I.
- bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
- MachineRegisterInfo &MRI, MachineFunction &MF) const;
- // Emit an extract-subreg instruction and insert it before MachineInstr &I.
- bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
- MachineRegisterInfo &MRI, MachineFunction &MF) const;
- const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
- const TargetRegisterClass *getRegClass(LLT Ty, unsigned Reg,
- MachineRegisterInfo &MRI) const;
- const X86TargetMachine &TM;
- const X86Subtarget &STI;
- const X86InstrInfo &TII;
- const X86RegisterInfo &TRI;
- const X86RegisterBankInfo &RBI;
- #define GET_GLOBALISEL_PREDICATES_DECL
- #include "X86GenGlobalISel.inc"
- #undef GET_GLOBALISEL_PREDICATES_DECL
- #define GET_GLOBALISEL_TEMPORARIES_DECL
- #include "X86GenGlobalISel.inc"
- #undef GET_GLOBALISEL_TEMPORARIES_DECL
- };
- } // end anonymous namespace
- #define GET_GLOBALISEL_IMPL
- #include "X86GenGlobalISel.inc"
- #undef GET_GLOBALISEL_IMPL
- X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
- const X86Subtarget &STI,
- const X86RegisterBankInfo &RBI)
- : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
- RBI(RBI),
- #define GET_GLOBALISEL_PREDICATES_INIT
- #include "X86GenGlobalISel.inc"
- #undef GET_GLOBALISEL_PREDICATES_INIT
- #define GET_GLOBALISEL_TEMPORARIES_INIT
- #include "X86GenGlobalISel.inc"
- #undef GET_GLOBALISEL_TEMPORARIES_INIT
- {
- }
- // FIXME: This should be target-independent, inferred from the types declared
- // for each class in the bank.
- const TargetRegisterClass *
- X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
- if (RB.getID() == X86::GPRRegBankID) {
- if (Ty.getSizeInBits() <= 8)
- return &X86::GR8RegClass;
- if (Ty.getSizeInBits() == 16)
- return &X86::GR16RegClass;
- if (Ty.getSizeInBits() == 32)
- return &X86::GR32RegClass;
- if (Ty.getSizeInBits() == 64)
- return &X86::GR64RegClass;
- }
- if (RB.getID() == X86::VECRRegBankID) {
- if (Ty.getSizeInBits() == 32)
- return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
- if (Ty.getSizeInBits() == 64)
- return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
- if (Ty.getSizeInBits() == 128)
- return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
- if (Ty.getSizeInBits() == 256)
- return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
- if (Ty.getSizeInBits() == 512)
- return &X86::VR512RegClass;
- }
- llvm_unreachable("Unknown RegBank!");
- }
- const TargetRegisterClass *
- X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
- MachineRegisterInfo &MRI) const {
- const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
- return getRegClass(Ty, RegBank);
- }
- static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
- unsigned SubIdx = X86::NoSubRegister;
- if (RC == &X86::GR32RegClass) {
- SubIdx = X86::sub_32bit;
- } else if (RC == &X86::GR16RegClass) {
- SubIdx = X86::sub_16bit;
- } else if (RC == &X86::GR8RegClass) {
- SubIdx = X86::sub_8bit;
- }
- return SubIdx;
- }
- static const TargetRegisterClass *getRegClassFromGRPhysReg(Register Reg) {
- assert(Reg.isPhysical());
- if (X86::GR64RegClass.contains(Reg))
- return &X86::GR64RegClass;
- if (X86::GR32RegClass.contains(Reg))
- return &X86::GR32RegClass;
- if (X86::GR16RegClass.contains(Reg))
- return &X86::GR16RegClass;
- if (X86::GR8RegClass.contains(Reg))
- return &X86::GR8RegClass;
- llvm_unreachable("Unknown RegClass for PhysReg!");
- }
- // Set X86 Opcode and constrain DestReg.
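- //
- // For a widening copy into a physical register (generated by ABI lowering),
- // the source is first extended, roughly (illustrative MIR, register names
- // invented):
- //   %ext:gr64 = SUBREG_TO_REG 0, %src:gr32, %subreg.sub_32bit
- //   $rax = COPY %ext
- // For a narrowing copy from a physical register, the source operand is
- // rewritten to read the matching subregister of the physical register.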
- bool X86InstructionSelector::selectCopy(MachineInstr &I,
- MachineRegisterInfo &MRI) const {
- Register DstReg = I.getOperand(0).getReg();
- const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
- const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
- Register SrcReg = I.getOperand(1).getReg();
- const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
- const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
- if (DstReg.isPhysical()) {
- assert(I.isCopy() && "Generic operators do not allow physical registers");
- if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
- DstRegBank.getID() == X86::GPRRegBankID) {
- const TargetRegisterClass *SrcRC =
- getRegClass(MRI.getType(SrcReg), SrcRegBank);
- const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);
- if (SrcRC != DstRC) {
- // This case can be generated by ABI lowering; perform an anyext.
- Register ExtSrc = MRI.createVirtualRegister(DstRC);
- BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(TargetOpcode::SUBREG_TO_REG))
- .addDef(ExtSrc)
- .addImm(0)
- .addReg(SrcReg)
- .addImm(getSubRegIndex(SrcRC));
- I.getOperand(1).setReg(ExtSrc);
- }
- }
- return true;
- }
- assert((!SrcReg.isPhysical() || I.isCopy()) &&
- "No phys reg on generic operators");
- assert((DstSize == SrcSize ||
- // Copies are a means to set up initial types; the number of
- // bits may not exactly match.
- (SrcReg.isPhysical() &&
- DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
- "Copy with different width?!");
- const TargetRegisterClass *DstRC =
- getRegClass(MRI.getType(DstReg), DstRegBank);
- if (SrcRegBank.getID() == X86::GPRRegBankID &&
- DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
- SrcReg.isPhysical()) {
- // Change the physical register to perform the truncate.
- const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);
- if (DstRC != SrcRC) {
- I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
- I.getOperand(1).substPhysReg(SrcReg, TRI);
- }
- }
- // No need to constrain SrcReg. It will get constrained when
- // we hit another of its uses or defs.
- // Copies do not have constraints.
- const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
- if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
- if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
- return false;
- }
- }
- I.setDesc(TII.get(X86::COPY));
- return true;
- }
- bool X86InstructionSelector::select(MachineInstr &I) {
- assert(I.getParent() && "Instruction should be in a basic block!");
- assert(I.getParent()->getParent() && "Instruction should be in a function!");
- MachineBasicBlock &MBB = *I.getParent();
- MachineFunction &MF = *MBB.getParent();
- MachineRegisterInfo &MRI = MF.getRegInfo();
- unsigned Opcode = I.getOpcode();
- if (!isPreISelGenericOpcode(Opcode)) {
- // Certain non-generic instructions also need some special handling.
- if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
- return false;
- if (I.isCopy())
- return selectCopy(I, MRI);
- return true;
- }
- assert(I.getNumOperands() == I.getNumExplicitOperands() &&
- "Generic instruction has unexpected implicit operands\n");
- if (selectImpl(I, *CoverageInfo))
- return true;
- LLVM_DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));
- // TODO: This should be implemented by tblgen.
- switch (I.getOpcode()) {
- default:
- return false;
- case TargetOpcode::G_STORE:
- case TargetOpcode::G_LOAD:
- return selectLoadStoreOp(I, MRI, MF);
- case TargetOpcode::G_PTR_ADD:
- case TargetOpcode::G_FRAME_INDEX:
- return selectFrameIndexOrGep(I, MRI, MF);
- case TargetOpcode::G_GLOBAL_VALUE:
- return selectGlobalValue(I, MRI, MF);
- case TargetOpcode::G_CONSTANT:
- return selectConstant(I, MRI, MF);
- case TargetOpcode::G_FCONSTANT:
- return materializeFP(I, MRI, MF);
- case TargetOpcode::G_PTRTOINT:
- case TargetOpcode::G_TRUNC:
- return selectTruncOrPtrToInt(I, MRI, MF);
- case TargetOpcode::G_INTTOPTR:
- return selectCopy(I, MRI);
- case TargetOpcode::G_ZEXT:
- return selectZext(I, MRI, MF);
- case TargetOpcode::G_ANYEXT:
- return selectAnyext(I, MRI, MF);
- case TargetOpcode::G_ICMP:
- return selectCmp(I, MRI, MF);
- case TargetOpcode::G_FCMP:
- return selectFCmp(I, MRI, MF);
- case TargetOpcode::G_UADDE:
- return selectUadde(I, MRI, MF);
- case TargetOpcode::G_UNMERGE_VALUES:
- return selectUnmergeValues(I, MRI, MF);
- case TargetOpcode::G_MERGE_VALUES:
- case TargetOpcode::G_CONCAT_VECTORS:
- return selectMergeValues(I, MRI, MF);
- case TargetOpcode::G_EXTRACT:
- return selectExtract(I, MRI, MF);
- case TargetOpcode::G_INSERT:
- return selectInsert(I, MRI, MF);
- case TargetOpcode::G_BRCOND:
- return selectCondBranch(I, MRI, MF);
- case TargetOpcode::G_IMPLICIT_DEF:
- case TargetOpcode::G_PHI:
- return selectImplicitDefOrPHI(I, MRI);
- case TargetOpcode::G_SDIV:
- case TargetOpcode::G_UDIV:
- case TargetOpcode::G_SREM:
- case TargetOpcode::G_UREM:
- return selectDivRem(I, MRI, MF);
- case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
- return selectIntrinsicWSideEffects(I, MRI, MF);
- }
- return false;
- }
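- // Pick the x86 MOV* opcode for a load (G_LOAD) or store (G_STORE) of the
- // given type, register bank and alignment. For example, a 128-bit vector
- // load selects MOVAPSrm when 16-byte aligned and MOVUPSrm otherwise (or the
- // VEX/EVEX variants when AVX/AVX512 are available). Returns Opc unchanged
- // if no suitable opcode is known.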
- unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
- const RegisterBank &RB,
- unsigned Opc,
- Align Alignment) const {
- bool Isload = (Opc == TargetOpcode::G_LOAD);
- bool HasAVX = STI.hasAVX();
- bool HasAVX512 = STI.hasAVX512();
- bool HasVLX = STI.hasVLX();
- if (Ty == LLT::scalar(8)) {
- if (X86::GPRRegBankID == RB.getID())
- return Isload ? X86::MOV8rm : X86::MOV8mr;
- } else if (Ty == LLT::scalar(16)) {
- if (X86::GPRRegBankID == RB.getID())
- return Isload ? X86::MOV16rm : X86::MOV16mr;
- } else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
- if (X86::GPRRegBankID == RB.getID())
- return Isload ? X86::MOV32rm : X86::MOV32mr;
- if (X86::VECRRegBankID == RB.getID())
- return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt :
- HasAVX ? X86::VMOVSSrm_alt :
- X86::MOVSSrm_alt)
- : (HasAVX512 ? X86::VMOVSSZmr :
- HasAVX ? X86::VMOVSSmr :
- X86::MOVSSmr);
- } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
- if (X86::GPRRegBankID == RB.getID())
- return Isload ? X86::MOV64rm : X86::MOV64mr;
- if (X86::VECRRegBankID == RB.getID())
- return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt :
- HasAVX ? X86::VMOVSDrm_alt :
- X86::MOVSDrm_alt)
- : (HasAVX512 ? X86::VMOVSDZmr :
- HasAVX ? X86::VMOVSDmr :
- X86::MOVSDmr);
- } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
- if (Alignment >= Align(16))
- return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
- : HasAVX512
- ? X86::VMOVAPSZ128rm_NOVLX
- : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
- : (HasVLX ? X86::VMOVAPSZ128mr
- : HasAVX512
- ? X86::VMOVAPSZ128mr_NOVLX
- : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
- else
- return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
- : HasAVX512
- ? X86::VMOVUPSZ128rm_NOVLX
- : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
- : (HasVLX ? X86::VMOVUPSZ128mr
- : HasAVX512
- ? X86::VMOVUPSZ128mr_NOVLX
- : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
- } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
- if (Alignment >= Align(32))
- return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
- : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
- : X86::VMOVAPSYrm)
- : (HasVLX ? X86::VMOVAPSZ256mr
- : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
- : X86::VMOVAPSYmr);
- else
- return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
- : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
- : X86::VMOVUPSYrm)
- : (HasVLX ? X86::VMOVUPSZ256mr
- : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
- : X86::VMOVUPSYmr);
- } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
- if (Alignment >= Align(64))
- return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
- else
- return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
- }
- return Opc;
- }
- // Fill in an address from the given instruction.
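- // For example, G_PTR_ADD %base, cst folds into AM as Base = %base and
- // Disp = cst (when cst fits in 32 bits), and G_FRAME_INDEX becomes a
- // FrameIndexBase address. Anything else is used as a plain base register.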
- static void X86SelectAddress(const MachineInstr &I,
- const MachineRegisterInfo &MRI,
- X86AddressMode &AM) {
- assert(I.getOperand(0).isReg() && "unsupported operand.");
- assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
- "unsupported type.");
- if (I.getOpcode() == TargetOpcode::G_PTR_ADD) {
- if (auto COff = getIConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) {
- int64_t Imm = *COff;
- if (isInt<32>(Imm)) { // Check for displacement overflow.
- AM.Disp = static_cast<int32_t>(Imm);
- AM.Base.Reg = I.getOperand(1).getReg();
- return;
- }
- }
- } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
- AM.Base.FrameIndex = I.getOperand(1).getIndex();
- AM.BaseType = X86AddressMode::FrameIndexBase;
- return;
- }
- // Default behavior.
- AM.Base.Reg = I.getOperand(0).getReg();
- }
- bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- unsigned Opc = I.getOpcode();
- assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
- "unexpected instruction");
- const Register DefReg = I.getOperand(0).getReg();
- LLT Ty = MRI.getType(DefReg);
- const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
- assert(I.hasOneMemOperand());
- auto &MemOp = **I.memoperands_begin();
- if (MemOp.isAtomic()) {
- // Note: for unordered operations, we rely on the fact that the appropriate
- // is already on the instruction we're mutating, and thus we don't need to
- // make any changes. So long as we select an opcode which is capable of
- // loading or storing the appropriate size atomically, the rest of the
- // backend is required to respect the MMO state.
- if (!MemOp.isUnordered()) {
- LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");
- return false;
- }
- if (MemOp.getAlign() < Ty.getSizeInBits() / 8) {
- LLVM_DEBUG(dbgs() << "Unaligned atomics not supported yet\n");
- return false;
- }
- }
- unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlign());
- if (NewOpc == Opc)
- return false;
- X86AddressMode AM;
- X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);
- I.setDesc(TII.get(NewOpc));
- MachineInstrBuilder MIB(MF, I);
- if (Opc == TargetOpcode::G_LOAD) {
- I.RemoveOperand(1);
- addFullAddress(MIB, AM);
- } else {
- // G_STORE takes (VAL, Addr); the X86 store instruction takes (Addr, VAL).
- I.RemoveOperand(1);
- I.RemoveOperand(0);
- addFullAddress(MIB, AM).addUse(DefReg);
- }
- return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
- }
- static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
- if (Ty == LLT::pointer(0, 64))
- return X86::LEA64r;
- else if (Ty == LLT::pointer(0, 32))
- return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
- else
- llvm_unreachable("Can't get LEA opcode. Unsupported type.");
- }
- bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- unsigned Opc = I.getOpcode();
- assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_PTR_ADD) &&
- "unexpected instruction");
- const Register DefReg = I.getOperand(0).getReg();
- LLT Ty = MRI.getType(DefReg);
- // Use LEA to compute the address of a frame index or a pointer add.
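- // Roughly (illustrative): G_FRAME_INDEX becomes LEA [fi + 1*noreg + 0], and
- // G_PTR_ADD %base, %off becomes LEA [%base + 1*%off + 0].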
- unsigned NewOpc = getLeaOP(Ty, STI);
- I.setDesc(TII.get(NewOpc));
- MachineInstrBuilder MIB(MF, I);
- if (Opc == TargetOpcode::G_FRAME_INDEX) {
- addOffset(MIB, 0);
- } else {
- MachineOperand &InxOp = I.getOperand(2);
- I.addOperand(InxOp); // set IndexReg
- InxOp.ChangeToImmediate(1); // set Scale
- MIB.addImm(0).addReg(0);
- }
- return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
- }
- bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
- "unexpected instruction");
- auto GV = I.getOperand(1).getGlobal();
- if (GV->isThreadLocal()) {
- return false; // TODO: we don't support TLS yet.
- }
- // Can't handle alternate code models yet.
- if (TM.getCodeModel() != CodeModel::Small)
- return false;
- X86AddressMode AM;
- AM.GV = GV;
- AM.GVOpFlags = STI.classifyGlobalReference(GV);
- // TODO: The ABI requires an extra load; not supported yet.
- if (isGlobalStubReference(AM.GVOpFlags))
- return false;
- // TODO: This reference is relative to the PIC base; not supported yet.
- if (isGlobalRelativeToPICBase(AM.GVOpFlags))
- return false;
- if (STI.isPICStyleRIPRel()) {
- // Use rip-relative addressing.
- assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
- AM.Base.Reg = X86::RIP;
- }
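- // The global's address is then materialized with an LEA, e.g. (illustrative)
- // %dst = LEA64r $rip, 1, $noreg, @g, $noreg under RIP-relative addressing.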
- const Register DefReg = I.getOperand(0).getReg();
- LLT Ty = MRI.getType(DefReg);
- unsigned NewOpc = getLeaOP(Ty, STI);
- I.setDesc(TII.get(NewOpc));
- MachineInstrBuilder MIB(MF, I);
- I.RemoveOperand(1);
- addFullAddress(MIB, AM);
- return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
- }
- bool X86InstructionSelector::selectConstant(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
- "unexpected instruction");
- const Register DefReg = I.getOperand(0).getReg();
- LLT Ty = MRI.getType(DefReg);
- if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
- return false;
- uint64_t Val = 0;
- if (I.getOperand(1).isCImm()) {
- Val = I.getOperand(1).getCImm()->getZExtValue();
- I.getOperand(1).ChangeToImmediate(Val);
- } else if (I.getOperand(1).isImm()) {
- Val = I.getOperand(1).getImm();
- } else
- llvm_unreachable("Unsupported operand type.");
- unsigned NewOpc;
- switch (Ty.getSizeInBits()) {
- case 8:
- NewOpc = X86::MOV8ri;
- break;
- case 16:
- NewOpc = X86::MOV16ri;
- break;
- case 32:
- NewOpc = X86::MOV32ri;
- break;
- case 64:
- // TODO: in case isUInt<32>(Val), X86::MOV32ri can be used
- if (isInt<32>(Val))
- NewOpc = X86::MOV64ri32;
- else
- NewOpc = X86::MOV64ri;
- break;
- default:
- llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
- }
- I.setDesc(TII.get(NewOpc));
- return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
- }
- // Helper function for selectTruncOrPtrToInt and selectAnyext.
- // Returns true if DstRC is a floating-point register class and
- // SrcRC is a 128-bit vector register class.
- static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
- const TargetRegisterClass *SrcRC) {
- return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
- DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
- (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
- }
- bool X86InstructionSelector::selectTurnIntoCOPY(
- MachineInstr &I, MachineRegisterInfo &MRI, const unsigned DstReg,
- const TargetRegisterClass *DstRC, const unsigned SrcReg,
- const TargetRegisterClass *SrcRC) const {
- if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
- !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
- return false;
- }
- I.setDesc(TII.get(X86::COPY));
- return true;
- }
- bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
- I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
- "unexpected instruction");
- const Register DstReg = I.getOperand(0).getReg();
- const Register SrcReg = I.getOperand(1).getReg();
- const LLT DstTy = MRI.getType(DstReg);
- const LLT SrcTy = MRI.getType(SrcReg);
- const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
- const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
- if (DstRB.getID() != SrcRB.getID()) {
- LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
- << " input/output on different banks\n");
- return false;
- }
- const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
- const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);
- if (!DstRC || !SrcRC)
- return false;
- // If this is a truncation of a value that lives in a vector class into a
- // floating-point class, just replace it with a copy, as we are able to
- // select it as a regular move.
- if (canTurnIntoCOPY(DstRC, SrcRC))
- return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);
- if (DstRB.getID() != X86::GPRRegBankID)
- return false;
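- // A GPR-to-GPR truncation is selected as a subregister copy, e.g. an s64 to
- // s32 trunc becomes (illustrative) %dst:gr32 = COPY %src.sub_32bit.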
- unsigned SubIdx;
- if (DstRC == SrcRC) {
- // Nothing to be done
- SubIdx = X86::NoSubRegister;
- } else if (DstRC == &X86::GR32RegClass) {
- SubIdx = X86::sub_32bit;
- } else if (DstRC == &X86::GR16RegClass) {
- SubIdx = X86::sub_16bit;
- } else if (DstRC == &X86::GR8RegClass) {
- SubIdx = X86::sub_8bit;
- } else {
- return false;
- }
- SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);
- if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
- !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << "\n");
- return false;
- }
- I.getOperand(1).setSubReg(SubIdx);
- I.setDesc(TII.get(X86::COPY));
- return true;
- }
- bool X86InstructionSelector::selectZext(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");
- const Register DstReg = I.getOperand(0).getReg();
- const Register SrcReg = I.getOperand(1).getReg();
- const LLT DstTy = MRI.getType(DstReg);
- const LLT SrcTy = MRI.getType(SrcReg);
- assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(16)) &&
- "8=>16 Zext is handled by tablegen");
- assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
- "8=>32 Zext is handled by tablegen");
- assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
- "16=>32 Zext is handled by tablegen");
- assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(64)) &&
- "8=>64 Zext is handled by tablegen");
- assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(64)) &&
- "16=>64 Zext is handled by tablegen");
- assert(!(SrcTy == LLT::scalar(32) && DstTy == LLT::scalar(64)) &&
- "32=>64 Zext is handled by tablegen");
- if (SrcTy != LLT::scalar(1))
- return false;
- unsigned AndOpc;
- if (DstTy == LLT::scalar(8))
- AndOpc = X86::AND8ri;
- else if (DstTy == LLT::scalar(16))
- AndOpc = X86::AND16ri8;
- else if (DstTy == LLT::scalar(32))
- AndOpc = X86::AND32ri8;
- else if (DstTy == LLT::scalar(64))
- AndOpc = X86::AND64ri8;
- else
- return false;
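- // An s1 zext is selected as a masking AND. For destinations wider than s8,
- // the s1 value (held in an 8-bit register) is first placed into a wider
- // register, roughly (illustrative MIR):
- //   %imp = IMPLICIT_DEF
- //   %def = INSERT_SUBREG %imp, %src, %subreg.sub_8bit
- //   %dst = AND32ri8 %def, 1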
- Register DefReg = SrcReg;
- if (DstTy != LLT::scalar(8)) {
- Register ImpDefReg =
- MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
- BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);
- DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
- BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(TargetOpcode::INSERT_SUBREG), DefReg)
- .addReg(ImpDefReg)
- .addReg(SrcReg)
- .addImm(X86::sub_8bit);
- }
- MachineInstr &AndInst =
- *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
- .addReg(DefReg)
- .addImm(1);
- constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);
- I.eraseFromParent();
- return true;
- }
- bool X86InstructionSelector::selectAnyext(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");
- const Register DstReg = I.getOperand(0).getReg();
- const Register SrcReg = I.getOperand(1).getReg();
- const LLT DstTy = MRI.getType(DstReg);
- const LLT SrcTy = MRI.getType(SrcReg);
- const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
- const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
- assert(DstRB.getID() == SrcRB.getID() &&
- "G_ANYEXT input/output on different banks\n");
- assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
- "G_ANYEXT incorrect operand size");
- const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
- const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);
- // If this is an ANY_EXT of a value that lives in a floating-point class
- // into a vector class, just replace it with a copy, as we are able to
- // select it as a regular move.
- if (canTurnIntoCOPY(SrcRC, DstRC))
- return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);
- if (DstRB.getID() != X86::GPRRegBankID)
- return false;
- if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
- !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
- return false;
- }
- if (SrcRC == DstRC) {
- I.setDesc(TII.get(X86::COPY));
- return true;
- }
- BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(TargetOpcode::SUBREG_TO_REG))
- .addDef(DstReg)
- .addImm(0)
- .addReg(SrcReg)
- .addImm(getSubRegIndex(SrcRC));
- I.eraseFromParent();
- return true;
- }
- bool X86InstructionSelector::selectCmp(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");
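- // G_ICMP is selected as a register-register compare followed by a flag
- // materialization, e.g. (illustrative): CMP32rr %lhs, %rhs then
- // %dst = SETCCr <cc>, with operands swapped when the predicate requires it.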
- X86::CondCode CC;
- bool SwapArgs;
- std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
- (CmpInst::Predicate)I.getOperand(1).getPredicate());
- Register LHS = I.getOperand(2).getReg();
- Register RHS = I.getOperand(3).getReg();
- if (SwapArgs)
- std::swap(LHS, RHS);
- unsigned OpCmp;
- LLT Ty = MRI.getType(LHS);
- switch (Ty.getSizeInBits()) {
- default:
- return false;
- case 8:
- OpCmp = X86::CMP8rr;
- break;
- case 16:
- OpCmp = X86::CMP16rr;
- break;
- case 32:
- OpCmp = X86::CMP32rr;
- break;
- case 64:
- OpCmp = X86::CMP64rr;
- break;
- }
- MachineInstr &CmpInst =
- *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
- .addReg(LHS)
- .addReg(RHS);
- MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(X86::SETCCr), I.getOperand(0).getReg()).addImm(CC);
- constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
- constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);
- I.eraseFromParent();
- return true;
- }
- bool X86InstructionSelector::selectFCmp(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");
- Register LhsReg = I.getOperand(2).getReg();
- Register RhsReg = I.getOperand(3).getReg();
- CmpInst::Predicate Predicate =
- (CmpInst::Predicate)I.getOperand(1).getPredicate();
- // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
- static const uint16_t SETFOpcTable[2][3] = {
- {X86::COND_E, X86::COND_NP, X86::AND8rr},
- {X86::COND_NE, X86::COND_P, X86::OR8rr}};
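- // E.g. FCMP_OEQ is lowered (illustratively) as:
- //   UCOMISSrr %lhs, %rhs
- //   %f1 = SETCCr COND_E; %f2 = SETCCr COND_NP; %dst = AND8rr %f1, %f2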
- const uint16_t *SETFOpc = nullptr;
- switch (Predicate) {
- default:
- break;
- case CmpInst::FCMP_OEQ:
- SETFOpc = &SETFOpcTable[0][0];
- break;
- case CmpInst::FCMP_UNE:
- SETFOpc = &SETFOpcTable[1][0];
- break;
- }
- // Compute the opcode for the CMP instruction.
- unsigned OpCmp;
- LLT Ty = MRI.getType(LhsReg);
- switch (Ty.getSizeInBits()) {
- default:
- return false;
- case 32:
- OpCmp = X86::UCOMISSrr;
- break;
- case 64:
- OpCmp = X86::UCOMISDrr;
- break;
- }
- Register ResultReg = I.getOperand(0).getReg();
- RBI.constrainGenericRegister(
- ResultReg,
- *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
- if (SETFOpc) {
- MachineInstr &CmpInst =
- *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
- .addReg(LhsReg)
- .addReg(RhsReg);
- Register FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
- Register FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
- MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(X86::SETCCr), FlagReg1).addImm(SETFOpc[0]);
- MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(X86::SETCCr), FlagReg2).addImm(SETFOpc[1]);
- MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(SETFOpc[2]), ResultReg)
- .addReg(FlagReg1)
- .addReg(FlagReg2);
- constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
- constrainSelectedInstRegOperands(Set1, TII, TRI, RBI);
- constrainSelectedInstRegOperands(Set2, TII, TRI, RBI);
- constrainSelectedInstRegOperands(Set3, TII, TRI, RBI);
- I.eraseFromParent();
- return true;
- }
- X86::CondCode CC;
- bool SwapArgs;
- std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
- assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
- if (SwapArgs)
- std::swap(LhsReg, RhsReg);
- // Emit a compare of LHS/RHS.
- MachineInstr &CmpInst =
- *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
- .addReg(LhsReg)
- .addReg(RhsReg);
- MachineInstr &Set =
- *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr), ResultReg).addImm(CC);
- constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
- constrainSelectedInstRegOperands(Set, TII, TRI, RBI);
- I.eraseFromParent();
- return true;
- }
- bool X86InstructionSelector::selectUadde(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- assert((I.getOpcode() == TargetOpcode::G_UADDE) && "unexpected instruction");
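- // G_UADDE dst, carry_out, op0, op1, carry_in is an add with carry. It is
- // selected as ADC32rr when the incoming carry comes from a previous G_UADDE
- // (threaded through EFLAGS), or as ADD32rr when the carry is the constant 0.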
- const Register DstReg = I.getOperand(0).getReg();
- const Register CarryOutReg = I.getOperand(1).getReg();
- const Register Op0Reg = I.getOperand(2).getReg();
- const Register Op1Reg = I.getOperand(3).getReg();
- Register CarryInReg = I.getOperand(4).getReg();
- const LLT DstTy = MRI.getType(DstReg);
- if (DstTy != LLT::scalar(32))
- return false;
- // Find the instruction that defines CarryIn, looking through G_TRUNCs.
- MachineInstr *Def = MRI.getVRegDef(CarryInReg);
- while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
- CarryInReg = Def->getOperand(1).getReg();
- Def = MRI.getVRegDef(CarryInReg);
- }
- unsigned Opcode;
- if (Def->getOpcode() == TargetOpcode::G_UADDE) {
- // The carry was set by a previous add; copy it into EFLAGS.
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), X86::EFLAGS)
- .addReg(CarryInReg);
- if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI))
- return false;
- Opcode = X86::ADC32rr;
- } else if (auto val = getIConstantVRegVal(CarryInReg, MRI)) {
- // The carry is a constant; only 0 is supported.
- if (*val != 0)
- return false;
- Opcode = X86::ADD32rr;
- } else
- return false;
- MachineInstr &AddInst =
- *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
- .addReg(Op0Reg)
- .addReg(Op1Reg);
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
- .addReg(X86::EFLAGS);
- if (!constrainSelectedInstRegOperands(AddInst, TII, TRI, RBI) ||
- !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI))
- return false;
- I.eraseFromParent();
- return true;
- }
- bool X86InstructionSelector::selectExtract(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
- "unexpected instruction");
- const Register DstReg = I.getOperand(0).getReg();
- const Register SrcReg = I.getOperand(1).getReg();
- int64_t Index = I.getOperand(2).getImm();
- const LLT DstTy = MRI.getType(DstReg);
- const LLT SrcTy = MRI.getType(SrcReg);
- // Meanwhile, handle vector types only.
- if (!DstTy.isVector())
- return false;
- if (Index % DstTy.getSizeInBits() != 0)
- return false; // Not a subvector extract.
- if (Index == 0) {
- // Replace by extract subreg copy.
- if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
- return false;
- I.eraseFromParent();
- return true;
- }
- bool HasAVX = STI.hasAVX();
- bool HasAVX512 = STI.hasAVX512();
- bool HasVLX = STI.hasVLX();
- if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
- if (HasVLX)
- I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
- else if (HasAVX)
- I.setDesc(TII.get(X86::VEXTRACTF128rr));
- else
- return false;
- } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
- if (DstTy.getSizeInBits() == 128)
- I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
- else if (DstTy.getSizeInBits() == 256)
- I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
- else
- return false;
- } else
- return false;
- // Convert to X86 VEXTRACT immediate.
- Index = Index / DstTy.getSizeInBits();
- I.getOperand(2).setImm(Index);
- return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
- }
- bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
- MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- const LLT DstTy = MRI.getType(DstReg);
- const LLT SrcTy = MRI.getType(SrcReg);
- unsigned SubIdx = X86::NoSubRegister;
- if (!DstTy.isVector() || !SrcTy.isVector())
- return false;
- assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
- "Incorrect Src/Dst register size");
- if (DstTy.getSizeInBits() == 128)
- SubIdx = X86::sub_xmm;
- else if (DstTy.getSizeInBits() == 256)
- SubIdx = X86::sub_ymm;
- else
- return false;
- const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
- const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
- SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);
- if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
- !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- LLVM_DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");
- return false;
- }
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
- .addReg(SrcReg, 0, SubIdx);
- return true;
- }
- bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
- MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- const LLT DstTy = MRI.getType(DstReg);
- const LLT SrcTy = MRI.getType(SrcReg);
- unsigned SubIdx = X86::NoSubRegister;
- // TODO: support scalar types
- if (!DstTy.isVector() || !SrcTy.isVector())
- return false;
- assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
- "Incorrect Src/Dst register size");
- if (SrcTy.getSizeInBits() == 128)
- SubIdx = X86::sub_xmm;
- else if (SrcTy.getSizeInBits() == 256)
- SubIdx = X86::sub_ymm;
- else
- return false;
- const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
- const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
- if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
- !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
- return false;
- }
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
- .addReg(DstReg, RegState::DefineNoRead, SubIdx)
- .addReg(SrcReg);
- return true;
- }
- bool X86InstructionSelector::selectInsert(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");
- const Register DstReg = I.getOperand(0).getReg();
- const Register SrcReg = I.getOperand(1).getReg();
- const Register InsertReg = I.getOperand(2).getReg();
- int64_t Index = I.getOperand(3).getImm();
- const LLT DstTy = MRI.getType(DstReg);
- const LLT InsertRegTy = MRI.getType(InsertReg);
- // Meanwhile, handle vector types only.
- if (!DstTy.isVector())
- return false;
- if (Index % InsertRegTy.getSizeInBits() != 0)
- return false; // Not a subvector insert.
- if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
- // Replace by subreg copy.
- if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
- return false;
- I.eraseFromParent();
- return true;
- }
- bool HasAVX = STI.hasAVX();
- bool HasAVX512 = STI.hasAVX512();
- bool HasVLX = STI.hasVLX();
- if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
- if (HasVLX)
- I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
- else if (HasAVX)
- I.setDesc(TII.get(X86::VINSERTF128rr));
- else
- return false;
- } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
- if (InsertRegTy.getSizeInBits() == 128)
- I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
- else if (InsertRegTy.getSizeInBits() == 256)
- I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
- else
- return false;
- } else
- return false;
- // Convert to X86 VINSERT immediate.
- Index = Index / InsertRegTy.getSizeInBits();
- I.getOperand(3).setImm(Index);
- return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
- }
- bool X86InstructionSelector::selectUnmergeValues(
- MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
- assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
- "unexpected instruction");
- // Split to extracts.
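- // E.g. (illustrative) %lo, %hi = G_UNMERGE_VALUES %v(<8 x s32>) becomes
- //   %lo = G_EXTRACT %v, 0
- //   %hi = G_EXTRACT %v, 128
- // and each G_EXTRACT is then re-selected.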
- unsigned NumDefs = I.getNumOperands() - 1;
- Register SrcReg = I.getOperand(NumDefs).getReg();
- unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
- for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
- MachineInstr &ExtrInst =
- *BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
- .addReg(SrcReg)
- .addImm(Idx * DefSize);
- if (!select(ExtrInst))
- return false;
- }
- I.eraseFromParent();
- return true;
- }
- bool X86InstructionSelector::selectMergeValues(
- MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
- assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
- I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
- "unexpected instruction");
- // Split to inserts.
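- // E.g. (illustrative) %v(<8 x s32>) = G_CONCAT_VECTORS %a, %b becomes a
- // subregister insert of %a into a fresh register, then
- //   %t = G_INSERT %def, %b, 128
- // followed by a COPY into %v; the G_INSERTs are re-selected recursively.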
- Register DstReg = I.getOperand(0).getReg();
- Register SrcReg0 = I.getOperand(1).getReg();
- const LLT DstTy = MRI.getType(DstReg);
- const LLT SrcTy = MRI.getType(SrcReg0);
- unsigned SrcSize = SrcTy.getSizeInBits();
- const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
- // For the first source, use emitInsertSubreg.
- Register DefReg = MRI.createGenericVirtualRegister(DstTy);
- MRI.setRegBank(DefReg, RegBank);
- if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
- return false;
- for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
- Register Tmp = MRI.createGenericVirtualRegister(DstTy);
- MRI.setRegBank(Tmp, RegBank);
- MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(TargetOpcode::G_INSERT), Tmp)
- .addReg(DefReg)
- .addReg(I.getOperand(Idx).getReg())
- .addImm((Idx - 1) * SrcSize);
- DefReg = Tmp;
- if (!select(InsertInst))
- return false;
- }
- MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(TargetOpcode::COPY), DstReg)
- .addReg(DefReg);
- if (!select(CopyInst))
- return false;
- I.eraseFromParent();
- return true;
- }
- bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");
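- // G_BRCOND %cond, %bb is selected as (illustrative):
- //   TEST8ri %cond, 1
- //   JCC_1 %bb, COND_NE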
- const Register CondReg = I.getOperand(0).getReg();
- MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
- MachineInstr &TestInst =
- *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
- .addReg(CondReg)
- .addImm(1);
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JCC_1))
- .addMBB(DestMBB).addImm(X86::COND_NE);
- constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);
- I.eraseFromParent();
- return true;
- }
- bool X86InstructionSelector::materializeFP(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
- "unexpected instruction");
- // Can't handle alternate code models yet.
- CodeModel::Model CM = TM.getCodeModel();
- if (CM != CodeModel::Small && CM != CodeModel::Large)
- return false;
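- // The constant is placed in the constant pool and loaded from there: under
- // the large code model on x86-64 its address is first materialized with
- // MOV64ri, otherwise a (possibly RIP-relative) constant-pool reference is
- // folded directly into the load.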
- const Register DstReg = I.getOperand(0).getReg();
- const LLT DstTy = MRI.getType(DstReg);
- const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
- Align Alignment = Align(DstTy.getSizeInBytes());
- const DebugLoc &DbgLoc = I.getDebugLoc();
- unsigned Opc =
- getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);
- // Create the load from the constant pool.
- const ConstantFP *CFP = I.getOperand(1).getFPImm();
- unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Alignment);
- MachineInstr *LoadInst = nullptr;
- unsigned char OpFlag = STI.classifyLocalReference(nullptr);
- if (CM == CodeModel::Large && STI.is64Bit()) {
- // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
- // they cannot be folded into immediate fields.
- Register AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
- BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
- .addConstantPoolIndex(CPI, 0, OpFlag);
- MachineMemOperand *MMO = MF.getMachineMemOperand(
- MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
- MF.getDataLayout().getPointerSize(), Alignment);
- LoadInst =
- addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
- AddrReg)
- .addMemOperand(MMO);
- } else if (CM == CodeModel::Small || !STI.is64Bit()) {
- // Handle the case when globals fit in our immediate field.
- // This is true for X86-32 always and X86-64 when in -mcmodel=small mode.
- // x86-32 PIC requires a PIC base register for constant pools.
- unsigned PICBase = 0;
- if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
- // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
- // In DAG ISel, the code that initializes it is generated by the CGBR pass.
- return false; // TODO: support this mode.
- } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
- PICBase = X86::RIP;
- LoadInst = addConstantPoolReference(
- BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
- OpFlag);
- } else
- return false;
- constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
- I.eraseFromParent();
- return true;
- }
- bool X86InstructionSelector::selectImplicitDefOrPHI(
- MachineInstr &I, MachineRegisterInfo &MRI) const {
- assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
- I.getOpcode() == TargetOpcode::G_PHI) &&
- "unexpected instruction");
- Register DstReg = I.getOperand(0).getReg();
- if (!MRI.getRegClassOrNull(DstReg)) {
- const LLT DstTy = MRI.getType(DstReg);
- const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);
- if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
- LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
- return false;
- }
- }
- if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
- I.setDesc(TII.get(X86::IMPLICIT_DEF));
- else
- I.setDesc(TII.get(X86::PHI));
- return true;
- }
- bool X86InstructionSelector::selectDivRem(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- // The implementation of this function is taken from X86FastISel.
- assert((I.getOpcode() == TargetOpcode::G_SDIV ||
- I.getOpcode() == TargetOpcode::G_SREM ||
- I.getOpcode() == TargetOpcode::G_UDIV ||
- I.getOpcode() == TargetOpcode::G_UREM) &&
- "unexpected instruction");
- const Register DstReg = I.getOperand(0).getReg();
- const Register Op1Reg = I.getOperand(1).getReg();
- const Register Op2Reg = I.getOperand(2).getReg();
- const LLT RegTy = MRI.getType(DstReg);
- assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&
- "Arguments and return value types must match");
- const RegisterBank *RegRB = RBI.getRegBank(DstReg, MRI, TRI);
- if (!RegRB || RegRB->getID() != X86::GPRRegBankID)
- return false;
- const static unsigned NumTypes = 4; // i8, i16, i32, i64
- const static unsigned NumOps = 4; // SDiv, SRem, UDiv, URem
- const static bool S = true; // IsSigned
- const static bool U = false; // !IsSigned
- const static unsigned Copy = TargetOpcode::COPY;
- // For the X86 IDIV instruction, in most cases the dividend
- // (numerator) must be in a specific register pair highreg:lowreg,
- // producing the quotient in lowreg and the remainder in highreg.
- // For most data types, to set up the instruction, the dividend is
- // copied into lowreg, and lowreg is sign-extended into highreg. The
- // exception is i8, where the dividend is defined as a single register rather
- // than a register pair, and we therefore directly sign-extend the dividend
- // into lowreg, instead of copying, and ignore the highreg.
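- // E.g. a 32-bit G_SDIV is selected (illustratively) as:
- //   $eax = COPY %op1; CDQ; IDIV32r %op2; %dst = COPY $eax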
- const static struct DivRemEntry {
- // The following portion depends only on the data type.
- unsigned SizeInBits;
- unsigned LowInReg; // low part of the register pair
- unsigned HighInReg; // high part of the register pair
- // The following portion depends on both the data type and the operation.
- struct DivRemResult {
- unsigned OpDivRem; // The specific DIV/IDIV opcode to use.
- unsigned OpSignExtend; // Opcode for sign-extending lowreg into
- // highreg, or copying a zero into highreg.
- unsigned OpCopy; // Opcode for copying dividend into lowreg, or
- // zero/sign-extending into lowreg for i8.
- unsigned DivRemResultReg; // Register containing the desired result.
- bool IsOpSigned; // Whether to use signed or unsigned form.
- } ResultTable[NumOps];
- } OpTable[NumTypes] = {
- {8,
- X86::AX,
- 0,
- {
- {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S}, // SDiv
- {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SRem
- {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U}, // UDiv
- {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U}, // URem
- }}, // i8
- {16,
- X86::AX,
- X86::DX,
- {
- {X86::IDIV16r, X86::CWD, Copy, X86::AX, S}, // SDiv
- {X86::IDIV16r, X86::CWD, Copy, X86::DX, S}, // SRem
- {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U}, // UDiv
- {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U}, // URem
- }}, // i16
- {32,
- X86::EAX,
- X86::EDX,
- {
- {X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S}, // SDiv
- {X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S}, // SRem
- {X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U}, // UDiv
- {X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U}, // URem
- }}, // i32
- {64,
- X86::RAX,
- X86::RDX,
- {
- {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S}, // SDiv
- {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S}, // SRem
- {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U}, // UDiv
- {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U}, // URem
- }}, // i64
- };
- auto OpEntryIt = llvm::find_if(OpTable, [RegTy](const DivRemEntry &El) {
- return El.SizeInBits == RegTy.getSizeInBits();
- });
- if (OpEntryIt == std::end(OpTable))
- return false;
- unsigned OpIndex;
- switch (I.getOpcode()) {
- default:
- llvm_unreachable("Unexpected div/rem opcode");
- case TargetOpcode::G_SDIV:
- OpIndex = 0;
- break;
- case TargetOpcode::G_SREM:
- OpIndex = 1;
- break;
- case TargetOpcode::G_UDIV:
- OpIndex = 2;
- break;
- case TargetOpcode::G_UREM:
- OpIndex = 3;
- break;
- }
- const DivRemEntry &TypeEntry = *OpEntryIt;
- const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];
- const TargetRegisterClass *RegRC = getRegClass(RegTy, *RegRB);
- if (!RBI.constrainGenericRegister(Op1Reg, *RegRC, MRI) ||
- !RBI.constrainGenericRegister(Op2Reg, *RegRC, MRI) ||
- !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
- LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
- return false;
- }
- // Move op1 into low-order input register.
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpCopy),
- TypeEntry.LowInReg)
- .addReg(Op1Reg);
- // Zero-extend or sign-extend into high-order input register.
- if (OpEntry.OpSignExtend) {
- if (OpEntry.IsOpSigned)
- BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(OpEntry.OpSignExtend));
- else {
- Register Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),
- Zero32);
- // Copy the zero into the appropriate sub/super/identical physical
- // register. Unfortunately the operations needed are not uniform enough
- // to fit neatly into the table above.
- if (RegTy.getSizeInBits() == 16) {
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
- TypeEntry.HighInReg)
- .addReg(Zero32, 0, X86::sub_16bit);
- } else if (RegTy.getSizeInBits() == 32) {
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
- TypeEntry.HighInReg)
- .addReg(Zero32);
- } else if (RegTy.getSizeInBits() == 64) {
- BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
- .addImm(0)
- .addReg(Zero32)
- .addImm(X86::sub_32bit);
- }
- }
- }
- // Generate the DIV/IDIV instruction.
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpDivRem))
- .addReg(Op2Reg);
- // For i8 remainder, we can't reference ah directly, as we'll end
- // up with bogus copies like %r9b = COPY %ah. Reference ax
- // instead to prevent ah references in a rex instruction.
- //
- // The current assumption of the fast register allocator is that isel
- // won't generate explicit references to the GR8_NOREX registers. If
- // the allocator and/or the backend get enhanced to be more robust in
- // that regard, this can be, and should be, removed.
- if ((I.getOpcode() == TargetOpcode::G_SREM ||
- I.getOpcode() == TargetOpcode::G_UREM) &&
- OpEntry.DivRemResultReg == X86::AH && STI.is64Bit()) {
- Register SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
- Register ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)
- .addReg(X86::AX);
- // Shift AX right by 8 bits instead of using AH.
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SHR16ri),
- ResultSuperReg)
- .addReg(SourceSuperReg)
- .addImm(8);
- // Now reference the 8-bit subreg of the result.
- BuildMI(*I.getParent(), I, I.getDebugLoc(),
- TII.get(TargetOpcode::SUBREG_TO_REG))
- .addDef(DstReg)
- .addImm(0)
- .addReg(ResultSuperReg)
- .addImm(X86::sub_8bit);
- } else {
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
- DstReg)
- .addReg(OpEntry.DivRemResultReg);
- }
- I.eraseFromParent();
- return true;
- }
- bool X86InstructionSelector::selectIntrinsicWSideEffects(
- MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) const {
- assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
- "unexpected instruction");
- if (I.getOperand(0).getIntrinsicID() != Intrinsic::trap)
- return false;
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TRAP));
- I.eraseFromParent();
- return true;
- }
- InstructionSelector *
- llvm::createX86InstructionSelector(const X86TargetMachine &TM,
- X86Subtarget &Subtarget,
- X86RegisterBankInfo &RBI) {
- return new X86InstructionSelector(TM, Subtarget, RBI);
- }