//===- ARMISelLowering.h - ARM DAG Lowering Interface -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
#define LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H

#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/MachineValueType.h"
#include <optional>
#include <utility>

namespace llvm {

class ARMSubtarget;
class DataLayout;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class InstrItineraryData;
class Instruction;
class MachineBasicBlock;
class MachineInstr;
class SelectionDAG;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterInfo;
class VectorType;

namespace ARMISD {

// ARM Specific DAG Nodes
enum NodeType : unsigned {
  // Start the numbering where the builtin ops and target ops leave off.
  FIRST_NUMBER = ISD::BUILTIN_OP_END,

  Wrapper, // Wrapper - A wrapper node for TargetConstantPool,
           // TargetExternalSymbol, and TargetGlobalAddress.
  WrapperPIC, // WrapperPIC - A wrapper node for TargetGlobalAddress in
              // PIC mode.
  WrapperJT, // WrapperJT - A wrapper node for TargetJumpTable

  // Pseudo op to model memcpy for struct byval.
  COPY_STRUCT_BYVAL,

  CALL,        // Function call.
  CALL_PRED,   // Function call that's predicable.
  CALL_NOLINK, // Function call with branch not branch-and-link.
  tSECALL,     // CMSE non-secure function call.
  t2CALL_BTI,  // Thumb function call followed by BTI instruction.
  BRCOND,      // Conditional branch.
  BR_JT,       // Jumptable branch.
  BR2_JT,      // Jumptable branch (2 level - jumptable entry is a jump).
  RET_FLAG,    // Return with a flag operand.
  SERET_FLAG,  // CMSE Entry function return with a flag operand.
  INTRET_FLAG, // Interrupt return with an LR-offset and a flag operand.

  PIC_ADD, // Add with a PC operand and a PIC label.

  ASRL, // MVE long arithmetic shift right.
  LSRL, // MVE long shift right.
  LSLL, // MVE long shift left.

  CMP,      // ARM compare instructions.
  CMN,      // ARM CMN instructions.
  CMPZ,     // ARM compare that sets only Z flag.
  CMPFP,    // ARM VFP compare instruction, sets FPSCR.
  CMPFPE,   // ARM VFP signalling compare instruction, sets FPSCR.
  CMPFPw0,  // ARM VFP compare against zero instruction, sets FPSCR.
  CMPFPEw0, // ARM VFP signalling compare against zero instruction, sets
            // FPSCR.
  FMSTAT,   // ARM fmstat instruction.

  CMOV, // ARM conditional move instructions.
  SUBS, // Flag-setting subtraction.

  SSAT, // Signed saturation
  USAT, // Unsigned saturation

  BCC_i64, // Conditional branch on the result of an i64 comparison.

  SRL_FLAG, // V,Flag = srl_flag X -> srl X, 1 + save carry out.
  SRA_FLAG, // V,Flag = sra_flag X -> sra X, 1 + save carry out.
  RRX,      // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

  ADDC, // Add with carry
  ADDE, // Add using carry
  SUBC, // Sub with carry
  SUBE, // Sub using carry
  LSLS, // Shift left producing carry

  VMOVRRD, // Double to two gprs.
  VMOVDRR, // Two gprs to double.
  VMOVSR,  // Move gpr to single, used for an f32 literal constructed in a gpr

  EH_SJLJ_SETJMP,         // SjLj exception handling setjmp.
  EH_SJLJ_LONGJMP,        // SjLj exception handling longjmp.
  EH_SJLJ_SETUP_DISPATCH, // SjLj exception handling setup_dispatch.

  TC_RETURN, // Tail call return pseudo.

  THREAD_POINTER, // Read the current thread pointer.

  DYN_ALLOC, // Dynamic allocation on the stack.

  MEMBARRIER_MCR, // Memory barrier (MCR)

  PRELOAD, // Preload

  WIN__CHKSTK, // Windows' __chkstk call to do stack probing.
  WIN__DBZCHK, // Windows' divide by zero check

  WLS, // Low-overhead loops, While Loop Start branch. See t2WhileLoopStart
  WLSSETUP, // Setup for the iteration count of a WLS. See t2WhileLoopSetup.
  LOOP_DEC, // Really a part of LE, performs the sub
  LE,       // Low-overhead loops, Loop End

  PREDICATE_CAST,  // Predicate cast for MVE i1 types
  VECTOR_REG_CAST, // Reinterpret the current contents of a vector register

  MVESEXT,  // Legalization aids for extending a vector into two/four vectors.
  MVEZEXT,  //  or truncating two/four vectors into one. Eventually becomes
  MVETRUNC, //  stack store/load sequence, if not optimized to anything else.

  VCMP,  // Vector compare.
  VCMPZ, // Vector compare to zero.
  VTST,  // Vector test bits.

  // Vector shift by vector
  VSHLs, // ...left/right by signed
  VSHLu, // ...left/right by unsigned

  // Vector shift by immediate:
  VSHLIMM,  // ...left
  VSHRsIMM, // ...right (signed)
  VSHRuIMM, // ...right (unsigned)

  // Vector rounding shift by immediate:
  VRSHRsIMM, // ...right (signed)
  VRSHRuIMM, // ...right (unsigned)
  VRSHRNIMM, // ...right narrow

  // Vector saturating shift by immediate:
  VQSHLsIMM,   // ...left (signed)
  VQSHLuIMM,   // ...left (unsigned)
  VQSHLsuIMM,  // ...left (signed to unsigned)
  VQSHRNsIMM,  // ...right narrow (signed)
  VQSHRNuIMM,  // ...right narrow (unsigned)
  VQSHRNsuIMM, // ...right narrow (signed to unsigned)

  // Vector saturating rounding shift by immediate:
  VQRSHRNsIMM,  // ...right narrow (signed)
  VQRSHRNuIMM,  // ...right narrow (unsigned)
  VQRSHRNsuIMM, // ...right narrow (signed to unsigned)

  // Vector shift and insert:
  VSLIIMM, // ...left
  VSRIIMM, // ...right

  // Vector get lane (VMOV scalar to ARM core register)
  // (These are used for 8- and 16-bit element types only.)
  VGETLANEu, // zero-extend vector extract element
  VGETLANEs, // sign-extend vector extract element

  // Vector move immediate and move negated immediate:
  VMOVIMM,
  VMVNIMM,

  // Vector move f32 immediate:
  VMOVFPIMM,

  // Move H <-> R, clearing top 16 bits
  VMOVrh,
  VMOVhr,

  // Vector duplicate:
  VDUP,
  VDUPLANE,

  // Vector shuffles:
  VEXT,   // extract
  VREV64, // reverse elements within 64-bit doublewords
  VREV32, // reverse elements within 32-bit words
  VREV16, // reverse elements within 16-bit halfwords
  VZIP,   // zip (interleave)
  VUZP,   // unzip (deinterleave)
  VTRN,   // transpose
  VTBL1,  // 1-register shuffle with mask
  VTBL2,  // 2-register shuffle with mask
  VMOVN,  // MVE vmovn

  // MVE Saturating truncates
  VQMOVNs, // Vector (V) Saturating (Q) Move and Narrow (N), signed (s)
  VQMOVNu, // Vector (V) Saturating (Q) Move and Narrow (N), unsigned (u)

  // MVE float <> half converts
  VCVTN, // MVE vcvt f32 -> f16, truncating into either the bottom or top
         // lanes
  VCVTL, // MVE vcvt f16 -> f32, extending from either the bottom or top lanes

  // MVE VIDUP instruction, taking a start value and increment.
  VIDUP,

  // Vector multiply long:
  VMULLs, // ...signed
  VMULLu, // ...unsigned

  VQDMULH, // MVE vqdmulh instruction

  // MVE reductions
  VADDVs,  // sign- or zero-extend the elements of a vector to i32,
  VADDVu,  //  add them all together, and return an i32 of their sum
  VADDVps, // Same as VADDV[su] but with a v4i1 predicate mask
  VADDVpu,
  VADDLVs,  // sign- or zero-extend elements to i64 and sum, returning
  VADDLVu,  //  the low and high 32-bit halves of the sum
  VADDLVAs, // Same as VADDLV[su] but also add an input accumulator
  VADDLVAu, //  provided as low and high halves
  VADDLVps, // Same as VADDLV[su] but with a v4i1 predicate mask
  VADDLVpu,
  VADDLVAps, // Same as VADDLVp[su] but with a v4i1 predicate mask
  VADDLVApu,
  VMLAVs, // sign- or zero-extend the elements of two vectors to i32, multiply
          // them
  VMLAVu, // and add the results together, returning an i32 of their sum
  VMLAVps, // Same as VMLAV[su] with a v4i1 predicate mask
  VMLAVpu,
  VMLALVs,  // Same as VMLAV but with i64, returning the low and
  VMLALVu,  //  high 32-bit halves of the sum
  VMLALVps, // Same as VMLALV[su] with a v4i1 predicate mask
  VMLALVpu,
  VMLALVAs,  // Same as VMLALV but also add an input accumulator
  VMLALVAu,  //  provided as low and high halves
  VMLALVAps, // Same as VMLALVA[su] with a v4i1 predicate mask
  VMLALVApu,
  VMINVu, // Find minimum unsigned value of a vector and register
  VMINVs, // Find minimum signed value of a vector and register
  VMAXVu, // Find maximum unsigned value of a vector and register
  VMAXVs, // Find maximum signed value of a vector and register

  SMULWB,  // Signed multiply word by half word, bottom
  SMULWT,  // Signed multiply word by half word, top
  UMLAL,   // 64-bit Unsigned Accumulate Multiply
  SMLAL,   // 64-bit Signed Accumulate Multiply
  UMAAL,   // 64-bit Unsigned Accumulate Accumulate Multiply
  SMLALBB, // 64-bit signed accumulate multiply bottom, bottom 16
  SMLALBT, // 64-bit signed accumulate multiply bottom, top 16
  SMLALTB, // 64-bit signed accumulate multiply top, bottom 16
  SMLALTT, // 64-bit signed accumulate multiply top, top 16
  SMLALD,  // Signed multiply accumulate long dual
  SMLALDX, // Signed multiply accumulate long dual exchange
  SMLSLD,  // Signed multiply subtract long dual
  SMLSLDX, // Signed multiply subtract long dual exchange
  SMMLAR,  // Signed multiply long, round and add
  SMMLSR,  // Signed multiply long, subtract and round

  // Single Lane QADD8 and QADD16. Only the bottom lane. That's what the b
  // stands for.
  QADD8b,
  QSUB8b,
  QADD16b,
  QSUB16b,
  UQADD8b,
  UQSUB8b,
  UQADD16b,
  UQSUB16b,

  // Operands of the standard BUILD_VECTOR node are not legalized, which
  // is fine if BUILD_VECTORs are always lowered to shuffles or other
  // operations, but for ARM some BUILD_VECTORs are legal as-is and their
  // operands need to be legalized. Define an ARM-specific version of
  // BUILD_VECTOR for this purpose.
  BUILD_VECTOR,

  // Bit-field insert
  BFI,

  // Vector OR with immediate
  VORRIMM,
  // Vector AND with NOT of immediate
  VBICIMM,

  // Pseudo vector bitwise select
  VBSP,

  // Pseudo-instruction representing a memory copy using ldm/stm
  // instructions.
  MEMCPY,

  // Pseudo-instruction representing a memory copy using a tail predicated
  // loop
  MEMCPYLOOP,

  // Pseudo-instruction representing a memset using a tail predicated
  // loop
  MEMSETLOOP,

  // v8.1-M Mainline condition select
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.
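
  // Note: all remaining opcodes (the memory loads/stores below) carry memory
  // operands; target opcodes that touch memory must be numbered from
  // ISD::FIRST_TARGET_MEMORY_OPCODE upwards so that generic SelectionDAG code
  // (SDNode::isTargetMemoryOpcode) recognizes them as memory operations.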

  // Vector load N-element structure to all lanes:
  VLD1DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
  VLD2DUP,
  VLD3DUP,
  VLD4DUP,

  // NEON loads with post-increment base updates:
  VLD1_UPD,
  VLD2_UPD,
  VLD3_UPD,
  VLD4_UPD,
  VLD2LN_UPD,
  VLD3LN_UPD,
  VLD4LN_UPD,
  VLD1DUP_UPD,
  VLD2DUP_UPD,
  VLD3DUP_UPD,
  VLD4DUP_UPD,
  VLD1x2_UPD,
  VLD1x3_UPD,
  VLD1x4_UPD,

  // NEON stores with post-increment base updates:
  VST1_UPD,
  VST2_UPD,
  VST3_UPD,
  VST4_UPD,
  VST2LN_UPD,
  VST3LN_UPD,
  VST4LN_UPD,
  VST1x2_UPD,
  VST1x3_UPD,
  VST1x4_UPD,

  // Load/Store of dual registers
  LDRD,
  STRD
};

} // end namespace ARMISD

namespace ARM {

/// Possible values of current rounding mode, which is specified in bits
/// 23:22 of FPSCR.
enum Rounding {
  RN = 0,    // Round to Nearest
  RP = 1,    // Round towards Plus infinity
  RM = 2,    // Round towards Minus infinity
  RZ = 3,    // Round towards Zero
  rmMask = 3 // Bit mask selecting rounding mode
};

// Bit position of rounding mode bits in FPSCR.
const unsigned RoundingBitsPos = 22;
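
// For illustration, given a raw FPSCR value the current mode follows from the
// definitions above as:
//   auto Mode = Rounding((FPSCR >> RoundingBitsPos) & rmMask);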

} // namespace ARM

/// Define some predicates that are used for node matching.
namespace ARM {
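
// Used when matching the bit-field instructions (BFC/BFI): returns true when
// ~v is a single contiguous run of set bits, i.e. v is all-ones apart from
// one contiguous field of zeros. For example, 0xFFFF00FF qualifies, while
// 0xFF00FF00 (two separate zero fields) does not.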
bool isBitFieldInvertedMask(unsigned v);

} // end namespace ARM

//===--------------------------------------------------------------------===//
//  ARMTargetLowering - ARM Implementation of the TargetLowering interface

class ARMTargetLowering : public TargetLowering {
public:
  explicit ARMTargetLowering(const TargetMachine &TM,
                             const ARMSubtarget &STI);

  unsigned getJumpTableEncoding() const override;
  bool useSoftFloat() const override;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  /// ReplaceNodeResults - Replace the results of node with an illegal result
  /// type with new values built out of custom code.
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  bool isSelectSupported(SelectSupportKind Kind) const override {
    // ARM does not support scalar condition selects on vectors.
    return (Kind != ScalarCondVectorVal);
  }

  bool isReadOnly(const GlobalValue *GV) const;

  /// getSetCCResultType - Return the value type to use for ISD::SETCC.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformIntrinsicCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue PerformMVEExtCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue PerformMVETruncCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                         const APInt &OriginalDemandedBits,
                                         const APInt &OriginalDemandedElts,
                                         KnownBits &Known,
                                         TargetLoweringOpt &TLO,
                                         unsigned Depth) const override;

  bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;

  /// allowsMisalignedMemoryAccesses - Returns true if the target allows
  /// unaligned memory accesses of the specified type. Returns whether it
  /// is "fast" by reference in the last argument.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                      Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      unsigned *Fast) const override;

  EVT getOptimalMemOpType(const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
  bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;
  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;
  Type *shouldConvertSplatType(ShuffleVectorInst *SVI) const override;

  bool isFNegFree(EVT VT) const override;

  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;

  bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                             Type *Ty, unsigned AS,
                             Instruction *I = nullptr) const override;

  bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

  /// Returns true if the addressing mode represented by AM is legal
  /// for the Thumb1 target, for a load/store of the specified type.
  bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

  /// isLegalICmpImmediate - Return true if the specified immediate is a
  /// legal icmp immediate, that is the target has icmp instructions which can
  /// compare a register against the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalICmpImmediate(int64_t Imm) const override;

  /// isLegalAddImmediate - Return true if the specified immediate is a
  /// legal add immediate, that is the target has add instructions which can
  /// add a register and the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalAddImmediate(int64_t Imm) const override;

  /// getPreIndexedAddressParts - Returns true by value, and sets the base
  /// pointer, offset pointer and addressing mode by reference, if the node's
  /// address can be legally represented as a pre-indexed load/store address.
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;

  /// getPostIndexedAddressParts - Returns true by value, and sets the base
  /// pointer, offset pointer and addressing mode by reference, if this node
  /// can be combined with a load/store to form a post-indexed load/store.
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  bool ExpandInlineAsm(CallInst *CI) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
  /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
  /// true it means one of the asm constraints of the inline asm instruction
  /// being processed is 'm'.
  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;
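
  // Note on the constraints handled below: 'Q' denotes an ARM memory operand
  // whose address is a single base register with no offset, as in this
  // hypothetical usage sketch (the names Val/Ptr are illustrative only):
  //   asm volatile("ldrexd %0, %H0, [%1]" : "=&r"(Val) : "r"(Ptr), "Q"(*Ptr));
  // The two-letter 'U*' codes are ARM-specific memory constraints.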

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    else if (ConstraintCode.size() == 2) {
      if (ConstraintCode[0] == 'U') {
        switch (ConstraintCode[1]) {
        default:
          break;
        case 'm':
          return InlineAsm::Constraint_Um;
        case 'n':
          return InlineAsm::Constraint_Un;
        case 'q':
          return InlineAsm::Constraint_Uq;
        case 's':
          return InlineAsm::Constraint_Us;
        case 't':
          return InlineAsm::Constraint_Ut;
        case 'v':
          return InlineAsm::Constraint_Uv;
        case 'y':
          return InlineAsm::Constraint_Uy;
        }
      }
    }
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  const ARMSubtarget *getSubtarget() const {
    return Subtarget;
  }

  /// getRegClassFor - Return the register class that should be used for the
  /// specified value type.
  const TargetRegisterClass *
  getRegClassFor(MVT VT, bool isDivergent = false) const override;

  bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                              Align &PrefAlign) const override;

  /// createFastISel - This method returns a target specific FastISel object,
  /// or null if the target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  Sched::Preference getSchedulingPreference(SDNode *N) const override;

  bool preferZeroCompareBranch() const override { return true; }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  /// isFPImmLegal - Returns true if the target can instruction select the
  /// specified FP immediate natively. If false, the legalizer will
  /// materialize the FP immediate as a load from a constant pool.
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize = false) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                          const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Using overflow ops for overflow checks only should be beneficial on ARM.
    return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
  }

  /// Returns true if an argument of type Ty needs to be passed in a
  /// contiguous block of registers in calling convention CallConv.
  bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg,
      const DataLayout &DL) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  Instruction *makeDMB(IRBuilderBase &Builder, ARM_MB::MemBOpt Domain) const;
  Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                              AtomicOrdering Ord) const override;

  void
  emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;

  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  unsigned getMaxSupportedInterleaveFactor() const override;

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool shouldInsertFencesForAtomic(const Instruction *I) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;

  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                 unsigned &Cost) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const MachineFunction &MF) const override {
    // Do not merge to larger than i32.
    return (MemVT.getSizeInBits() <= 32);
  }

  bool isCheapToSpeculateCttz(Type *Ty) const override;
  bool isCheapToSpeculateCtlz(Type *Ty) const override;

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }

  bool supportSwiftError() const override {
    return true;
  }

  bool hasStandaloneRem(EVT VT) const override {
    return HasStandaloneRem;
  }

  ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const override;

  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const;
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(unsigned Factor, FixedVectorType *VecTy,
                                    Align Alignment,
                                    const DataLayout &DL) const;

  bool isMulAddWithConstProfitable(SDValue AddNode,
                                   SDValue ConstNode) const override;

  bool alignLoopsWithOptSize() const override;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;

  void finalizeLowering(MachineFunction &MF) const override;

  /// Return the correct alignment for the current calling convention.
  Align getABIAlignmentForCallingConv(Type *ArgTy,
                                      const DataLayout &DL) const override;

  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  bool isDesirableToCommuteXorWithShift(const SDNode *N) const override;

  bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                         CombineLevel Level) const override;

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

  bool isComplexDeinterleavingSupported() const override;
  bool isComplexDeinterleavingOperationSupported(
      ComplexDeinterleavingOperation Operation, Type *Ty) const override;

  Value *createComplexDeinterleavingIR(
      Instruction *I, ComplexDeinterleavingOperation OperationType,
      ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
      Value *Accumulator = nullptr) const override;

protected:
  std::pair<const TargetRegisterClass *, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI,
                          MVT VT) const override;

private:
  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

  const TargetRegisterInfo *RegInfo;

  const InstrItineraryData *Itins;

  /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
  unsigned ARMPCLabelIndex;

  // TODO: remove this, and have shouldInsertFencesForAtomic do the proper
  // check.
  bool InsertFencesForAtomic;

  bool HasStandaloneRem = true;

  void addTypeForNEON(MVT VT, MVT PromotedLdStVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);
  std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
                                            SDValue &ARMcc) const;

  using RegsToPassVector = SmallVector<std::pair<unsigned, SDValue>, 8>;

  void PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, SDValue Chain,
                        SDValue &Arg, RegsToPassVector &RegsToPass,
                        CCValAssign &VA, CCValAssign &NextVA,
                        SDValue &StackPtr,
                        SmallVectorImpl<SDValue> &MemOpChains,
                        bool IsTailCall,
                        int SPDiff) const;
  SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                               SDValue &Root, SelectionDAG &DAG,
                               const SDLoc &dl) const;

  CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
                                          bool isVarArg) const;
  CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                bool isVarArg) const;
  std::pair<SDValue, MachinePointerInfo>
  computeAddrForCallArg(const SDLoc &dl, SelectionDAG &DAG,
                        const CCValAssign &VA, SDValue StackPtr,
                        bool IsTailCall, int SPDiff) const;
  SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *Subtarget) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG) const;
  SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                               SelectionDAG &DAG,
                               TLSModel::Model model) const;
  SDValue LowerGlobalTLSAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddressWindows(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUnsignedALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                          const ARMSubtarget *ST) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  void ExpandDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed,
                         SmallVectorImpl<SDValue> &Results) const;
  SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
                        const ARMSubtarget *Subtarget) const;
  SDValue LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, bool Signed,
                                 SDValue &Chain) const;
  SDValue LowerREM(SDNode *N, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  void LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results,
                 SelectionDAG &DAG) const;

  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;

  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;

  SDValue MoveToHPR(const SDLoc &dl, SelectionDAG &DAG, MVT LocVT, MVT ValVT,
                    SDValue Val) const;
  SDValue MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG, MVT LocVT,
                      MVT ValVT, SDValue Val) const;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &dl, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }

  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool splitValueIntoRegisterParts(
      SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
      unsigned NumParts, MVT PartVT,
      std::optional<CallingConv::ID> CC) const override;

  SDValue joinRegisterPartsIntoValue(
      SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
      unsigned NumParts, MVT PartVT, EVT ValueVT,
      std::optional<CallingConv::ID> CC) const override;

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       const SDLoc &dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &dl,
                     SDValue &Chain, const Value *OrigArg,
                     unsigned InRegsParamRecordIdx, int ArgOffset,
                     unsigned ArgSize) const;

  void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                            const SDLoc &dl, SDValue &Chain,
                            unsigned ArgOffset, unsigned TotalArgRegsSaveSize,
                            bool ForceMutable = false) const;

  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  /// HandleByVal - Target-specific cleanup for ByVal support.
  void HandleByVal(CCState *, unsigned &, Align) const override;

  /// IsEligibleForTailCallOptimization - Check whether the call is eligible
  /// for tail call optimization. Targets which want to do tail call
  /// optimization should implement this function.
  bool IsEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      bool isCalleeStructRet, bool isCallerStructRet,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
      const bool isIndirect) const;

  bool CanLowerReturn(CallingConv::ID CallConv,
                      MachineFunction &MF, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals,
                      const SDLoc &dl, SelectionDAG &DAG) const override;

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;

  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  bool isUnsupportedFloatingType(EVT VT) const;

  SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
                  SDValue ARMcc, SDValue CCR, SDValue Cmp,
                  SelectionDAG &DAG) const;
  SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                    SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const;
  SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
                    const SDLoc &dl, bool Signaling = false) const;

  SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

  SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

  void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
                              MachineBasicBlock *DispatchBB, int FI) const;

  void EmitSjLjDispatchBlock(MachineInstr &MI, MachineBasicBlock *MBB) const;

  bool RemapAddSubWithFlags(MachineInstr &MI, MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitStructByval(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const;

  MachineBasicBlock *EmitLowered__chkstk(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;
  MachineBasicBlock *EmitLowered__dbzchk(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;

  void addMVEVectorTypes(bool HasMVEFP);
  void addAllExtLoads(const MVT From, const MVT To, LegalizeAction Action);
  void setAllExpand(MVT VT);
};
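
// Kinds of vector "modified immediate" encodings (used, e.g., when selecting
// the VMOV/VMVN/VORR/VBIC immediate forms; see isVMOVModifiedImm in
// ARMISelLowering.cpp).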
enum VMOVModImmType {
  VMOVModImm,
  VMVNModImm,
  MVEVMVNModImm,
  OtherModImm
};

namespace ARM {

FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);

} // end namespace ARM

} // end namespace llvm

#endif // LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H