ARMBaseInstrInfo.h 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965
  1. //===-- ARMBaseInstrInfo.h - ARM Base Instruction Information ---*- C++ -*-===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file contains the Base ARM implementation of the TargetInstrInfo class.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #ifndef LLVM_LIB_TARGET_ARM_ARMBASEINSTRINFO_H
  13. #define LLVM_LIB_TARGET_ARM_ARMBASEINSTRINFO_H
  14. #include "MCTargetDesc/ARMBaseInfo.h"
  15. #include "llvm/ADT/DenseMap.h"
  16. #include "llvm/ADT/SmallSet.h"
  17. #include "llvm/CodeGen/MachineBasicBlock.h"
  18. #include "llvm/CodeGen/MachineInstr.h"
  19. #include "llvm/CodeGen/MachineInstrBuilder.h"
  20. #include "llvm/CodeGen/MachineOperand.h"
  21. #include "llvm/CodeGen/TargetInstrInfo.h"
  22. #include "llvm/IR/IntrinsicInst.h"
  23. #include "llvm/IR/IntrinsicsARM.h"
  24. #include <array>
  25. #include <cstdint>
  26. #define GET_INSTRINFO_HEADER
  27. #include "ARMGenInstrInfo.inc"
  28. namespace llvm {
  29. class ARMBaseRegisterInfo;
  30. class ARMSubtarget;
/// Base ARM implementation of the TargetInstrInfo interface. It is only
/// constructed through its subclasses (ARM / Thumb variants), which supply the
/// subtarget-specific behavior via the pure virtual hooks below.
class ARMBaseInstrInfo : public ARMGenInstrInfo {
  const ARMSubtarget &Subtarget;

protected:
  // Can only be subclassed.
  explicit ARMBaseInstrInfo(const ARMSubtarget &STI);

  void expandLoadStackGuardBase(MachineBasicBlock::iterator MI,
                                unsigned LoadImmOpc, unsigned LoadOpc) const;

  /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
  /// and \p DefIdx.
  /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
  /// the list is modeled as <Reg:SubReg, SubIdx>.
  /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
  /// two elements:
  /// - %1:sub1, sub0
  /// - %2<:0>, sub1
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isRegSequenceLike().
  bool getRegSequenceLikeInputs(
      const MachineInstr &MI, unsigned DefIdx,
      SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const override;

  /// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI
  /// and \p DefIdx.
  /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
  /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
  /// - %1:sub1, sub0
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isExtractSubregLike().
  bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx,
                                  RegSubRegPairAndIdx &InputReg) const override;

  /// Build the equivalent inputs of a INSERT_SUBREG for the given \p MI
  /// and \p DefIdx.
  /// \p [out] BaseReg and \p [out] InsertedReg contain
  /// the equivalent inputs of INSERT_SUBREG.
  /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
  /// - BaseReg: %0:sub0
  /// - InsertedReg: %1:sub1, sub3
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isInsertSubregLike().
  bool
  getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx,
                            RegSubRegPair &BaseReg,
                            RegSubRegPairAndIdx &InsertedReg) const override;

  /// Commutes the operands in the given instruction.
  /// The commutable operands are specified by their indices OpIdx1 and OpIdx2.
  ///
  /// Do not call this method for a non-commutable instruction or for
  /// non-commutable pair of operand indices OpIdx1 and OpIdx2.
  /// Even though the instruction is commutable, the method may still
  /// fail to commute the operands, null pointer is returned in such cases.
  MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                       unsigned OpIdx1,
                                       unsigned OpIdx2) const override;

  /// If the specific machine instruction is an instruction that moves/copies
  /// value from one register to another register return destination and source
  /// registers as machine operands.
  std::optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const override;

  /// Specialization of \ref TargetInstrInfo::describeLoadedValue, used to
  /// enhance debug entry value descriptions for ARM targets.
  std::optional<ParamLoadedValue>
  describeLoadedValue(const MachineInstr &MI, Register Reg) const override;

public:
  // Return whether the target has an explicit NOP encoding.
  bool hasNOP() const;

  // Return the non-pre/post incrementing version of 'Opc'. Return 0
  // if there is no such opcode.
  virtual unsigned getUnindexedOpcode(unsigned Opc) const = 0;

  MachineInstr *convertToThreeAddress(MachineInstr &MI, LiveVariables *LV,
                                      LiveIntervals *LIS) const override;

  virtual const ARMBaseRegisterInfo &getRegisterInfo() const = 0;
  const ARMSubtarget &getSubtarget() const { return Subtarget; }

  ScheduleHazardRecognizer *
  CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                               const ScheduleDAG *DAG) const override;

  ScheduleHazardRecognizer *
  CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                                 const ScheduleDAGMI *DAG) const override;

  ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                     const ScheduleDAG *DAG) const override;

  // Branch analysis.
  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify = false) const override;
  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;
  bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;

  // Predication support.
  bool isPredicated(const MachineInstr &MI) const override;

  // MIR printer helper function to annotate Operands with a comment.
  std::string
  createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op,
                          unsigned OpIdx,
                          const TargetRegisterInfo *TRI) const override;

  /// Return the condition code of \p MI's predicate operand, or ARMCC::AL
  /// (always execute) when the instruction has no predicate operand.
  ARMCC::CondCodes getPredicate(const MachineInstr &MI) const {
    int PIdx = MI.findFirstPredOperandIdx();
    return PIdx != -1 ? (ARMCC::CondCodes)MI.getOperand(PIdx).getImm()
                      : ARMCC::AL;
  }

  bool PredicateInstruction(MachineInstr &MI,
                            ArrayRef<MachineOperand> Pred) const override;

  bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                         ArrayRef<MachineOperand> Pred2) const override;

  bool ClobbersPredicate(MachineInstr &MI, std::vector<MachineOperand> &Pred,
                         bool SkipDead) const override;

  bool isPredicable(const MachineInstr &MI) const override;

  // CPSR defined in instruction
  static bool isCPSRDefined(const MachineInstr &MI);

  /// GetInstSize - Returns the size of the specified MachineInstr.
  ///
  unsigned getInstSizeInBytes(const MachineInstr &MI) const override;

  unsigned isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  unsigned isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;
  unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                     int &FrameIndex) const override;
  unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
                                    int &FrameIndex) const override;

  void copyToCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                  unsigned SrcReg, bool KillSrc,
                  const ARMSubtarget &Subtarget) const;
  void copyFromCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned DestReg, bool KillSrc,
                    const ARMSubtarget &Subtarget) const;

  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                   bool KillSrc) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, Register SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI,
                           Register VReg) const override;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, Register DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI,
                            Register VReg) const override;

  bool expandPostRAPseudo(MachineInstr &MI) const override;

  bool shouldSink(const MachineInstr &MI) const override;

  void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                     Register DestReg, unsigned SubIdx,
                     const MachineInstr &Orig,
                     const TargetRegisterInfo &TRI) const override;

  MachineInstr &
  duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
            const MachineInstr &Orig) const override;

  const MachineInstrBuilder &AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
                                     unsigned SubIdx, unsigned State,
                                     const TargetRegisterInfo *TRI) const;

  bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1,
                        const MachineRegisterInfo *MRI) const override;

  /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
  /// determine if two loads are loading from the same base address. It should
  /// only return true if the base pointers are the same and the only
  /// differences between the two addresses is the offset. It also returns the
  /// offsets by reference.
  bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1,
                               int64_t &Offset2) const override;

  /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
  /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads
  /// should be scheduled together. On some targets if two loads are loading
  /// from addresses in the same cache line, it's better if they are scheduled
  /// together. This function takes two integers that represent the load offsets
  /// from the common base address. It returns true if it decides it's desirable
  /// to schedule the two loads together. "NumLoads" is the number of loads that
  /// have already been scheduled after Load1.
  bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                               int64_t Offset1, int64_t Offset2,
                               unsigned NumLoads) const override;

  bool isSchedulingBoundary(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const override;

  bool isProfitableToIfCvt(MachineBasicBlock &MBB,
                           unsigned NumCycles, unsigned ExtraPredCycles,
                           BranchProbability Probability) const override;

  bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumT,
                           unsigned ExtraT, MachineBasicBlock &FMBB,
                           unsigned NumF, unsigned ExtraF,
                           BranchProbability Probability) const override;

  // Duplicating a single-cycle block for if-conversion is treated as free.
  bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                                 BranchProbability Probability) const override {
    return NumCycles == 1;
  }

  unsigned extraSizeToPredicateInstructions(const MachineFunction &MF,
                                            unsigned NumInsts) const override;
  unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const override;

  bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                 MachineBasicBlock &FMBB) const override;

  /// analyzeCompare - For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2 if having two register operands, and the value it
  /// compares against in CmpValue. Return true if the comparison instruction
  /// can be analyzed.
  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                      Register &SrcReg2, int64_t &CmpMask,
                      int64_t &CmpValue) const override;

  /// optimizeCompareInstr - Convert the instruction to set the zero flag so
  /// that we can remove a "comparison with zero"; Remove a redundant CMP
  /// instruction if the flags can be updated in the same way by an earlier
  /// instruction such as SUB.
  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                            Register SrcReg2, int64_t CmpMask, int64_t CmpValue,
                            const MachineRegisterInfo *MRI) const override;

  bool analyzeSelect(const MachineInstr &MI,
                     SmallVectorImpl<MachineOperand> &Cond, unsigned &TrueOp,
                     unsigned &FalseOp, bool &Optimizable) const override;

  MachineInstr *optimizeSelect(MachineInstr &MI,
                               SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                               bool) const override;

  /// FoldImmediate - 'Reg' is known to be defined by a move immediate
  /// instruction, try to fold the immediate into the use instruction.
  bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg,
                     MachineRegisterInfo *MRI) const override;

  unsigned getNumMicroOps(const InstrItineraryData *ItinData,
                          const MachineInstr &MI) const override;

  int getOperandLatency(const InstrItineraryData *ItinData,
                        const MachineInstr &DefMI, unsigned DefIdx,
                        const MachineInstr &UseMI,
                        unsigned UseIdx) const override;
  int getOperandLatency(const InstrItineraryData *ItinData,
                        SDNode *DefNode, unsigned DefIdx,
                        SDNode *UseNode, unsigned UseIdx) const override;

  /// VFP/NEON execution domains.
  std::pair<uint16_t, uint16_t>
  getExecutionDomain(const MachineInstr &MI) const override;
  void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override;

  unsigned
  getPartialRegUpdateClearance(const MachineInstr &, unsigned,
                               const TargetRegisterInfo *) const override;
  void breakPartialRegDependency(MachineInstr &, unsigned,
                                 const TargetRegisterInfo *TRI) const override;

  /// Get the number of addresses by LDM or VLDM or zero for unknown.
  unsigned getNumLDMAddresses(const MachineInstr &MI) const;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const override;

  /// ARM supports the MachineOutliner.
  bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                   bool OutlineFromLinkOnceODRs) const override;
  outliner::OutlinedFunction getOutliningCandidateInfo(
      std::vector<outliner::Candidate> &RepeatedSequenceLocs) const override;
  void mergeOutliningCandidateAttributes(
      Function &F, std::vector<outliner::Candidate> &Candidates) const override;
  outliner::InstrType getOutliningType(MachineBasicBlock::iterator &MIT,
                                       unsigned Flags) const override;
  bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                              unsigned &Flags) const override;
  void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
                          const outliner::OutlinedFunction &OF) const override;
  MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     outliner::Candidate &C) const override;

  /// Enable outlining by default at -Oz.
  bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;

  // The low-overhead-loop pseudos below must not be spilled; they are kept in
  // place until they are expanded.
  bool isUnspillableTerminatorImpl(const MachineInstr *MI) const override {
    return MI->getOpcode() == ARM::t2LoopEndDec ||
           MI->getOpcode() == ARM::t2DoLoopStartTP ||
           MI->getOpcode() == ARM::t2WhileLoopStartLR ||
           MI->getOpcode() == ARM::t2WhileLoopStartTP;
  }

  /// Analyze loop L, which must be a single-basic-block loop, and if the
  /// conditions can be understood enough produce a PipelinerLoopInfo object.
  std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
  analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override;

private:
  /// Returns an unused general-purpose register which can be used for
  /// constructing an outlined call if one exists. Returns 0 otherwise.
  Register findRegisterToSaveLRTo(outliner::Candidate &C) const;

  /// Adds an instruction which saves the link register on top of the stack into
  /// the MachineBasicBlock \p MBB at position \p It. If \p Auth is true,
  /// compute and store an authentication code alongside the link register.
  /// If \p CFI is true, emit CFI instructions.
  void saveLROnStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator It,
                     bool CFI, bool Auth) const;

  /// Adds an instruction which restores the link register from the top of the
  /// stack into the MachineBasicBlock \p MBB at position \p It. If \p Auth is
  /// true, restore an authentication code and authenticate LR.
  /// If \p CFI is true, emit CFI instructions.
  void restoreLRFromStack(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator It, bool CFI,
                          bool Auth) const;

  /// Emit CFI instructions into the MachineBasicBlock \p MBB at position \p It,
  /// for the case when the LR is saved in the register \p Reg.
  void emitCFIForLRSaveToReg(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator It,
                             Register Reg) const;

  /// Emit CFI instructions into the MachineBasicBlock \p MBB at position \p It,
  /// after the LR was restored from a register.
  void emitCFIForLRRestoreFromReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator It) const;

  /// \brief Sets the offsets on outlined instructions in \p MBB which use SP
  /// so that they will be valid post-outlining.
  ///
  /// \param MBB A \p MachineBasicBlock in an outlined function.
  void fixupPostOutline(MachineBasicBlock &MBB) const;

  /// Returns true if the machine instruction offset can handle the stack fixup
  /// and updates it if requested.
  bool checkAndUpdateStackOffset(MachineInstr *MI, int64_t Fixup,
                                 bool Updt) const;

  unsigned getInstBundleLength(const MachineInstr &MI) const;

  int getVLDMDefCycle(const InstrItineraryData *ItinData,
                      const MCInstrDesc &DefMCID,
                      unsigned DefClass,
                      unsigned DefIdx, unsigned DefAlign) const;
  int getLDMDefCycle(const InstrItineraryData *ItinData,
                     const MCInstrDesc &DefMCID,
                     unsigned DefClass,
                     unsigned DefIdx, unsigned DefAlign) const;
  int getVSTMUseCycle(const InstrItineraryData *ItinData,
                      const MCInstrDesc &UseMCID,
                      unsigned UseClass,
                      unsigned UseIdx, unsigned UseAlign) const;
  int getSTMUseCycle(const InstrItineraryData *ItinData,
                     const MCInstrDesc &UseMCID,
                     unsigned UseClass,
                     unsigned UseIdx, unsigned UseAlign) const;
  int getOperandLatency(const InstrItineraryData *ItinData,
                        const MCInstrDesc &DefMCID,
                        unsigned DefIdx, unsigned DefAlign,
                        const MCInstrDesc &UseMCID,
                        unsigned UseIdx, unsigned UseAlign) const;
  int getOperandLatencyImpl(const InstrItineraryData *ItinData,
                            const MachineInstr &DefMI, unsigned DefIdx,
                            const MCInstrDesc &DefMCID, unsigned DefAdj,
                            const MachineOperand &DefMO, unsigned Reg,
                            const MachineInstr &UseMI, unsigned UseIdx,
                            const MCInstrDesc &UseMCID, unsigned UseAdj) const;

  unsigned getPredicationCost(const MachineInstr &MI) const override;

  unsigned getInstrLatency(const InstrItineraryData *ItinData,
                           const MachineInstr &MI,
                           unsigned *PredCost = nullptr) const override;
  int getInstrLatency(const InstrItineraryData *ItinData,
                      SDNode *Node) const override;

  bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
                             const MachineRegisterInfo *MRI,
                             const MachineInstr &DefMI, unsigned DefIdx,
                             const MachineInstr &UseMI,
                             unsigned UseIdx) const override;
  bool hasLowDefLatency(const TargetSchedModel &SchedModel,
                        const MachineInstr &DefMI,
                        unsigned DefIdx) const override;

  /// verifyInstruction - Perform target specific instruction verification.
  bool verifyInstruction(const MachineInstr &MI,
                         StringRef &ErrInfo) const override;

  virtual void expandLoadStackGuard(MachineBasicBlock::iterator MI) const = 0;

  void expandMEMCPY(MachineBasicBlock::iterator) const;

  /// Identify instructions that can be folded into a MOVCC instruction, and
  /// return the defining instruction.
  MachineInstr *canFoldIntoMOVCC(Register Reg, const MachineRegisterInfo &MRI,
                                 const TargetInstrInfo *TII) const;

  bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override;

private:
  /// Modeling special VFP / NEON fp MLA / MLS hazards.

  /// MLxEntryMap - Map fp MLA / MLS to the corresponding entry in the internal
  /// MLx table.
  DenseMap<unsigned, unsigned> MLxEntryMap;

  /// MLxHazardOpcodes - Set of add / sub and multiply opcodes that would cause
  /// stalls when scheduled together with fp MLA / MLS opcodes.
  SmallSet<unsigned, 16> MLxHazardOpcodes;

public:
  /// isFpMLxInstruction - Return true if the specified opcode is a fp MLA / MLS
  /// instruction.
  bool isFpMLxInstruction(unsigned Opcode) const {
    return MLxEntryMap.count(Opcode);
  }

  /// isFpMLxInstruction - This version also returns the multiply opcode and the
  /// addition / subtraction opcode to expand to. Return true for 'HasLane' for
  /// the MLX instructions with an extra lane operand.
  bool isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
                          unsigned &AddSubOpc, bool &NegAcc,
                          bool &HasLane) const;

  /// canCauseFpMLxStall - Return true if an instruction of the specified opcode
  /// will cause stalls when scheduled after (within 4-cycle window) a fp
  /// MLA / MLS instruction.
  bool canCauseFpMLxStall(unsigned Opcode) const {
    return MLxHazardOpcodes.count(Opcode);
  }

  /// Returns true if the instruction has a shift by immediate that can be
  /// executed in one cycle less.
  bool isSwiftFastImmShift(const MachineInstr *MI) const;

  /// Returns predicate register associated with the given frame instruction.
  unsigned getFramePred(const MachineInstr &MI) const {
    assert(isFrameInstr(MI));
    // Operands of ADJCALLSTACKDOWN/ADJCALLSTACKUP:
    // - argument declared in the pattern:
    // 0 - frame size
    // 1 - arg of CALLSEQ_START/CALLSEQ_END
    // 2 - predicate code (like ARMCC::AL)
    // - added by predOps:
    // 3 - predicate reg
    return MI.getOperand(3).getReg();
  }

  std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
                                           Register Reg) const override;
};
  446. /// Get the operands corresponding to the given \p Pred value. By default, the
  447. /// predicate register is assumed to be 0 (no register), but you can pass in a
  448. /// \p PredReg if that is not the case.
  449. static inline std::array<MachineOperand, 2> predOps(ARMCC::CondCodes Pred,
  450. unsigned PredReg = 0) {
  451. return {{MachineOperand::CreateImm(static_cast<int64_t>(Pred)),
  452. MachineOperand::CreateReg(PredReg, false)}};
  453. }
  454. /// Get the operand corresponding to the conditional code result. By default,
  455. /// this is 0 (no register).
  456. static inline MachineOperand condCodeOp(unsigned CCReg = 0) {
  457. return MachineOperand::CreateReg(CCReg, false);
  458. }
  459. /// Get the operand corresponding to the conditional code result for Thumb1.
  460. /// This operand will always refer to CPSR and it will have the Define flag set.
  461. /// You can optionally set the Dead flag by means of \p isDead.
  462. static inline MachineOperand t1CondCodeOp(bool isDead = false) {
  463. return MachineOperand::CreateReg(ARM::CPSR,
  464. /*Define*/ true, /*Implicit*/ false,
  465. /*Kill*/ false, isDead);
  466. }
  467. static inline
  468. bool isUncondBranchOpcode(int Opc) {
  469. return Opc == ARM::B || Opc == ARM::tB || Opc == ARM::t2B;
  470. }
  471. // This table shows the VPT instruction variants, i.e. the different
  472. // mask field encodings, see also B5.6. Predication/conditional execution in
  473. // the ArmARM.
  474. static inline bool isVPTOpcode(int Opc) {
  475. return Opc == ARM::MVE_VPTv16i8 || Opc == ARM::MVE_VPTv16u8 ||
  476. Opc == ARM::MVE_VPTv16s8 || Opc == ARM::MVE_VPTv8i16 ||
  477. Opc == ARM::MVE_VPTv8u16 || Opc == ARM::MVE_VPTv8s16 ||
  478. Opc == ARM::MVE_VPTv4i32 || Opc == ARM::MVE_VPTv4u32 ||
  479. Opc == ARM::MVE_VPTv4s32 || Opc == ARM::MVE_VPTv4f32 ||
  480. Opc == ARM::MVE_VPTv8f16 || Opc == ARM::MVE_VPTv16i8r ||
  481. Opc == ARM::MVE_VPTv16u8r || Opc == ARM::MVE_VPTv16s8r ||
  482. Opc == ARM::MVE_VPTv8i16r || Opc == ARM::MVE_VPTv8u16r ||
  483. Opc == ARM::MVE_VPTv8s16r || Opc == ARM::MVE_VPTv4i32r ||
  484. Opc == ARM::MVE_VPTv4u32r || Opc == ARM::MVE_VPTv4s32r ||
  485. Opc == ARM::MVE_VPTv4f32r || Opc == ARM::MVE_VPTv8f16r ||
  486. Opc == ARM::MVE_VPST;
  487. }
/// Map an MVE VCMP opcode to the VPT opcode with the same element type and
/// operand form (the "r" suffix marks the vector/scalar variants). Returns 0
/// when \p Opcode has no VPT equivalent.
static inline
unsigned VCMPOpcodeToVPT(unsigned Opcode) {
  switch (Opcode) {
  default:
    return 0;
  case ARM::MVE_VCMPf32:
    return ARM::MVE_VPTv4f32;
  case ARM::MVE_VCMPf16:
    return ARM::MVE_VPTv8f16;
  case ARM::MVE_VCMPi8:
    return ARM::MVE_VPTv16i8;
  case ARM::MVE_VCMPi16:
    return ARM::MVE_VPTv8i16;
  case ARM::MVE_VCMPi32:
    return ARM::MVE_VPTv4i32;
  case ARM::MVE_VCMPu8:
    return ARM::MVE_VPTv16u8;
  case ARM::MVE_VCMPu16:
    return ARM::MVE_VPTv8u16;
  case ARM::MVE_VCMPu32:
    return ARM::MVE_VPTv4u32;
  case ARM::MVE_VCMPs8:
    return ARM::MVE_VPTv16s8;
  case ARM::MVE_VCMPs16:
    return ARM::MVE_VPTv8s16;
  case ARM::MVE_VCMPs32:
    return ARM::MVE_VPTv4s32;
  // Vector/scalar compares map to the corresponding "r" VPT variants.
  case ARM::MVE_VCMPf32r:
    return ARM::MVE_VPTv4f32r;
  case ARM::MVE_VCMPf16r:
    return ARM::MVE_VPTv8f16r;
  case ARM::MVE_VCMPi8r:
    return ARM::MVE_VPTv16i8r;
  case ARM::MVE_VCMPi16r:
    return ARM::MVE_VPTv8i16r;
  case ARM::MVE_VCMPi32r:
    return ARM::MVE_VPTv4i32r;
  case ARM::MVE_VCMPu8r:
    return ARM::MVE_VPTv16u8r;
  case ARM::MVE_VCMPu16r:
    return ARM::MVE_VPTv8u16r;
  case ARM::MVE_VCMPu32r:
    return ARM::MVE_VPTv4u32r;
  case ARM::MVE_VCMPs8r:
    return ARM::MVE_VPTv16s8r;
  case ARM::MVE_VCMPs16r:
    return ARM::MVE_VPTv8s16r;
  case ARM::MVE_VCMPs32r:
    return ARM::MVE_VPTv4s32r;
  }
}
  539. static inline
  540. bool isCondBranchOpcode(int Opc) {
  541. return Opc == ARM::Bcc || Opc == ARM::tBcc || Opc == ARM::t2Bcc;
  542. }
  543. static inline bool isJumpTableBranchOpcode(int Opc) {
  544. return Opc == ARM::BR_JTr || Opc == ARM::BR_JTm_i12 ||
  545. Opc == ARM::BR_JTm_rs || Opc == ARM::BR_JTadd || Opc == ARM::tBR_JTr ||
  546. Opc == ARM::t2BR_JT;
  547. }
  548. static inline
  549. bool isIndirectBranchOpcode(int Opc) {
  550. return Opc == ARM::BX || Opc == ARM::MOVPCRX || Opc == ARM::tBRIND;
  551. }
/// Returns true if \p MI is a call through a register (indirect call) and
/// false for direct calls and non-call instructions. The asserts enumerate
/// every known call opcode, so an unclassified call opcode trips the final
/// assert rather than being silently reported as "not a call".
static inline bool isIndirectCall(const MachineInstr &MI) {
  int Opc = MI.getOpcode();
  switch (Opc) {
  // indirect calls:
  case ARM::BLX:
  case ARM::BLX_noip:
  case ARM::BLX_pred:
  case ARM::BLX_pred_noip:
  case ARM::BX_CALL:
  case ARM::BMOVPCRX_CALL:
  case ARM::TCRETURNri:
  case ARM::TAILJMPr:
  case ARM::TAILJMPr4:
  case ARM::tBLXr:
  case ARM::tBLXr_noip:
  case ARM::tBLXNSr:
  case ARM::tBLXNS_CALL:
  case ARM::tBX_CALL:
  case ARM::tTAILJMPr:
    assert(MI.isCall(MachineInstr::IgnoreBundle));
    return true;
  // direct calls:
  case ARM::BL:
  case ARM::BL_pred:
  case ARM::BMOVPCB_CALL:
  case ARM::BL_PUSHLR:
  case ARM::BLXi:
  case ARM::TCRETURNdi:
  case ARM::TAILJMPd:
  case ARM::SVC:
  case ARM::HVC:
  case ARM::TPsoft:
  case ARM::tTAILJMPd:
  case ARM::t2SMC:
  case ARM::t2HVC:
  case ARM::tBL:
  case ARM::tBLXi:
  case ARM::tBL_PUSHLR:
  case ARM::tTAILJMPdND:
  case ARM::tSVC:
  case ARM::tTPsoft:
    assert(MI.isCall(MachineInstr::IgnoreBundle));
    return false;
  }
  // Anything not listed above must not be a call.
  assert(!MI.isCall(MachineInstr::IgnoreBundle));
  return false;
}
  599. static inline bool isIndirectControlFlowNotComingBack(const MachineInstr &MI) {
  600. int opc = MI.getOpcode();
  601. return MI.isReturn() || isIndirectBranchOpcode(MI.getOpcode()) ||
  602. isJumpTableBranchOpcode(opc);
  603. }
  604. static inline bool isSpeculationBarrierEndBBOpcode(int Opc) {
  605. return Opc == ARM::SpeculationBarrierISBDSBEndBB ||
  606. Opc == ARM::SpeculationBarrierSBEndBB ||
  607. Opc == ARM::t2SpeculationBarrierISBDSBEndBB ||
  608. Opc == ARM::t2SpeculationBarrierSBEndBB;
  609. }
  610. static inline bool isPopOpcode(int Opc) {
  611. return Opc == ARM::tPOP_RET || Opc == ARM::LDMIA_RET ||
  612. Opc == ARM::t2LDMIA_RET || Opc == ARM::tPOP || Opc == ARM::LDMIA_UPD ||
  613. Opc == ARM::t2LDMIA_UPD || Opc == ARM::VLDMDIA_UPD;
  614. }
  615. static inline bool isPushOpcode(int Opc) {
  616. return Opc == ARM::tPUSH || Opc == ARM::t2STMDB_UPD ||
  617. Opc == ARM::STMDB_UPD || Opc == ARM::VSTMDDB_UPD;
  618. }
  619. static inline bool isSubImmOpcode(int Opc) {
  620. return Opc == ARM::SUBri ||
  621. Opc == ARM::tSUBi3 || Opc == ARM::tSUBi8 ||
  622. Opc == ARM::tSUBSi3 || Opc == ARM::tSUBSi8 ||
  623. Opc == ARM::t2SUBri || Opc == ARM::t2SUBri12 || Opc == ARM::t2SUBSri;
  624. }
  625. static inline bool isMovRegOpcode(int Opc) {
  626. return Opc == ARM::MOVr || Opc == ARM::tMOVr || Opc == ARM::t2MOVr;
  627. }
  628. /// isValidCoprocessorNumber - decide whether an explicit coprocessor
  629. /// number is legal in generic instructions like CDP. The answer can
  630. /// vary with the subtarget.
  631. static inline bool isValidCoprocessorNumber(unsigned Num,
  632. const FeatureBitset& featureBits) {
  633. // In Armv7 and Armv8-M CP10 and CP11 clash with VFP/NEON, however, the
  634. // coprocessor is still valid for CDP/MCR/MRC and friends. Allowing it is
  635. // useful for code which is shared with older architectures which do not know
  636. // the new VFP/NEON mnemonics.
  637. // Armv8-A disallows everything *other* than 111x (CP14 and CP15).
  638. if (featureBits[ARM::HasV8Ops] && (Num & 0xE) != 0xE)
  639. return false;
  640. // Armv8.1-M disallows 100x (CP8,CP9) and 111x (CP14,CP15)
  641. // which clash with MVE.
  642. if (featureBits[ARM::HasV8_1MMainlineOps] &&
  643. ((Num & 0xE) == 0x8 || (Num & 0xE) == 0xE))
  644. return false;
  645. return true;
  646. }
  647. static inline bool isSEHInstruction(const MachineInstr &MI) {
  648. unsigned Opc = MI.getOpcode();
  649. switch (Opc) {
  650. case ARM::SEH_StackAlloc:
  651. case ARM::SEH_SaveRegs:
  652. case ARM::SEH_SaveRegs_Ret:
  653. case ARM::SEH_SaveSP:
  654. case ARM::SEH_SaveFRegs:
  655. case ARM::SEH_SaveLR:
  656. case ARM::SEH_Nop:
  657. case ARM::SEH_Nop_Ret:
  658. case ARM::SEH_PrologEnd:
  659. case ARM::SEH_EpilogStart:
  660. case ARM::SEH_EpilogEnd:
  661. return true;
  662. default:
  663. return false;
  664. }
  665. }
/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
ARMCC::CondCodes getInstrPredicate(const MachineInstr &MI, Register &PredReg);

/// Return the conditional-branch opcode corresponding to the branch opcode
/// \p Opc (defined in ARMBaseInstrInfo.cpp).
unsigned getMatchingCondBranchOpcode(unsigned Opc);

/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether
/// the instruction is encoded with an 'S' bit is determined by the optional
/// CPSR def operand.
unsigned convertAddSubFlagsOpcode(unsigned OldOpc);
/// emitARMRegPlusImmediate / emitT2RegPlusImmediate - Emits a series of
/// instructions to materialize a destreg = basereg + immediate in ARM / Thumb2
/// code. Instructions are inserted at \p MBBI with predicate \p Pred/\p
/// PredReg and flags \p MIFlags.
void emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator &MBBI,
                             const DebugLoc &dl, Register DestReg,
                             Register BaseReg, int NumBytes,
                             ARMCC::CondCodes Pred, Register PredReg,
                             const ARMBaseInstrInfo &TII, unsigned MIFlags = 0);
void emitT2RegPlusImmediate(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator &MBBI,
                            const DebugLoc &dl, Register DestReg,
                            Register BaseReg, int NumBytes,
                            ARMCC::CondCodes Pred, Register PredReg,
                            const ARMBaseInstrInfo &TII, unsigned MIFlags = 0);
/// Thumb variant of the helpers above; takes the register info instead of a
/// predicate (Thumb1 instructions are not predicable in the same way).
void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator &MBBI,
                               const DebugLoc &dl, Register DestReg,
                               Register BaseReg, int NumBytes,
                               const TargetInstrInfo &TII,
                               const ARMBaseRegisterInfo &MRI,
                               unsigned MIFlags = 0);
/// Tries to add registers to the reglist of a given base-updating
/// push/pop instruction to adjust the stack by an additional
/// NumBytes. This can save a few bytes per function in code-size, but
/// obviously generates more memory traffic. As such, it only takes
/// effect in functions being optimised for size.
bool tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
                                MachineFunction &MF, MachineInstr *MI,
                                unsigned NumBytes);

/// rewriteARMFrameIndex / rewriteT2FrameIndex -
/// Rewrite MI to access 'Offset' bytes from the FP. Return false if the
/// offset could not be handled directly in MI, and return the left-over
/// portion by reference.
bool rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                          Register FrameReg, int &Offset,
                          const ARMBaseInstrInfo &TII);
bool rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                         Register FrameReg, int &Offset,
                         const ARMBaseInstrInfo &TII,
                         const TargetRegisterInfo *TRI);

/// Return true if Reg is def'd between From and To.
bool registerDefinedBetween(unsigned Reg, MachineBasicBlock::iterator From,
                            MachineBasicBlock::iterator To,
                            const TargetRegisterInfo *TRI);

/// Search backwards from a tBcc to find a tCMPi8 against 0, meaning
/// we can convert them to a tCBZ or tCBNZ. Return nullptr if not found.
MachineInstr *findCMPToFoldIntoCBZ(MachineInstr *Br,
                                   const TargetRegisterInfo *TRI);

/// Helpers that append MVE vector-predication (vpred) operands to an
/// instruction being built; see the definitions in ARMBaseInstrInfo.cpp for
/// the exact operand layout.
void addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB);
void addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB, Register DestReg);
void addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond);
void addPredicatedMveVpredROp(MachineInstrBuilder &MIB, unsigned Cond,
                              unsigned Inactive);

/// Returns the number of instructions required to materialize the given
/// constant in a register, or 3 if a literal pool load is needed.
/// If ForCodesize is specified, an approximate cost in bytes is returned.
unsigned ConstantMaterializationCost(unsigned Val,
                                     const ARMSubtarget *Subtarget,
                                     bool ForCodesize = false);

/// Returns true if Val1 has a lower Constant Materialization Cost than Val2.
/// Uses the cost from ConstantMaterializationCost, first with ForCodesize as
/// specified. If the scores are equal, return the comparison for !ForCodesize.
bool HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2,
                                         const ARMSubtarget *Subtarget,
                                         bool ForCodesize = false);
  741. // Return the immediate if this is ADDri or SUBri, scaled as appropriate.
  742. // Returns 0 for unknown instructions.
  743. inline int getAddSubImmediate(MachineInstr &MI) {
  744. int Scale = 1;
  745. unsigned ImmOp;
  746. switch (MI.getOpcode()) {
  747. case ARM::t2ADDri:
  748. ImmOp = 2;
  749. break;
  750. case ARM::t2SUBri:
  751. case ARM::t2SUBri12:
  752. ImmOp = 2;
  753. Scale = -1;
  754. break;
  755. case ARM::tSUBi3:
  756. case ARM::tSUBi8:
  757. ImmOp = 3;
  758. Scale = -1;
  759. break;
  760. default:
  761. return 0;
  762. }
  763. return Scale * MI.getOperand(ImmOp).getImm();
  764. }
  765. // Given a memory access Opcode, check that the give Imm would be a valid Offset
  766. // for this instruction using its addressing mode.
  767. inline bool isLegalAddressImm(unsigned Opcode, int Imm,
  768. const TargetInstrInfo *TII) {
  769. const MCInstrDesc &Desc = TII->get(Opcode);
  770. unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  771. switch (AddrMode) {
  772. case ARMII::AddrModeT2_i7:
  773. return std::abs(Imm) < ((1 << 7) * 1);
  774. case ARMII::AddrModeT2_i7s2:
  775. return std::abs(Imm) < ((1 << 7) * 2) && Imm % 2 == 0;
  776. case ARMII::AddrModeT2_i7s4:
  777. return std::abs(Imm) < ((1 << 7) * 4) && Imm % 4 == 0;
  778. case ARMII::AddrModeT2_i8:
  779. return std::abs(Imm) < ((1 << 8) * 1);
  780. case ARMII::AddrModeT2_i8pos:
  781. return Imm >= 0 && Imm < ((1 << 8) * 1);
  782. case ARMII::AddrModeT2_i8neg:
  783. return Imm < 0 && -Imm < ((1 << 8) * 1);
  784. case ARMII::AddrModeT2_i8s4:
  785. return std::abs(Imm) < ((1 << 8) * 4) && Imm % 4 == 0;
  786. case ARMII::AddrModeT2_i12:
  787. return Imm >= 0 && Imm < ((1 << 12) * 1);
  788. case ARMII::AddrMode2:
  789. return std::abs(Imm) < ((1 << 12) * 1);
  790. default:
  791. llvm_unreachable("Unhandled Addressing mode");
  792. }
  793. }
  794. // Return true if the given intrinsic is a gather
  795. inline bool isGather(IntrinsicInst *IntInst) {
  796. if (IntInst == nullptr)
  797. return false;
  798. unsigned IntrinsicID = IntInst->getIntrinsicID();
  799. return (IntrinsicID == Intrinsic::masked_gather ||
  800. IntrinsicID == Intrinsic::arm_mve_vldr_gather_base ||
  801. IntrinsicID == Intrinsic::arm_mve_vldr_gather_base_predicated ||
  802. IntrinsicID == Intrinsic::arm_mve_vldr_gather_base_wb ||
  803. IntrinsicID == Intrinsic::arm_mve_vldr_gather_base_wb_predicated ||
  804. IntrinsicID == Intrinsic::arm_mve_vldr_gather_offset ||
  805. IntrinsicID == Intrinsic::arm_mve_vldr_gather_offset_predicated);
  806. }
  807. // Return true if the given intrinsic is a scatter
  808. inline bool isScatter(IntrinsicInst *IntInst) {
  809. if (IntInst == nullptr)
  810. return false;
  811. unsigned IntrinsicID = IntInst->getIntrinsicID();
  812. return (IntrinsicID == Intrinsic::masked_scatter ||
  813. IntrinsicID == Intrinsic::arm_mve_vstr_scatter_base ||
  814. IntrinsicID == Intrinsic::arm_mve_vstr_scatter_base_predicated ||
  815. IntrinsicID == Intrinsic::arm_mve_vstr_scatter_base_wb ||
  816. IntrinsicID == Intrinsic::arm_mve_vstr_scatter_base_wb_predicated ||
  817. IntrinsicID == Intrinsic::arm_mve_vstr_scatter_offset ||
  818. IntrinsicID == Intrinsic::arm_mve_vstr_scatter_offset_predicated);
  819. }
  820. // Return true if the given intrinsic is a gather or scatter
  821. inline bool isGatherScatter(IntrinsicInst *IntInst) {
  822. if (IntInst == nullptr)
  823. return false;
  824. return isGather(IntInst) || isScatter(IntInst);
  825. }
/// Return the opcode to use for a BLX / tBLXr / predicated BLX call in the
/// given function. The variant chosen depends on properties of \p MF; see
/// the definitions in ARMBaseInstrInfo.cpp.
unsigned getBLXOpcode(const MachineFunction &MF);
unsigned gettBLXrOpcode(const MachineFunction &MF);
unsigned getBLXpredOpcode(const MachineFunction &MF);
  829. } // end namespace llvm
  830. #endif // LLVM_LIB_TARGET_ARM_ARMBASEINSTRINFO_H