// AArch64InstrInfo.h
  1. //===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file contains the AArch64 implementation of the TargetInstrInfo class.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #ifndef LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
  13. #define LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
  14. #include "AArch64.h"
  15. #include "AArch64RegisterInfo.h"
  16. #include "llvm/CodeGen/TargetInstrInfo.h"
  17. #include "llvm/Support/TypeSize.h"
  18. #define GET_INSTRINFO_HEADER
  19. #include "AArch64GenInstrInfo.inc"
namespace llvm {

class AArch64Subtarget;

// Target-specific MachineMemOperand flags.
// Set on a load/store whose pairing has been hinted to be unprofitable
// (see AArch64InstrInfo::suppressLdStPair / isLdStPairSuppressed).
static const MachineMemOperand::Flags MOSuppressPair =
MachineMemOperand::MOTargetFlag1;
// Set on a load/store that is a strided memory access
// (see AArch64InstrInfo::isStridedAccess).
static const MachineMemOperand::Flags MOStridedAccess =
MachineMemOperand::MOTargetFlag2;

// IR metadata name used to tag strided accesses for Falkor tuning.
#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"
/// AArch64 implementation of TargetInstrInfo. Mostly declarations; the
/// definitions live in AArch64InstrInfo.cpp and the TableGen-generated
/// AArch64GenInstrInfo.inc.
class AArch64InstrInfo final : public AArch64GenInstrInfo {
  const AArch64RegisterInfo RI;
  const AArch64Subtarget &Subtarget;

public:
  explicit AArch64InstrInfo(const AArch64Subtarget &STI);

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  const AArch64RegisterInfo &getRegisterInfo() const { return RI; }

  unsigned getInstSizeInBytes(const MachineInstr &MI) const override;

  bool isAsCheapAsAMove(const MachineInstr &MI) const override;

  bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
                             Register &DstReg, unsigned &SubIdx) const override;

  bool
  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                  const MachineInstr &MIb) const override;

  unsigned isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  unsigned isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;

  /// Does this instruction set its full destination register to zero?
  static bool isGPRZero(const MachineInstr &MI);

  /// Does this instruction rename a GPR without modifying bits?
  static bool isGPRCopy(const MachineInstr &MI);

  /// Does this instruction rename an FPR without modifying bits?
  static bool isFPRCopy(const MachineInstr &MI);

  /// Return true if pairing the given load or store is hinted to be
  /// unprofitable.
  static bool isLdStPairSuppressed(const MachineInstr &MI);

  /// Return true if the given load or store is a strided memory access.
  static bool isStridedAccess(const MachineInstr &MI);

  /// Return true if it has an unscaled load/store offset.
  static bool hasUnscaledLdStOffset(unsigned Opc);
  static bool hasUnscaledLdStOffset(MachineInstr &MI) {
    return hasUnscaledLdStOffset(MI.getOpcode());
  }

  /// Returns the unscaled load/store for the scaled load/store opcode,
  /// if there is a corresponding unscaled variant available.
  static std::optional<unsigned> getUnscaledLdSt(unsigned Opc);

  /// Scaling factor for (scaled or unscaled) load or store.
  static int getMemScale(unsigned Opc);
  static int getMemScale(const MachineInstr &MI) {
    return getMemScale(MI.getOpcode());
  }

  /// Returns whether the instruction is a pre-indexed load.
  static bool isPreLd(const MachineInstr &MI);

  /// Returns whether the instruction is a pre-indexed store.
  static bool isPreSt(const MachineInstr &MI);

  /// Returns whether the instruction is a pre-indexed load/store.
  static bool isPreLdSt(const MachineInstr &MI);

  /// Returns whether the instruction is a paired load/store.
  static bool isPairedLdSt(const MachineInstr &MI);

  /// Returns the base register operand of a load/store.
  static const MachineOperand &getLdStBaseOp(const MachineInstr &MI);

  /// Returns the immediate offset operand of a load/store.
  static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI);

  /// Returns whether the instruction is FP or NEON.
  static bool isFpOrNEON(const MachineInstr &MI);

  /// Returns whether the instruction is in Q form (128 bit operands).
  static bool isQForm(const MachineInstr &MI);

  /// Returns the index for the immediate for a given instruction.
  static unsigned getLoadStoreImmIdx(unsigned Opc);

  /// Return true if the given load or store may be paired with another.
  static bool isPairableLdStInst(const MachineInstr &MI);

  /// Return the opcode that set flags when possible. The caller is
  /// responsible for ensuring the opc has a flag setting equivalent.
  static unsigned convertToFlagSettingOpc(unsigned Opc);

  /// Return true if this is a load/store that can be potentially
  /// paired/merged.
  bool isCandidateToMergeOrPair(const MachineInstr &MI) const;

  /// Hint that pairing the given load or store is unprofitable.
  static void suppressLdStPair(MachineInstr &MI);

  std::optional<ExtAddrMode>
  getAddrModeFromMemoryOp(const MachineInstr &MemI,
                          const TargetRegisterInfo *TRI) const override;

  bool getMemOperandsWithOffsetWidth(
      const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
      int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
      const TargetRegisterInfo *TRI) const override;

  /// If \p OffsetIsScalable is set to 'true', the offset is scaled by
  /// `vscale`. This is true for some SVE instructions like ldr/str that have
  /// a 'reg + imm' addressing mode where the immediate is an index to the
  /// scalable vector located at 'reg + imm * vscale x #bytes'.
  bool getMemOperandWithOffsetWidth(const MachineInstr &MI,
                                    const MachineOperand *&BaseOp,
                                    int64_t &Offset, bool &OffsetIsScalable,
                                    unsigned &Width,
                                    const TargetRegisterInfo *TRI) const;

  /// Return the immediate offset of the base register in a load/store
  /// \p LdSt.
  MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const;

  /// Returns true if opcode \p Opc is a memory operation. If it is, set
  /// \p Scale, \p Width, \p MinOffset, and \p MaxOffset accordingly.
  ///
  /// For unscaled instructions, \p Scale is set to 1.
  static bool getMemOpInfo(unsigned Opcode, TypeSize &Scale, unsigned &Width,
                           int64_t &MinOffset, int64_t &MaxOffset);

  bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                           ArrayRef<const MachineOperand *> BaseOps2,
                           unsigned NumLoads, unsigned NumBytes) const override;

  /// Copy a register tuple (e.g. vector register sequences) by emitting one
  /// \p Opcode copy per sub-register index in \p Indices.
  void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, MCRegister DestReg,
                        MCRegister SrcReg, bool KillSrc, unsigned Opcode,
                        llvm::ArrayRef<unsigned> Indices) const;
  void copyGPRRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                       DebugLoc DL, unsigned DestReg, unsigned SrcReg,
                       bool KillSrc, unsigned Opcode, unsigned ZeroReg,
                       llvm::ArrayRef<unsigned> Indices) const;
  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                   bool KillSrc) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, Register SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI,
                           Register VReg) const override;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, Register DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI,
                            Register VReg) const override;

  // This tells target independent code that it is okay to pass instructions
  // with subreg operands to foldMemoryOperandImpl.
  bool isSubregFoldable() const override { return true; }

  using TargetInstrInfo::foldMemoryOperandImpl;
  MachineInstr *
  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                        ArrayRef<unsigned> Ops,
                        MachineBasicBlock::iterator InsertPt, int FrameIndex,
                        LiveIntervals *LIS = nullptr,
                        VirtRegMap *VRM = nullptr) const override;

  /// \returns true if a branch from an instruction with opcode \p BranchOpc
  /// is capable of jumping to a position \p BrOffset bytes away.
  bool isBranchOffsetInRange(unsigned BranchOpc,
                             int64_t BrOffset) const override;

  MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;

  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify = false) const override;
  bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                              MachineBranchPredicate &MBP,
                              bool AllowModify) const override;
  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;
  bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
  bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
                       Register, Register, Register, int &, int &,
                       int &) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const DebugLoc &DL, Register DstReg,
                    ArrayRef<MachineOperand> Cond, Register TrueReg,
                    Register FalseReg) const override;

  MCInst getNop() const override;

  bool isSchedulingBoundary(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const override;

  /// analyzeCompare - For a comparison instruction, return the source
  /// registers in SrcReg and SrcReg2, and the value it compares against in
  /// CmpValue. Return true if the comparison instruction can be analyzed.
  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                      Register &SrcReg2, int64_t &CmpMask,
                      int64_t &CmpValue) const override;
  /// optimizeCompareInstr - Convert the instruction supplying the argument to
  /// the comparison into one that sets the zero bit in the flags register.
  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                            Register SrcReg2, int64_t CmpMask, int64_t CmpValue,
                            const MachineRegisterInfo *MRI) const override;
  bool optimizeCondBranch(MachineInstr &MI) const override;

  /// Return true when a code sequence can improve throughput. It
  /// should be called only for instructions in loops.
  /// \param Pattern - combiner pattern
  bool isThroughputPattern(MachineCombinerPattern Pattern) const override;
  /// Return true when there is potentially a faster code sequence
  /// for an instruction chain ending in ``Root``. All potential patterns are
  /// listed in the ``Patterns`` array.
  bool
  getMachineCombinerPatterns(MachineInstr &Root,
                             SmallVectorImpl<MachineCombinerPattern> &Patterns,
                             bool DoRegPressureReduce) const override;
  /// Return true when Inst is associative and commutative so that it can be
  /// reassociated. If Invert is true, then the inverse of Inst operation must
  /// be checked.
  bool isAssociativeAndCommutative(const MachineInstr &Inst,
                                   bool Invert) const override;
  /// When getMachineCombinerPatterns() finds patterns, this function
  /// generates the instructions that could replace the original code
  /// sequence.
  void genAlternativeCodeSequence(
      MachineInstr &Root, MachineCombinerPattern Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
  /// AArch64 supports MachineCombiner.
  bool useMachineCombiner() const override;

  bool expandPostRAPseudo(MachineInstr &MI) const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
  getSerializableMachineMemOperandTargetFlags() const override;

  // Machine-outliner hooks.
  bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                   bool OutlineFromLinkOnceODRs) const override;
  outliner::OutlinedFunction getOutliningCandidateInfo(
      std::vector<outliner::Candidate> &RepeatedSequenceLocs) const override;
  outliner::InstrType
  getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const override;
  bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                              unsigned &Flags) const override;
  void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
                          const outliner::OutlinedFunction &OF) const override;
  MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     outliner::Candidate &C) const override;
  bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;

  /// Returns the vector element size (B, H, S or D) of an SVE opcode.
  uint64_t getElementSizeForOpcode(unsigned Opc) const;
  /// Returns true if the opcode is for an SVE instruction that sets the
  /// condition codes as if its results had been fed to a PTEST instruction
  /// along with the same general predicate.
  bool isPTestLikeOpcode(unsigned Opc) const;
  /// Returns true if the opcode is for an SVE WHILE## instruction.
  bool isWhileOpcode(unsigned Opc) const;
  /// Returns true if the instruction has a shift by immediate that can be
  /// executed in one cycle less.
  static bool isFalkorShiftExtFast(const MachineInstr &MI);
  /// Return true if the instruction is a SEH instruction used for unwinding
  /// on Windows.
  static bool isSEHInstruction(const MachineInstr &MI);

  std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
                                           Register Reg) const override;

  std::optional<ParamLoadedValue>
  describeLoadedValue(const MachineInstr &MI, Register Reg) const override;

  unsigned int getTailDuplicateSize(CodeGenOpt::Level OptLevel) const override;

  bool isExtendLikelyToBeFolded(MachineInstr &ExtMI,
                                MachineRegisterInfo &MRI) const override;

  /// Split \p Offset into byte, predicate-vector and data-vector components
  /// for materializing scalable stack offsets.
  static void decomposeStackOffsetForFrameOffsets(const StackOffset &Offset,
                                                  int64_t &NumBytes,
                                                  int64_t &NumPredicateVectors,
                                                  int64_t &NumDataVectors);
  static void decomposeStackOffsetForDwarfOffsets(const StackOffset &Offset,
                                                  int64_t &ByteSized,
                                                  int64_t &VGSized);

#define GET_INSTRINFO_HELPER_DECLS
#include "AArch64GenInstrInfo.inc"

protected:
  /// If the specific machine instruction is an instruction that moves/copies
  /// value from one register to another register return destination and
  /// source registers as machine operands.
  std::optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const override;

private:
  unsigned getInstBundleLength(const MachineInstr &MI) const;

  /// Sets the offsets on outlined instructions in \p MBB which use SP
  /// so that they will be valid post-outlining.
  ///
  /// \param MBB A \p MachineBasicBlock in an outlined function.
  void fixupPostOutline(MachineBasicBlock &MBB) const;

  void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
                             MachineBasicBlock *TBB,
                             ArrayRef<MachineOperand> Cond) const;
  bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
                           const MachineRegisterInfo &MRI) const;
  bool removeCmpToZeroOrOne(MachineInstr &CmpInstr, unsigned SrcReg,
                            int CmpValue, const MachineRegisterInfo &MRI) const;

  /// Returns an unused general-purpose register which can be used for
  /// constructing an outlined call if one exists. Returns 0 otherwise.
  Register findRegisterToSaveLRTo(outliner::Candidate &C) const;

  /// Remove a ptest of a predicate-generating operation that already sets,
  /// or can be made to set, the condition codes in an identical manner.
  bool optimizePTestInstr(MachineInstr *PTest, unsigned MaskReg,
                          unsigned PredReg,
                          const MachineRegisterInfo *MRI) const;
};
  308. struct UsedNZCV {
  309. bool N = false;
  310. bool Z = false;
  311. bool C = false;
  312. bool V = false;
  313. UsedNZCV() = default;
  314. UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
  315. this->N |= UsedFlags.N;
  316. this->Z |= UsedFlags.Z;
  317. this->C |= UsedFlags.C;
  318. this->V |= UsedFlags.V;
  319. return *this;
  320. }
  321. };
/// \returns Conditions flags used after \p CmpInstr in its MachineBB if NZCV
/// flags are not alive in successors of the same \p CmpInstr and \p MI parent.
/// \returns std::nullopt otherwise.
///
/// Collect instructions using that flags in \p CCUseInstrs if provided.
std::optional<UsedNZCV>
examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
                 const TargetRegisterInfo &TRI,
                 SmallVectorImpl<MachineInstr *> *CCUseInstrs = nullptr);

/// Return true if there is an instruction /after/ \p DefMI and before \p UseMI
/// which either reads or clobbers NZCV.
bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
                                     const MachineInstr &UseMI,
                                     const TargetRegisterInfo *TRI);

// Builders for CFI instructions describing the CFA and register offsets from
// it. NOTE(review): exact semantics inferred from parameter names; confirm
// against the definitions in AArch64InstrInfo.cpp.
MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg,
                              unsigned Reg, const StackOffset &Offset,
                              bool LastAdjustmentWasScalable = true);
MCCFIInstruction createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg,
                                 const StackOffset &OffsetFromDefCFA);
/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
/// plus Offset. This is intended to be used from within the prolog/epilog
/// insertion (PEI) pass, where a virtual scratch register may be allocated
/// if necessary, to be replaced by the scavenger at the end of PEI.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                     const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
                     StackOffset Offset, const TargetInstrInfo *TII,
                     MachineInstr::MIFlag = MachineInstr::NoFlags,
                     bool SetNZCV = false, bool NeedsWinCFI = false,
                     bool *HasWinCFI = nullptr, bool EmitCFAOffset = false,
                     StackOffset InitialOffset = {},
                     unsigned FrameReg = AArch64::SP);

/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
/// FP. Return false if the offset could not be handled directly in MI, and
/// return the left-over portion by reference.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                              unsigned FrameReg, StackOffset &Offset,
                              const AArch64InstrInfo *TII);
/// Use to report the frame offset status in isAArch64FrameOffsetLegal.
enum AArch64FrameOffsetStatus {
  AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
  AArch64FrameOffsetIsLegal = 0x1,      ///< Offset is legal.
  AArch64FrameOffsetCanUpdate = 0x2     ///< Offset can apply, at least partly.
};

/// Check if the @p Offset is a valid frame offset for @p MI.
/// The returned value reports the validity of the frame offset for @p MI.
/// It uses the values defined by AArch64FrameOffsetStatus for that.
/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
/// use an offset.
/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
/// rewritten in @p MI.
/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
/// amount that is off the limit of the legal offset.
/// If set, @p OutUseUnscaledOp will contain whether @p MI should be
/// turned into an unscaled operator, which opcode is in @p OutUnscaledOp.
/// If set, @p EmittableOffset contains the amount that can be set in @p MI
/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
/// is a legal offset.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset,
                              bool *OutUseUnscaledOp = nullptr,
                              unsigned *OutUnscaledOp = nullptr,
                              int64_t *EmittableOffset = nullptr);
  383. static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }
  384. static inline bool isCondBranchOpcode(int Opc) {
  385. switch (Opc) {
  386. case AArch64::Bcc:
  387. case AArch64::CBZW:
  388. case AArch64::CBZX:
  389. case AArch64::CBNZW:
  390. case AArch64::CBNZX:
  391. case AArch64::TBZW:
  392. case AArch64::TBZX:
  393. case AArch64::TBNZW:
  394. case AArch64::TBNZX:
  395. return true;
  396. default:
  397. return false;
  398. }
  399. }
  400. static inline bool isIndirectBranchOpcode(int Opc) {
  401. switch (Opc) {
  402. case AArch64::BR:
  403. case AArch64::BRAA:
  404. case AArch64::BRAB:
  405. case AArch64::BRAAZ:
  406. case AArch64::BRABZ:
  407. return true;
  408. }
  409. return false;
  410. }
  411. static inline bool isPTrueOpcode(unsigned Opc) {
  412. switch (Opc) {
  413. case AArch64::PTRUE_B:
  414. case AArch64::PTRUE_H:
  415. case AArch64::PTRUE_S:
  416. case AArch64::PTRUE_D:
  417. return true;
  418. default:
  419. return false;
  420. }
  421. }
/// Return opcode to be used for indirect calls (the BLR family); the choice
/// may depend on properties of \p MF.
unsigned getBLRCallOpcode(const MachineFunction &MF);
  424. /// Return XPAC opcode to be used for a ptrauth strip using the given key.
  425. static inline unsigned getXPACOpcodeForKey(AArch64PACKey::ID K) {
  426. using namespace AArch64PACKey;
  427. switch (K) {
  428. case IA: case IB: return AArch64::XPACI;
  429. case DA: case DB: return AArch64::XPACD;
  430. }
  431. llvm_unreachable("Unhandled AArch64PACKey::ID enum");
  432. }
  433. /// Return AUT opcode to be used for a ptrauth auth using the given key, or its
  434. /// AUT*Z variant that doesn't take a discriminator operand, using zero instead.
  435. static inline unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
  436. using namespace AArch64PACKey;
  437. switch (K) {
  438. case IA: return Zero ? AArch64::AUTIZA : AArch64::AUTIA;
  439. case IB: return Zero ? AArch64::AUTIZB : AArch64::AUTIB;
  440. case DA: return Zero ? AArch64::AUTDZA : AArch64::AUTDA;
  441. case DB: return Zero ? AArch64::AUTDZB : AArch64::AUTDB;
  442. }
  443. }
  444. /// Return PAC opcode to be used for a ptrauth sign using the given key, or its
  445. /// PAC*Z variant that doesn't take a discriminator operand, using zero instead.
  446. static inline unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
  447. using namespace AArch64PACKey;
  448. switch (K) {
  449. case IA: return Zero ? AArch64::PACIZA : AArch64::PACIA;
  450. case IB: return Zero ? AArch64::PACIZB : AArch64::PACIB;
  451. case DA: return Zero ? AArch64::PACDZA : AArch64::PACDA;
  452. case DB: return Zero ? AArch64::PACDZB : AArch64::PACDB;
  453. }
  454. }
// Helpers that place each target-specific field at its bit position inside
// the instruction's TSFlags word. Conceptually:
// struct TSFlags {
#define TSFLAG_ELEMENT_SIZE_TYPE(X) (X)             // 3-bits (bits 0-2)
#define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3)  // 4-bits (bits 3-6)
#define TSFLAG_FALSE_LANE_TYPE(X) ((X) << 7)        // 2-bits (bits 7-8)
#define TSFLAG_INSTR_FLAGS(X) ((X) << 9)            // 2-bits (bits 9-10)
#define TSFLAG_SME_MATRIX_TYPE(X) ((X) << 11)       // 3-bits (bits 11-13)
// }
namespace AArch64 {

// Element size (B/H/S/D) encoded in TSFlags for SVE instructions; queried
// via AArch64InstrInfo::getElementSizeForOpcode.
enum ElementSizeType {
  ElementSizeMask = TSFLAG_ELEMENT_SIZE_TYPE(0x7),
  ElementSizeNone = TSFLAG_ELEMENT_SIZE_TYPE(0x0),
  ElementSizeB = TSFLAG_ELEMENT_SIZE_TYPE(0x1),
  ElementSizeH = TSFLAG_ELEMENT_SIZE_TYPE(0x2),
  ElementSizeS = TSFLAG_ELEMENT_SIZE_TYPE(0x3),
  ElementSizeD = TSFLAG_ELEMENT_SIZE_TYPE(0x4),
};

// Classification of how a (pseudo) instruction destroys one of its source
// operands, encoded in TSFlags.
enum DestructiveInstType {
  DestructiveInstTypeMask = TSFLAG_DESTRUCTIVE_INST_TYPE(0xf),
  NotDestructive = TSFLAG_DESTRUCTIVE_INST_TYPE(0x0),
  DestructiveOther = TSFLAG_DESTRUCTIVE_INST_TYPE(0x1),
  DestructiveUnary = TSFLAG_DESTRUCTIVE_INST_TYPE(0x2),
  DestructiveBinaryImm = TSFLAG_DESTRUCTIVE_INST_TYPE(0x3),
  DestructiveBinaryShImmUnpred = TSFLAG_DESTRUCTIVE_INST_TYPE(0x4),
  DestructiveBinary = TSFLAG_DESTRUCTIVE_INST_TYPE(0x5),
  DestructiveBinaryComm = TSFLAG_DESTRUCTIVE_INST_TYPE(0x6),
  DestructiveBinaryCommWithRev = TSFLAG_DESTRUCTIVE_INST_TYPE(0x7),
  DestructiveTernaryCommWithRev = TSFLAG_DESTRUCTIVE_INST_TYPE(0x8),
  DestructiveUnaryPassthru = TSFLAG_DESTRUCTIVE_INST_TYPE(0x9),
};

// What an instruction writes to the inactive (false) lanes, encoded in
// TSFlags.
enum FalseLaneType {
  FalseLanesMask = TSFLAG_FALSE_LANE_TYPE(0x3),
  FalseLanesZero = TSFLAG_FALSE_LANE_TYPE(0x1),
  FalseLanesUndef = TSFLAG_FALSE_LANE_TYPE(0x2),
};

// NOTE: This is a bit field.
static const uint64_t InstrFlagIsWhile = TSFLAG_INSTR_FLAGS(0x1);
static const uint64_t InstrFlagIsPTestLike = TSFLAG_INSTR_FLAGS(0x2);

// Which SME matrix (tile of a given element size, or the array) an
// instruction operates on, encoded in TSFlags.
enum SMEMatrixType {
  SMEMatrixTypeMask = TSFLAG_SME_MATRIX_TYPE(0x7),
  SMEMatrixNone = TSFLAG_SME_MATRIX_TYPE(0x0),
  SMEMatrixTileB = TSFLAG_SME_MATRIX_TYPE(0x1),
  SMEMatrixTileH = TSFLAG_SME_MATRIX_TYPE(0x2),
  SMEMatrixTileS = TSFLAG_SME_MATRIX_TYPE(0x3),
  SMEMatrixTileD = TSFLAG_SME_MATRIX_TYPE(0x4),
  SMEMatrixTileQ = TSFLAG_SME_MATRIX_TYPE(0x5),
  SMEMatrixArray = TSFLAG_SME_MATRIX_TYPE(0x6),
};

#undef TSFLAG_ELEMENT_SIZE_TYPE
#undef TSFLAG_DESTRUCTIVE_INST_TYPE
#undef TSFLAG_FALSE_LANE_TYPE
#undef TSFLAG_INSTR_FLAGS
#undef TSFLAG_SME_MATRIX_TYPE

// Opcode-mapping lookups for SVE/SME pseudos — presumably implemented by
// TableGen-generated tables; confirm in AArch64GenInstrInfo.inc.
int getSVEPseudoMap(uint16_t Opcode);
int getSVERevInstr(uint16_t Opcode);
int getSVENonRevInstr(uint16_t Opcode);
int getSMEPseudoMap(uint16_t Opcode);

} // end namespace AArch64
  512. } // end namespace llvm
  513. #endif