//===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H

#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/Support/TypeSize.h"

#define GET_INSTRINFO_HEADER
#include "AArch64GenInstrInfo.inc"

namespace llvm {

class AArch64Subtarget;

static const MachineMemOperand::Flags MOSuppressPair =
    MachineMemOperand::MOTargetFlag1;
static const MachineMemOperand::Flags MOStridedAccess =
    MachineMemOperand::MOTargetFlag2;
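
// A minimal illustrative check (not part of the interface): these target
// flags live on an instruction's memory operand, e.g.
//
//   bool Suppressed = !MI.memoperands_empty() &&
//                     ((*MI.memoperands_begin())->getFlags() & MOSuppressPair);
//
// Code normally goes through helpers such as isLdStPairSuppressed() below
// rather than testing the flags directly.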

#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"

class AArch64InstrInfo final : public AArch64GenInstrInfo {
  const AArch64RegisterInfo RI;
  const AArch64Subtarget &Subtarget;

public:
  explicit AArch64InstrInfo(const AArch64Subtarget &STI);

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  const AArch64RegisterInfo &getRegisterInfo() const { return RI; }

  unsigned getInstSizeInBytes(const MachineInstr &MI) const override;

  bool isAsCheapAsAMove(const MachineInstr &MI) const override;

  bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
                             Register &DstReg, unsigned &SubIdx) const override;

  bool
  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                  const MachineInstr &MIb) const override;

  unsigned isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  unsigned isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;

  /// Does this instruction set its full destination register to zero?
  static bool isGPRZero(const MachineInstr &MI);

  /// Does this instruction rename a GPR without modifying bits?
  static bool isGPRCopy(const MachineInstr &MI);

  /// Does this instruction rename an FPR without modifying bits?
  static bool isFPRCopy(const MachineInstr &MI);

  /// Return true if pairing the given load or store is hinted to be
  /// unprofitable.
  static bool isLdStPairSuppressed(const MachineInstr &MI);

  /// Return true if the given load or store is a strided memory access.
  static bool isStridedAccess(const MachineInstr &MI);

  /// Return true if it has an unscaled load/store offset.
  static bool hasUnscaledLdStOffset(unsigned Opc);
  static bool hasUnscaledLdStOffset(MachineInstr &MI) {
    return hasUnscaledLdStOffset(MI.getOpcode());
  }

  /// Returns the unscaled load/store for the scaled load/store opcode,
  /// if there is a corresponding unscaled variant available.
  static Optional<unsigned> getUnscaledLdSt(unsigned Opc);

  /// Scaling factor for (scaled or unscaled) load or store.
  static int getMemScale(unsigned Opc);
  static int getMemScale(const MachineInstr &MI) {
    return getMemScale(MI.getOpcode());
  }

  /// Returns whether the instruction is a pre-indexed load.
  static bool isPreLd(const MachineInstr &MI);

  /// Returns whether the instruction is a pre-indexed store.
  static bool isPreSt(const MachineInstr &MI);

  /// Returns whether the instruction is a pre-indexed load/store.
  static bool isPreLdSt(const MachineInstr &MI);

  /// Returns the index for the immediate for a given instruction.
  static unsigned getLoadStoreImmIdx(unsigned Opc);

  /// Return true if the given load or store may be paired with another.
  static bool isPairableLdStInst(const MachineInstr &MI);

  /// Return the flag-setting equivalent of the given opcode. The caller is
  /// responsible for ensuring the opcode has a flag-setting equivalent.
  static unsigned convertToFlagSettingOpc(unsigned Opc, bool &Is64Bit);

  /// Return true if this is a load/store that can be potentially paired/merged.
  bool isCandidateToMergeOrPair(const MachineInstr &MI) const;

  /// Hint that pairing the given load or store is unprofitable.
  static void suppressLdStPair(MachineInstr &MI);

  Optional<ExtAddrMode>
  getAddrModeFromMemoryOp(const MachineInstr &MemI,
                          const TargetRegisterInfo *TRI) const override;

  bool getMemOperandsWithOffsetWidth(
      const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
      int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
      const TargetRegisterInfo *TRI) const override;

  /// If \p OffsetIsScalable is set to 'true', the offset is scaled by `vscale`.
  /// This is true for some SVE instructions like ldr/str that have a
  /// 'reg + imm' addressing mode where the immediate is an index to the
  /// scalable vector located at 'reg + imm * vscale x #bytes'.
  bool getMemOperandWithOffsetWidth(const MachineInstr &MI,
                                    const MachineOperand *&BaseOp,
                                    int64_t &Offset, bool &OffsetIsScalable,
                                    unsigned &Width,
                                    const TargetRegisterInfo *TRI) const;
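
  // A minimal usage sketch (local names are illustrative only), interpreting
  // the scalable-offset case described above:
  //
  //   const MachineOperand *BaseOp;
  //   int64_t Offset;
  //   bool OffsetIsScalable;
  //   unsigned Width;
  //   if (TII->getMemOperandWithOffsetWidth(MI, BaseOp, Offset,
  //                                         OffsetIsScalable, Width, TRI)) {
  //     // The access is Width bytes at *BaseOp + Offset, where Offset is
  //     // additionally multiplied by vscale when OffsetIsScalable is true.
  //   }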

  /// Return the immediate offset of the base register in a load/store \p LdSt.
  MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const;

  /// Returns true if opcode \p Opc is a memory operation. If it is, set
  /// \p Scale, \p Width, \p MinOffset, and \p MaxOffset accordingly.
  ///
  /// For unscaled instructions, \p Scale is set to 1.
  static bool getMemOpInfo(unsigned Opcode, TypeSize &Scale, unsigned &Width,
                           int64_t &MinOffset, int64_t &MaxOffset);
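
  // Illustrative sketch (the opcode is just an example): querying the legal
  // immediate range of a scaled load.
  //
  //   TypeSize Scale = TypeSize::Fixed(0);
  //   unsigned Width;
  //   int64_t MinOffset, MaxOffset;
  //   if (AArch64InstrInfo::getMemOpInfo(AArch64::LDRXui, Scale, Width,
  //                                      MinOffset, MaxOffset)) {
  //     // The immediate operand must lie in [MinOffset, MaxOffset] and is
  //     // implicitly multiplied by Scale bytes.
  //   }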

  bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                           ArrayRef<const MachineOperand *> BaseOps2,
                           unsigned NumLoads, unsigned NumBytes) const override;

  void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, MCRegister DestReg,
                        MCRegister SrcReg, bool KillSrc, unsigned Opcode,
                        llvm::ArrayRef<unsigned> Indices) const;
  void copyGPRRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                       DebugLoc DL, unsigned DestReg, unsigned SrcReg,
                       bool KillSrc, unsigned Opcode, unsigned ZeroReg,
                       llvm::ArrayRef<unsigned> Indices) const;
  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                   bool KillSrc) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, Register SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) const override;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, Register DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI) const override;

  // This tells target independent code that it is okay to pass instructions
  // with subreg operands to foldMemoryOperandImpl.
  bool isSubregFoldable() const override { return true; }

  using TargetInstrInfo::foldMemoryOperandImpl;
  MachineInstr *
  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                        ArrayRef<unsigned> Ops,
                        MachineBasicBlock::iterator InsertPt, int FrameIndex,
                        LiveIntervals *LIS = nullptr,
                        VirtRegMap *VRM = nullptr) const override;

  /// \returns true if a branch from an instruction with opcode \p BranchOpc
  /// is capable of jumping to a position \p BrOffset bytes away.
  bool isBranchOffsetInRange(unsigned BranchOpc,
                             int64_t BrOffset) const override;

  MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;

  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify = false) const override;
  bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                              MachineBranchPredicate &MBP,
                              bool AllowModify) const override;
  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;
  bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
  bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
                       Register, Register, Register, int &, int &,
                       int &) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const DebugLoc &DL, Register DstReg,
                    ArrayRef<MachineOperand> Cond, Register TrueReg,
                    Register FalseReg) const override;

  MCInst getNop() const override;

  bool isSchedulingBoundary(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const override;

  /// analyzeCompare - For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
  /// Return true if the comparison instruction can be analyzed.
  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                      Register &SrcReg2, int64_t &CmpMask,
                      int64_t &CmpValue) const override;
  /// optimizeCompareInstr - Convert the instruction supplying the argument to
  /// the comparison into one that sets the zero bit in the flags register.
  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                            Register SrcReg2, int64_t CmpMask, int64_t CmpValue,
                            const MachineRegisterInfo *MRI) const override;
  bool optimizeCondBranch(MachineInstr &MI) const override;

  /// Return true when a code sequence can improve throughput. It
  /// should be called only for instructions in loops.
  /// \param Pattern - combiner pattern
  bool isThroughputPattern(MachineCombinerPattern Pattern) const override;
  /// Return true when there is potentially a faster code sequence
  /// for an instruction chain ending in ``Root``. All potential patterns are
  /// listed in the ``Patterns`` array.
  bool
  getMachineCombinerPatterns(MachineInstr &Root,
                             SmallVectorImpl<MachineCombinerPattern> &Patterns,
                             bool DoRegPressureReduce) const override;
  /// Return true when Inst is associative and commutative so that it can be
  /// reassociated.
  bool isAssociativeAndCommutative(const MachineInstr &Inst) const override;
  /// When getMachineCombinerPatterns() finds patterns, this function generates
  /// the instructions that could replace the original code sequence.
  void genAlternativeCodeSequence(
      MachineInstr &Root, MachineCombinerPattern Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
  /// AArch64 supports MachineCombiner.
  bool useMachineCombiner() const override;

  bool expandPostRAPseudo(MachineInstr &MI) const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
  getSerializableMachineMemOperandTargetFlags() const override;

  bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                   bool OutlineFromLinkOnceODRs) const override;
  outliner::OutlinedFunction getOutliningCandidateInfo(
      std::vector<outliner::Candidate> &RepeatedSequenceLocs) const override;
  outliner::InstrType
  getOutliningType(MachineBasicBlock::iterator &MIT,
                   unsigned Flags) const override;
  bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                              unsigned &Flags) const override;
  void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
                          const outliner::OutlinedFunction &OF) const override;
  MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     const outliner::Candidate &C) const override;
  bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;
  /// Returns the vector element size (B, H, S or D) of an SVE opcode.
  uint64_t getElementSizeForOpcode(unsigned Opc) const;
  /// Returns true if the opcode is for an SVE instruction that sets the
  /// condition codes as if its results had been fed to a PTEST instruction
  /// along with the same general predicate.
  bool isPTestLikeOpcode(unsigned Opc) const;
  /// Returns true if the opcode is for an SVE WHILE## instruction.
  bool isWhileOpcode(unsigned Opc) const;
  /// Returns true if the instruction has a shift by immediate that can be
  /// executed in one cycle less.
  static bool isFalkorShiftExtFast(const MachineInstr &MI);
  /// Return true if the instruction is an SEH instruction used for unwinding
  /// on Windows.
  static bool isSEHInstruction(const MachineInstr &MI);

  Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
                                      Register Reg) const override;

  Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
                                                 Register Reg) const override;

  unsigned int getTailDuplicateSize(CodeGenOpt::Level OptLevel) const override;

  bool isExtendLikelyToBeFolded(MachineInstr &ExtMI,
                                MachineRegisterInfo &MRI) const override;

  static void decomposeStackOffsetForFrameOffsets(const StackOffset &Offset,
                                                  int64_t &NumBytes,
                                                  int64_t &NumPredicateVectors,
                                                  int64_t &NumDataVectors);
  static void decomposeStackOffsetForDwarfOffsets(const StackOffset &Offset,
                                                  int64_t &ByteSized,
                                                  int64_t &VGSized);
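
  // Illustrative decomposition (values are an example, assuming 16-byte SVE
  // data-vector and 2-byte predicate granules):
  //
  //   int64_t NumBytes, NumPredicateVectors, NumDataVectors;
  //   decomposeStackOffsetForFrameOffsets(StackOffset::get(/*Fixed=*/16,
  //                                                        /*Scalable=*/16),
  //                                       NumBytes, NumPredicateVectors,
  //                                       NumDataVectors);
  //   // NumBytes == 16, NumPredicateVectors == 0, NumDataVectors == 1.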

#define GET_INSTRINFO_HELPER_DECLS
#include "AArch64GenInstrInfo.inc"

protected:
  /// If the specific machine instruction is an instruction that moves/copies
  /// value from one register to another register return destination and source
  /// registers as machine operands.
  Optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const override;

private:
  unsigned getInstBundleLength(const MachineInstr &MI) const;

  /// Sets the offsets on outlined instructions in \p MBB which use SP
  /// so that they will be valid post-outlining.
  ///
  /// \param MBB A \p MachineBasicBlock in an outlined function.
  void fixupPostOutline(MachineBasicBlock &MBB) const;

  void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
                             MachineBasicBlock *TBB,
                             ArrayRef<MachineOperand> Cond) const;
  bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
                           const MachineRegisterInfo &MRI) const;
  bool removeCmpToZeroOrOne(MachineInstr &CmpInstr, unsigned SrcReg,
                            int CmpValue, const MachineRegisterInfo &MRI) const;

  /// Returns an unused general-purpose register which can be used for
  /// constructing an outlined call if one exists. Returns 0 otherwise.
  unsigned findRegisterToSaveLRTo(const outliner::Candidate &C) const;

  /// Remove a ptest of a predicate-generating operation that already sets, or
  /// can be made to set, the condition codes in an identical manner.
  bool optimizePTestInstr(MachineInstr *PTest, unsigned MaskReg,
                          unsigned PredReg,
                          const MachineRegisterInfo *MRI) const;
};

/// Return true if there is an instruction /after/ \p DefMI and before \p UseMI
/// which either reads or clobbers NZCV.
bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
                                     const MachineInstr &UseMI,
                                     const TargetRegisterInfo *TRI);

/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
/// plus Offset. This is intended to be used from within the prolog/epilog
/// insertion (PEI) pass, where a virtual scratch register may be allocated
/// if necessary, to be replaced by the scavenger at the end of PEI.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                     const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
                     StackOffset Offset, const TargetInstrInfo *TII,
                     MachineInstr::MIFlag = MachineInstr::NoFlags,
                     bool SetNZCV = false, bool NeedsWinCFI = false,
                     bool *HasWinCFI = nullptr);
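
// A minimal usage sketch (register choice and offset are illustrative):
// materialize SP + 16 into X8 during frame lowering.
//
//   emitFrameOffset(MBB, MBBI, DL, AArch64::X8, AArch64::SP,
//                   StackOffset::getFixed(16), TII,
//                   MachineInstr::FrameSetup);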

/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
/// FP. Return false if the offset could not be handled directly in MI, and
/// return the left-over portion by reference.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                              unsigned FrameReg, StackOffset &Offset,
                              const AArch64InstrInfo *TII);

/// Used to report the frame offset status in isAArch64FrameOffsetLegal.
enum AArch64FrameOffsetStatus {
  AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
  AArch64FrameOffsetIsLegal = 0x1,      ///< Offset is legal.
  AArch64FrameOffsetCanUpdate = 0x2     ///< Offset can apply, at least partly.
};

/// Check if the @p Offset is a valid frame offset for @p MI.
/// The returned value reports the validity of the frame offset for @p MI.
/// It uses the values defined by AArch64FrameOffsetStatus for that.
/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
/// use an offset.
/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
/// rewritten in @p MI.
/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
/// amount that is off the limit of the legal offset.
/// If set, @p OutUseUnscaledOp will contain whether @p MI should be
/// turned into an unscaled operation, whose opcode is in @p OutUnscaledOp.
/// If set, @p EmittableOffset contains the amount that can be set in @p MI
/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
/// is a legal offset.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset,
                              bool *OutUseUnscaledOp = nullptr,
                              unsigned *OutUnscaledOp = nullptr,
                              int64_t *EmittableOffset = nullptr);
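
// A sketch of interpreting the returned status bits (locals are illustrative):
//
//   bool UseUnscaledOp;
//   unsigned UnscaledOp;
//   int64_t Emittable;
//   int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
//                                          &UnscaledOp, &Emittable);
//   if (Status == AArch64FrameOffsetCannotUpdate) {
//     // MI cannot take an immediate offset at all.
//   } else if (Status & AArch64FrameOffsetIsLegal) {
//     // Offset fits entirely in MI (switching to UnscaledOp if requested).
//   } else if (Status & AArch64FrameOffsetCanUpdate) {
//     // Emittable can be folded into MI; Offset now holds the remainder.
//   }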

static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }

static inline bool isCondBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::Bcc:
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    return true;
  default:
    return false;
  }
}

static inline bool isIndirectBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::BR:
  case AArch64::BRAA:
  case AArch64::BRAB:
  case AArch64::BRAAZ:
  case AArch64::BRABZ:
    return true;
  }
  return false;
}

static inline bool isPTrueOpcode(unsigned Opc) {
  switch (Opc) {
  case AArch64::PTRUE_B:
  case AArch64::PTRUE_H:
  case AArch64::PTRUE_S:
  case AArch64::PTRUE_D:
    return true;
  default:
    return false;
  }
}

/// Return opcode to be used for indirect calls.
unsigned getBLRCallOpcode(const MachineFunction &MF);

// struct TSFlags {
#define TSFLAG_ELEMENT_SIZE_TYPE(X)      (X)       // 3-bits
#define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3) // 4-bits
#define TSFLAG_FALSE_LANE_TYPE(X)       ((X) << 7) // 2-bits
#define TSFLAG_INSTR_FLAGS(X)           ((X) << 9) // 2-bits
// }

namespace AArch64 {

enum ElementSizeType {
  ElementSizeMask = TSFLAG_ELEMENT_SIZE_TYPE(0x7),
  ElementSizeNone = TSFLAG_ELEMENT_SIZE_TYPE(0x0),
  ElementSizeB    = TSFLAG_ELEMENT_SIZE_TYPE(0x1),
  ElementSizeH    = TSFLAG_ELEMENT_SIZE_TYPE(0x2),
  ElementSizeS    = TSFLAG_ELEMENT_SIZE_TYPE(0x3),
  ElementSizeD    = TSFLAG_ELEMENT_SIZE_TYPE(0x4),
};

enum DestructiveInstType {
  DestructiveInstTypeMask       = TSFLAG_DESTRUCTIVE_INST_TYPE(0xf),
  NotDestructive                = TSFLAG_DESTRUCTIVE_INST_TYPE(0x0),
  DestructiveOther              = TSFLAG_DESTRUCTIVE_INST_TYPE(0x1),
  DestructiveUnary              = TSFLAG_DESTRUCTIVE_INST_TYPE(0x2),
  DestructiveBinaryImm          = TSFLAG_DESTRUCTIVE_INST_TYPE(0x3),
  DestructiveBinaryShImmUnpred  = TSFLAG_DESTRUCTIVE_INST_TYPE(0x4),
  DestructiveBinary             = TSFLAG_DESTRUCTIVE_INST_TYPE(0x5),
  DestructiveBinaryComm         = TSFLAG_DESTRUCTIVE_INST_TYPE(0x6),
  DestructiveBinaryCommWithRev  = TSFLAG_DESTRUCTIVE_INST_TYPE(0x7),
  DestructiveTernaryCommWithRev = TSFLAG_DESTRUCTIVE_INST_TYPE(0x8),
  DestructiveUnaryPassthru      = TSFLAG_DESTRUCTIVE_INST_TYPE(0x9),
};

enum FalseLaneType {
  FalseLanesMask  = TSFLAG_FALSE_LANE_TYPE(0x3),
  FalseLanesZero  = TSFLAG_FALSE_LANE_TYPE(0x1),
  FalseLanesUndef = TSFLAG_FALSE_LANE_TYPE(0x2),
};

// NOTE: This is a bit field.
static const uint64_t InstrFlagIsWhile     = TSFLAG_INSTR_FLAGS(0x1);
static const uint64_t InstrFlagIsPTestLike = TSFLAG_INSTR_FLAGS(0x2);
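
// Illustrative decoding of an instruction's TSFlags word under the layout
// above (the locals are just an example):
//
//   uint64_t Flags = MI.getDesc().TSFlags;
//   ElementSizeType ElemSize =
//       static_cast<ElementSizeType>(Flags & ElementSizeMask);
//   bool IsWhile = (Flags & InstrFlagIsWhile) != 0;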

#undef TSFLAG_ELEMENT_SIZE_TYPE
#undef TSFLAG_DESTRUCTIVE_INST_TYPE
#undef TSFLAG_FALSE_LANE_TYPE
#undef TSFLAG_INSTR_FLAGS

int getSVEPseudoMap(uint16_t Opcode);
int getSVERevInstr(uint16_t Opcode);
int getSVENonRevInstr(uint16_t Opcode);

} // end namespace AArch64

} // end namespace llvm

#endif