Utils.h

#pragma once

#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif

//==-- llvm/CodeGen/GlobalISel/Utils.h ---------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file declares the API of helper functions used throughout the
/// GlobalISel pipeline.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_UTILS_H
#define LLVM_CODEGEN_GLOBALISEL_UTILS_H

#include "GISelWorkList.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include <cstdint>

namespace llvm {

class AnalysisUsage;
class LostDebugLocObserver;
class MachineBasicBlock;
class BlockFrequencyInfo;
class GISelKnownBits;
class MachineFunction;
class MachineInstr;
class MachineOperand;
class MachineOptimizationRemarkEmitter;
class MachineOptimizationRemarkMissed;
struct MachinePointerInfo;
class MachineRegisterInfo;
class MCInstrDesc;
class ProfileSummaryInfo;
class RegisterBankInfo;
class TargetInstrInfo;
class TargetLowering;
class TargetPassConfig;
class TargetRegisterInfo;
class TargetRegisterClass;
class ConstantFP;
class APFloat;

// Convenience macros for dealing with vector reduction opcodes.
#define GISEL_VECREDUCE_CASES_ALL       \
  case TargetOpcode::G_VECREDUCE_SEQ_FADD: \
  case TargetOpcode::G_VECREDUCE_SEQ_FMUL: \
  case TargetOpcode::G_VECREDUCE_FADD: \
  case TargetOpcode::G_VECREDUCE_FMUL: \
  case TargetOpcode::G_VECREDUCE_FMAX: \
  case TargetOpcode::G_VECREDUCE_FMIN: \
  case TargetOpcode::G_VECREDUCE_ADD:  \
  case TargetOpcode::G_VECREDUCE_MUL:  \
  case TargetOpcode::G_VECREDUCE_AND:  \
  case TargetOpcode::G_VECREDUCE_OR:   \
  case TargetOpcode::G_VECREDUCE_XOR:  \
  case TargetOpcode::G_VECREDUCE_SMAX: \
  case TargetOpcode::G_VECREDUCE_SMIN: \
  case TargetOpcode::G_VECREDUCE_UMAX: \
  case TargetOpcode::G_VECREDUCE_UMIN:

#define GISEL_VECREDUCE_CASES_NONSEQ    \
  case TargetOpcode::G_VECREDUCE_FADD: \
  case TargetOpcode::G_VECREDUCE_FMUL: \
  case TargetOpcode::G_VECREDUCE_FMAX: \
  case TargetOpcode::G_VECREDUCE_FMIN: \
  case TargetOpcode::G_VECREDUCE_ADD:  \
  case TargetOpcode::G_VECREDUCE_MUL:  \
  case TargetOpcode::G_VECREDUCE_AND:  \
  case TargetOpcode::G_VECREDUCE_OR:   \
  case TargetOpcode::G_VECREDUCE_XOR:  \
  case TargetOpcode::G_VECREDUCE_SMAX: \
  case TargetOpcode::G_VECREDUCE_SMIN: \
  case TargetOpcode::G_VECREDUCE_UMAX: \
  case TargetOpcode::G_VECREDUCE_UMIN:

/// Try to constrain Reg to the specified register class. If this fails,
/// create a new virtual register in the correct class.
///
/// \return The virtual register constrained to the right register class.
Register constrainRegToClass(MachineRegisterInfo &MRI,
                             const TargetInstrInfo &TII,
                             const RegisterBankInfo &RBI, Register Reg,
                             const TargetRegisterClass &RegClass);

/// Constrain the Register operand OpIdx, so that it is now constrained to the
/// TargetRegisterClass passed as an argument (RegClass).
/// If this fails, create a new virtual register in the correct class and insert
/// a COPY before \p InsertPt if it is a use or after if it is a definition.
/// In both cases, the function also updates the register of \p RegMO. The debug
/// location of \p InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt,
                                  const TargetRegisterClass &RegClass,
                                  MachineOperand &RegMO);

/// Try to constrain Reg so that it is usable by argument OpIdx of the provided
/// MCInstrDesc \p II. If this fails, create a new virtual register in the
/// correct class and insert a COPY before \p InsertPt if it is a use or after
/// if it is a definition. In both cases, the function also updates the register
/// of \p RegMO.
/// This is equivalent to constrainOperandRegClass(..., RegClass, ...)
/// with RegClass obtained from the MCInstrDesc. The debug location of \p
/// InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt, const MCInstrDesc &II,
                                  MachineOperand &RegMO, unsigned OpIdx);

/// Mutate the newly-selected instruction \p I to constrain its (possibly
/// generic) virtual register operands to the instruction's register class.
/// This could involve inserting COPYs before (for uses) or after (for defs).
/// This requires the number of operands to match the instruction description.
/// \returns whether operand regclass constraining succeeded.
///
// FIXME: Not all instructions have the same number of operands. We should
// probably expose a constrain helper per operand and let the target selector
// constrain individual registers, like fast-isel.
bool constrainSelectedInstRegOperands(MachineInstr &I,
                                      const TargetInstrInfo &TII,
                                      const TargetRegisterInfo &TRI,
                                      const RegisterBankInfo &RBI);
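
// Illustrative usage (not part of the upstream header): a typical pattern in a
// target InstructionSelector after emitting a target instruction. The names
// MIB, DstReg, LHS, RHS, TII, TRI and RBI are assumed to be in scope, and the
// AArch64 opcode is only an example.
//
//   MachineInstrBuilder AddMI =
//       MIB.buildInstr(AArch64::ADDWrr, {DstReg}, {LHS, RHS});
//   if (!constrainSelectedInstRegOperands(*AddMI.getInstr(), TII, TRI, RBI))
//     return false; // Bail out of selection if constraining failed.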

/// Check if DstReg can be replaced with SrcReg depending on the register
/// constraints.
bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI);

/// Check whether an instruction \p MI is dead: it only defines dead virtual
/// registers, and doesn't have other side effects.
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI);
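
// Illustrative usage (not part of the upstream header): erase an instruction
// whose results are no longer used, e.g. after a transformation has rewritten
// all of its users. MI and MRI are assumed to be in scope.
//
//   if (isTriviallyDead(MI, MRI))
//     MI.eraseFromParent();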

/// Report an ISel error as a missed optimization remark to the LLVMContext's
/// diagnostic stream. Set the FailedISel MachineFunction property.
void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        const char *PassName, StringRef Msg,
                        const MachineInstr &MI);

/// Report an ISel warning as a missed optimization remark to the LLVMContext's
/// diagnostic stream.
void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

/// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
std::optional<APInt> getIConstantVRegVal(Register VReg,
                                         const MachineRegisterInfo &MRI);

/// If \p VReg is defined by a G_CONSTANT that fits in int64_t, return it.
std::optional<int64_t> getIConstantVRegSExtVal(Register VReg,
                                               const MachineRegisterInfo &MRI);
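
// Illustrative usage (not part of the upstream header): inspect a constant
// operand, e.g. a shift amount. ShiftAmtReg is a hypothetical vreg holding the
// shift amount.
//
//   if (auto ShAmt = getIConstantVRegSExtVal(ShiftAmtReg, MRI)) {
//     if (*ShAmt == 0) {
//       // Shift by zero: the result is just the shifted operand.
//     }
//   }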

/// Simple struct used to hold a constant integer value and a virtual
/// register.
struct ValueAndVReg {
  APInt Value;
  Register VReg;
};

/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_CONSTANT, returns its APInt value and def register.
std::optional<ValueAndVReg>
getIConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);
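
// Illustrative usage (not part of the upstream header): unlike
// getIConstantVRegVal, the look-through variant also walks back through copies
// and extend/truncate chains to the defining G_CONSTANT. Reg and MRI are
// assumed to be in scope.
//
//   if (auto VRegAndVal = getIConstantVRegValWithLookThrough(Reg, MRI)) {
//     const APInt &Cst = VRegAndVal->Value; // constant value
//     Register CstReg = VRegAndVal->VReg;   // def register of the constant
//   }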

/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_CONSTANT or G_FCONSTANT, returns its value as an APInt and the def
/// register.
std::optional<ValueAndVReg> getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI,
    bool LookThroughInstrs = true, bool LookThroughAnyExt = false);

struct FPValueAndVReg {
  APFloat Value;
  Register VReg;
};

/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_FCONSTANT, returns its APFloat value and def register.
std::optional<FPValueAndVReg>
getFConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);

const ConstantFP *getConstantFPVRegVal(Register VReg,
                                       const MachineRegisterInfo &MRI);

/// See if Reg is defined by a single def instruction that is
/// Opcode. Also try to do trivial folding if it's a COPY with
/// same types. Returns null otherwise.
MachineInstr *getOpcodeDef(unsigned Opcode, Register Reg,
                           const MachineRegisterInfo &MRI);

/// Simple struct used to hold a Register value and the instruction which
/// defines it.
struct DefinitionAndSourceRegister {
  MachineInstr *MI;
  Register Reg;
};

/// Find the def instruction for \p Reg, and underlying value Register, folding
/// away any copies.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
std::optional<DefinitionAndSourceRegister>
getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);

/// Find the def instruction for \p Reg, folding away any trivial copies. May
/// return nullptr if \p Reg is not a generic virtual register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
MachineInstr *getDefIgnoringCopies(Register Reg,
                                   const MachineRegisterInfo &MRI);

/// Find the source register for \p Reg, folding away any trivial copies. It
/// will be an output register of the instruction that getDefIgnoringCopies
/// returns. May return an invalid register if \p Reg is not a generic virtual
/// register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);

/// Templated variant of getOpcodeDef returning a MachineInstr-derived T.
///
/// See if Reg is defined by a single def instruction of type T. Also try to do
/// trivial folding if it's a COPY with same types. Returns null otherwise.
template <class T>
T *getOpcodeDef(Register Reg, const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return dyn_cast_or_null<T>(DefMI);
}
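
// Illustrative usage (not part of the upstream header): the templated form is
// typically used with the instruction wrappers declared in
// llvm/CodeGen/GlobalISel/GenericMachineInstrs.h. Reg and MRI are assumed to
// be in scope.
//
//   if (GLoad *Load = getOpcodeDef<GLoad>(Reg, MRI)) {
//     Register Ptr = Load->getPointerReg();
//     // ... reason about the load feeding this value ...
//   }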

/// Returns an APFloat from Val converted to the appropriate size.
APFloat getAPFloatFromSize(double Val, unsigned Size);

/// Modify analysis usage so it preserves passes required for the SelectionDAG
/// fallback.
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);

std::optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                       const Register Op2,
                                       const MachineRegisterInfo &MRI);

std::optional<APFloat> ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                                           const Register Op2,
                                           const MachineRegisterInfo &MRI);

/// Tries to constant fold a vector binop with sources \p Op1 and \p Op2.
/// Returns an empty vector on failure.
SmallVector<APInt> ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                                           const Register Op2,
                                           const MachineRegisterInfo &MRI);

std::optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                       uint64_t Imm,
                                       const MachineRegisterInfo &MRI);

std::optional<APFloat> ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
                                              Register Src,
                                              const MachineRegisterInfo &MRI);

/// Tries to constant fold a G_CTLZ operation on \p Src. If \p Src is a vector
/// then it tries to do an element-wise constant fold.
std::optional<SmallVector<unsigned>>
ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI);

/// Test if the given value is known to have exactly one bit set. This differs
/// from computeKnownBits in that it doesn't necessarily determine which bit is
/// set.
bool isKnownToBeAPowerOfTwo(Register Val, const MachineRegisterInfo &MRI,
                            GISelKnownBits *KnownBits = nullptr);

/// Returns true if \p Val can be assumed to never be a NaN. If \p SNaN is true,
/// this returns if \p Val can be assumed to never be a signaling NaN.
bool isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                     bool SNaN = false);

/// Returns true if \p Val can be assumed to never be a signaling NaN.
inline bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI) {
  return isKnownNeverNaN(Val, MRI, true);
}

Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO);

/// Return a virtual register corresponding to the incoming argument register \p
/// PhysReg. This register is expected to have class \p RC, and optional type \p
/// RegTy. This assumes all references to the register will use the same type.
///
/// If there is an existing live-in argument register, it will be returned.
/// This will also ensure there is a valid copy.
Register getFunctionLiveInPhysReg(MachineFunction &MF,
                                  const TargetInstrInfo &TII,
                                  MCRegister PhysReg,
                                  const TargetRegisterClass &RC,
                                  const DebugLoc &DL, LLT RegTy = LLT());

/// Return the least common multiple type of \p OrigTy and \p TargetTy, by
/// changing the number of vector elements or scalar bitwidth. The intent is
/// that a G_MERGE_VALUES, G_BUILD_VECTOR, or G_CONCAT_VECTORS can be
/// constructed from \p OrigTy elements, and unmerged into \p TargetTy.
LLVM_READNONE
LLT getLCMType(LLT OrigTy, LLT TargetTy);

/// Return the smallest type that covers both \p OrigTy and \p TargetTy and is
/// a multiple of \p TargetTy.
LLVM_READNONE
LLT getCoverTy(LLT OrigTy, LLT TargetTy);

/// Return a type where the total size is the greatest common divisor of \p
/// OrigTy and \p TargetTy. This will try to either change the number of vector
/// elements, or bitwidth of scalars. The intent is the result type can be used
/// as the result of a G_UNMERGE_VALUES from \p OrigTy, and then some
/// combination of G_MERGE_VALUES, G_BUILD_VECTOR and G_CONCAT_VECTORS (possibly
/// with intermediate casts) can re-form \p TargetTy.
///
/// If these are vectors with different element types, this will try to produce
/// a vector with a compatible total size, but the element type of \p OrigTy. If
/// this can't be satisfied, this will produce a scalar smaller than the
/// original vector elements.
///
/// In the worst case, this returns LLT::scalar(1).
LLVM_READNONE
LLT getGCDType(LLT OrigTy, LLT TargetTy);
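
// Illustrative example (not part of the upstream header) of these type
// helpers; the commented results are a sketch of what the documented
// semantics suggest for these particular inputs.
//
//   LLT V4S32 = LLT::fixed_vector(4, 32);
//   LLT V2S32 = LLT::fixed_vector(2, 32);
//   LLT Gcd = getGCDType(V4S32, V2S32); // gcd(128, 64) bits of s32 -> <2 x s32>
//   LLT Lcm = getLCMType(V2S32, V4S32); // lcm(64, 128) bits of s32 -> <4 x s32>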

/// Represents a value which can be a Register or a constant.
///
/// This is useful in situations where an instruction may have an interesting
/// register operand or interesting constant operand. For a concrete example,
/// \see getVectorSplat.
class RegOrConstant {
  int64_t Cst;
  Register Reg;
  bool IsReg;

public:
  explicit RegOrConstant(Register Reg) : Reg(Reg), IsReg(true) {}
  explicit RegOrConstant(int64_t Cst) : Cst(Cst), IsReg(false) {}

  bool isReg() const { return IsReg; }
  bool isCst() const { return !IsReg; }

  Register getReg() const {
    assert(isReg() && "Expected a register!");
    return Reg;
  }

  int64_t getCst() const {
    assert(isCst() && "Expected a constant!");
    return Cst;
  }
};

/// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
/// If \p MI is not a splat, returns std::nullopt.
std::optional<int> getSplatIndex(MachineInstr &MI);

/// \returns the scalar integral splat value of \p Reg if possible.
std::optional<APInt> getIConstantSplatVal(const Register Reg,
                                          const MachineRegisterInfo &MRI);

/// \returns the scalar integral splat value defined by \p MI if possible.
std::optional<APInt> getIConstantSplatVal(const MachineInstr &MI,
                                          const MachineRegisterInfo &MRI);

/// \returns the scalar sign extended integral splat value of \p Reg if
/// possible.
std::optional<int64_t> getIConstantSplatSExtVal(const Register Reg,
                                                const MachineRegisterInfo &MRI);

/// \returns the scalar sign extended integral splat value defined by \p MI if
/// possible.
std::optional<int64_t> getIConstantSplatSExtVal(const MachineInstr &MI,
                                                const MachineRegisterInfo &MRI);
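
// Illustrative usage (not part of the upstream header): recognize a vector
// that splats the same signed constant into every lane, e.g. when folding a
// vector shift. Reg and MRI are assumed to be in scope.
//
//   if (auto Splat = getIConstantSplatSExtVal(Reg, MRI)) {
//     int64_t SplatImm = *Splat;
//     // ... every lane holds SplatImm ...
//   }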

/// Returns a floating point scalar constant of a build vector splat if it
/// exists. When \p AllowUndef == true some elements can be undef but not all.
std::optional<FPValueAndVReg> getFConstantSplat(Register VReg,
                                                const MachineRegisterInfo &MRI,
                                                bool AllowUndef = true);

/// Return true if the specified register is defined by G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const Register Reg,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are 0 or undef.
bool isBuildVectorAllZeros(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI,
                           bool AllowUndef = false);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are ~0 or undef.
bool isBuildVectorAllOnes(const MachineInstr &MI,
                          const MachineRegisterInfo &MRI,
                          bool AllowUndef = false);

/// Return true if the specified instruction is known to be a constant, or a
/// vector of constants.
///
/// If \p AllowFP is true, this will consider G_FCONSTANT in addition to
/// G_CONSTANT. If \p AllowOpaqueConstants is true, constant-like instructions
/// such as G_GLOBAL_VALUE will also be considered.
bool isConstantOrConstantVector(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                bool AllowFP = true,
                                bool AllowOpaqueConstants = true);

/// Return true if the value is a constant 0 integer or a splatted vector of a
/// constant 0 integer (with no undefs if \p AllowUndefs is false). This will
/// handle G_BUILD_VECTOR and G_BUILD_VECTOR_TRUNC as truncation is not an issue
/// for null values.
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI,
                       bool AllowUndefs = false);

/// Return true if the value is a constant -1 integer or a splatted vector of a
/// constant -1 integer (with no undefs if \p AllowUndefs is false).
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI,
                             bool AllowUndefs = false);

/// \returns a value when \p MI is a vector splat. The splat can be either a
/// Register or a constant.
///
/// Examples:
///
/// \code
/// %reg = COPY $physreg
/// %reg_splat = G_BUILD_VECTOR %reg, %reg, ..., %reg
/// \endcode
///
/// If called on the G_BUILD_VECTOR above, this will return a RegOrConstant
/// containing %reg.
///
/// \code
/// %cst = G_CONSTANT iN 4
/// %constant_splat = G_BUILD_VECTOR %cst, %cst, ..., %cst
/// \endcode
///
/// In the above case, this will return a RegOrConstant containing 4.
std::optional<RegOrConstant> getVectorSplat(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI);
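
// Illustrative usage (not part of the upstream header): distinguish the two
// splat forms described above. MI and MRI are assumed to be in scope.
//
//   if (auto Splat = getVectorSplat(MI, MRI)) {
//     if (Splat->isCst()) {
//       int64_t C = Splat->getCst();  // splat of a G_CONSTANT
//     } else {
//       Register R = Splat->getReg(); // splat of a non-constant register
//     }
//   }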

/// Determines if \p MI defines a constant integer or a build vector of
/// constant integers. Treats undef values as constants.
bool isConstantOrConstantVector(MachineInstr &MI,
                                const MachineRegisterInfo &MRI);

/// Determines if \p MI defines a constant integer or a splat vector of
/// constant integers.
/// \returns the scalar constant or std::nullopt.
std::optional<APInt>
isConstantOrConstantSplatVector(MachineInstr &MI,
                                const MachineRegisterInfo &MRI);

/// Attempt to match a unary predicate against a scalar/splat constant or every
/// element of a constant G_BUILD_VECTOR. If \p ConstVal is null, the source
/// value was undef.
bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg,
                         std::function<bool(const Constant *ConstVal)> Match,
                         bool AllowUndefs = false);
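
// Illustrative usage (not part of the upstream header): check that a scalar or
// build-vector shift amount is in range. ShiftAmtReg is a hypothetical vreg
// and the bit-width 32 is only an example; ConstantInt comes from
// llvm/IR/Constants.h.
//
//   bool InRange = matchUnaryPredicate(MRI, ShiftAmtReg,
//       [](const Constant *C) {
//         auto *CI = dyn_cast_or_null<ConstantInt>(C);
//         return CI && CI->getValue().ult(32);
//       });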

/// Returns true if, given the TargetLowering's boolean contents information,
/// the value \p Val contains a true value.
bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                    bool IsFP);

/// \returns true if, given the TargetLowering's boolean contents information,
/// the value \p Val contains a false value.
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                     bool IsFP);

/// Returns an integer representing true, as defined by the
/// TargetBooleanContents.
int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP);

/// Returns true if the given block should be optimized for size.
bool shouldOptForSize(const MachineBasicBlock &MBB, ProfileSummaryInfo *PSI,
                      BlockFrequencyInfo *BFI);

using SmallInstListTy = GISelWorkList<4>;

void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver,
                      SmallInstListTy &DeadInstChain);

void eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs, MachineRegisterInfo &MRI,
                 LostDebugLocObserver *LocObserver = nullptr);

void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                LostDebugLocObserver *LocObserver = nullptr);
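
// Illustrative usage (not part of the upstream header): erase an instruction
// after a combine through the GISel helpers, so that feeder instructions which
// become dead can be cleaned up as well, and so that a LostDebugLocObserver
// (if one is in use) is notified about dropped debug locations. LocObserver is
// assumed to be in scope; pass nullptr when there is no observer.
//
//   eraseInstr(MI, MRI, &LocObserver);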

/// Assuming the instruction \p MI is going to be deleted, attempt to salvage
/// debug users of \p MI by writing the effect of \p MI in a DIExpression.
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI);

} // End namespace llvm.
#endif

#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif