#pragma once
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
//===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_VALUETRACKING_H
#define LLVM_ANALYSIS_VALUETRACKING_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Intrinsics.h"
#include <cassert>
#include <cstdint>
#include <optional> // needed for std::optional used below

namespace llvm {

class Operator;
class AddOperator;
class AllocaInst;
class APInt;
class AssumptionCache;
class DominatorTree;
class GEPOperator;
class LoadInst;
class WithOverflowInst;
struct KnownBits;
class Loop;
class LoopInfo;
class MDNode;
class OptimizationRemarkEmitter;
class StringRef;
class TargetLibraryInfo;
class Value;

constexpr unsigned MaxAnalysisRecursionDepth = 6;
/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and
/// the bit is set only if it is true for all of the elements in the vector.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL,
                      unsigned Depth = 0, AssumptionCache *AC = nullptr,
                      const Instruction *CxtI = nullptr,
                      const DominatorTree *DT = nullptr,
                      OptimizationRemarkEmitter *ORE = nullptr,
                      bool UseInstrInfo = true);

/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and
/// the bit is set only if it is true for all of the demanded elements in the
/// vector.
void computeKnownBits(const Value *V, const APInt &DemandedElts,
                      KnownBits &Known, const DataLayout &DL,
                      unsigned Depth = 0, AssumptionCache *AC = nullptr,
                      const Instruction *CxtI = nullptr,
                      const DominatorTree *DT = nullptr,
                      OptimizationRemarkEmitter *ORE = nullptr,
                      bool UseInstrInfo = true);

/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
                           unsigned Depth = 0, AssumptionCache *AC = nullptr,
                           const Instruction *CxtI = nullptr,
                           const DominatorTree *DT = nullptr,
                           OptimizationRemarkEmitter *ORE = nullptr,
                           bool UseInstrInfo = true);

/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                           const DataLayout &DL, unsigned Depth = 0,
                           AssumptionCache *AC = nullptr,
                           const Instruction *CxtI = nullptr,
                           const DominatorTree *DT = nullptr,
                           OptimizationRemarkEmitter *ORE = nullptr,
                           bool UseInstrInfo = true);
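
// A minimal usage sketch (illustrative, not part of the API; assumes a
// Value *V and DataLayout DL are in scope and llvm/Support/KnownBits.h is
// included):
//   KnownBits Known = computeKnownBits(V, DL);
//   if (Known.countMinTrailingZeros() >= 2)
//     ; // The low two bits of V are zero, so V is a multiple of 4.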

/// Compute known bits from the range metadata.
/// \p KnownZero the set of bits that are known to be zero
/// \p KnownOne the set of bits that are known to be one
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known);

/// Return true if LHS and RHS have no common bits set.
bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                         const DataLayout &DL, AssumptionCache *AC = nullptr,
                         const Instruction *CxtI = nullptr,
                         const DominatorTree *DT = nullptr,
                         bool UseInstrInfo = true);

/// Return true if the given value is known to have exactly one bit set when
/// defined. For vectors return true if every element is known to be a power
/// of two when defined. Supports values with integer or pointer type and
/// vectors of integers. If 'OrZero' is set, then return true if the given
/// value is either a power of two or zero.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                            bool OrZero = false, unsigned Depth = 0,
                            AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);
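
// Illustrative sketch (assumes Value *V and DataLayout DL in scope): with
// OrZero=true, values such as `1 << n` and 0 both qualify, which is enough
// to justify rewrites like `x urem V` -> `x & (V - 1)` once V is also known
// non-zero.
//   if (isKnownToBeAPowerOfTwo(V, DL, /*OrZero=*/true))
//     ; // V & (V - 1) is known to be 0.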

bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);

/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
                    AssumptionCache *AC = nullptr,
                    const Instruction *CxtI = nullptr,
                    const DominatorTree *DT = nullptr,
                    bool UseInstrInfo = true);
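
// Sketch of a context-sensitive use (Ptr, AC, CxtI, and DT are assumed to be
// in scope):
//   if (isKnownNonZero(Ptr, DL, /*Depth=*/0, AC, CxtI, DT))
//     ; // Ptr cannot be null at CxtI, so a null check there is redundant.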

/// Return true if the two given values are negations of each other.
/// Currently can recognize a Value pair:
/// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
/// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false);

/// Returns true if the given value is known to be non-negative.
bool isKnownNonNegative(const Value *V, const DataLayout &DL,
                        unsigned Depth = 0, AssumptionCache *AC = nullptr,
                        const Instruction *CxtI = nullptr,
                        const DominatorTree *DT = nullptr,
                        bool UseInstrInfo = true);

/// Returns true if the given value is known to be positive (i.e. non-negative
/// and non-zero).
bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Returns true if the given value is known to be negative (i.e. non-positive
/// and non-zero).
bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Return true if the given values are known to be non-equal when defined.
/// Supports scalar integer types only.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the mask,
/// known zero, and known one values are the same width as the vector element,
/// and the bit is set only if it is true for all of the elements in the
/// vector.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const DataLayout &DL,
                       unsigned Depth = 0, AssumptionCache *AC = nullptr,
                       const Instruction *CxtI = nullptr,
                       const DominatorTree *DT = nullptr,
                       bool UseInstrInfo = true);
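
// Sketch (assumes an i32-typed Value *V and DataLayout DL in scope): check
// that the low two bits of V are provably clear.
//   APInt LowTwo = APInt::getLowBitsSet(/*numBits=*/32, /*loBitsSet=*/2);
//   if (MaskedValueIsZero(V, LowTwo, DL))
//     ; // V is a multiple of 4.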

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign
/// bit (itself), but other cases can give us information. For example,
/// immediately after an "ashr X, 2", we know that the top 3 bits are all
/// equal to each other, so we return 3. For vectors, return the number of
/// sign bits for the vector element with the minimum number of known sign
/// bits.
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
                            unsigned Depth = 0, AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);
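
// Sketch: if this returns N for a 32-bit Op, then Op has at most 32 - N + 1
// significant signed bits. For example (Op and DL assumed in scope):
//   if (ComputeNumSignBits(Op, DL) >= 17)
//     ; // A 32-bit Op round-trips through sext(trunc(Op to i16) to i32).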

/// Get the upper bound on bit size for this Value \p Op as a signed integer.
/// i.e. x == sext(trunc(x to MaxSignificantBits) to bitwidth(x)).
/// Similar to the APInt::getSignificantBits function.
unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL,
                                   unsigned Depth = 0,
                                   AssumptionCache *AC = nullptr,
                                   const Instruction *CxtI = nullptr,
                                   const DominatorTree *DT = nullptr);

/// Map a call instruction to an intrinsic ID. Libcalls which have equivalent
/// intrinsics are treated as-if they were intrinsics.
Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
                                      const TargetLibraryInfo *TLI);

/// Return true if we can prove that the specified FP value is never equal to
/// -0.0.
bool CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
                          unsigned Depth = 0);

/// Return true if we can prove that the specified FP value is either NaN or
/// never less than -0.0.
///
///      NaN --> true
///       +0 --> true
///       -0 --> true
///   x > +0 --> true
///   x < -0 --> false
bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI);

/// Return true if the floating-point scalar value is not an infinity or if
/// the floating-point vector value has no infinities. Return false if a value
/// could ever be infinity.
bool isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
                          unsigned Depth = 0);

/// Return true if the floating-point scalar value is not a NaN or if the
/// floating-point vector value has no NaN elements. Return false if a value
/// could ever be NaN.
bool isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
                     unsigned Depth = 0);

/// Return true if we can prove that the specified FP value's sign bit is 0.
///
///      NaN --> true/false (depending on the NaN's sign bit)
///       +0 --> true
///       -0 --> false
///   x > +0 --> true
///   x < -0 --> false
bool SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI);

/// If the specified value can be set by repeating the same byte in memory,
/// return the i8 value that it is represented with. This is true for all i8
/// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
/// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
/// i16 0x1234), return null. If the value is entirely undef and padding,
/// return undef.
Value *isBytewiseValue(Value *V, const DataLayout &DL);
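
// Sketch (V and DL assumed in scope): this is the query memset-forming
// transforms rely on.
//   if (Value *Pattern = isBytewiseValue(V, DL))
//     ; // A store of V can be rewritten as a memset with this i8 pattern.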

/// Given an aggregate and a sequence of indices, see if the scalar value
/// indexed is already around as a register, for example if it were inserted
/// directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
                         Instruction *InsertBefore = nullptr);

/// Analyze the specified pointer to see if it can be expressed as a base
/// pointer plus a constant offset. Return the base and offset to the caller.
///
/// This is a wrapper around Value::stripAndAccumulateConstantOffsets that
/// creates and later unpacks the required APInt.
inline Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                               const DataLayout &DL,
                                               bool AllowNonInbounds = true) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  Value *Base =
      Ptr->stripAndAccumulateConstantOffsets(DL, OffsetAPInt, AllowNonInbounds);
  Offset = OffsetAPInt.getSExtValue();
  return Base;
}
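
// Sketch (Ptr and DL assumed in scope): decompose a pointer into base plus
// byte offset.
//   int64_t Offset = 0;
//   Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL);
//   // For `%p = getelementptr i32, ptr %q, i64 1`, Base is %q, Offset is 4.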

inline const Value *
GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
                                 const DataLayout &DL,
                                 bool AllowNonInbounds = true) {
  return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL,
                                          AllowNonInbounds);
}

/// Returns true if the GEP is based on a pointer to a string (array of
/// \p CharSize integers) and is indexing into this string.
bool isGEPBasedOnPointerToString(const GEPOperator *GEP, unsigned CharSize = 8);

/// Represents offset+length into a ConstantDataArray.
struct ConstantDataArraySlice {
  /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
  /// initializer, it just doesn't fit the ConstantDataArray interface).
  const ConstantDataArray *Array;

  /// Slice starts at this Offset.
  uint64_t Offset;

  /// Length of the slice.
  uint64_t Length;

  /// Moves the Offset and adjusts Length accordingly.
  void move(uint64_t Delta) {
    assert(Delta < Length);
    Offset += Delta;
    Length -= Delta;
  }

  /// Convenience accessor for elements in the slice.
  uint64_t operator[](unsigned I) const {
    return Array == nullptr ? 0 : Array->getElementAsInteger(I + Offset);
  }
};

/// Returns true if the value \p V is a pointer into a ConstantDataArray.
/// If successful \p Slice will point to a ConstantDataArray info object
/// with an appropriate offset.
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
                              unsigned ElementSize, uint64_t Offset = 0);
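
// Sketch (V assumed to point into constant data, e.g. `gep @str, 0, 1` with
// @str = constant [6 x i8] c"hello\00"; ElementSize is in bits, so 8 for i8):
//   ConstantDataArraySlice Slice;
//   if (getConstantDataArrayInfo(V, Slice, /*ElementSize=*/8))
//     ; // Slice[0] would be 'e' here; Slice.move(1) narrows to "llo\00".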

/// This function computes the length of a null-terminated C string pointed to
/// by V. If successful, it returns true and returns the string in Str. If
/// unsuccessful, it returns false. This does not include the trailing null
/// character by default. If TrimAtNul is set to false, then this returns any
/// trailing null characters as well as any other characters that come after
/// it.
bool getConstantStringInfo(const Value *V, StringRef &Str,
                           bool TrimAtNul = true);

/// If we can compute the length of the string pointed to by the specified
/// pointer, return 'len+1'. If we can't, return 0.
uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);
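
// Sketch (V assumed to point at a constant string such as c"hi\00"):
//   StringRef Str;
//   if (getConstantStringInfo(V, Str))
//     ; // Str == "hi"; GetStringLength(V) == 3, i.e. strlen + 1.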

/// This function returns the call's pointer argument that is considered the
/// same by aliasing rules. You CAN'T use it to replace one value with another.
/// If \p MustPreserveNullness is true, the call must preserve the nullness of
/// the pointer.
const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call,
                                                  bool MustPreserveNullness);
inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call,
                                                   bool MustPreserveNullness) {
  return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
      const_cast<const CallBase *>(Call), MustPreserveNullness));
}

/// {launder,strip}.invariant.group returns a pointer that aliases its
/// argument, and it only captures the pointer by returning it.
/// These intrinsics are not marked as nocapture, because returning is
/// considered as capture. Nor are the arguments marked as returned, because
/// that would make the attribute useless. If \p MustPreserveNullness is true,
/// the intrinsic must preserve the nullness of the pointer.
bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
    const CallBase *Call, bool MustPreserveNullness);

/// This method strips off any GEP address adjustments and pointer casts from
/// the specified value, returning the original object being addressed. Note
/// that the returned value has pointer type if the specified value does. If
/// the MaxLookup value is non-zero, it limits the number of instructions to
/// be stripped off.
const Value *getUnderlyingObject(const Value *V, unsigned MaxLookup = 6);
inline Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6) {
  // Force const to avoid infinite recursion.
  const Value *VConst = V;
  return const_cast<Value *>(getUnderlyingObject(VConst, MaxLookup));
}
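
// Sketch (Ptr assumed in scope; isa<> comes from llvm/Support/Casting.h):
//   const Value *Obj = getUnderlyingObject(Ptr);
//   if (isa<AllocaInst>(Obj))
//     ; // Ptr is derived from a stack allocation.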

/// This method is similar to getUnderlyingObject except that it can
/// look through phi and select instructions and return multiple objects.
///
/// If LoopInfo is passed, loop phis are further analyzed. If a pointer
/// accesses different objects in each iteration, we don't look through the
/// phi node. E.g. consider this loop nest:
///
///   int **A;
///   for (i)
///     for (j) {
///       A[i][j] = A[i-1][j] * B[j]
///     }
///
/// This is transformed by Load-PRE to stash away A[i] for the next iteration
/// of the outer loop:
///
///   Curr = A[0];          // Prev_0
///   for (i: 1..N) {
///     Prev = Curr;        // Prev = PHI (Prev_0, Curr)
///     Curr = A[i];
///     for (j: 0..N) {
///       Curr[j] = Prev[j] * B[j]
///     }
///   }
///
/// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
/// should not assume that Curr and Prev share the same underlying object;
/// thus it shouldn't look through the phi above.
void getUnderlyingObjects(const Value *V,
                          SmallVectorImpl<const Value *> &Objects,
                          LoopInfo *LI = nullptr, unsigned MaxLookup = 6);

/// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
bool getUnderlyingObjectsForCodeGen(const Value *V,
                                    SmallVectorImpl<Value *> &Objects);

/// Returns the unique alloca where the value comes from, or nullptr.
/// If OffsetZero is true, check that V points to the beginning of the alloca.
AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false);
inline const AllocaInst *findAllocaForValue(const Value *V,
                                            bool OffsetZero = false) {
  return findAllocaForValue(const_cast<Value *>(V), OffsetZero);
}

/// Return true if the only users of this pointer are lifetime markers.
bool onlyUsedByLifetimeMarkers(const Value *V);

/// Return true if the only users of this pointer are lifetime markers or
/// droppable instructions.
bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);

/// Return true if speculation of the given load must be suppressed to avoid
/// ordering or interfering with an active sanitizer. If not suppressed,
/// dereferenceability and alignment must be proven separately. Note: This
/// is only needed for raw reasoning; if you use the interface below
/// (isSafeToSpeculativelyExecute), this is handled internally.
bool mustSuppressSpeculation(const LoadInst &LI);

/// Return true if the instruction does not have any effects besides
/// calculating the result and does not have undefined behavior.
///
/// This method never returns true for an instruction that returns true for
/// mayHaveSideEffects; however, this method also does some other checks in
/// addition. It checks for undefined behavior, like dividing by zero or
/// loading from an invalid pointer (but not for undefined results, like a
/// shift with a shift amount larger than the width of the result). It checks
/// for malloc and alloca because speculatively executing them might cause a
/// memory leak. It also returns false for instructions related to control
/// flow, specifically terminators and PHI nodes.
///
/// If the CtxI is specified this method performs context-sensitive analysis
/// and returns true if it is safe to execute the instruction immediately
/// before the CtxI.
///
/// If the CtxI is NOT specified this method only looks at the instruction
/// itself and its operands, so if this method returns true, it is safe to
/// move the instruction as long as the correct dominance relationships for
/// the operands and users hold.
///
/// This method can return true for instructions that read memory;
/// for such instructions, moving them may change the resulting value.
bool isSafeToSpeculativelyExecute(const Instruction *I,
                                  const Instruction *CtxI = nullptr,
                                  AssumptionCache *AC = nullptr,
                                  const DominatorTree *DT = nullptr,
                                  const TargetLibraryInfo *TLI = nullptr);
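
// Sketch of a hoisting use (I and InsertPt assumed in scope; operand
// dominance is assumed to have been checked already):
//   if (isSafeToSpeculativelyExecute(I, InsertPt))
//     I->moveBefore(InsertPt); // hoist above the branch guarding I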

/// This returns the same result as isSafeToSpeculativelyExecute if Opcode is
/// the actual opcode of Inst. If the provided and actual opcode differ, the
/// function (virtually) overrides the opcode of Inst with the provided
/// Opcode. There are some constraints in this case:
/// * If Opcode has a fixed number of operands (e.g., as binary operators do),
///   then Inst has to have at least as many leading operands. The function
///   will ignore all trailing operands beyond that number.
/// * If Opcode allows for an arbitrary number of operands (e.g., as CallInsts
///   do), then all operands are considered.
/// * The virtual instruction has to satisfy all typing rules of the provided
///   Opcode.
/// * This function is pessimistic in the following sense: If one actually
///   materialized the virtual instruction, then isSafeToSpeculativelyExecute
///   may say that the materialized instruction is speculatable whereas this
///   function may have said that the instruction wouldn't be speculatable.
///   This behavior is a shortcoming in the current implementation and not
///   intentional.
bool isSafeToSpeculativelyExecuteWithOpcode(
    unsigned Opcode, const Instruction *Inst, const Instruction *CtxI = nullptr,
    AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr,
    const TargetLibraryInfo *TLI = nullptr);

/// Returns true if the result or effects of the given instruction \p I
/// depend on values not reachable through the def-use graph.
/// * Memory dependence arises for example if the instruction reads from
///   memory or may produce effects or undefined behavior. Memory-dependent
///   instructions generally cannot be reordered with respect to other
///   memory-dependent instructions.
/// * Control dependence arises for example if the instruction may fault
///   if lifted above a throwing call or infinite loop.
bool mayHaveNonDefUseDependency(const Instruction &I);

/// Return true if it is an intrinsic that cannot be speculated but also
/// cannot trap.
bool isAssumeLikeIntrinsic(const Instruction *I);

/// Return true if it is valid to use the assumptions provided by an
/// assume intrinsic, I, at the point in the control-flow identified by the
/// context instruction, CxtI.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
                             const DominatorTree *DT = nullptr);

enum class OverflowResult {
  /// Always overflows in the direction of signed/unsigned min value.
  AlwaysOverflowsLow,
  /// Always overflows in the direction of signed/unsigned max value.
  AlwaysOverflowsHigh,
  /// May or may not overflow.
  MayOverflow,
  /// Never overflows.
  NeverOverflows,
};

OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
                                             const Value *RHS,
                                             const DataLayout &DL,
                                             AssumptionCache *AC,
                                             const Instruction *CxtI,
                                             const DominatorTree *DT,
                                             bool UseInstrInfo = true);
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                           const DataLayout &DL,
                                           AssumptionCache *AC,
                                           const Instruction *CxtI,
                                           const DominatorTree *DT,
                                           bool UseInstrInfo = true);
OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
                                             const Value *RHS,
                                             const DataLayout &DL,
                                             AssumptionCache *AC,
                                             const Instruction *CxtI,
                                             const DominatorTree *DT,
                                             bool UseInstrInfo = true);
OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
                                           const DataLayout &DL,
                                           AssumptionCache *AC = nullptr,
                                           const Instruction *CxtI = nullptr,
                                           const DominatorTree *DT = nullptr);
/// This version also leverages the sign bit of Add if known.
OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
                                           const DataLayout &DL,
                                           AssumptionCache *AC = nullptr,
                                           const Instruction *CxtI = nullptr,
                                           const DominatorTree *DT = nullptr);
OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
                                             const Value *RHS,
                                             const DataLayout &DL,
                                             AssumptionCache *AC,
                                             const Instruction *CxtI,
                                             const DominatorTree *DT);
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
                                           const DataLayout &DL,
                                           AssumptionCache *AC,
                                           const Instruction *CxtI,
                                           const DominatorTree *DT);
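
// Sketch (LHS, RHS, DL, AC, CxtI, DT assumed in scope): prove that an
// unsigned add cannot wrap, e.g. to fold away the overflow bit of
// llvm.uadd.with.overflow.
//   if (computeOverflowForUnsignedAdd(LHS, RHS, DL, AC, CxtI, DT) ==
//       OverflowResult::NeverOverflows)
//     ; // The addition can be marked nuw; its overflow bit is false.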

/// Returns true if the arithmetic part of \p WO's result is used only along
/// the paths control dependent on the computation not overflowing, \p WO
/// being an <op>.with.overflow intrinsic.
bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                               const DominatorTree &DT);

/// Determine the possible constant range of an integer or vector of integer
/// value. This is intended as a cheap, non-recursive check.
ConstantRange computeConstantRange(const Value *V, bool ForSigned,
                                   bool UseInstrInfo = true,
                                   AssumptionCache *AC = nullptr,
                                   const Instruction *CtxI = nullptr,
                                   const DominatorTree *DT = nullptr,
                                   unsigned Depth = 0);

/// Return true if this function can prove that the instruction I will
/// always transfer execution to one of its successors (including the next
/// instruction that follows within a basic block). E.g. this is not
/// guaranteed for function calls that could loop infinitely.
///
/// In other words, this function returns false for instructions that may
/// transfer execution or fail to transfer execution in a way that is not
/// captured in the CFG nor in the sequence of instructions within a basic
/// block.
///
/// Undefined behavior is assumed not to happen, so e.g. division is
/// guaranteed to transfer execution to the following instruction even
/// though division by zero might cause undefined behavior.
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);

/// Returns true if this block does not contain a potential implicit exit.
/// This is equivalent to saying that all instructions within the basic block
/// are guaranteed to transfer execution to their successor within the basic
/// block. This has the same assumptions w.r.t. undefined behavior as the
/// instruction variant of this function.
bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);

/// Return true if every instruction in the range (Begin, End) is
/// guaranteed to transfer execution to its static successor. \p ScanLimit
/// bounds the search to avoid scanning huge blocks.
bool isGuaranteedToTransferExecutionToSuccessor(
    BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
    unsigned ScanLimit = 32);

/// Same as previous, but with range expressed via iterator_range.
bool isGuaranteedToTransferExecutionToSuccessor(
    iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit = 32);

/// Return true if this function can prove that the instruction I
/// is executed for every iteration of the loop L.
///
/// Note that this currently only considers the loop header.
bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                            const Loop *L);

/// Return true if \p PoisonOp's user yields poison or raises UB if its
/// operand \p PoisonOp is poison.
///
/// If \p PoisonOp is a vector or an aggregate and the operation's result is a
/// single value, any poison element in \p PoisonOp should make the result
/// poison or raise UB.
///
/// To filter out operands that raise UB on poison, you can use
/// getGuaranteedNonPoisonOps.
bool propagatesPoison(const Use &PoisonOp);

/// Insert operands of I into Ops such that I will trigger undefined behavior
/// if I is executed and that operand has a poison value.
void getGuaranteedNonPoisonOps(const Instruction *I,
                               SmallVectorImpl<const Value *> &Ops);

/// Insert operands of I into Ops such that I will trigger undefined behavior
/// if I is executed and that operand is not a well-defined value
/// (i.e. has undef bits or poison).
void getGuaranteedWellDefinedOps(const Instruction *I,
                                 SmallVectorImpl<const Value *> &Ops);

/// Return true if the given instruction must trigger undefined behavior
/// when I is executed with any operands which appear in KnownPoison holding
/// a poison value at the point of execution.
bool mustTriggerUB(const Instruction *I,
                   const SmallSet<const Value *, 16> &KnownPoison);

/// Return true if this function can prove that if Inst is executed
/// and yields a poison value or undef bits, then that will trigger
/// undefined behavior.
///
/// Note that this currently only considers the basic block that is
/// the parent of Inst.
bool programUndefinedIfUndefOrPoison(const Instruction *Inst);
bool programUndefinedIfPoison(const Instruction *Inst);

/// canCreateUndefOrPoison returns true if Op can create undef or poison from
/// non-undef & non-poison operands.
/// For vectors, canCreateUndefOrPoison returns true if there is potential
/// poison or undef in any element of the result when vectors without
/// undef/poison are given as operands.
/// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns
/// true. If Op raises immediate UB but never creates poison or undef
/// (e.g. sdiv I, 0), canCreatePoison returns false.
///
/// \p ConsiderFlagsAndMetadata controls whether poison-producing flags and
/// metadata on the instruction are considered. This can be used to see if the
/// instruction could still introduce undef or poison even without the
/// poison-generating flags and metadata which might be on the instruction.
/// (i.e. could the result of Op->dropPoisonGeneratingFlags() still create
/// poison or undef)
///
/// canCreatePoison returns true if Op can create poison from non-poison
/// operands.
bool canCreateUndefOrPoison(const Operator *Op,
                            bool ConsiderFlagsAndMetadata = true);
bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata = true);

/// Return true if V is poison given that ValAssumedPoison is already poison.
/// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`,
/// impliesPoison returns true.
bool impliesPoison(const Value *ValAssumedPoison, const Value *V);

/// Return true if this function can prove that V does not have undef bits
/// and is never poison. If V is an aggregate value or vector, check whether
/// all elements (except padding) are not undef or poison.
/// Note that this is different from canCreateUndefOrPoison because the
/// function assumes Op's operands are not poison/undef.
///
/// If CtxI and DT are specified this method performs flow-sensitive analysis
/// and returns true if it is guaranteed to be never undef or poison
/// immediately before the CtxI.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
                                      AssumptionCache *AC = nullptr,
                                      const Instruction *CtxI = nullptr,
                                      const DominatorTree *DT = nullptr,
                                      unsigned Depth = 0);
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
                               const Instruction *CtxI = nullptr,
                               const DominatorTree *DT = nullptr,
                               unsigned Depth = 0);
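
// Sketch (Cond, AC, CtxI, DT assumed in scope): branching on a poison
// condition is immediate UB, so a transform that introduces a new branch on
// Cond might first check:
//   if (isGuaranteedNotToBeUndefOrPoison(Cond, AC, CtxI, DT))
//     ; // Safe to create a conditional branch on Cond at CtxI.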

/// Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
  SPF_UNKNOWN = 0,
  SPF_SMIN,    /// Signed minimum
  SPF_UMIN,    /// Unsigned minimum
  SPF_SMAX,    /// Signed maximum
  SPF_UMAX,    /// Unsigned maximum
  SPF_FMINNUM, /// Floating point minnum
  SPF_FMAXNUM, /// Floating point maxnum
  SPF_ABS,     /// Absolute value
  SPF_NABS     /// Negated absolute value
};

/// Behavior when a floating point min/max is given one NaN and one
/// non-NaN as input.
enum SelectPatternNaNBehavior {
  SPNB_NA = 0,        /// NaN behavior not applicable.
  SPNB_RETURNS_NAN,   /// Given one NaN input, returns the NaN.
  SPNB_RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
  SPNB_RETURNS_ANY    /// Given one NaN input, can return either (or
                      /// it has been determined that no operands can
                      /// be NaN).
};

struct SelectPatternResult {
  SelectPatternFlavor Flavor;
  SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
                                        /// SPF_FMINNUM or SPF_FMAXNUM.
  bool Ordered; /// When implementing this min/max pattern as
                /// fcmp; select, does the fcmp have to be
                /// ordered?

  /// Return true if \p SPF is a min or a max pattern.
  static bool isMinOrMax(SelectPatternFlavor SPF) {
    return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS;
  }
};

/// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
/// and providing the out parameter results if we successfully match.
///
/// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be
/// the negation instruction from the idiom.
///
/// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
/// not match that of the original select. If this is the case, the cast
/// operation (one of Trunc,SExt,Zext) that must be done to transform the
/// type of LHS and RHS into the type of V is returned in CastOp.
///
/// For example:
///   %1 = icmp slt i32 %a, 4
///   %2 = sext i32 %a to i64
///   %3 = select i1 %1, i64 %2, i64 4
///
/// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
///
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                       Instruction::CastOps *CastOp = nullptr,
                                       unsigned Depth = 0);
inline SelectPatternResult matchSelectPattern(const Value *V,
                                              const Value *&LHS,
                                              const Value *&RHS) {
  Value *L = const_cast<Value *>(LHS);
  Value *R = const_cast<Value *>(RHS);
  auto Result = matchSelectPattern(const_cast<Value *>(V), L, R);
  LHS = L;
  RHS = R;
  return Result;
}
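
// Sketch (V assumed to be a select such as `select (icmp sgt a, b), a, b`):
//   Value *LHS, *RHS;
//   SelectPatternResult SPR = matchSelectPattern(V, LHS, RHS);
//   if (SPR.Flavor == SPF_SMAX)
//     ; // V computes smax(LHS, RHS) and could become llvm.smax.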

/// Determine the pattern that a select with the given compare as its
/// predicate and given values as its true/false operands would match.
SelectPatternResult matchDecomposedSelectPattern(
    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
    Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);

/// Return the canonical comparison predicate for the specified
/// minimum/maximum flavor.
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered = false);

/// Return the inverse minimum/maximum flavor of the specified flavor.
/// For example, signed minimum is the inverse of signed maximum.
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);

Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID);

/// Return the minimum or maximum constant value for the specified integer
/// min/max flavor and type.
APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth);

/// Check if the values in \p VL are select instructions that can be converted
/// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a
/// conversion is possible, together with a bool indicating whether all select
/// conditions are only used by the selects. Otherwise return
/// Intrinsic::not_intrinsic.
std::pair<Intrinsic::ID, bool>
canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);

/// Attempt to match a simple first order recurrence cycle of the form:
///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
///   %inc = binop %iv, %step
/// OR
///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
///   %inc = binop %step, %iv
///
/// A first order recurrence is a formula with the form: X_n = f(X_(n-1))
///
/// A couple of notes on subtleties in that definition:
/// * The Step does not have to be loop invariant. In math terms, it can
///   be a free variable. We allow recurrences with both constant and
///   variable coefficients. Callers may wish to filter cases where Step
///   does not dominate P.
/// * For non-commutative operators, we will match both forms. This
///   results in some odd recurrence structures. Callers may wish to filter
///   out recurrences where the phi is not the LHS of the returned operator.
/// * Because of the structure matched, the caller can assume as a post
///   condition of the match the presence of a Loop with P's parent as its
///   header *except* in unreachable code. (Dominance decays in unreachable
///   code.)
///
/// NOTE: This is intentionally simple. If you want the ability to analyze
/// non-trivial loop conditions, see ScalarEvolution instead.
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start,
                           Value *&Step);

/// Analogous to the above, but starting from the binary operator.
bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, Value *&Start,
                           Value *&Step);
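
// Sketch (P assumed to be a PHINode in a loop header):
//   BinaryOperator *BO;
//   Value *Start, *Step;
//   if (matchSimpleRecurrence(P, BO, Start, Step))
//     ; // P is `%iv = phi [Start, preheader], [binop(%iv, Step), latch]`.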

/// Return true if RHS is known to be implied true by LHS. Return false if
/// RHS is known to be implied false by LHS. Otherwise, return std::nullopt if
/// no implication can be made. A & B must be i1 (boolean) values or a vector
/// of such values. Note that the truth table for implication is the same as
/// <=u on i1 values (but not <=s!). The truth table for both is:
///
///      | T | F (B)
///    T | T | F
///    F | T | T
///    (A)
std::optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
                                       const DataLayout &DL,
                                       bool LHSIsTrue = true,
                                       unsigned Depth = 0);
std::optional<bool> isImpliedCondition(const Value *LHS,
                                       CmpInst::Predicate RHSPred,
                                       const Value *RHSOp0, const Value *RHSOp1,
                                       const DataLayout &DL,
                                       bool LHSIsTrue = true,
                                       unsigned Depth = 0);
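
// Sketch (LHS = `icmp ult %x, 10`, RHS = `icmp ult %x, 20`, both assumed in
// scope along with DL): knowing LHS is true implies RHS is true.
//   if (isImpliedCondition(LHS, RHS, DL) == true)
//     ; // RHS folds to true wherever LHS holds.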

/// Return the boolean condition value in the context of the given instruction
/// if it is known based on dominating conditions.
std::optional<bool> isImpliedByDomCondition(const Value *Cond,
                                            const Instruction *ContextI,
                                            const DataLayout &DL);
std::optional<bool> isImpliedByDomCondition(CmpInst::Predicate Pred,
                                            const Value *LHS, const Value *RHS,
                                            const Instruction *ContextI,
                                            const DataLayout &DL);

/// If Ptr2 is provably equal to Ptr1 plus a constant offset, return that
/// offset. For example, Ptr1 might be &A[42], and Ptr2 might be &A[40]. In
/// this case offset would be -8 (for an array of i32).
std::optional<int64_t> isPointerOffset(const Value *Ptr1, const Value *Ptr2,
                                       const DataLayout &DL);

} // end namespace llvm

#endif // LLVM_ANALYSIS_VALUETRACKING_H

#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif