#pragma once

#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif

//===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_VALUETRACKING_H
#define LLVM_ANALYSIS_VALUETRACKING_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include <cassert>
#include <cstdint>

namespace llvm {

class AddOperator;
class AllocaInst;
class APInt;
class AssumptionCache;
class DominatorTree;
class GEPOperator;
class LoadInst;
class WithOverflowInst;
struct KnownBits;
class Loop;
class LoopInfo;
class MDNode;
class OptimizationRemarkEmitter;
class StringRef;
class TargetLibraryInfo;
class Value;

constexpr unsigned MaxAnalysisRecursionDepth = 6;

/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL,
                      unsigned Depth = 0, AssumptionCache *AC = nullptr,
                      const Instruction *CxtI = nullptr,
                      const DominatorTree *DT = nullptr,
                      OptimizationRemarkEmitter *ORE = nullptr,
                      bool UseInstrInfo = true);
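
// Example (illustrative sketch; `V` is an integer-typed Value and `M` its
// Module, both assumed to be in scope). This uses the value-returning
// overload declared below:
//
//   const DataLayout &DL = M.getDataLayout();
//   KnownBits Known = computeKnownBits(V, DL);
//   if (Known.isNonNegative())
//     ...; // the sign bit of V is known to be zero
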
/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the demanded elements in the vector.
void computeKnownBits(const Value *V, const APInt &DemandedElts,
                      KnownBits &Known, const DataLayout &DL,
                      unsigned Depth = 0, AssumptionCache *AC = nullptr,
                      const Instruction *CxtI = nullptr,
                      const DominatorTree *DT = nullptr,
                      OptimizationRemarkEmitter *ORE = nullptr,
                      bool UseInstrInfo = true);

/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
                           unsigned Depth = 0, AssumptionCache *AC = nullptr,
                           const Instruction *CxtI = nullptr,
                           const DominatorTree *DT = nullptr,
                           OptimizationRemarkEmitter *ORE = nullptr,
                           bool UseInstrInfo = true);

/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                           const DataLayout &DL, unsigned Depth = 0,
                           AssumptionCache *AC = nullptr,
                           const Instruction *CxtI = nullptr,
                           const DominatorTree *DT = nullptr,
                           OptimizationRemarkEmitter *ORE = nullptr,
                           bool UseInstrInfo = true);

/// Compute known bits from the range metadata.
/// \p KnownZero the set of bits that are known to be zero
/// \p KnownOne the set of bits that are known to be one
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known);

/// Return true if LHS and RHS have no common bits set.
bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                         const DataLayout &DL, AssumptionCache *AC = nullptr,
                         const Instruction *CxtI = nullptr,
                         const DominatorTree *DT = nullptr,
                         bool UseInstrInfo = true);

/// Return true if the given value is known to have exactly one bit set when
/// defined. For vectors, return true if every element is known to be a power
/// of two when defined. Supports values with integer or pointer type and
/// vectors of integers. If 'OrZero' is set, then return true if the given
/// value is either a power of two or zero.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                            bool OrZero = false, unsigned Depth = 0,
                            AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);
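
// Example (illustrative sketch; `Divisor` is an assumed in-scope Value and
// `DL` a DataLayout). A known non-zero power of two permits strength
// reduction of urem to a mask:
//
//   if (isKnownToBeAPowerOfTwo(Divisor, DL, /*OrZero=*/false))
//     ...; // X urem Divisor can be rewritten as X & (Divisor - 1)
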
bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);

/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
                    AssumptionCache *AC = nullptr,
                    const Instruction *CxtI = nullptr,
                    const DominatorTree *DT = nullptr,
                    bool UseInstrInfo = true);
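
// Example (illustrative sketch; `Denom`, `AC`, `CxtI` and `DT` are assumed to
// be in scope). A non-zero denominator rules out divide-by-zero UB:
//
//   if (isKnownNonZero(Denom, DL, /*Depth=*/0, AC, CxtI, DT))
//     ...; // a udiv/sdiv by Denom cannot divide by zero
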
/// Return true if the two given values are negations of each other.
/// Currently it can recognize these Value pairs:
/// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
/// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false);

/// Returns true if the given value is known to be non-negative.
bool isKnownNonNegative(const Value *V, const DataLayout &DL,
                        unsigned Depth = 0, AssumptionCache *AC = nullptr,
                        const Instruction *CxtI = nullptr,
                        const DominatorTree *DT = nullptr,
                        bool UseInstrInfo = true);

/// Returns true if the given value is known to be positive (i.e. non-negative
/// and non-zero).
bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Returns true if the given value is known to be negative (i.e. non-positive
/// and non-zero).
bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Return true if the given values are known to be non-equal when defined.
/// Supports scalar integer types only.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case
/// where V is a vector, the mask, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const DataLayout &DL,
                       unsigned Depth = 0, AssumptionCache *AC = nullptr,
                       const Instruction *CxtI = nullptr,
                       const DominatorTree *DT = nullptr,
                       bool UseInstrInfo = true);

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign
/// bit (itself), but other cases can give us information. For example,
/// immediately after an "ashr X, 2", we know that the top 3 bits are all
/// equal to each other, so we return 3. For vectors, return the number of
/// sign bits for the vector element with the minimum number of known sign
/// bits.
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
                            unsigned Depth = 0, AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);
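
// Example (illustrative sketch; `Op` is an assumed in-scope integer Value).
// If every bit equals the sign bit, the value can only be 0 or -1:
//
//   unsigned SignBits = ComputeNumSignBits(Op, DL);
//   if (SignBits == Op->getType()->getScalarSizeInBits())
//     ...; // Op is known to be either all-zeros or all-ones
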
/// Get the upper bound on bit size for this Value \p Op as a signed integer.
/// i.e. x == sext(trunc(x to MaxSignificantBits) to bitwidth(x)).
/// Similar to the APInt::getSignificantBits function.
unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL,
                                   unsigned Depth = 0,
                                   AssumptionCache *AC = nullptr,
                                   const Instruction *CxtI = nullptr,
                                   const DominatorTree *DT = nullptr);

/// Map a call instruction to an intrinsic ID. Libcalls which have equivalent
/// intrinsics are treated as-if they were intrinsics.
Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
                                      const TargetLibraryInfo *TLI);

/// Return true if we can prove that the specified FP value is never equal to
/// -0.0.
bool CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
                          unsigned Depth = 0);

/// Return true if we can prove that the specified FP value is either NaN or
/// never less than -0.0.
///
///      NaN --> true
///       +0 --> true
///       -0 --> true
///   x > +0 --> true
///   x < -0 --> false
bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI);

/// Return true if the floating-point scalar value is not an infinity or if
/// the floating-point vector value has no infinities. Return false if a value
/// could ever be infinity.
bool isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
                          unsigned Depth = 0);

/// Return true if the floating-point scalar value is not a NaN or if the
/// floating-point vector value has no NaN elements. Return false if a value
/// could ever be NaN.
bool isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
                     unsigned Depth = 0);

/// Return true if we can prove that the specified FP value's sign bit is 0.
///
///      NaN --> true/false (depending on the NaN's sign bit)
///       +0 --> true
///       -0 --> false
///   x > +0 --> true
///   x < -0 --> false
bool SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI);

/// If the specified value can be set by repeating the same byte in memory,
/// return the i8 value that it is represented with. This is true for all i8
/// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
/// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
/// i16 0x1234), return null. If the value is entirely undef and padding,
/// return undef.
Value *isBytewiseValue(Value *V, const DataLayout &DL);
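
// Example (illustrative sketch; `StoredVal` is an assumed in-scope Value).
// A repeating byte pattern lets a store loop be lowered to a memset:
//
//   if (Value *Byte = isBytewiseValue(StoredVal, DL))
//     ...; // e.g. i32 0x01010101 yields the i8 constant 0x01
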
/// Given an aggregate and a sequence of indices, see if the scalar value
/// indexed is already around as a register, for example if it were inserted
/// directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
                         Instruction *InsertBefore = nullptr);

/// Analyze the specified pointer to see if it can be expressed as a base
/// pointer plus a constant offset. Return the base and offset to the caller.
///
/// This is a wrapper around Value::stripAndAccumulateConstantOffsets that
/// creates and later unpacks the required APInt.
inline Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                               const DataLayout &DL,
                                               bool AllowNonInbounds = true) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  Value *Base =
      Ptr->stripAndAccumulateConstantOffsets(DL, OffsetAPInt, AllowNonInbounds);

  Offset = OffsetAPInt.getSExtValue();
  return Base;
}
inline const Value *
GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
                                 const DataLayout &DL,
                                 bool AllowNonInbounds = true) {
  return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL,
                                          AllowNonInbounds);
}
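
// Example (illustrative sketch; `Ptr` is an assumed in-scope pointer Value):
//
//   int64_t Offset = 0;
//   Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL);
//   // For `getelementptr inbounds i32, i32* %p, i64 3` this returns %p
//   // with Offset == 12 (three 4-byte elements).
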
/// Returns true if the GEP is based on a pointer to a string (array of
/// \p CharSize integers) and is indexing into this string.
bool isGEPBasedOnPointerToString(const GEPOperator *GEP,
                                 unsigned CharSize = 8);

/// Represents offset+length into a ConstantDataArray.
struct ConstantDataArraySlice {
  /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
  /// initializer, it just doesn't fit the ConstantDataArray interface).
  const ConstantDataArray *Array;

  /// Slice starts at this Offset.
  uint64_t Offset;

  /// Length of the slice.
  uint64_t Length;

  /// Moves the Offset and adjusts Length accordingly.
  void move(uint64_t Delta) {
    assert(Delta < Length);
    Offset += Delta;
    Length -= Delta;
  }

  /// Convenience accessor for elements in the slice.
  uint64_t operator[](unsigned I) const {
    return Array == nullptr ? 0 : Array->getElementAsInteger(I + Offset);
  }
};

/// Returns true if the value \p V is a pointer into a ConstantDataArray.
/// If successful \p Slice will point to a ConstantDataArray info object
/// with an appropriate offset.
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
                              unsigned ElementSize, uint64_t Offset = 0);
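
// Example (illustrative sketch; `V` is an assumed in-scope pointer into a
// constant array of i8 elements):
//
//   ConstantDataArraySlice Slice;
//   if (getConstantDataArrayInfo(V, Slice, /*ElementSize=*/8)) {
//     uint64_t First = Slice[0]; // first element addressed by V
//     Slice.move(1);             // skip it; Length shrinks by one
//   }
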
/// This function computes the length of a null-terminated C string pointed to
/// by V. If successful, it returns true and returns the string in Str. If
/// unsuccessful, it returns false. This does not include the trailing null
/// character by default. If TrimAtNul is set to false, then this returns any
/// trailing null characters as well as any other characters that come after
/// it.
bool getConstantStringInfo(const Value *V, StringRef &Str,
                           uint64_t Offset = 0, bool TrimAtNul = true);
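
// Example (illustrative sketch; `V` is an assumed in-scope pointer into a
// constant global string):
//
//   StringRef Str;
//   if (getConstantStringInfo(V, Str))
//     ...; // Str now references the constant string, trimmed at the NUL
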
/// If we can compute the length of the string pointed to by the specified
/// pointer, return 'len+1'. If we can't, return 0.
uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);

/// This function returns the call pointer argument that is considered the
/// same by aliasing rules. You CAN'T use it to replace one value with
/// another. If \p MustPreserveNullness is true, the call must preserve the
/// nullness of the pointer.
const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call,
                                                  bool MustPreserveNullness);
inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call,
                                                   bool MustPreserveNullness) {
  return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
      const_cast<const CallBase *>(Call), MustPreserveNullness));
}

/// {launder,strip}.invariant.group returns a pointer that aliases its
/// argument, and it only captures the pointer by returning it.
/// These intrinsics are not marked as nocapture, because returning is
/// considered as capturing. The arguments are not marked as returned either,
/// because that would make the attribute useless. If \p MustPreserveNullness
/// is true, the intrinsic must preserve the nullness of the pointer.
bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
    const CallBase *Call, bool MustPreserveNullness);

/// This method strips off any GEP address adjustments and pointer casts from
/// the specified value, returning the original object being addressed. Note
/// that the returned value has pointer type if the specified value does. If
/// the MaxLookup value is non-zero, it limits the number of instructions to
/// be stripped off.
const Value *getUnderlyingObject(const Value *V, unsigned MaxLookup = 6);
inline Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6) {
  // Force const to avoid infinite recursion.
  const Value *VConst = V;
  return const_cast<Value *>(getUnderlyingObject(VConst, MaxLookup));
}
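
// Example (illustrative sketch; `Ptr` is an assumed in-scope pointer Value).
// Given IR such as:
//
//   %a = alloca [16 x i8]
//   %p = getelementptr inbounds [16 x i8], [16 x i8]* %a, i64 0, i64 4
//
// getUnderlyingObject walks back through the GEP and returns the alloca:
//
//   const Value *Obj = getUnderlyingObject(Ptr); // %a for Ptr == %p
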
/// This method is similar to getUnderlyingObject except that it can
/// look through phi and select instructions and return multiple objects.
///
/// If LoopInfo is passed, loop phis are further analyzed. If a pointer
/// accesses different objects in each iteration, we don't look through the
/// phi node. E.g. consider this loop nest:
///
///   int **A;
///   for (i)
///     for (j) {
///        A[i][j] = A[i-1][j] * B[j]
///     }
///
/// This is transformed by Load-PRE to stash away A[i] for the next iteration
/// of the outer loop:
///
///   Curr = A[0];          // Prev_0
///   for (i: 1..N) {
///     Prev = Curr;        // Prev = PHI (Prev_0, Curr)
///     Curr = A[i];
///     for (j: 0..N) {
///        Curr[j] = Prev[j] * B[j]
///     }
///   }
///
/// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
/// should not assume that Curr and Prev share the same underlying object;
/// thus it shouldn't look through the phi above.
void getUnderlyingObjects(const Value *V,
                          SmallVectorImpl<const Value *> &Objects,
                          LoopInfo *LI = nullptr, unsigned MaxLookup = 6);

/// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
bool getUnderlyingObjectsForCodeGen(const Value *V,
                                    SmallVectorImpl<Value *> &Objects);

/// Returns the unique alloca that the value comes from, or nullptr.
/// If OffsetZero is true, check that V points to the beginning of the alloca.
AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false);
inline const AllocaInst *findAllocaForValue(const Value *V,
                                            bool OffsetZero = false) {
  return findAllocaForValue(const_cast<Value *>(V), OffsetZero);
}

/// Return true if the only users of this pointer are lifetime markers.
bool onlyUsedByLifetimeMarkers(const Value *V);

/// Return true if the only users of this pointer are lifetime markers or
/// droppable instructions.
bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);

/// Return true if speculation of the given load must be suppressed to avoid
/// ordering or interfering with an active sanitizer. If not suppressed,
/// dereferenceability and alignment must be proven separately. Note: This
/// is only needed for raw reasoning; if you use the interface below
/// (isSafeToSpeculativelyExecute), this is handled internally.
bool mustSuppressSpeculation(const LoadInst &LI);
/// Return true if the instruction does not have any effects besides
/// calculating the result and does not have undefined behavior.
///
/// This method never returns true for an instruction that returns true for
/// mayHaveSideEffects; however, this method also does some other checks in
/// addition. It checks for undefined behavior, like dividing by zero or
/// loading from an invalid pointer (but not for undefined results, like a
/// shift with a shift amount larger than the width of the result). It checks
/// for malloc and alloca because speculatively executing them might cause a
/// memory leak. It also returns false for instructions related to control
/// flow, specifically terminators and PHI nodes.
///
/// If the CtxI is specified this method performs context-sensitive analysis
/// and returns true if it is safe to execute the instruction immediately
/// before the CtxI.
///
/// If the CtxI is NOT specified this method only looks at the instruction
/// itself and its operands, so if this method returns true, it is safe to
/// move the instruction as long as the correct dominance relationships for
/// the operands and users hold.
///
/// This method can return true for instructions that read memory;
/// for such instructions, moving them may change the resulting value.
bool isSafeToSpeculativelyExecute(const Value *V,
                                  const Instruction *CtxI = nullptr,
                                  const DominatorTree *DT = nullptr,
                                  const TargetLibraryInfo *TLI = nullptr);

/// Returns true if the result or effects of the given instruction \p I
/// depend on or influence global memory.
/// Memory dependence arises for example if the instruction reads from
/// memory or may produce effects or undefined behavior. Memory dependent
/// instructions generally cannot be reordered with respect to other memory
/// dependent instructions or moved into non-dominated basic blocks.
/// Instructions which just compute a value based on the values of their
/// operands are not memory dependent.
bool mayBeMemoryDependent(const Instruction &I);

/// Return true if it is an intrinsic that cannot be speculated but also
/// cannot trap.
bool isAssumeLikeIntrinsic(const Instruction *I);
/// Return true if it is valid to use the assumptions provided by an
/// assume intrinsic, I, at the point in the control-flow identified by the
/// context instruction, CxtI.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
                             const DominatorTree *DT = nullptr);

enum class OverflowResult {
  /// Always overflows in the direction of signed/unsigned min value.
  AlwaysOverflowsLow,
  /// Always overflows in the direction of signed/unsigned max value.
  AlwaysOverflowsHigh,
  /// May or may not overflow.
  MayOverflow,
  /// Never overflows.
  NeverOverflows,
};

OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
                                             const Value *RHS,
                                             const DataLayout &DL,
                                             AssumptionCache *AC,
                                             const Instruction *CxtI,
                                             const DominatorTree *DT,
                                             bool UseInstrInfo = true);
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                           const DataLayout &DL,
                                           AssumptionCache *AC,
                                           const Instruction *CxtI,
                                           const DominatorTree *DT,
                                           bool UseInstrInfo = true);
OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
                                             const Value *RHS,
                                             const DataLayout &DL,
                                             AssumptionCache *AC,
                                             const Instruction *CxtI,
                                             const DominatorTree *DT,
                                             bool UseInstrInfo = true);
OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
                                           const DataLayout &DL,
                                           AssumptionCache *AC = nullptr,
                                           const Instruction *CxtI = nullptr,
                                           const DominatorTree *DT = nullptr);
/// This version also leverages the sign bit of Add if known.
OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
                                           const DataLayout &DL,
                                           AssumptionCache *AC = nullptr,
                                           const Instruction *CxtI = nullptr,
                                           const DominatorTree *DT = nullptr);
OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
                                             const Value *RHS,
                                             const DataLayout &DL,
                                             AssumptionCache *AC,
                                             const Instruction *CxtI,
                                             const DominatorTree *DT);
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
                                           const DataLayout &DL,
                                           AssumptionCache *AC,
                                           const Instruction *CxtI,
                                           const DominatorTree *DT);
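
// Example (illustrative sketch; `LHS`, `RHS`, `AC`, `CxtI` and `DT` are
// assumed to be in scope). A NeverOverflows result justifies tagging an add
// with `nuw`:
//
//   OverflowResult OR =
//       computeOverflowForUnsignedAdd(LHS, RHS, DL, AC, CxtI, DT);
//   if (OR == OverflowResult::NeverOverflows)
//     ...; // `add nuw` is a valid strengthening of this add
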
/// Returns true if the arithmetic part of the \p WO 's result is
/// used only along the paths control dependent on the computation
/// not overflowing, \p WO being an <op>.with.overflow intrinsic.
bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                               const DominatorTree &DT);

/// Determine the possible constant range of an integer or vector of integer
/// value. This is intended as a cheap, non-recursive check.
ConstantRange computeConstantRange(const Value *V, bool ForSigned,
                                   bool UseInstrInfo = true,
                                   AssumptionCache *AC = nullptr,
                                   const Instruction *CtxI = nullptr,
                                   const DominatorTree *DT = nullptr,
                                   unsigned Depth = 0);

/// Return true if this function can prove that the instruction I will
/// always transfer execution to one of its successors (including the next
/// instruction that follows within a basic block). E.g. this is not
/// guaranteed for function calls that could loop infinitely.
///
/// In other words, this function returns false for instructions that may
/// transfer execution or fail to transfer execution in a way that is not
/// captured in the CFG nor in the sequence of instructions within a basic
/// block.
///
/// Undefined behavior is assumed not to happen, so e.g. division is
/// guaranteed to transfer execution to the following instruction even
/// though division by zero might cause undefined behavior.
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);

/// Returns true if this block does not contain a potential implicit exit.
/// This is equivalent to saying that all instructions within the basic block
/// are guaranteed to transfer execution to their successor within the basic
/// block. This has the same assumptions w.r.t. undefined behavior as the
/// instruction variant of this function.
bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);

/// Return true if every instruction in the range (Begin, End) is
/// guaranteed to transfer execution to its static successor. \p ScanLimit
/// bounds the search to avoid scanning huge blocks.
bool isGuaranteedToTransferExecutionToSuccessor(
    BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
    unsigned ScanLimit = 32);

/// Same as previous, but with range expressed via iterator_range.
bool isGuaranteedToTransferExecutionToSuccessor(
    iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit = 32);
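
// Example (illustrative sketch; `I` is an assumed in-scope Instruction).
// Before sinking I to the bottom of its block, check that everything after
// it is guaranteed to execute:
//
//   if (isGuaranteedToTransferExecutionToSuccessor(I->getIterator(),
//                                                  I->getParent()->end()))
//     ...; // nothing below I can throw, diverge, or otherwise exit early
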
/// Return true if this function can prove that the instruction I
/// is executed for every iteration of the loop L.
///
/// Note that this currently only considers the loop header.
bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                            const Loop *L);

/// Return true if I yields poison or raises UB if any of its operands is
/// poison.
/// Formally, given I = `r = op v1 v2 .. vN`, propagatesPoison returns true
/// if, for all i, r is evaluated to poison or op raises UB if vi = poison.
/// If vi is a vector or an aggregate and r is a single value, any poison
/// element in vi should make r poison or raise UB.
/// To filter out operands that raise UB on poison, you can use
/// getGuaranteedNonPoisonOps.
bool propagatesPoison(const Operator *I);

/// Insert operands of I into Ops such that I will trigger undefined behavior
/// if I is executed and that operand has a poison value.
void getGuaranteedNonPoisonOps(const Instruction *I,
                               SmallPtrSetImpl<const Value *> &Ops);
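
// Example (illustrative sketch; `I` is an assumed in-scope Instruction).
// For a udiv, the divisor is collected: executing the udiv with a poison
// divisor is immediate UB.
//
//   SmallPtrSet<const Value *, 4> NonPoisonOps;
//   getGuaranteedNonPoisonOps(I, NonPoisonOps);
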
/// Insert operands of I into Ops such that I will trigger undefined behavior
/// if I is executed and that operand is not a well-defined value
/// (i.e. has undef bits or poison).
void getGuaranteedWellDefinedOps(const Instruction *I,
                                 SmallPtrSetImpl<const Value *> &Ops);

/// Return true if the given instruction must trigger undefined behavior
/// when I is executed with any operands which appear in KnownPoison holding
/// a poison value at the point of execution.
bool mustTriggerUB(const Instruction *I,
                   const SmallSet<const Value *, 16> &KnownPoison);
/// Return true if this function can prove that if Inst is executed
/// and yields a poison value or undef bits, then that will trigger
/// undefined behavior.
///
/// Note that this currently only considers the basic block that is
/// the parent of Inst.
bool programUndefinedIfUndefOrPoison(const Instruction *Inst);
bool programUndefinedIfPoison(const Instruction *Inst);

/// canCreateUndefOrPoison returns true if Op can create undef or poison from
/// non-undef & non-poison operands.
/// For vectors, canCreateUndefOrPoison returns true if there is potential
/// poison or undef in any element of the result when vectors without
/// undef/poison are given as operands.
/// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns
/// true. If Op raises immediate UB but never creates poison or undef
/// (e.g. sdiv I, 0), canCreatePoison returns false.
///
/// \p ConsiderFlags controls whether poison producing flags on the
/// instruction are considered. This can be used to see if the instruction
/// could still introduce undef or poison even without poison generating flags
/// which might be on the instruction. (i.e. could the result of
/// Op->dropPoisonGeneratingFlags() still create poison or undef)
///
/// canCreatePoison returns true if Op can create poison from non-poison
/// operands.
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlags = true);
bool canCreatePoison(const Operator *Op, bool ConsiderFlags = true);

/// Return true if V is poison given that ValAssumedPoison is already poison.
/// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`,
/// impliesPoison returns true.
bool impliesPoison(const Value *ValAssumedPoison, const Value *V);

/// Return true if this function can prove that V does not have undef bits
/// and is never poison. If V is an aggregate value or vector, check whether
/// all elements (except padding) are not undef or poison.
/// Note that this is different from canCreateUndefOrPoison because the
/// function assumes Op's operands are not poison/undef.
///
/// If CtxI and DT are specified this method performs flow-sensitive analysis
/// and returns true if it is guaranteed to be never undef or poison
/// immediately before the CtxI.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
                                      AssumptionCache *AC = nullptr,
                                      const Instruction *CtxI = nullptr,
                                      const DominatorTree *DT = nullptr,
                                      unsigned Depth = 0);
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
                               const Instruction *CtxI = nullptr,
                               const DominatorTree *DT = nullptr,
                               unsigned Depth = 0);
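
// Example (illustrative sketch; `Op`, `AC`, `CtxI` and `DT` are assumed to be
// in scope). Before speculating Op into unconditionally executed code, make
// sure doing so cannot smear poison into the new location:
//
//   if (isGuaranteedNotToBePoison(Op, AC, CtxI, DT))
//     ...; // Op can be hoisted without introducing poison
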
/// Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
  SPF_UNKNOWN = 0,
  SPF_SMIN,    /// Signed minimum
  SPF_UMIN,    /// Unsigned minimum
  SPF_SMAX,    /// Signed maximum
  SPF_UMAX,    /// Unsigned maximum
  SPF_FMINNUM, /// Floating point minnum
  SPF_FMAXNUM, /// Floating point maxnum
  SPF_ABS,     /// Absolute value
  SPF_NABS     /// Negated absolute value
};

/// Behavior when a floating point min/max is given one NaN and one
/// non-NaN as input.
enum SelectPatternNaNBehavior {
  SPNB_NA = 0,        /// NaN behavior not applicable.
  SPNB_RETURNS_NAN,   /// Given one NaN input, returns the NaN.
  SPNB_RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
  SPNB_RETURNS_ANY    /// Given one NaN input, can return either (or
                      /// it has been determined that no operands can
                      /// be NaN).
};

struct SelectPatternResult {
  SelectPatternFlavor Flavor;
  SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
                                        /// SPF_FMINNUM or SPF_FMAXNUM.
  bool Ordered;                         /// When implementing this min/max pattern as
                                        /// fcmp; select, does the fcmp have to be
                                        /// ordered?

  /// Return true if \p SPF is a min or a max pattern.
  static bool isMinOrMax(SelectPatternFlavor SPF) {
    return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS;
  }
};

/// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
/// and providing the out parameter results if we successfully match.
///
/// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be
/// the negation instruction from the idiom.
///
/// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
/// not match that of the original select. If this is the case, the cast
/// operation (one of Trunc,SExt,Zext) that must be done to transform the
/// type of LHS and RHS into the type of V is returned in CastOp.
///
/// For example:
///   %1 = icmp slt i32 %a, i32 4
///   %2 = sext i32 %a to i64
///   %3 = select i1 %1, i64 %2, i64 4
///
/// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
///
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                       Instruction::CastOps *CastOp = nullptr,
                                       unsigned Depth = 0);

inline SelectPatternResult
matchSelectPattern(const Value *V, const Value *&LHS, const Value *&RHS) {
  Value *L = const_cast<Value *>(LHS);
  Value *R = const_cast<Value *>(RHS);
  auto Result = matchSelectPattern(const_cast<Value *>(V), L, R);
  LHS = L;
  RHS = R;
  return Result;
}
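
// Example (illustrative sketch; `Sel` is an assumed in-scope select
// instruction):
//
//   Value *LHS, *RHS;
//   SelectPatternResult Res = matchSelectPattern(Sel, LHS, RHS);
//   if (Res.Flavor == SPF_SMAX)
//     ...; // Sel computes smax(LHS, RHS), e.g. for
//          //   %c = icmp sgt i32 %a, %b
//          //   %s = select i1 %c, i32 %a, i32 %b
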
/// Determine the pattern that a select with the given compare as its
/// predicate and given values as its true/false operands would match.
SelectPatternResult matchDecomposedSelectPattern(
    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
    Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);

/// Return the canonical comparison predicate for the specified
/// minimum/maximum flavor.
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF,
                                 bool Ordered = false);

/// Return the inverse minimum/maximum flavor of the specified flavor.
/// For example, signed minimum is the inverse of signed maximum.
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);

Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID);

/// Return the canonical inverse comparison predicate for the specified
/// minimum/maximum flavor.
CmpInst::Predicate getInverseMinMaxPred(SelectPatternFlavor SPF);

/// Return the minimum or maximum constant value for the specified integer
/// min/max flavor and type.
APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth);

/// Check if the values in \p VL are select instructions that can be converted
/// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a
/// conversion is possible, together with a bool indicating whether all select
/// conditions are only used by the selects. Otherwise return
/// Intrinsic::not_intrinsic.
std::pair<Intrinsic::ID, bool>
canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);

/// Attempt to match a simple first order recurrence cycle of the form:
///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
///   %inc = binop %iv, %step
/// OR
///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
///   %inc = binop %step, %iv
///
/// A first order recurrence is a formula with the form: X_n = f(X_(n-1))
///
/// A couple of notes on subtleties in that definition:
/// * The Step does not have to be loop invariant. In math terms, it can
///   be a free variable. We allow recurrences with both constant and
///   variable coefficients. Callers may wish to filter cases where Step
///   does not dominate P.
/// * For non-commutative operators, we will match both forms. This
///   results in some odd recurrence structures. Callers may wish to filter
///   out recurrences where the phi is not the LHS of the returned operator.
/// * Because of the structure matched, the caller can assume as a post
///   condition of the match the presence of a Loop with P's parent as its
///   header *except* in unreachable code. (Dominance decays in unreachable
///   code.)
///
/// NOTE: This is intentionally simple. If you want the ability to analyze
/// non-trivial loop conditions, see ScalarEvolution instead.
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
                           Value *&Start, Value *&Step);

/// Analogous to the above, but starting from the binary operator.
bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
                           Value *&Start, Value *&Step);
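
// Example (illustrative sketch; `Phi` is an assumed in-scope PHINode heading
// a loop):
//
//   BinaryOperator *BO;
//   Value *Start, *Step;
//   if (matchSimpleRecurrence(Phi, BO, Start, Step))
//     ...; // Phi evolves as X_n = BO(X_(n-1), Step) starting at Start,
//          // e.g. a canonical `i += 1` induction variable
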
/// Return true if RHS is known to be implied true by LHS. Return false if
/// RHS is known to be implied false by LHS. Otherwise, return None if no
/// implication can be made.
/// A & B must be i1 (boolean) values or a vector of such values. Note that
/// the truth table for implication is the same as <=u on i1 values (but not
/// <=s!). The truth table for both is:
///    | T | F (B)
///  T | T | F
///  F | T | T
/// (A)
Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
                                  const DataLayout &DL, bool LHSIsTrue = true,
                                  unsigned Depth = 0);
Optional<bool> isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
                                  const Value *RHSOp0, const Value *RHSOp1,
                                  const DataLayout &DL, bool LHSIsTrue = true,
                                  unsigned Depth = 0);
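
// Example (illustrative sketch; `Dom` and `Cond` are assumed in-scope i1
// Values). If the dominating condition Dom is `icmp sgt %x, 10` and Cond is
// `icmp sgt %x, 5`, then Dom being true implies Cond is true:
//
//   Optional<bool> Implied = isImpliedCondition(Dom, Cond, DL);
//   if (Implied && *Implied)
//     ...; // Cond is known true wherever Dom is true
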
/// Return the boolean condition value in the context of the given instruction
/// if it is known based on dominating conditions.
Optional<bool> isImpliedByDomCondition(const Value *Cond,
                                       const Instruction *ContextI,
                                       const DataLayout &DL);
Optional<bool> isImpliedByDomCondition(CmpInst::Predicate Pred,
                                       const Value *LHS, const Value *RHS,
                                       const Instruction *ContextI,
                                       const DataLayout &DL);

/// If Ptr2 is provably equal to Ptr1 plus a constant offset, return that
/// offset. For example, Ptr1 might be &A[42], and Ptr2 might be &A[40]. In
/// this case the offset would be -8 (assuming 4-byte elements).
Optional<int64_t> isPointerOffset(const Value *Ptr1, const Value *Ptr2,
                                  const DataLayout &DL);

} // end namespace llvm

#endif // LLVM_ANALYSIS_VALUETRACKING_H

#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif