// llvm/include/llvm/Analysis/ValueTracking.h
  1. #pragma once
  2. #ifdef __GNUC__
  3. #pragma GCC diagnostic push
  4. #pragma GCC diagnostic ignored "-Wunused-parameter"
  5. #endif
  6. //===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
  7. //
  8. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  9. // See https://llvm.org/LICENSE.txt for license information.
  10. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  11. //
  12. //===----------------------------------------------------------------------===//
  13. //
  14. // This file contains routines that help analyze properties that chains of
  15. // computations have.
  16. //
  17. //===----------------------------------------------------------------------===//
  18. #ifndef LLVM_ANALYSIS_VALUETRACKING_H
  19. #define LLVM_ANALYSIS_VALUETRACKING_H
  20. #include "llvm/ADT/ArrayRef.h"
  21. #include "llvm/ADT/Optional.h"
  22. #include "llvm/ADT/SmallSet.h"
  23. #include "llvm/IR/Constants.h"
  24. #include "llvm/IR/DataLayout.h"
  25. #include "llvm/IR/InstrTypes.h"
  26. #include "llvm/IR/Intrinsics.h"
  27. #include "llvm/IR/Operator.h"
  28. #include <cassert>
  29. #include <cstdint>
  30. namespace llvm {
  31. class AddOperator;
  32. class AllocaInst;
  33. class APInt;
  34. class AssumptionCache;
  35. class DominatorTree;
  36. class GEPOperator;
  37. class IntrinsicInst;
  38. class LoadInst;
  39. class WithOverflowInst;
  40. struct KnownBits;
  41. class Loop;
  42. class LoopInfo;
  43. class MDNode;
  44. class OptimizationRemarkEmitter;
  45. class StringRef;
  46. class TargetLibraryInfo;
  47. class Value;
  48. constexpr unsigned MaxAnalysisRecursionDepth = 6;
  49. /// Determine which bits of V are known to be either zero or one and return
  50. /// them in the KnownZero/KnownOne bit sets.
  51. ///
  52. /// This function is defined on values with integer type, values with pointer
  53. /// type, and vectors of integers. In the case
  54. /// where V is a vector, the known zero and known one values are the
  55. /// same width as the vector element, and the bit is set only if it is true
  56. /// for all of the elements in the vector.
  57. void computeKnownBits(const Value *V, KnownBits &Known,
  58. const DataLayout &DL, unsigned Depth = 0,
  59. AssumptionCache *AC = nullptr,
  60. const Instruction *CxtI = nullptr,
  61. const DominatorTree *DT = nullptr,
  62. OptimizationRemarkEmitter *ORE = nullptr,
  63. bool UseInstrInfo = true);
  64. /// Determine which bits of V are known to be either zero or one and return
  65. /// them in the KnownZero/KnownOne bit sets.
  66. ///
  67. /// This function is defined on values with integer type, values with pointer
  68. /// type, and vectors of integers. In the case
  69. /// where V is a vector, the known zero and known one values are the
  70. /// same width as the vector element, and the bit is set only if it is true
  71. /// for all of the demanded elements in the vector.
  72. void computeKnownBits(const Value *V, const APInt &DemandedElts,
  73. KnownBits &Known, const DataLayout &DL,
  74. unsigned Depth = 0, AssumptionCache *AC = nullptr,
  75. const Instruction *CxtI = nullptr,
  76. const DominatorTree *DT = nullptr,
  77. OptimizationRemarkEmitter *ORE = nullptr,
  78. bool UseInstrInfo = true);
  79. /// Returns the known bits rather than passing by reference.
  80. KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
  81. unsigned Depth = 0, AssumptionCache *AC = nullptr,
  82. const Instruction *CxtI = nullptr,
  83. const DominatorTree *DT = nullptr,
  84. OptimizationRemarkEmitter *ORE = nullptr,
  85. bool UseInstrInfo = true);
  86. /// Returns the known bits rather than passing by reference.
  87. KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
  88. const DataLayout &DL, unsigned Depth = 0,
  89. AssumptionCache *AC = nullptr,
  90. const Instruction *CxtI = nullptr,
  91. const DominatorTree *DT = nullptr,
  92. OptimizationRemarkEmitter *ORE = nullptr,
  93. bool UseInstrInfo = true);
  94. /// Compute known bits from the range metadata.
  95. /// \p KnownZero the set of bits that are known to be zero
  96. /// \p KnownOne the set of bits that are known to be one
  97. void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
  98. KnownBits &Known);
  99. /// Return true if LHS and RHS have no common bits set.
  100. bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
  101. const DataLayout &DL,
  102. AssumptionCache *AC = nullptr,
  103. const Instruction *CxtI = nullptr,
  104. const DominatorTree *DT = nullptr,
  105. bool UseInstrInfo = true);
  106. /// Return true if the given value is known to have exactly one bit set when
  107. /// defined. For vectors return true if every element is known to be a power
  108. /// of two when defined. Supports values with integer or pointer type and
  109. /// vectors of integers. If 'OrZero' is set, then return true if the given
  110. /// value is either a power of two or zero.
  111. bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
  112. bool OrZero = false, unsigned Depth = 0,
  113. AssumptionCache *AC = nullptr,
  114. const Instruction *CxtI = nullptr,
  115. const DominatorTree *DT = nullptr,
  116. bool UseInstrInfo = true);
  117. bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
  118. /// Return true if the given value is known to be non-zero when defined. For
  119. /// vectors, return true if every element is known to be non-zero when
  120. /// defined. For pointers, if the context instruction and dominator tree are
  121. /// specified, perform context-sensitive analysis and return true if the
  122. /// pointer couldn't possibly be null at the specified instruction.
  123. /// Supports values with integer or pointer type and vectors of integers.
  124. bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
  125. AssumptionCache *AC = nullptr,
  126. const Instruction *CxtI = nullptr,
  127. const DominatorTree *DT = nullptr,
  128. bool UseInstrInfo = true);
  129. /// Return true if the two given values are negation.
  130. /// Currently can recognize Value pair:
  131. /// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
  132. /// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
  133. bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false);
  134. /// Returns true if the given value is known to be non-negative.
  135. bool isKnownNonNegative(const Value *V, const DataLayout &DL,
  136. unsigned Depth = 0,
  137. AssumptionCache *AC = nullptr,
  138. const Instruction *CxtI = nullptr,
  139. const DominatorTree *DT = nullptr,
  140. bool UseInstrInfo = true);
  141. /// Returns true if the given value is known to be positive (i.e. non-negative
  142. /// and non-zero).
  143. bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0,
  144. AssumptionCache *AC = nullptr,
  145. const Instruction *CxtI = nullptr,
  146. const DominatorTree *DT = nullptr,
  147. bool UseInstrInfo = true);
  148. /// Returns true if the given value is known to be negative (i.e. non-positive
  149. /// and non-zero).
  150. bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0,
  151. AssumptionCache *AC = nullptr,
  152. const Instruction *CxtI = nullptr,
  153. const DominatorTree *DT = nullptr,
  154. bool UseInstrInfo = true);
  155. /// Return true if the given values are known to be non-equal when defined.
  156. /// Supports scalar integer types only.
  157. bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
  158. AssumptionCache *AC = nullptr,
  159. const Instruction *CxtI = nullptr,
  160. const DominatorTree *DT = nullptr,
  161. bool UseInstrInfo = true);
  162. /// Return true if 'V & Mask' is known to be zero. We use this predicate to
  163. /// simplify operations downstream. Mask is known to be zero for bits that V
  164. /// cannot have.
  165. ///
  166. /// This function is defined on values with integer type, values with pointer
  167. /// type, and vectors of integers. In the case
  168. /// where V is a vector, the mask, known zero, and known one values are the
  169. /// same width as the vector element, and the bit is set only if it is true
  170. /// for all of the elements in the vector.
  171. bool MaskedValueIsZero(const Value *V, const APInt &Mask,
  172. const DataLayout &DL,
  173. unsigned Depth = 0, AssumptionCache *AC = nullptr,
  174. const Instruction *CxtI = nullptr,
  175. const DominatorTree *DT = nullptr,
  176. bool UseInstrInfo = true);
  177. /// Return the number of times the sign bit of the register is replicated into
  178. /// the other bits. We know that at least 1 bit is always equal to the sign
  179. /// bit (itself), but other cases can give us information. For example,
  180. /// immediately after an "ashr X, 2", we know that the top 3 bits are all
  181. /// equal to each other, so we return 3. For vectors, return the number of
  182. /// sign bits for the vector element with the minimum number of known sign
  183. /// bits.
  184. unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
  185. unsigned Depth = 0, AssumptionCache *AC = nullptr,
  186. const Instruction *CxtI = nullptr,
  187. const DominatorTree *DT = nullptr,
  188. bool UseInstrInfo = true);
  189. /// This function computes the integer multiple of Base that equals V. If
  190. /// successful, it returns true and returns the multiple in Multiple. If
  191. /// unsuccessful, it returns false. Also, if V can be simplified to an
  192. /// integer, then the simplified V is returned in Val. Look through sext only
  193. /// if LookThroughSExt=true.
  194. bool ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
  195. bool LookThroughSExt = false,
  196. unsigned Depth = 0);
  197. /// Map a call instruction to an intrinsic ID. Libcalls which have equivalent
  198. /// intrinsics are treated as-if they were intrinsics.
  199. Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
  200. const TargetLibraryInfo *TLI);
  201. /// Return true if we can prove that the specified FP value is never equal to
  202. /// -0.0.
  203. bool CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
  204. unsigned Depth = 0);
  205. /// Return true if we can prove that the specified FP value is either NaN or
  206. /// never less than -0.0.
  207. ///
  208. /// NaN --> true
  209. /// +0 --> true
  210. /// -0 --> true
  211. /// x > +0 --> true
  212. /// x < -0 --> false
  213. bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI);
  214. /// Return true if the floating-point scalar value is not an infinity or if
  215. /// the floating-point vector value has no infinities. Return false if a value
  216. /// could ever be infinity.
  217. bool isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
  218. unsigned Depth = 0);
  219. /// Return true if the floating-point scalar value is not a NaN or if the
  220. /// floating-point vector value has no NaN elements. Return false if a value
  221. /// could ever be NaN.
  222. bool isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
  223. unsigned Depth = 0);
  224. /// Return true if we can prove that the specified FP value's sign bit is 0.
  225. ///
  226. /// NaN --> true/false (depending on the NaN's sign bit)
  227. /// +0 --> true
  228. /// -0 --> false
  229. /// x > +0 --> true
  230. /// x < -0 --> false
  231. bool SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI);
  232. /// If the specified value can be set by repeating the same byte in memory,
  233. /// return the i8 value that it is represented with. This is true for all i8
  234. /// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
  235. /// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
  236. /// i16 0x1234), return null. If the value is entirely undef and padding,
  237. /// return undef.
  238. Value *isBytewiseValue(Value *V, const DataLayout &DL);
  239. /// Given an aggregate and a sequence of indices, see if the scalar value
  240. /// indexed is already around as a register, for example if it were inserted
  241. /// directly into the aggregate.
  242. ///
  243. /// If InsertBefore is not null, this function will duplicate (modified)
  244. /// insertvalues when a part of a nested struct is extracted.
  245. Value *FindInsertedValue(Value *V,
  246. ArrayRef<unsigned> idx_range,
  247. Instruction *InsertBefore = nullptr);
  248. /// Analyze the specified pointer to see if it can be expressed as a base
  249. /// pointer plus a constant offset. Return the base and offset to the caller.
  250. ///
  251. /// This is a wrapper around Value::stripAndAccumulateConstantOffsets that
  252. /// creates and later unpacks the required APInt.
  253. inline Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
  254. const DataLayout &DL,
  255. bool AllowNonInbounds = true) {
  256. APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  257. Value *Base =
  258. Ptr->stripAndAccumulateConstantOffsets(DL, OffsetAPInt, AllowNonInbounds);
  259. Offset = OffsetAPInt.getSExtValue();
  260. return Base;
  261. }
  262. inline const Value *
  263. GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
  264. const DataLayout &DL,
  265. bool AllowNonInbounds = true) {
  266. return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL,
  267. AllowNonInbounds);
  268. }
  269. /// Returns true if the GEP is based on a pointer to a string (array of
  270. /// \p CharSize integers) and is indexing into this string.
  271. bool isGEPBasedOnPointerToString(const GEPOperator *GEP,
  272. unsigned CharSize = 8);
  273. /// Represents offset+length into a ConstantDataArray.
  274. struct ConstantDataArraySlice {
  275. /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
  276. /// initializer, it just doesn't fit the ConstantDataArray interface).
  277. const ConstantDataArray *Array;
  278. /// Slice starts at this Offset.
  279. uint64_t Offset;
  280. /// Length of the slice.
  281. uint64_t Length;
  282. /// Moves the Offset and adjusts Length accordingly.
  283. void move(uint64_t Delta) {
  284. assert(Delta < Length);
  285. Offset += Delta;
  286. Length -= Delta;
  287. }
  288. /// Convenience accessor for elements in the slice.
  289. uint64_t operator[](unsigned I) const {
  290. return Array==nullptr ? 0 : Array->getElementAsInteger(I + Offset);
  291. }
  292. };
  293. /// Returns true if the value \p V is a pointer into a ConstantDataArray.
  294. /// If successful \p Slice will point to a ConstantDataArray info object
  295. /// with an appropriate offset.
  296. bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
  297. unsigned ElementSize, uint64_t Offset = 0);
  298. /// This function computes the length of a null-terminated C string pointed to
  299. /// by V. If successful, it returns true and returns the string in Str. If
  300. /// unsuccessful, it returns false. This does not include the trailing null
  301. /// character by default. If TrimAtNul is set to false, then this returns any
  302. /// trailing null characters as well as any other characters that come after
  303. /// it.
  304. bool getConstantStringInfo(const Value *V, StringRef &Str,
  305. uint64_t Offset = 0, bool TrimAtNul = true);
  306. /// If we can compute the length of the string pointed to by the specified
  307. /// pointer, return 'len+1'. If we can't, return 0.
  308. uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);
  309. /// This function returns call pointer argument that is considered the same by
  310. /// aliasing rules. You CAN'T use it to replace one value with another. If
  311. /// \p MustPreserveNullness is true, the call must preserve the nullness of
  312. /// the pointer.
  313. const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call,
  314. bool MustPreserveNullness);
  315. inline Value *
  316. getArgumentAliasingToReturnedPointer(CallBase *Call,
  317. bool MustPreserveNullness) {
  318. return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
  319. const_cast<const CallBase *>(Call), MustPreserveNullness));
  320. }
  321. /// {launder,strip}.invariant.group returns pointer that aliases its argument,
  322. /// and it only captures pointer by returning it.
  323. /// These intrinsics are not marked as nocapture, because returning is
  324. /// considered as capture. The arguments are not marked as returned either,
  325. /// because it would make it useless. If \p MustPreserveNullness is true,
  326. /// the intrinsic must preserve the nullness of the pointer.
  327. bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
  328. const CallBase *Call, bool MustPreserveNullness);
  329. /// This method strips off any GEP address adjustments and pointer casts from
  330. /// the specified value, returning the original object being addressed. Note
  331. /// that the returned value has pointer type if the specified value does. If
  332. /// the MaxLookup value is non-zero, it limits the number of instructions to
  333. /// be stripped off.
  334. Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6);
  335. inline const Value *getUnderlyingObject(const Value *V,
  336. unsigned MaxLookup = 6) {
  337. return getUnderlyingObject(const_cast<Value *>(V), MaxLookup);
  338. }
  339. /// This method is similar to getUnderlyingObject except that it can
  340. /// look through phi and select instructions and return multiple objects.
  341. ///
  342. /// If LoopInfo is passed, loop phis are further analyzed. If a pointer
  343. /// accesses different objects in each iteration, we don't look through the
  344. /// phi node. E.g. consider this loop nest:
  345. ///
  346. /// int **A;
  347. /// for (i)
  348. /// for (j) {
  349. /// A[i][j] = A[i-1][j] * B[j]
  350. /// }
  351. ///
  352. /// This is transformed by Load-PRE to stash away A[i] for the next iteration
  353. /// of the outer loop:
  354. ///
  355. /// Curr = A[0]; // Prev_0
  356. /// for (i: 1..N) {
  357. /// Prev = Curr; // Prev = PHI (Prev_0, Curr)
  358. /// Curr = A[i];
  359. /// for (j: 0..N) {
  360. /// Curr[j] = Prev[j] * B[j]
  361. /// }
  362. /// }
  363. ///
  364. /// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
  365. /// should not assume that Curr and Prev share the same underlying object thus
  366. /// it shouldn't look through the phi above.
  367. void getUnderlyingObjects(const Value *V,
  368. SmallVectorImpl<const Value *> &Objects,
  369. LoopInfo *LI = nullptr, unsigned MaxLookup = 6);
  370. /// This is a wrapper around getUnderlyingObjects and adds support for basic
  371. /// ptrtoint+arithmetic+inttoptr sequences.
  372. bool getUnderlyingObjectsForCodeGen(const Value *V,
  373. SmallVectorImpl<Value *> &Objects);
  374. /// Returns unique alloca where the value comes from, or nullptr.
  375. /// If OffsetZero is true check that V points to the beginning of the alloca.
  376. AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false);
  377. inline const AllocaInst *findAllocaForValue(const Value *V,
  378. bool OffsetZero = false) {
  379. return findAllocaForValue(const_cast<Value *>(V), OffsetZero);
  380. }
  381. /// Return true if the only users of this pointer are lifetime markers.
  382. bool onlyUsedByLifetimeMarkers(const Value *V);
  383. /// Return true if the only users of this pointer are lifetime markers or
  384. /// droppable instructions.
  385. bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);
  386. /// Return true if speculation of the given load must be suppressed to avoid
  387. /// ordering or interfering with an active sanitizer. If not suppressed,
  388. /// dereferenceability and alignment must be proven separately. Note: This
  389. /// is only needed for raw reasoning; if you use the interface below
  390. /// (isSafeToSpeculativelyExecute), this is handled internally.
  391. bool mustSuppressSpeculation(const LoadInst &LI);
  392. /// Return true if the instruction does not have any effects besides
  393. /// calculating the result and does not have undefined behavior.
  394. ///
  395. /// This method never returns true for an instruction that returns true for
  396. /// mayHaveSideEffects; however, this method also does some other checks in
  397. /// addition. It checks for undefined behavior, like dividing by zero or
  398. /// loading from an invalid pointer (but not for undefined results, like a
  399. /// shift with a shift amount larger than the width of the result). It checks
  400. /// for malloc and alloca because speculatively executing them might cause a
  401. /// memory leak. It also returns false for instructions related to control
  402. /// flow, specifically terminators and PHI nodes.
  403. ///
  404. /// If the CtxI is specified this method performs context-sensitive analysis
  405. /// and returns true if it is safe to execute the instruction immediately
  406. /// before the CtxI.
  407. ///
  408. /// If the CtxI is NOT specified this method only looks at the instruction
  409. /// itself and its operands, so if this method returns true, it is safe to
  410. /// move the instruction as long as the correct dominance relationships for
  411. /// the operands and users hold.
  412. ///
  413. /// This method can return true for instructions that read memory;
  414. /// for such instructions, moving them may change the resulting value.
  415. bool isSafeToSpeculativelyExecute(const Value *V,
  416. const Instruction *CtxI = nullptr,
  417. const DominatorTree *DT = nullptr);
  418. /// Returns true if the result or effects of the given instructions \p I
  419. /// depend on or influence global memory.
  420. /// Memory dependence arises for example if the instruction reads from
  421. /// memory or may produce effects or undefined behaviour. Memory dependent
  422. /// instructions generally cannot be reordered with respect to other memory
  423. /// dependent instructions or moved into non-dominated basic blocks.
  424. /// Instructions which just compute a value based on the values of their
  425. /// operands are not memory dependent.
  426. bool mayBeMemoryDependent(const Instruction &I);
  427. /// Return true if it is an intrinsic that cannot be speculated but also
  428. /// cannot trap.
  429. bool isAssumeLikeIntrinsic(const Instruction *I);
  430. /// Return true if it is valid to use the assumptions provided by an
  431. /// assume intrinsic, I, at the point in the control-flow identified by the
  432. /// context instruction, CxtI.
  433. bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
  434. const DominatorTree *DT = nullptr);
/// Summary of whether an arithmetic operation is known to overflow, as
/// computed by the computeOverflowFor* family of functions below.
enum class OverflowResult {
  /// Always overflows in the direction of signed/unsigned min value.
  AlwaysOverflowsLow,
  /// Always overflows in the direction of signed/unsigned max value.
  AlwaysOverflowsHigh,
  /// May or may not overflow.
  MayOverflow,
  /// Never overflows.
  NeverOverflows,
};
  445. OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
  446. const Value *RHS,
  447. const DataLayout &DL,
  448. AssumptionCache *AC,
  449. const Instruction *CxtI,
  450. const DominatorTree *DT,
  451. bool UseInstrInfo = true);
  452. OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
  453. const DataLayout &DL,
  454. AssumptionCache *AC,
  455. const Instruction *CxtI,
  456. const DominatorTree *DT,
  457. bool UseInstrInfo = true);
  458. OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
  459. const Value *RHS,
  460. const DataLayout &DL,
  461. AssumptionCache *AC,
  462. const Instruction *CxtI,
  463. const DominatorTree *DT,
  464. bool UseInstrInfo = true);
  465. OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
  466. const DataLayout &DL,
  467. AssumptionCache *AC = nullptr,
  468. const Instruction *CxtI = nullptr,
  469. const DominatorTree *DT = nullptr);
  470. /// This version also leverages the sign bit of Add if known.
  471. OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
  472. const DataLayout &DL,
  473. AssumptionCache *AC = nullptr,
  474. const Instruction *CxtI = nullptr,
  475. const DominatorTree *DT = nullptr);
  476. OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS,
  477. const DataLayout &DL,
  478. AssumptionCache *AC,
  479. const Instruction *CxtI,
  480. const DominatorTree *DT);
  481. OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
  482. const DataLayout &DL,
  483. AssumptionCache *AC,
  484. const Instruction *CxtI,
  485. const DominatorTree *DT);
  486. /// Returns true if the arithmetic part of the \p WO 's result is
  487. /// used only along the paths control dependent on the computation
  488. /// not overflowing, \p WO being an <op>.with.overflow intrinsic.
  489. bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
  490. const DominatorTree &DT);
  491. /// Determine the possible constant range of an integer or vector of integer
  492. /// value. This is intended as a cheap, non-recursive check.
  493. ConstantRange computeConstantRange(const Value *V, bool UseInstrInfo = true,
  494. AssumptionCache *AC = nullptr,
  495. const Instruction *CtxI = nullptr,
  496. unsigned Depth = 0);
  497. /// Return true if this function can prove that the instruction I will
  498. /// always transfer execution to one of its successors (including the next
  499. /// instruction that follows within a basic block). E.g. this is not
  500. /// guaranteed for function calls that could loop infinitely.
  501. ///
  502. /// In other words, this function returns false for instructions that may
  503. /// transfer execution or fail to transfer execution in a way that is not
  504. /// captured in the CFG nor in the sequence of instructions within a basic
  505. /// block.
  506. ///
  507. /// Undefined behavior is assumed not to happen, so e.g. division is
  508. /// guaranteed to transfer execution to the following instruction even
  509. /// though division by zero might cause undefined behavior.
  510. bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);
  511. /// Returns true if this block does not contain a potential implicit exit.
  512. /// This is equivalent to saying that all instructions within the basic block
  513. /// are guaranteed to transfer execution to their successor within the basic
  514. /// block. This has the same assumptions w.r.t. undefined behavior as the
  515. /// instruction variant of this function.
  516. bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);
  517. /// Return true if this function can prove that the instruction I
  518. /// is executed for every iteration of the loop L.
  519. ///
  520. /// Note that this currently only considers the loop header.
  521. bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
  522. const Loop *L);
  523. /// Return true if I yields poison or raises UB if any of its operands is
  524. /// poison.
  525. /// Formally, given I = `r = op v1 v2 .. vN`, propagatesPoison returns true
  526. /// if, for all i, r is evaluated to poison or op raises UB if vi = poison.
  527. /// To filter out operands that raise UB on poison, you can use
  528. /// getGuaranteedNonPoisonOp.
  529. bool propagatesPoison(const Operator *I);
  530. /// Insert operands of I into Ops such that I will trigger undefined behavior
  531. /// if I is executed and that operand has a poison value.
  532. void getGuaranteedNonPoisonOps(const Instruction *I,
  533. SmallPtrSetImpl<const Value *> &Ops);
  534. /// Return true if the given instruction must trigger undefined behavior
  535. /// when I is executed with any operands which appear in KnownPoison holding
  536. /// a poison value at the point of execution.
  537. bool mustTriggerUB(const Instruction *I,
  538. const SmallSet<const Value *, 16>& KnownPoison);
  539. /// Return true if this function can prove that if Inst is executed
  540. /// and yields a poison value or undef bits, then that will trigger
  541. /// undefined behavior.
  542. ///
  543. /// Note that this currently only considers the basic block that is
  544. /// the parent of Inst.
  545. bool programUndefinedIfUndefOrPoison(const Instruction *Inst);
  546. bool programUndefinedIfPoison(const Instruction *Inst);
  547. /// canCreateUndefOrPoison returns true if Op can create undef or poison from
  548. /// non-undef & non-poison operands.
  549. /// For vectors, canCreateUndefOrPoison returns true if there is potential
  550. /// poison or undef in any element of the result when vectors without
  551. /// undef/poison are given as operands.
  552. /// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns
  553. /// true. If Op raises immediate UB but never creates poison or undef
  554. /// (e.g. sdiv I, 0), canCreatePoison returns false.
  555. ///
  556. /// canCreatePoison returns true if Op can create poison from non-poison
  557. /// operands.
  558. bool canCreateUndefOrPoison(const Operator *Op);
  559. bool canCreatePoison(const Operator *Op);
  560. /// Return true if V is poison given that ValAssumedPoison is already poison.
  561. /// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`,
  562. /// impliesPoison returns true.
  563. bool impliesPoison(const Value *ValAssumedPoison, const Value *V);
  564. /// Return true if this function can prove that V does not have undef bits
  565. /// and is never poison. If V is an aggregate value or vector, check whether
  566. /// all elements (except padding) are not undef or poison.
  567. /// Note that this is different from canCreateUndefOrPoison because the
  568. /// function assumes Op's operands are not poison/undef.
  569. ///
  570. /// If CtxI and DT are specified this method performs flow-sensitive analysis
  571. /// and returns true if it is guaranteed to be never undef or poison
  572. /// immediately before the CtxI.
  573. bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
  574. AssumptionCache *AC = nullptr,
  575. const Instruction *CtxI = nullptr,
  576. const DominatorTree *DT = nullptr,
  577. unsigned Depth = 0);
  578. bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
  579. const Instruction *CtxI = nullptr,
  580. const DominatorTree *DT = nullptr,
  581. unsigned Depth = 0);
  582. /// Specific patterns of select instructions we can match.
  583. enum SelectPatternFlavor {
  584. SPF_UNKNOWN = 0,
  585. SPF_SMIN, /// Signed minimum
  586. SPF_UMIN, /// Unsigned minimum
  587. SPF_SMAX, /// Signed maximum
  588. SPF_UMAX, /// Unsigned maximum
  589. SPF_FMINNUM, /// Floating point minnum
  590. SPF_FMAXNUM, /// Floating point maxnum
  591. SPF_ABS, /// Absolute value
  592. SPF_NABS /// Negated absolute value
  593. };
  594. /// Behavior when a floating point min/max is given one NaN and one
  595. /// non-NaN as input.
  596. enum SelectPatternNaNBehavior {
  597. SPNB_NA = 0, /// NaN behavior not applicable.
  598. SPNB_RETURNS_NAN, /// Given one NaN input, returns the NaN.
  599. SPNB_RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
  600. SPNB_RETURNS_ANY /// Given one NaN input, can return either (or
  601. /// it has been determined that no operands can
  602. /// be NaN).
  603. };
  604. struct SelectPatternResult {
  605. SelectPatternFlavor Flavor;
  606. SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
  607. /// SPF_FMINNUM or SPF_FMAXNUM.
  608. bool Ordered; /// When implementing this min/max pattern as
  609. /// fcmp; select, does the fcmp have to be
  610. /// ordered?
  611. /// Return true if \p SPF is a min or a max pattern.
  612. static bool isMinOrMax(SelectPatternFlavor SPF) {
  613. return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS;
  614. }
  615. };
  616. /// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
  617. /// and providing the out parameter results if we successfully match.
  618. ///
  619. /// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be
  620. /// the negation instruction from the idiom.
  621. ///
  622. /// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
  623. /// not match that of the original select. If this is the case, the cast
  624. /// operation (one of Trunc,SExt,Zext) that must be done to transform the
  625. /// type of LHS and RHS into the type of V is returned in CastOp.
  626. ///
  627. /// For example:
  628. /// %1 = icmp slt i32 %a, i32 4
  629. /// %2 = sext i32 %a to i64
  630. /// %3 = select i1 %1, i64 %2, i64 4
  631. ///
  632. /// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
  633. ///
  634. SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
  635. Instruction::CastOps *CastOp = nullptr,
  636. unsigned Depth = 0);
  637. inline SelectPatternResult
  638. matchSelectPattern(const Value *V, const Value *&LHS, const Value *&RHS) {
  639. Value *L = const_cast<Value *>(LHS);
  640. Value *R = const_cast<Value *>(RHS);
  641. auto Result = matchSelectPattern(const_cast<Value *>(V), L, R);
  642. LHS = L;
  643. RHS = R;
  644. return Result;
  645. }
  646. /// Determine the pattern that a select with the given compare as its
  647. /// predicate and given values as its true/false operands would match.
  648. SelectPatternResult matchDecomposedSelectPattern(
  649. CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
  650. Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);
  651. /// Return the canonical comparison predicate for the specified
  652. /// minimum/maximum flavor.
  653. CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF,
  654. bool Ordered = false);
  655. /// Return the inverse minimum/maximum flavor of the specified flavor.
  656. /// For example, signed minimum is the inverse of signed maximum.
  657. SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);
  658. /// Return the canonical inverse comparison predicate for the specified
  659. /// minimum/maximum flavor.
  660. CmpInst::Predicate getInverseMinMaxPred(SelectPatternFlavor SPF);
  661. /// Check if the values in \p VL are select instructions that can be converted
  662. /// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a
  663. /// conversion is possible, together with a bool indicating whether all select
  664. /// conditions are only used by the selects. Otherwise return
  665. /// Intrinsic::not_intrinsic.
  666. std::pair<Intrinsic::ID, bool>
  667. canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);
  668. /// Return true if RHS is known to be implied true by LHS. Return false if
  669. /// RHS is known to be implied false by LHS. Otherwise, return None if no
  670. /// implication can be made.
  671. /// A & B must be i1 (boolean) values or a vector of such values. Note that
  672. /// the truth table for implication is the same as <=u on i1 values (but not
  673. /// <=s!). The truth table for both is:
  674. /// | T | F (B)
  675. /// T | T | F
  676. /// F | T | T
  677. /// (A)
  678. Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
  679. const DataLayout &DL, bool LHSIsTrue = true,
  680. unsigned Depth = 0);
  681. Optional<bool> isImpliedCondition(const Value *LHS,
  682. CmpInst::Predicate RHSPred,
  683. const Value *RHSOp0, const Value *RHSOp1,
  684. const DataLayout &DL, bool LHSIsTrue = true,
  685. unsigned Depth = 0);
  686. /// Return the boolean condition value in the context of the given instruction
  687. /// if it is known based on dominating conditions.
  688. Optional<bool> isImpliedByDomCondition(const Value *Cond,
  689. const Instruction *ContextI,
  690. const DataLayout &DL);
  691. Optional<bool> isImpliedByDomCondition(CmpInst::Predicate Pred,
  692. const Value *LHS, const Value *RHS,
  693. const Instruction *ContextI,
  694. const DataLayout &DL);
  695. /// If Ptr1 is provably equal to Ptr2 plus a constant offset, return that
  696. /// offset. For example, Ptr1 might be &A[42], and Ptr2 might be &A[40]. In
  697. /// this case offset would be -8.
  698. Optional<int64_t> isPointerOffset(const Value *Ptr1, const Value *Ptr2,
  699. const DataLayout &DL);
  700. } // end namespace llvm
  701. #endif // LLVM_ANALYSIS_VALUETRACKING_H
  702. #ifdef __GNUC__
  703. #pragma GCC diagnostic pop
  704. #endif