//===-- Operator.cpp - Implement the LLVM operators -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the non-inline methods for the LLVM Operator classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Operator.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"

#include "ConstantsContext.h"

namespace llvm {

bool Operator::hasPoisonGeneratingFlags() const {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl: {
    auto *OBO = cast<OverflowingBinaryOperator>(this);
    return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    return cast<PossiblyExactOperator>(this)->isExact();
  case Instruction::GetElementPtr: {
    auto *GEP = cast<GEPOperator>(this);
    // Note: inrange exists on constant expressions only.
    return GEP->isInBounds() || GEP->getInRangeIndex() != std::nullopt;
  }
  default:
    if (const auto *FP = dyn_cast<FPMathOperator>(this))
      return FP->hasNoNaNs() || FP->hasNoInfs();
    return false;
  }
}

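// Example usage (a minimal sketch, not part of this file; `I` stands for a
// hypothetical `Instruction *` owned by the surrounding pass):
//
//   if (auto *Op = dyn_cast<Operator>(I))
//     if (Op->hasPoisonGeneratingFlags())
//       I->dropPoisonGeneratingFlags(); // e.g. strip nuw/nsw/exact/inbounds
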
bool Operator::hasPoisonGeneratingFlagsOrMetadata() const {
  if (hasPoisonGeneratingFlags())
    return true;
  auto *I = dyn_cast<Instruction>(this);
  return I && I->hasPoisonGeneratingMetadata();
}

Type *GEPOperator::getSourceElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getSourceElementType();
  return cast<GetElementPtrConstantExpr>(this)->getSourceElementType();
}

Type *GEPOperator::getResultElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getResultElementType();
  return cast<GetElementPtrConstantExpr>(this)->getResultElementType();
}

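// With opaque pointers the indexed type can no longer be derived from the
// pointer operand, so callers must query it explicitly. A minimal sketch
// (`V` is a hypothetical `Value *`):
//
//   if (auto *GEP = dyn_cast<GEPOperator>(V))
//     Type *Indexed = GEP->getSourceElementType(); // type the GEP steps over
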
Align GEPOperator::getMaxPreservedAlignment(const DataLayout &DL) const {
  // Compute the worst possible offset for every level of the GEP and
  // accumulate the minimum alignment into Result.
  Align Result = Align(llvm::Value::MaximumAlignment);
  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    uint64_t Offset;
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());

    if (StructType *STy = GTI.getStructTypeOrNull()) {
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset = SL->getElementOffset(OpC->getZExtValue());
    } else {
      assert(GTI.isSequential() && "should be sequential");
      // If the index isn't known, take 1, because that is the index that
      // gives the worst alignment of the offset.
      const uint64_t ElemCount = OpC ? OpC->getZExtValue() : 1;
      Offset = DL.getTypeAllocSize(GTI.getIndexedType()) * ElemCount;
    }

    Result = Align(MinAlign(Offset, Result.value()));
  }
  return Result;
}

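// Example usage (a minimal sketch; `GEP`, `DL`, and `PtrAlign` are
// hypothetical values supplied by the caller): if the base pointer is
// aligned to `PtrAlign`, the GEP result is still aligned to at least the
// smaller of `PtrAlign` and the bound computed above.
//
//   Align ResultAlign = std::min(PtrAlign, GEP->getMaxPreservedAlignment(DL));
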
bool GEPOperator::accumulateConstantOffset(
    const DataLayout &DL, APInt &Offset,
    function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
  assert(Offset.getBitWidth() ==
             DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");
  SmallVector<const Value *> Index(llvm::drop_begin(operand_values()));
  return GEPOperator::accumulateConstantOffset(getSourceElementType(), Index,
                                               DL, Offset, ExternalAnalysis);
}

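// Example usage (a minimal sketch; `GEP` and `DL` are hypothetical). The
// offset APInt must be created at the index width of the GEP's address
// space, as the assertion above enforces:
//
//   unsigned BW = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
//   APInt Off(BW, 0);
//   if (GEP->accumulateConstantOffset(DL, Off))
//     ; // all indices were constant; `Off` now holds the byte offset
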
bool GEPOperator::accumulateConstantOffset(
    Type *SourceType, ArrayRef<const Value *> Index, const DataLayout &DL,
    APInt &Offset, function_ref<bool(Value &, APInt &)> ExternalAnalysis) {
  bool UsedExternalAnalysis = false;
  auto AccumulateOffset = [&](APInt Index, uint64_t Size) -> bool {
    Index = Index.sextOrTrunc(Offset.getBitWidth());
    APInt IndexedSize = APInt(Offset.getBitWidth(), Size);
    // For array or vector indices, scale the index by the size of the type.
    if (!UsedExternalAnalysis) {
      Offset += Index * IndexedSize;
    } else {
      // External analysis can return a result higher/lower than the value
      // represents. We need to detect overflow/underflow.
      bool Overflow = false;
      APInt OffsetPlus = Index.smul_ov(IndexedSize, Overflow);
      if (Overflow)
        return false;
      Offset = Offset.sadd_ov(OffsetPlus, Overflow);
      if (Overflow)
        return false;
    }
    return true;
  };
  auto begin = generic_gep_type_iterator<decltype(Index.begin())>::begin(
      SourceType, Index.begin());
  auto end = generic_gep_type_iterator<decltype(Index.end())>::end(Index.end());
  for (auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = isa<ScalableVectorType>(GTI.getIndexedType());

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    if (auto *ConstOffset = dyn_cast<ConstantInt>(V)) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero (vscale * n * 0 =
      // 0), bail out.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        if (!AccumulateOffset(
                APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)),
                1))
          return false;
        continue;
      }
      if (!AccumulateOffset(ConstOffset->getValue(),
                            DL.getTypeAllocSize(GTI.getIndexedType())))
        return false;
      continue;
    }

    // The operand is not constant; check whether an external analysis was
    // provided. External analysis is not applicable to a struct type.
    if (!ExternalAnalysis || STy || ScalableType)
      return false;
    APInt AnalysisIndex;
    if (!ExternalAnalysis(*V, AnalysisIndex))
      return false;
    UsedExternalAnalysis = true;
    if (!AccumulateOffset(AnalysisIndex,
                          DL.getTypeAllocSize(GTI.getIndexedType())))
      return false;
  }
  return true;
}

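// Example with an external analysis (a minimal sketch; `KnownIndex` is a
// hypothetical helper that tries to derive a constant value for a
// non-constant index). The callback fills in the index and returns true on
// success; overflow is then detected via the smul_ov/sadd_ov paths above:
//
//   auto Analysis = [&](Value &V, APInt &Idx) { return KnownIndex(V, Idx); };
//   if (GEP->accumulateConstantOffset(DL, Off, Analysis))
//     ; // `Off` includes contributions derived by the callback
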
bool GEPOperator::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    MapVector<Value *, APInt> &VariableOffsets, APInt &ConstantOffset) const {
  assert(BitWidth == DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");

  auto CollectConstantOffset = [&](APInt Index, uint64_t Size) {
    Index = Index.sextOrTrunc(BitWidth);
    APInt IndexedSize = APInt(BitWidth, Size);
    ConstantOffset += Index * IndexedSize;
  };

  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = isa<ScalableVectorType>(GTI.getIndexedType());

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    if (auto *ConstOffset = dyn_cast<ConstantInt>(V)) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero (vscale * n * 0 =
      // 0), bail out.
      // TODO: If the runtime value is accessible at any point before DWARF
      // emission, then we could potentially keep a forward reference to it
      // in the debug value to be filled in later.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        CollectConstantOffset(APInt(BitWidth, SL->getElementOffset(ElementIdx)),
                              1);
        continue;
      }
      CollectConstantOffset(ConstOffset->getValue(),
                            DL.getTypeAllocSize(GTI.getIndexedType()));
      continue;
    }

    if (STy || ScalableType)
      return false;
    APInt IndexedSize =
        APInt(BitWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
    // Insert an initial offset of 0 for V iff none exists already, then
    // increment the offset by IndexedSize.
    if (!IndexedSize.isZero()) {
      VariableOffsets.insert({V, APInt(BitWidth, 0)});
      VariableOffsets[V] += IndexedSize;
    }
  }
  return true;
}

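// Example usage (a minimal sketch; `GEP` and `DL` are hypothetical). On
// success the total offset decomposes as ConstantOffset plus, for each map
// entry, the stored coefficient times the runtime value of the index:
//
//   unsigned BW = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
//   MapVector<Value *, APInt> VarOffsets;
//   APInt ConstOff(BW, 0);
//   if (GEP->collectOffset(DL, BW, VarOffsets, ConstOff))
//     ; // byte offset == ConstOff + sum of VarOffsets[V] * V
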
void FastMathFlags::print(raw_ostream &O) const {
  if (all())
    O << " fast";
  else {
    if (allowReassoc())
      O << " reassoc";
    if (noNaNs())
      O << " nnan";
    if (noInfs())
      O << " ninf";
    if (noSignedZeros())
      O << " nsz";
    if (allowReciprocal())
      O << " arcp";
    if (allowContract())
      O << " contract";
    if (approxFunc())
      O << " afn";
  }
}

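// Example usage (a minimal sketch):
//
//   FastMathFlags FMF;
//   FMF.setNoNaNs();
//   FMF.setNoInfs();
//   FMF.print(errs()); // prints " nnan ninf"
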
} // namespace llvm