//===-- IntrinsicInst.cpp - Intrinsic Instruction Wrappers ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements methods that make it really easy to deal with intrinsic
// functions.
//
// All intrinsic function calls are instances of the call instruction, so these
// are all subclasses of the CallInst class. Note that none of these classes
// has state or virtual methods, which is an important part of this gross/neat
// hack working.
//
// In some cases, arguments to intrinsics need to be generic and are defined as
// type pointer to empty struct { }*. To access the real item of interest the
// cast instruction needs to be stripped away.
//
// An illustrative usage sketch follows the includes below.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/IntrinsicInst.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
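
// Illustrative sketch (editorial, not part of the original file): because the
// wrapper classes implemented here carry no state or virtual methods, client
// code simply casts a CallInst to the wrapper it expects. The names `F` and
// `DbgValues` below are hypothetical:
//
//   for (Instruction &I : instructions(F))
//     if (auto *II = dyn_cast<IntrinsicInst>(&I))
//       if (II->getIntrinsicID() == Intrinsic::dbg_value)
//         DbgValues.push_back(cast<DbgValueInst>(II));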

//===----------------------------------------------------------------------===//
/// DbgVariableIntrinsic - This is the common base class for debug info
/// intrinsics for variables.
///

Value *DbgVariableIntrinsic::getVariableLocation(bool AllowNullOp) const {
  Value *Op = getArgOperand(0);
  if (AllowNullOp && !Op)
    return nullptr;

  auto *MD = cast<MetadataAsValue>(Op)->getMetadata();
  if (auto *V = dyn_cast<ValueAsMetadata>(MD))
    return V->getValue();

  // When the value goes to null, it gets replaced by an empty MDNode.
  assert(!cast<MDNode>(MD)->getNumOperands() && "Expected an empty MDNode");
  return nullptr;
}

Optional<uint64_t> DbgVariableIntrinsic::getFragmentSizeInBits() const {
  if (auto Fragment = getExpression()->getFragmentInfo())
    return Fragment->SizeInBits;
  return getVariable()->getSizeInBits();
}
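
// Illustrative use (editorial addition): a pass that wants the SSA values
// referenced by debug variable intrinsics could rely on the accessor above;
// `F` and `Locations` are hypothetical names.
//
//   for (Instruction &I : instructions(F))
//     if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
//       if (Value *Loc = DVI->getVariableLocation())
//         Locations.push_back(Loc);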

int llvm::Intrinsic::lookupLLVMIntrinsicByName(ArrayRef<const char *> NameTable,
                                               StringRef Name) {
  assert(Name.startswith("llvm."));

  // Do successive binary searches of the dotted name components. For
  // "llvm.gc.experimental.statepoint.p1i8.p1i32", we will find the range of
  // intrinsics starting with "llvm.gc", then "llvm.gc.experimental", then
  // "llvm.gc.experimental.statepoint", and then we will stop as the range is
  // size 1. During the search, we can skip the prefix that we already know is
  // identical. By using strncmp we consider names with differing suffixes to
  // be part of the equal range.
  size_t CmpEnd = 4; // Skip the "llvm" component.
  const char *const *Low = NameTable.begin();
  const char *const *High = NameTable.end();
  const char *const *LastLow = Low;
  while (CmpEnd < Name.size() && High - Low > 0) {
    size_t CmpStart = CmpEnd;
    CmpEnd = Name.find('.', CmpStart + 1);
    CmpEnd = CmpEnd == StringRef::npos ? Name.size() : CmpEnd;
    auto Cmp = [CmpStart, CmpEnd](const char *LHS, const char *RHS) {
      return strncmp(LHS + CmpStart, RHS + CmpStart, CmpEnd - CmpStart) < 0;
    };
    LastLow = Low;
    std::tie(Low, High) = std::equal_range(Low, High, Name.data(), Cmp);
  }
  if (High - Low > 0)
    LastLow = Low;

  if (LastLow == NameTable.end())
    return -1;
  StringRef NameFound = *LastLow;
  if (Name == NameFound ||
      (Name.startswith(NameFound) && Name[NameFound.size()] == '.'))
    return LastLow - NameTable.begin();
  return -1;
}
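
// Worked example (editorial, with a hypothetical NameTable of intrinsic
// names): for the overloaded query "llvm.memcpy.p0i8.p0i8.i64" the loop
// narrows the candidate range one dotted component at a time, and the final
// prefix check maps the mangled name back to the base "llvm.memcpy" entry.
//
//   int Idx = Intrinsic::lookupLLVMIntrinsicByName(
//       NameTable, "llvm.memcpy.p0i8.p0i8.i64");
//   // Idx is the table index of "llvm.memcpy", or -1 if nothing matches.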

Value *InstrProfIncrementInst::getStep() const {
  if (InstrProfIncrementInstStep::classof(this)) {
    return const_cast<Value *>(getArgOperand(4));
  }
  const Module *M = getModule();
  LLVMContext &Context = M->getContext();
  return ConstantInt::get(Type::getInt64Ty(Context), 1);
}
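
// Note (editorial): llvm.instrprof.increment always bumps its counter by 1,
// while llvm.instrprof.increment.step carries an explicit step as operand 4;
// the accessor above hides that difference. `Call` below is a hypothetical
// call known to be one of the two intrinsics.
//
//   Value *Step = cast<InstrProfIncrementInst>(Call)->getStep();
//   // i64 1 for the plain increment, the step operand otherwise.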

Optional<RoundingMode> ConstrainedFPIntrinsic::getRoundingMode() const {
  unsigned NumOperands = getNumArgOperands();
  Metadata *MD =
      cast<MetadataAsValue>(getArgOperand(NumOperands - 2))->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return None;
  return StrToRoundingMode(cast<MDString>(MD)->getString());
}

Optional<fp::ExceptionBehavior>
ConstrainedFPIntrinsic::getExceptionBehavior() const {
  unsigned NumOperands = getNumArgOperands();
  Metadata *MD =
      cast<MetadataAsValue>(getArgOperand(NumOperands - 1))->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return None;
  return StrToExceptionBehavior(cast<MDString>(MD)->getString());
}
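
// For reference (editorial): the two metadata strings decoded above are the
// trailing operands of a constrained intrinsic call, e.g. in IR:
//
//   %r = call double @llvm.experimental.constrained.fadd.f64(
//            double %a, double %b,
//            metadata !"round.dynamic", metadata !"fpexcept.strict")
//
// getRoundingMode() reads the second-to-last operand ("round.dynamic") and
// getExceptionBehavior() reads the last one ("fpexcept.strict").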

FCmpInst::Predicate ConstrainedFPCmpIntrinsic::getPredicate() const {
  Metadata *MD = cast<MetadataAsValue>(getArgOperand(2))->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return FCmpInst::BAD_FCMP_PREDICATE;
  return StringSwitch<FCmpInst::Predicate>(cast<MDString>(MD)->getString())
      .Case("oeq", FCmpInst::FCMP_OEQ)
      .Case("ogt", FCmpInst::FCMP_OGT)
      .Case("oge", FCmpInst::FCMP_OGE)
      .Case("olt", FCmpInst::FCMP_OLT)
      .Case("ole", FCmpInst::FCMP_OLE)
      .Case("one", FCmpInst::FCMP_ONE)
      .Case("ord", FCmpInst::FCMP_ORD)
      .Case("uno", FCmpInst::FCMP_UNO)
      .Case("ueq", FCmpInst::FCMP_UEQ)
      .Case("ugt", FCmpInst::FCMP_UGT)
      .Case("uge", FCmpInst::FCMP_UGE)
      .Case("ult", FCmpInst::FCMP_ULT)
      .Case("ule", FCmpInst::FCMP_ULE)
      .Case("une", FCmpInst::FCMP_UNE)
      .Default(FCmpInst::BAD_FCMP_PREDICATE);
}
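
// For reference (editorial): the predicate string is the third operand of the
// constrained compare intrinsics, e.g.:
//
//   %c = call i1 @llvm.experimental.constrained.fcmp.f64(
//            double %a, double %b,
//            metadata !"oeq", metadata !"fpexcept.strict")
//
// which getPredicate() maps to FCmpInst::FCMP_OEQ.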

bool ConstrainedFPIntrinsic::isUnaryOp() const {
  switch (getIntrinsicID()) {
  default:
    return false;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return NARG == 1;
#include "llvm/IR/ConstrainedOps.def"
  }
}

bool ConstrainedFPIntrinsic::isTernaryOp() const {
  switch (getIntrinsicID()) {
  default:
    return false;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return NARG == 3;
#include "llvm/IR/ConstrainedOps.def"
  }
}

bool ConstrainedFPIntrinsic::classof(const IntrinsicInst *I) {
  switch (I->getIntrinsicID()) {
#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC)                        \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    return true;
  default:
    return false;
  }
}

ElementCount VPIntrinsic::getStaticVectorLength() const {
  auto GetVectorLengthOfType = [](const Type *T) -> ElementCount {
    auto VT = cast<VectorType>(T);
    auto ElemCount = VT->getElementCount();
    return ElemCount;
  };

  auto VPMask = getMaskParam();
  return GetVectorLengthOfType(VPMask->getType());
}

Value *VPIntrinsic::getMaskParam() const {
  auto maskPos = GetMaskParamPos(getIntrinsicID());
  if (maskPos)
    return getArgOperand(maskPos.getValue());
  return nullptr;
}

Value *VPIntrinsic::getVectorLengthParam() const {
  auto vlenPos = GetVectorLengthParamPos(getIntrinsicID());
  if (vlenPos)
    return getArgOperand(vlenPos.getValue());
  return nullptr;
}
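
// For reference (editorial): a vector-predicated intrinsic carries its mask
// and explicit vector length (EVL) as trailing operands, e.g.:
//
//   %r = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %a, <4 x i32> %b,
//                                          <4 x i1> %mask, i32 %evl)
//
// Here getMaskParam() returns %mask, getVectorLengthParam() returns %evl, and
// getStaticVectorLength() returns the element count of the mask type (4).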

Optional<int> VPIntrinsic::GetMaskParamPos(Intrinsic::ID IntrinsicID) {
  switch (IntrinsicID) {
  default:
    return None;

#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return MASKPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

Optional<int> VPIntrinsic::GetVectorLengthParamPos(Intrinsic::ID IntrinsicID) {
  switch (IntrinsicID) {
  default:
    return None;

#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return VLENPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

bool VPIntrinsic::IsVPIntrinsic(Intrinsic::ID ID) {
  switch (ID) {
  default:
    return false;

#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return true;
}

// Equivalent non-predicated opcode
unsigned VPIntrinsic::GetFunctionalOpcodeForVP(Intrinsic::ID ID) {
  unsigned FunctionalOC = Instruction::Call;
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define HANDLE_VP_TO_OPC(OPC) FunctionalOC = Instruction::OPC;
#define END_REGISTER_VP_INTRINSIC(...) break;
#include "llvm/IR/VPIntrinsics.def"
  }

  return FunctionalOC;
}

Intrinsic::ID VPIntrinsic::GetForOpcode(unsigned IROPC) {
  switch (IROPC) {
  default:
    return Intrinsic::not_intrinsic;

#define HANDLE_VP_TO_OPC(OPC) case Instruction::OPC:
#define END_REGISTER_VP_INTRINSIC(VPID) return Intrinsic::VPID;
#include "llvm/IR/VPIntrinsics.def"
  }
}
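
// Worked example (editorial): the two mappings above are inverses for entries
// listed in VPIntrinsics.def, so for integer addition one would expect
//
//   VPIntrinsic::GetFunctionalOpcodeForVP(Intrinsic::vp_add) == Instruction::Add
//   VPIntrinsic::GetForOpcode(Instruction::Add) == Intrinsic::vp_add
//
// and Instruction::Call / Intrinsic::not_intrinsic for anything unmapped.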

bool VPIntrinsic::canIgnoreVectorLengthParam() const {
  using namespace PatternMatch;

  ElementCount EC = getStaticVectorLength();

  // No vlen param - no lanes are masked off by it.
  auto *VLParam = getVectorLengthParam();
  if (!VLParam)
    return true;

  // Note that the VP intrinsic causes undefined behavior if the Explicit
  // Vector Length parameter is strictly greater than the number of vector
  // elements of the operation. This function returns true when this is
  // detected statically in the IR.

  // Check whether "W == vscale * EC.getKnownMinValue()".
  if (EC.isScalable()) {
    // Retrieve the DataLayout from the parent module.
    auto ParMod = this->getModule();
    if (!ParMod)
      return false;
    const auto &DL = ParMod->getDataLayout();

    // Match "vscale * factor" patterns against the EVL operand.
    uint64_t VScaleFactor;
    if (match(VLParam, m_c_Mul(m_ConstantInt(VScaleFactor), m_VScale(DL))))
      return VScaleFactor >= EC.getKnownMinValue();
    return (EC.getKnownMinValue() == 1) && match(VLParam, m_VScale(DL));
  }

  // Fixed-width operation: compare the constant EVL against the element count.
  auto VLConst = dyn_cast<ConstantInt>(VLParam);
  if (!VLConst)
    return false;

  uint64_t VLNum = VLConst->getZExtValue();
  if (VLNum >= EC.getKnownMinValue())
    return true;

  return false;
}
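
// Worked example (editorial, assuming the usual vscale/VP mangling): given
//
//   %vl = call i32 @llvm.vscale.i32()
//   %w  = mul i32 %vl, 2
//   %r  = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(
//             <vscale x 2 x i32> %a, <vscale x 2 x i32> %b,
//             <vscale x 2 x i1> %m, i32 %w)
//
// the EVL %w equals vscale * 2, the full element count of the operation, so
// canIgnoreVectorLengthParam() returns true. For a fixed <8 x i32> operation
// the same holds when the EVL is a constant >= 8; a non-constant EVL on a
// fixed-width operation yields false.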

Instruction::BinaryOps BinaryOpIntrinsic::getBinaryOp() const {
  switch (getIntrinsicID()) {
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
    return Instruction::Add;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat:
    return Instruction::Sub;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    return Instruction::Mul;
  default:
    llvm_unreachable("Invalid intrinsic");
  }
}

bool BinaryOpIntrinsic::isSigned() const {
  switch (getIntrinsicID()) {
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
    return true;
  default:
    return false;
  }
}

unsigned BinaryOpIntrinsic::getNoWrapKind() const {
  if (isSigned())
    return OverflowingBinaryOperator::NoSignedWrap;
  else
    return OverflowingBinaryOperator::NoUnsignedWrap;
}
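
// Illustrative use (editorial): a caller that has matched a saturating or
// overflow intrinsic can recover the underlying IR operation and its wrap
// semantics; `BO` below is a hypothetical BinaryOpIntrinsic pointer.
//
//   if (BO->getBinaryOp() == Instruction::Add &&
//       BO->getNoWrapKind() == OverflowingBinaryOperator::NoUnsignedWrap) {
//     // e.g. llvm.uadd.with.overflow or llvm.uadd.sat
//   }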