IntrinsicInst.cpp 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633
  1. //===-- IntrinsicInst.cpp - Intrinsic Instruction Wrappers ---------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements methods that make it really easy to deal with intrinsic
  10. // functions.
  11. //
  12. // All intrinsic function calls are instances of the call instruction, so these
  13. // are all subclasses of the CallInst class. Note that none of these classes
  14. // has state or virtual methods, which is an important part of this gross/neat
  15. // hack working.
  16. //
  17. // In some cases, arguments to intrinsics need to be generic and are defined as
  18. // type pointer to empty struct { }*. To access the real item of interest the
  19. // cast instruction needs to be stripped away.
  20. //
  21. //===----------------------------------------------------------------------===//
  22. #include "llvm/IR/IntrinsicInst.h"
  23. #include "llvm/ADT/StringSwitch.h"
  24. #include "llvm/IR/Constants.h"
  25. #include "llvm/IR/DebugInfoMetadata.h"
  26. #include "llvm/IR/Metadata.h"
  27. #include "llvm/IR/Module.h"
  28. #include "llvm/IR/Operator.h"
  29. #include "llvm/IR/PatternMatch.h"
  30. #include "llvm/IR/Statepoint.h"
  31. using namespace llvm;
  32. //===----------------------------------------------------------------------===//
  33. /// DbgVariableIntrinsic - This is the common base class for debug info
  34. /// intrinsics for variables.
  35. ///
/// Return an iterator range over this intrinsic's location operands.
/// The raw location (first operand) is stored in one of three encodings:
///   - a single ValueAsMetadata (one location operand),
///   - a DIArgList (several location operands),
///   - any other MDNode, treated as an empty tuple (no location operands).
iterator_range<DbgVariableIntrinsic::location_op_iterator>
DbgVariableIntrinsic::location_ops() const {
  auto *MD = getRawLocation();
  assert(MD && "First operand of DbgVariableIntrinsic should be non-null.");
  // If operand is ValueAsMetadata, return a range over just that operand.
  if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
    return {location_op_iterator(VAM), location_op_iterator(VAM + 1)};
  }
  // If operand is DIArgList, return a range over its args.
  if (auto *AL = dyn_cast<DIArgList>(MD))
    return {location_op_iterator(AL->args_begin()),
            location_op_iterator(AL->args_end())};
  // Operand must be an empty metadata tuple, so return empty iterator.
  return {location_op_iterator(static_cast<ValueAsMetadata *>(nullptr)),
          location_op_iterator(static_cast<ValueAsMetadata *>(nullptr))};
}
/// Return the value of the location operand at index \p OpIdx, or nullptr
/// when the raw location is a plain MDNode (no location operands).
Value *DbgVariableIntrinsic::getVariableLocationOp(unsigned OpIdx) const {
  auto *MD = getRawLocation();
  assert(MD && "First operand of DbgVariableIntrinsic should be non-null.");
  // DIArgList must be checked before the generic MDNode case below: it holds
  // multiple location operands that we index into directly.
  if (auto *AL = dyn_cast<DIArgList>(MD))
    return AL->getArgs()[OpIdx]->getValue();
  // Any other MDNode means there is no location operand to return.
  if (isa<MDNode>(MD))
    return nullptr;
  assert(
      isa<ValueAsMetadata>(MD) &&
      "Attempted to get location operand from DbgVariableIntrinsic with none.");
  auto *V = cast<ValueAsMetadata>(MD);
  // A lone ValueAsMetadata encodes exactly one location operand.
  assert(OpIdx == 0 && "Operand Index must be 0 for a debug intrinsic with a "
                       "single location operand.");
  return V->getValue();
}
  67. static ValueAsMetadata *getAsMetadata(Value *V) {
  68. return isa<MetadataAsValue>(V) ? dyn_cast<ValueAsMetadata>(
  69. cast<MetadataAsValue>(V)->getMetadata())
  70. : ValueAsMetadata::get(V);
  71. }
/// Replace \p OldValue in this intrinsic's location operands with
/// \p NewValue. Note that the rebuild loop below compares by value, so every
/// location operand equal to \p OldValue is replaced, not just the first.
void DbgVariableIntrinsic::replaceVariableLocationOp(Value *OldValue,
                                                     Value *NewValue) {
  assert(NewValue && "Values must be non-null");
  auto Locations = location_ops();
  auto OldIt = find(Locations, OldValue);
  assert(OldIt != Locations.end() && "OldValue must be a current location");
  if (!hasArgList()) {
    // Single-location form: store NewValue (wrapped as MetadataAsValue if it
    // is not one already) as the whole first operand.
    Value *NewOperand = isa<MetadataAsValue>(NewValue)
                            ? NewValue
                            : MetadataAsValue::get(
                                  getContext(), ValueAsMetadata::get(NewValue));
    return setArgOperand(0, NewOperand);
  }
  // DIArgList form: rebuild the whole list, substituting NewValue wherever
  // OldValue appeared.
  SmallVector<ValueAsMetadata *, 4> MDs;
  ValueAsMetadata *NewOperand = getAsMetadata(NewValue);
  for (auto *VMD : Locations)
    MDs.push_back(VMD == *OldIt ? NewOperand : getAsMetadata(VMD));
  setArgOperand(
      0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
}
/// Replace the location operand at index \p OpIdx with \p NewValue. Unlike
/// the Value* overload above, only the operand at that index is changed.
void DbgVariableIntrinsic::replaceVariableLocationOp(unsigned OpIdx,
                                                     Value *NewValue) {
  assert(OpIdx < getNumVariableLocationOps() && "Invalid Operand Index");
  if (!hasArgList()) {
    // Single-location form (OpIdx must be 0 per the assert above): store
    // NewValue, wrapping it as MetadataAsValue if needed.
    Value *NewOperand = isa<MetadataAsValue>(NewValue)
                            ? NewValue
                            : MetadataAsValue::get(
                                  getContext(), ValueAsMetadata::get(NewValue));
    return setArgOperand(0, NewOperand);
  }
  // DIArgList form: rebuild the list, substituting NewValue at OpIdx.
  SmallVector<ValueAsMetadata *, 4> MDs;
  ValueAsMetadata *NewOperand = getAsMetadata(NewValue);
  for (unsigned Idx = 0; Idx < getNumVariableLocationOps(); ++Idx)
    MDs.push_back(Idx == OpIdx ? NewOperand
                               : getAsMetadata(getVariableLocationOp(Idx)));
  setArgOperand(
      0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
}
/// Append \p NewValues to this intrinsic's location operands and install
/// \p NewExpr as its expression. \p NewExpr must reference every resulting
/// location operand.
void DbgVariableIntrinsic::addVariableLocationOps(ArrayRef<Value *> NewValues,
                                                  DIExpression *NewExpr) {
  assert(NewExpr->hasAllLocationOps(getNumVariableLocationOps() +
                                    NewValues.size()) &&
         "NewExpr for debug variable intrinsic does not reference every "
         "location operand.");
  assert(!is_contained(NewValues, nullptr) && "New values must be non-null");
  // Operand 2 holds the DIExpression.
  setArgOperand(2, MetadataAsValue::get(getContext(), NewExpr));
  // Rebuild the location list as a DIArgList: existing operands first,
  // followed by the new ones.
  SmallVector<ValueAsMetadata *, 4> MDs;
  for (auto *VMD : location_ops())
    MDs.push_back(getAsMetadata(VMD));
  for (auto *VMD : NewValues)
    MDs.push_back(getAsMetadata(VMD));
  setArgOperand(
      0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
}
  126. Optional<uint64_t> DbgVariableIntrinsic::getFragmentSizeInBits() const {
  127. if (auto Fragment = getExpression()->getFragmentInfo())
  128. return Fragment->SizeInBits;
  129. return getVariable()->getSizeInBits();
  130. }
/// Find the index in \p NameTable of the entry that equals \p Name or is its
/// longest dotted prefix; return -1 when no entry matches.
int llvm::Intrinsic::lookupLLVMIntrinsicByName(ArrayRef<const char *> NameTable,
                                               StringRef Name) {
  assert(Name.startswith("llvm."));
  // Do successive binary searches of the dotted name components. For
  // "llvm.gc.experimental.statepoint.p1i8.p1i32", we will find the range of
  // intrinsics starting with "llvm.gc", then "llvm.gc.experimental", then
  // "llvm.gc.experimental.statepoint", and then we will stop as the range is
  // size 1. During the search, we can skip the prefix that we already know is
  // identical. By using strncmp we consider names with differing suffixes to
  // be part of the equal range.
  size_t CmpEnd = 4; // Skip the "llvm" component.
  const char *const *Low = NameTable.begin();
  const char *const *High = NameTable.end();
  const char *const *LastLow = Low;
  while (CmpEnd < Name.size() && High - Low > 0) {
    size_t CmpStart = CmpEnd;
    // Advance CmpEnd to the end of the next dotted component of Name.
    CmpEnd = Name.find('.', CmpStart + 1);
    CmpEnd = CmpEnd == StringRef::npos ? Name.size() : CmpEnd;
    // Compare only [CmpStart, CmpEnd): the shared prefix is already known to
    // be identical across the current range.
    auto Cmp = [CmpStart, CmpEnd](const char *LHS, const char *RHS) {
      return strncmp(LHS + CmpStart, RHS + CmpStart, CmpEnd - CmpStart) < 0;
    };
    // Remember the last non-empty lower bound in case the range collapses.
    LastLow = Low;
    std::tie(Low, High) = std::equal_range(Low, High, Name.data(), Cmp);
  }
  if (High - Low > 0)
    LastLow = Low;
  if (LastLow == NameTable.end())
    return -1;
  StringRef NameFound = *LastLow;
  // Accept an exact match, or a table entry that is a proper dotted prefix of
  // Name (e.g. an overloaded intrinsic with type suffixes appended).
  if (Name == NameFound ||
      (Name.startswith(NameFound) && Name[NameFound.size()] == '.'))
    return LastLow - NameTable.begin();
  return -1;
}
  165. ConstantInt *InstrProfInstBase::getNumCounters() const {
  166. if (InstrProfValueProfileInst::classof(this))
  167. llvm_unreachable("InstrProfValueProfileInst does not have counters!");
  168. return cast<ConstantInt>(const_cast<Value *>(getArgOperand(2)));
  169. }
  170. ConstantInt *InstrProfInstBase::getIndex() const {
  171. if (InstrProfValueProfileInst::classof(this))
  172. llvm_unreachable("Please use InstrProfValueProfileInst::getIndex()");
  173. return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
  174. }
  175. Value *InstrProfIncrementInst::getStep() const {
  176. if (InstrProfIncrementInstStep::classof(this)) {
  177. return const_cast<Value *>(getArgOperand(4));
  178. }
  179. const Module *M = getModule();
  180. LLVMContext &Context = M->getContext();
  181. return ConstantInt::get(Type::getInt64Ty(Context), 1);
  182. }
  183. Optional<RoundingMode> ConstrainedFPIntrinsic::getRoundingMode() const {
  184. unsigned NumOperands = arg_size();
  185. Metadata *MD = nullptr;
  186. auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 2));
  187. if (MAV)
  188. MD = MAV->getMetadata();
  189. if (!MD || !isa<MDString>(MD))
  190. return None;
  191. return convertStrToRoundingMode(cast<MDString>(MD)->getString());
  192. }
  193. Optional<fp::ExceptionBehavior>
  194. ConstrainedFPIntrinsic::getExceptionBehavior() const {
  195. unsigned NumOperands = arg_size();
  196. Metadata *MD = nullptr;
  197. auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 1));
  198. if (MAV)
  199. MD = MAV->getMetadata();
  200. if (!MD || !isa<MDString>(MD))
  201. return None;
  202. return convertStrToExceptionBehavior(cast<MDString>(MD)->getString());
  203. }
  204. bool ConstrainedFPIntrinsic::isDefaultFPEnvironment() const {
  205. Optional<fp::ExceptionBehavior> Except = getExceptionBehavior();
  206. if (Except) {
  207. if (Except.getValue() != fp::ebIgnore)
  208. return false;
  209. }
  210. Optional<RoundingMode> Rounding = getRoundingMode();
  211. if (Rounding) {
  212. if (Rounding.getValue() != RoundingMode::NearestTiesToEven)
  213. return false;
  214. }
  215. return true;
  216. }
/// Decode the FCmp predicate from the MDString in argument 2 (e.g. "oeq"),
/// returning BAD_FCMP_PREDICATE for missing or unrecognized metadata.
FCmpInst::Predicate ConstrainedFPCmpIntrinsic::getPredicate() const {
  Metadata *MD = cast<MetadataAsValue>(getArgOperand(2))->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return FCmpInst::BAD_FCMP_PREDICATE;
  return StringSwitch<FCmpInst::Predicate>(cast<MDString>(MD)->getString())
      .Case("oeq", FCmpInst::FCMP_OEQ)
      .Case("ogt", FCmpInst::FCMP_OGT)
      .Case("oge", FCmpInst::FCMP_OGE)
      .Case("olt", FCmpInst::FCMP_OLT)
      .Case("ole", FCmpInst::FCMP_OLE)
      .Case("one", FCmpInst::FCMP_ONE)
      .Case("ord", FCmpInst::FCMP_ORD)
      .Case("uno", FCmpInst::FCMP_UNO)
      .Case("ueq", FCmpInst::FCMP_UEQ)
      .Case("ugt", FCmpInst::FCMP_UGT)
      .Case("uge", FCmpInst::FCMP_UGE)
      .Case("ult", FCmpInst::FCMP_ULT)
      .Case("ule", FCmpInst::FCMP_ULE)
      .Case("une", FCmpInst::FCMP_UNE)
      .Default(FCmpInst::BAD_FCMP_PREDICATE);
}
/// Return true if this constrained intrinsic has exactly one operand
/// (per its NARG entry in ConstrainedOps.def).
bool ConstrainedFPIntrinsic::isUnaryOp() const {
  switch (getIntrinsicID()) {
  default:
    return false;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return NARG == 1;
#include "llvm/IR/ConstrainedOps.def"
  }
}
/// Return true if this constrained intrinsic has exactly three operands
/// (per its NARG entry in ConstrainedOps.def).
bool ConstrainedFPIntrinsic::isTernaryOp() const {
  switch (getIntrinsicID()) {
  default:
    return false;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return NARG == 3;
#include "llvm/IR/ConstrainedOps.def"
  }
}
/// isa<>/dyn_cast<> support: an intrinsic is a ConstrainedFPIntrinsic iff its
/// ID appears in ConstrainedOps.def.
bool ConstrainedFPIntrinsic::classof(const IntrinsicInst *I) {
  switch (I->getIntrinsicID()) {
#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC)                        \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    return true;
  default:
    return false;
  }
}
  268. ElementCount VPIntrinsic::getStaticVectorLength() const {
  269. auto GetVectorLengthOfType = [](const Type *T) -> ElementCount {
  270. const auto *VT = cast<VectorType>(T);
  271. auto ElemCount = VT->getElementCount();
  272. return ElemCount;
  273. };
  274. Value *VPMask = getMaskParam();
  275. assert(VPMask && "No mask param?");
  276. return GetVectorLengthOfType(VPMask->getType());
  277. }
  278. Value *VPIntrinsic::getMaskParam() const {
  279. if (auto MaskPos = getMaskParamPos(getIntrinsicID()))
  280. return getArgOperand(MaskPos.getValue());
  281. return nullptr;
  282. }
  283. void VPIntrinsic::setMaskParam(Value *NewMask) {
  284. auto MaskPos = getMaskParamPos(getIntrinsicID());
  285. setArgOperand(*MaskPos, NewMask);
  286. }
  287. Value *VPIntrinsic::getVectorLengthParam() const {
  288. if (auto EVLPos = getVectorLengthParamPos(getIntrinsicID()))
  289. return getArgOperand(EVLPos.getValue());
  290. return nullptr;
  291. }
  292. void VPIntrinsic::setVectorLengthParam(Value *NewEVL) {
  293. auto EVLPos = getVectorLengthParamPos(getIntrinsicID());
  294. setArgOperand(*EVLPos, NewEVL);
  295. }
/// \return The operand index of the mask for VP intrinsic \p IntrinsicID
/// (from VPIntrinsics.def), or None for non-VP intrinsics.
Optional<unsigned> VPIntrinsic::getMaskParamPos(Intrinsic::ID IntrinsicID) {
  switch (IntrinsicID) {
  default:
    return None;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return MASKPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}
/// \return The operand index of the explicit vector length for VP intrinsic
/// \p IntrinsicID (from VPIntrinsics.def), or None for non-VP intrinsics.
Optional<unsigned>
VPIntrinsic::getVectorLengthParamPos(Intrinsic::ID IntrinsicID) {
  switch (IntrinsicID) {
  default:
    return None;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return VLENPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}
  317. /// \return the alignment of the pointer used by this load/store/gather or
  318. /// scatter.
  319. MaybeAlign VPIntrinsic::getPointerAlignment() const {
  320. Optional<unsigned> PtrParamOpt = getMemoryPointerParamPos(getIntrinsicID());
  321. assert(PtrParamOpt.hasValue() && "no pointer argument!");
  322. return getParamAlign(PtrParamOpt.getValue());
  323. }
  324. /// \return The pointer operand of this load,store, gather or scatter.
  325. Value *VPIntrinsic::getMemoryPointerParam() const {
  326. if (auto PtrParamOpt = getMemoryPointerParamPos(getIntrinsicID()))
  327. return getArgOperand(PtrParamOpt.getValue());
  328. return nullptr;
  329. }
/// \return The operand index of the pointer for memory VP intrinsics (those
/// with a VP_PROPERTY_MEMOP entry in VPIntrinsics.def), or None otherwise.
Optional<unsigned> VPIntrinsic::getMemoryPointerParamPos(Intrinsic::ID VPID) {
  switch (VPID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_MEMOP(POINTERPOS, ...) return POINTERPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return None;
}
  341. /// \return The data (payload) operand of this store or scatter.
  342. Value *VPIntrinsic::getMemoryDataParam() const {
  343. auto DataParamOpt = getMemoryDataParamPos(getIntrinsicID());
  344. if (!DataParamOpt.hasValue())
  345. return nullptr;
  346. return getArgOperand(DataParamOpt.getValue());
  347. }
/// \return The operand index of the stored data for memory VP intrinsics
/// (those with a VP_PROPERTY_MEMOP entry in VPIntrinsics.def), or None.
Optional<unsigned> VPIntrinsic::getMemoryDataParamPos(Intrinsic::ID VPID) {
  switch (VPID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_MEMOP(POINTERPOS, DATAPOS) return DATAPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return None;
}
/// \return true iff \p ID is registered as a VP intrinsic in
/// VPIntrinsics.def.
bool VPIntrinsic::isVPIntrinsic(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return true;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}
// Equivalent non-predicated opcode
/// \return The IR opcode that \p ID corresponds to when predication is
/// ignored (from VP_PROPERTY_FUNCTIONAL_OPC), or None if there is none.
Optional<unsigned> VPIntrinsic::getFunctionalOpcodeForVP(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_FUNCTIONAL_OPC(OPC) return Instruction::OPC;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return None;
}
/// \return The VP intrinsic ID that functionally corresponds to the IR
/// opcode \p IROPC, or Intrinsic::not_intrinsic if there is none.
Intrinsic::ID VPIntrinsic::getForOpcode(unsigned IROPC) {
  switch (IROPC) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) break;
#define VP_PROPERTY_FUNCTIONAL_OPC(OPC) case Instruction::OPC:
#define END_REGISTER_VP_INTRINSIC(VPID) return Intrinsic::VPID;
#include "llvm/IR/VPIntrinsics.def"
  }
  return Intrinsic::not_intrinsic;
}
/// Return true when it can be shown statically that the explicit vector
/// length operand covers the whole vector, i.e. it masks off no lanes beyond
/// what the mask operand already does.
bool VPIntrinsic::canIgnoreVectorLengthParam() const {
  using namespace PatternMatch;

  ElementCount EC = getStaticVectorLength();

  // No vlen param - no lanes masked-off by it.
  auto *VLParam = getVectorLengthParam();
  if (!VLParam)
    return true;

  // Note that the VP intrinsic causes undefined behavior if the Explicit
  // Vector Length parameter is strictly greater-than the number of vector
  // elements of the operation. This function returns true when this is
  // detected statically in the IR.

  // Check whether "W == vscale * EC.getKnownMinValue()"
  if (EC.isScalable()) {
    // Undig the DL (needed by the m_VScale matcher below).
    const auto *ParMod = this->getModule();
    if (!ParMod)
      return false;
    const auto &DL = ParMod->getDataLayout();

    // Compare vscale patterns: "C * vscale" covers the vector when
    // C >= the known minimum element count.
    uint64_t VScaleFactor;
    if (match(VLParam, m_c_Mul(m_ConstantInt(VScaleFactor), m_VScale(DL))))
      return VScaleFactor >= EC.getKnownMinValue();
    // A bare "vscale" covers the vector only when the min count is 1.
    return (EC.getKnownMinValue() == 1) && match(VLParam, m_VScale(DL));
  }

  // standard SIMD operation: a constant EVL >= the element count covers all
  // lanes.
  const auto *VLConst = dyn_cast<ConstantInt>(VLParam);
  if (!VLConst)
    return false;

  uint64_t VLNum = VLConst->getZExtValue();
  if (VLNum >= EC.getKnownMinValue())
    return true;

  return false;
}
  426. Function *VPIntrinsic::getDeclarationForParams(Module *M, Intrinsic::ID VPID,
  427. Type *ReturnType,
  428. ArrayRef<Value *> Params) {
  429. assert(isVPIntrinsic(VPID) && "not a VP intrinsic");
  430. Function *VPFunc;
  431. switch (VPID) {
  432. default: {
  433. Type *OverloadTy = Params[0]->getType();
  434. if (VPReductionIntrinsic::isVPReduction(VPID))
  435. OverloadTy =
  436. Params[*VPReductionIntrinsic::getVectorParamPos(VPID)]->getType();
  437. VPFunc = Intrinsic::getDeclaration(M, VPID, OverloadTy);
  438. break;
  439. }
  440. case Intrinsic::vp_merge:
  441. case Intrinsic::vp_select:
  442. VPFunc = Intrinsic::getDeclaration(M, VPID, {Params[1]->getType()});
  443. break;
  444. case Intrinsic::vp_load:
  445. VPFunc = Intrinsic::getDeclaration(
  446. M, VPID, {ReturnType, Params[0]->getType()});
  447. break;
  448. case Intrinsic::vp_gather:
  449. VPFunc = Intrinsic::getDeclaration(
  450. M, VPID, {ReturnType, Params[0]->getType()});
  451. break;
  452. case Intrinsic::vp_store:
  453. VPFunc = Intrinsic::getDeclaration(
  454. M, VPID, {Params[0]->getType(), Params[1]->getType()});
  455. break;
  456. case Intrinsic::vp_scatter:
  457. VPFunc = Intrinsic::getDeclaration(
  458. M, VPID, {Params[0]->getType(), Params[1]->getType()});
  459. break;
  460. }
  461. assert(VPFunc && "Could not declare VP intrinsic");
  462. return VPFunc;
  463. }
/// \return true iff \p ID has a VP_PROPERTY_REDUCTION entry in
/// VPIntrinsics.def.
bool VPReductionIntrinsic::isVPReduction(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_REDUCTION(STARTPOS, ...) return true;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}
  475. unsigned VPReductionIntrinsic::getVectorParamPos() const {
  476. return *VPReductionIntrinsic::getVectorParamPos(getIntrinsicID());
  477. }
  478. unsigned VPReductionIntrinsic::getStartParamPos() const {
  479. return *VPReductionIntrinsic::getStartParamPos(getIntrinsicID());
  480. }
/// \return The operand index of the reduced vector for \p ID (from
/// VP_PROPERTY_REDUCTION), or None if \p ID is not a VP reduction.
Optional<unsigned> VPReductionIntrinsic::getVectorParamPos(Intrinsic::ID ID) {
  switch (ID) {
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_REDUCTION(STARTPOS, VECTORPOS) return VECTORPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  default:
    break;
  }
  return None;
}
/// \return The operand index of the start value for \p ID (from
/// VP_PROPERTY_REDUCTION), or None if \p ID is not a VP reduction.
Optional<unsigned> VPReductionIntrinsic::getStartParamPos(Intrinsic::ID ID) {
  switch (ID) {
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_REDUCTION(STARTPOS, VECTORPOS) return STARTPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  default:
    break;
  }
  return None;
}
/// \return The IR binary opcode (Add/Sub/Mul) corresponding to this
/// overflow-checking or saturating arithmetic intrinsic.
Instruction::BinaryOps BinaryOpIntrinsic::getBinaryOp() const {
  switch (getIntrinsicID()) {
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
    return Instruction::Add;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat:
    return Instruction::Sub;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    return Instruction::Mul;
  default:
    llvm_unreachable("Invalid intrinsic");
  }
}
  522. bool BinaryOpIntrinsic::isSigned() const {
  523. switch (getIntrinsicID()) {
  524. case Intrinsic::sadd_with_overflow:
  525. case Intrinsic::ssub_with_overflow:
  526. case Intrinsic::smul_with_overflow:
  527. case Intrinsic::sadd_sat:
  528. case Intrinsic::ssub_sat:
  529. return true;
  530. default:
  531. return false;
  532. }
  533. }
  534. unsigned BinaryOpIntrinsic::getNoWrapKind() const {
  535. if (isSigned())
  536. return OverflowingBinaryOperator::NoSignedWrap;
  537. else
  538. return OverflowingBinaryOperator::NoUnsignedWrap;
  539. }
/// Return the statepoint this projection (relocate/result) belongs to. The
/// token operand is either the statepoint itself, or — on the exceptional
/// path of an invoked statepoint — the landingpad, in which case the
/// statepoint is the invoke terminating the unique predecessor block.
const GCStatepointInst *GCProjectionInst::getStatepoint() const {
  const Value *Token = getArgOperand(0);
  // This takes care both of relocates for call statepoints and relocates
  // on normal path of invoke statepoint.
  if (!isa<LandingPadInst>(Token))
    return cast<GCStatepointInst>(Token);
  // This relocate is on exceptional path of an invoke statepoint
  const BasicBlock *InvokeBB =
      cast<Instruction>(Token)->getParent()->getUniquePredecessor();
  assert(InvokeBB && "safepoints should have unique landingpads");
  assert(InvokeBB->getTerminator() &&
         "safepoint block should be well formed");
  return cast<GCStatepointInst>(InvokeBB->getTerminator());
}
  554. Value *GCRelocateInst::getBasePtr() const {
  555. if (auto Opt = getStatepoint()->getOperandBundle(LLVMContext::OB_gc_live))
  556. return *(Opt->Inputs.begin() + getBasePtrIndex());
  557. return *(getStatepoint()->arg_begin() + getBasePtrIndex());
  558. }
  559. Value *GCRelocateInst::getDerivedPtr() const {
  560. if (auto Opt = getStatepoint()->getOperandBundle(LLVMContext::OB_gc_live))
  561. return *(Opt->Inputs.begin() + getDerivedPtrIndex());
  562. return *(getStatepoint()->arg_begin() + getDerivedPtrIndex());
  563. }