IRBuilder.cpp 49 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269
  1. //===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements the IRBuilder class, which is used as a convenient way
  10. // to create LLVM instructions with a consistent and simplified interface.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "llvm/IR/IRBuilder.h"
  14. #include "llvm/ADT/ArrayRef.h"
  15. #include "llvm/ADT/None.h"
  16. #include "llvm/IR/Constant.h"
  17. #include "llvm/IR/Constants.h"
  18. #include "llvm/IR/DerivedTypes.h"
  19. #include "llvm/IR/Function.h"
  20. #include "llvm/IR/GlobalValue.h"
  21. #include "llvm/IR/GlobalVariable.h"
  22. #include "llvm/IR/IntrinsicInst.h"
  23. #include "llvm/IR/Intrinsics.h"
  24. #include "llvm/IR/LLVMContext.h"
  25. #include "llvm/IR/NoFolder.h"
  26. #include "llvm/IR/Operator.h"
  27. #include "llvm/IR/Statepoint.h"
  28. #include "llvm/IR/Type.h"
  29. #include "llvm/IR/Value.h"
  30. #include "llvm/Support/Casting.h"
  31. #include <cassert>
  32. #include <cstdint>
  33. #include <vector>
  34. using namespace llvm;
  35. /// CreateGlobalString - Make a new global variable with an initializer that
  36. /// has array of i8 type filled in with the nul terminated string value
  37. /// specified. If Name is specified, it is the name of the global variable
  38. /// created.
  39. GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
  40. const Twine &Name,
  41. unsigned AddressSpace,
  42. Module *M) {
  43. Constant *StrConstant = ConstantDataArray::getString(Context, Str);
  44. if (!M)
  45. M = BB->getParent()->getParent();
  46. auto *GV = new GlobalVariable(
  47. *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
  48. StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
  49. GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  50. GV->setAlignment(Align(1));
  51. return GV;
  52. }
  53. Type *IRBuilderBase::getCurrentFunctionReturnType() const {
  54. assert(BB && BB->getParent() && "No current function!");
  55. return BB->getParent()->getReturnType();
  56. }
  57. Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
  58. auto *PT = cast<PointerType>(Ptr->getType());
  59. if (PT->isOpaqueOrPointeeTypeMatches(getInt8Ty()))
  60. return Ptr;
  61. // Otherwise, we need to insert a bitcast.
  62. return CreateBitCast(Ptr, getInt8PtrTy(PT->getAddressSpace()));
  63. }
  64. static CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
  65. IRBuilderBase *Builder,
  66. const Twine &Name = "",
  67. Instruction *FMFSource = nullptr,
  68. ArrayRef<OperandBundleDef> OpBundles = {}) {
  69. CallInst *CI = Builder->CreateCall(Callee, Ops, OpBundles, Name);
  70. if (FMFSource)
  71. CI->copyFastMathFlags(FMFSource);
  72. return CI;
  73. }
  74. Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
  75. assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
  76. if (cast<ConstantInt>(Scaling)->isZero())
  77. return Scaling;
  78. Module *M = GetInsertBlock()->getParent()->getParent();
  79. Function *TheFn =
  80. Intrinsic::getDeclaration(M, Intrinsic::vscale, {Scaling->getType()});
  81. CallInst *CI = createCallHelper(TheFn, {}, this, Name);
  82. return cast<ConstantInt>(Scaling)->getSExtValue() == 1
  83. ? CI
  84. : CreateMul(CI, Scaling);
  85. }
/// Create a vector value <0, 1, 2, ...> whose type is \p DstType.
/// Fixed-width vectors fold to a ConstantVector of consecutive integers;
/// scalable vectors emit a call to llvm.experimental.stepvector.
Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) {
  Type *STy = DstType->getScalarType();
  if (isa<ScalableVectorType>(DstType)) {
    Type *StepVecType = DstType;
    // TODO: We expect this special case (element type < 8 bits) to be
    // temporary - once the intrinsic properly supports < 8 bits this code
    // can be removed.
    if (STy->getScalarSizeInBits() < 8)
      StepVecType =
          VectorType::get(getInt8Ty(), cast<ScalableVectorType>(DstType));
    Value *Res = CreateIntrinsic(Intrinsic::experimental_stepvector,
                                 {StepVecType}, {}, nullptr, Name);
    // If we widened the element type above, truncate back to the
    // originally-requested narrow element type.
    if (StepVecType != DstType)
      Res = CreateTrunc(Res, DstType);
    return Res;
  }

  unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();

  // Create a vector of consecutive numbers from zero to VF.
  SmallVector<Constant *, 8> Indices;
  for (unsigned i = 0; i < NumEls; ++i)
    Indices.push_back(ConstantInt::get(STy, i));

  // Add the consecutive indices to the vector value.
  return ConstantVector::get(Indices);
}
/// Create a call to the llvm.memset intrinsic, storing \p Val into \p Size
/// bytes at \p Ptr (cast to i8* if needed). The destination alignment is
/// applied only when known (\p Align is non-None); TBAA / alias-scope /
/// noalias metadata is attached when the corresponding tag is non-null.
CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
                                      MaybeAlign Align, bool isVolatile,
                                      MDNode *TBAATag, MDNode *ScopeTag,
                                      MDNode *NoAliasTag) {
  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
  // The intrinsic is overloaded on the pointer type and the size type.
  Type *Tys[] = { Ptr->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  if (Align)
    cast<MemSetInst>(CI)->setDestAlignment(Align->value());

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
/// Create a call to llvm.memset.element.unordered.atomic, storing \p Val
/// into \p Size bytes at \p Ptr in unordered-atomic chunks of
/// \p ElementSize bytes. \p Alignment is mandatory for the atomic variant.
/// TBAA / alias-scope / noalias metadata is attached when the corresponding
/// tag is non-null.
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
    Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
    MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  Ptr = getCastedInt8PtrValue(Ptr);
  // Unlike plain memset, the element size is an explicit i32 argument.
  Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memset_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
/// Create a call to a memory-transfer intrinsic (\p IntrID selects memcpy,
/// memmove, or a related transfer intrinsic), copying \p Size bytes from
/// \p Src to \p Dst. Source/destination alignments are applied only when
/// known (non-None); TBAA, TBAA-struct, alias-scope and noalias metadata
/// are attached when the corresponding tag is non-null.
CallInst *IRBuilderBase::CreateMemTransferInst(
    Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
    MaybeAlign SrcAlign, Value *Size, bool isVolatile, MDNode *TBAATag,
    MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  // Overloaded on both pointer types and the size type.
  Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, IntrID, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto* MCI = cast<MemTransferInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
  179. CallInst *IRBuilderBase::CreateMemCpyInline(
  180. Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign,
  181. Value *Size, bool IsVolatile, MDNode *TBAATag, MDNode *TBAAStructTag,
  182. MDNode *ScopeTag, MDNode *NoAliasTag) {
  183. Dst = getCastedInt8PtrValue(Dst);
  184. Src = getCastedInt8PtrValue(Src);
  185. Value *Ops[] = {Dst, Src, Size, getInt1(IsVolatile)};
  186. Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  187. Function *F = BB->getParent();
  188. Module *M = F->getParent();
  189. Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy_inline, Tys);
  190. CallInst *CI = createCallHelper(TheFn, Ops, this);
  191. auto *MCI = cast<MemCpyInlineInst>(CI);
  192. if (DstAlign)
  193. MCI->setDestAlignment(*DstAlign);
  194. if (SrcAlign)
  195. MCI->setSourceAlignment(*SrcAlign);
  196. // Set the TBAA info if present.
  197. if (TBAATag)
  198. MCI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
  199. // Set the TBAA Struct info if present.
  200. if (TBAAStructTag)
  201. MCI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
  202. if (ScopeTag)
  203. MCI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
  204. if (NoAliasTag)
  205. MCI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
  206. return CI;
  207. }
/// Create a call to llvm.memcpy.element.unordered.atomic, copying \p Size
/// bytes from \p Src to \p Dst in unordered-atomic chunks of \p ElementSize
/// bytes. Both alignments are mandatory and must be at least the element
/// size. TBAA, TBAA-struct, alias-scope and noalias metadata are attached
/// when the corresponding tag is non-null.
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memcpy_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  // Set the alignment of the pointer args.
  auto *AMCI = cast<AtomicMemCpyInst>(CI);
  AMCI->setDestAlignment(DstAlign);
  AMCI->setSourceAlignment(SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
  240. CallInst *IRBuilderBase::CreateMemMove(Value *Dst, MaybeAlign DstAlign,
  241. Value *Src, MaybeAlign SrcAlign,
  242. Value *Size, bool isVolatile,
  243. MDNode *TBAATag, MDNode *ScopeTag,
  244. MDNode *NoAliasTag) {
  245. Dst = getCastedInt8PtrValue(Dst);
  246. Src = getCastedInt8PtrValue(Src);
  247. Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  248. Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
  249. Module *M = BB->getParent()->getParent();
  250. Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys);
  251. CallInst *CI = createCallHelper(TheFn, Ops, this);
  252. auto *MMI = cast<MemMoveInst>(CI);
  253. if (DstAlign)
  254. MMI->setDestAlignment(*DstAlign);
  255. if (SrcAlign)
  256. MMI->setSourceAlignment(*SrcAlign);
  257. // Set the TBAA info if present.
  258. if (TBAATag)
  259. CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
  260. if (ScopeTag)
  261. CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
  262. if (NoAliasTag)
  263. CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
  264. return CI;
  265. }
  266. CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
  267. Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
  268. uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
  269. MDNode *ScopeTag, MDNode *NoAliasTag) {
  270. assert(DstAlign >= ElementSize &&
  271. "Pointer alignment must be at least element size");
  272. assert(SrcAlign >= ElementSize &&
  273. "Pointer alignment must be at least element size");
  274. Dst = getCastedInt8PtrValue(Dst);
  275. Src = getCastedInt8PtrValue(Src);
  276. Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  277. Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  278. Module *M = BB->getParent()->getParent();
  279. Function *TheFn = Intrinsic::getDeclaration(
  280. M, Intrinsic::memmove_element_unordered_atomic, Tys);
  281. CallInst *CI = createCallHelper(TheFn, Ops, this);
  282. // Set the alignment of the pointer args.
  283. CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
  284. CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));
  285. // Set the TBAA info if present.
  286. if (TBAATag)
  287. CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
  288. // Set the TBAA Struct info if present.
  289. if (TBAAStructTag)
  290. CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
  291. if (ScopeTag)
  292. CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
  293. if (NoAliasTag)
  294. CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
  295. return CI;
  296. }
  297. static CallInst *getReductionIntrinsic(IRBuilderBase *Builder, Intrinsic::ID ID,
  298. Value *Src) {
  299. Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  300. Value *Ops[] = {Src};
  301. Type *Tys[] = { Src->getType() };
  302. auto Decl = Intrinsic::getDeclaration(M, ID, Tys);
  303. return createCallHelper(Decl, Ops, Builder);
  304. }
  305. CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
  306. Module *M = GetInsertBlock()->getParent()->getParent();
  307. Value *Ops[] = {Acc, Src};
  308. auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fadd,
  309. {Src->getType()});
  310. return createCallHelper(Decl, Ops, this);
  311. }
  312. CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
  313. Module *M = GetInsertBlock()->getParent()->getParent();
  314. Value *Ops[] = {Acc, Src};
  315. auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fmul,
  316. {Src->getType()});
  317. return createCallHelper(Decl, Ops, this);
  318. }
/// Create an integer add reduction of the vector \p Src.
CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_add, Src);
}

/// Create an integer multiply reduction of the vector \p Src.
CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_mul, Src);
}

/// Create a bitwise AND reduction of the vector \p Src.
CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_and, Src);
}

/// Create a bitwise OR reduction of the vector \p Src.
CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_or, Src);
}

/// Create a bitwise XOR reduction of the vector \p Src.
CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_xor, Src);
}

/// Create a signed or unsigned integer max reduction of the vector \p Src.
CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
  return getReductionIntrinsic(this, ID, Src);
}

/// Create a signed or unsigned integer min reduction of the vector \p Src.
CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
  return getReductionIntrinsic(this, ID, Src);
}

/// Create a floating-point max reduction of the vector \p Src.
CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmax, Src);
}

/// Create a floating-point min reduction of the vector \p Src.
CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmin, Src);
}
/// Create a call to llvm.lifetime.start for the allocation \p Ptr.
/// A null \p Size means the size is unknown and is encoded as i64 -1;
/// otherwise \p Size must be an i64.
CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.start requires the size to be an i64");
  Value *Ops[] = { Size, Ptr };
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, {Ptr->getType()});
  return createCallHelper(TheFn, Ops, this);
}
/// Create a call to llvm.lifetime.end for the allocation \p Ptr.
/// A null \p Size means the size is unknown and is encoded as i64 -1;
/// otherwise \p Size must be an i64.
CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.end only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.end requires the size to be an i64");
  Value *Ops[] = { Size, Ptr };
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, {Ptr->getType()});
  return createCallHelper(TheFn, Ops, this);
}
/// Create a call to llvm.invariant.start over \p Size bytes at \p Ptr.
/// A null \p Size means the size is unknown and is encoded as i64 -1;
/// otherwise \p Size must be an i64.
CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "invariant.start requires the size to be an i64");

  Value *Ops[] = {Size, Ptr};
  // Fill in the single overloaded type: memory object type.
  Type *ObjectPtr[1] = {Ptr->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
  return createCallHelper(TheFn, Ops, this);
}
  397. CallInst *
  398. IRBuilderBase::CreateAssumption(Value *Cond,
  399. ArrayRef<OperandBundleDef> OpBundles) {
  400. assert(Cond->getType() == getInt1Ty() &&
  401. "an assumption condition must be of type i1");
  402. Value *Ops[] = { Cond };
  403. Module *M = BB->getParent()->getParent();
  404. Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
  405. return createCallHelper(FnAssume, Ops, this, "", nullptr, OpBundles);
  406. }
  407. Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
  408. Module *M = BB->getModule();
  409. auto *FnIntrinsic = Intrinsic::getDeclaration(
  410. M, Intrinsic::experimental_noalias_scope_decl, {});
  411. return createCallHelper(FnIntrinsic, {Scope}, this);
  412. }
/// Create a call to a Masked Load intrinsic.
/// \p Ty        - vector type to load
/// \p Ptr       - base pointer for the load
/// \p Alignment - alignment of the source location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
/// \p PassThru  - pass-through value that is used to fill the masked-off lanes
///                of the result
/// \p Name      - name of the result variable
CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment,
                                          Value *Mask, Value *PassThru,
                                          const Twine &Name) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(PtrTy->isOpaqueOrPointeeTypeMatches(Ty) && "Wrong element type");
  assert(Mask && "Mask should not be all-ones (null)");
  // A null PassThru means the caller does not care about the masked-off
  // lanes; fill them with undef.
  if (!PassThru)
    PassThru = UndefValue::get(Ty);
  Type *OverloadedTypes[] = { Ty, PtrTy };
  // The alignment is passed as an explicit i32 argument to the intrinsic.
  Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
                               OverloadedTypes, Name);
}
/// Create a call to a Masked Store intrinsic.
/// \p Val       - data to be stored,
/// \p Ptr       - base pointer for the store
/// \p Alignment - alignment of the destination location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
                                           Align Alignment, Value *Mask) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = Val->getType();
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(PtrTy->isOpaqueOrPointeeTypeMatches(DataTy) && "Wrong element type");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = { DataTy, PtrTy };
  // The alignment is passed as an explicit i32 argument to the intrinsic.
  Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
  return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
}
  453. /// Create a call to a Masked intrinsic, with given intrinsic Id,
  454. /// an array of operands - Ops, and an array of overloaded types -
  455. /// OverloadedTypes.
  456. CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
  457. ArrayRef<Value *> Ops,
  458. ArrayRef<Type *> OverloadedTypes,
  459. const Twine &Name) {
  460. Module *M = BB->getParent()->getParent();
  461. Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
  462. return createCallHelper(TheFn, Ops, this, Name);
  463. }
  464. /// Create a call to a Masked Gather intrinsic.
  465. /// \p Ty - vector type to gather
  466. /// \p Ptrs - vector of pointers for loading
  467. /// \p Align - alignment for one element
  468. /// \p Mask - vector of booleans which indicates what vector lanes should
  469. /// be accessed in memory
  470. /// \p PassThru - pass-through value that is used to fill the masked-off lanes
  471. /// of the result
  472. /// \p Name - name of the result variable
  473. CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs,
  474. Align Alignment, Value *Mask,
  475. Value *PassThru,
  476. const Twine &Name) {
  477. auto *VecTy = cast<VectorType>(Ty);
  478. ElementCount NumElts = VecTy->getElementCount();
  479. auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  480. assert(cast<PointerType>(PtrsTy->getElementType())
  481. ->isOpaqueOrPointeeTypeMatches(
  482. cast<VectorType>(Ty)->getElementType()) &&
  483. "Element type mismatch");
  484. assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");
  485. if (!Mask)
  486. Mask = Constant::getAllOnesValue(
  487. VectorType::get(Type::getInt1Ty(Context), NumElts));
  488. if (!PassThru)
  489. PassThru = UndefValue::get(Ty);
  490. Type *OverloadedTypes[] = {Ty, PtrsTy};
  491. Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};
  492. // We specify only one type when we create this intrinsic. Types of other
  493. // arguments are derived from this type.
  494. return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
  495. Name);
  496. }
/// Create a call to a Masked Scatter intrinsic.
/// \p Data      - data to be stored,
/// \p Ptrs      - the vector of pointers, where the \p Data elements should be
///                stored
/// \p Alignment - alignment for one element
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
                                             Align Alignment, Value *Mask) {
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  auto *DataTy = cast<VectorType>(Data->getType());
  ElementCount NumElts = PtrsTy->getElementCount();

#ifndef NDEBUG
  // Only needed for the asserts below; avoid the cast in release builds.
  auto *PtrTy = cast<PointerType>(PtrsTy->getElementType());
  assert(NumElts == DataTy->getElementCount() &&
         PtrTy->isOpaqueOrPointeeTypeMatches(DataTy->getElementType()) &&
         "Incompatible pointer and data types");
#endif

  // A null mask means "store all lanes".
  if (!Mask)
    Mask = Constant::getAllOnesValue(
        VectorType::get(Type::getInt1Ty(Context), NumElts));

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
}
/// Assemble the fixed argument list of a gc.statepoint: statepoint ID,
/// number of patchable bytes, the actual callee, the explicit call-arg
/// count, the statepoint flags, the call args themselves, and finally two
/// zeros for the legacy transition/deopt arg counts.
template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  llvm::append_range(Args, CallArgs);
  // GC Transition and Deopt args are now always handled via operand bundle.
  // They will be removed from the signature of gc.statepoint shortly.
  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
  // GC args are now encoded in the gc-live operand bundle
  return Args;
}
  542. template<typename T1, typename T2, typename T3>
  543. static std::vector<OperandBundleDef>
  544. getStatepointBundles(Optional<ArrayRef<T1>> TransitionArgs,
  545. Optional<ArrayRef<T2>> DeoptArgs,
  546. ArrayRef<T3> GCArgs) {
  547. std::vector<OperandBundleDef> Rval;
  548. if (DeoptArgs) {
  549. SmallVector<Value*, 16> DeoptValues;
  550. llvm::append_range(DeoptValues, *DeoptArgs);
  551. Rval.emplace_back("deopt", DeoptValues);
  552. }
  553. if (TransitionArgs) {
  554. SmallVector<Value*, 16> TransitionValues;
  555. llvm::append_range(TransitionValues, *TransitionArgs);
  556. Rval.emplace_back("gc-transition", TransitionValues);
  557. }
  558. if (GCArgs.size()) {
  559. SmallVector<Value*, 16> LiveValues;
  560. llvm::append_range(LiveValues, GCArgs);
  561. Rval.emplace_back("gc-live", LiveValues);
  562. }
  563. return Rval;
  564. }
/// Shared implementation for all CreateGCStatepointCall overloads: declares
/// the gc.statepoint intrinsic overloaded on the callee's function-pointer
/// type, assembles the fixed argument list, and attaches transition/deopt/
/// gc-live operand bundles.
template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    Optional<ArrayRef<T1>> TransitionArgs,
    Optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  // Extract out the type of the callee.
  auto *FuncPtrType = cast<PointerType>(ActualCallee->getType());
  assert(isa<FunctionType>(FuncPtrType->getPointerElementType()) &&
         "actual callee must be a callable value");

  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Type *ArgTypes[] = { FuncPtrType };
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                ArgTypes);

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualCallee, Flags,
                        CallArgs);

  return Builder->CreateCall(FnStatepoint, Args,
                             getStatepointBundles(TransitionArgs, DeoptArgs,
                                                  GCArgs),
                             Name);
}
/// Create a gc.statepoint call with default flags and no transition args;
/// call arguments, deopt arguments and GC values are all Value*s.
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
    ArrayRef<Value *> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, None /* No Transition Args */, DeoptArgs, GCArgs, Name);
}
/// Create a gc.statepoint call with explicit \p Flags; transition and deopt
/// arguments are provided as Uses (e.g. when rewriting an existing call).
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee, uint32_t Flags,
    ArrayRef<Value *> CallArgs, Optional<ArrayRef<Use>> TransitionArgs,
    Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}
  607. CallInst *IRBuilderBase::CreateGCStatepointCall(
  608. uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
  609. ArrayRef<Use> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
  610. ArrayRef<Value *> GCArgs, const Twine &Name) {
  611. return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
  612. this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
  613. CallArgs, None, DeoptArgs, GCArgs, Name);
  614. }
  615. template <typename T0, typename T1, typename T2, typename T3>
  616. static InvokeInst *CreateGCStatepointInvokeCommon(
  617. IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
  618. Value *ActualInvokee, BasicBlock *NormalDest, BasicBlock *UnwindDest,
  619. uint32_t Flags, ArrayRef<T0> InvokeArgs,
  620. Optional<ArrayRef<T1>> TransitionArgs, Optional<ArrayRef<T2>> DeoptArgs,
  621. ArrayRef<T3> GCArgs, const Twine &Name) {
  622. // Extract out the type of the callee.
  623. auto *FuncPtrType = cast<PointerType>(ActualInvokee->getType());
  624. assert(isa<FunctionType>(FuncPtrType->getPointerElementType()) &&
  625. "actual callee must be a callable value");
  626. Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  627. // Fill in the one generic type'd argument (the function is also vararg)
  628. Function *FnStatepoint = Intrinsic::getDeclaration(
  629. M, Intrinsic::experimental_gc_statepoint, {FuncPtrType});
  630. std::vector<Value *> Args =
  631. getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee, Flags,
  632. InvokeArgs);
  633. return Builder->CreateInvoke(FnStatepoint, NormalDest, UnwindDest, Args,
  634. getStatepointBundles(TransitionArgs, DeoptArgs,
  635. GCArgs),
  636. Name);
  637. }
  638. InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
  639. uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
  640. BasicBlock *NormalDest, BasicBlock *UnwindDest,
  641. ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Value *>> DeoptArgs,
  642. ArrayRef<Value *> GCArgs, const Twine &Name) {
  643. return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
  644. this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
  645. uint32_t(StatepointFlags::None), InvokeArgs, None /* No Transition Args*/,
  646. DeoptArgs, GCArgs, Name);
  647. }
/// Fully general overload: the caller supplies statepoint flags plus
/// transition and deopt state as Uses of an existing invoke being
/// rewritten. Forwards directly to the common implementation.
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
    Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}
  657. InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
  658. uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
  659. BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
  660. Optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs, const Twine &Name) {
  661. return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
  662. this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
  663. uint32_t(StatepointFlags::None), InvokeArgs, None, DeoptArgs, GCArgs,
  664. Name);
  665. }
  666. CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
  667. Type *ResultType,
  668. const Twine &Name) {
  669. Intrinsic::ID ID = Intrinsic::experimental_gc_result;
  670. Module *M = BB->getParent()->getParent();
  671. Type *Types[] = {ResultType};
  672. Function *FnGCResult = Intrinsic::getDeclaration(M, ID, Types);
  673. Value *Args[] = {Statepoint};
  674. return createCallHelper(FnGCResult, Args, this, Name);
  675. }
  676. CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
  677. int BaseOffset,
  678. int DerivedOffset,
  679. Type *ResultType,
  680. const Twine &Name) {
  681. Module *M = BB->getParent()->getParent();
  682. Type *Types[] = {ResultType};
  683. Function *FnGCRelocate =
  684. Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);
  685. Value *Args[] = {Statepoint,
  686. getInt32(BaseOffset),
  687. getInt32(DerivedOffset)};
  688. return createCallHelper(FnGCRelocate, Args, this, Name);
  689. }
  690. CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
  691. const Twine &Name) {
  692. Module *M = BB->getParent()->getParent();
  693. Type *PtrTy = DerivedPtr->getType();
  694. Function *FnGCFindBase = Intrinsic::getDeclaration(
  695. M, Intrinsic::experimental_gc_get_pointer_base, {PtrTy, PtrTy});
  696. return createCallHelper(FnGCFindBase, {DerivedPtr}, this, Name);
  697. }
  698. CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
  699. const Twine &Name) {
  700. Module *M = BB->getParent()->getParent();
  701. Type *PtrTy = DerivedPtr->getType();
  702. Function *FnGCGetOffset = Intrinsic::getDeclaration(
  703. M, Intrinsic::experimental_gc_get_pointer_offset, {PtrTy});
  704. return createCallHelper(FnGCGetOffset, {DerivedPtr}, this, Name);
  705. }
  706. CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
  707. Instruction *FMFSource,
  708. const Twine &Name) {
  709. Module *M = BB->getModule();
  710. Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()});
  711. return createCallHelper(Fn, {V}, this, Name, FMFSource);
  712. }
  713. CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
  714. Value *RHS,
  715. Instruction *FMFSource,
  716. const Twine &Name) {
  717. Module *M = BB->getModule();
  718. Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() });
  719. return createCallHelper(Fn, {LHS, RHS}, this, Name, FMFSource);
  720. }
  721. CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
  722. ArrayRef<Type *> Types,
  723. ArrayRef<Value *> Args,
  724. Instruction *FMFSource,
  725. const Twine &Name) {
  726. Module *M = BB->getModule();
  727. Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
  728. return createCallHelper(Fn, Args, this, Name, FMFSource);
  729. }
  730. CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
  731. Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
  732. const Twine &Name, MDNode *FPMathTag,
  733. Optional<RoundingMode> Rounding,
  734. Optional<fp::ExceptionBehavior> Except) {
  735. Value *RoundingV = getConstrainedFPRounding(Rounding);
  736. Value *ExceptV = getConstrainedFPExcept(Except);
  737. FastMathFlags UseFMF = FMF;
  738. if (FMFSource)
  739. UseFMF = FMFSource->getFastMathFlags();
  740. CallInst *C = CreateIntrinsic(ID, {L->getType()},
  741. {L, R, RoundingV, ExceptV}, nullptr, Name);
  742. setConstrainedFPCallAttr(C);
  743. setFPAttrs(C, FPMathTag, UseFMF);
  744. return C;
  745. }
  746. Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
  747. const Twine &Name, MDNode *FPMathTag) {
  748. if (Instruction::isBinaryOp(Opc)) {
  749. assert(Ops.size() == 2 && "Invalid number of operands!");
  750. return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
  751. Ops[0], Ops[1], Name, FPMathTag);
  752. }
  753. if (Instruction::isUnaryOp(Opc)) {
  754. assert(Ops.size() == 1 && "Invalid number of operands!");
  755. return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
  756. Ops[0], Name, FPMathTag);
  757. }
  758. llvm_unreachable("Unexpected opcode!");
  759. }
/// Emits a constrained FP cast intrinsic from \p V to \p DestTy, appending
/// the exception-behavior metadata operand and — for intrinsics that take
/// one — the rounding-mode operand.
CallInst *IRBuilderBase::CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy,
    Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);
  // Prefer fast-math flags from the source instruction over the builder's.
  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();
  CallInst *C;
  // Consult the constrained-ops table (X-macro expansion) to learn whether
  // this intrinsic carries an explicit rounding-mode operand.
  bool HasRoundingMD = false;
  switch (ID) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
                        nullptr, Name);
  } else
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
                        Name);
  setConstrainedFPCallAttr(C);
  // Only FP-valued results may carry fast-math flags / fpmath metadata
  // (e.g. an fptosi result is an integer and must be skipped).
  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}
  792. Value *IRBuilderBase::CreateFCmpHelper(
  793. CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name,
  794. MDNode *FPMathTag, bool IsSignaling) {
  795. if (IsFPConstrained) {
  796. auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
  797. : Intrinsic::experimental_constrained_fcmp;
  798. return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
  799. }
  800. if (auto *LC = dyn_cast<Constant>(LHS))
  801. if (auto *RC = dyn_cast<Constant>(RHS))
  802. return Insert(Folder.CreateFCmp(P, LC, RC), Name);
  803. return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
  804. }
  805. CallInst *IRBuilderBase::CreateConstrainedFPCmp(
  806. Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
  807. const Twine &Name, Optional<fp::ExceptionBehavior> Except) {
  808. Value *PredicateV = getConstrainedFPPredicate(P);
  809. Value *ExceptV = getConstrainedFPExcept(Except);
  810. CallInst *C = CreateIntrinsic(ID, {L->getType()},
  811. {L, R, PredicateV, ExceptV}, nullptr, Name);
  812. setConstrainedFPCallAttr(C);
  813. return C;
  814. }
/// Emits a call to constrained FP intrinsic \p Callee, appending the
/// rounding-mode operand (when the intrinsic takes one) and the
/// exception-behavior operand to the caller-provided arguments.
CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs;
  // Start from the caller's arguments; metadata operands are appended below.
  append_range(UseArgs, Args);
  // Consult the constrained-ops table (X-macro expansion) to learn whether
  // this intrinsic carries an explicit rounding-mode operand.
  bool HasRoundingMD = false;
  switch (Callee->getIntrinsicID()) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD)
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  // Every constrained intrinsic takes the exception behavior as its last
  // operand.
  UseArgs.push_back(getConstrainedFPExcept(Except));
  CallInst *C = CreateCall(Callee, UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}
  838. Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
  839. const Twine &Name, Instruction *MDFrom) {
  840. if (auto *V = Folder.FoldSelect(C, True, False))
  841. return V;
  842. SelectInst *Sel = SelectInst::Create(C, True, False);
  843. if (MDFrom) {
  844. MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
  845. MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
  846. Sel = addBranchMetadata(Sel, Prof, Unpred);
  847. }
  848. if (isa<FPMathOperator>(Sel))
  849. setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
  850. return Insert(Sel, Name);
  851. }
  852. Value *IRBuilderBase::CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
  853. const Twine &Name) {
  854. assert(LHS->getType() == RHS->getType() &&
  855. "Pointer subtraction operand types must match!");
  856. assert(cast<PointerType>(LHS->getType())
  857. ->isOpaqueOrPointeeTypeMatches(ElemTy) &&
  858. "Pointer type must match element type");
  859. Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
  860. Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
  861. Value *Difference = CreateSub(LHS_int, RHS_int);
  862. return CreateExactSDiv(Difference, ConstantExpr::getSizeOf(ElemTy),
  863. Name);
  864. }
  865. Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
  866. assert(isa<PointerType>(Ptr->getType()) &&
  867. "launder.invariant.group only applies to pointers.");
  868. // FIXME: we could potentially avoid casts to/from i8*.
  869. auto *PtrType = Ptr->getType();
  870. auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  871. if (PtrType != Int8PtrTy)
  872. Ptr = CreateBitCast(Ptr, Int8PtrTy);
  873. Module *M = BB->getParent()->getParent();
  874. Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
  875. M, Intrinsic::launder_invariant_group, {Int8PtrTy});
  876. assert(FnLaunderInvariantGroup->getReturnType() == Int8PtrTy &&
  877. FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
  878. Int8PtrTy &&
  879. "LaunderInvariantGroup should take and return the same type");
  880. CallInst *Fn = CreateCall(FnLaunderInvariantGroup, {Ptr});
  881. if (PtrType != Int8PtrTy)
  882. return CreateBitCast(Fn, PtrType);
  883. return Fn;
  884. }
  885. Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
  886. assert(isa<PointerType>(Ptr->getType()) &&
  887. "strip.invariant.group only applies to pointers.");
  888. // FIXME: we could potentially avoid casts to/from i8*.
  889. auto *PtrType = Ptr->getType();
  890. auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  891. if (PtrType != Int8PtrTy)
  892. Ptr = CreateBitCast(Ptr, Int8PtrTy);
  893. Module *M = BB->getParent()->getParent();
  894. Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
  895. M, Intrinsic::strip_invariant_group, {Int8PtrTy});
  896. assert(FnStripInvariantGroup->getReturnType() == Int8PtrTy &&
  897. FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
  898. Int8PtrTy &&
  899. "StripInvariantGroup should take and return the same type");
  900. CallInst *Fn = CreateCall(FnStripInvariantGroup, {Ptr});
  901. if (PtrType != Int8PtrTy)
  902. return CreateBitCast(Fn, PtrType);
  903. return Fn;
  904. }
  905. Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
  906. auto *Ty = cast<VectorType>(V->getType());
  907. if (isa<ScalableVectorType>(Ty)) {
  908. Module *M = BB->getParent()->getParent();
  909. Function *F = Intrinsic::getDeclaration(
  910. M, Intrinsic::experimental_vector_reverse, Ty);
  911. return Insert(CallInst::Create(F, V), Name);
  912. }
  913. // Keep the original behaviour for fixed vector
  914. SmallVector<int, 8> ShuffleMask;
  915. int NumElts = Ty->getElementCount().getKnownMinValue();
  916. for (int i = 0; i < NumElts; ++i)
  917. ShuffleMask.push_back(NumElts - i - 1);
  918. return CreateShuffleVector(V, ShuffleMask, Name);
  919. }
  920. Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
  921. const Twine &Name) {
  922. assert(isa<VectorType>(V1->getType()) && "Unexpected type");
  923. assert(V1->getType() == V2->getType() &&
  924. "Splice expects matching operand types!");
  925. if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
  926. Module *M = BB->getParent()->getParent();
  927. Function *F = Intrinsic::getDeclaration(
  928. M, Intrinsic::experimental_vector_splice, VTy);
  929. Value *Ops[] = {V1, V2, getInt32(Imm)};
  930. return Insert(CallInst::Create(F, Ops), Name);
  931. }
  932. unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
  933. assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
  934. "Invalid immediate for vector splice!");
  935. // Keep the original behaviour for fixed vector
  936. unsigned Idx = (NumElts + Imm) % NumElts;
  937. SmallVector<int, 8> Mask;
  938. for (unsigned I = 0; I < NumElts; ++I)
  939. Mask.push_back(Idx + I);
  940. return CreateShuffleVector(V1, V2, Mask);
  941. }
  942. Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
  943. const Twine &Name) {
  944. auto EC = ElementCount::getFixed(NumElts);
  945. return CreateVectorSplat(EC, V, Name);
  946. }
  947. Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
  948. const Twine &Name) {
  949. assert(EC.isNonZero() && "Cannot splat to an empty vector!");
  950. // First insert it into a poison vector so we can shuffle it.
  951. Type *I32Ty = getInt32Ty();
  952. Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
  953. V = CreateInsertElement(Poison, V, ConstantInt::get(I32Ty, 0),
  954. Name + ".splatinsert");
  955. // Shuffle the value across the desired number of elements.
  956. SmallVector<int, 16> Zeros;
  957. Zeros.resize(EC.getKnownMinValue());
  958. return CreateShuffleVector(V, Zeros, Name + ".splat");
  959. }
  960. Value *IRBuilderBase::CreateExtractInteger(
  961. const DataLayout &DL, Value *From, IntegerType *ExtractedTy,
  962. uint64_t Offset, const Twine &Name) {
  963. auto *IntTy = cast<IntegerType>(From->getType());
  964. assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
  965. DL.getTypeStoreSize(IntTy) &&
  966. "Element extends past full value");
  967. uint64_t ShAmt = 8 * Offset;
  968. Value *V = From;
  969. if (DL.isBigEndian())
  970. ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
  971. DL.getTypeStoreSize(ExtractedTy) - Offset);
  972. if (ShAmt) {
  973. V = CreateLShr(V, ShAmt, Name + ".shift");
  974. }
  975. assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
  976. "Cannot extract to a larger integer!");
  977. if (ExtractedTy != IntTy) {
  978. V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
  979. }
  980. return V;
  981. }
  982. Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
  983. Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
  984. MDNode *DbgInfo) {
  985. auto *BaseType = Base->getType();
  986. assert(isa<PointerType>(BaseType) &&
  987. "Invalid Base ptr type for preserve.array.access.index.");
  988. assert(cast<PointerType>(BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
  989. "Pointer element type mismatch");
  990. Value *LastIndexV = getInt32(LastIndex);
  991. Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  992. SmallVector<Value *, 4> IdxList(Dimension, Zero);
  993. IdxList.push_back(LastIndexV);
  994. Type *ResultType =
  995. GetElementPtrInst::getGEPReturnType(ElTy, Base, IdxList);
  996. Module *M = BB->getParent()->getParent();
  997. Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
  998. M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});
  999. Value *DimV = getInt32(Dimension);
  1000. CallInst *Fn =
  1001. CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
  1002. Fn->addParamAttr(
  1003. 0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  1004. if (DbgInfo)
  1005. Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
  1006. return Fn;
  1007. }
  1008. Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
  1009. Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
  1010. assert(isa<PointerType>(Base->getType()) &&
  1011. "Invalid Base ptr type for preserve.union.access.index.");
  1012. auto *BaseType = Base->getType();
  1013. Module *M = BB->getParent()->getParent();
  1014. Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
  1015. M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});
  1016. Value *DIIndex = getInt32(FieldIndex);
  1017. CallInst *Fn =
  1018. CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
  1019. if (DbgInfo)
  1020. Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
  1021. return Fn;
  1022. }
  1023. Value *IRBuilderBase::CreatePreserveStructAccessIndex(
  1024. Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
  1025. MDNode *DbgInfo) {
  1026. auto *BaseType = Base->getType();
  1027. assert(isa<PointerType>(BaseType) &&
  1028. "Invalid Base ptr type for preserve.struct.access.index.");
  1029. assert(cast<PointerType>(BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
  1030. "Pointer element type mismatch");
  1031. Value *GEPIndex = getInt32(Index);
  1032. Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  1033. Type *ResultType =
  1034. GetElementPtrInst::getGEPReturnType(ElTy, Base, {Zero, GEPIndex});
  1035. Module *M = BB->getParent()->getParent();
  1036. Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
  1037. M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});
  1038. Value *DIIndex = getInt32(FieldIndex);
  1039. CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
  1040. {Base, GEPIndex, DIIndex});
  1041. Fn->addParamAttr(
  1042. 0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  1043. if (DbgInfo)
  1044. Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
  1045. return Fn;
  1046. }
  1047. CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
  1048. Value *PtrValue,
  1049. Value *AlignValue,
  1050. Value *OffsetValue) {
  1051. SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
  1052. if (OffsetValue)
  1053. Vals.push_back(OffsetValue);
  1054. OperandBundleDefT<Value *> AlignOpB("align", Vals);
  1055. return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
  1056. }
  1057. CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
  1058. Value *PtrValue,
  1059. unsigned Alignment,
  1060. Value *OffsetValue) {
  1061. assert(isa<PointerType>(PtrValue->getType()) &&
  1062. "trying to create an alignment assumption on a non-pointer?");
  1063. assert(Alignment != 0 && "Invalid Alignment");
  1064. auto *PtrTy = cast<PointerType>(PtrValue->getType());
  1065. Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
  1066. Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
  1067. return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
  1068. }
  1069. CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
  1070. Value *PtrValue,
  1071. Value *Alignment,
  1072. Value *OffsetValue) {
  1073. assert(isa<PointerType>(PtrValue->getType()) &&
  1074. "trying to create an alignment assumption on a non-pointer?");
  1075. return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
  1076. }
// Out-of-line virtual destructors and anchor methods: these definitions
// pin the classes' vtables to this translation unit.
IRBuilderDefaultInserter::~IRBuilderDefaultInserter() {}
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() {}
IRBuilderFolder::~IRBuilderFolder() {}
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}