IRBuilder.cpp 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150
  1. //===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements the IRBuilder class, which is used as a convenient way
  10. // to create LLVM instructions with a consistent and simplified interface.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "llvm/IR/IRBuilder.h"
  14. #include "llvm/ADT/ArrayRef.h"
  15. #include "llvm/ADT/None.h"
  16. #include "llvm/IR/Constant.h"
  17. #include "llvm/IR/Constants.h"
  18. #include "llvm/IR/DerivedTypes.h"
  19. #include "llvm/IR/Function.h"
  20. #include "llvm/IR/GlobalValue.h"
  21. #include "llvm/IR/GlobalVariable.h"
  22. #include "llvm/IR/IntrinsicInst.h"
  23. #include "llvm/IR/Intrinsics.h"
  24. #include "llvm/IR/LLVMContext.h"
  25. #include "llvm/IR/NoFolder.h"
  26. #include "llvm/IR/Operator.h"
  27. #include "llvm/IR/Statepoint.h"
  28. #include "llvm/IR/Type.h"
  29. #include "llvm/IR/Value.h"
  30. #include "llvm/Support/Casting.h"
  31. #include "llvm/Support/MathExtras.h"
  32. #include <cassert>
  33. #include <cstdint>
  34. #include <vector>
  35. using namespace llvm;
  36. /// CreateGlobalString - Make a new global variable with an initializer that
  37. /// has array of i8 type filled in with the nul terminated string value
  38. /// specified. If Name is specified, it is the name of the global variable
  39. /// created.
  40. GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
  41. const Twine &Name,
  42. unsigned AddressSpace,
  43. Module *M) {
  44. Constant *StrConstant = ConstantDataArray::getString(Context, Str);
  45. if (!M)
  46. M = BB->getParent()->getParent();
  47. auto *GV = new GlobalVariable(
  48. *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
  49. StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
  50. GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  51. GV->setAlignment(Align(1));
  52. return GV;
  53. }
  54. Type *IRBuilderBase::getCurrentFunctionReturnType() const {
  55. assert(BB && BB->getParent() && "No current function!");
  56. return BB->getParent()->getReturnType();
  57. }
  58. Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
  59. auto *PT = cast<PointerType>(Ptr->getType());
  60. if (PT->getElementType()->isIntegerTy(8))
  61. return Ptr;
  62. // Otherwise, we need to insert a bitcast.
  63. return CreateBitCast(Ptr, getInt8PtrTy(PT->getAddressSpace()));
  64. }
  65. static CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
  66. IRBuilderBase *Builder,
  67. const Twine &Name = "",
  68. Instruction *FMFSource = nullptr,
  69. ArrayRef<OperandBundleDef> OpBundles = {}) {
  70. CallInst *CI = Builder->CreateCall(Callee, Ops, OpBundles, Name);
  71. if (FMFSource)
  72. CI->copyFastMathFlags(FMFSource);
  73. return CI;
  74. }
  75. Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
  76. Module *M = GetInsertBlock()->getParent()->getParent();
  77. assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
  78. Function *TheFn =
  79. Intrinsic::getDeclaration(M, Intrinsic::vscale, {Scaling->getType()});
  80. CallInst *CI = createCallHelper(TheFn, {}, this, Name);
  81. return cast<ConstantInt>(Scaling)->getSExtValue() == 1
  82. ? CI
  83. : CreateMul(CI, Scaling);
  84. }
/// Create a call to the llvm.memset intrinsic.
/// \p Ptr  - destination pointer (bitcast to i8* if needed)
/// \p Val  - byte value to store
/// \p Size - number of bytes to set
/// The destination alignment is only applied when \p Align is provided; the
/// TBAA, alias-scope and noalias metadata nodes are attached when non-null.
CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
                                      MaybeAlign Align, bool isVolatile,
                                      MDNode *TBAATag, MDNode *ScopeTag,
                                      MDNode *NoAliasTag) {
  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
  Type *Tys[] = { Ptr->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
  CallInst *CI = createCallHelper(TheFn, Ops, this);
  if (Align)
    cast<MemSetInst>(CI)->setDestAlignment(Align->value());
  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
  return CI;
}
/// Create a call to the llvm.memset.element.unordered.atomic intrinsic,
/// which stores \p Val byte-wise in \p ElementSize-sized unordered-atomic
/// chunks. Metadata nodes are attached when non-null.
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
    Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
    MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memset_element_unordered_atomic, Tys);
  CallInst *CI = createCallHelper(TheFn, Ops, this);
  cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);
  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
  return CI;
}
/// Create a call to a memory-transfer intrinsic (\p IntrID: memcpy, memmove,
/// etc.) from \p Src to \p Dst. Source/destination alignments are applied
/// only when provided; metadata nodes are attached when non-null.
CallInst *IRBuilderBase::CreateMemTransferInst(
    Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
    MaybeAlign SrcAlign, Value *Size, bool isVolatile, MDNode *TBAATag,
    MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);
  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, IntrID, Tys);
  CallInst *CI = createCallHelper(TheFn, Ops, this);
  auto* MCI = cast<MemTransferInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);
  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
  return CI;
}
/// Create a call to the llvm.memcpy.inline intrinsic (a memcpy that is
/// guaranteed to be expanded inline, never lowered to a libcall). The copy is
/// always non-volatile; alignments are applied only when provided.
CallInst *IRBuilderBase::CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign,
                                            Value *Src, MaybeAlign SrcAlign,
                                            Value *Size) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);
  Value *IsVolatile = getInt1(false);
  Value *Ops[] = {Dst, Src, Size, IsVolatile};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Function *F = BB->getParent();
  Module *M = F->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy_inline, Tys);
  CallInst *CI = createCallHelper(TheFn, Ops, this);
  auto *MCI = cast<MemCpyInlineInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);
  return CI;
}
/// Create a call to the llvm.memcpy.element.unordered.atomic intrinsic: a
/// copy performed in \p ElementSize-sized unordered-atomic chunks. Both
/// pointer alignments must be at least \p ElementSize. Metadata nodes are
/// attached when non-null.
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);
  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memcpy_element_unordered_atomic, Tys);
  CallInst *CI = createCallHelper(TheFn, Ops, this);
  // Set the alignment of the pointer args.
  auto *AMCI = cast<AtomicMemCpyInst>(CI);
  AMCI->setDestAlignment(DstAlign);
  AMCI->setSourceAlignment(SrcAlign);
  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
  return CI;
}
/// Create a call to the llvm.memmove intrinsic (overlap-tolerant copy).
/// Alignments are applied only when provided; metadata nodes are attached
/// when non-null.
CallInst *IRBuilderBase::CreateMemMove(Value *Dst, MaybeAlign DstAlign,
                                       Value *Src, MaybeAlign SrcAlign,
                                       Value *Size, bool isVolatile,
                                       MDNode *TBAATag, MDNode *ScopeTag,
                                       MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);
  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys);
  CallInst *CI = createCallHelper(TheFn, Ops, this);
  auto *MMI = cast<MemMoveInst>(CI);
  if (DstAlign)
    MMI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MMI->setSourceAlignment(*SrcAlign);
  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
  return CI;
}
  231. CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
  232. Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
  233. uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
  234. MDNode *ScopeTag, MDNode *NoAliasTag) {
  235. assert(DstAlign >= ElementSize &&
  236. "Pointer alignment must be at least element size");
  237. assert(SrcAlign >= ElementSize &&
  238. "Pointer alignment must be at least element size");
  239. Dst = getCastedInt8PtrValue(Dst);
  240. Src = getCastedInt8PtrValue(Src);
  241. Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  242. Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  243. Module *M = BB->getParent()->getParent();
  244. Function *TheFn = Intrinsic::getDeclaration(
  245. M, Intrinsic::memmove_element_unordered_atomic, Tys);
  246. CallInst *CI = createCallHelper(TheFn, Ops, this);
  247. // Set the alignment of the pointer args.
  248. CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
  249. CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));
  250. // Set the TBAA info if present.
  251. if (TBAATag)
  252. CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
  253. // Set the TBAA Struct info if present.
  254. if (TBAAStructTag)
  255. CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
  256. if (ScopeTag)
  257. CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
  258. if (NoAliasTag)
  259. CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
  260. return CI;
  261. }
  262. static CallInst *getReductionIntrinsic(IRBuilderBase *Builder, Intrinsic::ID ID,
  263. Value *Src) {
  264. Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  265. Value *Ops[] = {Src};
  266. Type *Tys[] = { Src->getType() };
  267. auto Decl = Intrinsic::getDeclaration(M, ID, Tys);
  268. return createCallHelper(Decl, Ops, Builder);
  269. }
  270. CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
  271. Module *M = GetInsertBlock()->getParent()->getParent();
  272. Value *Ops[] = {Acc, Src};
  273. auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fadd,
  274. {Src->getType()});
  275. return createCallHelper(Decl, Ops, this);
  276. }
  277. CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
  278. Module *M = GetInsertBlock()->getParent()->getParent();
  279. Value *Ops[] = {Acc, Src};
  280. auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fmul,
  281. {Src->getType()});
  282. return createCallHelper(Decl, Ops, this);
  283. }
  284. CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
  285. return getReductionIntrinsic(this, Intrinsic::vector_reduce_add, Src);
  286. }
  287. CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
  288. return getReductionIntrinsic(this, Intrinsic::vector_reduce_mul, Src);
  289. }
  290. CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
  291. return getReductionIntrinsic(this, Intrinsic::vector_reduce_and, Src);
  292. }
  293. CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
  294. return getReductionIntrinsic(this, Intrinsic::vector_reduce_or, Src);
  295. }
  296. CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
  297. return getReductionIntrinsic(this, Intrinsic::vector_reduce_xor, Src);
  298. }
  299. CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
  300. auto ID =
  301. IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
  302. return getReductionIntrinsic(this, ID, Src);
  303. }
  304. CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
  305. auto ID =
  306. IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
  307. return getReductionIntrinsic(this, ID, Src);
  308. }
  309. CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
  310. return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmax, Src);
  311. }
  312. CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
  313. return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmin, Src);
  314. }
  315. CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
  316. assert(isa<PointerType>(Ptr->getType()) &&
  317. "lifetime.start only applies to pointers.");
  318. Ptr = getCastedInt8PtrValue(Ptr);
  319. if (!Size)
  320. Size = getInt64(-1);
  321. else
  322. assert(Size->getType() == getInt64Ty() &&
  323. "lifetime.start requires the size to be an i64");
  324. Value *Ops[] = { Size, Ptr };
  325. Module *M = BB->getParent()->getParent();
  326. Function *TheFn =
  327. Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, {Ptr->getType()});
  328. return createCallHelper(TheFn, Ops, this);
  329. }
  330. CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
  331. assert(isa<PointerType>(Ptr->getType()) &&
  332. "lifetime.end only applies to pointers.");
  333. Ptr = getCastedInt8PtrValue(Ptr);
  334. if (!Size)
  335. Size = getInt64(-1);
  336. else
  337. assert(Size->getType() == getInt64Ty() &&
  338. "lifetime.end requires the size to be an i64");
  339. Value *Ops[] = { Size, Ptr };
  340. Module *M = BB->getParent()->getParent();
  341. Function *TheFn =
  342. Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, {Ptr->getType()});
  343. return createCallHelper(TheFn, Ops, this);
  344. }
  345. CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {
  346. assert(isa<PointerType>(Ptr->getType()) &&
  347. "invariant.start only applies to pointers.");
  348. Ptr = getCastedInt8PtrValue(Ptr);
  349. if (!Size)
  350. Size = getInt64(-1);
  351. else
  352. assert(Size->getType() == getInt64Ty() &&
  353. "invariant.start requires the size to be an i64");
  354. Value *Ops[] = {Size, Ptr};
  355. // Fill in the single overloaded type: memory object type.
  356. Type *ObjectPtr[1] = {Ptr->getType()};
  357. Module *M = BB->getParent()->getParent();
  358. Function *TheFn =
  359. Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
  360. return createCallHelper(TheFn, Ops, this);
  361. }
  362. CallInst *
  363. IRBuilderBase::CreateAssumption(Value *Cond,
  364. ArrayRef<OperandBundleDef> OpBundles) {
  365. assert(Cond->getType() == getInt1Ty() &&
  366. "an assumption condition must be of type i1");
  367. Value *Ops[] = { Cond };
  368. Module *M = BB->getParent()->getParent();
  369. Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
  370. return createCallHelper(FnAssume, Ops, this, "", nullptr, OpBundles);
  371. }
  372. Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
  373. Module *M = BB->getModule();
  374. auto *FnIntrinsic = Intrinsic::getDeclaration(
  375. M, Intrinsic::experimental_noalias_scope_decl, {});
  376. return createCallHelper(FnIntrinsic, {Scope}, this);
  377. }
  378. /// Create a call to a Masked Load intrinsic.
  379. /// \p Ptr - base pointer for the load
  380. /// \p Alignment - alignment of the source location
  381. /// \p Mask - vector of booleans which indicates what vector lanes should
  382. /// be accessed in memory
  383. /// \p PassThru - pass-through value that is used to fill the masked-off lanes
  384. /// of the result
  385. /// \p Name - name of the result variable
  386. CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, Align Alignment,
  387. Value *Mask, Value *PassThru,
  388. const Twine &Name) {
  389. auto *PtrTy = cast<PointerType>(Ptr->getType());
  390. Type *DataTy = PtrTy->getElementType();
  391. assert(DataTy->isVectorTy() && "Ptr should point to a vector");
  392. assert(Mask && "Mask should not be all-ones (null)");
  393. if (!PassThru)
  394. PassThru = UndefValue::get(DataTy);
  395. Type *OverloadedTypes[] = { DataTy, PtrTy };
  396. Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
  397. return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
  398. OverloadedTypes, Name);
  399. }
  400. /// Create a call to a Masked Store intrinsic.
  401. /// \p Val - data to be stored,
  402. /// \p Ptr - base pointer for the store
  403. /// \p Alignment - alignment of the destination location
  404. /// \p Mask - vector of booleans which indicates what vector lanes should
  405. /// be accessed in memory
  406. CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
  407. Align Alignment, Value *Mask) {
  408. auto *PtrTy = cast<PointerType>(Ptr->getType());
  409. Type *DataTy = PtrTy->getElementType();
  410. assert(DataTy->isVectorTy() && "Ptr should point to a vector");
  411. assert(Mask && "Mask should not be all-ones (null)");
  412. Type *OverloadedTypes[] = { DataTy, PtrTy };
  413. Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
  414. return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
  415. }
  416. /// Create a call to a Masked intrinsic, with given intrinsic Id,
  417. /// an array of operands - Ops, and an array of overloaded types -
  418. /// OverloadedTypes.
  419. CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
  420. ArrayRef<Value *> Ops,
  421. ArrayRef<Type *> OverloadedTypes,
  422. const Twine &Name) {
  423. Module *M = BB->getParent()->getParent();
  424. Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
  425. return createCallHelper(TheFn, Ops, this, Name);
  426. }
/// Create a call to a Masked Gather intrinsic.
/// \p Ptrs - vector of pointers for loading
/// \p Align - alignment for one element
/// \p Mask - vector of booleans which indicates what vector lanes should
/// be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
/// of the result
/// \p Name - name of the result variable
CallInst *IRBuilderBase::CreateMaskedGather(Value *Ptrs, Align Alignment,
                                            Value *Mask, Value *PassThru,
                                            const Twine &Name) {
  auto *PtrsTy = cast<FixedVectorType>(Ptrs->getType());
  auto *PtrTy = cast<PointerType>(PtrsTy->getElementType());
  unsigned NumElts = PtrsTy->getNumElements();
  // Result type: a vector of the pointee type, one lane per pointer.
  auto *DataTy = FixedVectorType::get(PtrTy->getElementType(), NumElts);
  // A null mask means "all lanes active".
  if (!Mask)
    Mask = Constant::getAllOnesValue(
        FixedVectorType::get(Type::getInt1Ty(Context), NumElts));
  // Masked-off lanes default to undef.
  if (!PassThru)
    PassThru = UndefValue::get(DataTy);
  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};
  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
                               Name);
}
/// Create a call to a Masked Scatter intrinsic.
/// \p Data - data to be stored,
/// \p Ptrs - the vector of pointers, where the \p Data elements should be
/// stored
/// \p Align - alignment for one element
/// \p Mask - vector of booleans which indicates what vector lanes should
/// be accessed in memory
CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
                                             Align Alignment, Value *Mask) {
  auto *PtrsTy = cast<FixedVectorType>(Ptrs->getType());
  auto *DataTy = cast<FixedVectorType>(Data->getType());
  unsigned NumElts = PtrsTy->getNumElements();
// Debug-only sanity check: pointer vector and data vector must have matching
// lane counts and element types.
#ifndef NDEBUG
  auto PtrTy = cast<PointerType>(PtrsTy->getElementType());
  assert(NumElts == DataTy->getNumElements() &&
         PtrTy->getElementType() == DataTy->getElementType() &&
         "Incompatible pointer and data types");
#endif
  // A null mask means "all lanes active".
  if (!Mask)
    Mask = Constant::getAllOnesValue(
        FixedVectorType::get(Type::getInt1Ty(Context), NumElts));
  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};
  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
}
/// Assemble the flat argument list for a gc.statepoint call/invoke:
/// [ID, NumPatchBytes, Callee, NumCallArgs, Flags, CallArgs...,
///  0 /*num transition args*/, 0 /*num deopt args*/].
/// The argument order and trailing zeros are part of the statepoint ABI.
template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  llvm::append_range(Args, CallArgs);
  // GC Transition and Deopt args are now always handled via operand bundle.
  // They will be removed from the signature of gc.statepoint shortly.
  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
  // GC args are now encoded in the gc-live operand bundle
  return Args;
}
/// Build the operand bundles for a gc.statepoint: "deopt" from \p DeoptArgs,
/// "gc-transition" from \p TransitionArgs, and "gc-live" from \p GCArgs.
/// A bundle is emitted only when its source is present/non-empty; note that
/// None (absent) and an empty ArrayRef are distinguished for the Optional
/// parameters.
template<typename T1, typename T2, typename T3>
static std::vector<OperandBundleDef>
getStatepointBundles(Optional<ArrayRef<T1>> TransitionArgs,
                     Optional<ArrayRef<T2>> DeoptArgs,
                     ArrayRef<T3> GCArgs) {
  std::vector<OperandBundleDef> Rval;
  if (DeoptArgs) {
    SmallVector<Value*, 16> DeoptValues;
    llvm::append_range(DeoptValues, *DeoptArgs);
    Rval.emplace_back("deopt", DeoptValues);
  }
  if (TransitionArgs) {
    SmallVector<Value*, 16> TransitionValues;
    llvm::append_range(TransitionValues, *TransitionArgs);
    Rval.emplace_back("gc-transition", TransitionValues);
  }
  if (GCArgs.size()) {
    SmallVector<Value*, 16> LiveValues;
    llvm::append_range(LiveValues, GCArgs);
    Rval.emplace_back("gc-live", LiveValues);
  }
  return Rval;
}
/// Shared implementation for all CreateGCStatepointCall overloads: declares
/// the gc.statepoint intrinsic overloaded on the callee's function-pointer
/// type, flattens the statepoint arguments, and emits the call with the
/// deopt/transition/gc-live operand bundles.
template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    Optional<ArrayRef<T1>> TransitionArgs,
    Optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  // Extract out the type of the callee.
  auto *FuncPtrType = cast<PointerType>(ActualCallee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Type *ArgTypes[] = { FuncPtrType };
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                ArgTypes);
  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualCallee, Flags,
                        CallArgs);
  return Builder->CreateCall(FnStatepoint, Args,
                             getStatepointBundles(TransitionArgs, DeoptArgs,
                                                  GCArgs),
                             Name);
}
/// Create a gc.statepoint call with default flags and no transition args;
/// call arguments are given as Values.
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
    ArrayRef<Value *> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, None /* No Transition Args */, DeoptArgs, GCArgs, Name);
}
/// Create a gc.statepoint call with explicit flags; transition and deopt
/// arguments are given as Uses (e.g. when rewriting an existing call site).
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee, uint32_t Flags,
    ArrayRef<Value *> CallArgs, Optional<ArrayRef<Use>> TransitionArgs,
    Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}
/// Create a gc.statepoint call with default flags and no transition args;
/// call arguments are given as Uses (e.g. forwarded from an existing call).
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
    ArrayRef<Use> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, None, DeoptArgs, GCArgs, Name);
}
/// Shared implementation for all CreateGCStatepointInvoke overloads: declares
/// the gc.statepoint intrinsic overloaded on the invokee's function-pointer
/// type, flattens the statepoint arguments, and emits the invoke with the
/// deopt/transition/gc-live operand bundles.
template <typename T0, typename T1, typename T2, typename T3>
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    Value *ActualInvokee, BasicBlock *NormalDest, BasicBlock *UnwindDest,
    uint32_t Flags, ArrayRef<T0> InvokeArgs,
    Optional<ArrayRef<T1>> TransitionArgs, Optional<ArrayRef<T2>> DeoptArgs,
    ArrayRef<T3> GCArgs, const Twine &Name) {
  // Extract out the type of the callee.
  auto *FuncPtrType = cast<PointerType>(ActualInvokee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_statepoint, {FuncPtrType});
  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee, Flags,
                        InvokeArgs);
  return Builder->CreateInvoke(FnStatepoint, NormalDest, UnwindDest, Args,
                               getStatepointBundles(TransitionArgs, DeoptArgs,
                                                    GCArgs),
                               Name);
}
/// Create a gc.statepoint invoke with default flags and no transition args;
/// invoke arguments are given as Values.
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, None /* No Transition Args*/,
      DeoptArgs, GCArgs, Name);
}
/// Create a gc.statepoint invoke with explicit statepoint flags; transition
/// and deopt arguments are supplied as ranges of Uses.
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
    Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}
/// Create a gc.statepoint invoke taking the invoke arguments as a range of
/// Uses, with default flags and no transition arguments.
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
    Optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, None, DeoptArgs, GCArgs,
      Name);
}
  623. CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
  624. Type *ResultType,
  625. const Twine &Name) {
  626. Intrinsic::ID ID = Intrinsic::experimental_gc_result;
  627. Module *M = BB->getParent()->getParent();
  628. Type *Types[] = {ResultType};
  629. Function *FnGCResult = Intrinsic::getDeclaration(M, ID, Types);
  630. Value *Args[] = {Statepoint};
  631. return createCallHelper(FnGCResult, Args, this, Name);
  632. }
  633. CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
  634. int BaseOffset,
  635. int DerivedOffset,
  636. Type *ResultType,
  637. const Twine &Name) {
  638. Module *M = BB->getParent()->getParent();
  639. Type *Types[] = {ResultType};
  640. Function *FnGCRelocate =
  641. Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);
  642. Value *Args[] = {Statepoint,
  643. getInt32(BaseOffset),
  644. getInt32(DerivedOffset)};
  645. return createCallHelper(FnGCRelocate, Args, this, Name);
  646. }
  647. CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
  648. Instruction *FMFSource,
  649. const Twine &Name) {
  650. Module *M = BB->getModule();
  651. Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()});
  652. return createCallHelper(Fn, {V}, this, Name, FMFSource);
  653. }
  654. CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
  655. Value *RHS,
  656. Instruction *FMFSource,
  657. const Twine &Name) {
  658. Module *M = BB->getModule();
  659. Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() });
  660. return createCallHelper(Fn, {LHS, RHS}, this, Name, FMFSource);
  661. }
  662. CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
  663. ArrayRef<Type *> Types,
  664. ArrayRef<Value *> Args,
  665. Instruction *FMFSource,
  666. const Twine &Name) {
  667. Module *M = BB->getModule();
  668. Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
  669. return createCallHelper(Fn, Args, this, Name, FMFSource);
  670. }
  671. CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
  672. Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
  673. const Twine &Name, MDNode *FPMathTag,
  674. Optional<RoundingMode> Rounding,
  675. Optional<fp::ExceptionBehavior> Except) {
  676. Value *RoundingV = getConstrainedFPRounding(Rounding);
  677. Value *ExceptV = getConstrainedFPExcept(Except);
  678. FastMathFlags UseFMF = FMF;
  679. if (FMFSource)
  680. UseFMF = FMFSource->getFastMathFlags();
  681. CallInst *C = CreateIntrinsic(ID, {L->getType()},
  682. {L, R, RoundingV, ExceptV}, nullptr, Name);
  683. setConstrainedFPCallAttr(C);
  684. setFPAttrs(C, FPMathTag, UseFMF);
  685. return C;
  686. }
  687. Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
  688. const Twine &Name, MDNode *FPMathTag) {
  689. if (Instruction::isBinaryOp(Opc)) {
  690. assert(Ops.size() == 2 && "Invalid number of operands!");
  691. return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
  692. Ops[0], Ops[1], Name, FPMathTag);
  693. }
  694. if (Instruction::isUnaryOp(Opc)) {
  695. assert(Ops.size() == 1 && "Invalid number of operands!");
  696. return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
  697. Ops[0], Name, FPMathTag);
  698. }
  699. llvm_unreachable("Unexpected opcode!");
  700. }
/// Emit a constrained floating-point cast intrinsic, appending the
/// rounding-mode operand only for those intrinsics whose table entry says
/// they take one, followed by the exception-behavior operand.
CallInst *IRBuilderBase::CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy,
    Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  // Fast-math flags come from FMFSource when given, else the builder's own.
  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C;

  // Consult the constrained-ops table to see whether this intrinsic carries
  // a rounding-mode operand.
  bool HasRoundingMD = false;
  switch (ID) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
                        nullptr, Name);
  } else
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
                        Name);

  setConstrainedFPCallAttr(C);

  // Only attach FP math attributes when the result is an FP operation.
  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}
  733. Value *IRBuilderBase::CreateFCmpHelper(
  734. CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name,
  735. MDNode *FPMathTag, bool IsSignaling) {
  736. if (IsFPConstrained) {
  737. auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
  738. : Intrinsic::experimental_constrained_fcmp;
  739. return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
  740. }
  741. if (auto *LC = dyn_cast<Constant>(LHS))
  742. if (auto *RC = dyn_cast<Constant>(RHS))
  743. return Insert(Folder.CreateFCmp(P, LC, RC), Name);
  744. return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
  745. }
  746. CallInst *IRBuilderBase::CreateConstrainedFPCmp(
  747. Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
  748. const Twine &Name, Optional<fp::ExceptionBehavior> Except) {
  749. Value *PredicateV = getConstrainedFPPredicate(P);
  750. Value *ExceptV = getConstrainedFPExcept(Except);
  751. CallInst *C = CreateIntrinsic(ID, {L->getType()},
  752. {L, R, PredicateV, ExceptV}, nullptr, Name);
  753. setConstrainedFPCallAttr(C);
  754. return C;
  755. }
/// Emit a call to a constrained floating-point intrinsic function, appending
/// the rounding-mode argument (only for intrinsics whose table entry says
/// they take one) and the exception-behavior argument after the caller's
/// arguments.
CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs;

  for (auto *OneArg : Args)
    UseArgs.push_back(OneArg);

  // Consult the constrained-ops table to see whether this intrinsic carries
  // a rounding-mode operand.
  bool HasRoundingMD = false;
  switch (Callee->getIntrinsicID()) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD)
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}
  780. Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
  781. const Twine &Name, Instruction *MDFrom) {
  782. if (auto *CC = dyn_cast<Constant>(C))
  783. if (auto *TC = dyn_cast<Constant>(True))
  784. if (auto *FC = dyn_cast<Constant>(False))
  785. return Insert(Folder.CreateSelect(CC, TC, FC), Name);
  786. SelectInst *Sel = SelectInst::Create(C, True, False);
  787. if (MDFrom) {
  788. MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
  789. MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
  790. Sel = addBranchMetadata(Sel, Prof, Unpred);
  791. }
  792. if (isa<FPMathOperator>(Sel))
  793. setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
  794. return Insert(Sel, Name);
  795. }
  796. Value *IRBuilderBase::CreatePtrDiff(Value *LHS, Value *RHS,
  797. const Twine &Name) {
  798. assert(LHS->getType() == RHS->getType() &&
  799. "Pointer subtraction operand types must match!");
  800. auto *ArgType = cast<PointerType>(LHS->getType());
  801. Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
  802. Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
  803. Value *Difference = CreateSub(LHS_int, RHS_int);
  804. return CreateExactSDiv(Difference,
  805. ConstantExpr::getSizeOf(ArgType->getElementType()),
  806. Name);
  807. }
  808. Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
  809. assert(isa<PointerType>(Ptr->getType()) &&
  810. "launder.invariant.group only applies to pointers.");
  811. // FIXME: we could potentially avoid casts to/from i8*.
  812. auto *PtrType = Ptr->getType();
  813. auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  814. if (PtrType != Int8PtrTy)
  815. Ptr = CreateBitCast(Ptr, Int8PtrTy);
  816. Module *M = BB->getParent()->getParent();
  817. Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
  818. M, Intrinsic::launder_invariant_group, {Int8PtrTy});
  819. assert(FnLaunderInvariantGroup->getReturnType() == Int8PtrTy &&
  820. FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
  821. Int8PtrTy &&
  822. "LaunderInvariantGroup should take and return the same type");
  823. CallInst *Fn = CreateCall(FnLaunderInvariantGroup, {Ptr});
  824. if (PtrType != Int8PtrTy)
  825. return CreateBitCast(Fn, PtrType);
  826. return Fn;
  827. }
  828. Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
  829. assert(isa<PointerType>(Ptr->getType()) &&
  830. "strip.invariant.group only applies to pointers.");
  831. // FIXME: we could potentially avoid casts to/from i8*.
  832. auto *PtrType = Ptr->getType();
  833. auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  834. if (PtrType != Int8PtrTy)
  835. Ptr = CreateBitCast(Ptr, Int8PtrTy);
  836. Module *M = BB->getParent()->getParent();
  837. Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
  838. M, Intrinsic::strip_invariant_group, {Int8PtrTy});
  839. assert(FnStripInvariantGroup->getReturnType() == Int8PtrTy &&
  840. FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
  841. Int8PtrTy &&
  842. "StripInvariantGroup should take and return the same type");
  843. CallInst *Fn = CreateCall(FnStripInvariantGroup, {Ptr});
  844. if (PtrType != Int8PtrTy)
  845. return CreateBitCast(Fn, PtrType);
  846. return Fn;
  847. }
  848. Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
  849. const Twine &Name) {
  850. auto EC = ElementCount::getFixed(NumElts);
  851. return CreateVectorSplat(EC, V, Name);
  852. }
  853. Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
  854. const Twine &Name) {
  855. assert(EC.isNonZero() && "Cannot splat to an empty vector!");
  856. // First insert it into a poison vector so we can shuffle it.
  857. Type *I32Ty = getInt32Ty();
  858. Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
  859. V = CreateInsertElement(Poison, V, ConstantInt::get(I32Ty, 0),
  860. Name + ".splatinsert");
  861. // Shuffle the value across the desired number of elements.
  862. SmallVector<int, 16> Zeros;
  863. Zeros.resize(EC.getKnownMinValue());
  864. return CreateShuffleVector(V, Zeros, Name + ".splat");
  865. }
  866. Value *IRBuilderBase::CreateExtractInteger(
  867. const DataLayout &DL, Value *From, IntegerType *ExtractedTy,
  868. uint64_t Offset, const Twine &Name) {
  869. auto *IntTy = cast<IntegerType>(From->getType());
  870. assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
  871. DL.getTypeStoreSize(IntTy) &&
  872. "Element extends past full value");
  873. uint64_t ShAmt = 8 * Offset;
  874. Value *V = From;
  875. if (DL.isBigEndian())
  876. ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
  877. DL.getTypeStoreSize(ExtractedTy) - Offset);
  878. if (ShAmt) {
  879. V = CreateLShr(V, ShAmt, Name + ".shift");
  880. }
  881. assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
  882. "Cannot extract to a larger integer!");
  883. if (ExtractedTy != IntTy) {
  884. V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
  885. }
  886. return V;
  887. }
  888. Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
  889. Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
  890. MDNode *DbgInfo) {
  891. assert(isa<PointerType>(Base->getType()) &&
  892. "Invalid Base ptr type for preserve.array.access.index.");
  893. auto *BaseType = Base->getType();
  894. Value *LastIndexV = getInt32(LastIndex);
  895. Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  896. SmallVector<Value *, 4> IdxList(Dimension, Zero);
  897. IdxList.push_back(LastIndexV);
  898. Type *ResultType =
  899. GetElementPtrInst::getGEPReturnType(ElTy, Base, IdxList);
  900. Module *M = BB->getParent()->getParent();
  901. Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
  902. M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});
  903. Value *DimV = getInt32(Dimension);
  904. CallInst *Fn =
  905. CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
  906. if (DbgInfo)
  907. Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
  908. return Fn;
  909. }
  910. Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
  911. Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
  912. assert(isa<PointerType>(Base->getType()) &&
  913. "Invalid Base ptr type for preserve.union.access.index.");
  914. auto *BaseType = Base->getType();
  915. Module *M = BB->getParent()->getParent();
  916. Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
  917. M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});
  918. Value *DIIndex = getInt32(FieldIndex);
  919. CallInst *Fn =
  920. CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
  921. if (DbgInfo)
  922. Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
  923. return Fn;
  924. }
  925. Value *IRBuilderBase::CreatePreserveStructAccessIndex(
  926. Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
  927. MDNode *DbgInfo) {
  928. assert(isa<PointerType>(Base->getType()) &&
  929. "Invalid Base ptr type for preserve.struct.access.index.");
  930. auto *BaseType = Base->getType();
  931. Value *GEPIndex = getInt32(Index);
  932. Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  933. Type *ResultType =
  934. GetElementPtrInst::getGEPReturnType(ElTy, Base, {Zero, GEPIndex});
  935. Module *M = BB->getParent()->getParent();
  936. Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
  937. M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});
  938. Value *DIIndex = getInt32(FieldIndex);
  939. CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
  940. {Base, GEPIndex, DIIndex});
  941. if (DbgInfo)
  942. Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
  943. return Fn;
  944. }
  945. CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
  946. Value *PtrValue,
  947. Value *AlignValue,
  948. Value *OffsetValue) {
  949. SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
  950. if (OffsetValue)
  951. Vals.push_back(OffsetValue);
  952. OperandBundleDefT<Value *> AlignOpB("align", Vals);
  953. return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
  954. }
  955. CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
  956. Value *PtrValue,
  957. unsigned Alignment,
  958. Value *OffsetValue) {
  959. assert(isa<PointerType>(PtrValue->getType()) &&
  960. "trying to create an alignment assumption on a non-pointer?");
  961. assert(Alignment != 0 && "Invalid Alignment");
  962. auto *PtrTy = cast<PointerType>(PtrValue->getType());
  963. Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
  964. Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
  965. return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
  966. }
/// Create an alignment assumption where the alignment is a runtime Value.
/// The value is passed through to the helper unchanged (no width conversion
/// is performed here).
CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   Value *Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
}
  975. IRBuilderDefaultInserter::~IRBuilderDefaultInserter() {}
  976. IRBuilderCallbackInserter::~IRBuilderCallbackInserter() {}
  977. IRBuilderFolder::~IRBuilderFolder() {}
  978. void ConstantFolder::anchor() {}
  979. void NoFolder::anchor() {}