//===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the IRBuilder class, which is used as a convenient way
// to create LLVM instructions with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/IRBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

using namespace llvm;

/// CreateGlobalString - Make a new global variable with an initializer that
/// has an array of i8 type filled in with the nul-terminated string value
/// specified. If Name is specified, it is the name of the global variable
/// created.
GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
                                                  const Twine &Name,
                                                  unsigned AddressSpace,
                                                  Module *M) {
  Constant *StrConstant = ConstantDataArray::getString(Context, Str);
  if (!M)
    M = BB->getParent()->getParent();
  auto *GV = new GlobalVariable(
      *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
      StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(Align(1));
  return GV;
}
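
// Example usage (illustrative sketch, not part of the original file; assumes
// an IRBuilder<> `B` whose insertion point is inside a function):
//   GlobalVariable *Msg = B.CreateGlobalString("hello", "msg");
//   // Decay the [N x i8] global to a pointer to its first character.
//   Value *Ptr = B.CreateConstGEP2_32(Msg->getValueType(), Msg, 0, 0);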

Type *IRBuilderBase::getCurrentFunctionReturnType() const {
  assert(BB && BB->getParent() && "No current function!");
  return BB->getParent()->getReturnType();
}

Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
  auto *PT = cast<PointerType>(Ptr->getType());
  if (PT->isOpaqueOrPointeeTypeMatches(getInt8Ty()))
    return Ptr;

  // Otherwise, we need to insert a bitcast.
  return CreateBitCast(Ptr, getInt8PtrTy(PT->getAddressSpace()));
}

DebugLoc IRBuilderBase::getCurrentDebugLocation() const {
  for (auto &KV : MetadataToCopy)
    if (KV.first == LLVMContext::MD_dbg)
      return {cast<DILocation>(KV.second)};

  return {};
}

void IRBuilderBase::SetInstDebugLocation(Instruction *I) const {
  for (const auto &KV : MetadataToCopy)
    if (KV.first == LLVMContext::MD_dbg) {
      I->setDebugLoc(DebugLoc(KV.second));
      return;
    }
}

CallInst *
IRBuilderBase::createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
                                const Twine &Name, Instruction *FMFSource,
                                ArrayRef<OperandBundleDef> OpBundles) {
  CallInst *CI = CreateCall(Callee, Ops, OpBundles, Name);
  if (FMFSource)
    CI->copyFastMathFlags(FMFSource);
  return CI;
}

Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
  assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
  if (cast<ConstantInt>(Scaling)->isZero())
    return Scaling;
  Module *M = GetInsertBlock()->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::vscale, {Scaling->getType()});
  CallInst *CI = CreateCall(TheFn, {}, {}, Name);
  return cast<ConstantInt>(Scaling)->getSExtValue() == 1
             ? CI
             : CreateMul(CI, Scaling);
}
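
// Example usage (illustrative sketch; `B` is an IRBuilder<> with a valid
// insertion point). Materializes `4 * vscale` as an i64, e.g. for a scalable
// vector trip count:
//   Value *VL = B.CreateVScale(ConstantInt::get(B.getInt64Ty(), 4));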

Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) {
  Type *STy = DstType->getScalarType();
  if (isa<ScalableVectorType>(DstType)) {
    Type *StepVecType = DstType;
    // TODO: We expect this special case (element type < 8 bits) to be
    // temporary - once the intrinsic properly supports < 8 bits this code
    // can be removed.
    if (STy->getScalarSizeInBits() < 8)
      StepVecType =
          VectorType::get(getInt8Ty(), cast<ScalableVectorType>(DstType));
    Value *Res = CreateIntrinsic(Intrinsic::experimental_stepvector,
                                 {StepVecType}, {}, nullptr, Name);
    if (StepVecType != DstType)
      Res = CreateTrunc(Res, DstType);
    return Res;
  }

  unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();

  // Create a vector of consecutive numbers from zero to VF.
  SmallVector<Constant *, 8> Indices;
  for (unsigned i = 0; i < NumEls; ++i)
    Indices.push_back(ConstantInt::get(STy, i));

  // Add the consecutive indices to the vector value.
  return ConstantVector::get(Indices);
}
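
// Example usage (illustrative sketch): for a fixed <4 x i32> request this
// folds to the constant <0, 1, 2, 3>; for a scalable type it emits the
// stepvector intrinsic instead:
//   Value *Step = B.CreateStepVector(FixedVectorType::get(B.getInt32Ty(), 4));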

CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
                                      MaybeAlign Align, bool isVolatile,
                                      MDNode *TBAATag, MDNode *ScopeTag,
                                      MDNode *NoAliasTag) {
  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  if (Align)
    cast<MemSetInst>(CI)->setDestAlignment(*Align);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
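
// Example usage (illustrative sketch; `Dst` is any pointer value): zero 64
// bytes through `Dst` with a known 16-byte destination alignment:
//   B.CreateMemSet(Dst, B.getInt8(0), B.getInt64(64), MaybeAlign(16));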

CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign,
                                            Value *Val, Value *Size,
                                            bool IsVolatile, MDNode *TBAATag,
                                            MDNode *ScopeTag,
                                            MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Value *Ops[] = {Dst, Val, Size, getInt1(IsVolatile)};
  Type *Tys[] = {Dst->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset_inline, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  if (DstAlign)
    cast<MemSetInlineInst>(CI)->setDestAlignment(*DstAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
    Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
    MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memset_element_unordered_atomic, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateMemTransferInst(
    Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
    MaybeAlign SrcAlign, Value *Size, bool isVolatile, MDNode *TBAATag,
    MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, IntrID, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  auto *MCI = cast<MemTransferInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
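
// Example usage (illustrative sketch): the CreateMemCpy/CreateMemMove
// convenience wrappers in IRBuilder.h funnel into CreateMemTransferInst; an
// 8-byte-aligned, 32-byte copy looks like:
//   B.CreateMemTransferInst(Intrinsic::memcpy, Dst, MaybeAlign(8), Src,
//                           MaybeAlign(8), B.getInt64(32));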

CallInst *IRBuilderBase::CreateMemCpyInline(
    Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign,
    Value *Size, bool IsVolatile, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(IsVolatile)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Function *F = BB->getParent();
  Module *M = F->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy_inline, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  auto *MCI = cast<MemCpyInlineInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    MCI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    MCI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    MCI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    MCI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memcpy_element_unordered_atomic, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  // Set the alignment of the pointer args.
  auto *AMCI = cast<AtomicMemCpyInst>(CI);
  AMCI->setDestAlignment(DstAlign);
  AMCI->setSourceAlignment(SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateMemMove(Value *Dst, MaybeAlign DstAlign,
                                       Value *Src, MaybeAlign SrcAlign,
                                       Value *Size, bool isVolatile,
                                       MDNode *TBAATag, MDNode *ScopeTag,
                                       MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  auto *MMI = cast<MemMoveInst>(CI);
  if (DstAlign)
    MMI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MMI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memmove_element_unordered_atomic, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  // Set the alignment of the pointer args.
  CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
  CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::getReductionIntrinsic(Intrinsic::ID ID, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Src};
  Type *Tys[] = {Src->getType()};
  auto Decl = Intrinsic::getDeclaration(M, ID, Tys);
  return CreateCall(Decl, Ops);
}

CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fadd,
                                        {Src->getType()});
  return CreateCall(Decl, Ops);
}

CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fmul,
                                        {Src->getType()});
  return CreateCall(Decl, Ops);
}

CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_add, Src);
}

CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_mul, Src);
}

CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_and, Src);
}

CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_or, Src);
}

CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_xor, Src);
}

CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
  return getReductionIntrinsic(ID, Src);
}

CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
  return getReductionIntrinsic(ID, Src);
}

CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmax, Src);
}

CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmin, Src);
}
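
// Example usage (illustrative sketch; `Vec` is a <4 x i32> value and `FVec`
// a <4 x float> value): horizontal reductions across all lanes:
//   Value *Sum  = B.CreateAddReduce(Vec);
//   Value *FSum = B.CreateFAddReduce(ConstantFP::get(B.getFloatTy(), 0.0),
//                                    FVec);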

CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.start requires the size to be an i64");
  Value *Ops[] = {Size, Ptr};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, {Ptr->getType()});
  return CreateCall(TheFn, Ops);
}

CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.end only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.end requires the size to be an i64");
  Value *Ops[] = {Size, Ptr};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, {Ptr->getType()});
  return CreateCall(TheFn, Ops);
}
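
// Example usage (illustrative sketch; `Slot` is an alloca covering 64 bytes):
// bracket the range over which the stack slot is considered live:
//   B.CreateLifetimeStart(Slot, B.getInt64(64));
//   // ... loads/stores through Slot ...
//   B.CreateLifetimeEnd(Slot, B.getInt64(64));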

CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "invariant.start requires the size to be an i64");

  Value *Ops[] = {Size, Ptr};
  // Fill in the single overloaded type: memory object type.
  Type *ObjectPtr[1] = {Ptr->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
  return CreateCall(TheFn, Ops);
}

static MaybeAlign getAlign(Value *Ptr) {
  if (auto *O = dyn_cast<GlobalObject>(Ptr))
    return O->getAlign();
  if (auto *A = dyn_cast<GlobalAlias>(Ptr))
    return A->getAliaseeObject()->getAlign();
  return {};
}

CallInst *IRBuilderBase::CreateThreadLocalAddress(Value *Ptr) {
#ifndef NDEBUG
  // Handle the constexpr-cast case specially. It can occur when opaque
  // pointers are not enabled, since constants may be sunk directly by design
  // in LLVM. This special case can be removed once the abuse of constant
  // expressions is eliminated.
  auto *V = Ptr;
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->isCast())
      V = CE->getOperand(0);

  assert(isa<GlobalValue>(V) && cast<GlobalValue>(V)->isThreadLocal() &&
         "threadlocal_address only applies to thread local variables.");
#endif
  CallInst *CI = CreateIntrinsic(llvm::Intrinsic::threadlocal_address,
                                 {Ptr->getType()}, {Ptr});
  if (MaybeAlign A = getAlign(Ptr)) {
    CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), *A));
    CI->addRetAttr(Attribute::getWithAlignment(CI->getContext(), *A));
  }
  return CI;
}

CallInst *
IRBuilderBase::CreateAssumption(Value *Cond,
                                ArrayRef<OperandBundleDef> OpBundles) {
  assert(Cond->getType() == getInt1Ty() &&
         "an assumption condition must be of type i1");

  Value *Ops[] = {Cond};
  Module *M = BB->getParent()->getParent();
  Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
  return CreateCall(FnAssume, Ops, OpBundles);
}

Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
  Module *M = BB->getModule();
  auto *FnIntrinsic = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_noalias_scope_decl, {});
  return CreateCall(FnIntrinsic, {Scope});
}

/// Create a call to a Masked Load intrinsic.
/// \p Ty        - vector type to load
/// \p Ptr       - base pointer for the load
/// \p Alignment - alignment of the source location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
/// \p PassThru  - pass-through value that is used to fill the masked-off lanes
///                of the result
/// \p Name      - name of the result variable
CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment,
                                          Value *Mask, Value *PassThru,
                                          const Twine &Name) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(PtrTy->isOpaqueOrPointeeTypeMatches(Ty) && "Wrong element type");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = PoisonValue::get(Ty);
  Type *OverloadedTypes[] = {Ty, PtrTy};
  Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
                               OverloadedTypes, Name);
}
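
// Example usage (illustrative sketch; `VecTy` is <4 x i32> and `Mask` a
// <4 x i1> value): masked-off lanes come from PassThru, which defaults to
// poison when null:
//   Value *V = B.CreateMaskedLoad(VecTy, Ptr, Align(16), Mask,
//                                 /*PassThru=*/nullptr, "mload");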

/// Create a call to a Masked Store intrinsic.
/// \p Val       - data to be stored,
/// \p Ptr       - base pointer for the store
/// \p Alignment - alignment of the destination location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
                                           Align Alignment, Value *Mask) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = Val->getType();
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(PtrTy->isOpaqueOrPointeeTypeMatches(DataTy) && "Wrong element type");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = {DataTy, PtrTy};
  Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
  return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
}

/// Create a call to a Masked intrinsic, with given intrinsic Id,
/// an array of operands - Ops, and an array of overloaded types -
/// OverloadedTypes.
CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
                                               ArrayRef<Value *> Ops,
                                               ArrayRef<Type *> OverloadedTypes,
                                               const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
  return CreateCall(TheFn, Ops, {}, Name);
}

/// Create a call to a Masked Gather intrinsic.
/// \p Ty       - vector type to gather
/// \p Ptrs     - vector of pointers for loading
/// \p Align    - alignment for one element
/// \p Mask     - vector of booleans which indicates what vector lanes should
///               be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
///               of the result
/// \p Name     - name of the result variable
CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs,
                                            Align Alignment, Value *Mask,
                                            Value *PassThru,
                                            const Twine &Name) {
  auto *VecTy = cast<VectorType>(Ty);
  ElementCount NumElts = VecTy->getElementCount();
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  assert(cast<PointerType>(PtrsTy->getElementType())
             ->isOpaqueOrPointeeTypeMatches(
                 cast<VectorType>(Ty)->getElementType()) &&
         "Element type mismatch");
  assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");

  if (!Mask)
    Mask = Constant::getAllOnesValue(
        VectorType::get(Type::getInt1Ty(Context), NumElts));

  if (!PassThru)
    PassThru = PoisonValue::get(Ty);

  Type *OverloadedTypes[] = {Ty, PtrsTy};
  Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
                               Name);
}
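
// Example usage (illustrative sketch; `Ptrs` is a vector of pointers with the
// same element count as `VecTy`): a null mask means all-ones, i.e. every lane
// is loaded:
//   Value *G = B.CreateMaskedGather(VecTy, Ptrs, Align(4), /*Mask=*/nullptr,
//                                   /*PassThru=*/nullptr, "gather");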

/// Create a call to a Masked Scatter intrinsic.
/// \p Data  - data to be stored,
/// \p Ptrs  - the vector of pointers, where the \p Data elements should be
///            stored
/// \p Align - alignment for one element
/// \p Mask  - vector of booleans which indicates what vector lanes should
///            be accessed in memory
CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
                                             Align Alignment, Value *Mask) {
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  auto *DataTy = cast<VectorType>(Data->getType());
  ElementCount NumElts = PtrsTy->getElementCount();

#ifndef NDEBUG
  auto *PtrTy = cast<PointerType>(PtrsTy->getElementType());
  assert(NumElts == DataTy->getElementCount() &&
         PtrTy->isOpaqueOrPointeeTypeMatches(DataTy->getElementType()) &&
         "Incompatible pointer and data types");
#endif

  if (!Mask)
    Mask = Constant::getAllOnesValue(
        VectorType::get(Type::getInt1Ty(Context), NumElts));

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
}

/// Create a call to Masked Expand Load intrinsic
/// \p Ty       - vector type to load
/// \p Ptr      - base pointer for the load
/// \p Mask     - vector of booleans which indicates what vector lanes should
///               be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
///               of the result
/// \p Name     - name of the result variable
CallInst *IRBuilderBase::CreateMaskedExpandLoad(Type *Ty, Value *Ptr,
                                                Value *Mask, Value *PassThru,
                                                const Twine &Name) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(PtrTy->isOpaqueOrPointeeTypeMatches(
             cast<FixedVectorType>(Ty)->getElementType()) &&
         "Wrong element type");
  (void)PtrTy;
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = PoisonValue::get(Ty);
  Type *OverloadedTypes[] = {Ty};
  Value *Ops[] = {Ptr, Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_expandload, Ops,
                               OverloadedTypes, Name);
}

/// Create a call to Masked Compress Store intrinsic
/// \p Val  - data to be stored,
/// \p Ptr  - base pointer for the store
/// \p Mask - vector of booleans which indicates what vector lanes should
///           be accessed in memory
CallInst *IRBuilderBase::CreateMaskedCompressStore(Value *Val, Value *Ptr,
                                                   Value *Mask) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = Val->getType();
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(PtrTy->isOpaqueOrPointeeTypeMatches(
             cast<FixedVectorType>(DataTy)->getElementType()) &&
         "Wrong element type");
  (void)PtrTy;
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = {DataTy};
  Value *Ops[] = {Val, Ptr, Mask};
  return CreateMaskedIntrinsic(Intrinsic::masked_compressstore, Ops,
                               OverloadedTypes);
}

template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  llvm::append_range(Args, CallArgs);
  // GC Transition and Deopt args are now always handled via operand bundle.
  // They will be removed from the signature of gc.statepoint shortly.
  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
  // GC args are now encoded in the gc-live operand bundle.
  return Args;
}
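
// For reference, the gc.statepoint call-site layout produced above is:
//   (ID, NumPatchBytes, ActualCallee, NumCallArgs, Flags, CallArgs...,
//    0 /* transition args */, 0 /* deopt args */)
// with the actual transition/deopt/gc-live values attached as operand bundles
// by getStatepointBundles() below.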

template <typename T1, typename T2, typename T3>
static std::vector<OperandBundleDef>
getStatepointBundles(std::optional<ArrayRef<T1>> TransitionArgs,
                     std::optional<ArrayRef<T2>> DeoptArgs,
                     ArrayRef<T3> GCArgs) {
  std::vector<OperandBundleDef> Rval;
  if (DeoptArgs) {
    SmallVector<Value *, 16> DeoptValues;
    llvm::append_range(DeoptValues, *DeoptArgs);
    Rval.emplace_back("deopt", DeoptValues);
  }
  if (TransitionArgs) {
    SmallVector<Value *, 16> TransitionValues;
    llvm::append_range(TransitionValues, *TransitionArgs);
    Rval.emplace_back("gc-transition", TransitionValues);
  }
  if (GCArgs.size()) {
    SmallVector<Value *, 16> LiveValues;
    llvm::append_range(LiveValues, GCArgs);
    Rval.emplace_back("gc-live", LiveValues);
  }
  return Rval;
}

template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg).
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                {ActualCallee.getCallee()->getType()});

  std::vector<Value *> Args = getStatepointArgs(
      *Builder, ID, NumPatchBytes, ActualCallee.getCallee(), Flags, CallArgs);

  CallInst *CI = Builder->CreateCall(
      FnStatepoint, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  CI->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualCallee.getFunctionType()));
  return CI;
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    ArrayRef<Value *> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, std::nullopt /* No Transition Args */, DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    uint32_t Flags, ArrayRef<Value *> CallArgs,
    std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    ArrayRef<Use> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, std::nullopt, DeoptArgs, GCArgs, Name);
}

template <typename T0, typename T1, typename T2, typename T3>
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualInvokee, BasicBlock *NormalDest,
    BasicBlock *UnwindDest, uint32_t Flags, ArrayRef<T0> InvokeArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg).
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                {ActualInvokee.getCallee()->getType()});

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee.getCallee(),
                        Flags, InvokeArgs);

  InvokeInst *II = Builder->CreateInvoke(
      FnStatepoint, NormalDest, UnwindDest, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  II->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualInvokee.getFunctionType()));
  return II;
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs,
      std::nullopt /* No Transition Args */, DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
    std::optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, std::nullopt, DeoptArgs,
      GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
                                        Type *ResultType, const Twine &Name) {
  Intrinsic::ID ID = Intrinsic::experimental_gc_result;
  Module *M = BB->getParent()->getParent();
  Type *Types[] = {ResultType};
  Function *FnGCResult = Intrinsic::getDeclaration(M, ID, Types);
  Value *Args[] = {Statepoint};
  return CreateCall(FnGCResult, Args, {}, Name);
}

CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
                                          int BaseOffset, int DerivedOffset,
                                          Type *ResultType, const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *Types[] = {ResultType};
  Function *FnGCRelocate =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);
  Value *Args[] = {Statepoint, getInt32(BaseOffset), getInt32(DerivedOffset)};
  return CreateCall(FnGCRelocate, Args, {}, Name);
}

CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
                                                const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCFindBase = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_get_pointer_base, {PtrTy, PtrTy});
  return CreateCall(FnGCFindBase, {DerivedPtr}, {}, Name);
}

CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
                                                  const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCGetOffset = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_get_pointer_offset, {PtrTy});
  return CreateCall(FnGCGetOffset, {DerivedPtr}, {}, Name);
}

CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
                                              Instruction *FMFSource,
                                              const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()});
  return createCallHelper(Fn, {V}, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
                                               Value *RHS,
                                               Instruction *FMFSource,
                                               const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, {LHS->getType()});
  return createCallHelper(Fn, {LHS, RHS}, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
                                         ArrayRef<Type *> Types,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
  return createCallHelper(Fn, Args, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateIntrinsic(Type *RetTy, Intrinsic::ID ID,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();

  SmallVector<Intrinsic::IITDescriptor> Table;
  Intrinsic::getIntrinsicInfoTableEntries(ID, Table);
  ArrayRef<Intrinsic::IITDescriptor> TableRef(Table);

  SmallVector<Type *> ArgTys;
  ArgTys.reserve(Args.size());
  for (auto &I : Args)
    ArgTys.push_back(I->getType());
  FunctionType *FTy = FunctionType::get(RetTy, ArgTys, false);
  SmallVector<Type *> OverloadTys;
  Intrinsic::MatchIntrinsicTypesResult Res =
      matchIntrinsicSignature(FTy, TableRef, OverloadTys);
  (void)Res;
  assert(Res == Intrinsic::MatchIntrinsicTypes_Match && TableRef.empty() &&
         "Wrong types for intrinsic!");
  // TODO: Handle varargs intrinsics.

  Function *Fn = Intrinsic::getDeclaration(M, ID, OverloadTys);
  return createCallHelper(Fn, Args, Name, FMFSource);
}
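
// Example usage (illustrative sketch): with this overload the overloaded
// intrinsic types are inferred from the return and argument types; e.g. for
// an i32 value `X`:
//   CallInst *Pop = B.CreateIntrinsic(B.getInt32Ty(), Intrinsic::ctpop, {X});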

CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
    Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
    const Twine &Name, MDNode *FPMathTag,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *RoundingV = getConstrainedFPRounding(Rounding);
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, RoundingV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}
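
// Example usage (illustrative sketch; `L` and `R` are float values): this is
// what CreateFAdd lowers to once the builder is put in constrained-FP mode:
//   B.setIsFPConstrained(true);
//   CallInst *Sum = B.CreateConstrainedFPBinOp(
//       Intrinsic::experimental_constrained_fadd, L, R);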

Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                                   const Twine &Name, MDNode *FPMathTag) {
  if (Instruction::isBinaryOp(Opc)) {
    assert(Ops.size() == 2 && "Invalid number of operands!");
    return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
                       Ops[0], Ops[1], Name, FPMathTag);
  }
  if (Instruction::isUnaryOp(Opc)) {
    assert(Ops.size() == 1 && "Invalid number of operands!");
    return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
                      Ops[0], Name, FPMathTag);
  }
  llvm_unreachable("Unexpected opcode!");
}

CallInst *IRBuilderBase::CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy,
    Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C;
  bool HasRoundingMD = false;
  switch (ID) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
  case Intrinsic::INTRINSIC: \
    HasRoundingMD = ROUND_MODE; \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
                        nullptr, Name);
  } else
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
                        Name);

  setConstrainedFPCallAttr(C);

  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

Value *IRBuilderBase::CreateFCmpHelper(
    CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name,
    MDNode *FPMathTag, bool IsSignaling) {
  if (IsFPConstrained) {
    auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
                          : Intrinsic::experimental_constrained_fcmp;
    return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
  }

  if (auto *LC = dyn_cast<Constant>(LHS))
    if (auto *RC = dyn_cast<Constant>(RHS))
      return Insert(Folder.CreateFCmp(P, LC, RC), Name);
  return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
}

CallInst *IRBuilderBase::CreateConstrainedFPCmp(
    Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
    const Twine &Name, std::optional<fp::ExceptionBehavior> Except) {
  Value *PredicateV = getConstrainedFPPredicate(P);
  Value *ExceptV = getConstrainedFPExcept(Except);

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, PredicateV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  return C;
}

CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs;

  append_range(UseArgs, Args);
  bool HasRoundingMD = false;
  switch (Callee->getIntrinsicID()) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
  case Intrinsic::INTRINSIC: \
    HasRoundingMD = ROUND_MODE; \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD)
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}

Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
                                   const Twine &Name, Instruction *MDFrom) {
  if (auto *V = Folder.FoldSelect(C, True, False))
    return V;

  SelectInst *Sel = SelectInst::Create(C, True, False);
  if (MDFrom) {
    MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
    MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
    Sel = addBranchMetadata(Sel, Prof, Unpred);
  }
  if (isa<FPMathOperator>(Sel))
    setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
  return Insert(Sel, Name);
}
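
// Example usage (illustrative sketch; `A` and `C` are i32 values): a select
// folds to a constant when the folder can, otherwise a select instruction is
// inserted:
//   Value *Min = B.CreateSelect(B.CreateICmpSLT(A, C), A, C, "min");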

Value *IRBuilderBase::CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
                                    const Twine &Name) {
  assert(LHS->getType() == RHS->getType() &&
         "Pointer subtraction operand types must match!");
  assert(cast<PointerType>(LHS->getType())
             ->isOpaqueOrPointeeTypeMatches(ElemTy) &&
         "Pointer type must match element type");
  Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
  Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
  Value *Difference = CreateSub(LHS_int, RHS_int);
  return CreateExactSDiv(Difference, ConstantExpr::getSizeOf(ElemTy), Name);
}
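
// Example usage (illustrative sketch): the result is an element count, not a
// byte count; with i32 elements, pointers 8 bytes apart yield 2:
//   Value *Len = B.CreatePtrDiff(B.getInt32Ty(), End, Begin, "len");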

Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "launder.invariant.group only applies to pointers.");
  // FIXME: we could potentially avoid casts to/from i8*.
  auto *PtrType = Ptr->getType();
  auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  if (PtrType != Int8PtrTy)
    Ptr = CreateBitCast(Ptr, Int8PtrTy);
  Module *M = BB->getParent()->getParent();
  Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::launder_invariant_group, {Int8PtrTy});

  assert(FnLaunderInvariantGroup->getReturnType() == Int8PtrTy &&
         FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
             Int8PtrTy &&
         "LaunderInvariantGroup should take and return the same type");

  CallInst *Fn = CreateCall(FnLaunderInvariantGroup, {Ptr});

  if (PtrType != Int8PtrTy)
    return CreateBitCast(Fn, PtrType);
  return Fn;
}

Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "strip.invariant.group only applies to pointers.");

  // FIXME: we could potentially avoid casts to/from i8*.
  auto *PtrType = Ptr->getType();
  auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  if (PtrType != Int8PtrTy)
    Ptr = CreateBitCast(Ptr, Int8PtrTy);
  Module *M = BB->getParent()->getParent();
  Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::strip_invariant_group, {Int8PtrTy});

  assert(FnStripInvariantGroup->getReturnType() == Int8PtrTy &&
         FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
             Int8PtrTy &&
         "StripInvariantGroup should take and return the same type");

  CallInst *Fn = CreateCall(FnStripInvariantGroup, {Ptr});

  if (PtrType != Int8PtrTy)
    return CreateBitCast(Fn, PtrType);
  return Fn;
}

Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
  auto *Ty = cast<VectorType>(V->getType());
  if (isa<ScalableVectorType>(Ty)) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_reverse, Ty);
    return Insert(CallInst::Create(F, V), Name);
  }
  // Keep the original behaviour for fixed vectors.
  SmallVector<int, 8> ShuffleMask;
  int NumElts = Ty->getElementCount().getKnownMinValue();
  for (int i = 0; i < NumElts; ++i)
    ShuffleMask.push_back(NumElts - i - 1);
  return CreateShuffleVector(V, ShuffleMask, Name);
}

Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
                                         const Twine &Name) {
  assert(isa<VectorType>(V1->getType()) && "Unexpected type");
  assert(V1->getType() == V2->getType() &&
         "Splice expects matching operand types!");

  if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_splice, VTy);

    Value *Ops[] = {V1, V2, getInt32(Imm)};
    return Insert(CallInst::Create(F, Ops), Name);
  }

  unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
  // The valid splice immediate range is [-NumElts, NumElts).
  assert((-Imm <= (int64_t)NumElts && Imm < (int64_t)NumElts) &&
         "Invalid immediate for vector splice!");

  // Keep the original behaviour for fixed vectors.
  unsigned Idx = (NumElts + Imm) % NumElts;
  SmallVector<int, 8> Mask;
  for (unsigned I = 0; I < NumElts; ++I)
    Mask.push_back(Idx + I);

  return CreateShuffleVector(V1, V2, Mask);
}

Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
                                        const Twine &Name) {
  auto EC = ElementCount::getFixed(NumElts);
  return CreateVectorSplat(EC, V, Name);
}

Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
                                        const Twine &Name) {
  assert(EC.isNonZero() && "Cannot splat to an empty vector!");

  // First insert it into a poison vector so we can shuffle it.
  Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
  V = CreateInsertElement(Poison, V, getInt64(0), Name + ".splatinsert");

  // Shuffle the value across the desired number of elements.
  SmallVector<int, 16> Zeros;
  Zeros.resize(EC.getKnownMinValue());
  return CreateShuffleVector(V, Zeros, Name + ".splat");
}
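
// Example usage (illustrative sketch; `X` is a scalar i32 value): builds a
// <4 x i32> with every lane equal to X via insertelement + shufflevector:
//   Value *Splat = B.CreateVectorSplat(4, X, "x");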

Value *IRBuilderBase::CreateExtractInteger(
    const DataLayout &DL, Value *From, IntegerType *ExtractedTy,
    uint64_t Offset, const Twine &Name) {
  auto *IntTy = cast<IntegerType>(From->getType());
  assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
             DL.getTypeStoreSize(IntTy) &&
         "Element extends past full value");
  uint64_t ShAmt = 8 * Offset;
  Value *V = From;
  if (DL.isBigEndian())
    ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
                 DL.getTypeStoreSize(ExtractedTy) - Offset);
  if (ShAmt) {
    V = CreateLShr(V, ShAmt, Name + ".shift");
  }
  assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot extract to a larger integer!");
  if (ExtractedTy != IntTy) {
    V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
  }
  return V;
}
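
// Example usage (illustrative sketch; `Wide` is an i64 value and `DL` the
// module's DataLayout): extract the 16 bits stored at byte offset 2, honoring
// the target's endianness:
//   Value *Part = B.CreateExtractInteger(DL, Wide, B.getInt16Ty(), 2, "part");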

Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
    Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.array.access.index.");
  assert(cast<PointerType>(BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
         "Pointer element type mismatch");

  Value *LastIndexV = getInt32(LastIndex);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  SmallVector<Value *, 4> IdxList(Dimension, Zero);
  IdxList.push_back(LastIndexV);

  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(ElTy, Base, IdxList);

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});

  Value *DimV = getInt32(Dimension);
  CallInst *Fn =
      CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
    Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.union.access.index.");
  auto *BaseType = Base->getType();

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn =
      CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::CreatePreserveStructAccessIndex(
    Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.struct.access.index.");
  assert(cast<PointerType>(BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
         "Pointer element type mismatch");

  Value *GEPIndex = getInt32(Index);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(ElTy, Base, {Zero, GEPIndex});

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
                            {Base, GEPIndex, DIIndex});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                                         Value *PtrValue,
                                                         Value *AlignValue,
                                                         Value *OffsetValue) {
  SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
  if (OffsetValue)
    Vals.push_back(OffsetValue);
  OperandBundleDefT<Value *> AlignOpB("align", Vals);
  return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
}

CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   unsigned Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  assert(Alignment != 0 && "Invalid Alignment");
  auto *PtrTy = cast<PointerType>(PtrValue->getType());
  Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
  Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
  return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
}

CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   Value *Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
}
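
// Example usage (illustrative sketch): tell the optimizer that `Ptr` is
// 16-byte aligned; this materializes as an llvm.assume call carrying an
// "align" operand bundle:
//   B.CreateAlignmentAssumption(DL, Ptr, /*Alignment=*/16);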

IRBuilderDefaultInserter::~IRBuilderDefaultInserter() = default;
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() = default;
IRBuilderFolder::~IRBuilderFolder() = default;
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}