  1. //===- StatepointLowering.cpp - SDAGBuilder's statepoint code -------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file includes support code used by SelectionDAGBuilder when lowering a
  10. // statepoint sequence in SelectionDAG IR.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "StatepointLowering.h"
  14. #include "SelectionDAGBuilder.h"
  15. #include "llvm/ADT/ArrayRef.h"
  16. #include "llvm/ADT/STLExtras.h"
  17. #include "llvm/ADT/SetVector.h"
  18. #include "llvm/ADT/SmallBitVector.h"
  19. #include "llvm/ADT/SmallSet.h"
  20. #include "llvm/ADT/SmallVector.h"
  21. #include "llvm/ADT/Statistic.h"
  22. #include "llvm/CodeGen/FunctionLoweringInfo.h"
  23. #include "llvm/CodeGen/GCMetadata.h"
  24. #include "llvm/CodeGen/ISDOpcodes.h"
  25. #include "llvm/CodeGen/MachineFrameInfo.h"
  26. #include "llvm/CodeGen/MachineFunction.h"
  27. #include "llvm/CodeGen/MachineMemOperand.h"
  28. #include "llvm/CodeGen/RuntimeLibcalls.h"
  29. #include "llvm/CodeGen/SelectionDAG.h"
  30. #include "llvm/CodeGen/SelectionDAGNodes.h"
  31. #include "llvm/CodeGen/StackMaps.h"
  32. #include "llvm/CodeGen/TargetLowering.h"
  33. #include "llvm/CodeGen/TargetOpcodes.h"
  34. #include "llvm/IR/CallingConv.h"
  35. #include "llvm/IR/DerivedTypes.h"
  36. #include "llvm/IR/GCStrategy.h"
  37. #include "llvm/IR/Instruction.h"
  38. #include "llvm/IR/Instructions.h"
  39. #include "llvm/IR/LLVMContext.h"
  40. #include "llvm/IR/Statepoint.h"
  41. #include "llvm/IR/Type.h"
  42. #include "llvm/Support/Casting.h"
  43. #include "llvm/Support/CommandLine.h"
  44. #include "llvm/Support/MachineValueType.h"
  45. #include "llvm/Target/TargetMachine.h"
  46. #include "llvm/Target/TargetOptions.h"
  47. #include <cassert>
  48. #include <cstddef>
  49. #include <cstdint>
  50. #include <iterator>
  51. #include <tuple>
  52. #include <utility>
  53. using namespace llvm;
  54. #define DEBUG_TYPE "statepoint-lowering"
  55. STATISTIC(NumSlotsAllocatedForStatepoints,
  56. "Number of stack slots allocated for statepoints");
  57. STATISTIC(NumOfStatepoints, "Number of statepoint nodes encountered");
  58. STATISTIC(StatepointMaxSlotsRequired,
  59. "Maximum number of stack slots required for a singe statepoint");
  60. cl::opt<bool> UseRegistersForDeoptValues(
  61. "use-registers-for-deopt-values", cl::Hidden, cl::init(false),
  62. cl::desc("Allow using registers for non pointer deopt args"));
  63. cl::opt<bool> UseRegistersForGCPointersInLandingPad(
  64. "use-registers-for-gc-values-in-landing-pad", cl::Hidden, cl::init(false),
  65. cl::desc("Allow using registers for gc pointer in landing pad"));
  66. cl::opt<unsigned> MaxRegistersForGCPointers(
  67. "max-registers-for-gc-values", cl::Hidden, cl::init(0),
  68. cl::desc("Max number of VRegs allowed to pass GC pointer meta args in"));
  69. typedef FunctionLoweringInfo::StatepointRelocationRecord RecordType;
  70. static void pushStackMapConstant(SmallVectorImpl<SDValue>& Ops,
  71. SelectionDAGBuilder &Builder, uint64_t Value) {
  72. SDLoc L = Builder.getCurSDLoc();
  73. Ops.push_back(Builder.DAG.getTargetConstant(StackMaps::ConstantOp, L,
  74. MVT::i64));
  75. Ops.push_back(Builder.DAG.getTargetConstant(Value, L, MVT::i64));
  76. }
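// Added annotation (not part of the original source): pushStackMapConstant
// encodes one literal for the stackmap as a *pair* of TargetConstant operands,
// a StackMaps::ConstantOp marker followed by the 64-bit value. For example, a
// hypothetical call
//   pushStackMapConstant(Ops, Builder, 3);
// appends the equivalent of <ConstantOp, 3> to Ops; the StackMaps emitter
// later folds the pair back into a single constant location.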
  77. void StatepointLoweringState::startNewStatepoint(SelectionDAGBuilder &Builder) {
  78. // Consistency check
  79. assert(PendingGCRelocateCalls.empty() &&
  80. "Trying to visit statepoint before finished processing previous one");
  81. Locations.clear();
  82. NextSlotToAllocate = 0;
  83. // Need to resize this on each safepoint - we need the two to stay in sync and
  84. // the clear patterns of a SelectionDAGBuilder have no relation to
  85. // FunctionLoweringInfo. Also need to ensure used bits get cleared.
  86. AllocatedStackSlots.clear();
  87. AllocatedStackSlots.resize(Builder.FuncInfo.StatepointStackSlots.size());
  88. }
  89. void StatepointLoweringState::clear() {
  90. Locations.clear();
  91. AllocatedStackSlots.clear();
  92. assert(PendingGCRelocateCalls.empty() &&
  93. "cleared before statepoint sequence completed");
  94. }
  95. SDValue
  96. StatepointLoweringState::allocateStackSlot(EVT ValueType,
  97. SelectionDAGBuilder &Builder) {
  98. NumSlotsAllocatedForStatepoints++;
  99. MachineFrameInfo &MFI = Builder.DAG.getMachineFunction().getFrameInfo();
  100. unsigned SpillSize = ValueType.getStoreSize();
  101. assert((SpillSize * 8) ==
  102. (-8u & (7 + ValueType.getSizeInBits())) && // Round up modulo 8.
  103. "Size not in bytes?");
  104. // First look for a previously created stack slot which is not in
  105. // use (accounting for the fact that arbitrary slots may already be
  106. // reserved); if none is found, create a new stack slot and use it.
  107. const size_t NumSlots = AllocatedStackSlots.size();
  108. assert(NextSlotToAllocate <= NumSlots && "Broken invariant");
  109. assert(AllocatedStackSlots.size() ==
  110. Builder.FuncInfo.StatepointStackSlots.size() &&
  111. "Broken invariant");
  112. for (; NextSlotToAllocate < NumSlots; NextSlotToAllocate++) {
  113. if (!AllocatedStackSlots.test(NextSlotToAllocate)) {
  114. const int FI = Builder.FuncInfo.StatepointStackSlots[NextSlotToAllocate];
  115. if (MFI.getObjectSize(FI) == SpillSize) {
  116. AllocatedStackSlots.set(NextSlotToAllocate);
  117. // TODO: Is ValueType the right thing to use here?
  118. return Builder.DAG.getFrameIndex(FI, ValueType);
  119. }
  120. }
  121. }
  122. // Couldn't find a free slot, so create a new one:
  123. SDValue SpillSlot = Builder.DAG.CreateStackTemporary(ValueType);
  124. const unsigned FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
  125. MFI.markAsStatepointSpillSlotObjectIndex(FI);
  126. Builder.FuncInfo.StatepointStackSlots.push_back(FI);
  127. AllocatedStackSlots.resize(AllocatedStackSlots.size()+1, true);
  128. assert(AllocatedStackSlots.size() ==
  129. Builder.FuncInfo.StatepointStackSlots.size() &&
  130. "Broken invariant");
  131. StatepointMaxSlotsRequired.updateMax(
  132. Builder.FuncInfo.StatepointStackSlots.size());
  133. return SpillSlot;
  134. }
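// Added annotation (illustrative, not part of the original source):
// AllocatedStackSlots and FuncInfo.StatepointStackSlots are kept index-aligned,
// so a slot created for one statepoint can be reused by a later one. Roughly:
//   startNewStatepoint(Builder);        // clears the in-use bits
//   allocateStackSlot(MVT::i64, B);     // reuses StatepointStackSlots[0] if that
//                                       // slot is 8 bytes wide, else makes a new one
// A slot is only reused when MFI.getObjectSize(FI) matches the spill size exactly.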
  135. /// Utility function for reservePreviousStackSlotForValue. Tries to find the
  136. /// stack slot index to which we have spilled the value for previous statepoints.
  137. /// LookUpDepth specifies the maximum DFS depth this function is allowed to search.
  138. static std::optional<int> findPreviousSpillSlot(const Value *Val,
  139. SelectionDAGBuilder &Builder,
  140. int LookUpDepth) {
  141. // Cannot look any further - give up now
  142. if (LookUpDepth <= 0)
  143. return std::nullopt;
  144. // Spill location is known for gc relocates
  145. if (const auto *Relocate = dyn_cast<GCRelocateInst>(Val)) {
  146. const Value *Statepoint = Relocate->getStatepoint();
  147. assert((isa<GCStatepointInst>(Statepoint) || isa<UndefValue>(Statepoint)) &&
  148. "GetStatepoint must return one of two types");
  149. if (isa<UndefValue>(Statepoint))
  150. return std::nullopt;
  151. const auto &RelocationMap = Builder.FuncInfo.StatepointRelocationMaps
  152. [cast<GCStatepointInst>(Statepoint)];
  153. auto It = RelocationMap.find(Relocate);
  154. if (It == RelocationMap.end())
  155. return std::nullopt;
  156. auto &Record = It->second;
  157. if (Record.type != RecordType::Spill)
  158. return std::nullopt;
  159. return Record.payload.FI;
  160. }
  161. // Look through bitcast instructions.
  162. if (const BitCastInst *Cast = dyn_cast<BitCastInst>(Val))
  163. return findPreviousSpillSlot(Cast->getOperand(0), Builder, LookUpDepth - 1);
  164. // Look through phi nodes.
  165. // All incoming values should have the same known stack slot; otherwise the
  166. // result is unknown.
  167. if (const PHINode *Phi = dyn_cast<PHINode>(Val)) {
  168. std::optional<int> MergedResult;
  169. for (const auto &IncomingValue : Phi->incoming_values()) {
  170. std::optional<int> SpillSlot =
  171. findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth - 1);
  172. if (!SpillSlot)
  173. return std::nullopt;
  174. if (MergedResult && *MergedResult != *SpillSlot)
  175. return std::nullopt;
  176. MergedResult = SpillSlot;
  177. }
  178. return MergedResult;
  179. }
  180. // TODO: We can do better for PHI nodes. In cases like this:
  181. // ptr = phi(relocated_pointer, not_relocated_pointer)
  182. // statepoint(ptr)
  183. // We will report that the stack slot for ptr is unknown, and later we might
  184. // assign different stack slots for ptr and relocated_pointer. This limits
  185. // llvm's ability to remove redundant stores.
  186. // Unfortunately that is hard to accomplish in the current infrastructure.
  187. // We use this function to eliminate the spill store completely, while in the
  188. // example above we would still need to emit the store, just to a special
  189. // "preferred" location instead of an arbitrary one.
  190. // TODO: handle simple updates. If a value is modified and the original
  191. // value is no longer live, it would be nice to put the modified value in the
  192. // same slot. This allows folding of the memory accesses for some
  193. // instruction types (like an increment).
  194. // statepoint (i)
  195. // i1 = i+1
  196. // statepoint (i1)
  197. // However we need to be careful for cases like this:
  198. // statepoint(i)
  199. // i1 = i+1
  200. // statepoint(i, i1)
  201. // Here we want to reserve a spill slot for 'i', but not for 'i+1'. If we just
  202. // put handling of simple modifications in this function like it's done
  203. // for bitcasts, we might end up reserving i's slot for 'i+1' because the
  204. // order in which we visit values is unspecified.
  205. // Don't know any information about this instruction
  206. return std::nullopt;
  207. }
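// Added annotation (hypothetical IR sketch, not part of the original source):
//   %p = ... @llvm.experimental.gc.relocate...(...)   ; recorded as Spill, FI #3
//   %q = bitcast i8 addrspace(1)* %p to i8 addrspace(1)*
//   ... a later statepoint with %q in its gc/deopt state ...
// findPreviousSpillSlot(%q) looks through the bitcast, finds the relocate's
// Spill record in FuncInfo.StatepointRelocationMaps, and returns frame index 3,
// letting reservePreviousStackSlotForValue reuse that slot instead of spilling
// %q to a fresh one.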
  208. /// Return true if-and-only-if the given SDValue can be lowered as either a
  209. /// constant argument or a stack reference. The key point is that the value
  210. /// doesn't need to be spilled or tracked as a vreg use.
  211. static bool willLowerDirectly(SDValue Incoming) {
  212. // We are making an unchecked assumption that the frame size <= 2^16 as that
  213. // is the largest offset which can be encoded in the stackmap format.
  214. if (isa<FrameIndexSDNode>(Incoming))
  215. return true;
  216. // The largest constant describable in the StackMap format is 64 bits.
  217. // Potential Optimization: Constant values are sign extended by the consumer,
  218. // and thus there are many constants of static type > 64 bits whose value
  219. // happens to be sext(Con64) and could thus be lowered directly.
  220. if (Incoming.getValueType().getSizeInBits() > 64)
  221. return false;
  222. return (isa<ConstantSDNode>(Incoming) || isa<ConstantFPSDNode>(Incoming) ||
  223. Incoming.isUndef());
  224. }
  225. /// Try to find existing copies of the incoming values in stack slots used for
  226. /// statepoint spilling. If we can find a spill slot for the incoming value,
  227. /// mark that slot as allocated, and reuse the same slot for this safepoint.
  228. /// This helps to avoid series of loads and stores that only serve to reshuffle
  229. /// values on the stack between calls.
  230. static void reservePreviousStackSlotForValue(const Value *IncomingValue,
  231. SelectionDAGBuilder &Builder) {
  232. SDValue Incoming = Builder.getValue(IncomingValue);
  233. // If we won't spill this, we don't need to check for previously allocated
  234. // stack slots.
  235. if (willLowerDirectly(Incoming))
  236. return;
  237. SDValue OldLocation = Builder.StatepointLowering.getLocation(Incoming);
  238. if (OldLocation.getNode())
  239. // Duplicates in input
  240. return;
  241. const int LookUpDepth = 6;
  242. std::optional<int> Index =
  243. findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth);
  244. if (!Index)
  245. return;
  246. const auto &StatepointSlots = Builder.FuncInfo.StatepointStackSlots;
  247. auto SlotIt = find(StatepointSlots, *Index);
  248. assert(SlotIt != StatepointSlots.end() &&
  249. "Value spilled to the unknown stack slot");
  250. // This is one of our dedicated lowering slots
  251. const int Offset = std::distance(StatepointSlots.begin(), SlotIt);
  252. if (Builder.StatepointLowering.isStackSlotAllocated(Offset)) {
  253. // stack slot already assigned to someone else, can't use it!
  254. // TODO: currently we reserve space for gc arguments after doing
  255. // normal allocation for deopt arguments. We should reserve for
  256. // _all_ deopt and gc arguments, then start allocating. This
  257. // will prevent some moves being inserted when vm state changes,
  258. // but gc state doesn't between two calls.
  259. return;
  260. }
  261. // Reserve this stack slot
  262. Builder.StatepointLowering.reserveStackSlot(Offset);
  263. // Cache this slot so we find it when going through the normal
  264. // assignment loop.
  265. SDValue Loc =
  266. Builder.DAG.getTargetFrameIndex(*Index, Builder.getFrameIndexTy());
  267. Builder.StatepointLowering.setLocation(Incoming, Loc);
  268. }
  269. /// Extract the call from the statepoint, lower it, and return a pointer to the
  270. /// call node. Also update NodeMap so that getValue(statepoint) will
  271. /// reference the lowered call result.
  272. static std::pair<SDValue, SDNode *> lowerCallFromStatepointLoweringInfo(
  273. SelectionDAGBuilder::StatepointLoweringInfo &SI,
  274. SelectionDAGBuilder &Builder) {
  275. SDValue ReturnValue, CallEndVal;
  276. std::tie(ReturnValue, CallEndVal) =
  277. Builder.lowerInvokable(SI.CLI, SI.EHPadBB);
  278. SDNode *CallEnd = CallEndVal.getNode();
  279. // Get a call instruction from the call sequence chain. Tail calls are not
  280. // allowed. The following code is essentially reverse engineering X86's
  281. // LowerCallTo.
  282. //
  283. // We are expecting DAG to have the following form:
  284. //
  285. // ch = eh_label (only in case of invoke statepoint)
  286. // ch, glue = callseq_start ch
  287. // ch, glue = X86::Call ch, glue
  288. // ch, glue = callseq_end ch, glue
  289. // get_return_value ch, glue
  290. //
  291. // get_return_value can either be a sequence of CopyFromReg instructions
  292. // to grab the return value from the return register(s), or it can be a LOAD
  293. // to load a value returned by reference via a stack slot.
  294. bool HasDef = !SI.CLI.RetTy->isVoidTy();
  295. if (HasDef) {
  296. if (CallEnd->getOpcode() == ISD::LOAD)
  297. CallEnd = CallEnd->getOperand(0).getNode();
  298. else
  299. while (CallEnd->getOpcode() == ISD::CopyFromReg)
  300. CallEnd = CallEnd->getOperand(0).getNode();
  301. }
  302. assert(CallEnd->getOpcode() == ISD::CALLSEQ_END && "expected!");
  303. return std::make_pair(ReturnValue, CallEnd->getOperand(0).getNode());
  304. }
  305. static MachineMemOperand* getMachineMemOperand(MachineFunction &MF,
  306. FrameIndexSDNode &FI) {
  307. auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FI.getIndex());
  308. auto MMOFlags = MachineMemOperand::MOStore |
  309. MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
  310. auto &MFI = MF.getFrameInfo();
  311. return MF.getMachineMemOperand(PtrInfo, MMOFlags,
  312. MFI.getObjectSize(FI.getIndex()),
  313. MFI.getObjectAlign(FI.getIndex()));
  314. }
  315. /// Spill a value incoming to the statepoint. It might be either part of the
  316. /// vmstate or the gcstate. In both cases unconditionally spill it on the
  317. /// stack unless it is a null constant. Return a tuple whose first element is
  318. /// the frame index containing the saved value, whose second element is the
  319. /// outgoing chain from the emitted store, and whose third element is the
  320. /// MachineMemOperand for that store.
  321. static std::tuple<SDValue, SDValue, MachineMemOperand*>
  322. spillIncomingStatepointValue(SDValue Incoming, SDValue Chain,
  323. SelectionDAGBuilder &Builder) {
  324. SDValue Loc = Builder.StatepointLowering.getLocation(Incoming);
  325. MachineMemOperand* MMO = nullptr;
  326. // Emit new store if we didn't do it for this ptr before
  327. if (!Loc.getNode()) {
  328. Loc = Builder.StatepointLowering.allocateStackSlot(Incoming.getValueType(),
  329. Builder);
  330. int Index = cast<FrameIndexSDNode>(Loc)->getIndex();
  331. // We use TargetFrameIndex so that isel will not select it into LEA
  332. Loc = Builder.DAG.getTargetFrameIndex(Index, Builder.getFrameIndexTy());
  333. // Right now we always allocate spill slots that are of the same
  334. // size as the value we're about to spill (the size of spillee can
  335. // vary since we spill vectors of pointers too). At some point we
  336. // can consider allowing spills of smaller values to larger slots
  337. // (i.e. change the '==' in the assert below to a '>=').
  338. MachineFrameInfo &MFI = Builder.DAG.getMachineFunction().getFrameInfo();
  339. assert((MFI.getObjectSize(Index) * 8) ==
  340. (-8 & (7 + // Round up modulo 8.
  341. (int64_t)Incoming.getValueSizeInBits())) &&
  342. "Bad spill: stack slot does not match!");
  343. // Note: Using the alignment of the spill slot (rather than the abi or
  344. // preferred alignment) is required for correctness when dealing with spill
  345. // slots with preferred alignments larger than frame alignment.
  346. auto &MF = Builder.DAG.getMachineFunction();
  347. auto PtrInfo = MachinePointerInfo::getFixedStack(MF, Index);
  348. auto *StoreMMO = MF.getMachineMemOperand(
  349. PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(Index),
  350. MFI.getObjectAlign(Index));
  351. Chain = Builder.DAG.getStore(Chain, Builder.getCurSDLoc(), Incoming, Loc,
  352. StoreMMO);
  353. MMO = getMachineMemOperand(MF, *cast<FrameIndexSDNode>(Loc));
  354. Builder.StatepointLowering.setLocation(Incoming, Loc);
  355. }
  356. assert(Loc.getNode());
  357. return std::make_tuple(Loc, Chain, MMO);
  358. }
  359. /// Lower a single value incoming to a statepoint node. This value can be
  360. /// either a deopt value or a gc value, the handling is the same. We special
  361. /// case constants and allocas, then fall back to spilling if required.
  362. static void
  363. lowerIncomingStatepointValue(SDValue Incoming, bool RequireSpillSlot,
  364. SmallVectorImpl<SDValue> &Ops,
  365. SmallVectorImpl<MachineMemOperand *> &MemRefs,
  366. SelectionDAGBuilder &Builder) {
  367. if (willLowerDirectly(Incoming)) {
  368. if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Incoming)) {
  369. // This handles allocas as arguments to the statepoint (this is only
  370. // really meaningful for a deopt value. For GC, we'd be trying to
  371. // relocate the address of the alloca itself?)
  372. assert(Incoming.getValueType() == Builder.getFrameIndexTy() &&
  373. "Incoming value is a frame index!");
  374. Ops.push_back(Builder.DAG.getTargetFrameIndex(FI->getIndex(),
  375. Builder.getFrameIndexTy()));
  376. auto &MF = Builder.DAG.getMachineFunction();
  377. auto *MMO = getMachineMemOperand(MF, *FI);
  378. MemRefs.push_back(MMO);
  379. return;
  380. }
  381. assert(Incoming.getValueType().getSizeInBits() <= 64);
  382. if (Incoming.isUndef()) {
  383. // Put an easily recognized constant that's unlikely to be a valid
  384. // value so that uses of undef by the consumer of the stackmap are
  385. // easily recognized. This is legal since the compiler is always
  386. // allowed to choose an arbitrary value for undef.
  387. pushStackMapConstant(Ops, Builder, 0xFEFEFEFE);
  388. return;
  389. }
  390. // If the original value was a constant, make sure it gets recorded as
  391. // such in the stackmap. This is required so that the consumer can
  392. // parse any internal format to the deopt state. It also handles null
  393. // pointers and other constant pointers in GC states.
  394. if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Incoming)) {
  395. pushStackMapConstant(Ops, Builder, C->getSExtValue());
  396. return;
  397. } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Incoming)) {
  398. pushStackMapConstant(Ops, Builder,
  399. C->getValueAPF().bitcastToAPInt().getZExtValue());
  400. return;
  401. }
  402. llvm_unreachable("unhandled direct lowering case");
  403. }
  404. if (!RequireSpillSlot) {
  405. // If this value is live in (not live-on-return, or live-through), we can
  406. // treat it the same way patchpoint treats its "live in" values. We'll
  407. // end up folding some of these into stack references, but they'll be
  408. // handled by the register allocator. Note that we do not have the notion
  409. // of a late use so these values might be placed in registers which are
  410. // clobbered by the call. This is fine for live-in. For live-through values,
  411. // a fix-up pass should be executed to force spilling of such registers.
  412. Ops.push_back(Incoming);
  413. } else {
  414. // Otherwise, locate a spill slot and explicitly spill it so it can be
  415. // found by the runtime later. Note: We know all of these spills are
  416. // independent, but don't bother to exploit that chain wise. DAGCombine
  417. // will happily do so as needed, so doing it here would be a small compile
  418. // time win at most.
  419. SDValue Chain = Builder.getRoot();
  420. auto Res = spillIncomingStatepointValue(Incoming, Chain, Builder);
  421. Ops.push_back(std::get<0>(Res));
  422. if (auto *MMO = std::get<2>(Res))
  423. MemRefs.push_back(MMO);
  424. Chain = std::get<1>(Res);
  425. Builder.DAG.setRoot(Chain);
  426. }
  427. }
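// Added summary (not part of the original source) of the three lowering
// strategies chosen above for a single incoming value:
//   - willLowerDirectly: frame indices become TargetFrameIndex operands (plus an
//     MMO), and constants/undef become <ConstantOp, value> pairs; nothing is spilled.
//   - !RequireSpillSlot: the SDValue is passed through as-is and ends up in
//     whatever register the allocator picks (acceptable for live-in values).
//   - otherwise: the value is stored to a dedicated statepoint stack slot and
//     that slot's TargetFrameIndex is recorded in the operand list.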
  428. /// Return true if value V represents a GC value. The behavior is conservative:
  429. /// if it cannot be shown that V is not a GC value, the function returns true.
  430. static bool isGCValue(const Value *V, SelectionDAGBuilder &Builder) {
  431. auto *Ty = V->getType();
  432. if (!Ty->isPtrOrPtrVectorTy())
  433. return false;
  434. if (auto *GFI = Builder.GFI)
  435. if (auto IsManaged = GFI->getStrategy().isGCManagedPointer(Ty))
  436. return *IsManaged;
  437. return true; // conservative
  438. }
  439. /// Lower deopt state and gc pointer arguments of the statepoint. The actual
  440. /// lowering is described in lowerIncomingStatepointValue. This function is
  441. /// responsible for lowering everything in the right position and playing some
  442. /// tricks to avoid redundant stack manipulation where possible. On
  443. /// completion, 'Ops' will contain ready-to-use operands for the machine code
  444. /// statepoint. The chain nodes will have already been created and the DAG root
  445. /// will be set to the last value spilled (if any were).
  446. static void
  447. lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
  448. SmallVectorImpl<MachineMemOperand *> &MemRefs,
  449. SmallVectorImpl<SDValue> &GCPtrs,
  450. DenseMap<SDValue, int> &LowerAsVReg,
  451. SelectionDAGBuilder::StatepointLoweringInfo &SI,
  452. SelectionDAGBuilder &Builder) {
  453. // Lower the deopt and gc arguments for this statepoint. Layout will be:
  454. // deopt argument length, deopt arguments.., gc arguments...
  455. // Figure out what lowering strategy we're going to use for each part.
  456. // Note: It is conservatively correct to lower both "live-in" and "live-out"
  457. // as "live-through". A "live-through" variable is one which is "live-in",
  458. // "live-out", and live throughout the lifetime of the call (i.e. we can find
  459. // it from any PC within the transitive callee of the statepoint). In
  460. // particular, if the callee spills callee preserved registers we may not
  461. // be able to find a value placed in that register during the call. This is
  462. // fine for live-out, but not for live-through. If we were willing to make
  463. // assumptions about the code generator producing the callee, we could
  464. // potentially allow live-through values in callee saved registers.
  465. const bool LiveInDeopt =
  466. SI.StatepointFlags & (uint64_t)StatepointFlags::DeoptLiveIn;
  467. // Decide which derived pointers will go on VRegs
  468. unsigned MaxVRegPtrs = MaxRegistersForGCPointers.getValue();
  469. // Pointers used on the exceptional path of an invoke statepoint.
  470. // We cannot assign them to VRegs.
  471. SmallSet<SDValue, 8> LPadPointers;
  472. if (!UseRegistersForGCPointersInLandingPad)
  473. if (const auto *StInvoke =
  474. dyn_cast_or_null<InvokeInst>(SI.StatepointInstr)) {
  475. LandingPadInst *LPI = StInvoke->getLandingPadInst();
  476. for (const auto *Relocate : SI.GCRelocates)
  477. if (Relocate->getOperand(0) == LPI) {
  478. LPadPointers.insert(Builder.getValue(Relocate->getBasePtr()));
  479. LPadPointers.insert(Builder.getValue(Relocate->getDerivedPtr()));
  480. }
  481. }
  482. LLVM_DEBUG(dbgs() << "Deciding how to lower GC Pointers:\n");
  483. // List of unique lowered GC Pointer values.
  484. SmallSetVector<SDValue, 16> LoweredGCPtrs;
  485. // Map lowered GC Pointer value to the index in above vector
  486. DenseMap<SDValue, unsigned> GCPtrIndexMap;
  487. unsigned CurNumVRegs = 0;
  488. auto canPassGCPtrOnVReg = [&](SDValue SD) {
  489. if (SD.getValueType().isVector())
  490. return false;
  491. if (LPadPointers.count(SD))
  492. return false;
  493. return !willLowerDirectly(SD);
  494. };
  495. auto processGCPtr = [&](const Value *V) {
  496. SDValue PtrSD = Builder.getValue(V);
  497. if (!LoweredGCPtrs.insert(PtrSD))
  498. return; // skip duplicates
  499. GCPtrIndexMap[PtrSD] = LoweredGCPtrs.size() - 1;
  500. assert(!LowerAsVReg.count(PtrSD) && "must not have been seen");
  501. if (LowerAsVReg.size() == MaxVRegPtrs)
  502. return;
  503. assert(V->getType()->isVectorTy() == PtrSD.getValueType().isVector() &&
  504. "IR and SD types disagree");
  505. if (!canPassGCPtrOnVReg(PtrSD)) {
  506. LLVM_DEBUG(dbgs() << "direct/spill "; PtrSD.dump(&Builder.DAG));
  507. return;
  508. }
  509. LLVM_DEBUG(dbgs() << "vreg "; PtrSD.dump(&Builder.DAG));
  510. LowerAsVReg[PtrSD] = CurNumVRegs++;
  511. };
  512. // Process derived pointers first to give them more chance to go on VReg.
  513. for (const Value *V : SI.Ptrs)
  514. processGCPtr(V);
  515. for (const Value *V : SI.Bases)
  516. processGCPtr(V);
  517. LLVM_DEBUG(dbgs() << LowerAsVReg.size() << " pointers will go in vregs\n");
  518. auto requireSpillSlot = [&](const Value *V) {
  519. if (!Builder.DAG.getTargetLoweringInfo().isTypeLegal(
  520. Builder.getValue(V).getValueType()))
  521. return true;
  522. if (isGCValue(V, Builder))
  523. return !LowerAsVReg.count(Builder.getValue(V));
  524. return !(LiveInDeopt || UseRegistersForDeoptValues);
  525. };
  526. // Before we actually start lowering (and allocating spill slots for values),
  527. // reserve any stack slots which we judge to be profitable to reuse for a
  528. // particular value. This is purely an optimization over the code below and
  529. // doesn't change semantics at all. It is important for performance that we
  530. // reserve slots for both deopt and gc values before lowering either.
  531. for (const Value *V : SI.DeoptState) {
  532. if (requireSpillSlot(V))
  533. reservePreviousStackSlotForValue(V, Builder);
  534. }
  535. for (const Value *V : SI.Ptrs) {
  536. SDValue SDV = Builder.getValue(V);
  537. if (!LowerAsVReg.count(SDV))
  538. reservePreviousStackSlotForValue(V, Builder);
  539. }
  540. for (const Value *V : SI.Bases) {
  541. SDValue SDV = Builder.getValue(V);
  542. if (!LowerAsVReg.count(SDV))
  543. reservePreviousStackSlotForValue(V, Builder);
  544. }
  545. // First, prefix the list with the number of unique values to be
  546. // lowered. Note that this is the number of *Values* not the
  547. // number of SDValues required to lower them.
  548. const int NumVMSArgs = SI.DeoptState.size();
  549. pushStackMapConstant(Ops, Builder, NumVMSArgs);
  550. // The vm state arguments are lowered in an opaque manner. We do not know
  551. // what type of values are contained within.
  552. LLVM_DEBUG(dbgs() << "Lowering deopt state\n");
  553. for (const Value *V : SI.DeoptState) {
  554. SDValue Incoming;
  555. // If this is a function argument at a static frame index, generate it as
  556. // the frame index.
  557. if (const Argument *Arg = dyn_cast<Argument>(V)) {
  558. int FI = Builder.FuncInfo.getArgumentFrameIndex(Arg);
  559. if (FI != INT_MAX)
  560. Incoming = Builder.DAG.getFrameIndex(FI, Builder.getFrameIndexTy());
  561. }
  562. if (!Incoming.getNode())
  563. Incoming = Builder.getValue(V);
  564. LLVM_DEBUG(dbgs() << "Value " << *V
  565. << " requireSpillSlot = " << requireSpillSlot(V) << "\n");
  566. lowerIncomingStatepointValue(Incoming, requireSpillSlot(V), Ops, MemRefs,
  567. Builder);
  568. }
  569. // Finally, go ahead and lower all the gc arguments.
  570. pushStackMapConstant(Ops, Builder, LoweredGCPtrs.size());
  571. for (SDValue SDV : LoweredGCPtrs)
  572. lowerIncomingStatepointValue(SDV, !LowerAsVReg.count(SDV), Ops, MemRefs,
  573. Builder);
  574. // Copy to out vector. LoweredGCPtrs will be empty after this point.
  575. GCPtrs = LoweredGCPtrs.takeVector();
  576. // If there are any explicit spill slots passed to the statepoint, record
  577. // them, but otherwise do not do anything special. These are user provided
  578. // allocas and give control over placement to the consumer. In this case,
  579. // it is the contents of the slot which may get updated, not the pointer to
  580. // the alloca.
  581. SmallVector<SDValue, 4> Allocas;
  582. for (Value *V : SI.GCArgs) {
  583. SDValue Incoming = Builder.getValue(V);
  584. if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Incoming)) {
  585. // This handles allocas as arguments to the statepoint
  586. assert(Incoming.getValueType() == Builder.getFrameIndexTy() &&
  587. "Incoming value is a frame index!");
  588. Allocas.push_back(Builder.DAG.getTargetFrameIndex(
  589. FI->getIndex(), Builder.getFrameIndexTy()));
  590. auto &MF = Builder.DAG.getMachineFunction();
  591. auto *MMO = getMachineMemOperand(MF, *FI);
  592. MemRefs.push_back(MMO);
  593. }
  594. }
  595. pushStackMapConstant(Ops, Builder, Allocas.size());
  596. Ops.append(Allocas.begin(), Allocas.end());
  597. // Now construct the GC base/derived map.
  598. pushStackMapConstant(Ops, Builder, SI.Ptrs.size());
  599. SDLoc L = Builder.getCurSDLoc();
  600. for (unsigned i = 0; i < SI.Ptrs.size(); ++i) {
  601. SDValue Base = Builder.getValue(SI.Bases[i]);
  602. assert(GCPtrIndexMap.count(Base) && "base not found in index map");
  603. Ops.push_back(
  604. Builder.DAG.getTargetConstant(GCPtrIndexMap[Base], L, MVT::i64));
  605. SDValue Derived = Builder.getValue(SI.Ptrs[i]);
  606. assert(GCPtrIndexMap.count(Derived) && "derived not found in index map");
  607. Ops.push_back(
  608. Builder.DAG.getTargetConstant(GCPtrIndexMap[Derived], L, MVT::i64));
  609. }
  610. }
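// Added annotation (not part of the original source): after this function runs,
// the meta-argument portion of 'Ops' has roughly the following shape (each
// "count" below is emitted as a <ConstantOp, N> pair by pushStackMapConstant):
//   <num deopt args>, deopt args...,
//   <num unique gc pointers>, gc pointers...,
//   <num explicit alloca slots>, alloca frame indices...,
//   <num base/derived pairs>, (base index, derived index)...
// The base/derived indices at the end refer to positions in the unique gc
// pointer list (GCPtrIndexMap), not to raw operand positions.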
  611. SDValue SelectionDAGBuilder::LowerAsSTATEPOINT(
  612. SelectionDAGBuilder::StatepointLoweringInfo &SI) {
  613. // The basic scheme here is that information about both the original call and
  614. // the safepoint is encoded in the CallInst. We create a temporary call and
  615. // lower it, then reverse engineer the calling sequence.
  616. NumOfStatepoints++;
  617. // Clear state
  618. StatepointLowering.startNewStatepoint(*this);
  619. assert(SI.Bases.size() == SI.Ptrs.size() && "Pointer without base!");
  620. assert((GFI || SI.Bases.empty()) &&
  621. "No gc specified, so cannot relocate pointers!");
  622. LLVM_DEBUG(dbgs() << "Lowering statepoint " << *SI.StatepointInstr << "\n");
  623. #ifndef NDEBUG
  624. for (const auto *Reloc : SI.GCRelocates)
  625. if (Reloc->getParent() == SI.StatepointInstr->getParent())
  626. StatepointLowering.scheduleRelocCall(*Reloc);
  627. #endif
  628. // Lower statepoint vmstate and gcstate arguments
  629. // All lowered meta args.
  630. SmallVector<SDValue, 10> LoweredMetaArgs;
  631. // Lowered GC pointers (subset of above).
  632. SmallVector<SDValue, 16> LoweredGCArgs;
  633. SmallVector<MachineMemOperand*, 16> MemRefs;
  634. // Maps derived pointer SDValue to statepoint result of relocated pointer.
  635. DenseMap<SDValue, int> LowerAsVReg;
  636. lowerStatepointMetaArgs(LoweredMetaArgs, MemRefs, LoweredGCArgs, LowerAsVReg,
  637. SI, *this);
  638. // Now that we've emitted the spills, we need to update the root so that the
  639. // call sequence is ordered correctly.
  640. SI.CLI.setChain(getRoot());
  641. // Get call node, we will replace it later with statepoint
  642. SDValue ReturnVal;
  643. SDNode *CallNode;
  644. std::tie(ReturnVal, CallNode) = lowerCallFromStatepointLoweringInfo(SI, *this);
  645. // Construct the actual GC_TRANSITION_START, STATEPOINT, and GC_TRANSITION_END
  646. // nodes with all the appropriate arguments and return values.
  647. // Call Node: Chain, Target, {Args}, RegMask, [Glue]
  648. SDValue Chain = CallNode->getOperand(0);
  649. SDValue Glue;
  650. bool CallHasIncomingGlue = CallNode->getGluedNode();
  651. if (CallHasIncomingGlue) {
  652. // Glue is always last operand
  653. Glue = CallNode->getOperand(CallNode->getNumOperands() - 1);
  654. }
  655. // Build the GC_TRANSITION_START node if necessary.
  656. //
  657. // The operands to the GC_TRANSITION_{START,END} nodes are laid out in the
  658. // order in which they appear in the call to the statepoint intrinsic. If
  659. // any of the operands is pointer-typed, that operand is immediately
  660. // followed by a SRCVALUE for the pointer that may be used during lowering
  661. // (e.g. to form MachinePointerInfo values for loads/stores).
  662. const bool IsGCTransition =
  663. (SI.StatepointFlags & (uint64_t)StatepointFlags::GCTransition) ==
  664. (uint64_t)StatepointFlags::GCTransition;
  665. if (IsGCTransition) {
  666. SmallVector<SDValue, 8> TSOps;
  667. // Add chain
  668. TSOps.push_back(Chain);
  669. // Add GC transition arguments
  670. for (const Value *V : SI.GCTransitionArgs) {
  671. TSOps.push_back(getValue(V));
  672. if (V->getType()->isPointerTy())
  673. TSOps.push_back(DAG.getSrcValue(V));
  674. }
  675. // Add glue if necessary
  676. if (CallHasIncomingGlue)
  677. TSOps.push_back(Glue);
  678. SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  679. SDValue GCTransitionStart =
  680. DAG.getNode(ISD::GC_TRANSITION_START, getCurSDLoc(), NodeTys, TSOps);
  681. Chain = GCTransitionStart.getValue(0);
  682. Glue = GCTransitionStart.getValue(1);
  683. }
  684. // TODO: Currently, all of these operands are being marked as read/write in
  685. // PrologEpilogInserter.cpp; we should special case the VMState arguments
  686. // and flags to be read-only.
  687. SmallVector<SDValue, 40> Ops;
  688. // Add the <id> and <numBytes> constants.
  689. Ops.push_back(DAG.getTargetConstant(SI.ID, getCurSDLoc(), MVT::i64));
  690. Ops.push_back(
  691. DAG.getTargetConstant(SI.NumPatchBytes, getCurSDLoc(), MVT::i32));
  692. // Calculate and push starting position of vmstate arguments
  693. // Get number of arguments incoming directly into call node
  694. unsigned NumCallRegArgs =
  695. CallNode->getNumOperands() - (CallHasIncomingGlue ? 4 : 3);
  696. Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, getCurSDLoc(), MVT::i32));
  697. // Add call target
  698. SDValue CallTarget = SDValue(CallNode->getOperand(1).getNode(), 0);
  699. Ops.push_back(CallTarget);
  700. // Add call arguments
  701. // Get position of register mask in the call
  702. SDNode::op_iterator RegMaskIt;
  703. if (CallHasIncomingGlue)
  704. RegMaskIt = CallNode->op_end() - 2;
  705. else
  706. RegMaskIt = CallNode->op_end() - 1;
  707. Ops.insert(Ops.end(), CallNode->op_begin() + 2, RegMaskIt);
  708. // Add a constant argument for the calling convention
  709. pushStackMapConstant(Ops, *this, SI.CLI.CallConv);
  710. // Add a constant argument for the flags
  711. uint64_t Flags = SI.StatepointFlags;
  712. assert(((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0) &&
  713. "Unknown flag used");
  714. pushStackMapConstant(Ops, *this, Flags);
  715. // Insert all vmstate and gcstate arguments
  716. llvm::append_range(Ops, LoweredMetaArgs);
  717. // Add register mask from call node
  718. Ops.push_back(*RegMaskIt);
  719. // Add chain
  720. Ops.push_back(Chain);
  721. // Same for the glue, but we add it only if original call had it
  722. if (Glue.getNode())
  723. Ops.push_back(Glue);
  724. // Compute return values. Provide a glue output since we consume one as
  725. // input. This allows someone else to chain off us as needed.
  726. SmallVector<EVT, 8> NodeTys;
  727. for (auto SD : LoweredGCArgs) {
  728. if (!LowerAsVReg.count(SD))
  729. continue;
  730. NodeTys.push_back(SD.getValueType());
  731. }
  732. LLVM_DEBUG(dbgs() << "Statepoint has " << NodeTys.size() << " results\n");
  733. assert(NodeTys.size() == LowerAsVReg.size() && "Inconsistent GC Ptr lowering");
  734. NodeTys.push_back(MVT::Other);
  735. NodeTys.push_back(MVT::Glue);
  736. unsigned NumResults = NodeTys.size();
  737. MachineSDNode *StatepointMCNode =
  738. DAG.getMachineNode(TargetOpcode::STATEPOINT, getCurSDLoc(), NodeTys, Ops);
  739. DAG.setNodeMemRefs(StatepointMCNode, MemRefs);
  740. // For values lowered to tied-defs, create the virtual registers if used
  741. // in other blocks. For local gc.relocates, record the appropriate statepoint
  742. // result in StatepointLoweringState.
  743. DenseMap<SDValue, Register> VirtRegs;
  744. for (const auto *Relocate : SI.GCRelocates) {
  745. Value *Derived = Relocate->getDerivedPtr();
  746. SDValue SD = getValue(Derived);
  747. if (!LowerAsVReg.count(SD))
  748. continue;
  749. SDValue Relocated = SDValue(StatepointMCNode, LowerAsVReg[SD]);
  750. // Handle local relocate. Note that different relocates might
  751. // map to the same SDValue.
  752. if (SI.StatepointInstr->getParent() == Relocate->getParent()) {
  753. SDValue Res = StatepointLowering.getLocation(SD);
  754. if (Res)
  755. assert(Res == Relocated);
  756. else
  757. StatepointLowering.setLocation(SD, Relocated);
  758. continue;
  759. }
  760. // Handle multiple gc.relocates of the same input efficiently.
  761. if (VirtRegs.count(SD))
  762. continue;
  763. auto *RetTy = Relocate->getType();
  764. Register Reg = FuncInfo.CreateRegs(RetTy);
  765. RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
  766. DAG.getDataLayout(), Reg, RetTy, std::nullopt);
  767. SDValue Chain = DAG.getRoot();
  768. RFV.getCopyToRegs(Relocated, DAG, getCurSDLoc(), Chain, nullptr);
  769. PendingExports.push_back(Chain);
  770. VirtRegs[SD] = Reg;
  771. }
  772. // Record for later use how each relocation was lowered. This is needed to
  773. // allow later gc.relocates to mirror the lowering chosen.
  774. const Instruction *StatepointInstr = SI.StatepointInstr;
  775. auto &RelocationMap = FuncInfo.StatepointRelocationMaps[StatepointInstr];
  776. for (const GCRelocateInst *Relocate : SI.GCRelocates) {
  777. const Value *V = Relocate->getDerivedPtr();
  778. SDValue SDV = getValue(V);
  779. SDValue Loc = StatepointLowering.getLocation(SDV);
  780. bool IsLocal = (Relocate->getParent() == StatepointInstr->getParent());
  781. RecordType Record;
  782. if (IsLocal && LowerAsVReg.count(SDV)) {
  783. // Result is already stored in StatepointLowering
  784. Record.type = RecordType::SDValueNode;
  785. } else if (LowerAsVReg.count(SDV)) {
  786. Record.type = RecordType::VReg;
  787. assert(VirtRegs.count(SDV));
  788. Record.payload.Reg = VirtRegs[SDV];
  789. } else if (Loc.getNode()) {
  790. Record.type = RecordType::Spill;
  791. Record.payload.FI = cast<FrameIndexSDNode>(Loc)->getIndex();
  792. } else {
  793. Record.type = RecordType::NoRelocate;
  794. // If we didn't relocate a value, we'll essentially end up inserting an
  795. // additional use of the original value when lowering the gc.relocate.
  796. // We need to make sure the value is available at the new use, which
  797. // might be in another block.
  798. if (Relocate->getParent() != StatepointInstr->getParent())
  799. ExportFromCurrentBlock(V);
  800. }
  801. RelocationMap[Relocate] = Record;
  802. }
  803. SDNode *SinkNode = StatepointMCNode;
  804. // Build the GC_TRANSITION_END node if necessary.
  805. //
  806. // See the comment above regarding GC_TRANSITION_START for the layout of
  807. // the operands to the GC_TRANSITION_END node.
  808. if (IsGCTransition) {
  809. SmallVector<SDValue, 8> TEOps;
  810. // Add chain
  811. TEOps.push_back(SDValue(StatepointMCNode, NumResults - 2));
  812. // Add GC transition arguments
  813. for (const Value *V : SI.GCTransitionArgs) {
  814. TEOps.push_back(getValue(V));
  815. if (V->getType()->isPointerTy())
  816. TEOps.push_back(DAG.getSrcValue(V));
  817. }
  818. // Add glue
  819. TEOps.push_back(SDValue(StatepointMCNode, NumResults - 1));
  820. SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  821. SDValue GCTransitionEnd =
  822. DAG.getNode(ISD::GC_TRANSITION_END, getCurSDLoc(), NodeTys, TEOps);
  823. SinkNode = GCTransitionEnd.getNode();
  824. }
  825. // Replace original call
  826. // Call: ch,glue = CALL ...
  827. // Statepoint: [gc relocates],ch,glue = STATEPOINT ...
  828. unsigned NumSinkValues = SinkNode->getNumValues();
  829. SDValue StatepointValues[2] = {SDValue(SinkNode, NumSinkValues - 2),
  830. SDValue(SinkNode, NumSinkValues - 1)};
  831. DAG.ReplaceAllUsesWith(CallNode, StatepointValues);
  832. // Remove original call node
  833. DAG.DeleteNode(CallNode);
  834. // Since we always emit CopyToRegs (even for local relocates), we must
  835. // update root, so that they are emitted before any local uses.
  836. (void)getControlRoot();
  837. // TODO: A better future implementation would be to emit a single variable
  838. // argument, variable return value STATEPOINT node here and then hookup the
  839. // return value of each gc.relocate to the respective output of the
  840. // previously emitted STATEPOINT value. Unfortunately, this doesn't appear
  841. // to actually be possible today.
  842. return ReturnVal;
  843. }
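// Added annotation (not part of the original source): the STATEPOINT machine
// node assembled above carries, in order: <id> (i64), <numPatchBytes> (i32),
// <numCallRegArgs> (i32), the call target, the call's register arguments, the
// calling convention and the statepoint flags (each as a <ConstantOp, N> pair),
// the meta arguments produced by lowerStatepointMetaArgs, the register mask,
// the chain, and optionally the incoming glue. Its results are one value per GC
// pointer lowered through a virtual register, followed by a chain and a glue.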
  844. /// Return two gc.results if present. The first result is a block-local
  845. /// gc.result, the second is a non-block-local gc.result. The corresponding
  846. /// entry will be nullptr if not present.
  847. static std::pair<const GCResultInst*, const GCResultInst*>
  848. getGCResultLocality(const GCStatepointInst &S) {
  849. std::pair<const GCResultInst *, const GCResultInst*> Res(nullptr, nullptr);
  850. for (const auto *U : S.users()) {
  851. auto *GRI = dyn_cast<GCResultInst>(U);
  852. if (!GRI)
  853. continue;
  854. if (GRI->getParent() == S.getParent())
  855. Res.first = GRI;
  856. else
  857. Res.second = GRI;
  858. }
  859. return Res;
  860. }
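// Added annotation (hypothetical IR sketch, intrinsic signatures simplified;
// not part of the original source):
//   bb1:
//     %tok = call token ... @llvm.experimental.gc.statepoint...(...)
//     %a   = call i32 @llvm.experimental.gc.result(token %tok)   ; -> Res.first
//     br label %bb2
//   bb2:
//     %b   = call i32 @llvm.experimental.gc.result(token %tok)   ; -> Res.second
// The function tolerates both a block-local and a non-local gc.result for the
// same statepoint and reports each separately.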
  861. void
  862. SelectionDAGBuilder::LowerStatepoint(const GCStatepointInst &I,
  863. const BasicBlock *EHPadBB /*= nullptr*/) {
  864. assert(I.getCallingConv() != CallingConv::AnyReg &&
  865. "anyregcc is not supported on statepoints!");
  866. #ifndef NDEBUG
  867. // Check that the associated GCStrategy expects to encounter statepoints.
  868. assert(GFI->getStrategy().useStatepoints() &&
  869. "GCStrategy does not expect to encounter statepoints");
  870. #endif
  871. SDValue ActualCallee;
  872. SDValue Callee = getValue(I.getActualCalledOperand());
  873. if (I.getNumPatchBytes() > 0) {
  874. // If we've been asked to emit a nop sequence instead of a call instruction
  875. // for this statepoint then don't lower the call target, but use a constant
  876. // `undef` instead. Not lowering the call target lets statepoint clients
  877. // get away without providing a physical address for the symbolic call
  878. // target at link time.
  879. ActualCallee = DAG.getUNDEF(Callee.getValueType());
  880. } else {
  881. ActualCallee = Callee;
  882. }
  883. StatepointLoweringInfo SI(DAG);
  884. populateCallLoweringInfo(SI.CLI, &I, GCStatepointInst::CallArgsBeginPos,
  885. I.getNumCallArgs(), ActualCallee,
  886. I.getActualReturnType(), false /* IsPatchPoint */);
  887. // There may be duplication in the gc.relocate list, such as two copies of
  888. // each relocation on the normal and exceptional paths for an invoke. We only
  889. // need to spill once and record one copy in the stackmap, but we need to
  890. // reload once per gc.relocate. (Dedupping gc.relocates is trickier and best
  891. // handled as a CSE problem elsewhere.)
  892. // TODO: There are a couple of major stackmap size optimizations we could do
  893. // here if we wished.
  894. // 1) If we've encountered a derived pair {B, D}, we don't need to actually
  895. // record {B,B} if it's seen later.
  896. // 2) Due to rematerialization, actual derived pointers are somewhat rare;
  897. // given that, we could change the format to record base pointer relocations
  898. // separately with half the space. This would require a format rev and a
  899. // fairly major rework of the STATEPOINT node though.
  900. SmallSet<SDValue, 8> Seen;
  901. for (const GCRelocateInst *Relocate : I.getGCRelocates()) {
  902. SI.GCRelocates.push_back(Relocate);
  903. SDValue DerivedSD = getValue(Relocate->getDerivedPtr());
  904. if (Seen.insert(DerivedSD).second) {
  905. SI.Bases.push_back(Relocate->getBasePtr());
  906. SI.Ptrs.push_back(Relocate->getDerivedPtr());
  907. }
  908. }
  909. // If we find a deopt value which isn't explicitly added, we need to
  910. // ensure it gets lowered such that gc cycles occurring before the
  911. // deoptimization event during the lifetime of the call don't invalidate
  912. // the pointer we're deopting with. Note that we assume that all
  913. // pointers passed to deopt are base pointers; relaxing that assumption
  914. // would require relatively large changes to how we represent relocations.
  915. for (Value *V : I.deopt_operands()) {
  916. if (!isGCValue(V, *this))
  917. continue;
  918. if (Seen.insert(getValue(V)).second) {
  919. SI.Bases.push_back(V);
  920. SI.Ptrs.push_back(V);
  921. }
  922. }
  923. SI.GCArgs = ArrayRef<const Use>(I.gc_args_begin(), I.gc_args_end());
  924. SI.StatepointInstr = &I;
  925. SI.ID = I.getID();
  926. SI.DeoptState = ArrayRef<const Use>(I.deopt_begin(), I.deopt_end());
  927. SI.GCTransitionArgs = ArrayRef<const Use>(I.gc_transition_args_begin(),
  928. I.gc_transition_args_end());
  929. SI.StatepointFlags = I.getFlags();
  930. SI.NumPatchBytes = I.getNumPatchBytes();
  931. SI.EHPadBB = EHPadBB;
  932. SDValue ReturnValue = LowerAsSTATEPOINT(SI);
  933. // Export the result value if needed
  934. const auto GCResultLocality = getGCResultLocality(I);
  935. if (!GCResultLocality.first && !GCResultLocality.second) {
  936. // The return value is not needed, just generate a poison value.
  937. // Note: This covers the void return case.
  938. setValue(&I, DAG.getIntPtrConstant(-1, getCurSDLoc()));
  939. return;
  940. }
  941. if (GCResultLocality.first) {
  942. // Result value will be used in the same basic block. Don't export it or
  943. // perform any explicit register copies. The gc_result will simply grab
  944. // this value.
  945. setValue(&I, ReturnValue);
  946. }
  947. if (!GCResultLocality.second)
  948. return;
  949. // The result value will be used in a different basic block, so we need to
  950. // export it now. The default exporting mechanism will not work here because
  951. // the statepoint call has a different type than the actual call. It means
  952. // that by default llvm will create an export register of the wrong type
  953. // (always i32 in our case). So instead we need to create an export register
  954. // with the correct type manually.
  955. // TODO: To eliminate this problem we can remove gc.result intrinsics
  956. // completely and make the statepoint call return a tuple.
  957. Type *RetTy = GCResultLocality.second->getType();
  958. Register Reg = FuncInfo.CreateRegs(RetTy);
  959. RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
  960. DAG.getDataLayout(), Reg, RetTy,
  961. I.getCallingConv());
  962. SDValue Chain = DAG.getEntryNode();
  963. RFV.getCopyToRegs(ReturnValue, DAG, getCurSDLoc(), Chain, nullptr);
  964. PendingExports.push_back(Chain);
  965. FuncInfo.ValueMap[&I] = Reg;
  966. }
  967. void SelectionDAGBuilder::LowerCallSiteWithDeoptBundleImpl(
  968. const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB,
  969. bool VarArgDisallowed, bool ForceVoidReturnTy) {
  970. StatepointLoweringInfo SI(DAG);
  971. unsigned ArgBeginIndex = Call->arg_begin() - Call->op_begin();
  972. populateCallLoweringInfo(
  973. SI.CLI, Call, ArgBeginIndex, Call->arg_size(), Callee,
  974. ForceVoidReturnTy ? Type::getVoidTy(*DAG.getContext()) : Call->getType(),
  975. false);
  976. if (!VarArgDisallowed)
  977. SI.CLI.IsVarArg = Call->getFunctionType()->isVarArg();
  978. auto DeoptBundle = *Call->getOperandBundle(LLVMContext::OB_deopt);
  979. unsigned DefaultID = StatepointDirectives::DeoptBundleStatepointID;
  980. auto SD = parseStatepointDirectivesFromAttrs(Call->getAttributes());
  981. SI.ID = SD.StatepointID.value_or(DefaultID);
  982. SI.NumPatchBytes = SD.NumPatchBytes.value_or(0);
  983. SI.DeoptState =
  984. ArrayRef<const Use>(DeoptBundle.Inputs.begin(), DeoptBundle.Inputs.end());
  985. SI.StatepointFlags = static_cast<uint64_t>(StatepointFlags::None);
  986. SI.EHPadBB = EHPadBB;
  987. // NB! The GC arguments are deliberately left empty.
  988. if (SDValue ReturnVal = LowerAsSTATEPOINT(SI)) {
  989. ReturnVal = lowerRangeToAssertZExt(DAG, *Call, ReturnVal);
  990. setValue(Call, ReturnVal);
  991. }
  992. }
  993. void SelectionDAGBuilder::LowerCallSiteWithDeoptBundle(
  994. const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB) {
  995. LowerCallSiteWithDeoptBundleImpl(Call, Callee, EHPadBB,
  996. /* VarArgDisallowed = */ false,
  997. /* ForceVoidReturnTy = */ false);
  998. }
  999. void SelectionDAGBuilder::visitGCResult(const GCResultInst &CI) {
  1000. // The result value of the gc_result is simply the result of the actual
  1001. // call. We've already emitted this, so just grab the value.
  1002. const Value *SI = CI.getStatepoint();
  1003. assert((isa<GCStatepointInst>(SI) || isa<UndefValue>(SI)) &&
  1004. "GetStatepoint must return one of two types");
  1005. if (isa<UndefValue>(SI))
  1006. return;
  1007. if (cast<GCStatepointInst>(SI)->getParent() == CI.getParent()) {
  1008. setValue(&CI, getValue(SI));
  1009. return;
  1010. }
  1011. // The statepoint is in a different basic block, so we should have stored the
  1012. // call result in a virtual register.
  1013. // We cannot use the default getValue() functionality to copy the value from
  1014. // this register because the statepoint and actual call return types can be
  1015. // different, and getValue() will use CopyFromReg of the wrong type,
  1016. // which is always i32 in our case.
  1017. Type *RetTy = CI.getType();
  1018. SDValue CopyFromReg = getCopyFromRegs(SI, RetTy);
  1019. assert(CopyFromReg.getNode());
  1020. setValue(&CI, CopyFromReg);
  1021. }
  1022. void SelectionDAGBuilder::visitGCRelocate(const GCRelocateInst &Relocate) {
  1023. const Value *Statepoint = Relocate.getStatepoint();
  1024. #ifndef NDEBUG
  1025. // Consistency check
  1026. // We skip this check for relocates not in the same basic block as their
  1027. // statepoint. It would be too expensive to preserve validation info through
  1028. // different basic blocks.
  1029. assert((isa<GCStatepointInst>(Statepoint) || isa<UndefValue>(Statepoint)) &&
  1030. "GetStatepoint must return one of two types");
  1031. if (isa<UndefValue>(Statepoint))
  1032. return;
  1033. if (cast<GCStatepointInst>(Statepoint)->getParent() == Relocate.getParent())
  1034. StatepointLowering.relocCallVisited(Relocate);
  1035. #endif
  1036. const Value *DerivedPtr = Relocate.getDerivedPtr();
  1037. auto &RelocationMap =
  1038. FuncInfo.StatepointRelocationMaps[cast<GCStatepointInst>(Statepoint)];
  1039. auto SlotIt = RelocationMap.find(&Relocate);
  1040. assert(SlotIt != RelocationMap.end() && "Relocating a gc value that was not lowered");
  1041. const RecordType &Record = SlotIt->second;
  1042. // If relocation was done via virtual register..
  1043. if (Record.type == RecordType::SDValueNode) {
  1044. assert(cast<GCStatepointInst>(Statepoint)->getParent() ==
  1045. Relocate.getParent() &&
  1046. "Nonlocal gc.relocate mapped via SDValue");
  1047. SDValue SDV = StatepointLowering.getLocation(getValue(DerivedPtr));
  1048. assert(SDV.getNode() && "empty SDValue");
  1049. setValue(&Relocate, SDV);
  1050. return;
  1051. }
  1052. if (Record.type == RecordType::VReg) {
  1053. Register InReg = Record.payload.Reg;
  1054. RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
  1055. DAG.getDataLayout(), InReg, Relocate.getType(),
  1056. std::nullopt); // This is not an ABI copy.
  1057. // We generate copy to/from regs even for local uses, hence we must
  1058. // chain with current root to ensure proper ordering of copies w.r.t.
  1059. // statepoint.
  1060. SDValue Chain = DAG.getRoot();
  1061. SDValue Relocation = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
  1062. Chain, nullptr, nullptr);
  1063. setValue(&Relocate, Relocation);
  1064. return;
  1065. }
  1066. if (Record.type == RecordType::Spill) {
  1067. unsigned Index = Record.payload.FI;
  1068. SDValue SpillSlot = DAG.getTargetFrameIndex(Index, getFrameIndexTy());
  1069. // All the reloads are independent and are reading memory only modified by
  1070. // statepoints (i.e. no other aliasing stores); informing SelectionDAG of
  1071. // this lets CSE kick in for free and allows reordering of
  1072. // instructions if possible. The lowering for statepoint sets the root,
  1073. // so this is ordering all reloads with either
  1074. // a) the statepoint node itself, or
  1075. // b) the entry of the current block for an invoke statepoint.
  1076. const SDValue Chain = DAG.getRoot(); // != Builder.getRoot()
  1077. auto &MF = DAG.getMachineFunction();
  1078. auto &MFI = MF.getFrameInfo();
  1079. auto PtrInfo = MachinePointerInfo::getFixedStack(MF, Index);
  1080. auto *LoadMMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
  1081. MFI.getObjectSize(Index),
  1082. MFI.getObjectAlign(Index));
  1083. auto LoadVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
  1084. Relocate.getType());
  1085. SDValue SpillLoad =
  1086. DAG.getLoad(LoadVT, getCurSDLoc(), Chain, SpillSlot, LoadMMO);
  1087. PendingLoads.push_back(SpillLoad.getValue(1));
  1088. assert(SpillLoad.getNode());
  1089. setValue(&Relocate, SpillLoad);
  1090. return;
  1091. }
  1092. assert(Record.type == RecordType::NoRelocate);
  1093. SDValue SD = getValue(DerivedPtr);
  1094. if (SD.isUndef() && SD.getValueType().getSizeInBits() <= 64) {
  1095. // Lowering relocate(undef) as arbitrary constant. Current constant value
  1096. // is chosen such that it's unlikely to be a valid pointer.
  1097. setValue(&Relocate, DAG.getTargetConstant(0xFEFEFEFE, SDLoc(SD), MVT::i64));
  1098. return;
  1099. }
  1100. // We didn't need to spill these special cases (constants and allocas).
  1101. // See the handling in spillIncomingStatepointValue for details.
  1102. setValue(&Relocate, SD);
  1103. }
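// Added summary (not part of the original source) of how each RecordType is
// materialized by visitGCRelocate above:
//   SDValueNode - block-local relocate; reuse the SDValue cached in
//                 StatepointLowering by LowerAsSTATEPOINT.
//   VReg        - cross-block relocate lowered on a register; copy from the
//                 virtual register created next to the STATEPOINT node.
//   Spill       - reload from the dedicated statepoint stack slot.
//   NoRelocate  - no relocation was emitted; fall back to the original value
//                 (with a recognizable constant for undef inputs).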
  1104. void SelectionDAGBuilder::LowerDeoptimizeCall(const CallInst *CI) {
  1105. const auto &TLI = DAG.getTargetLoweringInfo();
  1106. SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(RTLIB::DEOPTIMIZE),
  1107. TLI.getPointerTy(DAG.getDataLayout()));
  1108. // We don't lower calls to __llvm_deoptimize as varargs, but as a regular
  1109. // call. We also do not lower the return value to any virtual register, and
  1110. // change the immediately following return to a trap instruction.
  1111. LowerCallSiteWithDeoptBundleImpl(CI, Callee, /* EHPadBB = */ nullptr,
  1112. /* VarArgDisallowed = */ true,
  1113. /* ForceVoidReturnTy = */ true);
  1114. }
  1115. void SelectionDAGBuilder::LowerDeoptimizingReturn() {
  1116. // We do not lower the return value from llvm.deoptimize to any virtual
  1117. // register, and change the immediately following return to a trap
  1118. // instruction.
  1119. if (DAG.getTarget().Options.TrapUnreachable)
  1120. DAG.setRoot(
  1121. DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
  1122. }