// CallLowering.h (25 KB)

#pragma once
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
//===- llvm/CodeGen/GlobalISel/CallLowering.h - Call lowering ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM calls to machine code calls.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
#define LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MachineValueType.h"
#include <climits>
#include <cstdint>
#include <functional>
  33. namespace llvm {
  34. class AttributeList;
  35. class CallBase;
  36. class DataLayout;
  37. class Function;
  38. class FunctionLoweringInfo;
  39. class MachineIRBuilder;
  40. class MachineFunction;
  41. struct MachinePointerInfo;
  42. class MachineRegisterInfo;
  43. class TargetLowering;
  44. class CallLowering {
  45. const TargetLowering *TLI;
  46. virtual void anchor();
  47. public:
  48. struct BaseArgInfo {
  49. Type *Ty;
  50. SmallVector<ISD::ArgFlagsTy, 4> Flags;
  51. bool IsFixed;
  52. BaseArgInfo(Type *Ty,
  53. ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
  54. bool IsFixed = true)
  55. : Ty(Ty), Flags(Flags.begin(), Flags.end()), IsFixed(IsFixed) {}
  56. BaseArgInfo() : Ty(nullptr), IsFixed(false) {}
  57. };
  58. struct ArgInfo : public BaseArgInfo {
  59. SmallVector<Register, 4> Regs;
  60. // If the argument had to be split into multiple parts according to the
  61. // target calling convention, then this contains the original vregs
  62. // if the argument was an incoming arg.
  63. SmallVector<Register, 2> OrigRegs;
  64. /// Optionally track the original IR value for the argument. This may not be
  65. /// meaningful in all contexts. This should only be used on for forwarding
  66. /// through to use for aliasing information in MachinePointerInfo for memory
  67. /// arguments.
  68. const Value *OrigValue = nullptr;
  69. /// Index original Function's argument.
  70. unsigned OrigArgIndex;
  71. /// Sentinel value for implicit machine-level input arguments.
  72. static const unsigned NoArgIndex = UINT_MAX;
  73. ArgInfo(ArrayRef<Register> Regs, Type *Ty, unsigned OrigIndex,
  74. ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
  75. bool IsFixed = true, const Value *OrigValue = nullptr)
  76. : BaseArgInfo(Ty, Flags, IsFixed), Regs(Regs.begin(), Regs.end()),
  77. OrigValue(OrigValue), OrigArgIndex(OrigIndex) {
  78. if (!Regs.empty() && Flags.empty())
  79. this->Flags.push_back(ISD::ArgFlagsTy());
  80. // FIXME: We should have just one way of saying "no register".
  81. assert(((Ty->isVoidTy() || Ty->isEmptyTy()) ==
  82. (Regs.empty() || Regs[0] == 0)) &&
  83. "only void types should have no register");
  84. }
  85. ArgInfo(ArrayRef<Register> Regs, const Value &OrigValue, unsigned OrigIndex,
  86. ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
  87. bool IsFixed = true)
  88. : ArgInfo(Regs, OrigValue.getType(), OrigIndex, Flags, IsFixed, &OrigValue) {}
  89. ArgInfo() = default;
  90. };
  91. struct CallLoweringInfo {
  92. /// Calling convention to be used for the call.
  93. CallingConv::ID CallConv = CallingConv::C;
  94. /// Destination of the call. It should be either a register, globaladdress,
  95. /// or externalsymbol.
  96. MachineOperand Callee = MachineOperand::CreateImm(0);
  97. /// Descriptor for the return type of the function.
  98. ArgInfo OrigRet;
  99. /// List of descriptors of the arguments passed to the function.
  100. SmallVector<ArgInfo, 32> OrigArgs;
  101. /// Valid if the call has a swifterror inout parameter, and contains the
  102. /// vreg that the swifterror should be copied into after the call.
  103. Register SwiftErrorVReg;
  104. /// Original IR callsite corresponding to this call, if available.
  105. const CallBase *CB = nullptr;
  106. MDNode *KnownCallees = nullptr;
  107. /// True if the call must be tail call optimized.
  108. bool IsMustTailCall = false;
  109. /// True if the call passes all target-independent checks for tail call
  110. /// optimization.
  111. bool IsTailCall = false;
  112. /// True if the call was lowered as a tail call. This is consumed by the
  113. /// legalizer. This allows the legalizer to lower libcalls as tail calls.
  114. bool LoweredTailCall = false;
  115. /// True if the call is to a vararg function.
  116. bool IsVarArg = false;
  117. /// True if the function's return value can be lowered to registers.
  118. bool CanLowerReturn = true;
  119. /// VReg to hold the hidden sret parameter.
  120. Register DemoteRegister;
  121. /// The stack index for sret demotion.
  122. int DemoteStackIndex;
  123. /// Expected type identifier for indirect calls with a CFI check.
  124. const ConstantInt *CFIType = nullptr;
  125. };
  126. /// Argument handling is mostly uniform between the four places that
  127. /// make these decisions: function formal arguments, call
  128. /// instruction args, call instruction returns and function
  129. /// returns. However, once a decision has been made on where an
  130. /// argument should go, exactly what happens can vary slightly. This
  131. /// class abstracts the differences.
  132. ///
  133. /// ValueAssigner should not depend on any specific function state, and
  134. /// only determine the types and locations for arguments.
  135. struct ValueAssigner {
  136. ValueAssigner(bool IsIncoming, CCAssignFn *AssignFn_,
  137. CCAssignFn *AssignFnVarArg_ = nullptr)
  138. : AssignFn(AssignFn_), AssignFnVarArg(AssignFnVarArg_),
  139. IsIncomingArgumentHandler(IsIncoming) {
  140. // Some targets change the handler depending on whether the call is
  141. // varargs or not. If
  142. if (!AssignFnVarArg)
  143. AssignFnVarArg = AssignFn;
  144. }
  145. virtual ~ValueAssigner() = default;
  146. /// Returns true if the handler is dealing with incoming arguments,
  147. /// i.e. those that move values from some physical location to vregs.
  148. bool isIncomingArgumentHandler() const {
  149. return IsIncomingArgumentHandler;
  150. }
  151. /// Wrap call to (typically tablegenerated CCAssignFn). This may be
  152. /// overridden to track additional state information as arguments are
  153. /// assigned or apply target specific hacks around the legacy
  154. /// infrastructure.
  155. virtual bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
  156. CCValAssign::LocInfo LocInfo, const ArgInfo &Info,
  157. ISD::ArgFlagsTy Flags, CCState &State) {
  158. if (getAssignFn(State.isVarArg())(ValNo, ValVT, LocVT, LocInfo, Flags,
  159. State))
  160. return true;
  161. StackOffset = State.getNextStackOffset();
  162. return false;
  163. }
  164. /// Assignment function to use for a general call.
  165. CCAssignFn *AssignFn;
  166. /// Assignment function to use for a variadic call. This is usually the same
  167. /// as AssignFn on most targets.
  168. CCAssignFn *AssignFnVarArg;
  169. /// Stack offset for next argument. At the end of argument evaluation, this
  170. /// is typically the total stack size.
  171. uint64_t StackOffset = 0;
  172. /// Select the appropriate assignment function depending on whether this is
  173. /// a variadic call.
  174. CCAssignFn *getAssignFn(bool IsVarArg) const {
  175. return IsVarArg ? AssignFnVarArg : AssignFn;
  176. }
  177. private:
  178. const bool IsIncomingArgumentHandler;
  179. virtual void anchor();
  180. };
  181. struct IncomingValueAssigner : public ValueAssigner {
  182. IncomingValueAssigner(CCAssignFn *AssignFn_,
  183. CCAssignFn *AssignFnVarArg_ = nullptr)
  184. : ValueAssigner(true, AssignFn_, AssignFnVarArg_) {}
  185. };
  186. struct OutgoingValueAssigner : public ValueAssigner {
  187. OutgoingValueAssigner(CCAssignFn *AssignFn_,
  188. CCAssignFn *AssignFnVarArg_ = nullptr)
  189. : ValueAssigner(false, AssignFn_, AssignFnVarArg_) {}
  190. };
  191. struct ValueHandler {
  192. MachineIRBuilder &MIRBuilder;
  193. MachineRegisterInfo &MRI;
  194. const bool IsIncomingArgumentHandler;
  195. ValueHandler(bool IsIncoming, MachineIRBuilder &MIRBuilder,
  196. MachineRegisterInfo &MRI)
  197. : MIRBuilder(MIRBuilder), MRI(MRI),
  198. IsIncomingArgumentHandler(IsIncoming) {}
  199. virtual ~ValueHandler() = default;
  200. /// Returns true if the handler is dealing with incoming arguments,
  201. /// i.e. those that move values from some physical location to vregs.
  202. bool isIncomingArgumentHandler() const {
  203. return IsIncomingArgumentHandler;
  204. }
  205. /// Materialize a VReg containing the address of the specified
  206. /// stack-based object. This is either based on a FrameIndex or
  207. /// direct SP manipulation, depending on the context. \p MPO
  208. /// should be initialized to an appropriate description of the
  209. /// address created.
  210. virtual Register getStackAddress(uint64_t MemSize, int64_t Offset,
  211. MachinePointerInfo &MPO,
  212. ISD::ArgFlagsTy Flags) = 0;
  213. /// Return the in-memory size to write for the argument at \p VA. This may
  214. /// be smaller than the allocated stack slot size.
  215. ///
  216. /// This is overridable primarily for targets to maintain compatibility with
  217. /// hacks around the existing DAG call lowering infrastructure.
  218. virtual LLT getStackValueStoreType(const DataLayout &DL,
  219. const CCValAssign &VA,
  220. ISD::ArgFlagsTy Flags) const;
  221. /// The specified value has been assigned to a physical register,
  222. /// handle the appropriate COPY (either to or from) and mark any
  223. /// relevant uses/defines as needed.
  224. virtual void assignValueToReg(Register ValVReg, Register PhysReg,
  225. CCValAssign VA) = 0;
  226. /// The specified value has been assigned to a stack
  227. /// location. Load or store it there, with appropriate extension
  228. /// if necessary.
  229. virtual void assignValueToAddress(Register ValVReg, Register Addr,
  230. LLT MemTy, MachinePointerInfo &MPO,
  231. CCValAssign &VA) = 0;
  232. /// An overload which takes an ArgInfo if additional information about the
  233. /// arg is needed. \p ValRegIndex is the index in \p Arg.Regs for the value
  234. /// to store.
  235. virtual void assignValueToAddress(const ArgInfo &Arg, unsigned ValRegIndex,
  236. Register Addr, LLT MemTy,
  237. MachinePointerInfo &MPO,
  238. CCValAssign &VA) {
  239. assignValueToAddress(Arg.Regs[ValRegIndex], Addr, MemTy, MPO, VA);
  240. }
  241. /// Handle custom values, which may be passed into one or more of \p VAs.
  242. /// \p If the handler wants the assignments to be delayed until after
  243. /// mem loc assignments, then it sets \p Thunk to the thunk to do the
  244. /// assignment.
  245. /// \return The number of \p VAs that have been assigned after the first
  246. /// one, and which should therefore be skipped from further
  247. /// processing.
  248. virtual unsigned assignCustomValue(ArgInfo &Arg, ArrayRef<CCValAssign> VAs,
  249. std::function<void()> *Thunk = nullptr) {
  250. // This is not a pure virtual method because not all targets need to worry
  251. // about custom values.
  252. llvm_unreachable("Custom values not supported");
  253. }
  254. /// Do a memory copy of \p MemSize bytes from \p SrcPtr to \p DstPtr. This
  255. /// is necessary for outgoing stack-passed byval arguments.
  256. void
  257. copyArgumentMemory(const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
  258. const MachinePointerInfo &DstPtrInfo, Align DstAlign,
  259. const MachinePointerInfo &SrcPtrInfo, Align SrcAlign,
  260. uint64_t MemSize, CCValAssign &VA) const;
  261. /// Extend a register to the location type given in VA, capped at extending
  262. /// to at most MaxSize bits. If MaxSizeBits is 0 then no maximum is set.
  263. Register extendRegister(Register ValReg, CCValAssign &VA,
  264. unsigned MaxSizeBits = 0);
  265. };
  266. /// Base class for ValueHandlers used for arguments coming into the current
  267. /// function, or for return values received from a call.
  268. struct IncomingValueHandler : public ValueHandler {
  269. IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
  270. : ValueHandler(/*IsIncoming*/ true, MIRBuilder, MRI) {}
  271. /// Insert G_ASSERT_ZEXT/G_ASSERT_SEXT or other hint instruction based on \p
  272. /// VA, returning the new register if a hint was inserted.
  273. Register buildExtensionHint(CCValAssign &VA, Register SrcReg, LLT NarrowTy);
  274. /// Provides a default implementation for argument handling.
  275. void assignValueToReg(Register ValVReg, Register PhysReg,
  276. CCValAssign VA) override;
  277. };
  278. /// Base class for ValueHandlers used for arguments passed to a function call,
  279. /// or for return values.
  280. struct OutgoingValueHandler : public ValueHandler {
  281. OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
  282. : ValueHandler(/*IsIncoming*/ false, MIRBuilder, MRI) {}
  283. };
  284. protected:
  285. /// Getter for generic TargetLowering class.
  286. const TargetLowering *getTLI() const {
  287. return TLI;
  288. }
  289. /// Getter for target specific TargetLowering class.
  290. template <class XXXTargetLowering>
  291. const XXXTargetLowering *getTLI() const {
  292. return static_cast<const XXXTargetLowering *>(TLI);
  293. }
  294. /// \returns Flags corresponding to the attributes on the \p ArgIdx-th
  295. /// parameter of \p Call.
  296. ISD::ArgFlagsTy getAttributesForArgIdx(const CallBase &Call,
  297. unsigned ArgIdx) const;
  298. /// \returns Flags corresponding to the attributes on the return from \p Call.
  299. ISD::ArgFlagsTy getAttributesForReturn(const CallBase &Call) const;
  300. /// Adds flags to \p Flags based off of the attributes in \p Attrs.
  301. /// \p OpIdx is the index in \p Attrs to add flags from.
  302. void addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
  303. const AttributeList &Attrs,
  304. unsigned OpIdx) const;
  305. template <typename FuncInfoTy>
  306. void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL,
  307. const FuncInfoTy &FuncInfo) const;
  308. /// Break \p OrigArgInfo into one or more pieces the calling convention can
  309. /// process, returned in \p SplitArgs. For example, this should break structs
  310. /// down into individual fields.
  311. ///
  312. /// If \p Offsets is non-null, it points to a vector to be filled in
  313. /// with the in-memory offsets of each of the individual values.
  314. void splitToValueTypes(const ArgInfo &OrigArgInfo,
  315. SmallVectorImpl<ArgInfo> &SplitArgs,
  316. const DataLayout &DL, CallingConv::ID CallConv,
  317. SmallVectorImpl<uint64_t> *Offsets = nullptr) const;
  318. /// Analyze the argument list in \p Args, using \p Assigner to populate \p
  319. /// CCInfo. This will determine the types and locations to use for passed or
  320. /// returned values. This may resize fields in \p Args if the value is split
  321. /// across multiple registers or stack slots.
  322. ///
  323. /// This is independent of the function state and can be used
  324. /// to determine how a call would pass arguments without needing to change the
  325. /// function. This can be used to check if arguments are suitable for tail
  326. /// call lowering.
  327. ///
  328. /// \return True if everything has succeeded, false otherwise.
  329. bool determineAssignments(ValueAssigner &Assigner,
  330. SmallVectorImpl<ArgInfo> &Args,
  331. CCState &CCInfo) const;
  332. /// Invoke ValueAssigner::assignArg on each of the given \p Args and then use
  333. /// \p Handler to move them to the assigned locations.
  334. ///
  335. /// \return True if everything has succeeded, false otherwise.
  336. bool determineAndHandleAssignments(
  337. ValueHandler &Handler, ValueAssigner &Assigner,
  338. SmallVectorImpl<ArgInfo> &Args, MachineIRBuilder &MIRBuilder,
  339. CallingConv::ID CallConv, bool IsVarArg,
  340. ArrayRef<Register> ThisReturnRegs = std::nullopt) const;
  341. /// Use \p Handler to insert code to handle the argument/return values
  342. /// represented by \p Args. It's expected determineAssignments previously
  343. /// processed these arguments to populate \p CCState and \p ArgLocs.
  344. bool
  345. handleAssignments(ValueHandler &Handler, SmallVectorImpl<ArgInfo> &Args,
  346. CCState &CCState, SmallVectorImpl<CCValAssign> &ArgLocs,
  347. MachineIRBuilder &MIRBuilder,
  348. ArrayRef<Register> ThisReturnRegs = std::nullopt) const;
  349. /// Check whether parameters to a call that are passed in callee saved
  350. /// registers are the same as from the calling function. This needs to be
  351. /// checked for tail call eligibility.
  352. bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
  353. const uint32_t *CallerPreservedMask,
  354. const SmallVectorImpl<CCValAssign> &ArgLocs,
  355. const SmallVectorImpl<ArgInfo> &OutVals) const;
  356. /// \returns True if the calling convention for a callee and its caller pass
  357. /// results in the same way. Typically used for tail call eligibility checks.
  358. ///
  359. /// \p Info is the CallLoweringInfo for the call.
  360. /// \p MF is the MachineFunction for the caller.
  361. /// \p InArgs contains the results of the call.
  362. /// \p CalleeAssigner specifies the target's handling of the argument types
  363. /// for the callee.
  364. /// \p CallerAssigner specifies the target's handling of the
  365. /// argument types for the caller.
  366. bool resultsCompatible(CallLoweringInfo &Info, MachineFunction &MF,
  367. SmallVectorImpl<ArgInfo> &InArgs,
  368. ValueAssigner &CalleeAssigner,
  369. ValueAssigner &CallerAssigner) const;
  370. public:
  371. CallLowering(const TargetLowering *TLI) : TLI(TLI) {}
  372. virtual ~CallLowering() = default;
  373. /// \return true if the target is capable of handling swifterror values that
  374. /// have been promoted to a specified register. The extended versions of
  375. /// lowerReturn and lowerCall should be implemented.
  376. virtual bool supportSwiftError() const {
  377. return false;
  378. }
  379. /// Load the returned value from the stack into virtual registers in \p VRegs.
  380. /// It uses the frame index \p FI and the start offset from \p DemoteReg.
  381. /// The loaded data size will be determined from \p RetTy.
  382. void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
  383. ArrayRef<Register> VRegs, Register DemoteReg,
  384. int FI) const;
  385. /// Store the return value given by \p VRegs into stack starting at the offset
  386. /// specified in \p DemoteReg.
  387. void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
  388. ArrayRef<Register> VRegs, Register DemoteReg) const;
  389. /// Insert the hidden sret ArgInfo to the beginning of \p SplitArgs.
  390. /// This function should be called from the target specific
  391. /// lowerFormalArguments when \p F requires the sret demotion.
  392. void insertSRetIncomingArgument(const Function &F,
  393. SmallVectorImpl<ArgInfo> &SplitArgs,
  394. Register &DemoteReg, MachineRegisterInfo &MRI,
  395. const DataLayout &DL) const;
  396. /// For the call-base described by \p CB, insert the hidden sret ArgInfo to
  397. /// the OrigArgs field of \p Info.
  398. void insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
  399. const CallBase &CB,
  400. CallLoweringInfo &Info) const;
  401. /// \return True if the return type described by \p Outs can be returned
  402. /// without performing sret demotion.
  403. bool checkReturn(CCState &CCInfo, SmallVectorImpl<BaseArgInfo> &Outs,
  404. CCAssignFn *Fn) const;
  405. /// Get the type and the ArgFlags for the split components of \p RetTy as
  406. /// returned by \c ComputeValueVTs.
  407. void getReturnInfo(CallingConv::ID CallConv, Type *RetTy, AttributeList Attrs,
  408. SmallVectorImpl<BaseArgInfo> &Outs,
  409. const DataLayout &DL) const;
  410. /// Toplevel function to check the return type based on the target calling
  411. /// convention. \return True if the return value of \p MF can be returned
  412. /// without performing sret demotion.
  413. bool checkReturnTypeForCallConv(MachineFunction &MF) const;
  414. /// This hook must be implemented to check whether the return values
  415. /// described by \p Outs can fit into the return registers. If false
  416. /// is returned, an sret-demotion is performed.
  417. virtual bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv,
  418. SmallVectorImpl<BaseArgInfo> &Outs,
  419. bool IsVarArg) const {
  420. return true;
  421. }
  422. /// This hook must be implemented to lower outgoing return values, described
  423. /// by \p Val, into the specified virtual registers \p VRegs.
  424. /// This hook is used by GlobalISel.
  425. ///
  426. /// \p FLI is required for sret demotion.
  427. ///
  428. /// \p SwiftErrorVReg is non-zero if the function has a swifterror parameter
  429. /// that needs to be implicitly returned.
  430. ///
  431. /// \return True if the lowering succeeds, false otherwise.
  432. virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
  433. ArrayRef<Register> VRegs, FunctionLoweringInfo &FLI,
  434. Register SwiftErrorVReg) const {
  435. if (!supportSwiftError()) {
  436. assert(SwiftErrorVReg == 0 && "attempt to use unsupported swifterror");
  437. return lowerReturn(MIRBuilder, Val, VRegs, FLI);
  438. }
  439. return false;
  440. }
  441. /// This hook behaves as the extended lowerReturn function, but for targets
  442. /// that do not support swifterror value promotion.
  443. virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
  444. ArrayRef<Register> VRegs,
  445. FunctionLoweringInfo &FLI) const {
  446. return false;
  447. }
  448. virtual bool fallBackToDAGISel(const MachineFunction &MF) const {
  449. return false;
  450. }
  451. /// This hook must be implemented to lower the incoming (formal)
  452. /// arguments, described by \p VRegs, for GlobalISel. Each argument
  453. /// must end up in the related virtual registers described by \p VRegs.
  454. /// In other words, the first argument should end up in \c VRegs[0],
  455. /// the second in \c VRegs[1], and so on. For each argument, there will be one
  456. /// register for each non-aggregate type, as returned by \c computeValueLLTs.
  457. /// \p MIRBuilder is set to the proper insertion for the argument
  458. /// lowering. \p FLI is required for sret demotion.
  459. ///
  460. /// \return True if the lowering succeeded, false otherwise.
  461. virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
  462. const Function &F,
  463. ArrayRef<ArrayRef<Register>> VRegs,
  464. FunctionLoweringInfo &FLI) const {
  465. return false;
  466. }
  467. /// This hook must be implemented to lower the given call instruction,
  468. /// including argument and return value marshalling.
  469. ///
  470. ///
  471. /// \return true if the lowering succeeded, false otherwise.
  472. virtual bool lowerCall(MachineIRBuilder &MIRBuilder,
  473. CallLoweringInfo &Info) const {
  474. return false;
  475. }
  476. /// Lower the given call instruction, including argument and return value
  477. /// marshalling.
  478. ///
  479. /// \p CI is the call/invoke instruction.
  480. ///
  481. /// \p ResRegs are the registers where the call's return value should be
  482. /// stored (or 0 if there is no return value). There will be one register for
  483. /// each non-aggregate type, as returned by \c computeValueLLTs.
  484. ///
  485. /// \p ArgRegs is a list of lists of virtual registers containing each
  486. /// argument that needs to be passed (argument \c i should be placed in \c
  487. /// ArgRegs[i]). For each argument, there will be one register for each
  488. /// non-aggregate type, as returned by \c computeValueLLTs.
  489. ///
  490. /// \p SwiftErrorVReg is non-zero if the call has a swifterror inout
  491. /// parameter, and contains the vreg that the swifterror should be copied into
  492. /// after the call.
  493. ///
  494. /// \p GetCalleeReg is a callback to materialize a register for the callee if
  495. /// the target determines it cannot jump to the destination based purely on \p
  496. /// CI. This might be because \p CI is indirect, or because of the limited
  497. /// range of an immediate jump.
  498. ///
  499. /// \return true if the lowering succeeded, false otherwise.
  500. bool lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &Call,
  501. ArrayRef<Register> ResRegs,
  502. ArrayRef<ArrayRef<Register>> ArgRegs, Register SwiftErrorVReg,
  503. std::function<unsigned()> GetCalleeReg) const;
  504. /// For targets which want to use big-endian can enable it with
  505. /// enableBigEndian() hook
  506. virtual bool enableBigEndian() const { return false; }
  507. /// For targets which support the "returned" parameter attribute, returns
  508. /// true if the given type is a valid one to use with "returned".
  509. virtual bool isTypeIsValidForThisReturn(EVT Ty) const { return false; }
  510. };
  511. } // end namespace llvm
#endif // LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif