#pragma once
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
//===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file declares the IRTranslator pass.
/// This pass is responsible for translating LLVM IR into MachineInstr.
/// It uses target hooks to lower the ABI, but aside from that, the code
/// generated by the pass is generic. This is the default translator used for
/// GlobalISel.
///
/// \todo Replace the comments with actual doxygen comments.
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
#define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CodeGen.h"
#include <memory>
#include <utility>
namespace llvm {

class AllocaInst;
class BasicBlock;
class CallInst;
class CallLowering;
class Constant;
class ConstrainedFPIntrinsic;
class DataLayout;
class Instruction;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineRegisterInfo;
class OptimizationRemarkEmitter;
class PHINode;
class TargetPassConfig;
class User;
class Value;
// Technically the pass should run on a hypothetical MachineModule,
// since it should translate Globals into some sort of MachineGlobal.
// The MachineGlobal should ultimately just be a transfer of ownership of
// the interesting bits that are relevant to represent a global value.
// That being said, we could investigate what it would cost to just duplicate
// the information from the LLVM IR.
// The idea is that ultimately we would be able to free up the memory used
// by the LLVM IR as soon as the translation is over.
class IRTranslator : public MachineFunctionPass {
public:
  static char ID;

private:
  /// Interface used to lower everything related to calls.
  const CallLowering *CLI;

  /// This class contains the mapping between Values and the related
  /// virtual-register data (vreg lists and offsets).
  class ValueToVRegInfo {
  public:
    ValueToVRegInfo() = default;

    using VRegListT = SmallVector<Register, 1>;
    using OffsetListT = SmallVector<uint64_t, 1>;

    using const_vreg_iterator =
        DenseMap<const Value *, VRegListT *>::const_iterator;
    using const_offset_iterator =
        DenseMap<const Value *, OffsetListT *>::const_iterator;

    inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); }

    VRegListT *getVRegs(const Value &V) {
      auto It = ValToVRegs.find(&V);
      if (It != ValToVRegs.end())
        return It->second;
      return insertVRegs(V);
    }

    OffsetListT *getOffsets(const Value &V) {
      auto It = TypeToOffsets.find(V.getType());
      if (It != TypeToOffsets.end())
        return It->second;
      return insertOffsets(V);
    }

    const_vreg_iterator findVRegs(const Value &V) const {
      return ValToVRegs.find(&V);
    }

    bool contains(const Value &V) const {
      return ValToVRegs.find(&V) != ValToVRegs.end();
    }

    void reset() {
      ValToVRegs.clear();
      TypeToOffsets.clear();
      VRegAlloc.DestroyAll();
      OffsetAlloc.DestroyAll();
    }

  private:
    VRegListT *insertVRegs(const Value &V) {
      assert(ValToVRegs.find(&V) == ValToVRegs.end() && "Value already exists");
      // We placement new using our fast allocator since we never try to free
      // the vectors until translation is finished.
      auto *VRegList = new (VRegAlloc.Allocate()) VRegListT();
      ValToVRegs[&V] = VRegList;
      return VRegList;
    }

    OffsetListT *insertOffsets(const Value &V) {
      assert(TypeToOffsets.find(V.getType()) == TypeToOffsets.end() &&
             "Type already exists");
      auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT();
      TypeToOffsets[V.getType()] = OffsetList;
      return OffsetList;
    }

    SpecificBumpPtrAllocator<VRegListT> VRegAlloc;
    SpecificBumpPtrAllocator<OffsetListT> OffsetAlloc;

    // We store pointers to vectors here since references may be invalidated
    // while we hold them if we stored the vectors directly.
    DenseMap<const Value *, VRegListT *> ValToVRegs;
    DenseMap<const Type *, OffsetListT *> TypeToOffsets;
  };
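
  // Illustrative sketch only (not part of the pass): how a client is expected
  // to use ValueToVRegInfo. The MachineRegisterInfo (MRI), the Value V and the
  // scalar LLT below are assumptions made for this example.
  //
  //   ValueToVRegInfo VMap;
  //   ValueToVRegInfo::VRegListT &VRegs = *VMap.getVRegs(V); // allocates if missing
  //   if (VRegs.empty())
  //     VRegs.push_back(MRI.createGenericVirtualRegister(LLT::scalar(32)));
  //   assert(VMap.contains(V)); // later lookups return the same list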
  /// Mapping of the values of the current LLVM IR function to the related
  /// virtual registers and offsets.
  ValueToVRegInfo VMap;

  // N.b. it's not completely obvious that this will be sufficient for every
  // LLVM IR construct (with "invoke" being the obvious candidate to mess up
  // our lives).
  DenseMap<const BasicBlock *, MachineBasicBlock *> BBToMBB;

  // One BasicBlock can be translated to multiple MachineBasicBlocks. For such
  // BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains
  // a mapping between the edges arriving at the BasicBlock and the
  // corresponding created MachineBasicBlocks. Some BasicBlocks that get
  // translated to a single MachineBasicBlock may also end up in this map.
  using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
  DenseMap<CFGEdge, SmallVector<MachineBasicBlock *, 1>> MachinePreds;

  // List of stubbed PHI instructions, for values and basic blocks to be filled
  // in once all MachineBasicBlocks have been created.
  SmallVector<std::pair<const PHINode *, SmallVector<MachineInstr *, 1>>, 4>
      PendingPHIs;

  /// Record of what frame index has been allocated to specified allocas for
  /// this function.
  DenseMap<const AllocaInst *, int> FrameIndices;

  SwiftErrorValueTracking SwiftError;
  /// \name Methods for translating from LLVM IR to MachineInstr.
  /// \see ::translate for general information on the translate methods.
  /// @{

  /// Translate \p Inst into its corresponding MachineInstr instruction(s).
  /// Insert the newly translated instruction(s) right where the CurBuilder
  /// is set.
  ///
  /// The general algorithm is:
  /// 1. Look for a virtual register for each operand or
  ///    create one.
  /// 2. Update the VMap accordingly.
  /// 2.alt. For constant arguments, if they are compile-time constants,
  ///    produce an immediate in the right operand and do not touch
  ///    ValToReg. Actually we will go with a virtual register for each
  ///    constant because it may be expensive to actually materialize the
  ///    constant. Moreover, if the constant spans several instructions,
  ///    CSE may not catch them.
  ///    => Update ValToVReg and remember that we saw a constant in Constants.
  ///    We will materialize all the constants in finalize.
  /// Note: we would need to do something so that we can recognize such operands
  /// as constants.
  /// 3. Create the generic instruction.
  ///
  /// \return true if the translation succeeded.
  bool translate(const Instruction &Inst);
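
  // Illustrative sketch only: translating "%z = add i32 %x, %y" conceptually
  // reduces to looking up (or creating) vregs for the operands and the result,
  // then emitting one generic instruction; this mirrors what translateBinaryOp
  // below does. Names are the members and helpers declared in this class.
  //
  //   Register Op0 = getOrCreateVReg(*I.getOperand(0));
  //   Register Op1 = getOrCreateVReg(*I.getOperand(1));
  //   Register Res = getOrCreateVReg(I); // updates VMap for later uses of %z
  //   CurBuilder->buildInstr(TargetOpcode::G_ADD, {Res}, {Op0, Op1});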
  /// Materialize \p C into virtual-register \p Reg. The generic instructions
  /// performing this materialization will be inserted into the entry block of
  /// the function.
  ///
  /// \return true if the materialization succeeded.
  bool translate(const Constant &C, Register Reg);

  // Translate U as a copy of V.
  bool translateCopy(const User &U, const Value &V,
                     MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST
  /// is emitted.
  bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM load instruction into generic IR.
  bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM store instruction into generic IR.
  bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM string intrinsic (memcpy, memset, ...).
  bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
                        unsigned Opcode);

  void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);

  bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                  MachineIRBuilder &MIRBuilder);
  bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder);

  /// Helper function for translateSimpleIntrinsic.
  /// \return The generic opcode for \p IntrinsicID if \p IntrinsicID is a
  /// simple intrinsic (ceil, fabs, etc.). Otherwise, returns
  /// Intrinsic::not_intrinsic.
  unsigned getSimpleIntrinsicOpcode(Intrinsic::ID ID);

  /// Translates the intrinsics defined in getSimpleIntrinsicOpcode.
  /// \return true if the translation succeeded.
  bool translateSimpleIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                MachineIRBuilder &MIRBuilder);

  bool translateConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI,
                                       MachineIRBuilder &MIRBuilder);

  bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                               MachineIRBuilder &MIRBuilder);

  bool translateInlineAsm(const CallBase &CB, MachineIRBuilder &MIRBuilder);

  /// Returns true if the value should be split into multiple LLTs.
  /// If \p Offsets is given then the split type's offsets will be stored in
  /// it. If \p Offsets is not empty it will be cleared first.
  bool valueIsSplit(const Value &V,
                    SmallVectorImpl<uint64_t> *Offsets = nullptr);
  /// Common code for translating normal calls or invokes.
  bool translateCallBase(const CallBase &CB, MachineIRBuilder &MIRBuilder);

  /// Translate call instruction.
  /// \pre \p U is a call instruction.
  bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);

  /// When an invoke or a cleanupret unwinds to the next EH pad, there are
  /// many places it could ultimately go. In the IR, we have a single unwind
  /// destination, but in the machine CFG, we enumerate all the possible blocks.
  /// This function skips over imaginary basic blocks that hold catchswitch
  /// instructions, and finds all the "real" machine
  /// basic block destinations. As those destinations may not be successors of
  /// EHPadBB, here we also calculate the edge probability to those
  /// destinations. The passed-in Prob is the edge probability to EHPadBB.
  bool findUnwindDestinations(
      const BasicBlock *EHPadBB, BranchProbability Prob,
      SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
          &UnwindDests);

  bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate one of LLVM's cast instructions into MachineInstrs, with the
  /// given generic Opcode.
  bool translateCast(unsigned Opcode, const User &U,
                     MachineIRBuilder &MIRBuilder);

  /// Translate a phi instruction.
  bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate a comparison (icmp or fcmp) instruction or constant.
  bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an integer compare instruction (or constant).
  bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCompare(U, MIRBuilder);
  }

  /// Translate a floating-point compare instruction (or constant).
  bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCompare(U, MIRBuilder);
  }

  /// Add remaining operands onto phis we've translated. Executed after all
  /// MachineBasicBlocks for the function have been created.
  void finishPendingPhis();
  /// Translate \p Inst into a unary operation \p Opcode.
  /// \pre \p U is a unary operation.
  bool translateUnaryOp(unsigned Opcode, const User &U,
                        MachineIRBuilder &MIRBuilder);

  /// Translate \p Inst into a binary operation \p Opcode.
  /// \pre \p U is a binary operation.
  bool translateBinaryOp(unsigned Opcode, const User &U,
                         MachineIRBuilder &MIRBuilder);

  /// If the set of cases should be emitted as a series of branches, return
  /// true. If we should emit this as a bunch of and/or'd together conditions,
  /// return false.
  bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);

  /// Helper method for findMergedConditions.
  /// This function emits a branch and is used at the leaves of an OR or an
  /// AND operator tree.
  void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    MachineBasicBlock *CurBB,
                                    MachineBasicBlock *SwitchBB,
                                    BranchProbability TProb,
                                    BranchProbability FProb, bool InvertCond);

  /// Used during condbr translation to find trees of conditions that can be
  /// optimized.
  void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            MachineBasicBlock *SwitchBB,
                            Instruction::BinaryOps Opc, BranchProbability TProb,
                            BranchProbability FProb, bool InvertCond);
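
  // Illustrative sketch only: for IR such as
  //   %c = and i1 %a, %b
  //   br i1 %c, label %then, label %else
  // findMergedConditions can lower the single branch as a chain of two
  // conditional branches (first on %a, then on %b), with the branch
  // probabilities split between them, instead of materializing the "and"
  // result first; shouldEmitAsBranches decides whether that is worthwhile.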
  /// Translate branch (br) instruction.
  /// \pre \p U is a branch instruction.
  bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);

  // Begin switch lowering functions.
  bool emitJumpTableHeader(SwitchCG::JumpTable &JT,
                           SwitchCG::JumpTableHeader &JTH,
                           MachineBasicBlock *HeaderBB);
  void emitJumpTable(SwitchCG::JumpTable &JT, MachineBasicBlock *MBB);

  void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
                      MachineIRBuilder &MIB);

  /// Generate code for the BitTest header block, which precedes each sequence
  /// of BitTestCases.
  void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
                         MachineBasicBlock *SwitchMBB);
  /// Generate code to produce one "bit test" for a given BitTestCase \p B.
  void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
                       BranchProbability BranchProbToNext, Register Reg,
                       SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);

  bool lowerJumpTableWorkItem(
      SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
      MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
      MachineIRBuilder &MIB, MachineFunction::iterator BBI,
      BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
      MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);

  bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
                                MachineBasicBlock *Fallthrough,
                                bool FallthroughUnreachable,
                                BranchProbability UnhandledProbs,
                                MachineBasicBlock *CurMBB,
                                MachineIRBuilder &MIB,
                                MachineBasicBlock *SwitchMBB);

  bool lowerBitTestWorkItem(
      SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
      MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
      MachineIRBuilder &MIB, MachineFunction::iterator BBI,
      BranchProbability DefaultProb, BranchProbability UnhandledProbs,
      SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
      bool FallthroughUnreachable);

  bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
                           MachineBasicBlock *SwitchMBB,
                           MachineBasicBlock *DefaultMBB,
                           MachineIRBuilder &MIB);

  bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
  // End switch lowering section.
  bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate return (ret) instruction.
  /// The target needs to implement CallLowering::lowerReturn for
  /// this to succeed.
  /// \pre \p U is a return instruction.
  bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
  }
  bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
  }
  bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
  }
  bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
  }
  bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
  }
  bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
  }
  bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
  }
  bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
  }
  bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
  }
  bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
  }
  bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
  }
  bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
  }
  bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
  }
  bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
  }
  bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
  }
  bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
  }
  bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
  }
  bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
  }
  bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
  }
  bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
  }
  bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
  }
  bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
  }
  bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
  }
  bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
  }
  bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
  }
  bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
  }
  bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
  }
  bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
  }
  bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
  }
  bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateFence(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateFreeze(const User &U, MachineIRBuilder &MIRBuilder);
  // Stubs to keep the compiler happy while we implement the rest of the
  // translation.
  bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
  }
  bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  /// @}
  // Builder for machine instructions, a la IRBuilder.
  // I.e., compared to regular MIBuilder, this one also inserts the instruction
  // in the current block, it can create blocks, etc., basically a kind of
  // IRBuilder, but for Machine IR.
  // CSEMIRBuilder CurBuilder;
  std::unique_ptr<MachineIRBuilder> CurBuilder;

  // Builder set to the entry block (just after ABI lowering instructions). Used
  // as a convenient location for Constants.
  // CSEMIRBuilder EntryBuilder;
  std::unique_ptr<MachineIRBuilder> EntryBuilder;

  // The MachineFunction currently being translated.
  MachineFunction *MF;

  /// MachineRegisterInfo used to create virtual registers.
  MachineRegisterInfo *MRI = nullptr;

  const DataLayout *DL;

  /// Current target configuration. Controls how the pass handles errors.
  const TargetPassConfig *TPC;

  CodeGenOpt::Level OptLevel;

  /// Current optimization remark emitter. Used to report failures.
  std::unique_ptr<OptimizationRemarkEmitter> ORE;

  FunctionLoweringInfo FuncInfo;

  // True unless the Target Machine specifies no optimizations or the
  // function has the optnone attribute.
  bool EnableOpts = false;

  /// True when the block contains a tail call. This allows the IRTranslator to
  /// stop translating such blocks early.
  bool HasTailCall = false;

  StackProtectorDescriptor SPDescriptor;
  /// Switch analysis and optimization.
  class GISelSwitchLowering : public SwitchCG::SwitchLowering {
  public:
    GISelSwitchLowering(IRTranslator *irt, FunctionLoweringInfo &funcinfo)
        : SwitchLowering(funcinfo), IRT(irt) {
      assert(irt && "irt is null!");
    }

    virtual void addSuccessorWithProb(
        MachineBasicBlock *Src, MachineBasicBlock *Dst,
        BranchProbability Prob = BranchProbability::getUnknown()) override {
      IRT->addSuccessorWithProb(Src, Dst, Prob);
    }

    virtual ~GISelSwitchLowering() = default;

  private:
    IRTranslator *IRT;
  };

  std::unique_ptr<GISelSwitchLowering> SL;
  // * Insert all the code needed to materialize the constants
  //   at the proper place, e.g., entry block or dominator block
  //   of each constant depending on how fancy we want to be.
  // * Clear the different maps.
  void finalizeFunction();

  // Processing steps done per block, e.g., emitting jump tables, stack
  // protectors etc. Returns true if no errors, false if there was a problem
  // that caused an abort.
  bool finalizeBasicBlock(const BasicBlock &BB, MachineBasicBlock &MBB);

  /// Codegen a new tail for a stack protector check ParentMBB which has had
  /// its tail spliced into a stack protector check success bb.
  ///
  /// For a high level explanation of how this fits into the stack protector
  /// generation see the comment on the declaration of class
  /// StackProtectorDescriptor.
  ///
  /// \return true if there were no problems.
  bool emitSPDescriptorParent(StackProtectorDescriptor &SPD,
                              MachineBasicBlock *ParentBB);

  /// Codegen the failure basic block for a stack protector check.
  ///
  /// A failure stack protector machine basic block consists simply of a call
  /// to __stack_chk_fail().
  ///
  /// For a high level explanation of how this fits into the stack protector
  /// generation see the comment on the declaration of class
  /// StackProtectorDescriptor.
  ///
  /// \return true if there were no problems.
  bool emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
                               MachineBasicBlock *FailureBB);
  /// Get the VRegs that represent \p Val.
  /// Non-aggregate types have just one corresponding VReg and the list can be
  /// used as a single "unsigned". Aggregates get flattened. If such VRegs do
  /// not exist, they are created.
  ArrayRef<Register> getOrCreateVRegs(const Value &Val);

  Register getOrCreateVReg(const Value &Val) {
    auto Regs = getOrCreateVRegs(Val);
    if (Regs.empty())
      return 0;
    assert(Regs.size() == 1 &&
           "attempt to get single VReg for aggregate or void");
    return Regs[0];
  }
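
  // Illustrative sketch only: for an aggregate value such as "{ i64, i32 }",
  // getOrCreateVRegs hands back one vreg per flattened element, with the
  // corresponding VMap offsets (in bits) determined by the DataLayout, e.g.
  // 0 and 64 on a typical 64-bit target. "Pair" and "IntVal" below are
  // hypothetical Values used only for this example.
  //
  //   ArrayRef<Register> Parts = getOrCreateVRegs(*Pair); // Parts.size() == 2
  //   Register Scalar = getOrCreateVReg(*IntVal);         // exactly one vreg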
  /// Allocate some vregs and offsets in the VMap. Then populate just the
  /// offsets while leaving the vregs empty.
  ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);

  /// Get the frame index that represents \p AI.
  /// If no frame index has been allocated yet, it is created.
  int getOrCreateFrameIndex(const AllocaInst &AI);

  /// Get the alignment of the given memory operation instruction. This will
  /// either be the explicitly specified value or the ABI-required alignment
  /// for the type being accessed (according to the Module's DataLayout).
  Align getMemOpAlign(const Instruction &I);

  /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
  /// returned will be the head of the translated block (suitable for branch
  /// destinations).
  MachineBasicBlock &getMBB(const BasicBlock &BB);

  /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
  /// to `Edge.first` at the IR level. This is used when IRTranslation creates
  /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
  /// represented simply by the IR-level CFG.
  void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);

  /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
  /// this is just the single MachineBasicBlock corresponding to the predecessor
  /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
  /// preceding the original though (e.g. switch instructions).
  SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
    auto RemappedEdge = MachinePreds.find(Edge);
    if (RemappedEdge != MachinePreds.end())
      return RemappedEdge->second;
    return SmallVector<MachineBasicBlock *, 4>(1, &getMBB(*Edge.first));
  }

  /// Return branch probability calculated by BranchProbabilityInfo for IR
  /// blocks.
  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                       const MachineBasicBlock *Dst) const;

  void addSuccessorWithProb(
      MachineBasicBlock *Src, MachineBasicBlock *Dst,
      BranchProbability Prob = BranchProbability::getUnknown());
public:
  IRTranslator(CodeGenOpt::Level OptLevel = CodeGenOpt::None);

  StringRef getPassName() const override { return "IRTranslator"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  // Algo:
  //   CallLowering = MF.subtarget.getCallLowering()
  //   F = MF.getParent()
  //   MIRBuilder.reset(MF)
  //   getMBB(F.getEntryBB())
  //   CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
  //   for each bb in F
  //     getMBB(bb)
  //     for each inst in bb
  //       if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
  //         report_fatal_error("Don't know how to translate input");
  //   finalize()
  bool runOnMachineFunction(MachineFunction &MF) override;
};
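
// Illustrative sketch only: how a target typically schedules this pass in its
// GlobalISel pipeline. The hook lives in the target's TargetPassConfig
// subclass (here a hypothetical MyTargetPassConfig), not in this header.
//
//   bool MyTargetPassConfig::addIRTranslator() {
//     addPass(new IRTranslator(getOptLevel()));
//     return false;
//   }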
} // end namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H

#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif