CodeGenCommonISel.cpp 9.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287
  1. //===-- CodeGenCommonISel.cpp ---------------------------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file defines common utilities that are shared between SelectionDAG and
  10. // GlobalISel frameworks.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "llvm/CodeGen/CodeGenCommonISel.h"
  14. #include "llvm/Analysis/BranchProbabilityInfo.h"
  15. #include "llvm/CodeGen/MachineBasicBlock.h"
  16. #include "llvm/CodeGen/MachineFunction.h"
  17. #include "llvm/CodeGen/TargetInstrInfo.h"
  18. #include "llvm/CodeGen/TargetOpcodes.h"
  19. #include "llvm/IR/DebugInfoMetadata.h"
  20. #define DEBUG_TYPE "codegen-common"
  21. using namespace llvm;
  22. /// Add a successor MBB to ParentMBB< creating a new MachineBB for BB if SuccMBB
  23. /// is 0.
  24. MachineBasicBlock *
  25. StackProtectorDescriptor::addSuccessorMBB(
  26. const BasicBlock *BB, MachineBasicBlock *ParentMBB, bool IsLikely,
  27. MachineBasicBlock *SuccMBB) {
  28. // If SuccBB has not been created yet, create it.
  29. if (!SuccMBB) {
  30. MachineFunction *MF = ParentMBB->getParent();
  31. MachineFunction::iterator BBI(ParentMBB);
  32. SuccMBB = MF->CreateMachineBasicBlock(BB);
  33. MF->insert(++BBI, SuccMBB);
  34. }
  35. // Add it as a successor of ParentMBB.
  36. ParentMBB->addSuccessor(
  37. SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
  38. return SuccMBB;
  39. }
/// Given that the input MI is before a partial terminator sequence TSeq, return
/// true if MI + TSeq is also a partial terminator sequence.
///
/// A Terminator sequence is a sequence of MachineInstrs which at this point in
/// lowering copy vregs into physical registers, which are then passed into
/// terminator instructions so we can satisfy ABI constraints. A partial
/// terminator sequence is an improper subset of a terminator sequence (i.e. it
/// may be the whole terminator sequence).
static bool MIIsInTerminatorSequence(const MachineInstr &MI) {
  // If we do not have a copy or an implicit def, we return true if and only if
  // MI is a debug value.
  if (!MI.isCopy() && !MI.isImplicitDef()) {
    // Sometimes DBG_VALUE MI sneak in between the copies from the vregs to the
    // physical registers if there is debug info associated with the terminator
    // of our mbb. We want to include said debug info in our terminator
    // sequence, so we return true in that case.
    if (MI.isDebugInstr())
      return true;

    // For GlobalISel, we may have extension instructions for arguments within
    // copy sequences. Allow these.
    switch (MI.getOpcode()) {
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_MERGE_VALUES:
    case TargetOpcode::G_UNMERGE_VALUES:
    case TargetOpcode::G_CONCAT_VECTORS:
    case TargetOpcode::G_BUILD_VECTOR:
    case TargetOpcode::G_EXTRACT:
      return true;
    default:
      return false;
    }
  }

  // We have left the terminator sequence if we are not doing one of the
  // following:
  //
  // 1. Copying a vreg into a physical register.
  // 2. Copying a vreg into a vreg.
  // 3. Defining a register via an implicit def.

  // OPI should always be a register definition...
  MachineInstr::const_mop_iterator OPI = MI.operands_begin();
  if (!OPI->isReg() || !OPI->isDef())
    return false;

  // Defining any register via an implicit def is always ok.
  if (MI.isImplicitDef())
    return true;

  // Grab the copy source... (for a COPY, operand 0 is the dest and operand 1
  // is the source).
  MachineInstr::const_mop_iterator OPI2 = OPI;
  ++OPI2;
  assert(OPI2 != MI.operands_end()
         && "Should have a copy implying we should have 2 arguments.");

  // Make sure that the copy dest is not a vreg when the copy source is a
  // physical register.
  if (!OPI2->isReg() ||
      (!OPI->getReg().isPhysical() && OPI2->getReg().isPhysical()))
    return false;

  return true;
}
/// Find the split point at which to splice the end of BB into its successor
/// stack protector check machine basic block.
///
/// On many platforms, due to ABI constraints, terminators, even before register
/// allocation, use physical registers. This creates an issue for us since
/// physical registers at this point can not travel across basic
/// blocks. Luckily, selectiondag always moves physical registers into vregs
/// when they enter functions and moves them through a sequence of copies back
/// into the physical registers right before the terminator creating a
/// ``Terminator Sequence''. This function is searching for the beginning of the
/// terminator sequence so that we can ensure that we splice off not just the
/// terminator, but additionally the copies that move the vregs into the
/// physical registers.
MachineBasicBlock::iterator
llvm::findSplitPointForStackProtector(MachineBasicBlock *BB,
                                      const TargetInstrInfo &TII) {
  MachineBasicBlock::iterator SplitPoint = BB->getFirstTerminator();
  // If the terminator is the first instruction, there is nothing before it to
  // carry along.
  if (SplitPoint == BB->begin())
    return SplitPoint;

  MachineBasicBlock::iterator Start = BB->begin();
  MachineBasicBlock::iterator Previous = SplitPoint;
  // Step backwards over debug instructions immediately preceding the
  // terminator to find the last "real" instruction.
  do {
    --Previous;
  } while (Previous != Start && Previous->isDebugInstr());

  if (TII.isTailCall(*SplitPoint) &&
      Previous->getOpcode() == TII.getCallFrameDestroyOpcode()) {
    // Call frames cannot be nested, so if this frame is describing the tail
    // call itself, then we must insert before the sequence even starts. For
    // example:
    //     <split point>
    //     ADJCALLSTACKDOWN ...
    //     <Moves>
    //     ADJCALLSTACKUP ...
    //     TAILJMP somewhere
    // On the other hand, it could be an unrelated call in which case this tail
    // call has no register moves of its own and should be the split point. For
    // example:
    //     ADJCALLSTACKDOWN
    //     CALL something_else
    //     ADJCALLSTACKUP
    //     <split point>
    //     TAILJMP somewhere
    do {
      --Previous;
      // Hit a call before the frame setup: the frame belongs to that other
      // call, so the tail call itself is the split point.
      if (Previous->isCall())
        return SplitPoint;
    } while (Previous->getOpcode() != TII.getCallFrameSetupOpcode());

    // The frame describes the tail call: split before ADJCALLSTACKDOWN.
    return Previous;
  }

  // Walk backwards through the terminator sequence (copies, implicit defs,
  // debug values, and GISel extensions) so the splice takes those
  // instructions along with the terminator.
  while (MIIsInTerminatorSequence(*Previous)) {
    SplitPoint = Previous;
    if (Previous == Start)
      break;
    --Previous;
  }

  return SplitPoint;
}
  157. unsigned llvm::getInvertedFPClassTest(unsigned Test) {
  158. unsigned InvertedTest = ~Test & fcAllFlags;
  159. switch (InvertedTest) {
  160. default:
  161. break;
  162. case fcNan:
  163. case fcSNan:
  164. case fcQNan:
  165. case fcInf:
  166. case fcPosInf:
  167. case fcNegInf:
  168. case fcNormal:
  169. case fcPosNormal:
  170. case fcNegNormal:
  171. case fcSubnormal:
  172. case fcPosSubnormal:
  173. case fcNegSubnormal:
  174. case fcZero:
  175. case fcPosZero:
  176. case fcNegZero:
  177. case fcFinite:
  178. case fcPosFinite:
  179. case fcNegFinite:
  180. return InvertedTest;
  181. }
  182. return 0;
  183. }
/// Salvage a debug use of a COPY's result: the debug user can simply refer to
/// the copy's source instead, with no extra expression ops needed. Returns the
/// source operand.
static MachineOperand *getSalvageOpsForCopy(const MachineRegisterInfo &MRI,
                                            MachineInstr &Copy) {
  assert(Copy.getOpcode() == TargetOpcode::COPY && "Must be a COPY");

  // Operand 0 is the def; operand 1 is the copied value.
  return &Copy.getOperand(1);
}
  189. static MachineOperand *getSalvageOpsForTrunc(const MachineRegisterInfo &MRI,
  190. MachineInstr &Trunc,
  191. SmallVectorImpl<uint64_t> &Ops) {
  192. assert(Trunc.getOpcode() == TargetOpcode::G_TRUNC && "Must be a G_TRUNC");
  193. const auto FromLLT = MRI.getType(Trunc.getOperand(1).getReg());
  194. const auto ToLLT = MRI.getType(Trunc.defs().begin()->getReg());
  195. // TODO: Support non-scalar types.
  196. if (!FromLLT.isScalar()) {
  197. return nullptr;
  198. }
  199. auto ExtOps = DIExpression::getExtOps(FromLLT.getSizeInBits(),
  200. ToLLT.getSizeInBits(), false);
  201. Ops.append(ExtOps.begin(), ExtOps.end());
  202. return &Trunc.getOperand(1);
  203. }
  204. static MachineOperand *salvageDebugInfoImpl(const MachineRegisterInfo &MRI,
  205. MachineInstr &MI,
  206. SmallVectorImpl<uint64_t> &Ops) {
  207. switch (MI.getOpcode()) {
  208. case TargetOpcode::G_TRUNC:
  209. return getSalvageOpsForTrunc(MRI, MI, Ops);
  210. case TargetOpcode::COPY:
  211. return getSalvageOpsForCopy(MRI, MI);
  212. default:
  213. return nullptr;
  214. }
  215. }
/// For each DBG_VALUE in \p DbgUsers that uses \p MI's def, try to re-express
/// the debug location in terms of MI's source operand (plus extra expression
/// ops) so the debug value survives MI's removal/rewrite.
void llvm::salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI,
                                       MachineInstr &MI,
                                       ArrayRef<MachineOperand *> DbgUsers) {
  // An arbitrarily chosen limit on the maximum size of a debug expression we
  // can salvage up to, used for performance reasons.
  const unsigned MaxExpressionSize = 128;

  for (auto *DefMO : DbgUsers) {
    MachineInstr *DbgMI = DefMO->getParent();
    // Indirect debug values are not handled here.
    if (DbgMI->isIndirectDebugValue()) {
      continue;
    }

    int UseMOIdx = DbgMI->findRegisterUseOperandIdx(DefMO->getReg());
    assert(UseMOIdx != -1 && DbgMI->hasDebugOperandForReg(DefMO->getReg()) &&
           "Must use salvaged instruction as its location");

    // TODO: Support DBG_VALUE_LIST.
    if (DbgMI->getOpcode() != TargetOpcode::DBG_VALUE) {
      assert(DbgMI->getOpcode() == TargetOpcode::DBG_VALUE_LIST &&
             "Must be either DBG_VALUE or DBG_VALUE_LIST");
      continue;
    }

    const DIExpression *SalvagedExpr = DbgMI->getDebugExpression();
    SmallVector<uint64_t, 16> Ops;
    // Ask the opcode-specific helper for a replacement operand and the
    // expression ops needed to describe the value in terms of it.
    auto Op0 = salvageDebugInfoImpl(MRI, MI, Ops);
    if (!Op0)
      continue;
    SalvagedExpr = DIExpression::appendOpsToArg(SalvagedExpr, Ops, 0, true);

    // Only rewrite the debug value if the salvaged expression stays within
    // the size limit above.
    bool IsValidSalvageExpr =
        SalvagedExpr->getNumElements() <= MaxExpressionSize;
    if (IsValidSalvageExpr) {
      auto &UseMO = DbgMI->getOperand(UseMOIdx);
      UseMO.setReg(Op0->getReg());
      UseMO.setSubReg(Op0->getSubReg());
      DbgMI->getDebugExpressionOp().setMetadata(SalvagedExpr);

      LLVM_DEBUG(dbgs() << "SALVAGE: " << *DbgMI << '\n');
    }
  }
}