
//===--- AArch64StorePairSuppress.cpp --- Suppress store pair formation ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass identifies floating point stores that should not be combined into
// store pairs. Later we may do the same for floating point loads.
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-stp-suppress"

#define STPSUPPRESS_PASS_NAME "AArch64 Store Pair Suppression"

namespace {
class AArch64StorePairSuppress : public MachineFunctionPass {
  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const MachineRegisterInfo *MRI;
  TargetSchedModel SchedModel;
  MachineTraceMetrics *Traces;
  MachineTraceMetrics::Ensemble *MinInstr;

public:
  static char ID;
  AArch64StorePairSuppress() : MachineFunctionPass(ID) {
    initializeAArch64StorePairSuppressPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return STPSUPPRESS_PASS_NAME; }

  bool runOnMachineFunction(MachineFunction &F) override;

private:
  bool shouldAddSTPToBlock(const MachineBasicBlock *BB);

  bool isNarrowFPStore(const MachineInstr &MI);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachineTraceMetrics>();
    AU.addPreserved<MachineTraceMetrics>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

char AArch64StorePairSuppress::ID = 0;

} // anonymous namespace

INITIALIZE_PASS(AArch64StorePairSuppress, "aarch64-stp-suppress",
                STPSUPPRESS_PASS_NAME, false, false)

FunctionPass *llvm::createAArch64StorePairSuppressPass() {
  return new AArch64StorePairSuppress();
}

/// Return true if an STP can be added to this block without increasing the
/// critical resource height. STP is good to form in Ld/St limited blocks and
/// bad to form in floating-point limited blocks. This is true independent of
/// the critical path. If the critical path is longer than the resource height,
/// the extra vector ops can limit physreg renaming. Otherwise, it could simply
/// oversaturate the vector units.
bool AArch64StorePairSuppress::shouldAddSTPToBlock(const MachineBasicBlock *BB) {
  if (!MinInstr)
    MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);

  MachineTraceMetrics::Trace BBTrace = MinInstr->getTrace(BB);
  unsigned ResLength = BBTrace.getResourceLength();

  // Get the machine model's scheduling class for STPDi.
  // Bypass TargetSchedule's SchedClass resolution since we only have an opcode.
  unsigned SCIdx = TII->get(AArch64::STPDi).getSchedClass();
  const MCSchedClassDesc *SCDesc =
      SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdx);

  // If a subtarget does not define resources for STPDi, bail here.
  if (SCDesc->isValid() && !SCDesc->isVariant()) {
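    // Passing SCDesc as an extra instruction asks MachineTraceMetrics for the
    // resource length the trace would have with one more STP in it, so the
    // comparison below checks whether forming a pair would raise the block's
    // critical resource height.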
    unsigned ResLenWithSTP = BBTrace.getResourceLength(std::nullopt, SCDesc);
    if (ResLenWithSTP > ResLength) {
      LLVM_DEBUG(dbgs() << "  Suppress STP in BB: " << BB->getNumber()
                        << " resources " << ResLength << " -> " << ResLenWithSTP
                        << "\n");
      return false;
    }
  }
  return true;
}

/// Return true if this is a floating-point store smaller than the V reg. On
/// Cyclone, these require a vector shuffle before storing a pair.
/// Ideally we would call getMatchingPairOpcode() and have the machine model
/// tell us if it's profitable with no CPU knowledge here.
///
/// FIXME: We plan to develop a decent Target abstraction for simple loads and
/// stores. Until then use a nasty switch similar to AArch64LoadStoreOptimizer.
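///
/// (STRSui/STRDui below are the scaled unsigned-offset forms and
/// STURSi/STURDi the unscaled forms of the 32- and 64-bit FP stores, all of
/// which are narrower than the 128-bit Q register.)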
bool AArch64StorePairSuppress::isNarrowFPStore(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STURSi:
  case AArch64::STURDi:
    return true;
  }
}

bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()) || MF.getFunction().hasOptSize())
    return false;

  const TargetSubtargetInfo &ST = MF.getSubtarget();
  TII = static_cast<const AArch64InstrInfo *>(ST.getInstrInfo());
  TRI = ST.getRegisterInfo();
  MRI = &MF.getRegInfo();
  SchedModel.init(&ST);
  Traces = &getAnalysis<MachineTraceMetrics>();
  MinInstr = nullptr;

  LLVM_DEBUG(dbgs() << "*** " << getPassName() << ": " << MF.getName() << '\n');

  if (!SchedModel.hasInstrSchedModel()) {
    LLVM_DEBUG(dbgs() << "  Skipping pass: no machine model present.\n");
    return false;
  }

  // Check for a sequence of stores to the same base address. We don't need to
  // precisely determine whether a store pair can be formed. But we do want to
  // filter out most situations where we can't form store pairs to avoid
  // computing trace metrics in those cases.
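  // For example, two adjacent narrow FP stores off the same base register,
  //   str d0, [x0]
  //   str d1, [x0, #8]
  // are exactly what the load/store optimizer would later merge into
  //   stp d0, d1, [x0]
  // and therefore what this loop may need to suppress.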
  for (auto &MBB : MF) {
    bool SuppressSTP = false;
    unsigned PrevBaseReg = 0;
    for (auto &MI : MBB) {
      if (!isNarrowFPStore(MI))
        continue;
      const MachineOperand *BaseOp;
      int64_t Offset;
      bool OffsetIsScalable;
      if (TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
                                       TRI) &&
          BaseOp->isReg()) {
        Register BaseReg = BaseOp->getReg();
        if (PrevBaseReg == BaseReg) {
          // If this block can take STPs, skip ahead to the next block.
          if (!SuppressSTP && shouldAddSTPToBlock(MI.getParent()))
            break;
          // Otherwise, continue unpairing the stores in this block.
          LLVM_DEBUG(dbgs() << "Unpairing store " << MI << "\n");
          SuppressSTP = true;
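          // suppressLdStPair() only tags the store's memory operand; the
          // load/store optimizer later sees the flag and leaves it unpaired.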
          TII->suppressLdStPair(MI);
        }
        PrevBaseReg = BaseReg;
      } else
        PrevBaseReg = 0;
    }
  }
  // This pass just sets some internal MachineMemOperand flags. It can't really
  // invalidate anything.
  return false;
}