AArch64CompressJumpTables.cpp

//==-- AArch64CompressJumpTables.cpp - Compress jump tables for AArch64 --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass looks at the basic blocks each jump-table refers to and works out
// whether they can be emitted in a compressed form (with 8 or 16-bit
// entries). If so, it changes the opcode and flags them in the associated
// AArch64FunctionInfo.
//
//===----------------------------------------------------------------------===//

  13. #include "AArch64.h"
  14. #include "AArch64MachineFunctionInfo.h"
  15. #include "AArch64Subtarget.h"
  16. #include "llvm/ADT/Statistic.h"
  17. #include "llvm/CodeGen/MachineFunctionPass.h"
  18. #include "llvm/CodeGen/MachineJumpTableInfo.h"
  19. #include "llvm/CodeGen/TargetInstrInfo.h"
  20. #include "llvm/CodeGen/TargetSubtargetInfo.h"
  21. #include "llvm/MC/MCContext.h"
  22. #include "llvm/Support/Alignment.h"
  23. #include "llvm/Support/Debug.h"
  24. using namespace llvm;
#define DEBUG_TYPE "aarch64-jump-tables"

STATISTIC(NumJT8, "Number of jump-tables with 1-byte entries");
STATISTIC(NumJT16, "Number of jump-tables with 2-byte entries");
STATISTIC(NumJT32, "Number of jump-tables with 4-byte entries");

namespace {
class AArch64CompressJumpTables : public MachineFunctionPass {
  const TargetInstrInfo *TII;
  MachineFunction *MF;
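  /// Byte offset of each basic block from the start of the function, indexed
  /// by block number. Populated by scanFunction().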
  SmallVector<int, 8> BlockInfo;

  /// Returns the size in bytes of the block \p MBB, or std::nullopt if we
  /// couldn't get a safe upper bound.
  std::optional<int> computeBlockSize(MachineBasicBlock &MBB);

  /// Gather information about the function; returns false if we can't perform
  /// this optimization for some reason.
  bool scanFunction();

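  /// Try to shrink the jump table used by \p MI so that it uses 8-bit or
  /// 16-bit entries. \p Offset is the byte offset of \p MI from the start of
  /// the function. Returns true if the instruction was changed.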
  bool compressJumpTable(MachineInstr &MI, int Offset);

public:
  static char ID;

  AArch64CompressJumpTables() : MachineFunctionPass(ID) {
    initializeAArch64CompressJumpTablesPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

  StringRef getPassName() const override {
    return "AArch64 Compress Jump Tables";
  }
};

char AArch64CompressJumpTables::ID = 0;
} // namespace

INITIALIZE_PASS(AArch64CompressJumpTables, DEBUG_TYPE,
                "AArch64 compress jump tables pass", false, false)

std::optional<int>
AArch64CompressJumpTables::computeBlockSize(MachineBasicBlock &MBB) {
  int Size = 0;
  for (const MachineInstr &MI : MBB) {
    // Inline asm may contain some directives like .byte which we don't
    // currently have the ability to parse accurately. To be safe, just avoid
    // computing a size and bail out.
    if (MI.getOpcode() == AArch64::INLINEASM ||
        MI.getOpcode() == AArch64::INLINEASM_BR)
      return std::nullopt;
    Size += TII->getInstSizeInBytes(MI);
  }
  return Size;
}

bool AArch64CompressJumpTables::scanFunction() {
  BlockInfo.clear();
  BlockInfo.resize(MF->getNumBlockIDs());

  unsigned Offset = 0;
  for (MachineBasicBlock &MBB : *MF) {
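    // Respect any explicit alignment the block requests when computing its
    // starting offset; blocks with the default alignment start immediately
    // after the previous block.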
    const Align Alignment = MBB.getAlignment();
    unsigned AlignedOffset;
    if (Alignment == Align(1))
      AlignedOffset = Offset;
    else
      AlignedOffset = alignTo(Offset, Alignment);
    BlockInfo[MBB.getNumber()] = AlignedOffset;
    auto BlockSize = computeBlockSize(MBB);
    if (!BlockSize)
      return false;
    Offset = AlignedOffset + *BlockSize;
  }
  return true;
}

bool AArch64CompressJumpTables::compressJumpTable(MachineInstr &MI,
                                                  int Offset) {
  if (MI.getOpcode() != AArch64::JumpTableDest32)
    return false;

  int JTIdx = MI.getOperand(4).getIndex();
  auto &JTInfo = *MF->getJumpTableInfo();
  const MachineJumpTableEntry &JT = JTInfo.getJumpTables()[JTIdx];

  // The jump-table might have been optimized away.
  if (JT.MBBs.empty())
    return false;

  int MaxOffset = std::numeric_limits<int>::min(),
      MinOffset = std::numeric_limits<int>::max();
  MachineBasicBlock *MinBlock = nullptr;
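  // Find the lowest and highest offsets of the blocks this table can jump to.
  // The lowest block becomes the anchor that the compressed entries (and the
  // ADR that materializes its address) are measured from.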
  for (auto *Block : JT.MBBs) {
    int BlockOffset = BlockInfo[Block->getNumber()];
    assert(BlockOffset % 4 == 0 && "misaligned basic block");

    MaxOffset = std::max(MaxOffset, BlockOffset);
    if (BlockOffset <= MinOffset) {
      MinOffset = BlockOffset;
      MinBlock = Block;
    }
  }
  assert(MinBlock && "Failed to find minimum offset block");

  // The ADR instruction needed to calculate the address of the first reachable
  // basic block can address +/-1MB.
  if (!isInt<21>(MinOffset - Offset)) {
    ++NumJT32;
    return false;
  }

  int Span = MaxOffset - MinOffset;
  auto *AFI = MF->getInfo<AArch64FunctionInfo>();
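  // Compressed entries hold the distance from the anchor block in 4-byte
  // units, so the table can be shrunk whenever that scaled span fits in the
  // narrower entry width.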
  if (isUInt<8>(Span / 4)) {
    AFI->setJumpTableEntryInfo(JTIdx, 1, MinBlock->getSymbol());
    MI.setDesc(TII->get(AArch64::JumpTableDest8));
    ++NumJT8;
    return true;
  }
  if (isUInt<16>(Span / 4)) {
    AFI->setJumpTableEntryInfo(JTIdx, 2, MinBlock->getSymbol());
    MI.setDesc(TII->get(AArch64::JumpTableDest16));
    ++NumJT16;
    return true;
  }
  ++NumJT32;
  return false;
}

bool AArch64CompressJumpTables::runOnMachineFunction(MachineFunction &MFIn) {
  bool Changed = false;
  MF = &MFIn;

  const auto &ST = MF->getSubtarget<AArch64Subtarget>();
  TII = ST.getInstrInfo();

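  // Some subtargets require full-width (32-bit) jump-table entries; only
  // override that when the function is being optimized for minimum size.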
  if (ST.force32BitJumpTables() && !MF->getFunction().hasMinSize())
    return false;

  if (!scanFunction())
    return false;

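  // Walk every instruction, tracking its byte offset from the start of the
  // function, and try to compress each jump-table dispatch we encounter.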
  for (MachineBasicBlock &MBB : *MF) {
    int Offset = BlockInfo[MBB.getNumber()];
    for (MachineInstr &MI : MBB) {
      Changed |= compressJumpTable(MI, Offset);
      Offset += TII->getInstSizeInBytes(MI);
    }
  }

  return Changed;
}

FunctionPass *llvm::createAArch64CompressJumpTablesPass() {
  return new AArch64CompressJumpTables();
}