// (scrape artifact removed: filename/size banner and garbled line-number run)
  1. //===-- ThumbRegisterInfo.cpp - Thumb-1 Register Information -------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file contains the Thumb-1 implementation of the TargetRegisterInfo
  10. // class.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "ThumbRegisterInfo.h"
  14. #include "ARMBaseInstrInfo.h"
  15. #include "ARMMachineFunctionInfo.h"
  16. #include "ARMSubtarget.h"
  17. #include "MCTargetDesc/ARMAddressingModes.h"
  18. #include "llvm/CodeGen/MachineConstantPool.h"
  19. #include "llvm/CodeGen/MachineFrameInfo.h"
  20. #include "llvm/CodeGen/MachineFunction.h"
  21. #include "llvm/CodeGen/MachineInstrBuilder.h"
  22. #include "llvm/CodeGen/MachineRegisterInfo.h"
  23. #include "llvm/CodeGen/RegisterScavenging.h"
  24. #include "llvm/IR/Constants.h"
  25. #include "llvm/IR/DerivedTypes.h"
  26. #include "llvm/IR/Function.h"
  27. #include "llvm/IR/LLVMContext.h"
  28. #include "llvm/Support/CommandLine.h"
  29. #include "llvm/Support/ErrorHandling.h"
  30. #include "llvm/CodeGen/TargetFrameLowering.h"
  31. #include "llvm/Target/TargetMachine.h"
  32. namespace llvm {
  33. extern cl::opt<bool> ReuseFrameIndexVals;
  34. }
  35. using namespace llvm;
// Default-constructed: this subclass adds no state of its own to initialize.
ThumbRegisterInfo::ThumbRegisterInfo() = default;
  37. const TargetRegisterClass *
  38. ThumbRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
  39. const MachineFunction &MF) const {
  40. if (!MF.getSubtarget<ARMSubtarget>().isThumb1Only())
  41. return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC, MF);
  42. if (ARM::tGPRRegClass.hasSubClassEq(RC))
  43. return &ARM::tGPRRegClass;
  44. return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC, MF);
  45. }
  46. const TargetRegisterClass *
  47. ThumbRegisterInfo::getPointerRegClass(const MachineFunction &MF,
  48. unsigned Kind) const {
  49. if (!MF.getSubtarget<ARMSubtarget>().isThumb1Only())
  50. return ARMBaseRegisterInfo::getPointerRegClass(MF, Kind);
  51. return &ARM::tGPRRegClass;
  52. }
  53. static void emitThumb1LoadConstPool(MachineBasicBlock &MBB,
  54. MachineBasicBlock::iterator &MBBI,
  55. const DebugLoc &dl, unsigned DestReg,
  56. unsigned SubIdx, int Val,
  57. ARMCC::CondCodes Pred, unsigned PredReg,
  58. unsigned MIFlags) {
  59. MachineFunction &MF = *MBB.getParent();
  60. const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  61. const TargetInstrInfo &TII = *STI.getInstrInfo();
  62. MachineConstantPool *ConstantPool = MF.getConstantPool();
  63. const Constant *C = ConstantInt::get(
  64. Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Val);
  65. unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align(4));
  66. BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRpci))
  67. .addReg(DestReg, getDefRegState(true), SubIdx)
  68. .addConstantPoolIndex(Idx).addImm(Pred).addReg(PredReg)
  69. .setMIFlags(MIFlags);
  70. }
  71. static void emitThumb2LoadConstPool(MachineBasicBlock &MBB,
  72. MachineBasicBlock::iterator &MBBI,
  73. const DebugLoc &dl, unsigned DestReg,
  74. unsigned SubIdx, int Val,
  75. ARMCC::CondCodes Pred, unsigned PredReg,
  76. unsigned MIFlags) {
  77. MachineFunction &MF = *MBB.getParent();
  78. const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  79. MachineConstantPool *ConstantPool = MF.getConstantPool();
  80. const Constant *C = ConstantInt::get(
  81. Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Val);
  82. unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align(4));
  83. BuildMI(MBB, MBBI, dl, TII.get(ARM::t2LDRpci))
  84. .addReg(DestReg, getDefRegState(true), SubIdx)
  85. .addConstantPoolIndex(Idx)
  86. .add(predOps(ARMCC::AL))
  87. .setMIFlags(MIFlags);
  88. }
  89. /// emitLoadConstPool - Emits a load from constpool to materialize the
  90. /// specified immediate.
  91. void ThumbRegisterInfo::emitLoadConstPool(
  92. MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
  93. const DebugLoc &dl, Register DestReg, unsigned SubIdx, int Val,
  94. ARMCC::CondCodes Pred, Register PredReg, unsigned MIFlags) const {
  95. MachineFunction &MF = *MBB.getParent();
  96. const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  97. if (STI.isThumb1Only()) {
  98. assert((isARMLowRegister(DestReg) || DestReg.isVirtual()) &&
  99. "Thumb1 does not have ldr to high register");
  100. return emitThumb1LoadConstPool(MBB, MBBI, dl, DestReg, SubIdx, Val, Pred,
  101. PredReg, MIFlags);
  102. }
  103. return emitThumb2LoadConstPool(MBB, MBBI, dl, DestReg, SubIdx, Val, Pred,
  104. PredReg, MIFlags);
  105. }
/// emitThumbRegPlusImmInReg - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code. Materialize the immediate
/// in a register using mov / mvn sequences or load the immediate from a
/// constpool entry.
static void emitThumbRegPlusImmInReg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, Register DestReg, Register BaseReg, int NumBytes,
    bool CanChangeCC, const TargetInstrInfo &TII,
    const ARMBaseRegisterInfo &MRI, unsigned MIFlags = MachineInstr::NoFlags) {
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &ST = MF.getSubtarget<ARMSubtarget>();
  // "High" if either register involved is outside the low (r0-r7) range.
  bool isHigh = !isARMLowRegister(DestReg) ||
                (BaseReg != 0 && !isARMLowRegister(BaseReg));
  bool isSub = false;
  // Subtract doesn't have high register version. Load the negative value
  // if either base or dest register is a high register. Also, if do not
  // issue sub as part of the sequence if condition register is to be
  // preserved.
  if (NumBytes < 0 && !isHigh && CanChangeCC) {
    isSub = true;
    NumBytes = -NumBytes;
  }
  // Register that will hold the materialized immediate. If DestReg is a
  // physical high register it cannot be the load target, so use a fresh
  // low virtual register instead.
  Register LdReg = DestReg;
  if (DestReg == ARM::SP)
    assert(BaseReg == ARM::SP && "Unexpected!");
  if (!isARMLowRegister(DestReg) && !DestReg.isVirtual())
    LdReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
  if (NumBytes <= 255 && NumBytes >= 0 && CanChangeCC) {
    // 0..255: a single flag-setting tMOVi8 encodes the immediate directly.
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg)
        .add(t1CondCodeOp())
        .addImm(NumBytes)
        .setMIFlags(MIFlags);
  } else if (NumBytes < 0 && NumBytes >= -255 && CanChangeCC) {
    // -255..-1: synthesize the value with a tMOVi8 + tRSB pair (both set
    // flags, hence the CanChangeCC requirement).
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg)
        .add(t1CondCodeOp())
        .addImm(NumBytes)
        .setMIFlags(MIFlags);
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tRSB), LdReg)
        .add(t1CondCodeOp())
        .addReg(LdReg, RegState::Kill)
        .setMIFlags(MIFlags);
  } else if (ST.genExecuteOnly()) {
    // Execute-only code must not use constant pools; emit the MOVi32imm
    // pseudo (expanded later) instead.
    BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi32imm), LdReg)
        .addImm(NumBytes).setMIFlags(MIFlags);
  } else
    MRI.emitLoadConstPool(MBB, MBBI, dl, LdReg, 0, NumBytes, ARMCC::AL, 0,
                          MIFlags);
  // Emit add / sub.
  int Opc = (isSub) ? ARM::tSUBrr
                    : ((isHigh || !CanChangeCC) ? ARM::tADDhirr : ARM::tADDrr);
  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg);
  // tADDhirr is the only form here that does not take a condition-code def.
  if (Opc != ARM::tADDhirr)
    MIB = MIB.add(t1CondCodeOp());
  // Operand order differs: SP updates and subtracts keep BaseReg first.
  if (DestReg == ARM::SP || isSub)
    MIB.addReg(BaseReg).addReg(LdReg, RegState::Kill);
  else
    MIB.addReg(LdReg).addReg(BaseReg, RegState::Kill);
  MIB.add(predOps(ARMCC::AL));
}
/// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code. Tries a series of ADDs or
/// SUBs first, and uses a constant pool value if the instruction sequence would
/// be too long. This is allowed to modify the condition flags.
void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MBBI,
                                     const DebugLoc &dl, Register DestReg,
                                     Register BaseReg, int NumBytes,
                                     const TargetInstrInfo &TII,
                                     const ARMBaseRegisterInfo &MRI,
                                     unsigned MIFlags) {
  bool isSub = NumBytes < 0;
  unsigned Bytes = (unsigned)NumBytes;
  if (isSub) Bytes = -NumBytes;
  // Parameters of the one-off copy instruction (DestReg = BaseReg + imm).
  int CopyOpc = 0;
  unsigned CopyBits = 0;
  unsigned CopyScale = 1;
  bool CopyNeedsCC = false;
  // Parameters of the repeatable in-place instruction (DestReg += imm).
  int ExtraOpc = 0;
  unsigned ExtraBits = 0;
  unsigned ExtraScale = 1;
  bool ExtraNeedsCC = false;
  // Strategy:
  // We need to select two types of instruction, maximizing the available
  // immediate range of each. The instructions we use will depend on whether
  // DestReg and BaseReg are low, high or the stack pointer.
  // * CopyOpc  - DestReg = BaseReg + imm
  //              This will be emitted once if DestReg != BaseReg, and never if
  //              DestReg == BaseReg.
  // * ExtraOpc - DestReg = DestReg + imm
  //              This will be emitted as many times as necessary to add the
  //              full immediate.
  // If the immediate ranges of these instructions are not large enough to cover
  // NumBytes with a reasonable number of instructions, we fall back to using a
  // value loaded from a constant pool.
  if (DestReg == ARM::SP) {
    if (BaseReg == ARM::SP) {
      // sp -> sp
      // Already in right reg, no copy needed
    } else {
      // low -> sp or high -> sp
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    ExtraOpc = isSub ? ARM::tSUBspi : ARM::tADDspi;
    ExtraBits = 7;
    ExtraScale = 4;
  } else if (isARMLowRegister(DestReg)) {
    if (BaseReg == ARM::SP) {
      // sp -> low
      assert(!isSub && "Thumb1 does not have tSUBrSPi");
      CopyOpc = ARM::tADDrSPi;
      CopyBits = 8;
      CopyScale = 4;
    } else if (DestReg == BaseReg) {
      // low -> same low
      // Already in right reg, no copy needed
    } else if (isARMLowRegister(BaseReg)) {
      // low -> different low
      CopyOpc = isSub ? ARM::tSUBi3 : ARM::tADDi3;
      CopyBits = 3;
      CopyNeedsCC = true;
    } else {
      // high -> low
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    ExtraOpc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
    ExtraBits = 8;
    ExtraNeedsCC = true;
  } else /* DestReg is high */ {
    if (DestReg == BaseReg) {
      // high -> same high
      // Already in right reg, no copy needed
    } else {
      // {low,high,sp} -> high
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    // No in-place add targets a high register; ExtraOpc stays unset.
    ExtraOpc = 0;
  }
  // We could handle an unaligned immediate with an unaligned copy instruction
  // and an aligned extra instruction, but this case is not currently needed.
  assert(((Bytes & 3) == 0 || ExtraScale == 1) &&
         "Unaligned offset, but all instructions require alignment");
  unsigned CopyRange = ((1 << CopyBits) - 1) * CopyScale;
  // If we would emit the copy with an immediate of 0, just use tMOVr.
  if (CopyOpc && Bytes < CopyScale) {
    CopyOpc = ARM::tMOVr;
    CopyScale = 1;
    CopyNeedsCC = false;
    CopyRange = 0;
  }
  unsigned ExtraRange = ((1 << ExtraBits) - 1) * ExtraScale; // per instruction
  unsigned RequiredCopyInstrs = CopyOpc ? 1 : 0;
  unsigned RangeAfterCopy = (CopyRange > Bytes) ? 0 : (Bytes - CopyRange);
  // We could handle this case when the copy instruction does not require an
  // aligned immediate, but we do not currently do this.
  assert(RangeAfterCopy % ExtraScale == 0 &&
         "Extra instruction requires immediate to be aligned");
  unsigned RequiredExtraInstrs;
  if (ExtraRange)
    RequiredExtraInstrs = alignTo(RangeAfterCopy, ExtraRange) / ExtraRange;
  else if (RangeAfterCopy > 0)
    // We need an extra instruction but none is available
    RequiredExtraInstrs = 1000000;
  else
    RequiredExtraInstrs = 0;
  unsigned RequiredInstrs = RequiredCopyInstrs + RequiredExtraInstrs;
  unsigned Threshold = (DestReg == ARM::SP) ? 3 : 2;
  // Use a constant pool, if the sequence of ADDs/SUBs is too expensive.
  if (RequiredInstrs > Threshold) {
    emitThumbRegPlusImmInReg(MBB, MBBI, dl,
                             DestReg, BaseReg, NumBytes, true,
                             TII, MRI, MIFlags);
    return;
  }
  // Emit zero or one copy instructions
  if (CopyOpc) {
    unsigned CopyImm = std::min(Bytes, CopyRange) / CopyScale;
    Bytes -= CopyImm * CopyScale;
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(CopyOpc), DestReg);
    if (CopyNeedsCC)
      MIB = MIB.add(t1CondCodeOp());
    MIB.addReg(BaseReg, RegState::Kill);
    if (CopyOpc != ARM::tMOVr) {
      MIB.addImm(CopyImm);
    }
    MIB.setMIFlags(MIFlags).add(predOps(ARMCC::AL));
    // The remaining immediate is now added onto DestReg itself.
    BaseReg = DestReg;
  }
  // Emit zero or more in-place add/sub instructions
  while (Bytes) {
    unsigned ExtraImm = std::min(Bytes, ExtraRange) / ExtraScale;
    Bytes -= ExtraImm * ExtraScale;
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(ExtraOpc), DestReg);
    if (ExtraNeedsCC)
      MIB = MIB.add(t1CondCodeOp());
    MIB.addReg(BaseReg)
       .addImm(ExtraImm)
       .add(predOps(ARMCC::AL))
       .setMIFlags(MIFlags);
  }
}
  309. static void removeOperands(MachineInstr &MI, unsigned i) {
  310. unsigned Op = i;
  311. for (unsigned e = MI.getNumOperands(); i != e; ++i)
  312. MI.removeOperand(Op);
  313. }
  314. /// convertToNonSPOpcode - Change the opcode to the non-SP version, because
  315. /// we're replacing the frame index with a non-SP register.
  316. static unsigned convertToNonSPOpcode(unsigned Opcode) {
  317. switch (Opcode) {
  318. case ARM::tLDRspi:
  319. return ARM::tLDRi;
  320. case ARM::tSTRspi:
  321. return ARM::tSTRi;
  322. }
  323. return Opcode;
  324. }
/// Rewrite the frame-index operand of *II (at index FrameRegIdx) in terms of
/// FrameReg plus Offset. On return, Offset holds whatever part of the offset
/// could not be folded into the instruction; the return value is true when
/// the instruction needs no further fixup (it was fully rewritten, or erased
/// in the tADDframe case).
bool ThumbRegisterInfo::rewriteFrameIndex(MachineBasicBlock::iterator II,
                                          unsigned FrameRegIdx,
                                          Register FrameReg, int &Offset,
                                          const ARMBaseInstrInfo &TII) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  assert(MBB.getParent()->getSubtarget<ARMSubtarget>().isThumb1Only() &&
         "This isn't needed for thumb2!");
  DebugLoc dl = MI.getDebugLoc();
  MachineInstrBuilder MIB(*MBB.getParent(), &MI);
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  if (Opcode == ARM::tADDframe) {
    // tADDframe pseudo: expand to a reg-plus-imm sequence and erase it.
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    Register DestReg = MI.getOperand(0).getReg();
    emitThumbRegPlusImmediate(MBB, II, dl, DestReg, FrameReg, Offset, TII,
                              *this);
    MBB.erase(II);
    return true;
  } else {
    if (AddrMode != ARMII::AddrModeT1_s)
      llvm_unreachable("Unsupported addressing mode!");
    // AddrModeT1_s: word-scaled immediate, 8 bits when based on SP and
    // 5 bits otherwise.
    unsigned ImmIdx = FrameRegIdx + 1;
    int InstrOffs = MI.getOperand(ImmIdx).getImm();
    unsigned NumBits = (FrameReg == ARM::SP) ? 8 : 5;
    unsigned Scale = 4;
    Offset += InstrOffs * Scale;
    assert((Offset & (Scale - 1)) == 0 && "Can't encode this offset!");
    // Common case: small offset, fits into instruction.
    MachineOperand &ImmOp = MI.getOperand(ImmIdx);
    int ImmedOffset = Offset / Scale;
    unsigned Mask = (1 << NumBits) - 1;
    if ((unsigned)Offset <= Mask * Scale) {
      // Replace the FrameIndex with the frame register (e.g., sp).
      Register DestReg = FrameReg;
      // In case FrameReg is a high register, move it to a low reg to ensure it
      // can be used as an operand.
      if (ARM::hGPRRegClass.contains(FrameReg) && FrameReg != ARM::SP) {
        DestReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
        BuildMI(MBB, II, dl, TII.get(ARM::tMOVr), DestReg)
            .addReg(FrameReg)
            .add(predOps(ARMCC::AL));
      }
      MI.getOperand(FrameRegIdx).ChangeToRegister(DestReg, false);
      ImmOp.ChangeToImmediate(ImmedOffset);
      // If we're using a register where sp was stored, convert the instruction
      // to the non-SP version.
      unsigned NewOpc = convertToNonSPOpcode(Opcode);
      if (NewOpc != Opcode && FrameReg != ARM::SP)
        MI.setDesc(TII.get(NewOpc));
      return true;
    }
    // Offset didn't fit: fold what the narrower (5-bit) form can take.
    NumBits = 5;
    Mask = (1 << NumBits) - 1;
    // If this is a thumb spill / restore, we will be using a constpool load to
    // materialize the offset.
    if (Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi) {
      ImmOp.ChangeToImmediate(0);
    } else {
      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask * Scale);
    }
  }
  return Offset == 0;
}
  394. void ThumbRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
  395. int64_t Offset) const {
  396. const MachineFunction &MF = *MI.getParent()->getParent();
  397. const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  398. if (!STI.isThumb1Only())
  399. return ARMBaseRegisterInfo::resolveFrameIndex(MI, BaseReg, Offset);
  400. const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
  401. int Off = Offset; // ARM doesn't need the general 64-bit offsets
  402. unsigned i = 0;
  403. while (!MI.getOperand(i).isFI()) {
  404. ++i;
  405. assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  406. }
  407. bool Done = rewriteFrameIndex(MI, i, BaseReg, Off, TII);
  408. assert (Done && "Unable to resolve frame index!");
  409. (void)Done;
  410. }
/// Replace the frame-index operand of *II with a concrete register/offset.
/// Thumb1-only path; other subtargets delegate to ARMBaseRegisterInfo. When
/// the offset does not fit the instruction, the remainder is materialized
/// into a register and the instruction is converted to a register-addressed
/// form.
bool ThumbRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.isThumb1Only())
    return ARMBaseRegisterInfo::eliminateFrameIndex(II, SPAdj, FIOperandNum,
                                                    RS);
  Register VReg;
  const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  MachineInstrBuilder MIB(*MBB.getParent(), &MI);
  Register FrameReg;
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);
  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)){
    assert(STI.getFrameLowering()->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo().hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG
  // Special handling of dbg_value instructions.
  if (MI.isDebugValue()) {
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum+1).ChangeToImmediate(Offset);
    return false;
  }
  // Modify MI as necessary to handle as much of 'Offset' as possible
  assert(MF.getInfo<ARMFunctionInfo>()->isThumbFunction() &&
         "This eliminateFrameIndex only supports Thumb1!");
  if (rewriteFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return true;
  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert(Offset && "This code isn't needed if offset already handled!");
  unsigned Opcode = MI.getOpcode();
  // Remove predicate first.
  int PIdx = MI.findFirstPredOperandIdx();
  if (PIdx != -1)
    removeOperands(MI, PIdx);
  if (MI.mayLoad()) {
    // Use the destination register to materialize sp + offset.
    Register TmpReg = MI.getOperand(0).getReg();
    bool UseRR = false;
    if (Opcode == ARM::tLDRspi) {
      if (FrameReg == ARM::SP || STI.genExecuteOnly())
        emitThumbRegPlusImmInReg(MBB, II, dl, TmpReg, FrameReg,
                                 Offset, false, TII, *this);
      else {
        // Load just the offset from a constant pool; the base register is
        // supplied via the [reg, reg] addressing mode below when possible.
        emitLoadConstPool(MBB, II, dl, TmpReg, 0, Offset);
        if (!ARM::hGPRRegClass.contains(FrameReg)) {
          UseRR = true;
        } else {
          // If FrameReg is a high register, add the reg values in a separate
          // instruction as the load won't be able to access it.
          BuildMI(MBB, II, dl, TII.get(ARM::tADDhirr), TmpReg)
              .addReg(TmpReg)
              .addReg(FrameReg)
              .add(predOps(ARMCC::AL));
        }
      }
    } else {
      emitThumbRegPlusImmediate(MBB, II, dl, TmpReg, FrameReg, Offset, TII,
                                *this);
    }
    MI.setDesc(TII.get(UseRR ? ARM::tLDRr : ARM::tLDRi));
    MI.getOperand(FIOperandNum).ChangeToRegister(TmpReg, false, false, true);
    if (UseRR) {
      assert(!ARM::hGPRRegClass.contains(FrameReg) &&
             "Thumb1 loads can't use high register");
      // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
      // register. The offset is already handled in the vreg value.
      MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
                                                     false);
    }
  } else if (MI.mayStore()) {
    // Unlike the load case, the data register cannot be clobbered, so a
    // fresh low virtual register holds the materialized address.
    VReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
    bool UseRR = false;
    if (Opcode == ARM::tSTRspi) {
      if (FrameReg == ARM::SP || STI.genExecuteOnly())
        emitThumbRegPlusImmInReg(MBB, II, dl, VReg, FrameReg,
                                 Offset, false, TII, *this);
      else {
        emitLoadConstPool(MBB, II, dl, VReg, 0, Offset);
        if (!ARM::hGPRRegClass.contains(FrameReg)) {
          UseRR = true;
        } else {
          // If FrameReg is a high register, add the reg values in a separate
          // instruction as the load won't be able to access it.
          BuildMI(MBB, II, dl, TII.get(ARM::tADDhirr), VReg)
              .addReg(VReg)
              .addReg(FrameReg)
              .add(predOps(ARMCC::AL));
        }
      }
    } else
      emitThumbRegPlusImmediate(MBB, II, dl, VReg, FrameReg, Offset, TII,
                                *this);
    MI.setDesc(TII.get(UseRR ? ARM::tSTRr : ARM::tSTRi));
    MI.getOperand(FIOperandNum).ChangeToRegister(VReg, false, false, true);
    if (UseRR) {
      assert(!ARM::hGPRRegClass.contains(FrameReg) &&
             "Thumb1 stores can't use high register");
      // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
      // register. The offset is already handled in the vreg value.
      MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
                                                     false);
    }
  } else {
    llvm_unreachable("Unexpected opcode!");
  }
  // Add predicate back if it's needed.
  if (MI.isPredicable())
    MIB.add(predOps(ARMCC::AL));
  return false;
}
  539. bool
  540. ThumbRegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
  541. if (MF.getSubtarget<ARMSubtarget>().isThumb1Only()) {
  542. // For Thumb1, the emergency spill slot must be some small positive
  543. // offset from the base/stack pointer.
  544. return false;
  545. }
  546. // For Thumb2, put the emergency spill slot next to FP.
  547. return true;
  548. }