// Thumb2InstrInfo.cpp
// (Page-extraction artifacts removed here: a "29 KB" size caption and two runs
// of concatenated line-number-gutter digits that were not part of the source.)
  1. //===- Thumb2InstrInfo.cpp - Thumb-2 Instruction Information --------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file contains the Thumb-2 implementation of the TargetInstrInfo class.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #include "Thumb2InstrInfo.h"
  13. #include "ARMMachineFunctionInfo.h"
  14. #include "ARMSubtarget.h"
  15. #include "MCTargetDesc/ARMAddressingModes.h"
  16. #include "llvm/CodeGen/MachineBasicBlock.h"
  17. #include "llvm/CodeGen/MachineFrameInfo.h"
  18. #include "llvm/CodeGen/MachineFunction.h"
  19. #include "llvm/CodeGen/MachineInstr.h"
  20. #include "llvm/CodeGen/MachineInstrBuilder.h"
  21. #include "llvm/CodeGen/MachineMemOperand.h"
  22. #include "llvm/CodeGen/MachineOperand.h"
  23. #include "llvm/CodeGen/MachineRegisterInfo.h"
  24. #include "llvm/CodeGen/TargetRegisterInfo.h"
  25. #include "llvm/IR/DebugLoc.h"
  26. #include "llvm/MC/MCInst.h"
  27. #include "llvm/MC/MCInstBuilder.h"
  28. #include "llvm/MC/MCInstrDesc.h"
  29. #include "llvm/Support/CommandLine.h"
  30. #include "llvm/Support/ErrorHandling.h"
  31. #include "llvm/Support/MathExtras.h"
  32. #include "llvm/Target/TargetMachine.h"
  33. #include <cassert>
using namespace llvm;

// Legacy tuning knob for Thumb2 if-conversion heuristics.
// NOTE(review): not referenced in this chunk; presumably read elsewhere in the
// backend or kept for flag compatibility — confirm before removing.
static cl::opt<bool>
OldT2IfCvt("old-thumb2-ifcvt", cl::Hidden,
           cl::desc("Use old-style Thumb2 if-conversion heuristics"),
           cl::init(false));

// When set, optimizeSelect() below keeps the predicated move instead of
// falling back to a t2CSEL on 8.1-M Mainline targets.
static cl::opt<bool>
PreferNoCSEL("prefer-no-csel", cl::Hidden,
             cl::desc("Prefer predicated Move to CSEL"),
             cl::init(false));
// Construct with the subtarget so the ARMBaseInstrInfo base class can key its
// behavior off the concrete CPU features.
Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
    : ARMBaseInstrInfo(STI) {}
  45. /// Return the noop instruction to use for a noop.
  46. MCInst Thumb2InstrInfo::getNop() const {
  47. return MCInstBuilder(ARM::tHINT).addImm(0).addImm(ARMCC::AL).addReg(0);
  48. }
  49. unsigned Thumb2InstrInfo::getUnindexedOpcode(unsigned Opc) const {
  50. // FIXME
  51. return 0;
  52. }
// Replace the tail of MBB starting at Tail with a branch to NewDest (via the
// base implementation), and repair any enclosing Thumb2 IT block whose mask
// still refers to the removed instructions.
void
Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  // No IT blocks formed yet (or the tail is itself a branch): nothing to fix.
  if (!AFI->hasITBlocks() || Tail->isBranch()) {
    TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);
    return;
  }

  // If the first instruction of Tail is predicated, we may have to update
  // the IT instruction.
  Register PredReg;
  ARMCC::CondCodes CC = getInstrPredicate(*Tail, PredReg);
  MachineBasicBlock::iterator MBBI = Tail;
  if (CC != ARMCC::AL)
    // Expecting at least the t2IT instruction before it.
    --MBBI;

  // Actually replace the tail.
  TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);

  // Fix up IT.
  if (CC != ARMCC::AL) {
    MachineBasicBlock::iterator E = MBB->begin();
    unsigned Count = 4; // At most 4 instructions in an IT block.
    // Walk backwards from the removed tail looking for the t2IT that opened
    // the block, counting how many (non-debug) instructions remain in it.
    while (Count && MBBI != E) {
      if (MBBI->isDebugInstr()) {
        --MBBI;
        continue;
      }
      if (MBBI->getOpcode() == ARM::t2IT) {
        unsigned Mask = MBBI->getOperand(1).getImm();
        if (Count == 4)
          // The whole IT block was removed: drop the IT itself.
          MBBI->eraseFromParent();
        else {
          // Truncate the mask: clear the low Count bits and set bit Count,
          // which (per the IT mask encoding) terminates the block after the
          // instructions that survive. TODO confirm against the ARM ARM IT
          // firstcond/mask encoding.
          unsigned MaskOn = 1 << Count;
          unsigned MaskOff = ~(MaskOn - 1);
          MBBI->getOperand(1).setImm((Mask & MaskOff) | MaskOn);
        }
        return;
      }
      --MBBI;
      --Count;
    }

    // Ctrl flow can reach here if branch folding is run before IT block
    // formation pass.
  }
}
  99. bool
  100. Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
  101. MachineBasicBlock::iterator MBBI) const {
  102. while (MBBI->isDebugInstr()) {
  103. ++MBBI;
  104. if (MBBI == MBB.end())
  105. return false;
  106. }
  107. Register PredReg;
  108. return getITInstrPredicate(*MBBI, PredReg) == ARMCC::AL;
  109. }
MachineInstr *
Thumb2InstrInfo::optimizeSelect(MachineInstr &MI,
                                SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                                bool PreferFalse) const {
  // Try to use the base optimizeSelect, which uses canFoldIntoMOVCC to fold the
  // MOVCC into another instruction. If that fails on 8.1-M fall back to using a
  // CSEL.
  MachineInstr *RV = ARMBaseInstrInfo::optimizeSelect(MI, SeenMIs, PreferFalse);
  if (!RV && getSubtarget().hasV8_1MMainlineOps() && !PreferNoCSEL) {
    // Only rewrite virtual-register destinations.
    Register DestReg = MI.getOperand(0).getReg();
    if (!DestReg.isVirtual())
      return nullptr;
    // Build a t2CSEL from the MOVCC's operands. NOTE(review): operands are
    // added in the order 2, 1, 3 — presumably matching t2CSEL's expected
    // (true, false, cond) layout; confirm against the t2CSEL definition.
    MachineInstrBuilder NewMI = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                        get(ARM::t2CSEL), DestReg)
                                    .add(MI.getOperand(2))
                                    .add(MI.getOperand(1))
                                    .add(MI.getOperand(3));
    // Record the replacement so the caller's bookkeeping set stays accurate.
    SeenMIs.insert(NewMI);
    return NewMI;
  }
  return RV;
}
  132. void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
  133. MachineBasicBlock::iterator I,
  134. const DebugLoc &DL, MCRegister DestReg,
  135. MCRegister SrcReg, bool KillSrc) const {
  136. // Handle SPR, DPR, and QPR copies.
  137. if (!ARM::GPRRegClass.contains(DestReg, SrcReg))
  138. return ARMBaseInstrInfo::copyPhysReg(MBB, I, DL, DestReg, SrcReg, KillSrc);
  139. BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg)
  140. .addReg(SrcReg, getKillRegState(KillSrc))
  141. .add(predOps(ARMCC::AL));
  142. }
// Spill SrcReg to frame index FI. Single GPRs use t2STRi12, GPR pairs use
// t2STRDi8, and all other classes fall back to the ARM base implementation.
void Thumb2InstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    Register SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Describe the store to the fixed stack slot for the scheduler/AA.
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(ARM::t2STRi12))
        .addReg(SrcReg, getKillRegState(isKill))
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 STRD expects its dest-registers to be in rGPR. Not a problem for
    // gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
    if (Register::isVirtualRegister(SrcReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(SrcReg, &ARM::GPRPairnospRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2STRDi8));
    // Emit the two halves of the pair as separate source operands; only the
    // pair as a whole is killed.
    AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
    AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
    return;
  }

  ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC, TRI);
}
// Reload DestReg from frame index FI. Single GPRs use t2LDRi12, GPR pairs use
// t2LDRDi8, and all other classes fall back to the ARM base implementation.
void Thumb2InstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     Register DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Describe the load from the fixed stack slot for the scheduler/AA.
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 LDRD expects its dest-registers to be in rGPR. Not a problem for
    // gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
    if (Register::isVirtualRegister(DestReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(DestReg, &ARM::GPRPairnospRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2LDRDi8));
    // Define each half of the pair without reading it.
    AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
    AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));

    // For a physical pair, also mark the full pair register as implicitly
    // defined so liveness sees the whole register written.
    if (Register::isPhysicalRegister(DestReg))
      MIB.addReg(DestReg, RegState::ImplicitDefine);
    return;
  }

  ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
}
// Expand the LOAD_STACK_GUARD pseudo into a concrete address-materialization
// plus t2LDRi12 load, choosing the sequence by how the guard value must be
// addressed (TLS, GOT, PC-relative, or absolute).
void Thumb2InstrInfo::expandLoadStackGuard(
    MachineBasicBlock::iterator MI) const {
  MachineFunction &MF = *MI->getParent()->getParent();
  Module &M = *MF.getFunction().getParent();

  // TLS-based guard: read via a coprocessor register read (t2MRC) then load.
  if (M.getStackProtectorGuard() == "tls") {
    expandLoadStackGuardBase(MI, ARM::t2MRC, ARM::t2LDRi12);
    return;
  }

  // Otherwise the guard is a global, recorded in the pseudo's memoperand.
  const GlobalValue *GV =
      cast<GlobalValue>((*MI->memoperands_begin())->getValue());

  if (MF.getSubtarget<ARMSubtarget>().isGVInGOT(GV))
    // Global must be reached through the GOT.
    expandLoadStackGuardBase(MI, ARM::t2LDRLIT_ga_pcrel, ARM::t2LDRi12);
  else if (MF.getTarget().isPositionIndependent())
    // PIC: compute the address PC-relatively.
    expandLoadStackGuardBase(MI, ARM::t2MOV_ga_pcrel, ARM::t2LDRi12);
  else
    // Static code: materialize the absolute address.
    expandLoadStackGuardBase(MI, ARM::t2MOVi32imm, ARM::t2LDRi12);
}
  235. MachineInstr *Thumb2InstrInfo::commuteInstructionImpl(MachineInstr &MI,
  236. bool NewMI,
  237. unsigned OpIdx1,
  238. unsigned OpIdx2) const {
  239. switch (MI.getOpcode()) {
  240. case ARM::MVE_VMAXNMAf16:
  241. case ARM::MVE_VMAXNMAf32:
  242. case ARM::MVE_VMINNMAf16:
  243. case ARM::MVE_VMINNMAf32:
  244. // Don't allow predicated instructions to be commuted.
  245. if (getVPTInstrPredicate(MI) != ARMVCC::None)
  246. return nullptr;
  247. }
  248. return ARMBaseInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
  249. }
// Emit instructions computing DestReg = BaseReg + NumBytes (NumBytes may be
// negative), preferring the smallest encodings: a plain move for zero, a
// movw/movt + add/sub for large offsets into a scratch-capable DestReg, a T1
// SP-relative add/sub when it fits, and otherwise a chain of T2 add/sub
// immediates that peels off encodable chunks. MIFlags is propagated to every
// emitted instruction; Pred/PredReg predicate forms that accept a predicate.
void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator &MBBI,
                                  const DebugLoc &dl, Register DestReg,
                                  Register BaseReg, int NumBytes,
                                  ARMCC::CondCodes Pred, Register PredReg,
                                  const ARMBaseInstrInfo &TII,
                                  unsigned MIFlags) {
  // Degenerate case: nothing to add, just copy BaseReg into DestReg.
  if (NumBytes == 0 && DestReg != BaseReg) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
        .addReg(BaseReg, RegState::Kill)
        .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
    return;
  }

  // Work with the magnitude; remember the sign.
  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  // If profitable, use a movw or movt to materialize the offset.
  // FIXME: Use the scavenger to grab a scratch register.
  if (DestReg != ARM::SP && DestReg != BaseReg &&
      NumBytes >= 4096 &&
      ARM_AM::getT2SOImmVal(NumBytes) == -1) {
    bool Fits = false;
    if (NumBytes < 65536) {
      // Use a movw to materialize the 16-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), DestReg)
          .addImm(NumBytes)
          .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    } else if ((NumBytes & 0xffff) == 0) {
      // Use a movt to materialize the 32-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVTi16), DestReg)
          .addReg(DestReg)
          .addImm(NumBytes >> 16)
          .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    }

    if (Fits) {
      // DestReg now holds the (positive) offset; combine with BaseReg.
      if (isSub) {
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      } else {
        // Here we know that DestReg is not SP but we do not
        // know anything about BaseReg. t2ADDrr is an invalid
        // instruction is SP is used as the second argument, but
        // is fine if SP is the first argument. To be sure we
        // do not generate invalid encoding, put BaseReg first.
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2ADDrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      }
      return;
    }
  }

  // General path: peel off as much of NumBytes per iteration as a single
  // instruction can encode, chaining through DestReg.
  while (NumBytes) {
    unsigned ThisVal = NumBytes;
    unsigned Opc = 0;
    if (DestReg == ARM::SP && BaseReg != ARM::SP) {
      // mov sp, rn. Note t2MOVr cannot be used.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
          .addReg(BaseReg)
          .setMIFlags(MIFlags)
          .add(predOps(ARMCC::AL));
      BaseReg = ARM::SP;
      continue;
    }

    assert((DestReg != ARM::SP || BaseReg == ARM::SP) &&
           "Writing to SP, from other register.");

    // Try to use T1, as it smaller
    if ((DestReg == ARM::SP) && (ThisVal < ((1 << 7) - 1) * 4)) {
      assert((ThisVal & 3) == 0 && "Stack update is not multiple of 4?");
      Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
      BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
          .addReg(BaseReg)
          .addImm(ThisVal / 4)
          .setMIFlags(MIFlags)
          .add(predOps(ARMCC::AL));
      break;
    }

    // Select SP-specific vs general-register opcode variants.
    bool HasCCOut = true;
    int ImmIsT2SO = ARM_AM::getT2SOImmVal(ThisVal);
    bool ToSP = DestReg == ARM::SP;
    unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
    unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
    unsigned t2SUBi12 = ToSP ? ARM::t2SUBspImm12 : ARM::t2SUBri12;
    unsigned t2ADDi12 = ToSP ? ARM::t2ADDspImm12 : ARM::t2ADDri12;
    Opc = isSub ? t2SUB : t2ADD;
    // Prefer T2: sub rd, rn, so_imm | sub sp, sp, so_imm
    if (ImmIsT2SO != -1) {
      NumBytes = 0;
    } else if (ThisVal < 4096) {
      // Prefer T3 if can make it in a single go: subw rd, rn, imm12 | subw sp,
      // sp, imm12
      Opc = isSub ? t2SUBi12 : t2ADDi12;
      HasCCOut = false;
      NumBytes = 0;
    } else {
      // Use one T2 instruction to reduce NumBytes
      // FIXME: Move this to ARMAddressingModes.h?
      // Take the top 8 significant bits (rotated) so the chunk is a valid
      // T2 shifter-operand immediate; the remainder stays in NumBytes.
      unsigned RotAmt = countLeadingZeros(ThisVal);
      ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
      NumBytes &= ~ThisVal;
      assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
             "Bit extraction didn't work?");
    }

    // Build the new ADD / SUB.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
                                  .addReg(BaseReg, RegState::Kill)
                                  .addImm(ThisVal)
                                  .add(predOps(ARMCC::AL))
                                  .setMIFlags(MIFlags);
    if (HasCCOut)
      MIB.add(condCodeOp());

    // Subsequent chunks accumulate on top of what we just wrote.
    BaseReg = DestReg;
  }
}
  371. static unsigned
  372. negativeOffsetOpcode(unsigned opcode)
  373. {
  374. switch (opcode) {
  375. case ARM::t2LDRi12: return ARM::t2LDRi8;
  376. case ARM::t2LDRHi12: return ARM::t2LDRHi8;
  377. case ARM::t2LDRBi12: return ARM::t2LDRBi8;
  378. case ARM::t2LDRSHi12: return ARM::t2LDRSHi8;
  379. case ARM::t2LDRSBi12: return ARM::t2LDRSBi8;
  380. case ARM::t2STRi12: return ARM::t2STRi8;
  381. case ARM::t2STRBi12: return ARM::t2STRBi8;
  382. case ARM::t2STRHi12: return ARM::t2STRHi8;
  383. case ARM::t2PLDi12: return ARM::t2PLDi8;
  384. case ARM::t2PLDWi12: return ARM::t2PLDWi8;
  385. case ARM::t2PLIi12: return ARM::t2PLIi8;
  386. case ARM::t2LDRi8:
  387. case ARM::t2LDRHi8:
  388. case ARM::t2LDRBi8:
  389. case ARM::t2LDRSHi8:
  390. case ARM::t2LDRSBi8:
  391. case ARM::t2STRi8:
  392. case ARM::t2STRBi8:
  393. case ARM::t2STRHi8:
  394. case ARM::t2PLDi8:
  395. case ARM::t2PLDWi8:
  396. case ARM::t2PLIi8:
  397. return opcode;
  398. default:
  399. llvm_unreachable("unknown thumb2 opcode.");
  400. }
  401. }
  402. static unsigned
  403. positiveOffsetOpcode(unsigned opcode)
  404. {
  405. switch (opcode) {
  406. case ARM::t2LDRi8: return ARM::t2LDRi12;
  407. case ARM::t2LDRHi8: return ARM::t2LDRHi12;
  408. case ARM::t2LDRBi8: return ARM::t2LDRBi12;
  409. case ARM::t2LDRSHi8: return ARM::t2LDRSHi12;
  410. case ARM::t2LDRSBi8: return ARM::t2LDRSBi12;
  411. case ARM::t2STRi8: return ARM::t2STRi12;
  412. case ARM::t2STRBi8: return ARM::t2STRBi12;
  413. case ARM::t2STRHi8: return ARM::t2STRHi12;
  414. case ARM::t2PLDi8: return ARM::t2PLDi12;
  415. case ARM::t2PLDWi8: return ARM::t2PLDWi12;
  416. case ARM::t2PLIi8: return ARM::t2PLIi12;
  417. case ARM::t2LDRi12:
  418. case ARM::t2LDRHi12:
  419. case ARM::t2LDRBi12:
  420. case ARM::t2LDRSHi12:
  421. case ARM::t2LDRSBi12:
  422. case ARM::t2STRi12:
  423. case ARM::t2STRBi12:
  424. case ARM::t2STRHi12:
  425. case ARM::t2PLDi12:
  426. case ARM::t2PLDWi12:
  427. case ARM::t2PLIi12:
  428. return opcode;
  429. default:
  430. llvm_unreachable("unknown thumb2 opcode.");
  431. }
  432. }
  433. static unsigned
  434. immediateOffsetOpcode(unsigned opcode)
  435. {
  436. switch (opcode) {
  437. case ARM::t2LDRs: return ARM::t2LDRi12;
  438. case ARM::t2LDRHs: return ARM::t2LDRHi12;
  439. case ARM::t2LDRBs: return ARM::t2LDRBi12;
  440. case ARM::t2LDRSHs: return ARM::t2LDRSHi12;
  441. case ARM::t2LDRSBs: return ARM::t2LDRSBi12;
  442. case ARM::t2STRs: return ARM::t2STRi12;
  443. case ARM::t2STRBs: return ARM::t2STRBi12;
  444. case ARM::t2STRHs: return ARM::t2STRHi12;
  445. case ARM::t2PLDs: return ARM::t2PLDi12;
  446. case ARM::t2PLDWs: return ARM::t2PLDWi12;
  447. case ARM::t2PLIs: return ARM::t2PLIi12;
  448. case ARM::t2LDRi12:
  449. case ARM::t2LDRHi12:
  450. case ARM::t2LDRBi12:
  451. case ARM::t2LDRSHi12:
  452. case ARM::t2LDRSBi12:
  453. case ARM::t2STRi12:
  454. case ARM::t2STRBi12:
  455. case ARM::t2STRHi12:
  456. case ARM::t2PLDi12:
  457. case ARM::t2PLDWi12:
  458. case ARM::t2PLIi12:
  459. case ARM::t2LDRi8:
  460. case ARM::t2LDRHi8:
  461. case ARM::t2LDRBi8:
  462. case ARM::t2LDRSHi8:
  463. case ARM::t2LDRSBi8:
  464. case ARM::t2STRi8:
  465. case ARM::t2STRBi8:
  466. case ARM::t2STRHi8:
  467. case ARM::t2PLDi8:
  468. case ARM::t2PLDWi8:
  469. case ARM::t2PLIi8:
  470. return opcode;
  471. default:
  472. llvm_unreachable("unknown thumb2 opcode.");
  473. }
  474. }
// Rewrite the frame-index operand at FrameRegIdx of MI to use FrameReg plus
// Offset, converting opcodes/addressing modes as needed. On return, Offset
// holds the residual that could not be folded into MI (0 when fully folded).
// Returns true when no residual remains and FrameReg is acceptable to the
// instruction's register class.
bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                               Register FrameReg, int &Offset,
                               const ARMBaseInstrInfo &TII,
                               const TargetRegisterInfo *TRI) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetRegisterClass *RegClass =
      TII.getRegClass(Desc, FrameRegIdx, TRI, MF);

  // Memory operands in inline assembly always use AddrModeT2_i12.
  if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
    AddrMode = ARMII::AddrModeT2_i12; // FIXME. mode for thumb2?

  const bool IsSP = Opcode == ARM::t2ADDspImm12 || Opcode == ARM::t2ADDspImm;
  if (IsSP || Opcode == ARM::t2ADDri || Opcode == ARM::t2ADDri12) {
    // ADD-immediate: fold the existing immediate into the running offset.
    Offset += MI.getOperand(FrameRegIdx+1).getImm();

    Register PredReg;
    if (Offset == 0 && getInstrPredicate(MI, PredReg) == ARMCC::AL &&
        !MI.definesRegister(ARM::CPSR)) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::tMOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      // Remove offset and remaining explicit predicate operands.
      do MI.RemoveOperand(FrameRegIdx+1);
      while (MI.getNumOperands() > FrameRegIdx+1);
      MachineInstrBuilder MIB(*MI.getParent()->getParent(), &MI);
      MIB.add(predOps(ARMCC::AL));
      return true;
    }

    // The imm12 forms (*ri12/*spImm12) carry no cc_out operand.
    bool HasCCOut = (Opcode != ARM::t2ADDspImm12 && Opcode != ARM::t2ADDri12);

    // A negative total offset turns the ADD into a SUB of the magnitude.
    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(IsSP ? TII.get(ARM::t2SUBspImm) : TII.get(ARM::t2SUBri));
    } else {
      MI.setDesc(IsSP ? TII.get(ARM::t2ADDspImm) : TII.get(ARM::t2ADDri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getT2SOImmVal(Offset) != -1) {
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Add cc_out operand if the original instruction did not have one.
      if (!HasCCOut)
        MI.addOperand(MachineOperand::CreateReg(0, false));
      Offset = 0;
      return true;
    }
    // Another common case: imm12. Only legal if the instruction does not
    // write CPSR (cc_out absent or tied to the zero register).
    if (Offset < 4096 &&
        (!HasCCOut || MI.getOperand(MI.getNumOperands()-1).getReg() == 0)) {
      unsigned NewOpc = isSub ? IsSP ? ARM::t2SUBspImm12 : ARM::t2SUBri12
                              : IsSP ? ARM::t2ADDspImm12 : ARM::t2ADDri12;
      MI.setDesc(TII.get(NewOpc));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Remove the cc_out operand.
      if (HasCCOut)
        MI.RemoveOperand(MI.getNumOperands()-1);
      Offset = 0;
      return true;
    }

    // Otherwise, extract 8 adjacent bits from the immediate into this
    // t2ADDri/t2SUBri.
    unsigned RotAmt = countLeadingZeros<unsigned>(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xff000000U, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    assert(ARM_AM::getT2SOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
    // Add cc_out operand if the original instruction did not have one.
    if (!HasCCOut)
      MI.addOperand(MachineOperand::CreateReg(0, false));
  } else {
    // Load/store: fold the offset into the addressing-mode immediate.

    // AddrMode4 and AddrMode6 cannot handle any offset.
    if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
      return false;

    // AddrModeT2_so cannot handle any offset. If there is no offset
    // register then we change to an immediate version.
    unsigned NewOpc = Opcode;
    if (AddrMode == ARMII::AddrModeT2_so) {
      Register OffsetReg = MI.getOperand(FrameRegIdx + 1).getReg();
      if (OffsetReg != 0) {
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        return Offset == 0;
      }

      MI.RemoveOperand(FrameRegIdx+1);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(0);
      NewOpc = immediateOffsetOpcode(Opcode);
      AddrMode = ARMII::AddrModeT2_i12;
    }

    // NumBits/Scale describe the encodable immediate range of the chosen
    // addressing mode: |imm| < (1 << NumBits) * Scale.
    unsigned NumBits = 0;
    unsigned Scale = 1;
    if (AddrMode == ARMII::AddrModeT2_i8neg ||
        AddrMode == ARMII::AddrModeT2_i12) {
      // i8 supports only negative, and i12 supports only positive, so
      // based on Offset sign convert Opcode to the appropriate
      // instruction
      Offset += MI.getOperand(FrameRegIdx+1).getImm();
      if (Offset < 0) {
        NewOpc = negativeOffsetOpcode(Opcode);
        NumBits = 8;
        isSub = true;
        Offset = -Offset;
      } else {
        NewOpc = positiveOffsetOpcode(Opcode);
        NumBits = 12;
      }
    } else if (AddrMode == ARMII::AddrMode5) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
      if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      Offset += InstrOffs * 4;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrMode5FP16) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5FP16Offset(OffOp.getImm());
      if (ARM_AM::getAM5FP16Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 2;
      Offset += InstrOffs * 2;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrModeT2_i7s4 ||
               AddrMode == ARMII::AddrModeT2_i7s2 ||
               AddrMode == ARMII::AddrModeT2_i7) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm();
      unsigned OffsetMask;
      switch (AddrMode) {
      case ARMII::AddrModeT2_i7s4: NumBits = 9; OffsetMask = 0x3; break;
      case ARMII::AddrModeT2_i7s2: NumBits = 8; OffsetMask = 0x1; break;
      default: NumBits = 7; OffsetMask = 0x0; break;
      }
      // MCInst operand expects already scaled value.
      Scale = 1;
      assert((Offset & OffsetMask) == 0 && "Can't encode this offset!");
      (void)OffsetMask; // squash unused-variable warning at -NDEBUG
    } else if (AddrMode == ARMII::AddrModeT2_i8s4) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm();
      NumBits = 8 + 2;
      // MCInst operand expects already scaled value.
      Scale = 1;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else if (AddrMode == ARMII::AddrModeT2_ldrex) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm() * 4;
      NumBits = 8; // 8 bits scaled by 4
      Scale = 4;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else {
      llvm_unreachable("Unsupported addressing mode!");
    }

    if (NewOpc != Opcode)
      MI.setDesc(TII.get(NewOpc));

    MachineOperand &ImmOp = MI.getOperand(FrameRegIdx+1);

    // Attempt to fold address computation
    // Common case: small offset, fits into instruction. We need to make sure
    // the register class is correct too, for instructions like the MVE
    // VLDRH.32, which only accepts low tGPR registers.
    int ImmedOffset = Offset / Scale;
    unsigned Mask = (1 << NumBits) - 1;
    if ((unsigned)Offset <= Mask * Scale &&
        (Register::isVirtualRegister(FrameReg) ||
         RegClass->contains(FrameReg))) {
      if (Register::isVirtualRegister(FrameReg)) {
        // Make sure the register class for the virtual register is correct
        MachineRegisterInfo *MRI = &MF.getRegInfo();
        if (!MRI->constrainRegClass(FrameReg, RegClass))
          llvm_unreachable("Unable to constrain virtual register class.");
      }

      // Replace the FrameIndex with fp/sp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      if (isSub) {
        if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
          // FIXME: Not consistent.
          ImmedOffset |= 1 << NumBits;
        else
          ImmedOffset = -ImmedOffset;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset = 0;
      return true;
    }

    // Otherwise, offset doesn't fit. Pull in what we can to simplify
    ImmedOffset = ImmedOffset & Mask;
    if (isSub) {
      if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
        // FIXME: Not consistent.
        ImmedOffset |= 1 << NumBits;
      else {
        ImmedOffset = -ImmedOffset;
        if (ImmedOffset == 0)
          // Change the opcode back if the encoded offset is zero.
          MI.setDesc(TII.get(positiveOffsetOpcode(NewOpc)));
      }
    }
    ImmOp.ChangeToImmediate(ImmedOffset);
    Offset &= ~(Mask*Scale);
  }

  // Report the (signed) residual; success also requires FrameReg be legal
  // for this instruction's register class.
  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0 && (Register::isVirtualRegister(FrameReg) ||
                         RegClass->contains(FrameReg));
}
  691. ARMCC::CondCodes llvm::getITInstrPredicate(const MachineInstr &MI,
  692. Register &PredReg) {
  693. unsigned Opc = MI.getOpcode();
  694. if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
  695. return ARMCC::AL;
  696. return getInstrPredicate(MI, PredReg);
  697. }
  698. int llvm::findFirstVPTPredOperandIdx(const MachineInstr &MI) {
  699. const MCInstrDesc &MCID = MI.getDesc();
  700. if (!MCID.OpInfo)
  701. return -1;
  702. for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
  703. if (ARM::isVpred(MCID.OpInfo[i].OperandType))
  704. return i;
  705. return -1;
  706. }
  707. ARMVCC::VPTCodes llvm::getVPTInstrPredicate(const MachineInstr &MI,
  708. Register &PredReg) {
  709. int PIdx = findFirstVPTPredOperandIdx(MI);
  710. if (PIdx == -1) {
  711. PredReg = 0;
  712. return ARMVCC::None;
  713. }
  714. PredReg = MI.getOperand(PIdx+1).getReg();
  715. return (ARMVCC::VPTCodes)MI.getOperand(PIdx).getImm();
  716. }
// Recompute the block-mask operand (operand 0) of a VPT/VPST instruction by
// walking the instructions that follow it and folding each one's Then/Else
// predicate into the mask, stopping at the first unpredicated instruction.
// Used after a VPT block's contents have been modified.
void llvm::recomputeVPTBlockMask(MachineInstr &Instr) {
  assert(isVPTOpcode(Instr.getOpcode()) && "Not a VPST or VPT Instruction!");

  MachineOperand &MaskOp = Instr.getOperand(0);
  assert(MaskOp.isImm() && "Operand 0 is not the block mask of the VPT/VPST?!");

  // Skip any debug pseudos between the VPT/VPST and its first real
  // instruction.
  MachineBasicBlock::iterator Iter = ++Instr.getIterator(),
                              End = Instr.getParent()->end();
  while (Iter != End && Iter->isDebugInstr())
    ++Iter;

  // Verify that the instruction after the VPT/VPST is predicated (it should
  // be), and skip it.
  assert(Iter != End && "Expected some instructions in any VPT block");
  assert(
      getVPTInstrPredicate(*Iter) == ARMVCC::Then &&
      "VPT/VPST should be followed by an instruction with a 'then' predicate!");
  ++Iter;

  // Iterate over the predicated instructions, updating the BlockMask as we go.
  ARM::PredBlockMask BlockMask = ARM::PredBlockMask::T;
  while (Iter != End) {
    if (Iter->isDebugInstr()) {
      ++Iter;
      continue;
    }
    ARMVCC::VPTCodes Pred = getVPTInstrPredicate(*Iter);
    // The first unpredicated instruction marks the end of the block.
    if (Pred == ARMVCC::None)
      break;
    BlockMask = expandPredBlockMask(BlockMask, Pred);
    ++Iter;
  }

  // Rewrite the BlockMask.
  MaskOp.setImm((int64_t)(BlockMask));
}