//===- ARMInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for ARM.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "ARMRegisterBankInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "arm-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class ARMInstructionSelector : public InstructionSelector {
public:
  ARMInstructionSelector(const ARMBaseTargetMachine &TM, const ARMSubtarget &STI,
                         const ARMRegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  struct CmpConstants;
  struct InsertInfo;

  bool selectCmp(CmpConstants Helper, MachineInstrBuilder &MIB,
                 MachineRegisterInfo &MRI) const;

  // Helper for inserting a comparison sequence that sets \p ResReg to either 1
  // if \p LHSReg and \p RHSReg are in the relationship defined by \p Cond, or
  // \p PrevRes otherwise. In essence, it computes PrevRes OR (LHS Cond RHS).
  bool insertComparison(CmpConstants Helper, InsertInfo I, unsigned ResReg,
                        ARMCC::CondCodes Cond, unsigned LHSReg, unsigned RHSReg,
                        unsigned PrevRes) const;

  // Set \p DestReg to \p Constant.
  void putConstant(InsertInfo I, unsigned DestReg, unsigned Constant) const;

  bool selectGlobal(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const;
  bool selectSelect(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const;
  bool selectShift(unsigned ShiftOpc, MachineInstrBuilder &MIB) const;

  // Check if the types match and both operands have the expected size and
  // register bank.
  bool validOpRegPair(MachineRegisterInfo &MRI, unsigned LHS, unsigned RHS,
                      unsigned ExpectedSize, unsigned ExpectedRegBankID) const;

  // Check if the register has the expected size and register bank.
  bool validReg(MachineRegisterInfo &MRI, unsigned Reg, unsigned ExpectedSize,
                unsigned ExpectedRegBankID) const;

  const ARMBaseInstrInfo &TII;
  const ARMBaseRegisterInfo &TRI;
  const ARMBaseTargetMachine &TM;
  const ARMRegisterBankInfo &RBI;
  const ARMSubtarget &STI;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. If we want to reuse some of
  // the custom C++ predicates written for DAGISel, we need to have both around.
  const ARMSubtarget *Subtarget = &STI;

  // Store the opcodes that we might need, so we don't have to check what kind
  // of subtarget (ARM vs Thumb) we have all the time.
  struct OpcodeCache {
    unsigned ZEXT16;
    unsigned SEXT16;

    unsigned ZEXT8;
    unsigned SEXT8;

    // Used for implementing ZEXT/SEXT from i1
    unsigned AND;
    unsigned RSB;

    unsigned STORE32;
    unsigned LOAD32;

    unsigned STORE16;
    unsigned LOAD16;

    unsigned STORE8;
    unsigned LOAD8;

    unsigned ADDrr;
    unsigned ADDri;

    // Used for G_ICMP
    unsigned CMPrr;
    unsigned MOVi;
    unsigned MOVCCi;

    // Used for G_SELECT
    unsigned MOVCCr;

    unsigned TSTri;
    unsigned Bcc;

    // Used for G_GLOBAL_VALUE
    unsigned MOVi32imm;
    unsigned ConstPoolLoad;
    unsigned MOV_ga_pcrel;
    unsigned LDRLIT_ga_pcrel;
    unsigned LDRLIT_ga_abs;

    OpcodeCache(const ARMSubtarget &STI);
  } const Opcodes;

  // Select the opcode for simple extensions (that translate to a single SXT/UXT
  // instruction). Extension operations more complicated than that should not
  // invoke this. Returns the original opcode if it doesn't know how to select a
  // better one.
  unsigned selectSimpleExtOpc(unsigned Opc, unsigned Size) const;

  // Select the opcode for simple loads and stores. Returns the original opcode
  // if it doesn't know how to select a better one.
  unsigned selectLoadStoreOpCode(unsigned Opc, unsigned RegBank,
                                 unsigned Size) const;

  void renderVFPF32Imm(MachineInstrBuilder &New, const MachineInstr &Old,
                       int OpIdx = -1) const;
  void renderVFPF64Imm(MachineInstrBuilder &New, const MachineInstr &Old,
                       int OpIdx = -1) const;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

  // We declare the temporaries used by selectImpl() in the class to minimize
  // the cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};
} // end anonymous namespace

namespace llvm {
InstructionSelector *
createARMInstructionSelector(const ARMBaseTargetMachine &TM,
                             const ARMSubtarget &STI,
                             const ARMRegisterBankInfo &RBI) {
  return new ARMInstructionSelector(TM, STI, RBI);
}
}

#define GET_GLOBALISEL_IMPL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

ARMInstructionSelector::ARMInstructionSelector(const ARMBaseTargetMachine &TM,
                                               const ARMSubtarget &STI,
                                               const ARMRegisterBankInfo &RBI)
    : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), TM(TM), RBI(RBI),
      STI(STI), Opcodes(STI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
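
// Pick a concrete register class for \p Reg based on its register bank (GPR or
// FPR) and the size of its type: GPR for the GPR bank, and SPR/DPR/QPR for
// 32-, 64- and 128-bit FPR values respectively.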
static const TargetRegisterClass *guessRegClass(unsigned Reg,
                                                MachineRegisterInfo &MRI,
                                                const TargetRegisterInfo &TRI,
                                                const RegisterBankInfo &RBI) {
  const RegisterBank *RegBank = RBI.getRegBank(Reg, MRI, TRI);
  assert(RegBank && "Can't get reg bank for virtual register");

  const unsigned Size = MRI.getType(Reg).getSizeInBits();
  assert((RegBank->getID() == ARM::GPRRegBankID ||
          RegBank->getID() == ARM::FPRRegBankID) &&
         "Unsupported reg bank");

  if (RegBank->getID() == ARM::FPRRegBankID) {
    if (Size == 32)
      return &ARM::SPRRegClass;
    else if (Size == 64)
      return &ARM::DPRRegClass;
    else if (Size == 128)
      return &ARM::QPRRegClass;
    else
      llvm_unreachable("Unsupported destination size");
  }

  return &ARM::GPRRegClass;
}
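
// Select a plain COPY by constraining its virtual destination register to the
// class guessed from its bank and size; physical destinations need no work.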
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {
  Register DstReg = I.getOperand(0).getReg();
  if (Register::isPhysicalRegister(DstReg))
    return true;

  const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI);

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}
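
// Turn a G_MERGE_VALUES of two 32-bit GPR pieces into a VMOVDRR that builds
// the 64-bit DPR result.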
static bool selectMergeValues(MachineInstrBuilder &MIB,
                              const ARMBaseInstrInfo &TII,
                              MachineRegisterInfo &MRI,
                              const TargetRegisterInfo &TRI,
                              const RegisterBankInfo &RBI) {
  assert(TII.getSubtarget().hasVFP2Base() && "Can't select merge without VFP");

  // We only support G_MERGE_VALUES as a way to stick together two scalar GPRs
  // into one DPR.
  Register VReg0 = MIB.getReg(0);
  (void)VReg0;
  assert(MRI.getType(VReg0).getSizeInBits() == 64 &&
         RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::FPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");
  Register VReg1 = MIB.getReg(1);
  (void)VReg1;
  assert(MRI.getType(VReg1).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");
  Register VReg2 = MIB.getReg(2);
  (void)VReg2;
  assert(MRI.getType(VReg2).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");

  MIB->setDesc(TII.get(ARM::VMOVDRR));
  MIB.add(predOps(ARMCC::AL));

  return true;
}
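
// Turn a G_UNMERGE_VALUES of a 64-bit DPR into a VMOVRRD that extracts the two
// 32-bit GPR halves.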
static bool selectUnmergeValues(MachineInstrBuilder &MIB,
                                const ARMBaseInstrInfo &TII,
                                MachineRegisterInfo &MRI,
                                const TargetRegisterInfo &TRI,
                                const RegisterBankInfo &RBI) {
  assert(TII.getSubtarget().hasVFP2Base() &&
         "Can't select unmerge without VFP");

  // We only support G_UNMERGE_VALUES as a way to break up one DPR into two
  // GPRs.
  Register VReg0 = MIB.getReg(0);
  (void)VReg0;
  assert(MRI.getType(VReg0).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");
  Register VReg1 = MIB.getReg(1);
  (void)VReg1;
  assert(MRI.getType(VReg1).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");
  Register VReg2 = MIB.getReg(2);
  (void)VReg2;
  assert(MRI.getType(VReg2).getSizeInBits() == 64 &&
         RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::FPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");

  MIB->setDesc(TII.get(ARM::VMOVRRD));
  MIB.add(predOps(ARMCC::AL));

  return true;
}
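
// Fill in the opcode cache; most entries simply pick between the ARM and
// Thumb2 encodings of the same instruction.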
ARMInstructionSelector::OpcodeCache::OpcodeCache(const ARMSubtarget &STI) {
  bool isThumb = STI.isThumb();

  using namespace TargetOpcode;

#define STORE_OPCODE(VAR, OPC) VAR = isThumb ? ARM::t2##OPC : ARM::OPC
  STORE_OPCODE(SEXT16, SXTH);
  STORE_OPCODE(ZEXT16, UXTH);

  STORE_OPCODE(SEXT8, SXTB);
  STORE_OPCODE(ZEXT8, UXTB);

  STORE_OPCODE(AND, ANDri);
  STORE_OPCODE(RSB, RSBri);

  STORE_OPCODE(STORE32, STRi12);
  STORE_OPCODE(LOAD32, LDRi12);

  // LDRH/STRH are special...
  STORE16 = isThumb ? ARM::t2STRHi12 : ARM::STRH;
  LOAD16 = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;

  STORE_OPCODE(STORE8, STRBi12);
  STORE_OPCODE(LOAD8, LDRBi12);

  STORE_OPCODE(ADDrr, ADDrr);
  STORE_OPCODE(ADDri, ADDri);

  STORE_OPCODE(CMPrr, CMPrr);

  STORE_OPCODE(MOVi, MOVi);
  STORE_OPCODE(MOVCCi, MOVCCi);
  STORE_OPCODE(MOVCCr, MOVCCr);

  STORE_OPCODE(TSTri, TSTri);
  STORE_OPCODE(Bcc, Bcc);

  STORE_OPCODE(MOVi32imm, MOVi32imm);
  ConstPoolLoad = isThumb ? ARM::t2LDRpci : ARM::LDRi12;
  STORE_OPCODE(MOV_ga_pcrel, MOV_ga_pcrel);
  LDRLIT_ga_pcrel = isThumb ? ARM::tLDRLIT_ga_pcrel : ARM::LDRLIT_ga_pcrel;
  LDRLIT_ga_abs = isThumb ? ARM::tLDRLIT_ga_abs : ARM::LDRLIT_ga_abs;
#undef STORE_OPCODE
}

unsigned ARMInstructionSelector::selectSimpleExtOpc(unsigned Opc,
                                                    unsigned Size) const {
  using namespace TargetOpcode;

  if (Size != 8 && Size != 16)
    return Opc;

  if (Opc == G_SEXT)
    return Size == 8 ? Opcodes.SEXT8 : Opcodes.SEXT16;

  if (Opc == G_ZEXT)
    return Size == 8 ? Opcodes.ZEXT8 : Opcodes.ZEXT16;

  return Opc;
}

unsigned ARMInstructionSelector::selectLoadStoreOpCode(unsigned Opc,
                                                       unsigned RegBank,
                                                       unsigned Size) const {
  bool isStore = Opc == TargetOpcode::G_STORE;

  if (RegBank == ARM::GPRRegBankID) {
    switch (Size) {
    case 1:
    case 8:
      return isStore ? Opcodes.STORE8 : Opcodes.LOAD8;
    case 16:
      return isStore ? Opcodes.STORE16 : Opcodes.LOAD16;
    case 32:
      return isStore ? Opcodes.STORE32 : Opcodes.LOAD32;
    default:
      return Opc;
    }
  }

  if (RegBank == ARM::FPRRegBankID) {
    switch (Size) {
    case 32:
      return isStore ? ARM::VSTRS : ARM::VLDRS;
    case 64:
      return isStore ? ARM::VSTRD : ARM::VLDRD;
    default:
      return Opc;
    }
  }

  return Opc;
}

// When lowering comparisons, we sometimes need to perform two compares instead
// of just one. Get the condition codes for both comparisons. If only one is
// needed, the second member of the pair is ARMCC::AL.
static std::pair<ARMCC::CondCodes, ARMCC::CondCodes>
getComparePreds(CmpInst::Predicate Pred) {
  std::pair<ARMCC::CondCodes, ARMCC::CondCodes> Preds = {ARMCC::AL, ARMCC::AL};
  switch (Pred) {
  case CmpInst::FCMP_ONE:
    Preds = {ARMCC::GT, ARMCC::MI};
    break;
  case CmpInst::FCMP_UEQ:
    Preds = {ARMCC::EQ, ARMCC::VS};
    break;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    Preds.first = ARMCC::EQ;
    break;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    Preds.first = ARMCC::GT;
    break;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    Preds.first = ARMCC::GE;
    break;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    Preds.first = ARMCC::HI;
    break;
  case CmpInst::FCMP_OLT:
    Preds.first = ARMCC::MI;
    break;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    Preds.first = ARMCC::LS;
    break;
  case CmpInst::FCMP_ORD:
    Preds.first = ARMCC::VC;
    break;
  case CmpInst::FCMP_UNO:
    Preds.first = ARMCC::VS;
    break;
  case CmpInst::FCMP_UGE:
    Preds.first = ARMCC::PL;
    break;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    Preds.first = ARMCC::LT;
    break;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    Preds.first = ARMCC::LE;
    break;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    Preds.first = ARMCC::NE;
    break;
  case CmpInst::ICMP_UGE:
    Preds.first = ARMCC::HS;
    break;
  case CmpInst::ICMP_ULT:
    Preds.first = ARMCC::LO;
    break;
  default:
    break;
  }
  assert(Preds.first != ARMCC::AL && "No comparisons needed?");
  return Preds;
}

struct ARMInstructionSelector::CmpConstants {
  CmpConstants(unsigned CmpOpcode, unsigned FlagsOpcode, unsigned SelectOpcode,
               unsigned OpRegBank, unsigned OpSize)
      : ComparisonOpcode(CmpOpcode), ReadFlagsOpcode(FlagsOpcode),
        SelectResultOpcode(SelectOpcode), OperandRegBankID(OpRegBank),
        OperandSize(OpSize) {}

  // The opcode used for performing the comparison.
  const unsigned ComparisonOpcode;

  // The opcode used for reading the flags set by the comparison. May be
  // ARM::INSTRUCTION_LIST_END if we don't need to read the flags.
  const unsigned ReadFlagsOpcode;

  // The opcode used for materializing the result of the comparison.
  const unsigned SelectResultOpcode;

  // The assumed register bank ID for the operands.
  const unsigned OperandRegBankID;

  // The assumed size in bits for the operands.
  const unsigned OperandSize;
};

struct ARMInstructionSelector::InsertInfo {
  InsertInfo(MachineInstrBuilder &MIB)
      : MBB(*MIB->getParent()), InsertBefore(std::next(MIB->getIterator())),
        DbgLoc(MIB->getDebugLoc()) {}

  MachineBasicBlock &MBB;
  const MachineBasicBlock::instr_iterator InsertBefore;
  const DebugLoc &DbgLoc;
};

void ARMInstructionSelector::putConstant(InsertInfo I, unsigned DestReg,
                                         unsigned Constant) const {
  (void)BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(Opcodes.MOVi))
      .addDef(DestReg)
      .addImm(Constant)
      .add(predOps(ARMCC::AL))
      .add(condCodeOp());
}

bool ARMInstructionSelector::validOpRegPair(MachineRegisterInfo &MRI,
                                            unsigned LHSReg, unsigned RHSReg,
                                            unsigned ExpectedSize,
                                            unsigned ExpectedRegBankID) const {
  return MRI.getType(LHSReg) == MRI.getType(RHSReg) &&
         validReg(MRI, LHSReg, ExpectedSize, ExpectedRegBankID) &&
         validReg(MRI, RHSReg, ExpectedSize, ExpectedRegBankID);
}

bool ARMInstructionSelector::validReg(MachineRegisterInfo &MRI, unsigned Reg,
                                      unsigned ExpectedSize,
                                      unsigned ExpectedRegBankID) const {
  if (MRI.getType(Reg).getSizeInBits() != ExpectedSize) {
    LLVM_DEBUG(dbgs() << "Unexpected size for register");
    return false;
  }

  if (RBI.getRegBank(Reg, MRI, TRI)->getID() != ExpectedRegBankID) {
    LLVM_DEBUG(dbgs() << "Unexpected register bank for register");
    return false;
  }

  return true;
}
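
// Lower G_ICMP/G_FCMP: materialize a 0 into the result, then emit one (or, for
// predicates like FCMP_ONE/FCMP_UEQ, two) compare + conditional-move sequences
// that overwrite it with 1 when the predicate holds.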
bool ARMInstructionSelector::selectCmp(CmpConstants Helper,
                                       MachineInstrBuilder &MIB,
                                       MachineRegisterInfo &MRI) const {
  const InsertInfo I(MIB);

  auto ResReg = MIB.getReg(0);
  if (!validReg(MRI, ResReg, 1, ARM::GPRRegBankID))
    return false;

  auto Cond =
      static_cast<CmpInst::Predicate>(MIB->getOperand(1).getPredicate());
  if (Cond == CmpInst::FCMP_TRUE || Cond == CmpInst::FCMP_FALSE) {
    putConstant(I, ResReg, Cond == CmpInst::FCMP_TRUE ? 1 : 0);
    MIB->eraseFromParent();
    return true;
  }

  auto LHSReg = MIB.getReg(2);
  auto RHSReg = MIB.getReg(3);
  if (!validOpRegPair(MRI, LHSReg, RHSReg, Helper.OperandSize,
                      Helper.OperandRegBankID))
    return false;

  auto ARMConds = getComparePreds(Cond);
  auto ZeroReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
  putConstant(I, ZeroReg, 0);

  if (ARMConds.second == ARMCC::AL) {
    // Simple case, we only need one comparison and we're done.
    if (!insertComparison(Helper, I, ResReg, ARMConds.first, LHSReg, RHSReg,
                          ZeroReg))
      return false;
  } else {
    // Not so simple, we need two successive comparisons.
    auto IntermediateRes = MRI.createVirtualRegister(&ARM::GPRRegClass);
    if (!insertComparison(Helper, I, IntermediateRes, ARMConds.first, LHSReg,
                          RHSReg, ZeroReg))
      return false;
    if (!insertComparison(Helper, I, ResReg, ARMConds.second, LHSReg, RHSReg,
                          IntermediateRes))
      return false;
  }

  MIB->eraseFromParent();
  return true;
}

bool ARMInstructionSelector::insertComparison(CmpConstants Helper, InsertInfo I,
                                              unsigned ResReg,
                                              ARMCC::CondCodes Cond,
                                              unsigned LHSReg, unsigned RHSReg,
                                              unsigned PrevRes) const {
  // Perform the comparison.
  auto CmpI =
      BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(Helper.ComparisonOpcode))
          .addUse(LHSReg)
          .addUse(RHSReg)
          .add(predOps(ARMCC::AL));
  if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
    return false;

  // Read the comparison flags (if necessary).
  if (Helper.ReadFlagsOpcode != ARM::INSTRUCTION_LIST_END) {
    auto ReadI = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc,
                         TII.get(Helper.ReadFlagsOpcode))
                     .add(predOps(ARMCC::AL));
    if (!constrainSelectedInstRegOperands(*ReadI, TII, TRI, RBI))
      return false;
  }

  // Select either 1 or the previous result based on the value of the flags.
  auto Mov1I = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc,
                       TII.get(Helper.SelectResultOpcode))
                   .addDef(ResReg)
                   .addUse(PrevRes)
                   .addImm(1)
                   .add(predOps(Cond, ARM::CPSR));
  if (!constrainSelectedInstRegOperands(*Mov1I, TII, TRI, RBI))
    return false;

  return true;
}
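
// Lower G_GLOBAL_VALUE. Depending on the relocation model (static, PIC,
// ROPI/RWPI) and object format, this becomes a MOVi32imm/MOV_ga_pcrel, a
// constant pool load, or an LDRLIT pseudo, possibly followed by a load through
// the GOT for indirect references.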
bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB,
                                          MachineRegisterInfo &MRI) const {
  if ((STI.isROPI() || STI.isRWPI()) && !STI.isTargetELF()) {
    LLVM_DEBUG(dbgs() << "ROPI and RWPI only supported for ELF\n");
    return false;
  }

  auto GV = MIB->getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    LLVM_DEBUG(dbgs() << "TLS variables not supported yet\n");
    return false;
  }

  auto &MBB = *MIB->getParent();
  auto &MF = *MBB.getParent();

  bool UseMovt = STI.useMovt();

  unsigned Size = TM.getPointerSize(0);
  const Align Alignment(4);

  auto addOpsForConstantPoolLoad = [&MF, Alignment,
                                    Size](MachineInstrBuilder &MIB,
                                          const GlobalValue *GV, bool IsSBREL) {
    assert((MIB->getOpcode() == ARM::LDRi12 ||
            MIB->getOpcode() == ARM::t2LDRpci) &&
           "Unsupported instruction");
    auto ConstPool = MF.getConstantPool();
    auto CPIndex =
        // For SB relative entries we need a target-specific constant pool.
        // Otherwise, just use a regular constant pool entry.
        IsSBREL
            ? ConstPool->getConstantPoolIndex(
                  ARMConstantPoolConstant::Create(GV, ARMCP::SBREL), Alignment)
            : ConstPool->getConstantPoolIndex(GV, Alignment);
    MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
        .addMemOperand(MF.getMachineMemOperand(
            MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
            Size, Alignment));
    if (MIB->getOpcode() == ARM::LDRi12)
      MIB.addImm(0);
    MIB.add(predOps(ARMCC::AL));
  };

  auto addGOTMemOperand = [this, &MF, Alignment](MachineInstrBuilder &MIB) {
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad,
        TM.getProgramPointerSize(), Alignment));
  };

  if (TM.isPositionIndependent()) {
    bool Indirect = STI.isGVIndirectSymbol(GV);

    // For ARM mode, we have different pseudoinstructions for direct accesses
    // and indirect accesses, and the ones for indirect accesses include the
    // load from GOT. For Thumb mode, we use the same pseudoinstruction for both
    // direct and indirect accesses, and we need to manually generate the load
    // from GOT.
    bool UseOpcodeThatLoads = Indirect && !STI.isThumb();

    // FIXME: Taking advantage of MOVT for ELF is pretty involved, so we don't
    // support it yet. See PR28229.
    unsigned Opc =
        UseMovt && !STI.isTargetELF()
            ? (UseOpcodeThatLoads ? (unsigned)ARM::MOV_ga_pcrel_ldr
                                  : Opcodes.MOV_ga_pcrel)
            : (UseOpcodeThatLoads ? (unsigned)ARM::LDRLIT_ga_pcrel_ldr
                                  : Opcodes.LDRLIT_ga_pcrel);
    MIB->setDesc(TII.get(Opc));

    int TargetFlags = ARMII::MO_NO_FLAG;
    if (STI.isTargetDarwin())
      TargetFlags |= ARMII::MO_NONLAZY;
    if (STI.isGVInGOT(GV))
      TargetFlags |= ARMII::MO_GOT;
    MIB->getOperand(1).setTargetFlags(TargetFlags);

    if (Indirect) {
      if (!UseOpcodeThatLoads) {
        auto ResultReg = MIB.getReg(0);
        auto AddressReg = MRI.createVirtualRegister(&ARM::GPRRegClass);

        MIB->getOperand(0).setReg(AddressReg);

        auto InsertBefore = std::next(MIB->getIterator());
        auto MIBLoad = BuildMI(MBB, InsertBefore, MIB->getDebugLoc(),
                               TII.get(Opcodes.LOAD32))
                           .addDef(ResultReg)
                           .addReg(AddressReg)
                           .addImm(0)
                           .add(predOps(ARMCC::AL));
        addGOTMemOperand(MIBLoad);

        if (!constrainSelectedInstRegOperands(*MIBLoad, TII, TRI, RBI))
          return false;
      } else {
        addGOTMemOperand(MIB);
      }
    }

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  bool isReadOnly = STI.getTargetLowering()->isReadOnly(GV);
  if (STI.isROPI() && isReadOnly) {
    unsigned Opc = UseMovt ? Opcodes.MOV_ga_pcrel : Opcodes.LDRLIT_ga_pcrel;
    MIB->setDesc(TII.get(Opc));
    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }
  if (STI.isRWPI() && !isReadOnly) {
    auto Offset = MRI.createVirtualRegister(&ARM::GPRRegClass);
    MachineInstrBuilder OffsetMIB;
    if (UseMovt) {
      OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(),
                          TII.get(Opcodes.MOVi32imm), Offset);
      OffsetMIB.addGlobalAddress(GV, /*Offset*/ 0, ARMII::MO_SBREL);
    } else {
      // Load the offset from the constant pool.
      OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(),
                          TII.get(Opcodes.ConstPoolLoad), Offset);
      addOpsForConstantPoolLoad(OffsetMIB, GV, /*IsSBREL*/ true);
    }
    if (!constrainSelectedInstRegOperands(*OffsetMIB, TII, TRI, RBI))
      return false;

    // Add the offset to the SB register.
    MIB->setDesc(TII.get(Opcodes.ADDrr));
    MIB->RemoveOperand(1);
    MIB.addReg(ARM::R9) // FIXME: don't hardcode R9
        .addReg(Offset)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  if (STI.isTargetELF()) {
    if (UseMovt) {
      MIB->setDesc(TII.get(Opcodes.MOVi32imm));
    } else {
      // Load the global's address from the constant pool.
      MIB->setDesc(TII.get(Opcodes.ConstPoolLoad));
      MIB->RemoveOperand(1);
      addOpsForConstantPoolLoad(MIB, GV, /*IsSBREL*/ false);
    }
  } else if (STI.isTargetMachO()) {
    if (UseMovt)
      MIB->setDesc(TII.get(Opcodes.MOVi32imm));
    else
      MIB->setDesc(TII.get(Opcodes.LDRLIT_ga_abs));
  } else {
    LLVM_DEBUG(dbgs() << "Object format not supported yet\n");
    return false;
  }

  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}
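
// Lower G_SELECT: TST the condition register against 1 to set the flags, then
// use a predicated MOVCCr to choose between the two 32-bit GPR inputs.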
bool ARMInstructionSelector::selectSelect(MachineInstrBuilder &MIB,
                                          MachineRegisterInfo &MRI) const {
  auto &MBB = *MIB->getParent();
  auto InsertBefore = std::next(MIB->getIterator());
  auto &DbgLoc = MIB->getDebugLoc();

  // Compare the condition to 1.
  auto CondReg = MIB.getReg(1);
  assert(validReg(MRI, CondReg, 1, ARM::GPRRegBankID) &&
         "Unsupported types for select operation");
  auto CmpI = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(Opcodes.TSTri))
                  .addUse(CondReg)
                  .addImm(1)
                  .add(predOps(ARMCC::AL));
  if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
    return false;

  // Move a value into the result register based on the result of the
  // comparison.
  auto ResReg = MIB.getReg(0);
  auto TrueReg = MIB.getReg(2);
  auto FalseReg = MIB.getReg(3);
  assert(validOpRegPair(MRI, ResReg, TrueReg, 32, ARM::GPRRegBankID) &&
         validOpRegPair(MRI, TrueReg, FalseReg, 32, ARM::GPRRegBankID) &&
         "Unsupported types for select operation");
  auto Mov1I = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(Opcodes.MOVCCr))
                   .addDef(ResReg)
                   .addUse(TrueReg)
                   .addUse(FalseReg)
                   .add(predOps(ARMCC::EQ, ARM::CPSR));
  if (!constrainSelectedInstRegOperands(*Mov1I, TII, TRI, RBI))
    return false;

  MIB->eraseFromParent();
  return true;
}
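
// Lower G_SHL/G_LSHR/G_ASHR into an ARM-mode MOVsr (register-shifted move)
// with the given ARM_AM shift opcode.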
bool ARMInstructionSelector::selectShift(unsigned ShiftOpc,
                                         MachineInstrBuilder &MIB) const {
  assert(!STI.isThumb() && "Unsupported subtarget");
  MIB->setDesc(TII.get(ARM::MOVsr));
  MIB.addImm(ShiftOpc);
  MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}
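
// Custom operand renderers: translate the value of a G_FCONSTANT into the
// 8-bit encoded VFP immediate and add it to the newly built instruction.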
void ARMInstructionSelector::renderVFPF32Imm(
    MachineInstrBuilder &NewInstBuilder, const MachineInstr &OldInst,
    int OpIdx) const {
  assert(OldInst.getOpcode() == TargetOpcode::G_FCONSTANT &&
         OpIdx == -1 && "Expected G_FCONSTANT");

  APFloat FPImmValue = OldInst.getOperand(1).getFPImm()->getValueAPF();
  int FPImmEncoding = ARM_AM::getFP32Imm(FPImmValue);
  assert(FPImmEncoding != -1 && "Invalid immediate value");

  NewInstBuilder.addImm(FPImmEncoding);
}

void ARMInstructionSelector::renderVFPF64Imm(
    MachineInstrBuilder &NewInstBuilder, const MachineInstr &OldInst,
    int OpIdx) const {
  assert(OldInst.getOpcode() == TargetOpcode::G_FCONSTANT &&
         OpIdx == -1 && "Expected G_FCONSTANT");

  APFloat FPImmValue = OldInst.getOperand(1).getFPImm()->getValueAPF();
  int FPImmEncoding = ARM_AM::getFP64Imm(FPImmValue);
  assert(FPImmEncoding != -1 && "Invalid immediate value");

  NewInstBuilder.addImm(FPImmEncoding);
}
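
// Entry point for instruction selection: generic opcodes are first handed to
// the TableGen-generated selectImpl(), and anything it rejects falls through
// to the manual lowering below.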
bool ARMInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  auto &MBB = *I.getParent();
  auto &MF = *MBB.getParent();
  auto &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }

  using namespace TargetOpcode;

  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineInstrBuilder MIB{MF, I};
  bool isSExt = false;

  switch (I.getOpcode()) {
  case G_SEXT:
    isSExt = true;
    LLVM_FALLTHROUGH;
  case G_ZEXT: {
    assert(MRI.getType(I.getOperand(0).getReg()).getSizeInBits() <= 32 &&
           "Unsupported destination size for extension");

    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    unsigned SrcSize = SrcTy.getSizeInBits();
    switch (SrcSize) {
    case 1: {
      // ZExt boils down to & 0x1; for SExt we also subtract that from 0
      I.setDesc(TII.get(Opcodes.AND));
      MIB.addImm(1).add(predOps(ARMCC::AL)).add(condCodeOp());

      if (isSExt) {
        Register SExtResult = I.getOperand(0).getReg();

        // Use a new virtual register for the result of the AND
        Register AndResult = MRI.createVirtualRegister(&ARM::GPRRegClass);
        I.getOperand(0).setReg(AndResult);

        auto InsertBefore = std::next(I.getIterator());
        auto SubI =
            BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(Opcodes.RSB))
                .addDef(SExtResult)
                .addUse(AndResult)
                .addImm(0)
                .add(predOps(ARMCC::AL))
                .add(condCodeOp());
        if (!constrainSelectedInstRegOperands(*SubI, TII, TRI, RBI))
          return false;
      }
      break;
    }
    case 8:
    case 16: {
      unsigned NewOpc = selectSimpleExtOpc(I.getOpcode(), SrcSize);
      if (NewOpc == I.getOpcode())
        return false;
      I.setDesc(TII.get(NewOpc));
      MIB.addImm(0).add(predOps(ARMCC::AL));
      break;
    }
    default:
      LLVM_DEBUG(dbgs() << "Unsupported source size for extension");
      return false;
    }
    break;
  }
  case G_ANYEXT:
  case G_TRUNC: {
    // The high bits are undefined, so there's nothing special to do, just
    // treat it as a copy.
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() == ARM::FPRRegBankID) {
      // This should only happen in the obscure case where we have put a 64-bit
      // integer into a D register. Get it out of there and keep only the
      // interesting part.
      assert(I.getOpcode() == G_TRUNC && "Unsupported operand for G_ANYEXT");
      assert(DstRegBank.getID() == ARM::GPRRegBankID &&
             "Unsupported combination of register banks");
      assert(MRI.getType(SrcReg).getSizeInBits() == 64 && "Unsupported size");
      assert(MRI.getType(DstReg).getSizeInBits() <= 32 && "Unsupported size");

      Register IgnoredBits = MRI.createVirtualRegister(&ARM::GPRRegClass);
      auto InsertBefore = std::next(I.getIterator());
      auto MovI =
          BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(ARM::VMOVRRD))
              .addDef(DstReg)
              .addDef(IgnoredBits)
              .addUse(SrcReg)
              .add(predOps(ARMCC::AL));
      if (!constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI))
        return false;

      MIB->eraseFromParent();
      return true;
    }

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      LLVM_DEBUG(
          dbgs() << "G_TRUNC/G_ANYEXT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_TRUNC/G_ANYEXT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_CONSTANT: {
    if (!MRI.getType(I.getOperand(0).getReg()).isPointer()) {
      // Non-pointer constants should be handled by TableGen.
      LLVM_DEBUG(dbgs() << "Unsupported constant type\n");
      return false;
    }

    auto &Val = I.getOperand(1);
    if (Val.isCImm()) {
      if (!Val.getCImm()->isZero()) {
        LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
      Val.ChangeToImmediate(0);
    } else {
      assert(Val.isImm() && "Unexpected operand for G_CONSTANT");
      if (Val.getImm() != 0) {
        LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
    }

    assert(!STI.isThumb() && "Unsupported subtarget");
    I.setDesc(TII.get(ARM::MOVi));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  }
  case G_FCONSTANT: {
    // Load from constant pool
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits() / 8;
    Align Alignment(Size);

    assert((Size == 4 || Size == 8) && "Unsupported FP constant type");
    auto LoadOpcode = Size == 4 ? ARM::VLDRS : ARM::VLDRD;

    auto ConstPool = MF.getConstantPool();
    auto CPIndex =
        ConstPool->getConstantPoolIndex(I.getOperand(1).getFPImm(), Alignment);
    MIB->setDesc(TII.get(LoadOpcode));
    MIB->RemoveOperand(1);
    MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
        .addMemOperand(
            MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF),
                                    MachineMemOperand::MOLoad, Size, Alignment))
        .addImm(0)
        .add(predOps(ARMCC::AL));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      LLVM_DEBUG(
          dbgs()
          << "G_INTTOPTR/G_PTRTOINT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      LLVM_DEBUG(
          dbgs() << "G_INTTOPTR/G_PTRTOINT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_SELECT:
    return selectSelect(MIB, MRI);
  case G_ICMP: {
    CmpConstants Helper(Opcodes.CMPrr, ARM::INSTRUCTION_LIST_END,
                        Opcodes.MOVCCi, ARM::GPRRegBankID, 32);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_FCMP: {
    assert(STI.hasVFP2Base() && "Can't select fcmp without VFP");

    Register OpReg = I.getOperand(2).getReg();
    unsigned Size = MRI.getType(OpReg).getSizeInBits();

    if (Size == 64 && !STI.hasFP64()) {
      LLVM_DEBUG(dbgs() << "Subtarget only supports single precision");
      return false;
    }
    if (Size != 32 && Size != 64) {
      LLVM_DEBUG(dbgs() << "Unsupported size for G_FCMP operand");
      return false;
    }

    CmpConstants Helper(Size == 32 ? ARM::VCMPS : ARM::VCMPD, ARM::FMSTAT,
                        Opcodes.MOVCCi, ARM::FPRRegBankID, Size);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_LSHR:
    return selectShift(ARM_AM::ShiftOpc::lsr, MIB);
  case G_ASHR:
    return selectShift(ARM_AM::ShiftOpc::asr, MIB);
  case G_SHL: {
    return selectShift(ARM_AM::ShiftOpc::lsl, MIB);
  }
  case G_PTR_ADD:
    I.setDesc(TII.get(Opcodes.ADDrr));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_FRAME_INDEX:
    // Add 0 to the given frame index and hope it will eventually be folded into
    // the user(s).
    I.setDesc(TII.get(Opcodes.ADDri));
    MIB.addImm(0).add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_GLOBAL_VALUE:
    return selectGlobal(MIB, MRI);
  case G_STORE:
  case G_LOAD: {
    const auto &MemOp = **I.memoperands_begin();
    if (MemOp.isAtomic()) {
      LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }

    Register Reg = I.getOperand(0).getReg();
    unsigned RegBank = RBI.getRegBank(Reg, MRI, TRI)->getID();

    LLT ValTy = MRI.getType(Reg);
    const auto ValSize = ValTy.getSizeInBits();

    assert((ValSize != 64 || STI.hasVFP2Base()) &&
           "Don't know how to load/store 64-bit value without VFP");

    const auto NewOpc = selectLoadStoreOpCode(I.getOpcode(), RegBank, ValSize);
    if (NewOpc == G_LOAD || NewOpc == G_STORE)
      return false;

    I.setDesc(TII.get(NewOpc));

    if (NewOpc == ARM::LDRH || NewOpc == ARM::STRH)
      // LDRH has a funny addressing mode (there's already a FIXME for it).
      MIB.addReg(0);
    MIB.addImm(0).add(predOps(ARMCC::AL));
    break;
  }
  case G_MERGE_VALUES: {
    if (!selectMergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_UNMERGE_VALUES: {
    if (!selectUnmergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_BRCOND: {
    if (!validReg(MRI, I.getOperand(0).getReg(), 1, ARM::GPRRegBankID)) {
      LLVM_DEBUG(dbgs() << "Unsupported condition register for G_BRCOND");
      return false;
    }

    // Set the flags.
    auto Test =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcodes.TSTri))
            .addReg(I.getOperand(0).getReg())
            .addImm(1)
            .add(predOps(ARMCC::AL));
    if (!constrainSelectedInstRegOperands(*Test, TII, TRI, RBI))
      return false;

    // Branch conditionally.
    auto Branch =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcodes.Bcc))
            .add(I.getOperand(1))
            .add(predOps(ARMCC::NE, ARM::CPSR));
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;
    I.eraseFromParent();
    return true;
  }
  case G_PHI: {
    I.setDesc(TII.get(PHI));

    Register DstReg = I.getOperand(0).getReg();
    const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI);
    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      break;
    }

    return true;
  }
  default:
    return false;
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}