//===-- ARMSubtarget.cpp - ARM Subtarget Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ARM specific subclass of TargetSubtargetInfo.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"

#include "ARMCallLowering.h"
#include "ARMLegalizerInfo.h"
#include "ARMRegisterBankInfo.h"
#include "ARMFrameLowering.h"
#include "ARMInstrInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "Thumb1FrameLowering.h"
#include "Thumb1InstrInfo.h"
#include "Thumb2InstrInfo.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ARMTargetParser.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define DEBUG_TYPE "arm-subtarget"

#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "ARMGenSubtargetInfo.inc"
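
// Note: this flag seeds the UseMulOps field in the constructor below; the
// exact multiply-accumulate patterns it permits are decided by the users of
// UseMulOps elsewhere in the backend.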
static cl::opt<bool>
UseFusedMulOps("arm-use-mulops",
               cl::init(true), cl::Hidden);

enum ITMode {
  DefaultIT,
  RestrictedIT,
  NoRestrictedIT
};

static cl::opt<ITMode>
IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT),
   cl::ZeroOrMore,
   cl::values(clEnumValN(DefaultIT, "arm-default-it",
                         "Generate IT block based on arch"),
              clEnumValN(RestrictedIT, "arm-restrict-it",
                         "Disallow deprecated IT based on ARMv8"),
              clEnumValN(NoRestrictedIT, "arm-no-restrict-it",
                         "Allow IT blocks based on ARMv7")));

/// ForceFastISel - Use the fast-isel, even for subtargets where it is not
/// currently supported (for testing only).
static cl::opt<bool>
ForceFastISel("arm-force-fast-isel",
              cl::init(false), cl::Hidden);

static cl::opt<bool> EnableSubRegLiveness("arm-enable-subreg-liveness",
                                          cl::init(false), cl::Hidden);

/// initializeSubtargetDependencies - Initializes using a CPU and feature string
/// so that we can use initializer lists for subtarget initialization.
ARMSubtarget &ARMSubtarget::initializeSubtargetDependencies(StringRef CPU,
                                                            StringRef FS) {
  initializeEnvironment();
  initSubtargetFeatures(CPU, FS);
  return *this;
}

ARMFrameLowering *ARMSubtarget::initializeFrameLowering(StringRef CPU,
                                                        StringRef FS) {
  ARMSubtarget &STI = initializeSubtargetDependencies(CPU, FS);
  if (STI.isThumb1Only())
    return (ARMFrameLowering *)new Thumb1FrameLowering(STI);

  return new ARMFrameLowering(STI);
}
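
// Note: initializeFrameLowering() calls initializeSubtargetDependencies(), so
// the feature bits are already populated by the time InstrInfo and TLInfo are
// constructed and predicates such as isThumb1Only() can be queried directly.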
ARMSubtarget::ARMSubtarget(const Triple &TT, const std::string &CPU,
                           const std::string &FS,
                           const ARMBaseTargetMachine &TM, bool IsLittle,
                           bool MinSize)
    : ARMGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS),
      UseMulOps(UseFusedMulOps), CPUString(CPU), OptMinSize(MinSize),
      IsLittle(IsLittle), TargetTriple(TT), Options(TM.Options), TM(TM),
      FrameLowering(initializeFrameLowering(CPU, FS)),
      // At this point initializeSubtargetDependencies has been called so
      // we can query directly.
      InstrInfo(isThumb1Only()
                    ? (ARMBaseInstrInfo *)new Thumb1InstrInfo(*this)
                : !isThumb()
                    ? (ARMBaseInstrInfo *)new ARMInstrInfo(*this)
                    : (ARMBaseInstrInfo *)new Thumb2InstrInfo(*this)),
      TLInfo(TM, *this) {
  CallLoweringInfo.reset(new ARMCallLowering(*getTargetLowering()));
  Legalizer.reset(new ARMLegalizerInfo(*this));

  auto *RBI = new ARMRegisterBankInfo(*getRegisterInfo());

  // FIXME: At this point, we can't rely on Subtarget having RBI.
  // It's awkward to mix passing RBI and the Subtarget; should we pass
  // TII/TRI as well?
  InstSelector.reset(createARMInstructionSelector(
      *static_cast<const ARMBaseTargetMachine *>(&TM), *this, *RBI));

  RegBankInfo.reset(RBI);
}
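
// GlobalISel component accessors. The objects returned here are created in
// the constructor above and owned by the subtarget.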
const CallLowering *ARMSubtarget::getCallLowering() const {
  return CallLoweringInfo.get();
}

InstructionSelector *ARMSubtarget::getInstructionSelector() const {
  return InstSelector.get();
}

const LegalizerInfo *ARMSubtarget::getLegalizerInfo() const {
  return Legalizer.get();
}

const RegisterBankInfo *ARMSubtarget::getRegBankInfo() const {
  return RegBankInfo.get();
}

bool ARMSubtarget::isXRaySupported() const {
  // We don't currently support Thumb, but Windows requires Thumb.
  return hasV6Ops() && hasARMOps() && !isTargetWindows();
}

void ARMSubtarget::initializeEnvironment() {
  // MCAsmInfo isn't always present (e.g. in opt) so we can't initialize this
  // directly from it, but we can try to make sure they're consistent when both
  // are available.
  UseSjLjEH = (isTargetDarwin() && !isTargetWatchABI() &&
               Options.ExceptionModel == ExceptionHandling::None) ||
              Options.ExceptionModel == ExceptionHandling::SjLj;
  assert((!TM.getMCAsmInfo() ||
          (TM.getMCAsmInfo()->getExceptionHandlingType() ==
           ExceptionHandling::SjLj) == UseSjLjEH) &&
         "inconsistent sjlj choice between CodeGen and MC");
}

void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
  if (CPUString.empty()) {
    CPUString = "generic";

    if (isTargetDarwin()) {
      StringRef ArchName = TargetTriple.getArchName();
      ARM::ArchKind AK = ARM::parseArch(ArchName);
      if (AK == ARM::ArchKind::ARMV7S)
        // Default to the Swift CPU when targeting armv7s/thumbv7s.
        CPUString = "swift";
      else if (AK == ARM::ArchKind::ARMV7K)
        // Default to the Cortex-a7 CPU when targeting armv7k/thumbv7k.
        // ARMv7k does not use SjLj exception handling.
        CPUString = "cortex-a7";
    }
  }

  // Insert the architecture feature derived from the target triple into the
  // feature string. This is important for setting features that are implied
  // based on the architecture version.
  std::string ArchFS = ARM_MC::ParseARMTriple(TargetTriple, CPUString);
  if (!FS.empty()) {
    if (!ArchFS.empty())
      ArchFS = (Twine(ArchFS) + "," + FS).str();
    else
      ArchFS = std::string(FS);
  }
  ParseSubtargetFeatures(CPUString, /*TuneCPU*/ CPUString, ArchFS);

  // FIXME: This used to enable V6T2 support implicitly for Thumb2 mode.
  // Assert this for now to make the change obvious.
  assert(hasV6T2Ops() || !hasThumb2());

  // Execute-only support requires movt support.
  if (genExecuteOnly()) {
    NoMovt = false;
    assert(hasV8MBaselineOps() && "Cannot generate execute-only code for this target");
  }

  // Keep a pointer to static instruction cost data for the specified CPU.
  SchedModel = getSchedModelForCPU(CPUString);

  // Initialize scheduling itinerary for the specified CPU.
  InstrItins = getInstrItineraryForCPU(CPUString);

  // FIXME: this is invalid for WindowsCE
  if (isTargetWindows())
    NoARM = true;

  if (isAAPCS_ABI())
    stackAlignment = Align(8);
  if (isTargetNaCl() || isAAPCS16_ABI())
    stackAlignment = Align(16);

  // FIXME: Completely disable sibcall for Thumb1 since ThumbRegisterInfo::
  // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
  // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
  // support in the assembler and linker to be used. This would need to be
  // fixed to fully support tail calls in Thumb1.
  //
  // For ARMv8-M, we /do/ implement tail calls. Doing this is tricky for v8-M
  // baseline, since the LDM/POP instruction on Thumb doesn't take LR. This
  // means if we need to reload LR, it takes extra instructions, which outweighs
  // the value of the tail call; but here we don't know yet whether LR is going
  // to be used. We take the optimistic approach of generating the tail call and
  // perhaps taking a hit if we need to restore the LR.
  // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
  // but we need to make sure there are enough registers; the only valid
  // registers are the 4 used for parameters. We don't currently do this
  // case.
  SupportsTailCall = !isThumb1Only() || hasV8MBaselineOps();

  if (isTargetMachO() && isTargetIOS() && getTargetTriple().isOSVersionLT(5, 0))
    SupportsTailCall = false;
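
  // Choose the IT block policy. ARMv8-A deprecates IT blocks containing more
  // than one instruction or 32-bit instructions, so by default IT generation
  // is only restricted on v8 cores when we are not optimising for minimum
  // size.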
  switch (IT) {
  case DefaultIT:
    RestrictIT = hasV8Ops() && !hasMinSize();
    break;
  case RestrictedIT:
    RestrictIT = true;
    break;
  case NoRestrictedIT:
    RestrictIT = false;
    break;
  }

  // NEON f32 ops are non-IEEE 754 compliant. Darwin is ok with it by default.
  const FeatureBitset &Bits = getFeatureBits();
  if ((Bits[ARM::ProcA5] || Bits[ARM::ProcA8]) && // Where this matters
      (Options.UnsafeFPMath || isTargetDarwin()))
    UseNEONForSinglePrecisionFP = true;

  if (isRWPI())
    ReserveR9 = true;

  // If MVEVectorCostFactor is still 0 (has not been set to anything else),
  // default it to 2.
  if (MVEVectorCostFactor == 0)
    MVEVectorCostFactor = 2;

  // FIXME: Teach TableGen to deal with these instead of doing it manually here.
  switch (ARMProcFamily) {
  case Others:
  case CortexA5:
    break;
  case CortexA7:
    LdStMultipleTiming = DoubleIssue;
    break;
  case CortexA8:
    LdStMultipleTiming = DoubleIssue;
    break;
  case CortexA9:
    LdStMultipleTiming = DoubleIssueCheckUnalignedAccess;
    PreISelOperandLatencyAdjustment = 1;
    break;
  case CortexA12:
    break;
  case CortexA15:
    MaxInterleaveFactor = 2;
    PreISelOperandLatencyAdjustment = 1;
    PartialUpdateClearance = 12;
    break;
  case CortexA17:
  case CortexA32:
  case CortexA35:
  case CortexA53:
  case CortexA55:
  case CortexA57:
  case CortexA72:
  case CortexA73:
  case CortexA75:
  case CortexA76:
  case CortexA77:
  case CortexA78:
  case CortexA78C:
  case CortexA710:
  case CortexR4:
  case CortexR4F:
  case CortexR5:
  case CortexR7:
  case CortexM3:
  case CortexM7:
  case CortexR52:
  case CortexX1:
  case CortexX1C:
    break;
  case Exynos:
    LdStMultipleTiming = SingleIssuePlusExtras;
    MaxInterleaveFactor = 4;
    if (!isThumb())
      PrefLoopLogAlignment = 3;
    break;
  case Kryo:
    break;
  case Krait:
    PreISelOperandLatencyAdjustment = 1;
    break;
  case NeoverseN1:
  case NeoverseN2:
  case NeoverseV1:
    break;
  case Swift:
    MaxInterleaveFactor = 2;
    LdStMultipleTiming = SingleIssuePlusExtras;
    PreISelOperandLatencyAdjustment = 1;
    PartialUpdateClearance = 12;
    break;
  }
}

bool ARMSubtarget::isTargetHardFloat() const { return TM.isTargetHardFloat(); }

bool ARMSubtarget::isAPCS_ABI() const {
  assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
  return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_APCS;
}

bool ARMSubtarget::isAAPCS_ABI() const {
  assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
  return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS ||
         TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16;
}

bool ARMSubtarget::isAAPCS16_ABI() const {
  assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
  return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16;
}
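
// ROPI (read-only position independence) and RWPI (read-write position
// independence) are ARM-specific relocation models; Reloc::ROPI_RWPI combines
// the two, so each query below also accepts the combined model.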
bool ARMSubtarget::isROPI() const {
  return TM.getRelocationModel() == Reloc::ROPI ||
         TM.getRelocationModel() == Reloc::ROPI_RWPI;
}

bool ARMSubtarget::isRWPI() const {
  return TM.getRelocationModel() == Reloc::RWPI ||
         TM.getRelocationModel() == Reloc::ROPI_RWPI;
}

bool ARMSubtarget::isGVIndirectSymbol(const GlobalValue *GV) const {
  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return true;

  // 32 bit macho has no relocation for a-b if a is undefined, even if b is in
  // the section that is being relocated. This means we have to use a load even
  // for GVs that are known to be local to the dso.
  if (isTargetMachO() && TM.isPositionIndependent() &&
      (GV->isDeclarationForLinker() || GV->hasCommonLinkage()))
    return true;

  return false;
}

bool ARMSubtarget::isGVInGOT(const GlobalValue *GV) const {
  return isTargetELF() && TM.isPositionIndependent() &&
         !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
}

unsigned ARMSubtarget::getMispredictionPenalty() const {
  return SchedModel.MispredictPenalty;
}

bool ARMSubtarget::enableMachineScheduler() const {
  // The MachineScheduler can increase register usage, so we use more high
  // registers and end up with more T2 instructions that cannot be converted to
  // T1 instructions. At least until we do better at converting to thumb1
  // instructions, on cortex-m at Oz where we are size-paranoid, don't use the
  // Machine scheduler, relying on the DAG register pressure scheduler instead.
  if (isMClass() && hasMinSize())
    return false;

  // Enable the MachineScheduler before register allocation for subtargets
  // with the use-misched feature.
  return useMachineScheduler();
}

bool ARMSubtarget::enableSubRegLiveness() const {
  if (EnableSubRegLiveness.getNumOccurrences())
    return EnableSubRegLiveness;

  // Enable SubRegLiveness for MVE to better optimize s subregs for mqpr regs
  // and q subregs for qqqqpr regs.
  return hasMVEIntegerOps();
}

// This overrides the PostRAScheduler bit in the SchedModel for any CPU.
bool ARMSubtarget::enablePostRAScheduler() const {
  if (enableMachineScheduler())
    return false;
  if (disablePostRAScheduler())
    return false;
  // Thumb1 cores will generally not benefit from post-ra scheduling.
  return !isThumb1Only();
}

bool ARMSubtarget::enablePostRAMachineScheduler() const {
  if (!enableMachineScheduler())
    return false;
  if (disablePostRAScheduler())
    return false;
  return !isThumb1Only();
}
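
// Atomic expansion is only enabled when the subtarget provides some form of
// data barrier with which the expanded sequences can be fenced.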
bool ARMSubtarget::enableAtomicExpand() const { return hasAnyDataBarrier(); }

bool ARMSubtarget::useStride4VFPs() const {
  // For general targets, the prologue can grow when VFPs are allocated with
  // stride 4 (more vpush instructions). But WatchOS uses a compact unwind
  // format, which is more important to get right.
  return isTargetWatchABI() ||
         (useWideStrideVFP() && !OptMinSize);
}

bool ARMSubtarget::useMovt() const {
  // NOTE Windows on ARM needs to use movw/movt pairs to materialise 32-bit
  // immediates, as its code is inherently position independent and the
  // constant may otherwise be out of range.
  return !NoMovt && hasV8MBaselineOps() &&
         (isTargetWindows() || !OptMinSize || genExecuteOnly());
}

bool ARMSubtarget::useFastISel() const {
  // Enable fast-isel for any target, for testing only.
  if (ForceFastISel)
    return true;

  // Limit fast-isel to the targets that are or have been tested.
  if (!hasV6Ops())
    return false;

  // Thumb2 support on iOS; ARM support on iOS, Linux and NaCl.
  return TM.Options.EnableFastISel &&
         ((isTargetMachO() && !isThumb1Only()) ||
          (isTargetLinux() && !isThumb()) || (isTargetNaCl() && !isThumb()));
}

unsigned ARMSubtarget::getGPRAllocationOrder(const MachineFunction &MF) const {
  // The GPR register class has multiple possible allocation orders, with
  // tradeoffs preferred by different sub-architectures and optimisation goals.
  // The allocation orders are:
  // 0: (the default tablegen order, not used)
  // 1: r14, r0-r13
  // 2: r0-r7
  // 3: r0-r7, r12, lr, r8-r11
  // Note that the register allocator will change this order so that
  // callee-saved registers are used later, as they require extra work in the
  // prologue/epilogue (though we sometimes override that).

  // For thumb1-only targets, only the low registers are allocatable.
  if (isThumb1Only())
    return 2;

  // Allocate low registers first, so we can select more 16-bit instructions.
  // We also (in ignoreCSRForAllocationOrder) override the default behaviour
  // with regards to callee-saved registers, because pushing extra registers is
  // much cheaper (in terms of code size) than using high registers. After
  // that, we allocate r12 (doesn't need to be saved), lr (saving it means we
  // can return with the pop, don't need an extra "bx lr") and then the rest of
  // the high registers.
  if (isThumb2() && MF.getFunction().hasMinSize())
    return 3;

  // Otherwise, allocate in the default order, using LR first because saving it
  // allows a shorter epilogue sequence.
  return 1;
}

bool ARMSubtarget::ignoreCSRForAllocationOrder(const MachineFunction &MF,
                                               unsigned PhysReg) const {
  // To minimize code size in Thumb2, we prefer the usage of low regs (lower
  // cost per use) so we can use narrow encoding. By default, caller-saved
  // registers (e.g. lr, r12) are always allocated first, regardless of
  // their cost per use. When optForMinSize, we prefer the low regs even if
  // they are CSR because usually push/pop can be folded into existing ones.
  return isThumb2() && MF.getFunction().hasMinSize() &&
         ARM::GPRRegClass.contains(PhysReg);
}