  1. //===-- ARMSubtarget.h - Define Subtarget for the ARM ----------*- C++ -*--===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file declares the ARM specific subclass of TargetSubtargetInfo.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #ifndef LLVM_LIB_TARGET_ARM_ARMSUBTARGET_H
  13. #define LLVM_LIB_TARGET_ARM_ARMSUBTARGET_H
  14. #include "ARMBaseInstrInfo.h"
  15. #include "ARMBaseRegisterInfo.h"
  16. #include "ARMConstantPoolValue.h"
  17. #include "ARMFrameLowering.h"
  18. #include "ARMISelLowering.h"
  19. #include "ARMMachineFunctionInfo.h"
  20. #include "ARMSelectionDAGInfo.h"
  21. #include "llvm/ADT/Triple.h"
  22. #include "llvm/Analysis/TargetTransformInfo.h"
  23. #include "llvm/CodeGen/GlobalISel/CallLowering.h"
  24. #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
  25. #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
  26. #include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
  27. #include "llvm/CodeGen/MachineFunction.h"
  28. #include "llvm/CodeGen/TargetSubtargetInfo.h"
  29. #include "llvm/MC/MCInstrItineraries.h"
  30. #include "llvm/MC/MCSchedule.h"
  31. #include "llvm/Target/TargetMachine.h"
  32. #include "llvm/Target/TargetOptions.h"
  33. #include <memory>
  34. #include <string>
  35. #define GET_SUBTARGETINFO_HEADER
  36. #include "ARMGenSubtargetInfo.inc"
  37. namespace llvm {
  38. class ARMBaseTargetMachine;
  39. class GlobalValue;
  40. class StringRef;
  41. class ARMSubtarget : public ARMGenSubtargetInfo {
  42. protected:
  43. enum ARMProcFamilyEnum {
  44. Others,
  45. CortexA12,
  46. CortexA15,
  47. CortexA17,
  48. CortexA32,
  49. CortexA35,
  50. CortexA5,
  51. CortexA53,
  52. CortexA55,
  53. CortexA57,
  54. CortexA7,
  55. CortexA72,
  56. CortexA73,
  57. CortexA75,
  58. CortexA76,
  59. CortexA77,
  60. CortexA78,
  61. CortexA78C,
  62. CortexA710,
  63. CortexA8,
  64. CortexA9,
  65. CortexM3,
  66. CortexM7,
  67. CortexR4,
  68. CortexR4F,
  69. CortexR5,
  70. CortexR52,
  71. CortexR7,
  72. CortexX1,
  73. CortexX1C,
  74. Exynos,
  75. Krait,
  76. Kryo,
  77. NeoverseN1,
  78. NeoverseN2,
  79. NeoverseV1,
  80. Swift
  81. };
  82. enum ARMProcClassEnum {
  83. None,
  84. AClass,
  85. MClass,
  86. RClass
  87. };
  88. enum ARMArchEnum {
  89. ARMv2,
  90. ARMv2a,
  91. ARMv3,
  92. ARMv3m,
  93. ARMv4,
  94. ARMv4t,
  95. ARMv5,
  96. ARMv5t,
  97. ARMv5te,
  98. ARMv5tej,
  99. ARMv6,
  100. ARMv6k,
  101. ARMv6kz,
  102. ARMv6m,
  103. ARMv6sm,
  104. ARMv6t2,
  105. ARMv7a,
  106. ARMv7em,
  107. ARMv7m,
  108. ARMv7r,
  109. ARMv7ve,
  110. ARMv81a,
  111. ARMv82a,
  112. ARMv83a,
  113. ARMv84a,
  114. ARMv85a,
  115. ARMv86a,
  116. ARMv87a,
  117. ARMv88a,
  118. ARMv8a,
  119. ARMv8mBaseline,
  120. ARMv8mMainline,
  121. ARMv8r,
  122. ARMv81mMainline,
  123. ARMv9a,
  124. ARMv91a,
  125. ARMv92a,
  126. ARMv93a,
  127. };
  128. public:
  129. /// What kind of timing do load multiple/store multiple instructions have.
  130. enum ARMLdStMultipleTiming {
  131. /// Can load/store 2 registers/cycle.
  132. DoubleIssue,
  133. /// Can load/store 2 registers/cycle, but needs an extra cycle if the access
  134. /// is not 64-bit aligned.
  135. DoubleIssueCheckUnalignedAccess,
  136. /// Can load/store 1 register/cycle.
  137. SingleIssue,
  138. /// Can load/store 1 register/cycle, but needs an extra cycle for address
  139. /// computation and potentially also for register writeback.
  140. SingleIssuePlusExtras,
  141. };
  142. protected:
  143. /// ARMProcFamily - ARM processor family: Cortex-A8, Cortex-A9, and others.
  144. ARMProcFamilyEnum ARMProcFamily = Others;
  145. /// ARMProcClass - ARM processor class: None, AClass, RClass or MClass.
  146. ARMProcClassEnum ARMProcClass = None;
  147. /// ARMArch - ARM architecture
  148. ARMArchEnum ARMArch = ARMv4t;
  149. /// HasV4TOps, HasV5TOps, HasV5TEOps,
  150. /// HasV6Ops, HasV6MOps, HasV6KOps, HasV6T2Ops, HasV7Ops, HasV8Ops -
  151. /// Specify whether the target supports specific ARM ISA variants.
  152. bool HasV4TOps = false;
  153. bool HasV5TOps = false;
  154. bool HasV5TEOps = false;
  155. bool HasV6Ops = false;
  156. bool HasV6MOps = false;
  157. bool HasV6KOps = false;
  158. bool HasV6T2Ops = false;
  159. bool HasV7Ops = false;
  160. bool HasV8Ops = false;
  161. bool HasV8_1aOps = false;
  162. bool HasV8_2aOps = false;
  163. bool HasV8_3aOps = false;
  164. bool HasV8_4aOps = false;
  165. bool HasV8_5aOps = false;
  166. bool HasV8_6aOps = false;
  167. bool HasV8_7aOps = false;
  168. bool HasV8_8aOps = false;
  169. bool HasV9_0aOps = false;
  170. bool HasV9_1aOps = false;
  171. bool HasV9_2aOps = false;
  172. bool HasV9_3aOps = false;
  173. bool HasV8MBaselineOps = false;
  174. bool HasV8MMainlineOps = false;
  175. bool HasV8_1MMainlineOps = false;
  176. bool HasMVEIntegerOps = false;
  177. bool HasMVEFloatOps = false;
  178. bool HasCDEOps = false;
  179. /// HasVFPv2, HasVFPv3, HasVFPv4, HasFPARMv8, HasNEON - Specify what
  180. /// floating point ISAs are supported.
  181. bool HasVFPv2 = false;
  182. bool HasVFPv3 = false;
  183. bool HasVFPv4 = false;
  184. bool HasFPARMv8 = false;
  185. bool HasNEON = false;
  186. bool HasFPRegs = false;
  187. bool HasFPRegs16 = false;
  188. bool HasFPRegs64 = false;
  189. /// Versions of the VFP flags restricted to single precision, or to
  190. /// 16 d-registers, or both.
  191. bool HasVFPv2SP = false;
  192. bool HasVFPv3SP = false;
  193. bool HasVFPv4SP = false;
  194. bool HasFPARMv8SP = false;
  195. bool HasVFPv3D16 = false;
  196. bool HasVFPv4D16 = false;
  197. bool HasFPARMv8D16 = false;
  198. bool HasVFPv3D16SP = false;
  199. bool HasVFPv4D16SP = false;
  200. bool HasFPARMv8D16SP = false;
  201. /// HasDotProd - True if the ARMv8.2A dot product instructions are supported.
  202. bool HasDotProd = false;
  203. /// UseNEONForSinglePrecisionFP - if the NEONFP attribute has been
  204. /// specified. Use the method useNEONForSinglePrecisionFP() to
  205. /// determine if NEON should actually be used.
  206. bool UseNEONForSinglePrecisionFP = false;
  207. /// UseMulOps - True if non-microcoded fused integer multiply-add and
  208. /// multiply-subtract instructions should be used.
  209. bool UseMulOps = false;
  210. /// SlowFPVMLx - If the VFP2 / NEON instructions are available, indicates
  211. /// whether the FP VML[AS] instructions are slow (if so, don't use them).
  212. bool SlowFPVMLx = false;
  213. /// SlowFPVFMx - If the VFP4 / NEON instructions are available, indicates
  214. /// whether the FP VFM[AS] instructions are slow (if so, don't use them).
  215. bool SlowFPVFMx = false;
  216. /// HasVMLxForwarding - If true, NEON has special multiplier accumulator
  217. /// forwarding to allow mul + mla being issued back to back.
  218. bool HasVMLxForwarding = false;
  219. /// SlowFPBrcc - True if floating point compare + branch is slow.
  220. bool SlowFPBrcc = false;
  221. /// InThumbMode - True if compiling for Thumb, false for ARM.
  222. bool InThumbMode = false;
  223. /// UseSoftFloat - True if we're using software floating point features.
  224. bool UseSoftFloat = false;
  225. /// UseMISched - True if MachineScheduler should be used for this subtarget.
  226. bool UseMISched = false;
  227. /// DisablePostRAScheduler - True if scheduling should not be run again
  228. /// after register allocation.
  229. bool DisablePostRAScheduler = false;
  230. /// HasThumb2 - True if Thumb2 instructions are supported.
  231. bool HasThumb2 = false;
  232. /// NoARM - True if subtarget does not support ARM mode execution.
  233. bool NoARM = false;
  234. /// ReserveR9 - True if R9 is not available as a general purpose register.
  235. bool ReserveR9 = false;
  236. /// NoMovt - True if MOVT / MOVW pairs are not used for materialization of
  237. /// 32-bit imms (including global addresses).
  238. bool NoMovt = false;
  239. /// SupportsTailCall - True if the OS supports tail call. The dynamic linker
  240. /// must be able to synthesize call stubs for interworking between ARM and
  241. /// Thumb.
  242. bool SupportsTailCall = false;
  243. /// HasFP16 - True if subtarget supports half-precision FP conversions
  244. bool HasFP16 = false;
  245. /// HasFullFP16 - True if subtarget supports half-precision FP operations
  246. bool HasFullFP16 = false;
  247. /// HasFP16FML - True if subtarget supports half-precision FP fml operations
  248. bool HasFP16FML = false;
  249. /// HasBF16 - True if subtarget supports BFloat16 floating point operations
  250. bool HasBF16 = false;
  251. /// HasMatMulInt8 - True if subtarget supports 8-bit integer matrix multiply
  252. bool HasMatMulInt8 = false;
  253. /// HasD32 - True if subtarget has the full 32 double precision
  254. /// FP registers for VFPv3.
  255. bool HasD32 = false;
  256. /// HasHardwareDivide - True if subtarget supports [su]div in Thumb mode
  257. bool HasHardwareDivideInThumb = false;
  258. /// HasHardwareDivideInARM - True if subtarget supports [su]div in ARM mode
  259. bool HasHardwareDivideInARM = false;
  260. /// HasDataBarrier - True if the subtarget supports DMB / DSB data barrier
  261. /// instructions.
  262. bool HasDataBarrier = false;
  263. /// HasFullDataBarrier - True if the subtarget supports DFB data barrier
  264. /// instruction.
  265. bool HasFullDataBarrier = false;
  266. /// HasV7Clrex - True if the subtarget supports CLREX instructions
  267. bool HasV7Clrex = false;
  268. /// HasAcquireRelease - True if the subtarget supports the v8 acquire/release
  269. /// atomic instructions (LDA/LDAEX etc.)
  270. bool HasAcquireRelease = false;
  271. /// Pref32BitThumb - If true, codegen would prefer 32-bit Thumb instructions
  272. /// over 16-bit ones.
  273. bool Pref32BitThumb = false;
  274. /// AvoidCPSRPartialUpdate - If true, codegen would avoid using instructions
  275. /// that partially update CPSR and add false dependency on the previous
  276. /// CPSR setting instruction.
  277. bool AvoidCPSRPartialUpdate = false;
  278. /// CheapPredicableCPSRDef - If true, disable +1 predication cost
  279. /// for instructions updating CPSR. Enabled for Cortex-A57.
  280. bool CheapPredicableCPSRDef = false;
  281. /// AvoidMOVsShifterOperand - If true, codegen should avoid using flag setting
  282. /// movs with shifter operand (i.e. asr, lsl, lsr).
  283. bool AvoidMOVsShifterOperand = false;
  284. /// HasRetAddrStack - Some processors perform return stack prediction. CodeGen
  285. /// should avoid issuing "normal" call instructions to callees which do not return.
  286. bool HasRetAddrStack = false;
  287. /// HasBranchPredictor - True if the subtarget has a branch predictor. Having
  288. /// a branch predictor or not changes the expected cost of taking a branch
  289. /// which affects the choice of whether to use predicated instructions.
  290. bool HasBranchPredictor = true;
  291. /// HasMPExtension - True if the subtarget supports Multiprocessing
  292. /// extension (ARMv7 only).
  293. bool HasMPExtension = false;
  294. /// HasVirtualization - True if the subtarget supports the Virtualization
  295. /// extension.
  296. bool HasVirtualization = false;
  297. /// HasFP64 - If true, the floating point unit supports double
  298. /// precision.
  299. bool HasFP64 = false;
  300. /// If true, the processor supports the Performance Monitor Extensions. These
  301. /// include a generic cycle-counter as well as more fine-grained (often
  302. /// implementation-specific) events.
  303. bool HasPerfMon = false;
  304. /// HasTrustZone - if true, processor supports TrustZone security extensions
  305. bool HasTrustZone = false;
  306. /// Has8MSecExt - if true, processor supports ARMv8-M Security Extensions
  307. bool Has8MSecExt = false;
  308. /// HasSHA2 - if true, processor supports SHA1 and SHA256
  309. bool HasSHA2 = false;
  310. /// HasAES - if true, processor supports AES
  311. bool HasAES = false;
  312. /// HasCrypto - if true, processor supports Cryptography extensions
  313. bool HasCrypto = false;
  314. /// HasCRC - if true, processor supports CRC instructions
  315. bool HasCRC = false;
  316. /// HasRAS - if true, the processor supports RAS extensions
  317. bool HasRAS = false;
  318. /// HasLOB - if true, the processor supports the Low Overhead Branch extension
  319. bool HasLOB = false;
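/// HasPACBTI - if true, processor supports the Pointer Authentication and
/// Branch Target Identification extension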
  320. bool HasPACBTI = false;
  321. /// If true, the instructions "vmov.i32 d0, #0" and "vmov.i32 q0, #0" are
  322. /// particularly effective at zeroing a VFP register.
  323. bool HasZeroCycleZeroing = false;
  324. /// HasFPAO - if true, processor does positive address offset computation faster
  325. bool HasFPAO = false;
  326. /// HasFuseAES - if true, processor executes back to back AES instruction
  327. /// pairs faster.
  328. bool HasFuseAES = false;
  329. /// HasFuseLiterals - if true, processor executes back to back
  330. /// bottom and top halves of literal generation faster.
  331. bool HasFuseLiterals = false;
  332. /// If true, if-conversion may decide to leave some instructions unpredicated.
  333. bool IsProfitableToUnpredicate = false;
  334. /// If true, VMOV will be favored over VGETLNi32.
  335. bool HasSlowVGETLNi32 = false;
  336. /// If true, VMOV will be favored over VDUP.
  337. bool HasSlowVDUP32 = false;
  338. /// If true, VMOVSR will be favored over VMOVDRR.
  339. bool PreferVMOVSR = false;
  340. /// If true, ISHST barriers will be used for Release semantics.
  341. bool PreferISHST = false;
  342. /// If true, a VLDM/VSTM starting with an odd register number is considered to
  343. /// take more microops than single VLDRS/VSTRS.
  344. bool SlowOddRegister = false;
  345. /// If true, loading into a D subregister will be penalized.
  346. bool SlowLoadDSubregister = false;
  347. /// If true, use a wider stride when allocating VFP registers.
  348. bool UseWideStrideVFP = false;
  349. /// If true, the AGU and NEON/FPU units are multiplexed.
  350. bool HasMuxedUnits = false;
  351. /// If true, VMOVS will never be widened to VMOVD.
  352. bool DontWidenVMOVS = false;
  353. /// If true, splat a register between VFP and NEON instructions.
  354. bool SplatVFPToNeon = false;
  355. /// If true, run the MLx expansion pass.
  356. bool ExpandMLx = false;
  357. /// If true, VFP/NEON VMLA/VMLS have special RAW hazards.
  358. bool HasVMLxHazards = false;
  359. /// If true, read the thread pointer from the coprocessor register.
  360. bool ReadTPHard = false;
  361. /// If true, VMOVRS, VMOVSR and VMOVS will be converted from VFP to NEON.
  362. bool UseNEONForFPMovs = false;
  363. /// If true, VLDn instructions take an extra cycle for unaligned accesses.
  364. bool CheckVLDnAlign = false;
  365. /// If true, VFP instructions are not pipelined.
  366. bool NonpipelinedVFP = false;
  367. /// StrictAlign - If true, the subtarget disallows unaligned memory
  368. /// accesses for some types. For details, see
  369. /// ARMTargetLowering::allowsMisalignedMemoryAccesses().
  370. bool StrictAlign = false;
  371. /// RestrictIT - If true, the subtarget disallows generation of deprecated IT
  372. /// blocks to conform to the ARMv8 rule.
  373. bool RestrictIT = false;
  374. /// HasDSP - If true, the subtarget supports the DSP (saturating arith
  375. /// and such) instructions.
  376. bool HasDSP = false;
  377. /// NaCl TRAP instruction is generated instead of the regular TRAP.
  378. bool UseNaClTrap = false;
  379. /// Generate calls via indirect call instructions.
  380. bool GenLongCalls = false;
  381. /// Generate code that does not contain data access to code sections.
  382. bool GenExecuteOnly = false;
  383. /// UnsafeFPMath - True if the target machine allows unsafe FP math (such as use of NEON fp)
  384. bool UnsafeFPMath = false;
  385. /// UseSjLjEH - If true, the target uses SjLj exception handling (e.g. iOS).
  386. bool UseSjLjEH = false;
  387. /// Has speculation barrier
  388. bool HasSB = false;
  389. /// Implicitly convert an instruction to a different one if its immediates
  390. /// cannot be encoded. For example, ADD r0, r1, #FFFFFFFF -> SUB r0, r1, #1.
  391. bool NegativeImmediates = true;
  392. /// Mitigate against the CVE-2021-35465 security vulnerability.
  393. bool FixCMSE_CVE_2021_35465 = false;
  394. /// Harden against Straight Line Speculation for Returns and Indirect
  395. /// Branches.
  396. bool HardenSlsRetBr = false;
  397. /// Harden against Straight Line Speculation for indirect calls.
  398. bool HardenSlsBlr = false;
  399. /// Generate thunk code for SLS mitigation in the normal text section.
  400. bool HardenSlsNoComdat = false;
  401. /// stackAlignment - The minimum alignment known to hold for the stack frame on
  402. /// entry to the function, which must be maintained by every function.
  403. Align stackAlignment = Align(4);
  404. /// CPUString - String name of used CPU.
  405. std::string CPUString;
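/// MaxInterleaveFactor - The maximum interleave factor reported to the
/// vectorizer via getMaxInterleaveFactor().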
  406. unsigned MaxInterleaveFactor = 1;
  407. /// Clearance before partial register updates (in number of instructions)
  408. unsigned PartialUpdateClearance = 0;
  409. /// What kind of timing do load multiple/store multiple have (double issue,
  410. /// single issue etc).
  411. ARMLdStMultipleTiming LdStMultipleTiming = SingleIssue;
  412. /// The adjustment that we need to apply to get the operand latency from the
  413. /// operand cycle returned by the itinerary data for pre-ISel operands.
  414. int PreISelOperandLatencyAdjustment = 2;
  415. /// What alignment is preferred for loop bodies, in log2(bytes).
  416. unsigned PrefLoopLogAlignment = 0;
  417. /// The cost factor for MVE instructions, representing the multiple beats an
  418. /// instruction can take. The default is 2 (set in initSubtargetFeatures so
  419. /// that subtarget features can set it to less than 2).
  420. unsigned MVEVectorCostFactor = 0;
  421. /// OptMinSize - True if we're optimising for minimum code size, equal to
  422. /// the function attribute.
  423. bool OptMinSize = false;
  424. /// IsLittle - The target is Little Endian
  425. bool IsLittle;
  426. /// TargetTriple - What processor and OS we're targeting.
  427. Triple TargetTriple;
  428. /// SchedModel - Processor specific instruction costs.
  429. MCSchedModel SchedModel;
  430. /// Selected instruction itineraries (one entry per itinerary class.)
  431. InstrItineraryData InstrItins;
  432. /// NoBTIAtReturnTwice - Don't place a BTI instruction after
  433. /// return-twice constructs (setjmp)
  434. bool NoBTIAtReturnTwice = false;
  435. /// Options passed via command line that could influence the target
  436. const TargetOptions &Options;
  437. const ARMBaseTargetMachine &TM;
  438. public:
  439. /// This constructor initializes the data members to match that
  440. /// of the specified triple.
  441. ///
  442. ARMSubtarget(const Triple &TT, const std::string &CPU, const std::string &FS,
  443. const ARMBaseTargetMachine &TM, bool IsLittle,
  444. bool MinSize = false);
  445. /// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size
  446. /// that still makes it profitable to inline the call.
  447. unsigned getMaxInlineSizeThreshold() const {
  448. return 64;
  449. }
  450. /// getMaxMemcpyTPInlineSizeThreshold - Returns the maximum size
  451. /// that still makes it profitable to inline an llvm.memcpy as a Tail
  452. /// Predicated loop.
  453. /// This threshold should only be used for constant size inputs.
  454. unsigned getMaxMemcpyTPInlineSizeThreshold() const { return 128; }
  455. /// ParseSubtargetFeatures - Parses the feature string, setting the specified
  456. /// subtarget options. The definition of this function is auto-generated by tblgen.
  457. void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
  458. /// initializeSubtargetDependencies - Initializes using a CPU and feature string
  459. /// so that we can use initializer lists for subtarget initialization.
  460. ARMSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
  461. const ARMSelectionDAGInfo *getSelectionDAGInfo() const override {
  462. return &TSInfo;
  463. }
  464. const ARMBaseInstrInfo *getInstrInfo() const override {
  465. return InstrInfo.get();
  466. }
  467. const ARMTargetLowering *getTargetLowering() const override {
  468. return &TLInfo;
  469. }
  470. const ARMFrameLowering *getFrameLowering() const override {
  471. return FrameLowering.get();
  472. }
  473. const ARMBaseRegisterInfo *getRegisterInfo() const override {
  474. return &InstrInfo->getRegisterInfo();
  475. }
  476. const CallLowering *getCallLowering() const override;
  477. InstructionSelector *getInstructionSelector() const override;
  478. const LegalizerInfo *getLegalizerInfo() const override;
  479. const RegisterBankInfo *getRegBankInfo() const override;
  480. private:
  481. ARMSelectionDAGInfo TSInfo;
  482. // Either Thumb1FrameLowering or ARMFrameLowering.
  483. std::unique_ptr<ARMFrameLowering> FrameLowering;
  484. // Either Thumb1InstrInfo or Thumb2InstrInfo.
  485. std::unique_ptr<ARMBaseInstrInfo> InstrInfo;
  486. ARMTargetLowering TLInfo;
  487. /// GlobalISel related APIs.
  488. std::unique_ptr<CallLowering> CallLoweringInfo;
  489. std::unique_ptr<InstructionSelector> InstSelector;
  490. std::unique_ptr<LegalizerInfo> Legalizer;
  491. std::unique_ptr<RegisterBankInfo> RegBankInfo;
  492. void initializeEnvironment();
  493. void initSubtargetFeatures(StringRef CPU, StringRef FS);
  494. ARMFrameLowering *initializeFrameLowering(StringRef CPU, StringRef FS);
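/// CoprocCDE - Which coprocessors (0-7) have the Custom Datapath Extension
/// (CDE) enabled, one bit per coprocessor.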
  495. std::bitset<8> CoprocCDE = {};
  496. public:
  497. void computeIssueWidth();
  498. bool hasV4TOps() const { return HasV4TOps; }
  499. bool hasV5TOps() const { return HasV5TOps; }
  500. bool hasV5TEOps() const { return HasV5TEOps; }
  501. bool hasV6Ops() const { return HasV6Ops; }
  502. bool hasV6MOps() const { return HasV6MOps; }
  503. bool hasV6KOps() const { return HasV6KOps; }
  504. bool hasV6T2Ops() const { return HasV6T2Ops; }
  505. bool hasV7Ops() const { return HasV7Ops; }
  506. bool hasV8Ops() const { return HasV8Ops; }
  507. bool hasV8_1aOps() const { return HasV8_1aOps; }
  508. bool hasV8_2aOps() const { return HasV8_2aOps; }
  509. bool hasV8_3aOps() const { return HasV8_3aOps; }
  510. bool hasV8_4aOps() const { return HasV8_4aOps; }
  511. bool hasV8_5aOps() const { return HasV8_5aOps; }
  512. bool hasV8_6aOps() const { return HasV8_6aOps; }
  513. bool hasV8_7aOps() const { return HasV8_7aOps; }
  514. bool hasV8_8aOps() const { return HasV8_8aOps; }
  515. bool hasV9_0aOps() const { return HasV9_0aOps; }
  516. bool hasV9_1aOps() const { return HasV9_1aOps; }
  517. bool hasV9_2aOps() const { return HasV9_2aOps; }
  518. bool hasV9_3aOps() const { return HasV9_3aOps; }
  519. bool hasV8MBaselineOps() const { return HasV8MBaselineOps; }
  520. bool hasV8MMainlineOps() const { return HasV8MMainlineOps; }
  521. bool hasV8_1MMainlineOps() const { return HasV8_1MMainlineOps; }
  522. bool hasMVEIntegerOps() const { return HasMVEIntegerOps; }
  523. bool hasMVEFloatOps() const { return HasMVEFloatOps; }
  524. bool hasCDEOps() const { return HasCDEOps; }
  525. bool hasFPRegs() const { return HasFPRegs; }
  526. bool hasFPRegs16() const { return HasFPRegs16; }
  527. bool hasFPRegs64() const { return HasFPRegs64; }
  528. /// @{
  529. /// These functions are obsolete, please consider adding subtarget features
  530. /// or properties instead of calling them.
  531. bool isCortexA5() const { return ARMProcFamily == CortexA5; }
  532. bool isCortexA7() const { return ARMProcFamily == CortexA7; }
  533. bool isCortexA8() const { return ARMProcFamily == CortexA8; }
  534. bool isCortexA9() const { return ARMProcFamily == CortexA9; }
  535. bool isCortexA15() const { return ARMProcFamily == CortexA15; }
  536. bool isSwift() const { return ARMProcFamily == Swift; }
  537. bool isCortexM3() const { return ARMProcFamily == CortexM3; }
  538. bool isCortexM7() const { return ARMProcFamily == CortexM7; }
  539. bool isLikeA9() const { return isCortexA9() || isCortexA15() || isKrait(); }
  540. bool isCortexR5() const { return ARMProcFamily == CortexR5; }
  541. bool isKrait() const { return ARMProcFamily == Krait; }
  542. /// @}
  543. bool hasARMOps() const { return !NoARM; }
  544. bool hasVFP2Base() const { return HasVFPv2SP; }
  545. bool hasVFP3Base() const { return HasVFPv3D16SP; }
  546. bool hasVFP4Base() const { return HasVFPv4D16SP; }
  547. bool hasFPARMv8Base() const { return HasFPARMv8D16SP; }
  548. bool hasNEON() const { return HasNEON; }
  549. bool hasSHA2() const { return HasSHA2; }
  550. bool hasAES() const { return HasAES; }
  551. bool hasCrypto() const { return HasCrypto; }
  552. bool hasDotProd() const { return HasDotProd; }
  553. bool hasCRC() const { return HasCRC; }
  554. bool hasRAS() const { return HasRAS; }
  555. bool hasLOB() const { return HasLOB; }
  556. bool hasPACBTI() const { return HasPACBTI; }
  557. bool hasVirtualization() const { return HasVirtualization; }
  558. bool useNEONForSinglePrecisionFP() const {
  559. return hasNEON() && UseNEONForSinglePrecisionFP;
  560. }
  561. bool hasDivideInThumbMode() const { return HasHardwareDivideInThumb; }
  562. bool hasDivideInARMMode() const { return HasHardwareDivideInARM; }
  563. bool hasDataBarrier() const { return HasDataBarrier; }
  564. bool hasFullDataBarrier() const { return HasFullDataBarrier; }
  565. bool hasV7Clrex() const { return HasV7Clrex; }
  566. bool hasAcquireRelease() const { return HasAcquireRelease; }
  567. bool hasAnyDataBarrier() const {
  568. return HasDataBarrier || (hasV6Ops() && !isThumb());
  569. }
  570. bool useMulOps() const { return UseMulOps; }
  571. bool useFPVMLx() const { return !SlowFPVMLx; }
  572. bool useFPVFMx() const {
  573. return !isTargetDarwin() && hasVFP4Base() && !SlowFPVFMx;
  574. }
  575. bool useFPVFMx16() const { return useFPVFMx() && hasFullFP16(); }
  576. bool useFPVFMx64() const { return useFPVFMx() && hasFP64(); }
  577. bool hasVMLxForwarding() const { return HasVMLxForwarding; }
  578. bool isFPBrccSlow() const { return SlowFPBrcc; }
  579. bool hasFP64() const { return HasFP64; }
  580. bool hasPerfMon() const { return HasPerfMon; }
  581. bool hasTrustZone() const { return HasTrustZone; }
  582. bool has8MSecExt() const { return Has8MSecExt; }
  583. bool hasZeroCycleZeroing() const { return HasZeroCycleZeroing; }
  584. bool hasFPAO() const { return HasFPAO; }
  585. bool isProfitableToUnpredicate() const { return IsProfitableToUnpredicate; }
  586. bool hasSlowVGETLNi32() const { return HasSlowVGETLNi32; }
  587. bool hasSlowVDUP32() const { return HasSlowVDUP32; }
  588. bool preferVMOVSR() const { return PreferVMOVSR; }
  589. bool preferISHSTBarriers() const { return PreferISHST; }
  590. bool expandMLx() const { return ExpandMLx; }
  591. bool hasVMLxHazards() const { return HasVMLxHazards; }
  592. bool hasSlowOddRegister() const { return SlowOddRegister; }
  593. bool hasSlowLoadDSubregister() const { return SlowLoadDSubregister; }
  594. bool useWideStrideVFP() const { return UseWideStrideVFP; }
  595. bool hasMuxedUnits() const { return HasMuxedUnits; }
  596. bool dontWidenVMOVS() const { return DontWidenVMOVS; }
  597. bool useSplatVFPToNeon() const { return SplatVFPToNeon; }
  598. bool useNEONForFPMovs() const { return UseNEONForFPMovs; }
  599. bool checkVLDnAccessAlignment() const { return CheckVLDnAlign; }
  600. bool nonpipelinedVFP() const { return NonpipelinedVFP; }
  601. bool prefers32BitThumb() const { return Pref32BitThumb; }
  602. bool avoidCPSRPartialUpdate() const { return AvoidCPSRPartialUpdate; }
  603. bool cheapPredicableCPSRDef() const { return CheapPredicableCPSRDef; }
  604. bool avoidMOVsShifterOperand() const { return AvoidMOVsShifterOperand; }
  605. bool hasRetAddrStack() const { return HasRetAddrStack; }
  606. bool hasBranchPredictor() const { return HasBranchPredictor; }
  607. bool hasMPExtension() const { return HasMPExtension; }
  608. bool hasDSP() const { return HasDSP; }
  609. bool useNaClTrap() const { return UseNaClTrap; }
  610. bool useSjLjEH() const { return UseSjLjEH; }
  611. bool hasSB() const { return HasSB; }
  612. bool genLongCalls() const { return GenLongCalls; }
  613. bool genExecuteOnly() const { return GenExecuteOnly; }
  614. bool hasBaseDSP() const {
  615. if (isThumb())
  616. return hasDSP();
  617. else
  618. return hasV5TEOps();
  619. }
  620. bool hasFP16() const { return HasFP16; }
  621. bool hasD32() const { return HasD32; }
  622. bool hasFullFP16() const { return HasFullFP16; }
  623. bool hasFP16FML() const { return HasFP16FML; }
  624. bool hasBF16() const { return HasBF16; }
  625. bool hasFuseAES() const { return HasFuseAES; }
  626. bool hasFuseLiterals() const { return HasFuseLiterals; }
  627. /// Return true if the CPU supports any kind of instruction fusion.
  628. bool hasFusion() const { return hasFuseAES() || hasFuseLiterals(); }
  629. bool hasMatMulInt8() const { return HasMatMulInt8; }
  630. const Triple &getTargetTriple() const { return TargetTriple; }
  631. bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
  632. bool isTargetIOS() const { return TargetTriple.isiOS(); }
  633. bool isTargetWatchOS() const { return TargetTriple.isWatchOS(); }
  634. bool isTargetWatchABI() const { return TargetTriple.isWatchABI(); }
  635. bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
  636. bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
  637. bool isTargetNetBSD() const { return TargetTriple.isOSNetBSD(); }
  638. bool isTargetWindows() const { return TargetTriple.isOSWindows(); }
  639. bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
  640. bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
  641. bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
  642. // ARM EABI is the bare-metal EABI described in ARM ABI documents and
  643. // can be accessed via -target arm-none-eabi. This is NOT GNUEABI.
  644. // FIXME: Add a flag for bare-metal for that target and set Triple::EABI
  645. // even for GNUEABI, so we can make a distinction here and still conform to
  646. // the EABI on GNU (and Android) mode. This requires change in Clang, too.
  647. // FIXME: The Darwin exception is temporary, while we move users to
  648. // "*-*-*-macho" triples as quickly as possible.
  649. bool isTargetAEABI() const {
  650. return (TargetTriple.getEnvironment() == Triple::EABI ||
  651. TargetTriple.getEnvironment() == Triple::EABIHF) &&
  652. !isTargetDarwin() && !isTargetWindows();
  653. }
  654. bool isTargetGNUAEABI() const {
  655. return (TargetTriple.getEnvironment() == Triple::GNUEABI ||
  656. TargetTriple.getEnvironment() == Triple::GNUEABIHF) &&
  657. !isTargetDarwin() && !isTargetWindows();
  658. }
  659. bool isTargetMuslAEABI() const {
  660. return (TargetTriple.getEnvironment() == Triple::MuslEABI ||
  661. TargetTriple.getEnvironment() == Triple::MuslEABIHF) &&
  662. !isTargetDarwin() && !isTargetWindows();
  663. }
  664. // ARM targets that support the EHABI exception handling standard.
  665. // Darwin uses SjLj. Other targets might need more checks.
  666. bool isTargetEHABICompatible() const {
  667. return TargetTriple.isTargetEHABICompatible();
  668. }
  669. bool isTargetHardFloat() const;
  670. bool isTargetAndroid() const { return TargetTriple.isAndroid(); }
  671. bool isXRaySupported() const override;
  672. bool isAPCS_ABI() const;
  673. bool isAAPCS_ABI() const;
  674. bool isAAPCS16_ABI() const;
  675. bool isROPI() const;
  676. bool isRWPI() const;
  677. bool useMachineScheduler() const { return UseMISched; }
  678. bool disablePostRAScheduler() const { return DisablePostRAScheduler; }
  679. bool useSoftFloat() const { return UseSoftFloat; }
  680. bool isThumb() const { return InThumbMode; }
  681. bool hasMinSize() const { return OptMinSize; }
  682. bool isThumb1Only() const { return InThumbMode && !HasThumb2; }
  683. bool isThumb2() const { return InThumbMode && HasThumb2; }
  684. bool hasThumb2() const { return HasThumb2; }
  685. bool isMClass() const { return ARMProcClass == MClass; }
  686. bool isRClass() const { return ARMProcClass == RClass; }
  687. bool isAClass() const { return ARMProcClass == AClass; }
  688. bool isReadTPHard() const { return ReadTPHard; }
  689. bool isR9Reserved() const {
  690. return isTargetMachO() ? (ReserveR9 || !HasV6Ops) : ReserveR9;
  691. }
  692. MCPhysReg getFramePointerReg() const {
  693. if (isTargetDarwin() || (!isTargetWindows() && isThumb()))
  694. return ARM::R7;
  695. return ARM::R11;
  696. }
  697. /// Returns true if the frame setup is split into two separate pushes (first
  698. /// r0-r7,lr then r8-r11), principally so that the frame pointer is adjacent
  699. /// to lr. This is always required on Thumb1-only targets, as the push and
  700. /// pop instructions can't access the high registers.
  701. bool splitFramePushPop(const MachineFunction &MF) const {
  702. if (MF.getInfo<ARMFunctionInfo>()->shouldSignReturnAddress())
  703. return true;
  704. return (getFramePointerReg() == ARM::R7 &&
  705. MF.getTarget().Options.DisableFramePointerElim(MF)) ||
  706. isThumb1Only();
  707. }
  708. bool useStride4VFPs() const;
  709. bool useMovt() const;
  710. bool supportsTailCall() const { return SupportsTailCall; }
  711. bool allowsUnalignedMem() const { return !StrictAlign; }
  712. bool restrictIT() const { return RestrictIT; }
  713. const std::string &getCPUString() const { return CPUString; }
  714. bool isLittle() const { return IsLittle; }
  715. unsigned getMispredictionPenalty() const;
  716. /// Returns true if machine scheduler should be enabled.
  717. bool enableMachineScheduler() const override;
  718. /// True for some subtargets at > -O0.
  719. bool enablePostRAScheduler() const override;
  720. /// True for some subtargets at > -O0.
  721. bool enablePostRAMachineScheduler() const override;
  722. /// Check whether this subtarget wants to use subregister liveness.
  723. bool enableSubRegLiveness() const override;
  724. /// Enable use of alias analysis during code generation (during MI
  725. /// scheduling, DAGCombine, etc.).
  726. bool useAA() const override { return true; }
  727. /// enableAtomicExpand - True if we need to expand our atomics.
  728. bool enableAtomicExpand() const override;
  729. /// getInstrItineraryData - Return the instruction itineraries based on the
  730. /// subtarget selection.
  731. const InstrItineraryData *getInstrItineraryData() const override {
  732. return &InstrItins;
  733. }
  734. /// getStackAlignment - Returns the minimum alignment known to hold for the
  735. /// stack frame on entry to the function, which must be maintained by every
  736. /// function for this subtarget.
  737. Align getStackAlignment() const { return stackAlignment; }
  738. unsigned getMaxInterleaveFactor() const { return MaxInterleaveFactor; }
  739. unsigned getPartialUpdateClearance() const { return PartialUpdateClearance; }
  740. ARMLdStMultipleTiming getLdStMultipleTiming() const {
  741. return LdStMultipleTiming;
  742. }
  743. int getPreISelOperandLatencyAdjustment() const {
  744. return PreISelOperandLatencyAdjustment;
  745. }
  746. /// True if the GV will be accessed via an indirect symbol.
  747. bool isGVIndirectSymbol(const GlobalValue *GV) const;
  748. /// Returns true if the GV needs to be accessed through the GOT.
  749. bool isGVInGOT(const GlobalValue *GV) const;
  750. /// True if fast-isel is used.
  751. bool useFastISel() const;
  752. /// Returns the correct return opcode for the current feature set.
  753. /// Use BX if available to allow mixing thumb/arm code, but fall back
  754. /// to plain mov pc,lr on ARMv4.
  755. unsigned getReturnOpcode() const {
  756. if (isThumb())
  757. return ARM::tBX_RET;
  758. if (hasV4TOps())
  759. return ARM::BX_RET;
  760. return ARM::MOVPCLR;
  761. }
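// Illustrative sketch of how a backend pass might consume getReturnOpcode();
// MF, MBB, DL and TII are assumed to be the usual MachineFunction, basic
// block, debug location and ARMBaseInstrInfo objects available in such a pass:
//   const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
//   BuildMI(MBB, MBB.end(), DL, TII->get(STI.getReturnOpcode()))
//       .add(predOps(ARMCC::AL));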
  762. /// Allow movt+movw for PIC global address calculation.
  763. /// ELF does not have GOT relocations for movt+movw.
  764. /// ROPI does not use GOT.
  765. bool allowPositionIndependentMovt() const {
  766. return isROPI() || !isTargetELF();
  767. }
  768. unsigned getPrefLoopLogAlignment() const { return PrefLoopLogAlignment; }
  769. unsigned
  770. getMVEVectorCostFactor(TargetTransformInfo::TargetCostKind CostKind) const {
  771. if (CostKind == TargetTransformInfo::TCK_CodeSize)
  772. return 1;
  773. return MVEVectorCostFactor;
  774. }
  775. bool ignoreCSRForAllocationOrder(const MachineFunction &MF,
  776. unsigned PhysReg) const override;
  777. unsigned getGPRAllocationOrder(const MachineFunction &MF) const;
  778. bool fixCMSE_CVE_2021_35465() const { return FixCMSE_CVE_2021_35465; }
  779. bool hardenSlsRetBr() const { return HardenSlsRetBr; }
  780. bool hardenSlsBlr() const { return HardenSlsBlr; }
  781. bool hardenSlsNoComdat() const { return HardenSlsNoComdat; }
  782. bool getNoBTIAtReturnTwice() const { return NoBTIAtReturnTwice; }
  783. };
  784. } // end namespace llvm
  785. #endif // LLVM_LIB_TARGET_ARM_ARMSUBTARGET_H
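// Illustrative usage sketch: querying this subtarget from machine code. The
// names MF and STI are placeholders for objects available inside an ARM
// backend pass; each accessor shown is declared above.
//   const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
//   if (STI.isThumb2() && STI.hasMVEIntegerOps())
//     ; // e.g. enable MVE-specific lowering
//   Align SPAlign = STI.getStackAlignment(); // minimum stack alignment
//   bool UseMovt = STI.useMovt();            // MOVW/MOVT materialization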