//===- ARMISelLowering.h - ARM DAG Lowering Interface -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
#define LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H

#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/MachineValueType.h"
#include <utility>

namespace llvm {

class ARMSubtarget;
class DataLayout;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class InstrItineraryData;
class Instruction;
class MachineBasicBlock;
class MachineInstr;
class SelectionDAG;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterInfo;
class VectorType;
namespace ARMISD {

// ARM Specific DAG Nodes
enum NodeType : unsigned {
  // Start the numbering where the builtin ops and target ops leave off.
  FIRST_NUMBER = ISD::BUILTIN_OP_END,

  Wrapper,    // Wrapper - A wrapper node for TargetConstantPool,
              // TargetExternalSymbol, and TargetGlobalAddress.
  WrapperPIC, // WrapperPIC - A wrapper node for TargetGlobalAddress in
              // PIC mode.
  WrapperJT,  // WrapperJT - A wrapper node for TargetJumpTable

  // Add pseudo op to model memcpy for struct byval.
  COPY_STRUCT_BYVAL,

  CALL,        // Function call.
  CALL_PRED,   // Function call that's predicable.
  CALL_NOLINK, // Function call with branch not branch-and-link.
  tSECALL,     // CMSE non-secure function call.
  t2CALL_BTI,  // Thumb function call followed by BTI instruction.
  BRCOND,      // Conditional branch.
  BR_JT,       // Jumptable branch.
  BR2_JT,      // Jumptable branch (2 level - jumptable entry is a jump).
  RET_FLAG,    // Return with a flag operand.
  SERET_FLAG,  // CMSE Entry function return with a flag operand.
  INTRET_FLAG, // Interrupt return with an LR-offset and a flag operand.

  PIC_ADD, // Add with a PC operand and a PIC label.

  ASRL, // MVE long arithmetic shift right.
  LSRL, // MVE long shift right.
  LSLL, // MVE long shift left.

  CMP,      // ARM compare instructions.
  CMN,      // ARM CMN instructions.
  CMPZ,     // ARM compare that sets only Z flag.
  CMPFP,    // ARM VFP compare instruction, sets FPSCR.
  CMPFPE,   // ARM VFP signalling compare instruction, sets FPSCR.
  CMPFPw0,  // ARM VFP compare against zero instruction, sets FPSCR.
  CMPFPEw0, // ARM VFP signalling compare against zero instruction, sets
            // FPSCR.
  FMSTAT,   // ARM fmstat instruction.

  CMOV, // ARM conditional move instructions.
  SUBS, // Flag-setting subtraction.

  SSAT, // Signed saturation
  USAT, // Unsigned saturation

  BCC_i64,

  SRL_FLAG, // V,Flag = srl_flag X -> srl X, 1 + save carry out.
  SRA_FLAG, // V,Flag = sra_flag X -> sra X, 1 + save carry out.
  RRX,      // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

  ADDC, // Add with carry
  ADDE, // Add using carry
  SUBC, // Sub with carry
  SUBE, // Sub using carry
  LSLS, // Shift left producing carry

  VMOVRRD, // double to two gprs.
  VMOVDRR, // Two gprs to double.
  VMOVSR,  // move gpr to single, used for f32 literal constructed in a gpr

  EH_SJLJ_SETJMP,         // SjLj exception handling setjmp.
  EH_SJLJ_LONGJMP,        // SjLj exception handling longjmp.
  EH_SJLJ_SETUP_DISPATCH, // SjLj exception handling setup_dispatch.

  TC_RETURN, // Tail call return pseudo.

  THREAD_POINTER,

  DYN_ALLOC, // Dynamic allocation on the stack.

  MEMBARRIER_MCR, // Memory barrier (MCR)

  PRELOAD, // Preload

  WIN__CHKSTK, // Windows' __chkstk call to do stack probing.
  WIN__DBZCHK, // Windows' divide by zero check

  WLS,      // Low-overhead loops, While Loop Start branch. See t2WhileLoopStart
  WLSSETUP, // Setup for the iteration count of a WLS. See t2WhileLoopSetup.
  LOOP_DEC, // Really a part of LE, performs the sub
  LE,       // Low-overhead loops, Loop End

  PREDICATE_CAST,  // Predicate cast for MVE i1 types
  VECTOR_REG_CAST, // Reinterpret the current contents of a vector register

  MVESEXT,  // Legalization aids for extending a vector into two/four vectors.
  MVEZEXT,  // or truncating two/four vectors into one. Eventually becomes
  MVETRUNC, // stack store/load sequence, if not optimized to anything else.

  VCMP,  // Vector compare.
  VCMPZ, // Vector compare to zero.
  VTST,  // Vector test bits.

  // Vector shift by vector
  VSHLs, // ...left/right by signed
  VSHLu, // ...left/right by unsigned

  // Vector shift by immediate:
  VSHLIMM,  // ...left
  VSHRsIMM, // ...right (signed)
  VSHRuIMM, // ...right (unsigned)

  // Vector rounding shift by immediate:
  VRSHRsIMM, // ...right (signed)
  VRSHRuIMM, // ...right (unsigned)
  VRSHRNIMM, // ...right narrow

  // Vector saturating shift by immediate:
  VQSHLsIMM,   // ...left (signed)
  VQSHLuIMM,   // ...left (unsigned)
  VQSHLsuIMM,  // ...left (signed to unsigned)
  VQSHRNsIMM,  // ...right narrow (signed)
  VQSHRNuIMM,  // ...right narrow (unsigned)
  VQSHRNsuIMM, // ...right narrow (signed to unsigned)

  // Vector saturating rounding shift by immediate:
  VQRSHRNsIMM,  // ...right narrow (signed)
  VQRSHRNuIMM,  // ...right narrow (unsigned)
  VQRSHRNsuIMM, // ...right narrow (signed to unsigned)

  // Vector shift and insert:
  VSLIIMM, // ...left
  VSRIIMM, // ...right

  // Vector get lane (VMOV scalar to ARM core register)
  // (These are used for 8- and 16-bit element types only.)
  VGETLANEu, // zero-extend vector extract element
  VGETLANEs, // sign-extend vector extract element

  // Vector move immediate and move negated immediate:
  VMOVIMM,
  VMVNIMM,

  // Vector move f32 immediate:
  VMOVFPIMM,

  // Move H <-> R, clearing top 16 bits
  VMOVrh,
  VMOVhr,

  // Vector duplicate:
  VDUP,
  VDUPLANE,

  // Vector shuffles:
  VEXT,   // extract
  VREV64, // reverse elements within 64-bit doublewords
  VREV32, // reverse elements within 32-bit words
  VREV16, // reverse elements within 16-bit halfwords
  VZIP,   // zip (interleave)
  VUZP,   // unzip (deinterleave)
  VTRN,   // transpose
  VTBL1,  // 1-register shuffle with mask
  VTBL2,  // 2-register shuffle with mask
  VMOVN,  // MVE vmovn

  // MVE Saturating truncates
  VQMOVNs, // Vector (V) Saturating (Q) Move and Narrow (N), signed (s)
  VQMOVNu, // Vector (V) Saturating (Q) Move and Narrow (N), unsigned (u)

  // MVE float <> half converts
  VCVTN, // MVE vcvt f32 -> f16, truncating into either the bottom or top
         // lanes
  VCVTL, // MVE vcvt f16 -> f32, extending from either the bottom or top lanes

  // MVE VIDUP instruction, taking a start value and increment.
  VIDUP,

  // Vector multiply long:
  VMULLs, // ...signed
  VMULLu, // ...unsigned

  VQDMULH, // MVE vqdmulh instruction

  // MVE reductions
  VADDVs,  // sign- or zero-extend the elements of a vector to i32,
  VADDVu,  // add them all together, and return an i32 of their sum
  VADDVps, // Same as VADDV[su] but with a v4i1 predicate mask
  VADDVpu,
  VADDLVs,  // sign- or zero-extend elements to i64 and sum, returning
  VADDLVu,  // the low and high 32-bit halves of the sum
  VADDLVAs, // Same as VADDLV[su] but also add an input accumulator
  VADDLVAu, // provided as low and high halves
  VADDLVps, // Same as VADDLV[su] but with a v4i1 predicate mask
  VADDLVpu,
  VADDLVAps, // Same as VADDLVp[su] but with a v4i1 predicate mask
  VADDLVApu,
  VMLAVs, // sign- or zero-extend the elements of two vectors to i32, multiply
          // them
  VMLAVu, // and add the results together, returning an i32 of their sum
  VMLAVps, // Same as VMLAV[su] with a v4i1 predicate mask
  VMLAVpu,
  VMLALVs,  // Same as VMLAV but with i64, returning the low and
  VMLALVu,  // high 32-bit halves of the sum
  VMLALVps, // Same as VMLALV[su] with a v4i1 predicate mask
  VMLALVpu,
  VMLALVAs,  // Same as VMLALV but also add an input accumulator
  VMLALVAu,  // provided as low and high halves
  VMLALVAps, // Same as VMLALVA[su] with a v4i1 predicate mask
  VMLALVApu,
  VMINVu, // Find minimum unsigned value of a vector and register
  VMINVs, // Find minimum signed value of a vector and register
  VMAXVu, // Find maximum unsigned value of a vector and register
  VMAXVs, // Find maximum signed value of a vector and register

  SMULWB,  // Signed multiply word by half word, bottom
  SMULWT,  // Signed multiply word by half word, top
  UMLAL,   // 64bit Unsigned Accumulate Multiply
  SMLAL,   // 64bit Signed Accumulate Multiply
  UMAAL,   // 64-bit Unsigned Accumulate Accumulate Multiply
  SMLALBB, // 64-bit signed accumulate multiply bottom, bottom 16
  SMLALBT, // 64-bit signed accumulate multiply bottom, top 16
  SMLALTB, // 64-bit signed accumulate multiply top, bottom 16
  SMLALTT, // 64-bit signed accumulate multiply top, top 16
  SMLALD,  // Signed multiply accumulate long dual
  SMLALDX, // Signed multiply accumulate long dual exchange
  SMLSLD,  // Signed multiply subtract long dual
  SMLSLDX, // Signed multiply subtract long dual exchange
  SMMLAR,  // Signed multiply long, round and add
  SMMLSR,  // Signed multiply long, subtract and round

  // Single Lane QADD8 and QADD16. Only the bottom lane. That's what the b
  // stands for.
  QADD8b,
  QSUB8b,
  QADD16b,
  QSUB16b,
  UQADD8b,
  UQSUB8b,
  UQADD16b,
  UQSUB16b,

  // Operands of the standard BUILD_VECTOR node are not legalized, which
  // is fine if BUILD_VECTORs are always lowered to shuffles or other
  // operations, but for ARM some BUILD_VECTORs are legal as-is and their
  // operands need to be legalized. Define an ARM-specific version of
  // BUILD_VECTOR for this purpose.
  BUILD_VECTOR,

  // Bit-field insert
  BFI,

  // Vector OR with immediate
  VORRIMM,
  // Vector AND with NOT of immediate
  VBICIMM,

  // Pseudo vector bitwise select
  VBSP,

  // Pseudo-instruction representing a memory copy using ldm/stm
  // instructions.
  MEMCPY,

  // Pseudo-instruction representing a memory copy using a tail predicated
  // loop
  MEMCPYLOOP,

  // Pseudo-instruction representing a memset using a tail predicated
  // loop
  MEMSETLOOP,
  // Armv8.1-M Mainline conditional select instructions:
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Vector load N-element structure to all lanes:
  VLD1DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
  VLD2DUP,
  VLD3DUP,
  VLD4DUP,

  // NEON loads with post-increment base updates:
  VLD1_UPD,
  VLD2_UPD,
  VLD3_UPD,
  VLD4_UPD,
  VLD2LN_UPD,
  VLD3LN_UPD,
  VLD4LN_UPD,
  VLD1DUP_UPD,
  VLD2DUP_UPD,
  VLD3DUP_UPD,
  VLD4DUP_UPD,
  VLD1x2_UPD,
  VLD1x3_UPD,
  VLD1x4_UPD,

  // NEON stores with post-increment base updates:
  VST1_UPD,
  VST2_UPD,
  VST3_UPD,
  VST4_UPD,
  VST2LN_UPD,
  VST3LN_UPD,
  VST4LN_UPD,
  VST1x2_UPD,
  VST1x3_UPD,
  VST1x4_UPD,

  // Load/Store of dual registers
  LDRD,
  STRD
};

} // end namespace ARMISD
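
// Illustrative only, not part of this interface: the opcodes above are used
// when building target-specific SelectionDAG nodes during lowering. A
// constant-pool address, for example, is typically wrapped in an
// ARMISD::Wrapper node along the lines of the following sketch (variable
// names assumed), which the instruction selector later matches:
//
//   SDValue CPAddr = DAG.getTargetConstantPool(CV, PtrVT, Align(4));
//   SDValue Addr = DAG.getNode(ARMISD::Wrapper, dl, PtrVT, CPAddr);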
namespace ARM {

/// Possible values of current rounding mode, which is specified in bits
/// 23:22 of FPSCR.
enum Rounding {
  RN = 0,    // Round to Nearest
  RP = 1,    // Round towards Plus infinity
  RM = 2,    // Round towards Minus infinity
  RZ = 3,    // Round towards Zero
  rmMask = 3 // Bit mask selecting rounding mode
};

// Bit position of rounding mode bits in FPSCR.
const unsigned RoundingBitsPos = 22;

} // namespace ARM
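
// Illustrative only, not part of this interface: given a raw 32-bit FPSCR
// value, the constants above decode the current rounding mode along these
// lines (the FPSCR variable is assumed):
//
//   ARM::Rounding Mode = static_cast<ARM::Rounding>(
//       (FPSCR >> ARM::RoundingBitsPos) & ARM::rmMask);
//
// so an FPSCR with bits 23:22 equal to 0b11 yields ARM::RZ (round towards
// zero).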

/// Define some predicates that are used for node matching.
namespace ARM {

bool isBitFieldInvertedMask(unsigned v);

} // end namespace ARM
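
// Illustrative only, not part of this interface: isBitFieldInvertedMask is
// queried when forming bit-field insert/clear patterns (see ARMISD::BFI).
// It is expected to return true when the complement of the value is a single
// contiguous run of set bits; for example, 0xffff00ff (whose complement
// 0x0000ff00 covers bits 15:8) would qualify, while 0xff00ff00 would not.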

//===--------------------------------------------------------------------===//
//  ARMTargetLowering - ARM Implementation of the TargetLowering interface

class ARMTargetLowering : public TargetLowering {
public:
  explicit ARMTargetLowering(const TargetMachine &TM,
                             const ARMSubtarget &STI);

  unsigned getJumpTableEncoding() const override;
  bool useSoftFloat() const override;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  /// ReplaceNodeResults - Replace the results of node with an illegal result
  /// type with new values built out of custom code.
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  bool isSelectSupported(SelectSupportKind Kind) const override {
    // ARM does not support scalar condition selects on vectors.
    return (Kind != ScalarCondVectorVal);
  }

  bool isReadOnly(const GlobalValue *GV) const;

  /// getSetCCResultType - Return the value type to use for ISD::SETCC.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformIntrinsicCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue PerformMVEExtCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue PerformMVETruncCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                         const APInt &OriginalDemandedBits,
                                         const APInt &OriginalDemandedElts,
                                         KnownBits &Known,
                                         TargetLoweringOpt &TLO,
                                         unsigned Depth) const override;

  bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;

  /// allowsMisalignedMemoryAccesses - Returns true if the target allows
  /// unaligned memory accesses of the specified type. Returns whether it
  /// is "fast" by reference in the second argument.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                      Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      bool *Fast) const override;

  EVT getOptimalMemOpType(const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
  bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;

  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;
  Type *shouldConvertSplatType(ShuffleVectorInst *SVI) const override;

  bool isFNegFree(EVT VT) const override;

  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;

  bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                             Type *Ty, unsigned AS,
                             Instruction *I = nullptr) const override;

  /// getScalingFactorCost - Return the cost of the scaling used in
  /// addressing mode represented by AM.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, the return value must be negative.
  InstructionCost getScalingFactorCost(const DataLayout &DL,
                                       const AddrMode &AM, Type *Ty,
                                       unsigned AS) const override;

  bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
  /// Returns true if the addressing mode represented by AM is legal
  /// for the Thumb1 target, for a load/store of the specified type.
  bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
  /// isLegalICmpImmediate - Return true if the specified immediate is a legal
  /// icmp immediate, that is, the target has icmp instructions which can
  /// compare a register against the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalICmpImmediate(int64_t Imm) const override;

  /// isLegalAddImmediate - Return true if the specified immediate is a legal
  /// add immediate, that is, the target has add instructions which can
  /// add a register and the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalAddImmediate(int64_t Imm) const override;
  /// getPreIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if the node's address
  /// can be legally represented as pre-indexed load / store address.
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;

  /// getPostIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if this node can be
  /// combined with a load / store to form a post-indexed load / store.
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  bool ExpandInlineAsm(CallInst *CI) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;
  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
  /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
  /// true it means one of the asm constraints of the inline asm instruction
  /// being processed is 'm'.
  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;
  unsigned
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    else if (ConstraintCode.size() == 2) {
      if (ConstraintCode[0] == 'U') {
        switch (ConstraintCode[1]) {
        default:
          break;
        case 'm':
          return InlineAsm::Constraint_Um;
        case 'n':
          return InlineAsm::Constraint_Un;
        case 'q':
          return InlineAsm::Constraint_Uq;
        case 's':
          return InlineAsm::Constraint_Us;
        case 't':
          return InlineAsm::Constraint_Ut;
        case 'v':
          return InlineAsm::Constraint_Uv;
        case 'y':
          return InlineAsm::Constraint_Uy;
        }
      }
    }

    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }
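
  // Illustrative only, not part of this interface: the "Q" memory constraint
  // handled above corresponds to GCC-style ARM inline asm in which the
  // operand must be a memory reference whose address is held in a single
  // base register, e.g. (sketch, variable names assumed):
  //
  //   unsigned char Val;
  //   asm volatile("ldrexb %0, %1" : "=r"(Val) : "Q"(*Ptr));
  //
  // The two-character "U*" constraints are ARM-specific memory constraints
  // mapped to the InlineAsm::Constraint_U* codes returned here.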

  const ARMSubtarget *getSubtarget() const {
    return Subtarget;
  }

  /// getRegClassFor - Return the register class that should be used for the
  /// specified value type.
  const TargetRegisterClass *
  getRegClassFor(MVT VT, bool isDivergent = false) const override;

  bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                              unsigned &PrefAlign) const override;

  /// createFastISel - This method returns a target specific FastISel object,
  /// or null if the target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  Sched::Preference getSchedulingPreference(SDNode *N) const override;

  bool preferZeroCompareBranch() const override { return true; }

  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  /// isFPImmLegal - Returns true if the target can instruction select the
  /// specified FP immediate natively. If false, the legalizer will
  /// materialize the FP immediate as a load from a constant pool.
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize = false) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Using overflow ops for overflow checks only should be beneficial on ARM.
    return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
  }

  /// Returns true if an argument of type Ty needs to be passed in a
  /// contiguous block of registers in calling convention CallConv.
  bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg,
      const DataLayout &DL) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  Instruction *makeDMB(IRBuilderBase &Builder, ARM_MB::MemBOpt Domain) const;
  Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                              AtomicOrdering Ord) const override;

  void
  emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;

  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  unsigned getMaxSupportedInterleaveFactor() const override;

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool shouldInsertFencesForAtomic(const Instruction *I) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;

  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                 unsigned &Cost) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const MachineFunction &MF) const override {
    // Do not merge to larger than i32.
    return (MemVT.getSizeInBits() <= 32);
  }

  bool isCheapToSpeculateCttz() const override;
  bool isCheapToSpeculateCtlz() const override;

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }

  bool supportSwiftError() const override {
    return true;
  }

  bool hasStandaloneRem(EVT VT) const override {
    return HasStandaloneRem;
  }

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;

  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const;
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(unsigned Factor, FixedVectorType *VecTy,
                                    Align Alignment,
                                    const DataLayout &DL) const;

  bool isMulAddWithConstProfitable(const SDValue &AddNode,
                                   const SDValue &ConstNode) const override;

  bool alignLoopsWithOptSize() const override;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;

  void finalizeLowering(MachineFunction &MF) const override;

  /// Return the correct alignment for the current calling convention.
  Align getABIAlignmentForCallingConv(Type *ArgTy,
                                      const DataLayout &DL) const override;

  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                         CombineLevel Level) const override;

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

protected:
  std::pair<const TargetRegisterClass *, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI,
                          MVT VT) const override;

private:
  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

  const TargetRegisterInfo *RegInfo;

  const InstrItineraryData *Itins;

  /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
  unsigned ARMPCLabelIndex;

  // TODO: remove this, and have shouldInsertFencesForAtomic do the proper
  // check.
  bool InsertFencesForAtomic;

  bool HasStandaloneRem = true;

  void addTypeForNEON(MVT VT, MVT PromotedLdStVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);
  std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
                                            SDValue &ARMcc) const;

  using RegsToPassVector = SmallVector<std::pair<unsigned, SDValue>, 8>;

  void PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, SDValue Chain,
                        SDValue &Arg, RegsToPassVector &RegsToPass,
                        CCValAssign &VA, CCValAssign &NextVA,
                        SDValue &StackPtr,
                        SmallVectorImpl<SDValue> &MemOpChains,
                        bool IsTailCall, int SPDiff) const;
  SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                               SDValue &Root, SelectionDAG &DAG,
                               const SDLoc &dl) const;

  CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
                                          bool isVarArg) const;
  CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                bool isVarArg) const;
  std::pair<SDValue, MachinePointerInfo>
  computeAddrForCallArg(const SDLoc &dl, SelectionDAG &DAG,
                        const CCValAssign &VA, SDValue StackPtr,
                        bool IsTailCall, int SPDiff) const;
  SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *Subtarget) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG) const;
  SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                               TLSModel::Model model) const;
  SDValue LowerGlobalTLSAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddressWindows(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUnsignedALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                          const ARMSubtarget *ST) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  void ExpandDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed,
                         SmallVectorImpl<SDValue> &Results) const;
  SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
                        const ARMSubtarget *Subtarget) const;
  SDValue LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, bool Signed,
                                 SDValue &Chain) const;
  SDValue LowerREM(SDNode *N, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSETCC(SDValue Op, SelectionDAG &DAG) const;
  void lowerABS(SDNode *N, SmallVectorImpl<SDValue> &Results,
                SelectionDAG &DAG) const;
  void LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results,
                 SelectionDAG &DAG) const;

  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;

  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;

  SDValue MoveToHPR(const SDLoc &dl, SelectionDAG &DAG, MVT LocVT, MVT ValVT,
                    SDValue Val) const;
  SDValue MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG, MVT LocVT,
                      MVT ValVT, SDValue Val) const;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &dl, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }

  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool
  splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                              SDValue *Parts, unsigned NumParts, MVT PartVT,
                              Optional<CallingConv::ID> CC) const override;

  SDValue
  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
                             const SDValue *Parts, unsigned NumParts,
                             MVT PartVT, EVT ValueVT,
                             Optional<CallingConv::ID> CC) const override;

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       const SDLoc &dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &dl,
                     SDValue &Chain, const Value *OrigArg,
                     unsigned InRegsParamRecordIdx, int ArgOffset,
                     unsigned ArgSize) const;

  void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                            const SDLoc &dl, SDValue &Chain,
                            unsigned ArgOffset, unsigned TotalArgRegsSaveSize,
                            bool ForceMutable = false) const;

  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  /// HandleByVal - Target-specific cleanup for ByVal support.
  void HandleByVal(CCState *, unsigned &, Align) const override;

  /// IsEligibleForTailCallOptimization - Check whether the call is eligible
  /// for tail call optimization. Targets which want to do tail call
  /// optimization should implement this function.
  bool IsEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      bool isCalleeStructRet, bool isCallerStructRet,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
      const bool isIndirect) const;

  bool CanLowerReturn(CallingConv::ID CallConv,
                      MachineFunction &MF, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals,
                      const SDLoc &dl, SelectionDAG &DAG) const override;

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;

  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  bool isUnsupportedFloatingType(EVT VT) const;

  SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
                  SDValue ARMcc, SDValue CCR, SDValue Cmp,
                  SelectionDAG &DAG) const;
  SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                    SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const;
  SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
                    const SDLoc &dl, bool Signaling = false) const;
  SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

  SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

  void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
                              MachineBasicBlock *DispatchBB, int FI) const;

  void EmitSjLjDispatchBlock(MachineInstr &MI, MachineBasicBlock *MBB) const;

  bool RemapAddSubWithFlags(MachineInstr &MI, MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitStructByval(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const;

  MachineBasicBlock *EmitLowered__chkstk(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;
  MachineBasicBlock *EmitLowered__dbzchk(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;

  void addMVEVectorTypes(bool HasMVEFP);
  void addAllExtLoads(const MVT From, const MVT To, LegalizeAction Action);
  void setAllExpand(MVT VT);
};

enum VMOVModImmType {
  VMOVModImm,
  VMVNModImm,
  MVEVMVNModImm,
  OtherModImm
};
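
// Illustrative only, not part of this interface: these categories describe
// which "modified immediate" encoding a vector constant may use. For
// example, a v4i32 splat of 0x000000FF can typically be materialized
// directly as a VMOV.I32 immediate (VMOVModImm), whereas a splat of
// 0xFFFFFF00 would instead be encoded as a VMVN.I32 of 0x000000FF
// (VMVNModImm).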

namespace ARM {

FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);

} // end namespace ARM

} // end namespace llvm

#endif // LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H