//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
STATISTIC(MCNumFixups, "Number of MC fixups created.");

namespace {

class AArch64MCCodeEmitter : public MCCodeEmitter {
  MCContext &Ctx;
  const MCInstrInfo &MCII;

public:
  AArch64MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
      : Ctx(ctx), MCII(mcii) {}
  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) = delete;
  void operator=(const AArch64MCCodeEmitter &) = delete;
  ~AArch64MCCodeEmitter() override = default;

  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  /// getLdStUImm12OpValue - Return encoding info for 12-bit unsigned immediate
  /// attached to a load, store or prfm instruction. If operand requires a
  /// relocation, record it and return zero in that part of the encoding.
  template <uint32_t FixupKind>
  uint32_t getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
  /// target.
  uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  /// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
  /// the 2-bit shift field.
  uint32_t getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getCondBranchTargetOpValue - Return the encoded value for a conditional
  /// branch target.
  uint32_t getCondBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getLoadLiteralOpValue - Return the encoded value for a load-literal
  /// pc-relative address.
  uint32_t getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMemExtendOpValue - Return the encoded value for a reg-extend load/store
  /// instruction: bit 0 is whether a shift is present, bit 1 is whether the
  /// operation is a sign extend (as opposed to a zero extend).
  uint32_t getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
  /// branch target.
  uint32_t getTestBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getBranchTargetOpValue - Return the encoded value for an unconditional
  /// branch target.
  uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;

  /// getMoveWideImmOpValue - Return the encoded value for the immediate operand
  /// of a MOVZ or MOVK instruction.
  uint32_t getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getVecShifterOpValue - Return the encoded value for the vector shifter.
  uint32_t getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getMoveVecShifterOpValue - Return the encoded value for the vector move
  /// shifter (MSL).
  uint32_t getMoveVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const;

  /// getFixedPointScaleOpValue - Return the encoded value for the
  /// FP-to-fixed-point scale factor.
  uint32_t getFixedPointScaleOpValue(const MCInst &MI, unsigned OpIdx,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     const MCSubtargetInfo &STI) const;

  uint32_t getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  uint32_t getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;
  uint32_t getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                           SmallVectorImpl<MCFixup> &Fixups,
                           const MCSubtargetInfo &STI) const;

  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                   const MCSubtargetInfo &STI) const;

  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue,
                      const MCSubtargetInfo &STI) const;

  template<int hasRs, int hasRt2> unsigned
  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue,
                        const MCSubtargetInfo &STI) const;

  unsigned fixOneOperandFPComparison(const MCInst &MI, unsigned EncodedValue,
                                     const MCSubtargetInfo &STI) const;

  uint32_t EncodeMatrixTileListRegisterClass(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const;
  uint32_t encodeMatrixIndexGPR32(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;

private:
  FeatureBitset computeAvailableFeatures(const FeatureBitset &FB) const;
  void
  verifyInstructionPredicates(const MCInst &MI,
                              const FeatureBitset &AvailableFeatures) const;
};

} // end anonymous namespace

/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned
AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  if (MO.isReg())
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());

  assert(MO.isImm() && "did not expect relocated expression");
  return static_cast<unsigned>(MO.getImm());
}

template<unsigned FixupKind> uint32_t
AArch64MCCodeEmitter::getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  uint32_t ImmVal = 0;

  if (MO.isImm())
    ImmVal = static_cast<uint32_t>(MO.getImm());
  else {
    assert(MO.isExpr() && "unable to encode load/store imm operand");
    MCFixupKind Kind = MCFixupKind(FixupKind);
    Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
    ++MCNumFixups;
  }

  return ImmVal;
}
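
// For example, when the immediate operand of an LDR/STR is a symbolic
// expression such as :lo12:sym, no bits are known at encode time: the
// template parameter (e.g. AArch64::fixup_aarch64_ldst_imm12_scale8 for an
// 8-byte load/store) names the fixup that gets recorded, and the imm12 field
// is emitted as zero for the assembler backend or linker to fill in.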

/// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
/// target.
uint32_t
AArch64MCCodeEmitter::getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");
  const MCExpr *Expr = MO.getExpr();

  MCFixupKind Kind = MI.getOpcode() == AArch64::ADR
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_adr_imm21)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_adrp_imm21);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
/// the 2-bit shift field. The shift field is stored in bits 13-14 of the
/// return value.
uint32_t
AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  // Suboperands are [imm, shifter].
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  assert(AArch64_AM::getShiftType(MO1.getImm()) == AArch64_AM::LSL &&
         "unexpected shift type for add/sub immediate");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO1.getImm());
  assert((ShiftVal == 0 || ShiftVal == 12) &&
         "unexpected shift value for add/sub immediate");
  if (MO.isImm())
    return MO.getImm() | (ShiftVal == 0 ? 0 : (1 << ShiftVal));

  assert(MO.isExpr() && "Unable to encode MCOperand!");
  const MCExpr *Expr = MO.getExpr();

  // Encode the 12 bits of the fixup.
  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_add_imm12);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  ++MCNumFixups;

  // Set the shift bit of the add instruction for relocation types
  // R_AARCH64_TLSLE_ADD_TPREL_HI12 and R_AARCH64_TLSLD_ADD_DTPREL_HI12.
  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(Expr)) {
    AArch64MCExpr::VariantKind RefKind = A64E->getKind();
    if (RefKind == AArch64MCExpr::VK_TPREL_HI12 ||
        RefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
        RefKind == AArch64MCExpr::VK_SECREL_HI12)
      ShiftVal = 12;
  }
  return ShiftVal == 0 ? 0 : (1 << ShiftVal);
}
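
// For example, a literal operand pair of #7 with LSL #12 yields
// 7 | (1 << 12) = 0x1007, i.e. the shifted-immediate flag sits above the
// 12-bit immediate in the returned value; with LSL #0 the result is just 7.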

/// getCondBranchTargetOpValue - Return the encoded value for a conditional
/// branch target.
uint32_t AArch64MCCodeEmitter::getCondBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getLoadLiteralOpValue - Return the encoded value for a load-literal
/// pc-relative address.
uint32_t
AArch64MCCodeEmitter::getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_ldr_pcrel_imm19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

uint32_t
AArch64MCCodeEmitter::getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  unsigned SignExtend = MI.getOperand(OpIdx).getImm();
  unsigned DoShift = MI.getOperand(OpIdx + 1).getImm();
  return (SignExtend << 1) | DoShift;
}
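
// For example, a sign-extended, shifted register offset (e.g. [x0, w1, sxtw #3])
// has SignExtend = 1 and DoShift = 1 and encodes as 0b11, while a plain
// zero-extended offset with no shift encodes as 0b00.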

uint32_t
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected movz/movk immediate");

  Fixups.push_back(MCFixup::create(
      0, MO.getExpr(), MCFixupKind(AArch64::fixup_aarch64_movw), MI.getLoc()));

  ++MCNumFixups;

  return 0;
}

/// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
/// branch target.
uint32_t AArch64MCCodeEmitter::getTestBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected ADR target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch14);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getBranchTargetOpValue - Return the encoded value for an unconditional
/// branch target.
uint32_t
AArch64MCCodeEmitter::getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected ADR target type!");

  MCFixupKind Kind = MI.getOpcode() == AArch64::BL
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_call26)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_branch26);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}
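
// For example, `bl callee` records a fixup_aarch64_pcrel_call26 against the
// symbol `callee`, whereas a plain `b target` records
// fixup_aarch64_pcrel_branch26; in both cases the 26-bit offset field is
// emitted as zero and resolved later.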

/// getVecShifterOpValue - Return the encoded value for the vector shifter:
///
/// 00 -> 0
/// 01 -> 8
/// 10 -> 16
/// 11 -> 24
uint32_t
AArch64MCCodeEmitter::getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");

  switch (MO.getImm()) {
  default:
    break;
  case 0:
    return 0;
  case 8:
    return 1;
  case 16:
    return 2;
  case 24:
    return 3;
  }

  llvm_unreachable("Invalid value for vector shift amount!");
}

/// getFixedPointScaleOpValue - Return the encoded value for the
/// FP-to-fixed-point scale factor.
uint32_t AArch64MCCodeEmitter::getFixedPointScaleOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 64 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 64 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 32 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 16 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 8 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 64;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 32;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 16;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 8;
}

uint32_t AArch64MCCodeEmitter::EncodeMatrixTileListRegisterClass(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  unsigned RegMask = MI.getOperand(OpIdx).getImm();
  assert(RegMask <= 0xFF && "Invalid register mask!");
  return RegMask;
}

uint32_t
AArch64MCCodeEmitter::encodeMatrixIndexGPR32(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  assert(RegOpnd >= AArch64::W12 && RegOpnd <= AArch64::W15 &&
         "Expected register in the range w12-w15!");
  return RegOpnd - AArch64::W12;
}

uint32_t
AArch64MCCodeEmitter::getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const {
  // Test shift
  auto ShiftOpnd = MI.getOperand(OpIdx + 1).getImm();
  assert(AArch64_AM::getShiftType(ShiftOpnd) == AArch64_AM::LSL &&
         "Unexpected shift type for imm8_opt_lsl immediate.");

  unsigned ShiftVal = AArch64_AM::getShiftValue(ShiftOpnd);
  assert((ShiftVal == 0 || ShiftVal == 8) &&
         "Unexpected shift value for imm8_opt_lsl immediate.");

  // Test immediate
  auto Immediate = MI.getOperand(OpIdx).getImm();
  return (Immediate & 0xff) | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
}
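
// For example, an SVE imm8_opt_lsl operand of #3 with LSL #8 encodes as
// (3 & 0xff) | (1 << 8) = 0x103: the low byte holds the immediate and the bit
// above it flags the optional byte shift; #3 with LSL #0 is just 0x3.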

uint32_t
AArch64MCCodeEmitter::getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value!");
  // Normalize 1-16 range to 0-15.
  return MO.getImm() - 1;
}

/// getMoveVecShifterOpValue - Return the encoded value for the vector move
/// shifter (MSL).
uint32_t AArch64MCCodeEmitter::getMoveVecShifterOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() &&
         "Expected an immediate value for the move shift amount!");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO.getImm());
  assert((ShiftVal == 8 || ShiftVal == 16) && "Invalid shift amount!");
  return ShiftVal == 8 ? 0 : 1;
}

unsigned AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                                       const MCSubtargetInfo &STI) const {
  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
  // job to ensure that any bits possibly affected by this are 0. This means we
  // must zero out bit 30 (essentially emitting a MOVN).
  MCOperand UImm16MO = MI.getOperand(1);

  // Nothing to do if there's no fixup.
  if (UImm16MO.isImm())
    return EncodedValue;

  const MCExpr *E = UImm16MO.getExpr();
  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
    switch (A64E->getKind()) {
    case AArch64MCExpr::VK_DTPREL_G2:
    case AArch64MCExpr::VK_DTPREL_G1:
    case AArch64MCExpr::VK_DTPREL_G0:
    case AArch64MCExpr::VK_GOTTPREL_G1:
    case AArch64MCExpr::VK_TPREL_G2:
    case AArch64MCExpr::VK_TPREL_G1:
    case AArch64MCExpr::VK_TPREL_G0:
      return EncodedValue & ~(1u << 30);
    default:
      // Nothing to do for an unsigned fixup.
      return EncodedValue;
    }
  }

  return EncodedValue;
}
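
// For example, `movz x0, #:dtprel_g1:var` carries a signed variant
// (VK_DTPREL_G1), so bit 30 of the MOVZ opcode is cleared here; the later
// fixup can then produce either a MOVZ or a MOVN depending on the sign of the
// resolved value without fighting a pre-set opcode bit.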

void AArch64MCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  verifyInstructionPredicates(MI,
                              computeAvailableFeatures(STI.getFeatureBits()));

  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
    // following (BLR) instruction. It doesn't emit any code itself so it
    // doesn't go through the normal TableGenerated channels.
    auto Reloc = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32
                     ? ELF::R_AARCH64_P32_TLSDESC_CALL
                     : ELF::R_AARCH64_TLSDESC_CALL;
    Fixups.push_back(
        MCFixup::create(0, MI.getOperand(0).getExpr(),
                        MCFixupKind(FirstLiteralRelocationKind + Reloc)));
    return;
  }

  if (MI.getOpcode() == AArch64::CompilerBarrier ||
      MI.getOpcode() == AArch64::SPACE) {
    // CompilerBarrier just prevents the compiler from reordering accesses, and
    // SPACE just increases basic block size; neither emits any code.
    return;
  }

  uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
  support::endian::write<uint32_t>(OS, Binary, support::little);
  ++MCNumEmitted; // Keep track of the # of mi's emitted.
}

unsigned
AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
                                 unsigned EncodedValue,
                                 const MCSubtargetInfo &STI) const {
  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
  // (i.e. all bits 1) but is ignored by the processor.
  EncodedValue |= 0x1f << 10;
  return EncodedValue;
}

template<int hasRs, int hasRt2> unsigned
AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
                                            unsigned EncodedValue,
                                            const MCSubtargetInfo &STI) const {
  if (!hasRs) EncodedValue |= 0x001F0000;
  if (!hasRt2) EncodedValue |= 0x00007C00;

  return EncodedValue;
}
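
// For example, LDXR has neither an Rs (status) nor an Rt2 operand, so both
// fields are forced to all-ones (0x1F, i.e. register 31) in the encoding,
// while an instruction such as STXP supplies both operands and the encoded
// value is left untouched.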

unsigned AArch64MCCodeEmitter::fixOneOperandFPComparison(
    const MCInst &MI, unsigned EncodedValue, const MCSubtargetInfo &STI) const {
  // The Rm field of FCMP and friends is unused - it should be assembled
  // as 0, but is ignored by the processor.
  EncodedValue &= ~(0x1f << 16);

  return EncodedValue;
}

#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "AArch64GenMCCodeEmitter.inc"

MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
                                                const MCRegisterInfo &MRI,
                                                MCContext &Ctx) {
  return new AArch64MCCodeEmitter(MCII, Ctx);
}