//===-- ARM/ARMMCCodeEmitter.cpp - Convert ARM code to machine code -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ARMMCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMFixupKinds.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
STATISTIC(MCNumCPRelocations, "Number of constant pool relocations created.");

namespace {

class ARMMCCodeEmitter : public MCCodeEmitter {
  const MCInstrInfo &MCII;
  MCContext &CTX;
  bool IsLittleEndian;

public:
  ARMMCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx, bool IsLittle)
      : MCII(mcii), CTX(ctx), IsLittleEndian(IsLittle) {
  }
  ARMMCCodeEmitter(const ARMMCCodeEmitter &) = delete;
  ARMMCCodeEmitter &operator=(const ARMMCCodeEmitter &) = delete;
  ~ARMMCCodeEmitter() override = default;

  bool isThumb(const MCSubtargetInfo &STI) const {
    return STI.getFeatureBits()[ARM::ModeThumb];
  }
  bool isThumb2(const MCSubtargetInfo &STI) const {
    return isThumb(STI) && STI.getFeatureBits()[ARM::FeatureThumb2];
  }
  bool isTargetMachO(const MCSubtargetInfo &STI) const {
    const Triple &TT = STI.getTargetTriple();
    return TT.isOSBinFormatMachO();
  }

  unsigned getMachineSoImmOpValue(unsigned SoImm) const;

  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getHiLo16ImmOpValue - Return the encoding for the hi / low 16-bit of
  /// the specified operand. This is used for operands with :lower16: and
  /// :upper16: prefixes.
  uint32_t getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  bool EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx,
      unsigned &Reg, unsigned &Imm,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getThumbBLTargetOpValue - Return encoding info for Thumb immediate
  /// BL branch target.
  uint32_t getThumbBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getThumbBLXTargetOpValue - Return encoding info for Thumb immediate
  /// BLX branch target.
  uint32_t getThumbBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getThumbBRTargetOpValue - Return encoding info for Thumb branch target.
  uint32_t getThumbBRTargetOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getThumbBCCTargetOpValue - Return encoding info for Thumb branch target.
  uint32_t getThumbBCCTargetOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getThumbCBTargetOpValue - Return encoding info for Thumb branch target.
  uint32_t getThumbCBTargetOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getBranchTargetOpValue - Return encoding info for 24-bit immediate
  /// branch target.
  uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getThumbBranchTargetOpValue - Return encoding info for 24-bit
  /// immediate Thumb2 direct branch target.
  uint32_t getThumbBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getARMBranchTargetOpValue - Return encoding info for 24-bit immediate
  /// branch target.
  uint32_t getARMBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  uint32_t getARMBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  uint32_t getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getAdrLabelOpValue - Return encoding info for 12-bit immediate
  /// ADR label target.
  uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  uint32_t getThumbAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  uint32_t getT2AdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  uint32_t getITMaskOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getMVEShiftImmOpValue - Return encoding info for the 'sz:imm5'
  /// operand.
  uint32_t getMVEShiftImmOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getAddrModeImm12OpValue - Return encoding info for 'reg +/- imm12'
  /// operand.
  uint32_t getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getThumbAddrModeRegRegOpValue - Return encoding for 'reg + reg' operand.
  uint32_t getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getT2AddrModeImm8s4OpValue - Return encoding info for 'reg +/- imm8<<2'
  /// operand.
  uint32_t getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getT2AddrModeImm7s4OpValue - Return encoding info for 'reg +/- imm7<<2'
  /// operand.
  uint32_t getT2AddrModeImm7s4OpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getT2AddrModeImm0_1020s4OpValue - Return encoding info for 'reg + imm8<<2'
  /// operand.
  uint32_t getT2AddrModeImm0_1020s4OpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getT2ScaledImmOpValue - Return encoding info for '+/- immX<<Y'
  /// operand.
  template<unsigned Bits, unsigned Shift>
  uint32_t getT2ScaledImmOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getMveAddrModeRQOpValue - Return encoding info for 'reg, vreg'
  /// operand.
  uint32_t getMveAddrModeRQOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getMveAddrModeQOpValue - Return encoding info for 'reg +/- imm7<<{shift}'
  /// operand.
  template<int shift>
  uint32_t getMveAddrModeQOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getLdStSORegOpValue - Return encoding info for 'reg +/- reg shop imm'
  /// operand as needed by load/store instructions.
  uint32_t getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getLdStmModeOpValue - Return encoding for load/store multiple mode.
  uint32_t getLdStmModeOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const {
    ARM_AM::AMSubMode Mode = (ARM_AM::AMSubMode)MI.getOperand(OpIdx).getImm();
    switch (Mode) {
    default: llvm_unreachable("Unknown addressing sub-mode!");
    case ARM_AM::da: return 0;
    case ARM_AM::ia: return 1;
    case ARM_AM::db: return 2;
    case ARM_AM::ib: return 3;
    }
  }

  /// getShiftOp - Return the shift opcode (bit[6:5]) of the immediate value.
  ///
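  /// (Note: ror and rrx share type 0b11; in the ARM encodings rrx is the ror
  /// form with a zero shift amount.)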
  unsigned getShiftOp(ARM_AM::ShiftOpc ShOpc) const {
    switch (ShOpc) {
    case ARM_AM::no_shift:
    case ARM_AM::lsl: return 0;
    case ARM_AM::lsr: return 1;
    case ARM_AM::asr: return 2;
    case ARM_AM::ror:
    case ARM_AM::rrx: return 3;
    default:
      llvm_unreachable("Invalid ShiftOpc!");
    }
  }

  /// getAddrMode2OffsetOpValue - Return encoding for am2offset operands.
  uint32_t getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getPostIdxRegOpValue - Return encoding for postidx_reg operands.
  uint32_t getPostIdxRegOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getAddrMode3OffsetOpValue - Return encoding for am3offset operands.
  uint32_t getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getAddrMode3OpValue - Return encoding for addrmode3 operands.
  uint32_t getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getAddrModeThumbSPOpValue - Return encoding info for 'reg +/- imm12'
  /// operand.
  uint32_t getAddrModeThumbSPOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getAddrModeISOpValue - Encode the t_addrmode_is# operands.
  uint32_t getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getAddrModePCOpValue - Return encoding for t_addrmode_pc operands.
  uint32_t getAddrModePCOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getAddrMode5OpValue - Return encoding info for 'reg +/- (imm8 << 2)' operand.
  uint32_t getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getAddrMode5FP16OpValue - Return encoding info for 'reg +/- (imm8 << 1)' operand.
  uint32_t getAddrMode5FP16OpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getCCOutOpValue - Return encoding of the 's' bit.
  unsigned getCCOutOpValue(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const {
    // The operand is either reg0 or CPSR. The 's' bit is encoded as '0' or
    // '1' respectively.
    return MI.getOperand(Op).getReg() == ARM::CPSR;
  }

  unsigned getModImmOpValue(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &ST) const {
    const MCOperand &MO = MI.getOperand(Op);
    // Support for fixups (MCFixup)
    if (MO.isExpr()) {
      const MCExpr *Expr = MO.getExpr();
      // Fixups resolve to plain values that need to be encoded.
      MCFixupKind Kind = MCFixupKind(ARM::fixup_arm_mod_imm);
      Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
      return 0;
    }
    // Immediate is already in its encoded format
    return MO.getImm();
  }

  /// getT2SOImmOpValue - Return an encoded 12-bit shifted-immediate value.
  unsigned getT2SOImmOpValue(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const {
    const MCOperand &MO = MI.getOperand(Op);
    // Support for fixups (MCFixup)
    if (MO.isExpr()) {
      const MCExpr *Expr = MO.getExpr();
      // Fixups resolve to plain values that need to be encoded.
      MCFixupKind Kind = MCFixupKind(ARM::fixup_t2_so_imm);
      Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
      return 0;
    }
    unsigned SoImm = MO.getImm();
    unsigned Encoded = ARM_AM::getT2SOImmVal(SoImm);
    assert(Encoded != ~0U && "Not a Thumb2 so_imm value?");
    return Encoded;
  }

  unsigned getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  template<unsigned Bits, unsigned Shift>
  unsigned getT2AddrModeImmOpValue(const MCInst &MI, unsigned OpNum,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  unsigned getT2AddrModeImm8OffsetOpValue(const MCInst &MI, unsigned OpNum,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  /// getSORegOpValue - Return an encoded so_reg shifted register value.
  unsigned getSORegRegOpValue(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  unsigned getSORegImmOpValue(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  unsigned getT2SORegOpValue(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  unsigned getNEONVcvtImm32OpValue(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const {
    return 64 - MI.getOperand(Op).getImm();
  }

  unsigned getBitfieldInvertedMaskOpValue(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  unsigned getRegisterListOpValue(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  unsigned getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  unsigned getAddrMode6OneLane32AddressOpValue(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  unsigned getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  unsigned getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  unsigned getShiftRight8Imm(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  unsigned getShiftRight16Imm(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  unsigned getShiftRight32Imm(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  unsigned getShiftRight64Imm(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  unsigned getThumbSRImmOpValue(const MCInst &MI, unsigned Op,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  unsigned NEONThumb2DataIPostEncoder(const MCInst &MI,
      unsigned EncodedValue,
      const MCSubtargetInfo &STI) const;
  unsigned NEONThumb2LoadStorePostEncoder(const MCInst &MI,
      unsigned EncodedValue,
      const MCSubtargetInfo &STI) const;
  unsigned NEONThumb2DupPostEncoder(const MCInst &MI,
      unsigned EncodedValue,
      const MCSubtargetInfo &STI) const;
  unsigned NEONThumb2V8PostEncoder(const MCInst &MI,
      unsigned EncodedValue,
      const MCSubtargetInfo &STI) const;
  unsigned VFPThumb2PostEncoder(const MCInst &MI,
      unsigned EncodedValue,
      const MCSubtargetInfo &STI) const;
  uint32_t getPowerTwoOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;

  void EmitByte(unsigned char C, raw_ostream &OS) const {
    OS << (char)C;
  }

  void EmitConstant(uint64_t Val, unsigned Size, raw_ostream &OS) const {
    // Output the constant in the target's byte order.
    for (unsigned i = 0; i != Size; ++i) {
      unsigned Shift = IsLittleEndian ? i * 8 : (Size - 1 - i) * 8;
      EmitByte((Val >> Shift) & 0xff, OS);
    }
  }

  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const override;

  template <bool isNeg, ARM::Fixups fixup>
  uint32_t getBFTargetOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  uint32_t getBFAfterTargetOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  uint32_t getVPTMaskOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  uint32_t getRestrictedCondCodeOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
  template <unsigned size>
  uint32_t getMVEPairVectorIndexOpValue(const MCInst &MI, unsigned OpIdx,
      SmallVectorImpl<MCFixup> &Fixups,
      const MCSubtargetInfo &STI) const;
};

} // end anonymous namespace

/// NEONThumb2DataIPostEncoder - Post-process encoded NEON data-processing
/// instructions, and rewrite them to their Thumb2 form if we are currently in
/// Thumb2 mode.
unsigned ARMMCCodeEmitter::NEONThumb2DataIPostEncoder(const MCInst &MI,
    unsigned EncodedValue,
    const MCSubtargetInfo &STI) const {
  if (isThumb2(STI)) {
    // NEON Thumb2 data-processing encodings are very simple: bit 24 is moved
    // to bit 12 of the high half-word (i.e. bit 28), and bits 27-24 are
    // set to 1111.
    unsigned Bit24 = EncodedValue & 0x01000000;
    unsigned Bit28 = Bit24 << 4;
    EncodedValue &= 0xEFFFFFFF;
    EncodedValue |= Bit28;
    EncodedValue |= 0x0F000000;
  }
  return EncodedValue;
}

/// NEONThumb2LoadStorePostEncoder - Post-process encoded NEON load/store
/// instructions, and rewrite them to their Thumb2 form if we are currently in
/// Thumb2 mode.
unsigned ARMMCCodeEmitter::NEONThumb2LoadStorePostEncoder(const MCInst &MI,
    unsigned EncodedValue,
    const MCSubtargetInfo &STI) const {
  if (isThumb2(STI)) {
    EncodedValue &= 0xF0FFFFFF;
    EncodedValue |= 0x09000000;
  }
  return EncodedValue;
}

/// NEONThumb2DupPostEncoder - Post-process encoded NEON vdup
/// instructions, and rewrite them to their Thumb2 form if we are currently in
/// Thumb2 mode.
unsigned ARMMCCodeEmitter::NEONThumb2DupPostEncoder(const MCInst &MI,
    unsigned EncodedValue,
    const MCSubtargetInfo &STI) const {
  if (isThumb2(STI)) {
    EncodedValue &= 0x00FFFFFF;
    EncodedValue |= 0xEE000000;
  }
  return EncodedValue;
}

/// Post-process encoded NEON v8 instructions, and rewrite them to Thumb2 form
/// if we are in Thumb2.
unsigned ARMMCCodeEmitter::NEONThumb2V8PostEncoder(const MCInst &MI,
    unsigned EncodedValue,
    const MCSubtargetInfo &STI) const {
  if (isThumb2(STI)) {
    EncodedValue |= 0xC000000; // Set bits 27-26
  }
  return EncodedValue;
}

/// VFPThumb2PostEncoder - Post-process encoded VFP instructions and rewrite
/// them to their Thumb2 form if we are currently in Thumb2 mode.
unsigned ARMMCCodeEmitter::
VFPThumb2PostEncoder(const MCInst &MI, unsigned EncodedValue,
    const MCSubtargetInfo &STI) const {
  if (isThumb2(STI)) {
    EncodedValue &= 0x0FFFFFFF;
    EncodedValue |= 0xE0000000;
  }
  return EncodedValue;
}

/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned ARMMCCodeEmitter::
getMachineOpValue(const MCInst &MI, const MCOperand &MO,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  if (MO.isReg()) {
    unsigned Reg = MO.getReg();
    unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg);
    // In NEON, Q registers are encoded as 2x their register number,
    // because they're using the same indices as the D registers they
    // overlap. In MVE, there are no 64-bit vector instructions, so
    // the encodings all refer to Q-registers by their literal
    // register number.
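    // For example, Q1 overlaps D2 and D3, so NEON encodes it as 2 (the
    // encoding of D2), while MVE encodes it directly as 1.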
    if (STI.getFeatureBits()[ARM::HasMVEIntegerOps])
      return RegNo;
    switch (Reg) {
    default:
      return RegNo;
    case ARM::Q0: case ARM::Q1: case ARM::Q2: case ARM::Q3:
    case ARM::Q4: case ARM::Q5: case ARM::Q6: case ARM::Q7:
    case ARM::Q8: case ARM::Q9: case ARM::Q10: case ARM::Q11:
    case ARM::Q12: case ARM::Q13: case ARM::Q14: case ARM::Q15:
      return 2 * RegNo;
    }
  } else if (MO.isImm()) {
    return static_cast<unsigned>(MO.getImm());
  } else if (MO.isDFPImm()) {
    return static_cast<unsigned>(APFloat(bit_cast<double>(MO.getDFPImm()))
        .bitcastToAPInt()
        .getHiBits(32)
        .getLimitedValue());
  }
  llvm_unreachable("Unable to encode MCOperand!");
}

/// EncodeAddrModeOpValues - Return encoding info for 'reg +/- imm' operand.
bool ARMMCCodeEmitter::
EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx, unsigned &Reg,
    unsigned &Imm, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  Reg = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
  int32_t SImm = MO1.getImm();
  bool isAdd = true;
  // Special value for #-0
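  // (#-0 is represented here as INT32_MIN; it still encodes as a subtraction,
  // i.e. with the U bit clear, of a zero offset.)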
  if (SImm == INT32_MIN) {
    SImm = 0;
    isAdd = false;
  }
  // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
  if (SImm < 0) {
    SImm = -SImm;
    isAdd = false;
  }
  Imm = SImm;
  return isAdd;
}

/// getBranchTargetOpValue - Helper function to get the branch target operand,
/// which is either an immediate or requires a fixup.
static uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
    unsigned FixupKind,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) {
  const MCOperand &MO = MI.getOperand(OpIdx);
  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm()) return MO.getImm();
  assert(MO.isExpr() && "Unexpected branch target type!");
  const MCExpr *Expr = MO.getExpr();
  MCFixupKind Kind = MCFixupKind(FixupKind);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
  // All of the information is in the fixup.
  return 0;
}

// Thumb BL and BLX use a strange offset encoding where bits 22 and 21 are
// determined by negating them and XOR'ing them with bit 23.
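// (In the ARM ARM's 32-bit Thumb BL/BLX encodings, I1 = NOT(J1 EOR S) and
// I2 = NOT(J2 EOR S); the code below solves that relation for J1/J2 given the
// offset's S, I1 and I2 bits.)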
static int32_t encodeThumbBLOffset(int32_t offset) {
  offset >>= 1;
  uint32_t S = (offset & 0x800000) >> 23;
  uint32_t J1 = (offset & 0x400000) >> 22;
  uint32_t J2 = (offset & 0x200000) >> 21;
  J1 = (~J1 & 0x1);
  J2 = (~J2 & 0x1);
  J1 ^= S;
  J2 ^= S;
  offset &= ~0x600000;
  offset |= J1 << 22;
  offset |= J2 << 21;
  return offset;
}

/// getThumbBLTargetOpValue - Return encoding info for immediate branch target.
uint32_t ARMMCCodeEmitter::
getThumbBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_bl,
                                    Fixups, STI);
  return encodeThumbBLOffset(MO.getImm());
}

/// getThumbBLXTargetOpValue - Return encoding info for Thumb immediate
/// BLX branch target.
uint32_t ARMMCCodeEmitter::
getThumbBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_blx,
                                    Fixups, STI);
  return encodeThumbBLOffset(MO.getImm());
}

/// getThumbBRTargetOpValue - Return encoding info for Thumb branch target.
uint32_t ARMMCCodeEmitter::
getThumbBRTargetOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_br,
                                    Fixups, STI);
  return (MO.getImm() >> 1);
}

/// getThumbBCCTargetOpValue - Return encoding info for Thumb branch target.
uint32_t ARMMCCodeEmitter::
getThumbBCCTargetOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_bcc,
                                    Fixups, STI);
  return (MO.getImm() >> 1);
}

/// getThumbCBTargetOpValue - Return encoding info for Thumb branch target.
uint32_t ARMMCCodeEmitter::
getThumbCBTargetOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_cb, Fixups, STI);
  return (MO.getImm() >> 1);
}

/// Return true if this branch has a non-always predication
static bool HasConditionalBranch(const MCInst &MI) {
  int NumOp = MI.getNumOperands();
  if (NumOp >= 2) {
    for (int i = 0; i < NumOp-1; ++i) {
      const MCOperand &MCOp1 = MI.getOperand(i);
      const MCOperand &MCOp2 = MI.getOperand(i + 1);
      if (MCOp1.isImm() && MCOp2.isReg() &&
          (MCOp2.getReg() == 0 || MCOp2.getReg() == ARM::CPSR)) {
        if (ARMCC::CondCodes(MCOp1.getImm()) != ARMCC::AL)
          return true;
      }
    }
  }
  return false;
}

/// getBranchTargetOpValue - Return encoding info for 24-bit immediate branch
/// target.
uint32_t ARMMCCodeEmitter::
getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  // FIXME: This really, really shouldn't use TargetMachine. We don't want
  // coupling between MC and TM anywhere we can help it.
  if (isThumb2(STI))
    return
        ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_condbranch, Fixups, STI);
  return getARMBranchTargetOpValue(MI, OpIdx, Fixups, STI);
}

/// getARMBranchTargetOpValue - Return encoding info for 24-bit immediate
/// branch target.
uint32_t ARMMCCodeEmitter::
getARMBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr()) {
    if (HasConditionalBranch(MI))
      return ::getBranchTargetOpValue(MI, OpIdx,
                                      ARM::fixup_arm_condbranch, Fixups, STI);
    return ::getBranchTargetOpValue(MI, OpIdx,
                                    ARM::fixup_arm_uncondbranch, Fixups, STI);
  }
  return MO.getImm() >> 2;
}

uint32_t ARMMCCodeEmitter::
getARMBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr()) {
    if (HasConditionalBranch(MI))
      return ::getBranchTargetOpValue(MI, OpIdx,
                                      ARM::fixup_arm_condbl, Fixups, STI);
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_uncondbl, Fixups, STI);
  }
  return MO.getImm() >> 2;
}

uint32_t ARMMCCodeEmitter::
getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_blx, Fixups, STI);
  return MO.getImm() >> 1;
}

/// getThumbBranchTargetOpValue - Return encoding info for 24-bit
/// immediate Thumb2 direct branch target.
uint32_t ARMMCCodeEmitter::getThumbBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  unsigned Val = 0;
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_uncondbranch, Fixups, STI);
  else
    Val = MO.getImm() >> 1;
  bool I = (Val & 0x800000);
  bool J1 = (Val & 0x400000);
  bool J2 = (Val & 0x200000);
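  // Rewrite bits 22 and 21 so that J1 = NOT(I1 EOR S) and J2 = NOT(I2 EOR S),
  // as required by the 32-bit Thumb branch encoding.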
  if (I ^ J1)
    Val &= ~0x400000;
  else
    Val |= 0x400000;
  if (I ^ J2)
    Val &= ~0x200000;
  else
    Val |= 0x200000;
  return Val;
}

/// getAdrLabelOpValue - Return encoding info for 12-bit shifted-immediate
/// ADR label target.
uint32_t ARMMCCodeEmitter::
getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
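  // The returned value uses bit 13 for add and bit 12 for subtract (keeping
  // #-0 and negative offsets distinct); the low 12 bits hold the so_imm
  // encoding of the offset's magnitude.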
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_adr_pcrel_12,
                                    Fixups, STI);
  int64_t offset = MO.getImm();
  uint32_t Val = 0x2000;
  int SoImmVal;
  if (offset == INT32_MIN) {
    Val = 0x1000;
    SoImmVal = 0;
  } else if (offset < 0) {
    Val = 0x1000;
    offset *= -1;
    SoImmVal = ARM_AM::getSOImmVal(offset);
    if (SoImmVal == -1) {
      Val = 0x2000;
      offset *= -1;
      SoImmVal = ARM_AM::getSOImmVal(offset);
    }
  } else {
    SoImmVal = ARM_AM::getSOImmVal(offset);
    if (SoImmVal == -1) {
      Val = 0x1000;
      offset *= -1;
      SoImmVal = ARM_AM::getSOImmVal(offset);
    }
  }
  assert(SoImmVal != -1 && "Not a valid so_imm value!");
  Val |= SoImmVal;
  return Val;
}

/// getT2AdrLabelOpValue - Return encoding info for 12-bit immediate ADR label
/// target.
uint32_t ARMMCCodeEmitter::
getT2AdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_adr_pcrel_12,
                                    Fixups, STI);
  int32_t Val = MO.getImm();
  if (Val == INT32_MIN)
    Val = 0x1000;
  else if (Val < 0) {
    Val *= -1;
    Val |= 0x1000;
  }
  return Val;
}

/// getITMaskOpValue - Return the architectural encoding of an IT
/// predication mask, given the MCOperand format.
uint32_t ARMMCCodeEmitter::
getITMaskOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand MaskMO = MI.getOperand(OpIdx);
  assert(MaskMO.isImm() && "Unexpected operand type!");
  unsigned Mask = MaskMO.getImm();
  // IT masks are encoded as a sequence of replacement low-order bits
  // for the condition code. So if the low bit of the starting
  // condition code is 1, then we have to flip all the bits above the
  // terminating bit (which is the lowest 1 bit).
  assert(OpIdx > 0 && "IT mask appears first!");
  const MCOperand CondMO = MI.getOperand(OpIdx-1);
  assert(CondMO.isImm() && "Unexpected operand type!");
  if (CondMO.getImm() & 1) {
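    // Mask & -Mask isolates the terminating '1' bit of the mask; every mask
    // bit above it (within the 4-bit field) is flipped to account for the
    // inverted low bit of the condition code.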
    unsigned LowBit = Mask & -Mask;
    unsigned BitsAboveLowBit = 0xF & (-LowBit << 1);
    Mask ^= BitsAboveLowBit;
  }
  return Mask;
}

/// getThumbAdrLabelOpValue - Return encoding info for 8-bit immediate ADR label
/// target.
uint32_t ARMMCCodeEmitter::
getThumbAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_thumb_adr_pcrel_10,
                                    Fixups, STI);
  return MO.getImm();
}

/// getThumbAddrModeRegRegOpValue - Return encoding info for 'reg + reg'
/// operand.
uint32_t ARMMCCodeEmitter::
getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &,
    const MCSubtargetInfo &STI) const {
  // [Rn, Rm]
  // {5-3} = Rm
  // {2-0} = Rn
  const MCOperand &MO1 = MI.getOperand(OpIdx);
  const MCOperand &MO2 = MI.getOperand(OpIdx + 1);
  unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg());
  unsigned Rm = CTX.getRegisterInfo()->getEncodingValue(MO2.getReg());
  return (Rm << 3) | Rn;
}

/// getMVEShiftImmOpValue - Return encoding info for the 'sz:imm5'
/// operand.
uint32_t
ARMMCCodeEmitter::getMVEShiftImmOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  // {4-0} = szimm5
  // The value we are trying to encode is an immediate in the range [1,7] or
  // [1,15], depending on whether we are dealing with the u8/s8 or the u16/s16
  // variants respectively.
  // It is encoded as szimm5 = ShiftImm + size, where ShiftImm is the value in
  // that range and size is either 8 or 16.
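  // For example, a u16/s16 shift of #3 encodes as szimm5 = 16 + 3 = 0b10011.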
  unsigned Size, ShiftImm;
  switch (MI.getOpcode()) {
  case ARM::MVE_VSHLL_imms16bh:
  case ARM::MVE_VSHLL_imms16th:
  case ARM::MVE_VSHLL_immu16bh:
  case ARM::MVE_VSHLL_immu16th:
    Size = 16;
    break;
  case ARM::MVE_VSHLL_imms8bh:
  case ARM::MVE_VSHLL_imms8th:
  case ARM::MVE_VSHLL_immu8bh:
  case ARM::MVE_VSHLL_immu8th:
    Size = 8;
    break;
  default:
    llvm_unreachable("Use of operand not supported by this instruction");
  }
  ShiftImm = MI.getOperand(OpIdx).getImm();
  return Size + ShiftImm;
}

/// getAddrModeImm12OpValue - Return encoding info for 'reg +/- imm12' operand.
uint32_t ARMMCCodeEmitter::
getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  // {17-13} = reg
  // {12} = (U)nsigned (add == '1', sub == '0')
  // {11-0} = imm12
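  // For example, [r3, #-20] yields reg = 3, U = 0 (subtract) and imm12 = 20.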
  unsigned Reg = 0, Imm12 = 0;
  bool isAdd = true;
  // If the first operand isn't a register, we have a label reference.
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (MO.isReg()) {
    const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
    if (MO1.isImm()) {
      isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm12, Fixups, STI);
    } else if (MO1.isExpr()) {
      assert(!isThumb(STI) && !isThumb2(STI) &&
             "Thumb mode requires different encoding");
      Reg = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
      isAdd = false; // 'U' bit is set as part of the fixup.
      MCFixupKind Kind = MCFixupKind(ARM::fixup_arm_ldst_abs_12);
      Fixups.push_back(MCFixup::create(0, MO1.getExpr(), Kind, MI.getLoc()));
    }
  } else if (MO.isExpr()) {
    Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.
    isAdd = false; // 'U' bit is set as part of the fixup.
    MCFixupKind Kind;
    if (isThumb2(STI))
      Kind = MCFixupKind(ARM::fixup_t2_ldst_pcrel_12);
    else
      Kind = MCFixupKind(ARM::fixup_arm_ldst_pcrel_12);
    Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
    ++MCNumCPRelocations;
  } else {
    Reg = ARM::PC;
    int32_t Offset = MO.getImm();
    if (Offset == INT32_MIN) {
      Offset = 0;
      isAdd = false;
    } else if (Offset < 0) {
      Offset *= -1;
      isAdd = false;
    }
    Imm12 = Offset;
  }
  uint32_t Binary = Imm12 & 0xfff;
  // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
  if (isAdd)
    Binary |= (1 << 12);
  Binary |= (Reg << 13);
  return Binary;
}

template<unsigned Bits, unsigned Shift>
uint32_t ARMMCCodeEmitter::
getT2ScaledImmOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  // FIXME: The immediate operand should have already been encoded like this
  // before ever getting here. The encoder method should just need to combine
  // the MI operands for the register and the offset into a single
  // representation for the complex operand in the .td file. This isn't just
  // style, unfortunately. As-is, we can't represent the distinct encoding
  // for #-0.
  // {Bits} = (U)nsigned (add == '1', sub == '0')
  // {(Bits-1)-0} = immediate
  int32_t Imm = MI.getOperand(OpIdx).getImm();
  bool isAdd = Imm >= 0;
  // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
  if (Imm < 0)
    Imm = -(uint32_t)Imm;
  Imm >>= Shift;
  uint32_t Binary = Imm & ((1U << Bits) - 1);
  // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
  if (isAdd)
    Binary |= (1U << Bits);
  return Binary;
}

/// getMveAddrModeRQOpValue - Return encoding info for 'reg, vreg'
/// operand.
uint32_t ARMMCCodeEmitter::
getMveAddrModeRQOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  // {6-3} Rn
  // {2-0} Qm
  const MCOperand &M0 = MI.getOperand(OpIdx);
  const MCOperand &M1 = MI.getOperand(OpIdx + 1);
  unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(M0.getReg());
  unsigned Qm = CTX.getRegisterInfo()->getEncodingValue(M1.getReg());
  assert(Qm < 8 && "Qm is supposed to be encodable in 3 bits");
  return (Rn << 3) | Qm;
}

/// getMveAddrModeQOpValue - Return encoding info for 'reg +/- imm7<<{shift}'
/// operand.
template<int shift>
uint32_t ARMMCCodeEmitter::
getMveAddrModeQOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  // {10-8} Qm
  // {7-0} Imm
  const MCOperand &M0 = MI.getOperand(OpIdx);
  const MCOperand &M1 = MI.getOperand(OpIdx + 1);
  unsigned Qm = CTX.getRegisterInfo()->getEncodingValue(M0.getReg());
  int32_t Imm = M1.getImm();
  bool isAdd = Imm >= 0;
  Imm >>= shift;
  if (!isAdd)
    Imm = -(uint32_t)Imm;
  Imm &= 0x7f;
  if (isAdd)
    Imm |= 0x80;
  assert(Qm < 8 && "Qm is supposed to be encodable in 3 bits");
  return (Qm << 8) | Imm;
}

/// getT2AddrModeImm8s4OpValue - Return encoding info for
/// 'reg +/- imm8<<2' operand.
uint32_t ARMMCCodeEmitter::
getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  // {12-9} = reg
  // {8} = (U)nsigned (add == '1', sub == '0')
  // {7-0} = imm8
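  // For example, an offset of +40 yields imm8 = 40 >> 2 = 10 with the U bit set.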
  unsigned Reg, Imm8;
  bool isAdd = true;
  // If the first operand isn't a register, we have a label reference.
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (!MO.isReg()) {
    Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.
    Imm8 = 0;
    isAdd = false; // 'U' bit is set as part of the fixup.
    assert(MO.isExpr() && "Unexpected machine operand type!");
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = MCFixupKind(ARM::fixup_t2_pcrel_10);
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
    ++MCNumCPRelocations;
  } else
    isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm8, Fixups, STI);
  // FIXME: The immediate operand should have already been encoded like this
  // before ever getting here. The encoder method should just need to combine
  // the MI operands for the register and the offset into a single
  // representation for the complex operand in the .td file. This isn't just
  // style, unfortunately. As-is, we can't represent the distinct encoding
  // for #-0.
  assert(((Imm8 & 0x3) == 0) && "Not a valid immediate!");
  uint32_t Binary = (Imm8 >> 2) & 0xff;
  // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
  if (isAdd)
    Binary |= (1 << 8);
  Binary |= (Reg << 9);
  return Binary;
}

/// getT2AddrModeImm7s4OpValue - Return encoding info for
/// 'reg +/- imm7<<2' operand.
uint32_t
ARMMCCodeEmitter::getT2AddrModeImm7s4OpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  // {11-8} = reg
  // {7} = (A)dd (add == '1', sub == '0')
  // {6-0} = imm7
  unsigned Reg, Imm7;
  // If the first operand isn't a register, we have a label reference.
  bool isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm7, Fixups, STI);
  // FIXME: The immediate operand should have already been encoded like this
  // before ever getting here. The encoder method should just need to combine
  // the MI operands for the register and the offset into a single
  // representation for the complex operand in the .td file. This isn't just
  // style, unfortunately. As-is, we can't represent the distinct encoding
  // for #-0.
  uint32_t Binary = (Imm7 >> 2) & 0xff;
  // Immediate is always encoded as positive. The 'A' bit controls add vs sub.
  if (isAdd)
    Binary |= (1 << 7);
  Binary |= (Reg << 8);
  return Binary;
}

/// getT2AddrModeImm0_1020s4OpValue - Return encoding info for
/// 'reg + imm8<<2' operand.
uint32_t ARMMCCodeEmitter::
getT2AddrModeImm0_1020s4OpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  // {11-8} = reg
  // {7-0} = imm8
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  unsigned Reg = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
  unsigned Imm8 = MO1.getImm();
  return (Reg << 8) | Imm8;
}

uint32_t
ARMMCCodeEmitter::getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  // {20-16} = imm{15-12}
  // {11-0} = imm{11-0}
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (MO.isImm())
    // Hi / lo 16 bits already extracted during earlier passes.
    return static_cast<unsigned>(MO.getImm());
  // Handle :upper16: and :lower16: assembly prefixes.
  const MCExpr *E = MO.getExpr();
  MCFixupKind Kind;
  if (E->getKind() == MCExpr::Target) {
    const ARMMCExpr *ARM16Expr = cast<ARMMCExpr>(E);
    E = ARM16Expr->getSubExpr();
    if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(E)) {
      const int64_t Value = MCE->getValue();
      if (Value > UINT32_MAX)
        report_fatal_error("constant value truncated (limited to 32-bit)");
      switch (ARM16Expr->getKind()) {
      case ARMMCExpr::VK_ARM_HI16:
        return (int32_t(Value) & 0xffff0000) >> 16;
      case ARMMCExpr::VK_ARM_LO16:
        return (int32_t(Value) & 0x0000ffff);
      default: llvm_unreachable("Unsupported ARMFixup");
      }
    }
    switch (ARM16Expr->getKind()) {
    default: llvm_unreachable("Unsupported ARMFixup");
    case ARMMCExpr::VK_ARM_HI16:
      Kind = MCFixupKind(isThumb(STI) ? ARM::fixup_t2_movt_hi16
                                      : ARM::fixup_arm_movt_hi16);
      break;
    case ARMMCExpr::VK_ARM_LO16:
      Kind = MCFixupKind(isThumb(STI) ? ARM::fixup_t2_movw_lo16
                                      : ARM::fixup_arm_movw_lo16);
      break;
    }
    Fixups.push_back(MCFixup::create(0, E, Kind, MI.getLoc()));
    return 0;
  }
  // If the expression doesn't have :upper16: or :lower16: on it, it's just a
  // plain immediate expression. Such expressions used to evaluate to the lower
  // 16 bits regardless of whether we had a movt or a movw, which led to
  // misleading results. This is disallowed in the AsmParser in
  // validateInstruction(), so it should never happen here.
  llvm_unreachable("expression without :upper16: or :lower16:");
}

uint32_t ARMMCCodeEmitter::
getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx,
    SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx+1);
  const MCOperand &MO2 = MI.getOperand(OpIdx+2);
  unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
  unsigned Rm = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg());
  unsigned ShImm = ARM_AM::getAM2Offset(MO2.getImm());
  bool isAdd = ARM_AM::getAM2Op(MO2.getImm()) == ARM_AM::add;
  ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(MO2.getImm());
  unsigned SBits = getShiftOp(ShOp);
  // While "lsr #32" and "asr #32" exist, they are encoded with a 0 in the shift
  // amount. However, it would be an easy mistake to make so check here.
  assert((ShImm & ~0x1f) == 0 && "Out of range shift amount");
  // {16-13} = Rn
  // {12} = isAdd
  // {11-0} = shifter
  // {3-0} = Rm
  // {4} = 0
  // {6-5} = type
  // {11-7} = imm
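  // For example, [r1, r2, lsl #3] with an add gives Rn = 1, Rm = 2,
  // type = 0b00, imm = 3 and the U bit set.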
  1111. uint32_t Binary = Rm;
  1112. Binary |= Rn << 13;
  1113. Binary |= SBits << 5;
  1114. Binary |= ShImm << 7;
  1115. if (isAdd)
  1116. Binary |= 1 << 12;
  1117. return Binary;
  1118. }
uint32_t ARMMCCodeEmitter::
getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const {
  // {13}     1 == imm12, 0 == Rm
  // {12}     isAdd
  // {11-0}   imm12/Rm
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx+1);
  unsigned Imm = MO1.getImm();
  bool isAdd = ARM_AM::getAM2Op(Imm) == ARM_AM::add;
  bool isReg = MO.getReg() != 0;
  uint32_t Binary = ARM_AM::getAM2Offset(Imm);
  // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm12
  if (isReg) {
    ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(Imm);
    Binary <<= 7;                    // Shift amount is bits [11:7]
    Binary |= getShiftOp(ShOp) << 5; // Shift type is bits [6:5]
    Binary |= CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); // Rm is bits [3:0]
  }
  return Binary | (isAdd << 12) | (isReg << 13);
}

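/// getPostIdxRegOpValue - Encode a post-indexed register offset (Rm) and its
/// add/subtract direction bit.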
uint32_t ARMMCCodeEmitter::
getPostIdxRegOpValue(const MCInst &MI, unsigned OpIdx,
                     SmallVectorImpl<MCFixup> &Fixups,
                     const MCSubtargetInfo &STI) const {
  // {4}      isAdd
  // {3-0}    Rm
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx+1);
  bool isAdd = MO1.getImm() != 0;
  return CTX.getRegisterInfo()->getEncodingValue(MO.getReg()) | (isAdd << 4);
}

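/// getAddrMode3OffsetOpValue - Encode an addrmode3 offset: either a register
/// or a split 8-bit immediate, together with the add/subtract bit.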
uint32_t ARMMCCodeEmitter::
getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const {
  // {9}      1 == imm8, 0 == Rm
  // {8}      isAdd
  // {7-4}    imm7_4/zero
  // {3-0}    imm3_0/Rm
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx+1);
  unsigned Imm = MO1.getImm();
  bool isAdd = ARM_AM::getAM3Op(Imm) == ARM_AM::add;
  bool isImm = MO.getReg() == 0;
  uint32_t Imm8 = ARM_AM::getAM3Offset(Imm);
  // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8
  if (!isImm)
    Imm8 = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
  return Imm8 | (isAdd << 8) | (isImm << 9);
}

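/// getAddrMode3OpValue - Encode the full addrmode3 operands: base register,
/// register or 8-bit immediate offset, and the add/subtract bit. A label
/// reference is emitted as a pc-relative fixup instead.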
uint32_t ARMMCCodeEmitter::
getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx,
                    SmallVectorImpl<MCFixup> &Fixups,
                    const MCSubtargetInfo &STI) const {
  // {13}     1 == imm8, 0 == Rm
  // {12-9}   Rn
  // {8}      isAdd
  // {7-4}    imm7_4/zero
  // {3-0}    imm3_0/Rm
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx+1);
  const MCOperand &MO2 = MI.getOperand(OpIdx+2);

  // If the first operand isn't a register, we have a label reference.
  if (!MO.isReg()) {
    unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.

    assert(MO.isExpr() && "Unexpected machine operand type!");
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = MCFixupKind(ARM::fixup_arm_pcrel_10_unscaled);
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

    ++MCNumCPRelocations;
    return (Rn << 9) | (1 << 13);
  }
  unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
  unsigned Imm = MO2.getImm();
  bool isAdd = ARM_AM::getAM3Op(Imm) == ARM_AM::add;
  bool isImm = MO1.getReg() == 0;
  uint32_t Imm8 = ARM_AM::getAM3Offset(Imm);
  // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8
  if (!isImm)
    Imm8 = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg());
  return (Rn << 9) | Imm8 | (isAdd << 8) | (isImm << 13);
}

/// getAddrModeThumbSPOpValue - Encode the t_addrmode_sp operands.
uint32_t ARMMCCodeEmitter::
getAddrModeThumbSPOpValue(const MCInst &MI, unsigned OpIdx,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const {
  // [SP, #imm]
  //   {7-0} = imm8
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  assert(MI.getOperand(OpIdx).getReg() == ARM::SP &&
         "Unexpected base register!");

  // The immediate is already shifted for the implicit zeroes, so no change
  // here.
  return MO1.getImm() & 0xff;
}

/// getAddrModeISOpValue - Encode the t_addrmode_is# operands.
uint32_t ARMMCCodeEmitter::
getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx,
                     SmallVectorImpl<MCFixup> &Fixups,
                     const MCSubtargetInfo &STI) const {
  // [Rn, #imm]
  //   {7-3} = imm5
  //   {2-0} = Rn
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
  unsigned Imm5 = MO1.getImm();
  return ((Imm5 & 0x1f) << 3) | Rn;
}

/// getAddrModePCOpValue - Return encoding for t_addrmode_pc operands.
uint32_t ARMMCCodeEmitter::
getAddrModePCOpValue(const MCInst &MI, unsigned OpIdx,
                     SmallVectorImpl<MCFixup> &Fixups,
                     const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_cp, Fixups,
                                    STI);
  return (MO.getImm() >> 2);
}

/// getAddrMode5OpValue - Return encoding info for 'reg +/- (imm8 << 2)'
/// operand.
uint32_t ARMMCCodeEmitter::
getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx,
                    SmallVectorImpl<MCFixup> &Fixups,
                    const MCSubtargetInfo &STI) const {
  // {12-9} = reg
  // {8}    = (U)nsigned (add == '1', sub == '0')
  // {7-0}  = imm8
  unsigned Reg, Imm8;
  bool isAdd;
  // If the first operand isn't a register, we have a label reference.
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (!MO.isReg()) {
    Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.
    Imm8 = 0;
    isAdd = false; // 'U' bit is handled as part of the fixup.

    assert(MO.isExpr() && "Unexpected machine operand type!");
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind;
    if (isThumb2(STI))
      Kind = MCFixupKind(ARM::fixup_t2_pcrel_10);
    else
      Kind = MCFixupKind(ARM::fixup_arm_pcrel_10);
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

    ++MCNumCPRelocations;
  } else {
    EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm8, Fixups, STI);
    isAdd = ARM_AM::getAM5Op(Imm8) == ARM_AM::add;
  }

  uint32_t Binary = ARM_AM::getAM5Offset(Imm8);
  // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
  if (isAdd)
    Binary |= (1 << 8);
  Binary |= (Reg << 9);
  return Binary;
}

/// getAddrMode5FP16OpValue - Return encoding info for 'reg +/- (imm8 << 1)'
/// operand.
uint32_t ARMMCCodeEmitter::
getAddrMode5FP16OpValue(const MCInst &MI, unsigned OpIdx,
                        SmallVectorImpl<MCFixup> &Fixups,
                        const MCSubtargetInfo &STI) const {
  // {12-9} = reg
  // {8}    = (U)nsigned (add == '1', sub == '0')
  // {7-0}  = imm8
  unsigned Reg, Imm8;
  bool isAdd;
  // If the first operand isn't a register, we have a label reference.
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (!MO.isReg()) {
    Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.
    Imm8 = 0;
    isAdd = false; // 'U' bit is handled as part of the fixup.

    assert(MO.isExpr() && "Unexpected machine operand type!");
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind;
    if (isThumb2(STI))
      Kind = MCFixupKind(ARM::fixup_t2_pcrel_9);
    else
      Kind = MCFixupKind(ARM::fixup_arm_pcrel_9);
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

    ++MCNumCPRelocations;
  } else {
    EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm8, Fixups, STI);
    isAdd = ARM_AM::getAM5Op(Imm8) == ARM_AM::add;
  }

  uint32_t Binary = ARM_AM::getAM5Offset(Imm8);
  // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
  if (isAdd)
    Binary |= (1 << 8);
  Binary |= (Reg << 9);
  return Binary;
}

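/// getSORegRegOpValue - Encode a register-shifted-register operand: Rm
/// shifted by the register Rs with the given shift type.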
unsigned ARMMCCodeEmitter::
getSORegRegOpValue(const MCInst &MI, unsigned OpIdx,
                   SmallVectorImpl<MCFixup> &Fixups,
                   const MCSubtargetInfo &STI) const {
  // Sub-operands are [reg, reg, imm]. The first register is Rm, the reg to be
  // shifted. The second is Rs, the amount to shift by, and the third specifies
  // the type of the shift.
  //
  // {3-0}  = Rm.
  // {4}    = 1
  // {6-5}  = type
  // {11-8} = Rs
  // {7}    = 0

  const MCOperand &MO  = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  const MCOperand &MO2 = MI.getOperand(OpIdx + 2);
  ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO2.getImm());

  // Encode Rm.
  unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());

  // Encode the shift opcode.
  unsigned SBits = 0;
  unsigned Rs = MO1.getReg();
  if (Rs) {
    // Set shift operand (bit[7:4]).
    // LSL - 0001
    // LSR - 0011
    // ASR - 0101
    // ROR - 0111
    switch (SOpc) {
    default: llvm_unreachable("Unknown shift opc!");
    case ARM_AM::lsl: SBits = 0x1; break;
    case ARM_AM::lsr: SBits = 0x3; break;
    case ARM_AM::asr: SBits = 0x5; break;
    case ARM_AM::ror: SBits = 0x7; break;
    }
  }

  Binary |= SBits << 4;

  // Encode the shift operation Rs.
  // Encode Rs bit[11:8].
  assert(ARM_AM::getSORegOffset(MO2.getImm()) == 0);
  return Binary | (CTX.getRegisterInfo()->getEncodingValue(Rs)
                   << ARMII::RegRsShift);
}

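/// getSORegImmOpValue - Encode an immediate-shifted-register operand: Rm
/// shifted by a constant amount with the given shift type.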
unsigned ARMMCCodeEmitter::
getSORegImmOpValue(const MCInst &MI, unsigned OpIdx,
                   SmallVectorImpl<MCFixup> &Fixups,
                   const MCSubtargetInfo &STI) const {
  // Sub-operands are [reg, imm]. The first register is Rm, the reg to be
  // shifted. The second is the amount to shift by.
  //
  // {3-0}  = Rm.
  // {4}    = 0
  // {6-5}  = type
  // {11-7} = imm

  const MCOperand &MO  = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO1.getImm());

  // Encode Rm.
  unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());

  // Encode the shift opcode.
  unsigned SBits = 0;

  // Set shift operand (bit[6:4]).
  // LSL - 000
  // LSR - 010
  // ASR - 100
  // ROR - 110
  // RRX - 110 and bit[11:8] clear.
  switch (SOpc) {
  default: llvm_unreachable("Unknown shift opc!");
  case ARM_AM::lsl: SBits = 0x0; break;
  case ARM_AM::lsr: SBits = 0x2; break;
  case ARM_AM::asr: SBits = 0x4; break;
  case ARM_AM::ror: SBits = 0x6; break;
  case ARM_AM::rrx:
    Binary |= 0x60;
    return Binary;
  }

  // Encode shift_imm bit[11:7].
  Binary |= SBits << 4;
  unsigned Offset = ARM_AM::getSORegOffset(MO1.getImm());
  assert(Offset < 32 && "Offset must be in range 0-31!");
  return Binary | (Offset << 7);
}

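/// getT2AddrModeSORegOpValue - Encode the Thumb2 scaled-register addressing
/// mode operands [Rn, Rm, imm2].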
unsigned ARMMCCodeEmitter::
getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const {
  const MCOperand &MO1 = MI.getOperand(OpNum);
  const MCOperand &MO2 = MI.getOperand(OpNum+1);
  const MCOperand &MO3 = MI.getOperand(OpNum+2);

  // Encoded as [Rn, Rm, imm].
  // FIXME: Needs fixup support.
  unsigned Value = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg());
  Value <<= 4;
  Value |= CTX.getRegisterInfo()->getEncodingValue(MO2.getReg());
  Value <<= 2;
  Value |= MO3.getImm();

  return Value;
}

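/// getT2AddrModeImmOpValue - Encode a Thumb2 base register plus a signed
/// immediate offset, parameterized by the immediate's width and scale.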
template<unsigned Bits, unsigned Shift>
unsigned ARMMCCodeEmitter::
getT2AddrModeImmOpValue(const MCInst &MI, unsigned OpNum,
                        SmallVectorImpl<MCFixup> &Fixups,
                        const MCSubtargetInfo &STI) const {
  const MCOperand &MO1 = MI.getOperand(OpNum);
  const MCOperand &MO2 = MI.getOperand(OpNum+1);

  // FIXME: Needs fixup support.
  unsigned Value = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg());

  // If the immediate is B bits long, we need B+1 bits in order
  // to represent the (inverse of the) sign bit.
  Value <<= (Bits + 1);
  int32_t tmp = (int32_t)MO2.getImm();
  if (tmp == INT32_MIN) { // represents subtracting zero rather than adding it
    tmp = 0;
  } else if (tmp < 0) {
    tmp = abs(tmp);
  } else {
    Value |= (1U << Bits); // Set the ADD bit
  }
  Value |= (tmp >> Shift) & ((1U << Bits) - 1);
  return Value;
}

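/// getT2AddrModeImm8OffsetOpValue - Encode a Thumb2 8-bit immediate offset
/// together with its add/subtract bit.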
unsigned ARMMCCodeEmitter::
getT2AddrModeImm8OffsetOpValue(const MCInst &MI, unsigned OpNum,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const {
  const MCOperand &MO1 = MI.getOperand(OpNum);

  // FIXME: Needs fixup support.
  unsigned Value = 0;
  int32_t tmp = (int32_t)MO1.getImm();
  if (tmp < 0)
    tmp = abs(tmp);
  else
    Value |= 256; // Set the ADD bit
  Value |= tmp & 255;
  return Value;
}

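/// getT2SORegOpValue - Encode a Thumb2 shifted-register operand: Rm shifted
/// by a constant amount with the given shift type.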
unsigned ARMMCCodeEmitter::
getT2SORegOpValue(const MCInst &MI, unsigned OpIdx,
                  SmallVectorImpl<MCFixup> &Fixups,
                  const MCSubtargetInfo &STI) const {
  // Sub-operands are [reg, imm]. The first register is Rm, the reg to be
  // shifted. The second is the amount to shift by.
  //
  // {3-0}  = Rm.
  // {4}    = 0
  // {6-5}  = type
  // {11-7} = imm

  const MCOperand &MO  = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO1.getImm());

  // Encode Rm.
  unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());

  // Encode the shift opcode.
  unsigned SBits = 0;
  // Set shift operand (bit[6:4]).
  // LSL - 000
  // LSR - 010
  // ASR - 100
  // ROR - 110
  switch (SOpc) {
  default: llvm_unreachable("Unknown shift opc!");
  case ARM_AM::lsl: SBits = 0x0; break;
  case ARM_AM::lsr: SBits = 0x2; break;
  case ARM_AM::asr: SBits = 0x4; break;
  case ARM_AM::rrx: [[fallthrough]];
  case ARM_AM::ror: SBits = 0x6; break;
  }

  Binary |= SBits << 4;
  if (SOpc == ARM_AM::rrx)
    return Binary;

  // Encode shift_imm bit[11:7].
  return Binary | (ARM_AM::getSORegOffset(MO1.getImm()) << 7);
}

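/// getBitfieldInvertedMaskOpValue - Encode an inverted bitfield mask (as used
/// by BFC/BFI) as its lsb and msb positions. For example, a field occupying
/// bits [11:4] is passed in as ~0x00000ff0 and encodes as (11 << 5) | 4.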
unsigned ARMMCCodeEmitter::
getBitfieldInvertedMaskOpValue(const MCInst &MI, unsigned Op,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const {
  // 10 bits. lower 5 bits are the lsb of the mask, high five bits are the
  // msb of the mask.
  const MCOperand &MO = MI.getOperand(Op);
  uint32_t v = ~MO.getImm();
  uint32_t lsb = countTrailingZeros(v);
  uint32_t msb = (32 - countLeadingZeros(v)) - 1;
  assert(v != 0 && lsb < 32 && msb < 32 && "Illegal bitfield mask!");
  return lsb | (msb << 5);
}

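/// getRegisterListOpValue - Encode a register list, either as a starting
/// register plus register count (VLDM/VSTM/VSCCLRM) or as a bitfield of GPRs
/// (LDM/STM).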
unsigned ARMMCCodeEmitter::
getRegisterListOpValue(const MCInst &MI, unsigned Op,
                       SmallVectorImpl<MCFixup> &Fixups,
                       const MCSubtargetInfo &STI) const {
  // VLDM/VSTM/VSCCLRM:
  //   {12-8} = Vd
  //   {7-0}  = Number of registers
  //
  // LDM/STM:
  //   {15-0} = Bitfield of GPRs.
  unsigned Reg = MI.getOperand(Op).getReg();
  bool SPRRegs = ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg);
  bool DPRRegs = ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg);

  unsigned Binary = 0;

  if (SPRRegs || DPRRegs) {
    // VLDM/VSTM/VSCCLRM
    unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg);
    unsigned NumRegs = (MI.getNumOperands() - Op) & 0xff;
    Binary |= (RegNo & 0x1f) << 8;

    // Ignore VPR
    if (MI.getOpcode() == ARM::VSCCLRMD || MI.getOpcode() == ARM::VSCCLRMS)
      --NumRegs;
    if (SPRRegs)
      Binary |= NumRegs;
    else
      Binary |= NumRegs * 2;
  } else {
    const MCRegisterInfo &MRI = *CTX.getRegisterInfo();
    assert(is_sorted(drop_begin(MI, Op),
                     [&](const MCOperand &LHS, const MCOperand &RHS) {
                       return MRI.getEncodingValue(LHS.getReg()) <
                              MRI.getEncodingValue(RHS.getReg());
                     }));
    for (unsigned I = Op, E = MI.getNumOperands(); I < E; ++I) {
      unsigned RegNo = MRI.getEncodingValue(MI.getOperand(I).getReg());
      Binary |= 1 << RegNo;
    }
  }

  return Binary;
}

/// getAddrMode6AddressOpValue - Encode an addrmode6 register number along
/// with the alignment operand.
unsigned ARMMCCodeEmitter::
getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op,
                           SmallVectorImpl<MCFixup> &Fixups,
                           const MCSubtargetInfo &STI) const {
  const MCOperand &Reg = MI.getOperand(Op);
  const MCOperand &Imm = MI.getOperand(Op + 1);

  unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg.getReg());
  unsigned Align = 0;

  switch (Imm.getImm()) {
  default: break;
  case 2:
  case 4:
  case 8:  Align = 0x01; break;
  case 16: Align = 0x02; break;
  case 32: Align = 0x03; break;
  }

  return RegNo | (Align << 4);
}

/// getAddrMode6OneLane32AddressOpValue - Encode an addrmode6 register number
/// along with the alignment operand for use in VST1 and VLD1 with size 32.
unsigned ARMMCCodeEmitter::
getAddrMode6OneLane32AddressOpValue(const MCInst &MI, unsigned Op,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const {
  const MCOperand &Reg = MI.getOperand(Op);
  const MCOperand &Imm = MI.getOperand(Op + 1);

  unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg.getReg());
  unsigned Align = 0;

  switch (Imm.getImm()) {
  default: break;
  case 8:
  case 16:
  case 32: // Default '0' value for invalid alignments of 8, 16, 32 bytes.
  case 2: Align = 0x00; break;
  case 4: Align = 0x03; break;
  }

  return RegNo | (Align << 4);
}

/// getAddrMode6DupAddressOpValue - Encode an addrmode6 register number and
/// alignment operand for use in VLD-dup instructions. This is the same as
/// getAddrMode6AddressOpValue except for the alignment encoding, which is
/// different for VLD4-dup.
unsigned ARMMCCodeEmitter::
getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const {
  const MCOperand &Reg = MI.getOperand(Op);
  const MCOperand &Imm = MI.getOperand(Op + 1);

  unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg.getReg());
  unsigned Align = 0;

  switch (Imm.getImm()) {
  default: break;
  case 2:
  case 4:
  case 8:  Align = 0x01; break;
  case 16: Align = 0x03; break;
  }

  return RegNo | (Align << 4);
}

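/// getAddrMode6OffsetOpValue - Encode the addrmode6 offset register; a zero
/// (absent) register is encoded as 0x0D.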
unsigned ARMMCCodeEmitter::
getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(Op);
  if (MO.getReg() == 0) return 0x0D;
  return CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
}

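/// getShiftRight8Imm and the three variants below encode shift-right
/// immediates that the instruction stores as (N - imm) for N = 8, 16, 32 and
/// 64 respectively; e.g. getShiftRight8Imm encodes a shift of 3 as 5.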
unsigned ARMMCCodeEmitter::
getShiftRight8Imm(const MCInst &MI, unsigned Op,
                  SmallVectorImpl<MCFixup> &Fixups,
                  const MCSubtargetInfo &STI) const {
  return 8 - MI.getOperand(Op).getImm();
}

unsigned ARMMCCodeEmitter::
getShiftRight16Imm(const MCInst &MI, unsigned Op,
                   SmallVectorImpl<MCFixup> &Fixups,
                   const MCSubtargetInfo &STI) const {
  return 16 - MI.getOperand(Op).getImm();
}

unsigned ARMMCCodeEmitter::
getShiftRight32Imm(const MCInst &MI, unsigned Op,
                   SmallVectorImpl<MCFixup> &Fixups,
                   const MCSubtargetInfo &STI) const {
  return 32 - MI.getOperand(Op).getImm();
}

unsigned ARMMCCodeEmitter::
getShiftRight64Imm(const MCInst &MI, unsigned Op,
                   SmallVectorImpl<MCFixup> &Fixups,
                   const MCSubtargetInfo &STI) const {
  return 64 - MI.getOperand(Op).getImm();
}

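/// encodeInstruction - Emit the binary encoding of an instruction. Pseudo
/// instructions are skipped, and 32-bit Thumb encodings are emitted with the
/// high-order halfword first.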
void ARMMCCodeEmitter::
encodeInstruction(const MCInst &MI, raw_ostream &OS,
                  SmallVectorImpl<MCFixup> &Fixups,
                  const MCSubtargetInfo &STI) const {
  // Pseudo instructions don't get encoded.
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  uint64_t TSFlags = Desc.TSFlags;
  if ((TSFlags & ARMII::FormMask) == ARMII::Pseudo)
    return;

  int Size;
  if (Desc.getSize() == 2 || Desc.getSize() == 4)
    Size = Desc.getSize();
  else
    llvm_unreachable("Unexpected instruction size!");

  uint32_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
  // Thumb 32-bit wide instructions need to emit the high order halfword
  // first.
  if (isThumb(STI) && Size == 4) {
    EmitConstant(Binary >> 16, 2, OS);
    EmitConstant(Binary & 0xffff, 2, OS);
  } else
    EmitConstant(Binary, Size, OS);
  ++MCNumEmitted; // Keep track of the # of mi's emitted.
}

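/// getBFTargetOpValue - Encode a branch target operand, either as a fixup of
/// the given kind or as a halfword-scaled (optionally negated) immediate.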
template <bool isNeg, ARM::Fixups fixup>
uint32_t
ARMMCCodeEmitter::getBFTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, fixup, Fixups, STI);
  return isNeg ? -(MO.getImm() >> 1) : (MO.getImm() >> 1);
}

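/// getBFAfterTargetOpValue - Encode the else-branch target of a BFCSEL as its
/// offset past the instruction's branch operand: either a fixup on the
/// difference expression or a single bit selecting a 2- or 4-byte distance.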
uint32_t
ARMMCCodeEmitter::getBFAfterTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  const MCOperand BranchMO = MI.getOperand(0);

  if (MO.isExpr()) {
    assert(BranchMO.isExpr());
    const MCExpr *DiffExpr = MCBinaryExpr::createSub(
        MO.getExpr(), BranchMO.getExpr(), CTX);
    MCFixupKind Kind = MCFixupKind(ARM::fixup_bfcsel_else_target);
    Fixups.push_back(llvm::MCFixup::create(0, DiffExpr, Kind, MI.getLoc()));
    return 0;
  }

  assert(MO.isImm() && BranchMO.isImm());
  int Diff = MO.getImm() - BranchMO.getImm();
  assert(Diff == 4 || Diff == 2);

  return Diff == 4;
}

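/// getVPTMaskOpValue - Convert a VPT block mask from true/false bits into the
/// series of invert/don't-invert bits used in the encoding.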
uint32_t ARMMCCodeEmitter::getVPTMaskOpValue(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Unexpected operand type!");

  int Value = MO.getImm();
  int Imm = 0;

  // VPT Masks are actually encoded as a series of invert/don't invert bits,
  // rather than true/false bits.
  unsigned PrevBit = 0;
  for (int i = 3; i >= 0; --i) {
    unsigned Bit = (Value >> i) & 1;

    // Check if we are at the end of the mask.
    if ((Value & ~(~0U << i)) == 0) {
      Imm |= (1 << i);
      break;
    }

    // Convert the bit in the mask based on the previous bit.
    if (Bit != PrevBit)
      Imm |= (1 << i);

    PrevBit = Bit;
  }

  return Imm;
}

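/// getRestrictedCondCodeOpValue - Encode a condition code drawn from the
/// restricted set HS/EQ, HI/NE, GE, LT, GT and LE accepted by this operand.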
uint32_t ARMMCCodeEmitter::getRestrictedCondCodeOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Unexpected operand type!");

  switch (MO.getImm()) {
  default:
    assert(0 && "Unexpected Condition!");
    return 0;
  case ARMCC::HS:
  case ARMCC::EQ:
    return 0;
  case ARMCC::HI:
  case ARMCC::NE:
    return 1;
  case ARMCC::GE:
    return 4;
  case ARMCC::LT:
    return 5;
  case ARMCC::GT:
    return 6;
  case ARMCC::LE:
    return 7;
  }
}

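/// getPowerTwoOpValue - Encode a power-of-two immediate as its base-2
/// logarithm, e.g. 16 encodes as 4.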
uint32_t ARMMCCodeEmitter::
getPowerTwoOpValue(const MCInst &MI, unsigned OpIdx,
                   SmallVectorImpl<MCFixup> &Fixups,
                   const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Unexpected operand type!");
  return countTrailingZeros((uint64_t)MO.getImm());
}

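/// getMVEPairVectorIndexOpValue - Encode an MVE vector-pair lane index
/// relative to the template parameter's starting index.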
template <unsigned start>
uint32_t ARMMCCodeEmitter::
getMVEPairVectorIndexOpValue(const MCInst &MI, unsigned OpIdx,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Unexpected operand type!");

  int Value = MO.getImm();
  return Value - start;
}

#include "ARMGenMCCodeEmitter.inc"

MCCodeEmitter *llvm::createARMLEMCCodeEmitter(const MCInstrInfo &MCII,
                                              MCContext &Ctx) {
  return new ARMMCCodeEmitter(MCII, Ctx, true);
}

MCCodeEmitter *llvm::createARMBEMCCodeEmitter(const MCInstrInfo &MCII,
                                              MCContext &Ctx) {
  return new ARMMCCodeEmitter(MCII, Ctx, false);
}