  1. //===-- ARM/ARMMCCodeEmitter.cpp - Convert ARM code to machine code -------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements the ARMMCCodeEmitter class.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #include "MCTargetDesc/ARMAddressingModes.h"
  13. #include "MCTargetDesc/ARMBaseInfo.h"
  14. #include "MCTargetDesc/ARMFixupKinds.h"
  15. #include "MCTargetDesc/ARMMCExpr.h"
  16. #include "llvm/ADT/APFloat.h"
  17. #include "llvm/ADT/APInt.h"
  18. #include "llvm/ADT/SmallVector.h"
  19. #include "llvm/ADT/Statistic.h"
  20. #include "llvm/ADT/Triple.h"
  21. #include "llvm/MC/MCCodeEmitter.h"
  22. #include "llvm/MC/MCContext.h"
  23. #include "llvm/MC/MCExpr.h"
  24. #include "llvm/MC/MCFixup.h"
  25. #include "llvm/MC/MCInst.h"
  26. #include "llvm/MC/MCInstrDesc.h"
  27. #include "llvm/MC/MCInstrInfo.h"
  28. #include "llvm/MC/MCRegisterInfo.h"
  29. #include "llvm/MC/MCSubtargetInfo.h"
  30. #include "llvm/Support/Casting.h"
  31. #include "llvm/Support/Compiler.h"
  32. #include "llvm/Support/ErrorHandling.h"
  33. #include "llvm/Support/MathExtras.h"
  34. #include "llvm/Support/raw_ostream.h"
  35. #include <algorithm>
  36. #include <cassert>
  37. #include <cstdint>
  38. #include <cstdlib>
  39. using namespace llvm;
  40. #define DEBUG_TYPE "mccodeemitter"
  41. STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
  42. STATISTIC(MCNumCPRelocations, "Number of constant pool relocations created.");
  43. namespace {
  44. class ARMMCCodeEmitter : public MCCodeEmitter {
  45. const MCInstrInfo &MCII;
  46. MCContext &CTX;
  47. bool IsLittleEndian;
  48. public:
  49. ARMMCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx, bool IsLittle)
  50. : MCII(mcii), CTX(ctx), IsLittleEndian(IsLittle) {
  51. }
  52. ARMMCCodeEmitter(const ARMMCCodeEmitter &) = delete;
  53. ARMMCCodeEmitter &operator=(const ARMMCCodeEmitter &) = delete;
  54. ~ARMMCCodeEmitter() override = default;
  55. bool isThumb(const MCSubtargetInfo &STI) const {
  56. return STI.getFeatureBits()[ARM::ModeThumb];
  57. }
  58. bool isThumb2(const MCSubtargetInfo &STI) const {
  59. return isThumb(STI) && STI.getFeatureBits()[ARM::FeatureThumb2];
  60. }
  61. bool isTargetMachO(const MCSubtargetInfo &STI) const {
  62. const Triple &TT = STI.getTargetTriple();
  63. return TT.isOSBinFormatMachO();
  64. }
  65. unsigned getMachineSoImmOpValue(unsigned SoImm) const;
  66. // getBinaryCodeForInstr - TableGen'erated function for getting the
  67. // binary encoding for an instruction.
  68. uint64_t getBinaryCodeForInstr(const MCInst &MI,
  69. SmallVectorImpl<MCFixup> &Fixups,
  70. const MCSubtargetInfo &STI) const;
  71. /// getMachineOpValue - Return binary encoding of operand. If the machine
  72. /// operand requires relocation, record the relocation and return zero.
  73. unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
  74. SmallVectorImpl<MCFixup> &Fixups,
  75. const MCSubtargetInfo &STI) const;
  76. /// getHiLo16ImmOpValue - Return the encoding for the hi / low 16-bit of
  77. /// the specified operand. This is used for operands with :lower16: and
  78. /// :upper16: prefixes.
  79. uint32_t getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
  80. SmallVectorImpl<MCFixup> &Fixups,
  81. const MCSubtargetInfo &STI) const;
  82. bool EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx,
  83. unsigned &Reg, unsigned &Imm,
  84. SmallVectorImpl<MCFixup> &Fixups,
  85. const MCSubtargetInfo &STI) const;
  86. /// getThumbBLTargetOpValue - Return encoding info for Thumb immediate
  87. /// BL branch target.
  88. uint32_t getThumbBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
  89. SmallVectorImpl<MCFixup> &Fixups,
  90. const MCSubtargetInfo &STI) const;
  91. /// getThumbBLXTargetOpValue - Return encoding info for Thumb immediate
  92. /// BLX branch target.
  93. uint32_t getThumbBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
  94. SmallVectorImpl<MCFixup> &Fixups,
  95. const MCSubtargetInfo &STI) const;
  96. /// getThumbBRTargetOpValue - Return encoding info for Thumb branch target.
  97. uint32_t getThumbBRTargetOpValue(const MCInst &MI, unsigned OpIdx,
  98. SmallVectorImpl<MCFixup> &Fixups,
  99. const MCSubtargetInfo &STI) const;
  100. /// getThumbBCCTargetOpValue - Return encoding info for Thumb branch target.
  101. uint32_t getThumbBCCTargetOpValue(const MCInst &MI, unsigned OpIdx,
  102. SmallVectorImpl<MCFixup> &Fixups,
  103. const MCSubtargetInfo &STI) const;
  104. /// getThumbCBTargetOpValue - Return encoding info for Thumb branch target.
  105. uint32_t getThumbCBTargetOpValue(const MCInst &MI, unsigned OpIdx,
  106. SmallVectorImpl<MCFixup> &Fixups,
  107. const MCSubtargetInfo &STI) const;
  108. /// getBranchTargetOpValue - Return encoding info for 24-bit immediate
  109. /// branch target.
  110. uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
  111. SmallVectorImpl<MCFixup> &Fixups,
  112. const MCSubtargetInfo &STI) const;
  113. /// getThumbBranchTargetOpValue - Return encoding info for 24-bit
  114. /// immediate Thumb2 direct branch target.
  115. uint32_t getThumbBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
  116. SmallVectorImpl<MCFixup> &Fixups,
  117. const MCSubtargetInfo &STI) const;
  118. /// getARMBranchTargetOpValue - Return encoding info for 24-bit immediate
  119. /// branch target.
  120. uint32_t getARMBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
  121. SmallVectorImpl<MCFixup> &Fixups,
  122. const MCSubtargetInfo &STI) const;
  123. uint32_t getARMBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
  124. SmallVectorImpl<MCFixup> &Fixups,
  125. const MCSubtargetInfo &STI) const;
  126. uint32_t getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
  127. SmallVectorImpl<MCFixup> &Fixups,
  128. const MCSubtargetInfo &STI) const;
  129. /// getAdrLabelOpValue - Return encoding info for 12-bit immediate
  130. /// ADR label target.
  131. uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
  132. SmallVectorImpl<MCFixup> &Fixups,
  133. const MCSubtargetInfo &STI) const;
  134. uint32_t getThumbAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
  135. SmallVectorImpl<MCFixup> &Fixups,
  136. const MCSubtargetInfo &STI) const;
  137. uint32_t getT2AdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
  138. SmallVectorImpl<MCFixup> &Fixups,
  139. const MCSubtargetInfo &STI) const;
  140. uint32_t getITMaskOpValue(const MCInst &MI, unsigned OpIdx,
  141. SmallVectorImpl<MCFixup> &Fixups,
  142. const MCSubtargetInfo &STI) const;
  143. /// getMVEShiftImmOpValue - Return encoding info for the 'sz:imm5'
  144. /// operand.
  145. uint32_t getMVEShiftImmOpValue(const MCInst &MI, unsigned OpIdx,
  146. SmallVectorImpl<MCFixup> &Fixups,
  147. const MCSubtargetInfo &STI) const;
  148. /// getAddrModeImm12OpValue - Return encoding info for 'reg +/- imm12'
  149. /// operand.
  150. uint32_t getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx,
  151. SmallVectorImpl<MCFixup> &Fixups,
  152. const MCSubtargetInfo &STI) const;
  153. /// getThumbAddrModeRegRegOpValue - Return encoding for 'reg + reg' operand.
  154. uint32_t getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx,
  155. SmallVectorImpl<MCFixup> &Fixups,
  156. const MCSubtargetInfo &STI) const;
  157. /// getT2AddrModeImm8s4OpValue - Return encoding info for 'reg +/- imm8<<2'
  158. /// operand.
  159. uint32_t getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx,
  160. SmallVectorImpl<MCFixup> &Fixups,
  161. const MCSubtargetInfo &STI) const;
  162. /// getT2AddrModeImm7s4OpValue - Return encoding info for 'reg +/- imm7<<2'
  163. /// operand.
  164. uint32_t getT2AddrModeImm7s4OpValue(const MCInst &MI, unsigned OpIdx,
  165. SmallVectorImpl<MCFixup> &Fixups,
  166. const MCSubtargetInfo &STI) const;
  167. /// getT2AddrModeImm0_1020s4OpValue - Return encoding info for 'reg + imm8<<2'
  168. /// operand.
  169. uint32_t getT2AddrModeImm0_1020s4OpValue(const MCInst &MI, unsigned OpIdx,
  170. SmallVectorImpl<MCFixup> &Fixups,
  171. const MCSubtargetInfo &STI) const;
  172. /// getT2ScaledImmOpValue - Return encoding info for '+/- immX<<Y'
  173. /// operand.
  174. template<unsigned Bits, unsigned Shift>
  175. uint32_t getT2ScaledImmOpValue(const MCInst &MI, unsigned OpIdx,
  176. SmallVectorImpl<MCFixup> &Fixups,
  177. const MCSubtargetInfo &STI) const;
  178. /// getMveAddrModeRQOpValue - Return encoding info for 'reg, vreg'
  179. /// operand.
  180. uint32_t getMveAddrModeRQOpValue(const MCInst &MI, unsigned OpIdx,
  181. SmallVectorImpl<MCFixup> &Fixups,
  182. const MCSubtargetInfo &STI) const;
  183. /// getMveAddrModeQOpValue - Return encoding info for 'reg +/- imm7<<{shift}'
  184. /// operand.
  185. template<int shift>
  186. uint32_t getMveAddrModeQOpValue(const MCInst &MI, unsigned OpIdx,
  187. SmallVectorImpl<MCFixup> &Fixups,
  188. const MCSubtargetInfo &STI) const;
  189. /// getLdStSORegOpValue - Return encoding info for 'reg +/- reg shop imm'
  190. /// operand as needed by load/store instructions.
  191. uint32_t getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx,
  192. SmallVectorImpl<MCFixup> &Fixups,
  193. const MCSubtargetInfo &STI) const;
  194. /// getLdStmModeOpValue - Return encoding for load/store multiple mode.
  195. uint32_t getLdStmModeOpValue(const MCInst &MI, unsigned OpIdx,
  196. SmallVectorImpl<MCFixup> &Fixups,
  197. const MCSubtargetInfo &STI) const {
  198. ARM_AM::AMSubMode Mode = (ARM_AM::AMSubMode)MI.getOperand(OpIdx).getImm();
  199. switch (Mode) {
  200. default: llvm_unreachable("Unknown addressing sub-mode!");
  201. case ARM_AM::da: return 0;
  202. case ARM_AM::ia: return 1;
  203. case ARM_AM::db: return 2;
  204. case ARM_AM::ib: return 3;
  205. }
  206. }
  207. /// getShiftOp - Return the shift opcode (bit[6:5]) of the immediate value.
  208. ///
  209. unsigned getShiftOp(ARM_AM::ShiftOpc ShOpc) const {
  210. switch (ShOpc) {
  211. case ARM_AM::no_shift:
  212. case ARM_AM::lsl: return 0;
  213. case ARM_AM::lsr: return 1;
  214. case ARM_AM::asr: return 2;
  215. case ARM_AM::ror:
  216. case ARM_AM::rrx: return 3;
  217. default:
  218. llvm_unreachable("Invalid ShiftOpc!");
  219. }
  220. }
  221. /// getAddrMode2OffsetOpValue - Return encoding for am2offset operands.
  222. uint32_t getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx,
  223. SmallVectorImpl<MCFixup> &Fixups,
  224. const MCSubtargetInfo &STI) const;
  225. /// getPostIdxRegOpValue - Return encoding for postidx_reg operands.
  226. uint32_t getPostIdxRegOpValue(const MCInst &MI, unsigned OpIdx,
  227. SmallVectorImpl<MCFixup> &Fixups,
  228. const MCSubtargetInfo &STI) const;
  229. /// getAddrMode3OffsetOpValue - Return encoding for am3offset operands.
  230. uint32_t getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx,
  231. SmallVectorImpl<MCFixup> &Fixups,
  232. const MCSubtargetInfo &STI) const;
  233. /// getAddrMode3OpValue - Return encoding for addrmode3 operands.
  234. uint32_t getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx,
  235. SmallVectorImpl<MCFixup> &Fixups,
  236. const MCSubtargetInfo &STI) const;
  237. /// getAddrModeThumbSPOpValue - Return encoding info for 'reg +/- imm12'
  238. /// operand.
  239. uint32_t getAddrModeThumbSPOpValue(const MCInst &MI, unsigned OpIdx,
  240. SmallVectorImpl<MCFixup> &Fixups,
  241. const MCSubtargetInfo &STI) const;
  242. /// getAddrModeISOpValue - Encode the t_addrmode_is# operands.
  243. uint32_t getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx,
  244. SmallVectorImpl<MCFixup> &Fixups,
  245. const MCSubtargetInfo &STI) const;
  246. /// getAddrModePCOpValue - Return encoding for t_addrmode_pc operands.
  247. uint32_t getAddrModePCOpValue(const MCInst &MI, unsigned OpIdx,
  248. SmallVectorImpl<MCFixup> &Fixups,
  249. const MCSubtargetInfo &STI) const;
  250. /// getAddrMode5OpValue - Return encoding info for 'reg +/- (imm8 << 2)' operand.
  251. uint32_t getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx,
  252. SmallVectorImpl<MCFixup> &Fixups,
  253. const MCSubtargetInfo &STI) const;
  254. /// getAddrMode5FP16OpValue - Return encoding info for 'reg +/- (imm8 << 1)' operand.
  255. uint32_t getAddrMode5FP16OpValue(const MCInst &MI, unsigned OpIdx,
  256. SmallVectorImpl<MCFixup> &Fixups,
  257. const MCSubtargetInfo &STI) const;
  258. /// getCCOutOpValue - Return encoding of the 's' bit.
  259. unsigned getCCOutOpValue(const MCInst &MI, unsigned Op,
  260. SmallVectorImpl<MCFixup> &Fixups,
  261. const MCSubtargetInfo &STI) const {
  262. // The operand is either reg0 or CPSR. The 's' bit is encoded as '0' or
  263. // '1' respectively.
  264. return MI.getOperand(Op).getReg() == ARM::CPSR;
  265. }
  266. unsigned getModImmOpValue(const MCInst &MI, unsigned Op,
  267. SmallVectorImpl<MCFixup> &Fixups,
  268. const MCSubtargetInfo &ST) const {
  269. const MCOperand &MO = MI.getOperand(Op);
  270. // Support for fixups (MCFixup)
  271. if (MO.isExpr()) {
  272. const MCExpr *Expr = MO.getExpr();
  273. // Fixups resolve to plain values that need to be encoded.
  274. MCFixupKind Kind = MCFixupKind(ARM::fixup_arm_mod_imm);
  275. Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
  276. return 0;
  277. }
  278. // Immediate is already in its encoded format
  279. return MO.getImm();
  280. }
  281. /// getT2SOImmOpValue - Return an encoded 12-bit shifted-immediate value.
  282. unsigned getT2SOImmOpValue(const MCInst &MI, unsigned Op,
  283. SmallVectorImpl<MCFixup> &Fixups,
  284. const MCSubtargetInfo &STI) const {
  285. const MCOperand &MO = MI.getOperand(Op);
  286. // Support for fixups (MCFixup)
  287. if (MO.isExpr()) {
  288. const MCExpr *Expr = MO.getExpr();
  289. // Fixups resolve to plain values that need to be encoded.
  290. MCFixupKind Kind = MCFixupKind(ARM::fixup_t2_so_imm);
  291. Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
  292. return 0;
  293. }
  294. unsigned SoImm = MO.getImm();
  295. unsigned Encoded = ARM_AM::getT2SOImmVal(SoImm);
  296. assert(Encoded != ~0U && "Not a Thumb2 so_imm value?");
  297. return Encoded;
  298. }
  299. unsigned getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum,
  300. SmallVectorImpl<MCFixup> &Fixups,
  301. const MCSubtargetInfo &STI) const;
  302. template<unsigned Bits, unsigned Shift>
  303. unsigned getT2AddrModeImmOpValue(const MCInst &MI, unsigned OpNum,
  304. SmallVectorImpl<MCFixup> &Fixups,
  305. const MCSubtargetInfo &STI) const;
  306. unsigned getT2AddrModeImm8OffsetOpValue(const MCInst &MI, unsigned OpNum,
  307. SmallVectorImpl<MCFixup> &Fixups,
  308. const MCSubtargetInfo &STI) const;
  309. /// getSORegOpValue - Return an encoded so_reg shifted register value.
  310. unsigned getSORegRegOpValue(const MCInst &MI, unsigned Op,
  311. SmallVectorImpl<MCFixup> &Fixups,
  312. const MCSubtargetInfo &STI) const;
  313. unsigned getSORegImmOpValue(const MCInst &MI, unsigned Op,
  314. SmallVectorImpl<MCFixup> &Fixups,
  315. const MCSubtargetInfo &STI) const;
  316. unsigned getT2SORegOpValue(const MCInst &MI, unsigned Op,
  317. SmallVectorImpl<MCFixup> &Fixups,
  318. const MCSubtargetInfo &STI) const;
  319. unsigned getNEONVcvtImm32OpValue(const MCInst &MI, unsigned Op,
  320. SmallVectorImpl<MCFixup> &Fixups,
  321. const MCSubtargetInfo &STI) const {
  322. return 64 - MI.getOperand(Op).getImm();
  323. }
  324. unsigned getBitfieldInvertedMaskOpValue(const MCInst &MI, unsigned Op,
  325. SmallVectorImpl<MCFixup> &Fixups,
  326. const MCSubtargetInfo &STI) const;
  327. unsigned getRegisterListOpValue(const MCInst &MI, unsigned Op,
  328. SmallVectorImpl<MCFixup> &Fixups,
  329. const MCSubtargetInfo &STI) const;
  330. unsigned getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op,
  331. SmallVectorImpl<MCFixup> &Fixups,
  332. const MCSubtargetInfo &STI) const;
  333. unsigned getAddrMode6OneLane32AddressOpValue(const MCInst &MI, unsigned Op,
  334. SmallVectorImpl<MCFixup> &Fixups,
  335. const MCSubtargetInfo &STI) const;
  336. unsigned getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op,
  337. SmallVectorImpl<MCFixup> &Fixups,
  338. const MCSubtargetInfo &STI) const;
  339. unsigned getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op,
  340. SmallVectorImpl<MCFixup> &Fixups,
  341. const MCSubtargetInfo &STI) const;
  342. unsigned getShiftRight8Imm(const MCInst &MI, unsigned Op,
  343. SmallVectorImpl<MCFixup> &Fixups,
  344. const MCSubtargetInfo &STI) const;
  345. unsigned getShiftRight16Imm(const MCInst &MI, unsigned Op,
  346. SmallVectorImpl<MCFixup> &Fixups,
  347. const MCSubtargetInfo &STI) const;
  348. unsigned getShiftRight32Imm(const MCInst &MI, unsigned Op,
  349. SmallVectorImpl<MCFixup> &Fixups,
  350. const MCSubtargetInfo &STI) const;
  351. unsigned getShiftRight64Imm(const MCInst &MI, unsigned Op,
  352. SmallVectorImpl<MCFixup> &Fixups,
  353. const MCSubtargetInfo &STI) const;
  354. unsigned getThumbSRImmOpValue(const MCInst &MI, unsigned Op,
  355. SmallVectorImpl<MCFixup> &Fixups,
  356. const MCSubtargetInfo &STI) const;
  357. unsigned NEONThumb2DataIPostEncoder(const MCInst &MI,
  358. unsigned EncodedValue,
  359. const MCSubtargetInfo &STI) const;
  360. unsigned NEONThumb2LoadStorePostEncoder(const MCInst &MI,
  361. unsigned EncodedValue,
  362. const MCSubtargetInfo &STI) const;
  363. unsigned NEONThumb2DupPostEncoder(const MCInst &MI,
  364. unsigned EncodedValue,
  365. const MCSubtargetInfo &STI) const;
  366. unsigned NEONThumb2V8PostEncoder(const MCInst &MI,
  367. unsigned EncodedValue,
  368. const MCSubtargetInfo &STI) const;
  369. unsigned VFPThumb2PostEncoder(const MCInst &MI,
  370. unsigned EncodedValue,
  371. const MCSubtargetInfo &STI) const;
  372. uint32_t getPowerTwoOpValue(const MCInst &MI, unsigned OpIdx,
  373. SmallVectorImpl<MCFixup> &Fixups,
  374. const MCSubtargetInfo &STI) const;
  375. void EmitByte(unsigned char C, raw_ostream &OS) const {
  376. OS << (char)C;
  377. }
  378. void EmitConstant(uint64_t Val, unsigned Size, raw_ostream &OS) const {
  379. // Output the constant in the byte order required by the target.
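// For example, Val = 0x11223344 with Size = 4 is emitted as the bytes
// 44 33 22 11 when IsLittleEndian is true, and as 11 22 33 44 otherwise.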
  380. for (unsigned i = 0; i != Size; ++i) {
  381. unsigned Shift = IsLittleEndian ? i * 8 : (Size - 1 - i) * 8;
  382. EmitByte((Val >> Shift) & 0xff, OS);
  383. }
  384. }
  385. void encodeInstruction(const MCInst &MI, raw_ostream &OS,
  386. SmallVectorImpl<MCFixup> &Fixups,
  387. const MCSubtargetInfo &STI) const override;
  388. template <bool isNeg, ARM::Fixups fixup>
  389. uint32_t getBFTargetOpValue(const MCInst &MI, unsigned OpIdx,
  390. SmallVectorImpl<MCFixup> &Fixups,
  391. const MCSubtargetInfo &STI) const;
  392. uint32_t getBFAfterTargetOpValue(const MCInst &MI, unsigned OpIdx,
  393. SmallVectorImpl<MCFixup> &Fixups,
  394. const MCSubtargetInfo &STI) const;
  395. uint32_t getVPTMaskOpValue(const MCInst &MI, unsigned OpIdx,
  396. SmallVectorImpl<MCFixup> &Fixups,
  397. const MCSubtargetInfo &STI) const;
  398. uint32_t getRestrictedCondCodeOpValue(const MCInst &MI, unsigned OpIdx,
  399. SmallVectorImpl<MCFixup> &Fixups,
  400. const MCSubtargetInfo &STI) const;
  401. template <unsigned size>
  402. uint32_t getMVEPairVectorIndexOpValue(const MCInst &MI, unsigned OpIdx,
  403. SmallVectorImpl<MCFixup> &Fixups,
  404. const MCSubtargetInfo &STI) const;
  405. };
  406. } // end anonymous namespace
  407. /// NEONThumb2DataIPostEncoder - Post-process encoded NEON data-processing
  408. /// instructions, and rewrite them to their Thumb2 form if we are currently in
  409. /// Thumb2 mode.
  410. unsigned ARMMCCodeEmitter::NEONThumb2DataIPostEncoder(const MCInst &MI,
  411. unsigned EncodedValue,
  412. const MCSubtargetInfo &STI) const {
  413. if (isThumb2(STI)) {
  414. // NEON Thumb2 data-processing encodings are very simple: bit 24 is moved
  415. // to bit 12 of the high half-word (i.e. bit 28), and bits 27-24 are
  416. // set to 1111.
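// For example, an encoded value of 0xF2000840 (bit 24 clear) becomes
// 0xEF000840, and 0xF3000840 (bit 24 set) becomes 0xFF000840.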
  417. unsigned Bit24 = EncodedValue & 0x01000000;
  418. unsigned Bit28 = Bit24 << 4;
  419. EncodedValue &= 0xEFFFFFFF;
  420. EncodedValue |= Bit28;
  421. EncodedValue |= 0x0F000000;
  422. }
  423. return EncodedValue;
  424. }
  425. /// NEONThumb2LoadStorePostEncoder - Post-process encoded NEON load/store
  426. /// instructions, and rewrite them to their Thumb2 form if we are currently in
  427. /// Thumb2 mode.
  428. unsigned ARMMCCodeEmitter::NEONThumb2LoadStorePostEncoder(const MCInst &MI,
  429. unsigned EncodedValue,
  430. const MCSubtargetInfo &STI) const {
  431. if (isThumb2(STI)) {
  432. EncodedValue &= 0xF0FFFFFF;
  433. EncodedValue |= 0x09000000;
  434. }
  435. return EncodedValue;
  436. }
  437. /// NEONThumb2DupPostEncoder - Post-process encoded NEON vdup
  438. /// instructions, and rewrite them to their Thumb2 form if we are currently in
  439. /// Thumb2 mode.
  440. unsigned ARMMCCodeEmitter::NEONThumb2DupPostEncoder(const MCInst &MI,
  441. unsigned EncodedValue,
  442. const MCSubtargetInfo &STI) const {
  443. if (isThumb2(STI)) {
  444. EncodedValue &= 0x00FFFFFF;
  445. EncodedValue |= 0xEE000000;
  446. }
  447. return EncodedValue;
  448. }
  449. /// Post-process encoded NEON v8 instructions, and rewrite them to Thumb2 form
  450. /// if we are in Thumb2.
  451. unsigned ARMMCCodeEmitter::NEONThumb2V8PostEncoder(const MCInst &MI,
  452. unsigned EncodedValue,
  453. const MCSubtargetInfo &STI) const {
  454. if (isThumb2(STI)) {
  455. EncodedValue |= 0xC000000; // Set bits 27-26
  456. }
  457. return EncodedValue;
  458. }
  459. /// VFPThumb2PostEncoder - Post-process encoded VFP instructions and rewrite
  460. /// them to their Thumb2 form if we are currently in Thumb2 mode.
  461. unsigned ARMMCCodeEmitter::
  462. VFPThumb2PostEncoder(const MCInst &MI, unsigned EncodedValue,
  463. const MCSubtargetInfo &STI) const {
  464. if (isThumb2(STI)) {
  465. EncodedValue &= 0x0FFFFFFF;
  466. EncodedValue |= 0xE0000000;
  467. }
  468. return EncodedValue;
  469. }
  470. /// getMachineOpValue - Return binary encoding of operand. If the machine
  471. /// operand requires relocation, record the relocation and return zero.
  472. unsigned ARMMCCodeEmitter::
  473. getMachineOpValue(const MCInst &MI, const MCOperand &MO,
  474. SmallVectorImpl<MCFixup> &Fixups,
  475. const MCSubtargetInfo &STI) const {
  476. if (MO.isReg()) {
  477. unsigned Reg = MO.getReg();
  478. unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg);
  479. // In NEON, Q registers are encoded as 2x their register number,
  480. // because they're using the same indices as the D registers they
  481. // overlap. In MVE, there are no 64-bit vector instructions, so
  482. // the encodings all refer to Q-registers by their literal
  483. // register number.
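// For example, Q8 overlaps D16/D17, so a NEON encoding refers to it as 16,
// while an MVE encoding refers to it directly as 8.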
  484. if (STI.getFeatureBits()[ARM::HasMVEIntegerOps])
  485. return RegNo;
  486. switch (Reg) {
  487. default:
  488. return RegNo;
  489. case ARM::Q0: case ARM::Q1: case ARM::Q2: case ARM::Q3:
  490. case ARM::Q4: case ARM::Q5: case ARM::Q6: case ARM::Q7:
  491. case ARM::Q8: case ARM::Q9: case ARM::Q10: case ARM::Q11:
  492. case ARM::Q12: case ARM::Q13: case ARM::Q14: case ARM::Q15:
  493. return 2 * RegNo;
  494. }
  495. } else if (MO.isImm()) {
  496. return static_cast<unsigned>(MO.getImm());
  497. } else if (MO.isDFPImm()) {
  498. return static_cast<unsigned>(APFloat(bit_cast<double>(MO.getDFPImm()))
  499. .bitcastToAPInt()
  500. .getHiBits(32)
  501. .getLimitedValue());
  502. }
  503. llvm_unreachable("Unable to encode MCOperand!");
  504. }
  505. /// getAddrModeImmOpValue - Return encoding info for 'reg +/- imm' operand.
  506. bool ARMMCCodeEmitter::
  507. EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx, unsigned &Reg,
  508. unsigned &Imm, SmallVectorImpl<MCFixup> &Fixups,
  509. const MCSubtargetInfo &STI) const {
  510. const MCOperand &MO = MI.getOperand(OpIdx);
  511. const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  512. Reg = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
  513. int32_t SImm = MO1.getImm();
  514. bool isAdd = true;
  515. // Special value for #-0
  516. if (SImm == INT32_MIN) {
  517. SImm = 0;
  518. isAdd = false;
  519. }
  520. // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
  521. if (SImm < 0) {
  522. SImm = -SImm;
  523. isAdd = false;
  524. }
  525. Imm = SImm;
  526. return isAdd;
  527. }
  528. /// getBranchTargetOpValue - Helper function to get the branch target operand,
  529. /// which is either an immediate or requires a fixup.
  530. static uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
  531. unsigned FixupKind,
  532. SmallVectorImpl<MCFixup> &Fixups,
  533. const MCSubtargetInfo &STI) {
  534. const MCOperand &MO = MI.getOperand(OpIdx);
  535. // If the destination is an immediate, we have nothing to do.
  536. if (MO.isImm()) return MO.getImm();
  537. assert(MO.isExpr() && "Unexpected branch target type!");
  538. const MCExpr *Expr = MO.getExpr();
  539. MCFixupKind Kind = MCFixupKind(FixupKind);
  540. Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
  541. // All of the information is in the fixup.
  542. return 0;
  543. }
  544. // Thumb BL and BLX use a strange offset encoding where bits 22 and 21 are
  545. // determined by negating them and XOR'ing them with bit 23.
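// (In the Thumb BL/BLX encodings, I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S);
// this routine derives the stored J1/J2 bits from the raw offset bits.)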
  546. static int32_t encodeThumbBLOffset(int32_t offset) {
  547. offset >>= 1;
  548. uint32_t S = (offset & 0x800000) >> 23;
  549. uint32_t J1 = (offset & 0x400000) >> 22;
  550. uint32_t J2 = (offset & 0x200000) >> 21;
  551. J1 = (~J1 & 0x1);
  552. J2 = (~J2 & 0x1);
  553. J1 ^= S;
  554. J2 ^= S;
  555. offset &= ~0x600000;
  556. offset |= J1 << 22;
  557. offset |= J2 << 21;
  558. return offset;
  559. }
  560. /// getThumbBLTargetOpValue - Return encoding info for immediate branch target.
  561. uint32_t ARMMCCodeEmitter::
  562. getThumbBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
  563. SmallVectorImpl<MCFixup> &Fixups,
  564. const MCSubtargetInfo &STI) const {
  565. const MCOperand MO = MI.getOperand(OpIdx);
  566. if (MO.isExpr())
  567. return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_bl,
  568. Fixups, STI);
  569. return encodeThumbBLOffset(MO.getImm());
  570. }
  571. /// getThumbBLXTargetOpValue - Return encoding info for Thumb immediate
  572. /// BLX branch target.
  573. uint32_t ARMMCCodeEmitter::
  574. getThumbBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
  575. SmallVectorImpl<MCFixup> &Fixups,
  576. const MCSubtargetInfo &STI) const {
  577. const MCOperand MO = MI.getOperand(OpIdx);
  578. if (MO.isExpr())
  579. return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_blx,
  580. Fixups, STI);
  581. return encodeThumbBLOffset(MO.getImm());
  582. }
  583. /// getThumbBRTargetOpValue - Return encoding info for Thumb branch target.
  584. uint32_t ARMMCCodeEmitter::
  585. getThumbBRTargetOpValue(const MCInst &MI, unsigned OpIdx,
  586. SmallVectorImpl<MCFixup> &Fixups,
  587. const MCSubtargetInfo &STI) const {
  588. const MCOperand MO = MI.getOperand(OpIdx);
  589. if (MO.isExpr())
  590. return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_br,
  591. Fixups, STI);
  592. return (MO.getImm() >> 1);
  593. }
  594. /// getThumbBCCTargetOpValue - Return encoding info for Thumb branch target.
  595. uint32_t ARMMCCodeEmitter::
  596. getThumbBCCTargetOpValue(const MCInst &MI, unsigned OpIdx,
  597. SmallVectorImpl<MCFixup> &Fixups,
  598. const MCSubtargetInfo &STI) const {
  599. const MCOperand MO = MI.getOperand(OpIdx);
  600. if (MO.isExpr())
  601. return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_bcc,
  602. Fixups, STI);
  603. return (MO.getImm() >> 1);
  604. }
  605. /// getThumbCBTargetOpValue - Return encoding info for Thumb branch target.
  606. uint32_t ARMMCCodeEmitter::
  607. getThumbCBTargetOpValue(const MCInst &MI, unsigned OpIdx,
  608. SmallVectorImpl<MCFixup> &Fixups,
  609. const MCSubtargetInfo &STI) const {
  610. const MCOperand MO = MI.getOperand(OpIdx);
  611. if (MO.isExpr())
  612. return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_cb, Fixups, STI);
  613. return (MO.getImm() >> 1);
  614. }
  615. /// Return true if this branch has a non-always predication
  616. static bool HasConditionalBranch(const MCInst &MI) {
  617. int NumOp = MI.getNumOperands();
  618. if (NumOp >= 2) {
  619. for (int i = 0; i < NumOp-1; ++i) {
  620. const MCOperand &MCOp1 = MI.getOperand(i);
  621. const MCOperand &MCOp2 = MI.getOperand(i + 1);
  622. if (MCOp1.isImm() && MCOp2.isReg() &&
  623. (MCOp2.getReg() == 0 || MCOp2.getReg() == ARM::CPSR)) {
  624. if (ARMCC::CondCodes(MCOp1.getImm()) != ARMCC::AL)
  625. return true;
  626. }
  627. }
  628. }
  629. return false;
  630. }
  631. /// getBranchTargetOpValue - Return encoding info for 24-bit immediate branch
  632. /// target.
  633. uint32_t ARMMCCodeEmitter::
  634. getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
  635. SmallVectorImpl<MCFixup> &Fixups,
  636. const MCSubtargetInfo &STI) const {
  637. // FIXME: This really, really shouldn't use TargetMachine. We don't want
  638. // coupling between MC and TM anywhere we can help it.
  639. if (isThumb2(STI))
  640. return
  641. ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_condbranch, Fixups, STI);
  642. return getARMBranchTargetOpValue(MI, OpIdx, Fixups, STI);
  643. }
  644. /// getARMBranchTargetOpValue - Return encoding info for 24-bit immediate
  645. /// branch target.
  646. uint32_t ARMMCCodeEmitter::
  647. getARMBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
  648. SmallVectorImpl<MCFixup> &Fixups,
  649. const MCSubtargetInfo &STI) const {
  650. const MCOperand MO = MI.getOperand(OpIdx);
  651. if (MO.isExpr()) {
  652. if (HasConditionalBranch(MI))
  653. return ::getBranchTargetOpValue(MI, OpIdx,
  654. ARM::fixup_arm_condbranch, Fixups, STI);
  655. return ::getBranchTargetOpValue(MI, OpIdx,
  656. ARM::fixup_arm_uncondbranch, Fixups, STI);
  657. }
  658. return MO.getImm() >> 2;
  659. }
  660. uint32_t ARMMCCodeEmitter::
  661. getARMBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
  662. SmallVectorImpl<MCFixup> &Fixups,
  663. const MCSubtargetInfo &STI) const {
  664. const MCOperand MO = MI.getOperand(OpIdx);
  665. if (MO.isExpr()) {
  666. if (HasConditionalBranch(MI))
  667. return ::getBranchTargetOpValue(MI, OpIdx,
  668. ARM::fixup_arm_condbl, Fixups, STI);
  669. return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_uncondbl, Fixups, STI);
  670. }
  671. return MO.getImm() >> 2;
  672. }
  673. uint32_t ARMMCCodeEmitter::
  674. getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
  675. SmallVectorImpl<MCFixup> &Fixups,
  676. const MCSubtargetInfo &STI) const {
  677. const MCOperand MO = MI.getOperand(OpIdx);
  678. if (MO.isExpr())
  679. return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_blx, Fixups, STI);
  680. return MO.getImm() >> 1;
  681. }
  682. /// getThumbBranchTargetOpValue - Return encoding info for 24-bit immediate
  683. /// Thumb2 direct branch target.
  684. uint32_t ARMMCCodeEmitter::getThumbBranchTargetOpValue(
  685. const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
  686. const MCSubtargetInfo &STI) const {
  687. unsigned Val = 0;
  688. const MCOperand MO = MI.getOperand(OpIdx);
  689. if (MO.isExpr())
  690. return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_uncondbranch, Fixups, STI);
  691. else
  692. Val = MO.getImm() >> 1;
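// Bits 23, 22 and 21 of the halved offset are S, I1 and I2; the instruction
// stores J1 = NOT(I1 EOR S) and J2 = NOT(I2 EOR S), which is what the
// bit flipping below computes.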
  693. bool I = (Val & 0x800000);
  694. bool J1 = (Val & 0x400000);
  695. bool J2 = (Val & 0x200000);
  696. if (I ^ J1)
  697. Val &= ~0x400000;
  698. else
  699. Val |= 0x400000;
  700. if (I ^ J2)
  701. Val &= ~0x200000;
  702. else
  703. Val |= 0x200000;
  704. return Val;
  705. }
  706. /// getAdrLabelOpValue - Return encoding info for 12-bit shifted-immediate
  707. /// ADR label target.
  708. uint32_t ARMMCCodeEmitter::
  709. getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
  710. SmallVectorImpl<MCFixup> &Fixups,
  711. const MCSubtargetInfo &STI) const {
  712. const MCOperand MO = MI.getOperand(OpIdx);
  713. if (MO.isExpr())
  714. return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_adr_pcrel_12,
  715. Fixups, STI);
  716. int64_t offset = MO.getImm();
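// Bit 13 of the returned value selects add, bit 12 selects sub. If the offset
// is not encodable as a rotated 8-bit immediate, its negation is tried with
// the opposite add/sub selection.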
  717. uint32_t Val = 0x2000;
  718. int SoImmVal;
  719. if (offset == INT32_MIN) {
  720. Val = 0x1000;
  721. SoImmVal = 0;
  722. } else if (offset < 0) {
  723. Val = 0x1000;
  724. offset *= -1;
  725. SoImmVal = ARM_AM::getSOImmVal(offset);
  726. if (SoImmVal == -1) {
  727. Val = 0x2000;
  728. offset *= -1;
  729. SoImmVal = ARM_AM::getSOImmVal(offset);
  730. }
  731. } else {
  732. SoImmVal = ARM_AM::getSOImmVal(offset);
  733. if (SoImmVal == -1) {
  734. Val = 0x1000;
  735. offset *= -1;
  736. SoImmVal = ARM_AM::getSOImmVal(offset);
  737. }
  738. }
  739. assert(SoImmVal != -1 && "Not a valid so_imm value!");
  740. Val |= SoImmVal;
  741. return Val;
  742. }
  743. /// getT2AdrLabelOpValue - Return encoding info for 12-bit immediate ADR label
  744. /// target.
  745. uint32_t ARMMCCodeEmitter::
  746. getT2AdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
  747. SmallVectorImpl<MCFixup> &Fixups,
  748. const MCSubtargetInfo &STI) const {
  749. const MCOperand MO = MI.getOperand(OpIdx);
  750. if (MO.isExpr())
  751. return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_adr_pcrel_12,
  752. Fixups, STI);
  753. int32_t Val = MO.getImm();
  754. if (Val == INT32_MIN)
  755. Val = 0x1000;
  756. else if (Val < 0) {
  757. Val *= -1;
  758. Val |= 0x1000;
  759. }
  760. return Val;
  761. }
  762. /// getITMaskOpValue - Return the architectural encoding of an IT
  763. /// predication mask, given the MCOperand format.
  764. uint32_t ARMMCCodeEmitter::
  765. getITMaskOpValue(const MCInst &MI, unsigned OpIdx,
  766. SmallVectorImpl<MCFixup> &Fixups,
  767. const MCSubtargetInfo &STI) const {
  768. const MCOperand MaskMO = MI.getOperand(OpIdx);
  769. assert(MaskMO.isImm() && "Unexpected operand type!");
  770. unsigned Mask = MaskMO.getImm();
  771. // IT masks are encoded as a sequence of replacement low-order bits
  772. // for the condition code. So if the low bit of the starting
  773. // condition code is 1, then we have to flip all the bits above the
  774. // terminating bit (which is the lowest 1 bit).
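// For example, with Mask = 0b0100 and an odd condition code: LowBit = 0b0100,
// BitsAboveLowBit = 0b1000, and the returned mask is 0b1100.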
  775. assert(OpIdx > 0 && "IT mask appears first!");
  776. const MCOperand CondMO = MI.getOperand(OpIdx-1);
  777. assert(CondMO.isImm() && "Unexpected operand type!");
  778. if (CondMO.getImm() & 1) {
  779. unsigned LowBit = Mask & -Mask;
  780. unsigned BitsAboveLowBit = 0xF & (-LowBit << 1);
  781. Mask ^= BitsAboveLowBit;
  782. }
  783. return Mask;
  784. }
  785. /// getThumbAdrLabelOpValue - Return encoding info for 8-bit immediate ADR label
  786. /// target.
  787. uint32_t ARMMCCodeEmitter::
  788. getThumbAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
  789. SmallVectorImpl<MCFixup> &Fixups,
  790. const MCSubtargetInfo &STI) const {
  791. const MCOperand MO = MI.getOperand(OpIdx);
  792. if (MO.isExpr())
  793. return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_thumb_adr_pcrel_10,
  794. Fixups, STI);
  795. return MO.getImm();
  796. }
  797. /// getThumbAddrModeRegRegOpValue - Return encoding info for 'reg + reg'
  798. /// operand.
  799. uint32_t ARMMCCodeEmitter::
  800. getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx,
  801. SmallVectorImpl<MCFixup> &,
  802. const MCSubtargetInfo &STI) const {
  803. // [Rn, Rm]
  804. // {5-3} = Rm
  805. // {2-0} = Rn
  806. const MCOperand &MO1 = MI.getOperand(OpIdx);
  807. const MCOperand &MO2 = MI.getOperand(OpIdx + 1);
  808. unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg());
  809. unsigned Rm = CTX.getRegisterInfo()->getEncodingValue(MO2.getReg());
  810. return (Rm << 3) | Rn;
  811. }
  812. /// getMVEShiftImmOpValue - Return encoding info for the 'sz:imm5'
  813. /// operand.
  814. uint32_t
  815. ARMMCCodeEmitter::getMVEShiftImmOpValue(const MCInst &MI, unsigned OpIdx,
  816. SmallVectorImpl<MCFixup> &Fixups,
  817. const MCSubtargetInfo &STI) const {
  818. // {4-0} = szimm5
  819. // The value we are trying to encode is an immediate between either the
  820. // range of [1-7] or [1-15] depending on whether we are dealing with the
  821. // u8/s8 or the u16/s16 variants respectively.
  822. // The value is encoded as follows: if ShiftImm is a value within one of
  823. // those ranges, then the encoding is szimm5 = ShiftImm + size, where size
  824. // is either 8 or 16.
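// For example, a u16 variant with ShiftImm = 3 encodes szimm5 = 16 + 3 = 19.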
  825. unsigned Size, ShiftImm;
  826. switch (MI.getOpcode()) {
  827. case ARM::MVE_VSHLL_imms16bh:
  828. case ARM::MVE_VSHLL_imms16th:
  829. case ARM::MVE_VSHLL_immu16bh:
  830. case ARM::MVE_VSHLL_immu16th:
  831. Size = 16;
  832. break;
  833. case ARM::MVE_VSHLL_imms8bh:
  834. case ARM::MVE_VSHLL_imms8th:
  835. case ARM::MVE_VSHLL_immu8bh:
  836. case ARM::MVE_VSHLL_immu8th:
  837. Size = 8;
  838. break;
  839. default:
  840. llvm_unreachable("Use of operand not supported by this instruction");
  841. }
  842. ShiftImm = MI.getOperand(OpIdx).getImm();
  843. return Size + ShiftImm;
  844. }
  845. /// getAddrModeImm12OpValue - Return encoding info for 'reg +/- imm12' operand.
  846. uint32_t ARMMCCodeEmitter::
  847. getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx,
  848. SmallVectorImpl<MCFixup> &Fixups,
  849. const MCSubtargetInfo &STI) const {
  850. // {17-13} = reg
  851. // {12} = (U)nsigned (add == '1', sub == '0')
  852. // {11-0} = imm12
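// For example, [r3, #-8] gives Reg = 3, Imm12 = 8 and isAdd = false, so the
// returned value is (3 << 13) | 8 = 0x6008.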
  853. unsigned Reg = 0, Imm12 = 0;
  854. bool isAdd = true;
  855. // If the first operand isn't a register, we have a label reference.
  856. const MCOperand &MO = MI.getOperand(OpIdx);
  857. if (MO.isReg()) {
  858. const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  859. if (MO1.isImm()) {
  860. isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm12, Fixups, STI);
  861. } else if (MO1.isExpr()) {
  862. assert(!isThumb(STI) && !isThumb2(STI) &&
  863. "Thumb mode requires different encoding");
  864. Reg = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
  865. isAdd = false; // 'U' bit is set as part of the fixup.
  866. MCFixupKind Kind = MCFixupKind(ARM::fixup_arm_ldst_abs_12);
  867. Fixups.push_back(MCFixup::create(0, MO1.getExpr(), Kind, MI.getLoc()));
  868. }
  869. } else if (MO.isExpr()) {
  870. Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.
  871. isAdd = false; // 'U' bit is set as part of the fixup.
  872. MCFixupKind Kind;
  873. if (isThumb2(STI))
  874. Kind = MCFixupKind(ARM::fixup_t2_ldst_pcrel_12);
  875. else
  876. Kind = MCFixupKind(ARM::fixup_arm_ldst_pcrel_12);
  877. Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
  878. ++MCNumCPRelocations;
  879. } else {
  880. Reg = ARM::PC;
  881. int32_t Offset = MO.getImm();
  882. if (Offset == INT32_MIN) {
  883. Offset = 0;
  884. isAdd = false;
  885. } else if (Offset < 0) {
  886. Offset *= -1;
  887. isAdd = false;
  888. }
  889. Imm12 = Offset;
  890. }
  891. uint32_t Binary = Imm12 & 0xfff;
  892. // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
  893. if (isAdd)
  894. Binary |= (1 << 12);
  895. Binary |= (Reg << 13);
  896. return Binary;
  897. }
  898. template<unsigned Bits, unsigned Shift>
  899. uint32_t ARMMCCodeEmitter::
  900. getT2ScaledImmOpValue(const MCInst &MI, unsigned OpIdx,
  901. SmallVectorImpl<MCFixup> &Fixups,
  902. const MCSubtargetInfo &STI) const {
  903. // FIXME: The immediate operand should have already been encoded like this
  904. // before ever getting here. The encoder method should just need to combine
  905. // the MI operands for the register and the offset into a single
  906. // representation for the complex operand in the .td file. This isn't just
  907. // style, unfortunately. As-is, we can't represent the distinct encoding
  908. // for #-0.
  909. // {Bits} = (U)nsigned (add == '1', sub == '0')
  910. // {(Bits-1)-0} = immediate
  911. int32_t Imm = MI.getOperand(OpIdx).getImm();
  912. bool isAdd = Imm >= 0;
  913. // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
  914. if (Imm < 0)
  915. Imm = -(uint32_t)Imm;
  916. Imm >>= Shift;
  917. uint32_t Binary = Imm & ((1U << Bits) - 1);
  918. // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
  919. if (isAdd)
  920. Binary |= (1U << Bits);
  921. return Binary;
  922. }
  923. /// getMveAddrModeRQOpValue - Return encoding info for 'reg, vreg'
  924. /// operand.
  925. uint32_t ARMMCCodeEmitter::
  926. getMveAddrModeRQOpValue(const MCInst &MI, unsigned OpIdx,
  927. SmallVectorImpl<MCFixup> &Fixups,
  928. const MCSubtargetInfo &STI) const {
  929. // {6-3} Rn
  930. // {2-0} Qm
  931. const MCOperand &M0 = MI.getOperand(OpIdx);
  932. const MCOperand &M1 = MI.getOperand(OpIdx + 1);
  933. unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(M0.getReg());
  934. unsigned Qm = CTX.getRegisterInfo()->getEncodingValue(M1.getReg());
  935. assert(Qm < 8 && "Qm is supposed to be encodable in 3 bits");
  936. return (Rn << 3) | Qm;
  937. }
  938. /// getMveAddrModeQOpValue - Return encoding info for 'reg +/- imm7<<{shift}'
  939. /// operand.
  940. template<int shift>
  941. uint32_t ARMMCCodeEmitter::
  942. getMveAddrModeQOpValue(const MCInst &MI, unsigned OpIdx,
  943. SmallVectorImpl<MCFixup> &Fixups,
  944. const MCSubtargetInfo &STI) const {
  945. // {10-8} Qm
  946. // {7-0} Imm
  947. const MCOperand &M0 = MI.getOperand(OpIdx);
  948. const MCOperand &M1 = MI.getOperand(OpIdx + 1);
  949. unsigned Qm = CTX.getRegisterInfo()->getEncodingValue(M0.getReg());
  950. int32_t Imm = M1.getImm();
  951. bool isAdd = Imm >= 0;
  952. Imm >>= shift;
  953. if (!isAdd)
  954. Imm = -(uint32_t)Imm;
  955. Imm &= 0x7f;
  956. if (isAdd)
  957. Imm |= 0x80;
  958. assert(Qm < 8 && "Qm is supposed to be encodable in 3 bits");
  959. return (Qm << 8) | Imm;
  960. }
  961. /// getT2AddrModeImm8s4OpValue - Return encoding info for
  962. /// 'reg +/- imm8<<2' operand.
  963. uint32_t ARMMCCodeEmitter::
  964. getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx,
  965. SmallVectorImpl<MCFixup> &Fixups,
  966. const MCSubtargetInfo &STI) const {
  967. // {12-9} = reg
  968. // {8} = (U)nsigned (add == '1', sub == '0')
  969. // {7-0} = imm8
  970. unsigned Reg, Imm8;
  971. bool isAdd = true;
  972. // If the first operand isn't a register, we have a label reference.
  973. const MCOperand &MO = MI.getOperand(OpIdx);
  974. if (!MO.isReg()) {
  975. Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.
  976. Imm8 = 0;
  977. isAdd = false; // 'U' bit is set as part of the fixup.
  978. assert(MO.isExpr() && "Unexpected machine operand type!");
  979. const MCExpr *Expr = MO.getExpr();
  980. MCFixupKind Kind = MCFixupKind(ARM::fixup_t2_pcrel_10);
  981. Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
  982. ++MCNumCPRelocations;
  983. } else
  984. isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm8, Fixups, STI);
  985. // FIXME: The immediate operand should have already been encoded like this
  986. // before ever getting here. The encoder method should just need to combine
  987. // the MI operands for the register and the offset into a single
  988. // representation for the complex operand in the .td file. This isn't just
  989. // style, unfortunately. As-is, we can't represent the distinct encoding
  990. // for #-0.
  991. assert(((Imm8 & 0x3) == 0) && "Not a valid immediate!");
  992. uint32_t Binary = (Imm8 >> 2) & 0xff;
  993. // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
  994. if (isAdd)
  995. Binary |= (1 << 8);
  996. Binary |= (Reg << 9);
  997. return Binary;
  998. }
  999. /// getT2AddrModeImm7s4OpValue - Return encoding info for
  1000. /// 'reg +/- imm7<<2' operand.
  1001. uint32_t
  1002. ARMMCCodeEmitter::getT2AddrModeImm7s4OpValue(const MCInst &MI, unsigned OpIdx,
  1003. SmallVectorImpl<MCFixup> &Fixups,
  1004. const MCSubtargetInfo &STI) const {
  1005. // {11-8} = reg
  1006. // {7} = (A)dd (add == '1', sub == '0')
  1007. // {6-0} = imm7
  1008. unsigned Reg, Imm7;
  1009. // If the first operand isn't a register, we have a label reference.
  1010. bool isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm7, Fixups, STI);
  1011. // FIXME: The immediate operand should have already been encoded like this
  1012. // before ever getting here. The encoder method should just need to combine
  1013. // the MI operands for the register and the offset into a single
  1014. // representation for the complex operand in the .td file. This isn't just
  1015. // style, unfortunately. As-is, we can't represent the distinct encoding
  1016. // for #-0.
  1017. uint32_t Binary = (Imm7 >> 2) & 0xff;
  1018. // Immediate is always encoded as positive. The 'A' bit controls add vs sub.
  1019. if (isAdd)
  1020. Binary |= (1 << 7);
  1021. Binary |= (Reg << 8);
  1022. return Binary;
  1023. }
  1024. /// getT2AddrModeImm0_1020s4OpValue - Return encoding info for
  1025. /// 'reg + imm8<<2' operand.
  1026. uint32_t ARMMCCodeEmitter::
  1027. getT2AddrModeImm0_1020s4OpValue(const MCInst &MI, unsigned OpIdx,
  1028. SmallVectorImpl<MCFixup> &Fixups,
  1029. const MCSubtargetInfo &STI) const {
  1030. // {11-8} = reg
  1031. // {7-0} = imm8
  1032. const MCOperand &MO = MI.getOperand(OpIdx);
  1033. const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  1034. unsigned Reg = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
  1035. unsigned Imm8 = MO1.getImm();
  1036. return (Reg << 8) | Imm8;
  1037. }
  1038. uint32_t
  1039. ARMMCCodeEmitter::getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
  1040. SmallVectorImpl<MCFixup> &Fixups,
  1041. const MCSubtargetInfo &STI) const {
  1042. // {20-16} = imm{15-12}
  1043. // {11-0} = imm{11-0}
  1044. const MCOperand &MO = MI.getOperand(OpIdx);
  1045. if (MO.isImm())
  1046. // Hi / lo 16 bits already extracted during earlier passes.
  1047. return static_cast<unsigned>(MO.getImm());
  1048. // Handle :upper16: and :lower16: assembly prefixes.
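// For example, :upper16:(0x12345678) folds to 0x1234 and :lower16:(0x12345678)
// to 0x5678; a symbolic operand instead records a movw/movt fixup and returns 0.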
  1049. const MCExpr *E = MO.getExpr();
  1050. MCFixupKind Kind;
  1051. if (E->getKind() == MCExpr::Target) {
  1052. const ARMMCExpr *ARM16Expr = cast<ARMMCExpr>(E);
  1053. E = ARM16Expr->getSubExpr();
  1054. if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(E)) {
  1055. const int64_t Value = MCE->getValue();
  1056. if (Value > UINT32_MAX)
  1057. report_fatal_error("constant value truncated (limited to 32-bit)");
  1058. switch (ARM16Expr->getKind()) {
  1059. case ARMMCExpr::VK_ARM_HI16:
  1060. return (int32_t(Value) & 0xffff0000) >> 16;
  1061. case ARMMCExpr::VK_ARM_LO16:
  1062. return (int32_t(Value) & 0x0000ffff);
  1063. default: llvm_unreachable("Unsupported ARMFixup");
  1064. }
  1065. }
  1066. switch (ARM16Expr->getKind()) {
  1067. default: llvm_unreachable("Unsupported ARMFixup");
  1068. case ARMMCExpr::VK_ARM_HI16:
  1069. Kind = MCFixupKind(isThumb(STI) ? ARM::fixup_t2_movt_hi16
  1070. : ARM::fixup_arm_movt_hi16);
  1071. break;
  1072. case ARMMCExpr::VK_ARM_LO16:
  1073. Kind = MCFixupKind(isThumb(STI) ? ARM::fixup_t2_movw_lo16
  1074. : ARM::fixup_arm_movw_lo16);
  1075. break;
  1076. }
  1077. Fixups.push_back(MCFixup::create(0, E, Kind, MI.getLoc()));
  1078. return 0;
  1079. }
  1080. // If the expression doesn't have :upper16: or :lower16: on it, it's just a
  1081. // plain immediate expression. Previously such expressions evaluated to the
  1082. // lower 16 bits regardless of whether the instruction was a movt or a movw,
  1083. // but that led to misleading results.
  1084. // This is now disallowed in the AsmParser in validateInstruction(),
  1085. // so it should never happen here.
  1086. llvm_unreachable("expression without :upper16: or :lower16:");
  1087. }
  1088. uint32_t ARMMCCodeEmitter::
  1089. getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx,
  1090. SmallVectorImpl<MCFixup> &Fixups,
  1091. const MCSubtargetInfo &STI) const {
  1092. const MCOperand &MO = MI.getOperand(OpIdx);
  1093. const MCOperand &MO1 = MI.getOperand(OpIdx+1);
  1094. const MCOperand &MO2 = MI.getOperand(OpIdx+2);
  1095. unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
  1096. unsigned Rm = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg());
  1097. unsigned ShImm = ARM_AM::getAM2Offset(MO2.getImm());
  1098. bool isAdd = ARM_AM::getAM2Op(MO2.getImm()) == ARM_AM::add;
  1099. ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(MO2.getImm());
  1100. unsigned SBits = getShiftOp(ShOp);
  1101. // While "lsr #32" and "asr #32" exist, they are encoded with a 0 in the shift
  1102. // amount. However, it would be an easy mistake to make so check here.
  1103. assert((ShImm & ~0x1f) == 0 && "Out of range shift amount");
  1104. // {16-13} = Rn
  1105. // {12} = isAdd
  1106. // {11-0} = shifter
  1107. // {3-0} = Rm
  1108. // {4} = 0
  1109. // {6-5} = type
  1110. // {11-7} = imm
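// For example, [r1, r2, lsl #3] gives Rn = 1, Rm = 2, type = 0 (LSL), imm = 3
// and isAdd = true, so the result is 2 | (1 << 13) | (3 << 7) | (1 << 12) = 0x3182.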
  1111. uint32_t Binary = Rm;
  1112. Binary |= Rn << 13;
  1113. Binary |= SBits << 5;
  1114. Binary |= ShImm << 7;
  1115. if (isAdd)
  1116. Binary |= 1 << 12;
  1117. return Binary;
  1118. }
  1119. uint32_t ARMMCCodeEmitter::
  1120. getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx,
  1121. SmallVectorImpl<MCFixup> &Fixups,
  1122. const MCSubtargetInfo &STI) const {
  1123. // {13} 1 == imm12, 0 == Rm
  1124. // {12} isAdd
  1125. // {11-0} imm12/Rm
  1126. const MCOperand &MO = MI.getOperand(OpIdx);
  1127. const MCOperand &MO1 = MI.getOperand(OpIdx+1);
  1128. unsigned Imm = MO1.getImm();
  1129. bool isAdd = ARM_AM::getAM2Op(Imm) == ARM_AM::add;
  1130. bool isReg = MO.getReg() != 0;
  1131. uint32_t Binary = ARM_AM::getAM2Offset(Imm);
  1132. // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm12
  1133. if (isReg) {
  1134. ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(Imm);
  1135. Binary <<= 7; // Shift amount is bits [11:7]
  1136. Binary |= getShiftOp(ShOp) << 5; // Shift type is bits [6:5]
  1137. Binary |= CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); // Rm is bits [3:0]
  1138. }
  1139. return Binary | (isAdd << 12) | (isReg << 13);
  1140. }
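
/// getPostIdxRegOpValue - Encode a post-indexed register offset operand: the
/// offset register Rm together with the add/subtract flag.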
uint32_t ARMMCCodeEmitter::
getPostIdxRegOpValue(const MCInst &MI, unsigned OpIdx,
                     SmallVectorImpl<MCFixup> &Fixups,
                     const MCSubtargetInfo &STI) const {
  // {4}      isAdd
  // {3-0}    Rm
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx+1);
  bool isAdd = MO1.getImm() != 0;
  return CTX.getRegisterInfo()->getEncodingValue(MO.getReg()) | (isAdd << 4);
}

uint32_t ARMMCCodeEmitter::
getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const {
  // {9}      1 == imm8, 0 == Rm
  // {8}      isAdd
  // {7-4}    imm7_4/zero
  // {3-0}    imm3_0/Rm
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx+1);
  unsigned Imm = MO1.getImm();
  bool isAdd = ARM_AM::getAM3Op(Imm) == ARM_AM::add;
  bool isImm = MO.getReg() == 0;
  uint32_t Imm8 = ARM_AM::getAM3Offset(Imm);
  // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8
  if (!isImm)
    Imm8 = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
  return Imm8 | (isAdd << 8) | (isImm << 9);
}

uint32_t ARMMCCodeEmitter::
getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx,
                    SmallVectorImpl<MCFixup> &Fixups,
                    const MCSubtargetInfo &STI) const {
  // {13}     1 == imm8, 0 == Rm
  // {12-9}   Rn
  // {8}      isAdd
  // {7-4}    imm7_4/zero
  // {3-0}    imm3_0/Rm
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx+1);
  const MCOperand &MO2 = MI.getOperand(OpIdx+2);

  // If the first operand isn't a register, we have a label reference.
  if (!MO.isReg()) {
    unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.

    assert(MO.isExpr() && "Unexpected machine operand type!");
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = MCFixupKind(ARM::fixup_arm_pcrel_10_unscaled);
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

    ++MCNumCPRelocations;
    return (Rn << 9) | (1 << 13);
  }
  unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
  unsigned Imm = MO2.getImm();
  bool isAdd = ARM_AM::getAM3Op(Imm) == ARM_AM::add;
  bool isImm = MO1.getReg() == 0;
  uint32_t Imm8 = ARM_AM::getAM3Offset(Imm);
  // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8
  if (!isImm)
    Imm8 = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg());
  return (Rn << 9) | Imm8 | (isAdd << 8) | (isImm << 13);
}

/// getAddrModeThumbSPOpValue - Encode the t_addrmode_sp operands.
uint32_t ARMMCCodeEmitter::
getAddrModeThumbSPOpValue(const MCInst &MI, unsigned OpIdx,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const {
  // [SP, #imm]
  //   {7-0} = imm8
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  assert(MI.getOperand(OpIdx).getReg() == ARM::SP &&
         "Unexpected base register!");

  // The immediate is already shifted for the implicit zeroes, so no change
  // here.
  return MO1.getImm() & 0xff;
}

/// getAddrModeISOpValue - Encode the t_addrmode_is# operands.
uint32_t ARMMCCodeEmitter::
getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx,
                     SmallVectorImpl<MCFixup> &Fixups,
                     const MCSubtargetInfo &STI) const {
  // [Rn, #imm]
  //   {7-3} = imm5
  //   {2-0} = Rn
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
  unsigned Imm5 = MO1.getImm();
  return ((Imm5 & 0x1f) << 3) | Rn;
}

/// getAddrModePCOpValue - Return encoding for t_addrmode_pc operands.
uint32_t ARMMCCodeEmitter::
getAddrModePCOpValue(const MCInst &MI, unsigned OpIdx,
                     SmallVectorImpl<MCFixup> &Fixups,
                     const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_cp, Fixups,
                                    STI);
  return (MO.getImm() >> 2);
}

/// getAddrMode5OpValue - Return encoding info for 'reg +/- (imm8 << 2)' operand.
uint32_t ARMMCCodeEmitter::
getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx,
                    SmallVectorImpl<MCFixup> &Fixups,
                    const MCSubtargetInfo &STI) const {
  // {12-9} = reg
  // {8}    = (U)nsigned (add == '1', sub == '0')
  // {7-0}  = imm8
  unsigned Reg, Imm8;
  bool isAdd;
  // If the first operand isn't a register, we have a label reference.
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (!MO.isReg()) {
    Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.
    Imm8 = 0;
    isAdd = false; // 'U' bit is handled as part of the fixup.

    assert(MO.isExpr() && "Unexpected machine operand type!");
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind;
    if (isThumb2(STI))
      Kind = MCFixupKind(ARM::fixup_t2_pcrel_10);
    else
      Kind = MCFixupKind(ARM::fixup_arm_pcrel_10);
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

    ++MCNumCPRelocations;
  } else {
    EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm8, Fixups, STI);
    isAdd = ARM_AM::getAM5Op(Imm8) == ARM_AM::add;
  }

  uint32_t Binary = ARM_AM::getAM5Offset(Imm8);
  // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
  if (isAdd)
    Binary |= (1 << 8);
  Binary |= (Reg << 9);
  return Binary;
}

/// getAddrMode5FP16OpValue - Return encoding info for 'reg +/- (imm8 << 1)' operand.
uint32_t ARMMCCodeEmitter::
getAddrMode5FP16OpValue(const MCInst &MI, unsigned OpIdx,
                        SmallVectorImpl<MCFixup> &Fixups,
                        const MCSubtargetInfo &STI) const {
  // {12-9} = reg
  // {8}    = (U)nsigned (add == '1', sub == '0')
  // {7-0}  = imm8
  unsigned Reg, Imm8;
  bool isAdd;
  // If the first operand isn't a register, we have a label reference.
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (!MO.isReg()) {
    Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.
    Imm8 = 0;
    isAdd = false; // 'U' bit is handled as part of the fixup.

    assert(MO.isExpr() && "Unexpected machine operand type!");
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind;
    if (isThumb2(STI))
      Kind = MCFixupKind(ARM::fixup_t2_pcrel_9);
    else
      Kind = MCFixupKind(ARM::fixup_arm_pcrel_9);
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

    ++MCNumCPRelocations;
  } else {
    EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm8, Fixups, STI);
    isAdd = ARM_AM::getAM5Op(Imm8) == ARM_AM::add;
  }

  uint32_t Binary = ARM_AM::getAM5Offset(Imm8);
  // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
  if (isAdd)
    Binary |= (1 << 8);
  Binary |= (Reg << 9);
  return Binary;
}

unsigned ARMMCCodeEmitter::
getSORegRegOpValue(const MCInst &MI, unsigned OpIdx,
                   SmallVectorImpl<MCFixup> &Fixups,
                   const MCSubtargetInfo &STI) const {
  // Sub-operands are [reg, reg, imm]. The first register is Rm, the reg to be
  // shifted. The second is Rs, the amount to shift by, and the third specifies
  // the type of the shift.
  //
  // {3-0}  = Rm.
  // {4}    = 1
  // {6-5}  = type
  // {11-8} = Rs
  // {7}    = 0

  const MCOperand &MO  = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  const MCOperand &MO2 = MI.getOperand(OpIdx + 2);
  ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO2.getImm());

  // Encode Rm.
  unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());

  // Encode the shift opcode.
  unsigned SBits = 0;
  unsigned Rs = MO1.getReg();
  if (Rs) {
    // Set shift operand (bit[7:4]).
    // LSL - 0001
    // LSR - 0011
    // ASR - 0101
    // ROR - 0111
    switch (SOpc) {
    default: llvm_unreachable("Unknown shift opc!");
    case ARM_AM::lsl: SBits = 0x1; break;
    case ARM_AM::lsr: SBits = 0x3; break;
    case ARM_AM::asr: SBits = 0x5; break;
    case ARM_AM::ror: SBits = 0x7; break;
    }
  }

  Binary |= SBits << 4;

  // Encode the shift operation Rs.
  // Encode Rs bit[11:8].
  assert(ARM_AM::getSORegOffset(MO2.getImm()) == 0);
  return Binary | (CTX.getRegisterInfo()->getEncodingValue(Rs) << ARMII::RegRsShift);
}

unsigned ARMMCCodeEmitter::
getSORegImmOpValue(const MCInst &MI, unsigned OpIdx,
                   SmallVectorImpl<MCFixup> &Fixups,
                   const MCSubtargetInfo &STI) const {
  // Sub-operands are [reg, imm]. The first register is Rm, the reg to be
  // shifted. The second is the amount to shift by.
  //
  // {3-0}  = Rm.
  // {4}    = 0
  // {6-5}  = type
  // {11-7} = imm

  const MCOperand &MO  = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO1.getImm());

  // Encode Rm.
  unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());

  // Encode the shift opcode.
  unsigned SBits = 0;

  // Set shift operand (bit[6:4]).
  // LSL - 000
  // LSR - 010
  // ASR - 100
  // ROR - 110
  // RRX - 110 and bit[11:8] clear.
  switch (SOpc) {
  default: llvm_unreachable("Unknown shift opc!");
  case ARM_AM::lsl: SBits = 0x0; break;
  case ARM_AM::lsr: SBits = 0x2; break;
  case ARM_AM::asr: SBits = 0x4; break;
  case ARM_AM::ror: SBits = 0x6; break;
  case ARM_AM::rrx:
    Binary |= 0x60;
    return Binary;
  }

  // Encode shift_imm bit[11:7].
  Binary |= SBits << 4;
  unsigned Offset = ARM_AM::getSORegOffset(MO1.getImm());
  assert(Offset < 32 && "Offset must be in range 0-31!");
  return Binary | (Offset << 7);
}

unsigned ARMMCCodeEmitter::
getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const {
  const MCOperand &MO1 = MI.getOperand(OpNum);
  const MCOperand &MO2 = MI.getOperand(OpNum+1);
  const MCOperand &MO3 = MI.getOperand(OpNum+2);

  // Encoded as [Rn, Rm, imm].
  // FIXME: Needs fixup support.
  unsigned Value = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg());
  Value <<= 4;
  Value |= CTX.getRegisterInfo()->getEncodingValue(MO2.getReg());
  Value <<= 2;
  Value |= MO3.getImm();

  return Value;
}
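
// The add/subtract direction for the T2 immediate addressing modes below is
// carried in the sign of the offset operand: a negative offset clears the add
// bit, a non-negative one sets it, and INT32_MIN stands in for "#-0"
// (subtracting zero rather than adding it). For example, with Bits = 8 and
// Shift = 0, an offset of -4 encodes as imm8 = 4 with the add bit clear,
// while +4 sets the add bit.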
template<unsigned Bits, unsigned Shift>
unsigned ARMMCCodeEmitter::
getT2AddrModeImmOpValue(const MCInst &MI, unsigned OpNum,
                        SmallVectorImpl<MCFixup> &Fixups,
                        const MCSubtargetInfo &STI) const {
  const MCOperand &MO1 = MI.getOperand(OpNum);
  const MCOperand &MO2 = MI.getOperand(OpNum+1);

  // FIXME: Needs fixup support.
  unsigned Value = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg());

  // If the immediate is B bits long, we need B+1 bits in order
  // to represent the (inverse of the) sign bit.
  Value <<= (Bits + 1);
  int32_t tmp = (int32_t)MO2.getImm();
  if (tmp == INT32_MIN) { // represents subtracting zero rather than adding it
    tmp = 0;
  } else if (tmp < 0) {
    tmp = abs(tmp);
  } else {
    Value |= (1U << Bits); // Set the ADD bit
  }
  Value |= (tmp >> Shift) & ((1U << Bits) - 1);
  return Value;
}

unsigned ARMMCCodeEmitter::
getT2AddrModeImm8OffsetOpValue(const MCInst &MI, unsigned OpNum,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const {
  const MCOperand &MO1 = MI.getOperand(OpNum);

  // FIXME: Needs fixup support.
  unsigned Value = 0;
  int32_t tmp = (int32_t)MO1.getImm();
  if (tmp < 0)
    tmp = abs(tmp);
  else
    Value |= 256; // Set the ADD bit
  Value |= tmp & 255;
  return Value;
}

unsigned ARMMCCodeEmitter::
getT2SORegOpValue(const MCInst &MI, unsigned OpIdx,
                  SmallVectorImpl<MCFixup> &Fixups,
                  const MCSubtargetInfo &STI) const {
  // Sub-operands are [reg, imm]. The first register is Rm, the reg to be
  // shifted. The second is the amount to shift by.
  //
  // {3-0}  = Rm.
  // {4}    = 0
  // {6-5}  = type
  // {11-7} = imm

  const MCOperand &MO  = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO1.getImm());

  // Encode Rm.
  unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());

  // Encode the shift opcode.
  unsigned SBits = 0;
  // Set shift operand (bit[6:4]).
  // LSL - 000
  // LSR - 010
  // ASR - 100
  // ROR - 110
  switch (SOpc) {
  default: llvm_unreachable("Unknown shift opc!");
  case ARM_AM::lsl: SBits = 0x0; break;
  case ARM_AM::lsr: SBits = 0x2; break;
  case ARM_AM::asr: SBits = 0x4; break;
  case ARM_AM::rrx: LLVM_FALLTHROUGH;
  case ARM_AM::ror: SBits = 0x6; break;
  }

  Binary |= SBits << 4;
  if (SOpc == ARM_AM::rrx)
    return Binary;

  // Encode shift_imm bit[11:7].
  return Binary | ARM_AM::getSORegOffset(MO1.getImm()) << 7;
}
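
// Worked example for the inverted-mask encoding below (used by bitfield
// instructions such as BFC/BFI): a field occupying bits [7:4] arrives as the
// operand ~0x000000f0, so v is 0xf0, lsb is 4, msb is 7, and the result is
// (7 << 5) | 4.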
unsigned ARMMCCodeEmitter::
getBitfieldInvertedMaskOpValue(const MCInst &MI, unsigned Op,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const {
  // 10 bits. The lower 5 bits are the lsb of the mask, the upper 5 bits are
  // the msb of the mask.
  const MCOperand &MO = MI.getOperand(Op);
  uint32_t v = ~MO.getImm();
  uint32_t lsb = countTrailingZeros(v);
  uint32_t msb = (32 - countLeadingZeros(v)) - 1;
  assert(v != 0 && lsb < 32 && msb < 32 && "Illegal bitfield mask!");
  return lsb | (msb << 5);
}
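
// For GPR register lists the result is a 16-bit membership bitmask; for
// example, the list in "push {r4, r5, lr}" sets bits 4, 5 and 14. For the
// floating-point register lists only the first register and a register count
// are encoded.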
unsigned ARMMCCodeEmitter::
getRegisterListOpValue(const MCInst &MI, unsigned Op,
                       SmallVectorImpl<MCFixup> &Fixups,
                       const MCSubtargetInfo &STI) const {
  // VLDM/VSTM/VSCCLRM:
  //   {12-8} = Vd
  //   {7-0}  = Number of registers
  //
  // LDM/STM:
  //   {15-0} = Bitfield of GPRs.
  unsigned Reg = MI.getOperand(Op).getReg();
  bool SPRRegs = ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg);
  bool DPRRegs = ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg);

  unsigned Binary = 0;

  if (SPRRegs || DPRRegs) {
    // VLDM/VSTM/VSCCLRM
    unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg);
    unsigned NumRegs = (MI.getNumOperands() - Op) & 0xff;
    Binary |= (RegNo & 0x1f) << 8;

    // Ignore VPR
    if (MI.getOpcode() == ARM::VSCCLRMD || MI.getOpcode() == ARM::VSCCLRMS)
      --NumRegs;
    if (SPRRegs)
      Binary |= NumRegs;
    else
      Binary |= NumRegs * 2;
  } else {
    const MCRegisterInfo &MRI = *CTX.getRegisterInfo();
    assert(is_sorted(drop_begin(MI, Op),
                     [&](const MCOperand &LHS, const MCOperand &RHS) {
                       return MRI.getEncodingValue(LHS.getReg()) <
                              MRI.getEncodingValue(RHS.getReg());
                     }));
    for (unsigned I = Op, E = MI.getNumOperands(); I < E; ++I) {
      unsigned RegNo = MRI.getEncodingValue(MI.getOperand(I).getReg());
      Binary |= 1 << RegNo;
    }
  }

  return Binary;
}

/// getAddrMode6AddressOpValue - Encode an addrmode6 register number along
/// with the alignment operand.
unsigned ARMMCCodeEmitter::
getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op,
                           SmallVectorImpl<MCFixup> &Fixups,
                           const MCSubtargetInfo &STI) const {
  const MCOperand &Reg = MI.getOperand(Op);
  const MCOperand &Imm = MI.getOperand(Op + 1);

  unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg.getReg());
  unsigned Align = 0;

  switch (Imm.getImm()) {
  default: break;
  case 2:
  case 4:
  case 8:  Align = 0x01; break;
  case 16: Align = 0x02; break;
  case 32: Align = 0x03; break;
  }

  return RegNo | (Align << 4);
}

/// getAddrMode6OneLane32AddressOpValue - Encode an addrmode6 register number
/// along with the alignment operand for use in VST1 and VLD1 with size 32.
unsigned ARMMCCodeEmitter::
getAddrMode6OneLane32AddressOpValue(const MCInst &MI, unsigned Op,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const {
  const MCOperand &Reg = MI.getOperand(Op);
  const MCOperand &Imm = MI.getOperand(Op + 1);

  unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg.getReg());
  unsigned Align = 0;

  switch (Imm.getImm()) {
  default: break;
  case 8:
  case 16:
  case 32: // Default '0' value for invalid alignments of 8, 16, 32 bytes.
  case 2: Align = 0x00; break;
  case 4: Align = 0x03; break;
  }

  return RegNo | (Align << 4);
}

/// getAddrMode6DupAddressOpValue - Encode an addrmode6 register number and
/// alignment operand for use in VLD-dup instructions. This is the same as
/// getAddrMode6AddressOpValue except for the alignment encoding, which is
/// different for VLD4-dup.
unsigned ARMMCCodeEmitter::
getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const {
  const MCOperand &Reg = MI.getOperand(Op);
  const MCOperand &Imm = MI.getOperand(Op + 1);

  unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg.getReg());
  unsigned Align = 0;

  switch (Imm.getImm()) {
  default: break;
  case 2:
  case 4:
  case 8:  Align = 0x01; break;
  case 16: Align = 0x03; break;
  }

  return RegNo | (Align << 4);
}

unsigned ARMMCCodeEmitter::
getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(Op);
  if (MO.getReg() == 0) return 0x0D;
  return CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
}
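
// The right-shift immediates below are encoded as N minus the shift amount
// (where N is 8, 16, 32 or 64); e.g. getShiftRight32Imm encodes a shift of 8
// as 24.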
unsigned ARMMCCodeEmitter::
getShiftRight8Imm(const MCInst &MI, unsigned Op,
                  SmallVectorImpl<MCFixup> &Fixups,
                  const MCSubtargetInfo &STI) const {
  return 8 - MI.getOperand(Op).getImm();
}

unsigned ARMMCCodeEmitter::
getShiftRight16Imm(const MCInst &MI, unsigned Op,
                   SmallVectorImpl<MCFixup> &Fixups,
                   const MCSubtargetInfo &STI) const {
  return 16 - MI.getOperand(Op).getImm();
}

unsigned ARMMCCodeEmitter::
getShiftRight32Imm(const MCInst &MI, unsigned Op,
                   SmallVectorImpl<MCFixup> &Fixups,
                   const MCSubtargetInfo &STI) const {
  return 32 - MI.getOperand(Op).getImm();
}

unsigned ARMMCCodeEmitter::
getShiftRight64Imm(const MCInst &MI, unsigned Op,
                   SmallVectorImpl<MCFixup> &Fixups,
                   const MCSubtargetInfo &STI) const {
  return 64 - MI.getOperand(Op).getImm();
}

void ARMMCCodeEmitter::
encodeInstruction(const MCInst &MI, raw_ostream &OS,
                  SmallVectorImpl<MCFixup> &Fixups,
                  const MCSubtargetInfo &STI) const {
  // Pseudo instructions don't get encoded.
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  uint64_t TSFlags = Desc.TSFlags;
  if ((TSFlags & ARMII::FormMask) == ARMII::Pseudo)
    return;

  int Size;
  if (Desc.getSize() == 2 || Desc.getSize() == 4)
    Size = Desc.getSize();
  else
    llvm_unreachable("Unexpected instruction size!");

  uint32_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
  // Thumb 32-bit wide instructions need to emit the high order halfword
  // first.
  if (isThumb(STI) && Size == 4) {
    EmitConstant(Binary >> 16, 2, OS);
    EmitConstant(Binary & 0xffff, 2, OS);
  } else
    EmitConstant(Binary, Size, OS);
  ++MCNumEmitted; // Keep track of the # of mi's emitted.
}

template <bool isNeg, ARM::Fixups fixup>
uint32_t
ARMMCCodeEmitter::getBFTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, fixup, Fixups, STI);
  return isNeg ? -(MO.getImm() >> 1) : (MO.getImm() >> 1);
}
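
// getBFAfterTargetOpValue encodes where the "else" target sits relative to the
// branch it follows: the result is 1 when the target lies 4 bytes after the
// branch and 0 when it lies 2 bytes after; a fixup_bfcsel_else_target fixup is
// emitted instead while the operands are still expressions.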
uint32_t
ARMMCCodeEmitter::getBFAfterTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  const MCOperand BranchMO = MI.getOperand(0);

  if (MO.isExpr()) {
    assert(BranchMO.isExpr());
    const MCExpr *DiffExpr = MCBinaryExpr::createSub(
        MO.getExpr(), BranchMO.getExpr(), CTX);
    MCFixupKind Kind = MCFixupKind(ARM::fixup_bfcsel_else_target);
    Fixups.push_back(llvm::MCFixup::create(0, DiffExpr, Kind, MI.getLoc()));
    return 0;
  }

  assert(MO.isImm() && BranchMO.isImm());
  int Diff = MO.getImm() - BranchMO.getImm();
  assert(Diff == 4 || Diff == 2);

  return Diff == 4;
}
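
// getVPTMaskOpValue re-encodes the then/else mask bit by bit: walking from
// the most significant mask bit down, the end of the mask is marked with a
// set bit once all lower bits are zero, and every other bit is set only when
// it differs from the preceding predicate bit.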
uint32_t ARMMCCodeEmitter::getVPTMaskOpValue(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Unexpected operand type!");

  int Value = MO.getImm();
  int Imm = 0;

  // VPT Masks are actually encoded as a series of invert/don't invert bits,
  // rather than true/false bits.
  unsigned PrevBit = 0;
  for (int i = 3; i >= 0; --i) {
    unsigned Bit = (Value >> i) & 1;

    // Check if we are at the end of the mask.
    if ((Value & ~(~0U << i)) == 0) {
      Imm |= (1 << i);
      break;
    }

    // Convert the bit in the mask based on the previous bit.
    if (Bit != PrevBit)
      Imm |= (1 << i);

    PrevBit = Bit;
  }

  return Imm;
}
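
// getRestrictedCondCodeOpValue maps an ARMCC condition onto a restricted
// condition field that can only express EQ/NE, HS/HI and the signed
// GE/LT/GT/LE comparisons.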
uint32_t ARMMCCodeEmitter::getRestrictedCondCodeOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Unexpected operand type!");

  switch (MO.getImm()) {
  default:
    assert(0 && "Unexpected Condition!");
    return 0;
  case ARMCC::HS:
  case ARMCC::EQ:
    return 0;
  case ARMCC::HI:
  case ARMCC::NE:
    return 1;
  case ARMCC::GE:
    return 4;
  case ARMCC::LT:
    return 5;
  case ARMCC::GT:
    return 6;
  case ARMCC::LE:
    return 7;
  }
}
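
// getPowerTwoOpValue encodes a power-of-two immediate as its log2; e.g. an
// immediate of 8 is emitted as 3.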
uint32_t ARMMCCodeEmitter::
getPowerTwoOpValue(const MCInst &MI, unsigned OpIdx,
                   SmallVectorImpl<MCFixup> &Fixups,
                   const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Unexpected operand type!");
  return countTrailingZeros((uint64_t)MO.getImm());
}

template <unsigned start>
uint32_t ARMMCCodeEmitter::
getMVEPairVectorIndexOpValue(const MCInst &MI, unsigned OpIdx,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const {
  const MCOperand MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Unexpected operand type!");

  int Value = MO.getImm();
  return Value - start;
}
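
// The TableGen'erated getBinaryCodeForInstr() used by encodeInstruction()
// above is pulled in from the generated file below.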
#include "ARMGenMCCodeEmitter.inc"
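
// These factory functions select the little- or big-endian emitter; they are
// typically registered with the TargetRegistry (e.g. from ARMMCTargetDesc.cpp)
// so that MC can construct the right code emitter for the requested triple.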
MCCodeEmitter *llvm::createARMLEMCCodeEmitter(const MCInstrInfo &MCII,
                                              const MCRegisterInfo &MRI,
                                              MCContext &Ctx) {
  return new ARMMCCodeEmitter(MCII, Ctx, true);
}

MCCodeEmitter *llvm::createARMBEMCCodeEmitter(const MCInstrInfo &MCII,
                                              const MCRegisterInfo &MRI,
                                              MCContext &Ctx) {
  return new ARMMCCodeEmitter(MCII, Ctx, false);
}