//===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"
namespace {

class X86MCCodeEmitter : public MCCodeEmitter {
  const MCInstrInfo &MCII;
  MCContext &Ctx;

public:
  X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
      : MCII(mcii), Ctx(ctx) {}
  X86MCCodeEmitter(const X86MCCodeEmitter &) = delete;
  X86MCCodeEmitter &operator=(const X86MCCodeEmitter &) = delete;
  ~X86MCCodeEmitter() override = default;

  void emitPrefix(const MCInst &MI, raw_ostream &OS,
                  const MCSubtargetInfo &STI) const override;

  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

private:
  unsigned getX86RegNum(const MCOperand &MO) const;

  unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const;

  /// \param MI a single low-level machine instruction.
  /// \param OpNum the operand #.
  /// \returns true if the OpNumth operand of MI requires a bit to be set in
  /// the REX prefix.
  bool isREXExtendedReg(const MCInst &MI, unsigned OpNum) const;

  void emitImmediate(const MCOperand &Disp, SMLoc Loc, unsigned ImmSize,
                     MCFixupKind FixupKind, uint64_t StartByte, raw_ostream &OS,
                     SmallVectorImpl<MCFixup> &Fixups, int ImmOffset = 0) const;

  void emitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
                        raw_ostream &OS) const;

  void emitSIBByte(unsigned SS, unsigned Index, unsigned Base,
                   raw_ostream &OS) const;

  void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField,
                        uint64_t TSFlags, bool HasREX, uint64_t StartByte,
                        raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups,
                        const MCSubtargetInfo &STI,
                        bool ForceSIB = false) const;

  bool emitPrefixImpl(unsigned &CurOp, const MCInst &MI,
                      const MCSubtargetInfo &STI, raw_ostream &OS) const;

  void emitVEXOpcodePrefix(int MemOperand, const MCInst &MI,
                           raw_ostream &OS) const;

  void emitSegmentOverridePrefix(unsigned SegOperand, const MCInst &MI,
                                 raw_ostream &OS) const;

  bool emitOpcodePrefix(int MemOperand, const MCInst &MI,
                        const MCSubtargetInfo &STI, raw_ostream &OS) const;

  bool emitREXPrefix(int MemOperand, const MCInst &MI,
                     const MCSubtargetInfo &STI, raw_ostream &OS) const;
};

} // end anonymous namespace
static uint8_t modRMByte(unsigned Mod, unsigned RegOpcode, unsigned RM) {
  assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
  return RM | (RegOpcode << 3) | (Mod << 6);
}
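// For instance, modRMByte(3, 2, 1) yields 0b11'010'001 == 0xD1: register
// direct mode (Mod=3), reg/opcode field 2, and R/M register 1.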
static void emitByte(uint8_t C, raw_ostream &OS) { OS << static_cast<char>(C); }

static void emitConstant(uint64_t Val, unsigned Size, raw_ostream &OS) {
  // Output the constant in little endian byte order.
  for (unsigned i = 0; i != Size; ++i) {
    emitByte(Val & 255, OS);
    Val >>= 8;
  }
}
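// For example, emitConstant(0x12345678, 4, OS) writes the bytes
// 78 56 34 12, least significant byte first.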
/// Determine if this immediate can fit in a disp8 or a compressed disp8 for
/// EVEX instructions. \p ImmOffset will be set to the value to pass to the
/// ImmOffset parameter of emitImmediate.
static bool isDispOrCDisp8(uint64_t TSFlags, int Value, int &ImmOffset) {
  bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;

  int CD8_Scale =
      (TSFlags & X86II::CD8_Scale_Mask) >> X86II::CD8_Scale_Shift;
  if (!HasEVEX || CD8_Scale == 0)
    return isInt<8>(Value);

  assert(isPowerOf2_32(CD8_Scale) && "Unexpected CD8 scale!");
  if (Value & (CD8_Scale - 1)) // Unaligned offset
    return false;

  int CDisp8 = Value / CD8_Scale;
  if (!isInt<8>(CDisp8))
    return false;

  // ImmOffset will be added to Value in emitImmediate leaving just CDisp8.
  ImmOffset = CDisp8 - Value;
  return true;
}
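// Worked example: with CD8_Scale == 64 (e.g. a full 512-bit memory operand),
// a displacement of 128 is aligned and compresses to CDisp8 == 2, so
// ImmOffset becomes 2 - 128 == -126 and emitImmediate ends up writing the
// single byte 0x02.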
/// \returns the appropriate fixup kind to use for an immediate in an
/// instruction with the specified TSFlags.
static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
  unsigned Size = X86II::getSizeOfImm(TSFlags);
  bool isPCRel = X86II::isImmPCRel(TSFlags);

  if (X86II::isImmSigned(TSFlags)) {
    switch (Size) {
    default:
      llvm_unreachable("Unsupported signed fixup size!");
    case 4:
      return MCFixupKind(X86::reloc_signed_4byte);
    }
  }
  return MCFixup::getKindForSize(Size, isPCRel);
}
enum GlobalOffsetTableExprKind { GOT_None, GOT_Normal, GOT_SymDiff };

/// Check if this expression starts with _GLOBAL_OFFSET_TABLE_ and if it is
/// of the form _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on
/// ELF i386 as _GLOBAL_OFFSET_TABLE_ is magical. We only check the simple
/// cases that are known to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the
/// start of a binary expression.
static GlobalOffsetTableExprKind
startsWithGlobalOffsetTable(const MCExpr *Expr) {
  const MCExpr *RHS = nullptr;
  if (Expr->getKind() == MCExpr::Binary) {
    const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
    Expr = BE->getLHS();
    RHS = BE->getRHS();
  }

  if (Expr->getKind() != MCExpr::SymbolRef)
    return GOT_None;

  const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
  const MCSymbol &S = Ref->getSymbol();
  if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
    return GOT_None;
  if (RHS && RHS->getKind() == MCExpr::SymbolRef)
    return GOT_SymDiff;
  return GOT_Normal;
}
static bool hasSecRelSymbolRef(const MCExpr *Expr) {
  if (Expr->getKind() == MCExpr::SymbolRef) {
    const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
    return Ref->getKind() == MCSymbolRefExpr::VK_SECREL;
  }
  return false;
}
static bool isPCRel32Branch(const MCInst &MI, const MCInstrInfo &MCII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  if ((Opcode != X86::CALL64pcrel32 && Opcode != X86::JMP_4 &&
       Opcode != X86::JCC_4) ||
      getImmFixupKind(Desc.TSFlags) != FK_PCRel_4)
    return false;

  unsigned CurOp = X86II::getOperandBias(Desc);
  const MCOperand &Op = MI.getOperand(CurOp);
  if (!Op.isExpr())
    return false;

  const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Op.getExpr());
  return Ref && Ref->getKind() == MCSymbolRefExpr::VK_None;
}
unsigned X86MCCodeEmitter::getX86RegNum(const MCOperand &MO) const {
  return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
}

unsigned X86MCCodeEmitter::getX86RegEncoding(const MCInst &MI,
                                             unsigned OpNum) const {
  return Ctx.getRegisterInfo()->getEncodingValue(MI.getOperand(OpNum).getReg());
}

/// \param MI a single low-level machine instruction.
/// \param OpNum the operand #.
/// \returns true if the OpNumth operand of MI requires a bit to be set in
/// the REX prefix.
bool X86MCCodeEmitter::isREXExtendedReg(const MCInst &MI,
                                        unsigned OpNum) const {
  return (getX86RegEncoding(MI, OpNum) >> 3) & 1;
}
void X86MCCodeEmitter::emitImmediate(const MCOperand &DispOp, SMLoc Loc,
                                     unsigned Size, MCFixupKind FixupKind,
                                     uint64_t StartByte, raw_ostream &OS,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     int ImmOffset) const {
  const MCExpr *Expr = nullptr;
  if (DispOp.isImm()) {
    // If this is a simple integer displacement that doesn't require a
    // relocation, emit it now.
    if (FixupKind != FK_PCRel_1 && FixupKind != FK_PCRel_2 &&
        FixupKind != FK_PCRel_4) {
      emitConstant(DispOp.getImm() + ImmOffset, Size, OS);
      return;
    }
    Expr = MCConstantExpr::create(DispOp.getImm(), Ctx);
  } else {
    Expr = DispOp.getExpr();
  }

  // Adjust the fixup kind for _GLOBAL_OFFSET_TABLE_ and @SECREL references.
  if ((FixupKind == FK_Data_4 || FixupKind == FK_Data_8 ||
       FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
    GlobalOffsetTableExprKind Kind = startsWithGlobalOffsetTable(Expr);
    if (Kind != GOT_None) {
      assert(ImmOffset == 0);
      if (Size == 8) {
        FixupKind = MCFixupKind(X86::reloc_global_offset_table8);
      } else {
        assert(Size == 4);
        FixupKind = MCFixupKind(X86::reloc_global_offset_table);
      }
      if (Kind == GOT_Normal)
        ImmOffset = static_cast<int>(OS.tell() - StartByte);
    } else if (Expr->getKind() == MCExpr::SymbolRef) {
      if (hasSecRelSymbolRef(Expr)) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    } else if (Expr->getKind() == MCExpr::Binary) {
      const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr *>(Expr);
      if (hasSecRelSymbolRef(Bin->getLHS()) ||
          hasSecRelSymbolRef(Bin->getRHS())) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    }
  }

  // If the fixup is pc-relative, we need to bias the value to be relative to
  // the start of the field, not the end of the field.
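  // The assembler resolves the fixup relative to the first byte of the
  // field, while the CPU treats the displacement as relative to the end of
  // the instruction; the -4/-2/-1 adjustments below (plus any -ImmSize passed
  // in by rip-relative callers) account for that difference.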
  if (FixupKind == FK_PCRel_4 ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex) ||
      FixupKind == MCFixupKind(X86::reloc_branch_4byte_pcrel)) {
    ImmOffset -= 4;
    // If this is a pc-relative load off _GLOBAL_OFFSET_TABLE_:
    //   leaq _GLOBAL_OFFSET_TABLE_(%rip), %r15
    // this needs to be a GOTPC32 relocation.
    if (startsWithGlobalOffsetTable(Expr) != GOT_None)
      FixupKind = MCFixupKind(X86::reloc_global_offset_table);
  }
  if (FixupKind == FK_PCRel_2)
    ImmOffset -= 2;
  if (FixupKind == FK_PCRel_1)
    ImmOffset -= 1;

  if (ImmOffset)
    Expr = MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(ImmOffset, Ctx),
                                   Ctx);

  // Emit a symbolic constant as a fixup and Size zeros.
  Fixups.push_back(MCFixup::create(static_cast<uint32_t>(OS.tell() - StartByte),
                                   Expr, FixupKind, Loc));
  emitConstant(0, Size, OS);
}
void X86MCCodeEmitter::emitRegModRMByte(const MCOperand &ModRMReg,
                                        unsigned RegOpcodeFld,
                                        raw_ostream &OS) const {
  emitByte(modRMByte(3, RegOpcodeFld, getX86RegNum(ModRMReg)), OS);
}

void X86MCCodeEmitter::emitSIBByte(unsigned SS, unsigned Index, unsigned Base,
                                   raw_ostream &OS) const {
  // SIB byte is in the same format as the modRMByte.
  emitByte(modRMByte(SS, Index, Base), OS);
}
void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
                                        unsigned RegOpcodeField,
                                        uint64_t TSFlags, bool HasREX,
                                        uint64_t StartByte, raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI,
                                        bool ForceSIB) const {
  const MCOperand &Disp = MI.getOperand(Op + X86::AddrDisp);
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Scale = MI.getOperand(Op + X86::AddrScaleAmt);
  const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);
  unsigned BaseReg = Base.getReg();

  // Handle %rip relative addressing.
  if (BaseReg == X86::RIP ||
      BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
    assert(STI.hasFeature(X86::Is64Bit) &&
           "Rip-relative addressing requires 64-bit mode");
    assert(IndexReg.getReg() == 0 && !ForceSIB &&
           "Invalid rip-relative address");
    emitByte(modRMByte(0, RegOpcodeField, 5), OS);

    unsigned Opcode = MI.getOpcode();
    unsigned FixupKind = [&]() {
      // Enable relaxed relocation only for a MCSymbolRefExpr. We cannot use a
      // relaxed relocation if an offset is present (e.g. x@GOTPCREL+4).
      if (!(Disp.isExpr() && isa<MCSymbolRefExpr>(Disp.getExpr())))
        return X86::reloc_riprel_4byte;

      // Certain loads for GOT references can be relocated against the symbol
      // directly if the symbol ends up in the same linkage unit.
      switch (Opcode) {
      default:
        return X86::reloc_riprel_4byte;
      case X86::MOV64rm:
        // The movq load is a subset of reloc_riprel_4byte_relax_rex. It is a
        // special case because COFF and Mach-O don't support ELF's more
        // flexible R_X86_64_REX_GOTPCRELX relaxation.
        assert(HasREX);
        return X86::reloc_riprel_4byte_movq_load;
      case X86::ADC32rm:
      case X86::ADD32rm:
      case X86::AND32rm:
      case X86::CMP32rm:
      case X86::MOV32rm:
      case X86::OR32rm:
      case X86::SBB32rm:
      case X86::SUB32rm:
      case X86::TEST32mr:
      case X86::XOR32rm:
      case X86::CALL64m:
      case X86::JMP64m:
      case X86::TAILJMPm64:
      case X86::TEST64mr:
      case X86::ADC64rm:
      case X86::ADD64rm:
      case X86::AND64rm:
      case X86::CMP64rm:
      case X86::OR64rm:
      case X86::SBB64rm:
      case X86::SUB64rm:
      case X86::XOR64rm:
        return HasREX ? X86::reloc_riprel_4byte_relax_rex
                      : X86::reloc_riprel_4byte_relax;
      }
    }();

    // rip-relative addressing is actually relative to the *next* instruction.
    // Since an immediate can follow the mod/rm byte for an instruction, this
    // means that we need to bias the displacement field of the instruction
    // with the size of the immediate field. If we have this case, add it into
    // the expression to emit.
    // Note: rip-relative addressing using immediate displacement values should
    // not be adjusted, assuming it was the user's intent.
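    // For instance, "movl $7, x(%rip)" (C7 05 <disp32> <imm32>) has a 4-byte
    // immediate after the displacement, so ImmSize is 4 below and the fixup
    // gets an extra -4 bias to stay relative to the end of the instruction.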
    int ImmSize = !Disp.isImm() && X86II::hasImm(TSFlags)
                      ? X86II::getSizeOfImm(TSFlags)
                      : 0;

    emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), StartByte, OS,
                  Fixups, -ImmSize);
    return;
  }

  unsigned BaseRegNo = BaseReg ? getX86RegNum(Base) : -1U;

  // 16-bit addressing forms of the ModR/M byte have a different encoding for
  // the R/M field and are far more limited in which registers can be used.
  if (X86_MC::is16BitMemOperand(MI, Op, STI)) {
    if (BaseReg) {
      // For 32-bit addressing, the row and column values in Table 2-2 are
      // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
      // some special cases. And getX86RegNum reflects that numbering.
      // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
      // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
      // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
      // while values 0-3 indicate the allowed combinations (base+index) of
      // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
      //
      // R16Table[] is a lookup from the normal RegNo, to the row values from
      // Table 2-1 for 16-bit addressing modes. Where zero means disallowed.
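      // For example, [BX+SI] maps BX (RegNo 3) to row 7 and SI (RegNo 6) to
      // row 4; the folding below combines them into R/M field 0, matching
      // Table 2-1.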
      static const unsigned R16Table[] = {0, 0, 0, 7, 0, 6, 4, 5};
      unsigned RMfield = R16Table[BaseRegNo];
      assert(RMfield && "invalid 16-bit base register");

      if (IndexReg.getReg()) {
        unsigned IndexReg16 = R16Table[getX86RegNum(IndexReg)];
        assert(IndexReg16 && "invalid 16-bit index register");
        // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
        assert(((IndexReg16 ^ RMfield) & 2) &&
               "invalid 16-bit base/index register combination");
        assert(Scale.getImm() == 1 &&
               "invalid scale for 16-bit memory reference");

        // Allow base/index to appear in either order (although GAS doesn't).
        if (IndexReg16 & 2)
          RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
        else
          RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
      }

      if (Disp.isImm() && isInt<8>(Disp.getImm())) {
        if (Disp.getImm() == 0 && RMfield != 6) {
          // There is no displacement; just the register.
          emitByte(modRMByte(0, RegOpcodeField, RMfield), OS);
          return;
        }
        // Use the [REG]+disp8 form, including for [BP] which cannot be
        // encoded otherwise.
        emitByte(modRMByte(1, RegOpcodeField, RMfield), OS);
        emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, OS, Fixups);
        return;
      }
      // This is the [REG]+disp16 case.
      emitByte(modRMByte(2, RegOpcodeField, RMfield), OS);
    } else {
      assert(IndexReg.getReg() == 0 && "Unexpected index register!");
      // There is no BaseReg; this is the plain [disp16] case.
      emitByte(modRMByte(0, RegOpcodeField, 6), OS);
    }

    // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
    emitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, StartByte, OS, Fixups);
    return;
  }
  // Check for presence of {disp8} or {disp32} pseudo prefixes.
  bool UseDisp8 = MI.getFlags() & X86::IP_USE_DISP8;
  bool UseDisp32 = MI.getFlags() & X86::IP_USE_DISP32;

  // We only allow no displacement if no pseudo prefix is present.
  bool AllowNoDisp = !UseDisp8 && !UseDisp32;
  // Disp8 is allowed unless the {disp32} prefix is present.
  bool AllowDisp8 = !UseDisp32;

  // Determine whether a SIB byte is needed.
  if (// The SIB byte must be used if there is an index register or the
      // encoding requires a SIB byte.
      !ForceSIB && IndexReg.getReg() == 0 &&
      // The SIB byte must be used if the base is ESP/RSP/R12, all of which
      // encode to an R/M value of 4, which indicates that a SIB byte is
      // present.
      BaseRegNo != N86::ESP &&
      // If there is no base register and we're in 64-bit mode, we need a SIB
      // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
      (!STI.hasFeature(X86::Is64Bit) || BaseReg != 0)) {

    if (BaseReg == 0) { // [disp32] in X86-32 mode
      emitByte(modRMByte(0, RegOpcodeField, 5), OS);
      emitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, StartByte, OS, Fixups);
      return;
    }

    // If the base is not EBP/ESP/R12/R13 and there is no displacement, use
    // simple indirect register encoding; this handles addresses like [EAX].
    // The encoding for [EBP] or [R13] with no displacement means [disp32], so
    // we handle it by emitting a displacement of 0 later.
    if (BaseRegNo != N86::EBP) {
      if (Disp.isImm() && Disp.getImm() == 0 && AllowNoDisp) {
        emitByte(modRMByte(0, RegOpcodeField, BaseRegNo), OS);
        return;
      }

      // If the displacement is @tlscall, treat it as a zero.
      if (Disp.isExpr()) {
        auto *Sym = dyn_cast<MCSymbolRefExpr>(Disp.getExpr());
        if (Sym && Sym->getKind() == MCSymbolRefExpr::VK_TLSCALL) {
          // This is exclusively used by call *a@tlscall(base). The relocation
          // (R_386_TLSCALL or R_X86_64_TLSCALL) applies to the beginning.
          Fixups.push_back(MCFixup::create(0, Sym, FK_NONE, MI.getLoc()));
          emitByte(modRMByte(0, RegOpcodeField, BaseRegNo), OS);
          return;
        }
      }
    }

    // Otherwise, if the displacement fits in a byte, encode as [REG+disp8],
    // including a compressed disp8 for EVEX instructions that support it.
    // This also handles the 0 displacement for [EBP] or [R13]. We can't use
    // disp8 if the {disp32} pseudo prefix is present.
    if (Disp.isImm() && AllowDisp8) {
      int ImmOffset = 0;
      if (isDispOrCDisp8(TSFlags, Disp.getImm(), ImmOffset)) {
        emitByte(modRMByte(1, RegOpcodeField, BaseRegNo), OS);
        emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, OS, Fixups,
                      ImmOffset);
        return;
      }
    }

    // Otherwise, emit the most general non-SIB encoding: [REG+disp32].
    // The displacement may be 0 for the [EBP] or [R13] case if the {disp32}
    // pseudo prefix prevented using disp8 above.
    emitByte(modRMByte(2, RegOpcodeField, BaseRegNo), OS);
    unsigned Opcode = MI.getOpcode();
    unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax
                                                : X86::reloc_signed_4byte;
    emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), StartByte, OS,
                  Fixups);
    return;
  }
  // We need a SIB byte, so start by outputting the ModR/M byte first.
  assert(IndexReg.getReg() != X86::ESP && IndexReg.getReg() != X86::RSP &&
         "Cannot use ESP as index reg!");

  bool ForceDisp32 = false;
  bool ForceDisp8 = false;
  int ImmOffset = 0;
  if (BaseReg == 0) {
    // If there is no base register, we emit the special case SIB byte with
    // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
    BaseRegNo = 5;
    emitByte(modRMByte(0, RegOpcodeField, 4), OS);
    ForceDisp32 = true;
  } else if (Disp.isImm() && Disp.getImm() == 0 && AllowNoDisp &&
             // The base reg can't be EBP/RBP/R13 as that would end up with
             // '5' as the base field, but that is the magic [*] nomenclature
             // that indicates no base when mod=0. For these cases we'll emit
             // a 0 displacement instead.
             BaseRegNo != N86::EBP) {
    // Emit no displacement ModR/M byte.
    emitByte(modRMByte(0, RegOpcodeField, 4), OS);
  } else if (Disp.isImm() && AllowDisp8 &&
             isDispOrCDisp8(TSFlags, Disp.getImm(), ImmOffset)) {
    // The displacement fits in a byte or matches an EVEX compressed disp8, so
    // use the disp8 encoding. This also handles an EBP/R13 base with 0
    // displacement unless the {disp32} pseudo prefix was used.
    emitByte(modRMByte(1, RegOpcodeField, 4), OS);
    ForceDisp8 = true;
  } else {
    // Otherwise, emit the normal disp32 encoding.
    emitByte(modRMByte(2, RegOpcodeField, 4), OS);
    ForceDisp32 = true;
  }

  // Calculate what the SS field value should be...
  static const unsigned SSTable[] = {~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3};
  unsigned SS = SSTable[Scale.getImm()];
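  // For example, [rbx + rcx*8] has Scale == 8, so SS == 3 and the SIB byte
  // below is modRMByte(3, /*rcx*/ 1, /*rbx*/ 3) == 0xCB.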
  unsigned IndexRegNo = IndexReg.getReg() ? getX86RegNum(IndexReg) : 4;
  emitSIBByte(SS, IndexRegNo, BaseRegNo, OS);

  // Do we need to output a displacement?
  if (ForceDisp8)
    emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, OS, Fixups,
                  ImmOffset);
  else if (ForceDisp32)
    emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
                  StartByte, OS, Fixups);
}
/// Emit all instruction prefixes.
///
/// \returns true if REX prefix is used, otherwise returns false.
bool X86MCCodeEmitter::emitPrefixImpl(unsigned &CurOp, const MCInst &MI,
                                      const MCSubtargetInfo &STI,
                                      raw_ostream &OS) const {
  uint64_t TSFlags = MCII.get(MI.getOpcode()).TSFlags;
  // Determine where the memory operand starts, if present.
  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
  // Emit segment override opcode prefix as needed.
  if (MemoryOperand != -1) {
    MemoryOperand += CurOp;
    emitSegmentOverridePrefix(MemoryOperand + X86::AddrSegmentReg, MI, OS);
  }

  // Emit the repeat opcode prefix as needed.
  unsigned Flags = MI.getFlags();
  if (TSFlags & X86II::REP || Flags & X86::IP_HAS_REPEAT)
    emitByte(0xF3, OS);
  if (Flags & X86::IP_HAS_REPEAT_NE)
    emitByte(0xF2, OS);

  // Emit the address size opcode prefix as needed.
  if (X86_MC::needsAddressSizeOverride(MI, STI, MemoryOperand, TSFlags) ||
      Flags & X86::IP_HAS_AD_SIZE)
    emitByte(0x67, OS);

  uint64_t Form = TSFlags & X86II::FormMask;
  switch (Form) {
  default:
    break;
  case X86II::RawFrmDstSrc: {
    // Emit segment override opcode prefix as needed (not for %ds).
    if (MI.getOperand(2).getReg() != X86::DS)
      emitSegmentOverridePrefix(2, MI, OS);
    CurOp += 3; // Consume operands.
    break;
  }
  case X86II::RawFrmSrc: {
    // Emit segment override opcode prefix as needed (not for %ds).
    if (MI.getOperand(1).getReg() != X86::DS)
      emitSegmentOverridePrefix(1, MI, OS);
    CurOp += 2; // Consume operands.
    break;
  }
  case X86II::RawFrmDst: {
    ++CurOp; // Consume operand.
    break;
  }
  case X86II::RawFrmMemOffs: {
    // Emit segment override opcode prefix as needed.
    emitSegmentOverridePrefix(1, MI, OS);
    break;
  }
  }

  // REX prefix is optional, but if used must be immediately before the opcode.
  // Encoding type for this instruction.
  uint64_t Encoding = TSFlags & X86II::EncodingMask;
  bool HasREX = false;
  if (Encoding)
    emitVEXOpcodePrefix(MemoryOperand, MI, OS);
  else
    HasREX = emitOpcodePrefix(MemoryOperand, MI, STI, OS);
  return HasREX;
}
/// AVX instructions are encoded using an opcode prefix called VEX.
void X86MCCodeEmitter::emitVEXOpcodePrefix(int MemOperand, const MCInst &MI,
                                           raw_ostream &OS) const {
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  uint64_t TSFlags = Desc.TSFlags;

  assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX.");

  uint64_t Encoding = TSFlags & X86II::EncodingMask;
  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;

  // VEX_R: opcode extension equivalent to REX.R in
  // 1's complement (inverted) form
  //
  //  1: Same as REX_R=0 (must be 1 in 32-bit mode)
  //  0: Same as REX_R=1 (64 bit mode only)
  //
  uint8_t VEX_R = 0x1;
  uint8_t EVEX_R2 = 0x1;

  // VEX_X: equivalent to REX.X, only used when a
  // register is used for index in SIB Byte.
  //
  //  1: Same as REX.X=0 (must be 1 in 32-bit mode)
  //  0: Same as REX.X=1 (64-bit mode only)
  uint8_t VEX_X = 0x1;

  // VEX_B:
  //
  //  1: Same as REX_B=0 (ignored in 32-bit mode)
  //  0: Same as REX_B=1 (64 bit mode only)
  //
  uint8_t VEX_B = 0x1;

  // VEX_W: opcode specific (use like REX.W, or used for
  // opcode extension, or ignored, depending on the opcode byte)
  uint8_t VEX_W = (TSFlags & X86II::VEX_W) ? 1 : 0;

  // VEX_5M (VEX m-mmmmm field):
  //
  //  0b00000: Reserved for future use
  //  0b00001: implied 0F leading opcode
  //  0b00010: implied 0F 38 leading opcode bytes
  //  0b00011: implied 0F 3A leading opcode bytes
  //  0b00100: Reserved for future use
  //  0b00101: VEX MAP5
  //  0b00110: VEX MAP6
  //  0b00111-0b11111: Reserved for future use
  //  0b01000: XOP map select - 08h instructions with imm byte
  //  0b01001: XOP map select - 09h instructions with no imm byte
  //  0b01010: XOP map select - 0Ah instructions with imm dword
  uint8_t VEX_5M;
  switch (TSFlags & X86II::OpMapMask) {
  default:
    llvm_unreachable("Invalid prefix!");
  case X86II::TB:
    VEX_5M = 0x1;
    break; // 0F
  case X86II::T8:
    VEX_5M = 0x2;
    break; // 0F 38
  case X86II::TA:
    VEX_5M = 0x3;
    break; // 0F 3A
  case X86II::XOP8:
    VEX_5M = 0x8;
    break;
  case X86II::XOP9:
    VEX_5M = 0x9;
    break;
  case X86II::XOPA:
    VEX_5M = 0xA;
    break;
  case X86II::T_MAP5:
    VEX_5M = 0x5;
    break;
  case X86II::T_MAP6:
    VEX_5M = 0x6;
    break;
  }

  // VEX_4V (VEX vvvv field): a register specifier
  // (in 1's complement form) or 1111 if unused.
  uint8_t VEX_4V = 0xf;
  uint8_t EVEX_V2 = 0x1;

  // EVEX_L2/VEX_L (Vector Length):
  //
  //  L2 L
  //   0 0: scalar or 128-bit vector
  //   0 1: 256-bit vector
  //   1 0: 512-bit vector
  //
  uint8_t VEX_L = (TSFlags & X86II::VEX_L) ? 1 : 0;
  uint8_t EVEX_L2 = (TSFlags & X86II::EVEX_L2) ? 1 : 0;

  // VEX_PP: opcode extension providing equivalent
  // functionality of a SIMD prefix
  //
  //  0b00: None
  //  0b01: 66
  //  0b10: F3
  //  0b11: F2
  //
  uint8_t VEX_PP = 0;
  switch (TSFlags & X86II::OpPrefixMask) {
  case X86II::PD:
    VEX_PP = 0x1;
    break; // 66
  case X86II::XS:
    VEX_PP = 0x2;
    break; // F3
  case X86II::XD:
    VEX_PP = 0x3;
    break; // F2
  }

  // EVEX_U
  uint8_t EVEX_U = 1; // Always '1' so far

  // EVEX_z
  uint8_t EVEX_z = (HasEVEX_K && (TSFlags & X86II::EVEX_Z)) ? 1 : 0;

  // EVEX_b
  uint8_t EVEX_b = (TSFlags & X86II::EVEX_B) ? 1 : 0;

  // EVEX_rc
  uint8_t EVEX_rc = 0;

  // EVEX_aaa
  uint8_t EVEX_aaa = 0;

  bool EncodeRC = false;

  // Classify VEX_B, VEX_4V, VEX_R, VEX_X
  unsigned NumOps = Desc.getNumOperands();
  unsigned CurOp = X86II::getOperandBias(Desc);
  switch (TSFlags & X86II::FormMask) {
  default:
    llvm_unreachable("Unexpected form in emitVEXOpcodePrefix!");
  case X86II::MRMDestMem4VOp3CC: {
    //  MemAddr, src1(ModR/M), src2(VEX_4V)
    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc =
        getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;

    CurOp += X86::AddrNumOperands;
    unsigned RegEnc = getX86RegEncoding(MI, ++CurOp);
    VEX_R = ~(RegEnc >> 3) & 1;

    unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_4V = ~VRegEnc & 0xf;
    break;
  }
  case X86II::MRM_C0:
  case X86II::RawFrm:
  case X86II::PrefixByte:
    break;
  case X86II::MRMDestMemFSIB:
  case X86II::MRMDestMem: {
    // MRMDestMem instruction forms:
    //  MemAddr, src1(ModR/M)
    //  MemAddr, src1(VEX_4V), src2(ModR/M)
    //  MemAddr, src1(ModR/M), imm8
    //
    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc =
        getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;
    if (!HasVEX_4V) // Only needed with VSIB, which doesn't use VVVV.
      EVEX_V2 = ~(IndexRegEnc >> 4) & 1;

    CurOp += X86::AddrNumOperands;

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;
    EVEX_R2 = ~(RegEnc >> 4) & 1;
    break;
  }
  case X86II::MRMSrcMemFSIB:
  case X86II::MRMSrcMem: {
    // MRMSrcMem instruction forms:
    //  src1(ModR/M), MemAddr
    //  src1(ModR/M), src2(VEX_4V), MemAddr
    //  src1(ModR/M), MemAddr, imm8
    //  src1(ModR/M), MemAddr, src2(Imm[7:4])
    //
    // FMA4:
    //  dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;
    EVEX_R2 = ~(RegEnc >> 4) & 1;

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc =
        getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;
    if (!HasVEX_4V) // Only needed with VSIB, which doesn't use VVVV.
      EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
    break;
  }
  case X86II::MRMSrcMem4VOp3: {
    // Instruction format for 4VOp3:
    //  src1(ModR/M), MemAddr, src3(VEX_4V)
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;

    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc =
        getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;

    VEX_4V = ~getX86RegEncoding(MI, CurOp + X86::AddrNumOperands) & 0xf;
    break;
  }
  case X86II::MRMSrcMemOp4: {
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;

    unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_4V = ~VRegEnc & 0xf;

    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc =
        getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;
    break;
  }
  case X86II::MRM0m:
  case X86II::MRM1m:
  case X86II::MRM2m:
  case X86II::MRM3m:
  case X86II::MRM4m:
  case X86II::MRM5m:
  case X86II::MRM6m:
  case X86II::MRM7m: {
    // MRM[0-9]m instruction forms:
    //  MemAddr
    //  src1(VEX_4V), MemAddr
    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc =
        getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;
    if (!HasVEX_4V) // Only needed with VSIB, which doesn't use VVVV.
      EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
    break;
  }
  case X86II::MRMSrcReg: {
    // MRMSrcReg instruction forms:
    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
    //  dst(ModR/M), src1(ModR/M)
    //  dst(ModR/M), src1(ModR/M), imm8
    //
    // FMA4:
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;
    EVEX_R2 = ~(RegEnc >> 4) & 1;

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;
    VEX_X = ~(RegEnc >> 4) & 1;

    if (EVEX_b) {
      if (HasEVEX_RC) {
        unsigned RcOperand = NumOps - 1;
        assert(RcOperand >= CurOp);
        EVEX_rc = MI.getOperand(RcOperand).getImm();
        assert(EVEX_rc <= 3 && "Invalid rounding control!");
      }
      EncodeRC = true;
    }
    break;
  }
  case X86II::MRMSrcReg4VOp3: {
    // Instruction format for 4VOp3:
    //  src1(ModR/M), src2(ModR/M), src3(VEX_4V)
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;

    RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;

    VEX_4V = ~getX86RegEncoding(MI, CurOp++) & 0xf;
    break;
  }
  case X86II::MRMSrcRegOp4: {
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;

    unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_4V = ~VRegEnc & 0xf;

    // Skip second register source (encoded in Imm[7:4]).
    ++CurOp;

    RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;
    VEX_X = ~(RegEnc >> 4) & 1;
    break;
  }
  case X86II::MRMDestReg: {
    // MRMDestReg instruction forms:
    //  dst(ModR/M), src(ModR/M)
    //  dst(ModR/M), src(ModR/M), imm8
    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M)
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;
    VEX_X = ~(RegEnc >> 4) & 1;

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;
    EVEX_R2 = ~(RegEnc >> 4) & 1;
    if (EVEX_b)
      EncodeRC = true;
    break;
  }
  case X86II::MRMr0: {
    // MRMr0 instruction forms:
    //  11:rrr:000
    //  dst(ModR/M)
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;
    EVEX_R2 = ~(RegEnc >> 4) & 1;
    break;
  }
  case X86II::MRM0r:
  case X86II::MRM1r:
  case X86II::MRM2r:
  case X86II::MRM3r:
  case X86II::MRM4r:
  case X86II::MRM5r:
  case X86II::MRM6r:
  case X86II::MRM7r: {
    // MRM0r-MRM7r instruction forms:
    //  dst(VEX_4V), src(ModR/M), imm8
    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;
    VEX_X = ~(RegEnc >> 4) & 1;
    break;
  }
  }
  if (Encoding == X86II::VEX || Encoding == X86II::XOP) {
    // VEX opcode prefix can have 2 or 3 bytes
    //
    //  3 bytes:
    //    +-----+ +--------------+ +-------------------+
    //    | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
    //    +-----+ +--------------+ +-------------------+
    //  2 bytes:
    //    +-----+ +-------------------+
    //    | C5h | | R | vvvv | L | pp |
    //    +-----+ +-------------------+
    //
    //  XOP uses a similar prefix:
    //    +-----+ +--------------+ +-------------------+
    //    | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
    //    +-----+ +--------------+ +-------------------+
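    // For example, "vaddps %xmm2, %xmm1, %xmm0" qualifies for the 2-byte
    // form: R=1, vvvv=~1=0b1110 (xmm1), L=0, pp=00, giving C5 F0 58 C2.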
    uint8_t LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);

    // Can we use the 2 byte VEX prefix?
    if (!(MI.getFlags() & X86::IP_USE_VEX3) && Encoding == X86II::VEX &&
        VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) {
      emitByte(0xC5, OS);
      emitByte(LastByte | (VEX_R << 7), OS);
      return;
    }

    // 3 byte VEX prefix
    emitByte(Encoding == X86II::XOP ? 0x8F : 0xC4, OS);
    emitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, OS);
    emitByte(LastByte | (VEX_W << 7), OS);
  } else {
    assert(Encoding == X86II::EVEX && "unknown encoding!");
    // EVEX opcode prefix can have 4 bytes
    //
    // +-----+ +--------------+ +-------------------+ +------------------------+
    // | 62h | | RXBR' | 0mmm | | W | vvvv | U | pp | | z | L'L | b | v' | aaa |
    // +-----+ +--------------+ +-------------------+ +------------------------+
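    // For example, "vaddps %zmm2, %zmm1, %zmm0" encodes as 62 F1 74 48 58 C2:
    // P0=F1 (RXBR'=1111, mmm=001), P1=74 (W=0, vvvv=1110, U=1, pp=00),
    // P2=48 (z=0, L'L=10 for 512-bit, b=0, V'=1, aaa=000).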
    assert((VEX_5M & 0x7) == VEX_5M &&
           "More than 3 significant bits in VEX.m-mmmm fields for EVEX!");
    emitByte(0x62, OS);
    emitByte((VEX_R << 7) | (VEX_X << 6) | (VEX_B << 5) | (EVEX_R2 << 4) |
                 VEX_5M,
             OS);
    emitByte((VEX_W << 7) | (VEX_4V << 3) | (EVEX_U << 2) | VEX_PP, OS);
    if (EncodeRC)
      emitByte((EVEX_z << 7) | (EVEX_rc << 5) | (EVEX_b << 4) | (EVEX_V2 << 3) |
                   EVEX_aaa,
               OS);
    else
      emitByte((EVEX_z << 7) | (EVEX_L2 << 6) | (VEX_L << 5) | (EVEX_b << 4) |
                   (EVEX_V2 << 3) | EVEX_aaa,
               OS);
  }
}
/// Emit REX prefix which specifies
/// 1) 64-bit instructions,
/// 2) non-default operand size, and
/// 3) use of X86-64 extended registers.
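/// For example, "addq %rax, %rcx" needs REX.W and encodes as 48 01 C1.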
///
/// \returns true if REX prefix is used, otherwise returns false.
bool X86MCCodeEmitter::emitREXPrefix(int MemOperand, const MCInst &MI,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &OS) const {
  uint8_t REX = [&, MemOperand]() {
    uint8_t REX = 0;
    bool UsesHighByteReg = false;

    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint64_t TSFlags = Desc.TSFlags;

    if (TSFlags & X86II::REX_W)
      REX |= 1 << 3; // set REX.W

    if (MI.getNumOperands() == 0)
      return REX;

    unsigned NumOps = MI.getNumOperands();
    unsigned CurOp = X86II::getOperandBias(Desc);

    // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
    for (unsigned i = CurOp; i != NumOps; ++i) {
      const MCOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH ||
            Reg == X86::DH)
          UsesHighByteReg = true;
        if (X86II::isX86_64NonExtLowByteReg(Reg))
          // FIXME: The caller of determineREXPrefix slaps this prefix onto
          // anything that returns non-zero.
          REX |= 0x40; // REX fixed encoding prefix
      } else if (MO.isExpr() && STI.getTargetTriple().isX32()) {
        // GOTTPOFF and TLSDESC relocations require a REX prefix to allow
        // linker optimizations: even if the instructions we see may not
        // require any prefix, they may be replaced by instructions that do.
        // This is handled as a special case here so that it also works for
        // hand-written assembly without the user needing to write REX, as
        // with GNU as.
        const auto *Ref = dyn_cast<MCSymbolRefExpr>(MO.getExpr());
        if (Ref && (Ref->getKind() == MCSymbolRefExpr::VK_GOTTPOFF ||
                    Ref->getKind() == MCSymbolRefExpr::VK_TLSDESC)) {
          REX |= 0x40; // REX fixed encoding prefix
        }
      }
    }

    switch (TSFlags & X86II::FormMask) {
    case X86II::AddRegFrm:
      REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
      break;
    case X86II::MRMSrcReg:
    case X86II::MRMSrcRegCC:
      REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
      REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
      break;
    case X86II::MRMSrcMem:
    case X86II::MRMSrcMemCC:
      REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
      REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0; // REX.B
      REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X
      CurOp += X86::AddrNumOperands;
      break;
    case X86II::MRMDestReg:
      REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
      REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
      break;
    case X86II::MRMDestMem:
      REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0; // REX.B
      REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X
      CurOp += X86::AddrNumOperands;
      REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
      break;
    case X86II::MRMXmCC:
    case X86II::MRMXm:
    case X86II::MRM0m:
    case X86II::MRM1m:
    case X86II::MRM2m:
    case X86II::MRM3m:
    case X86II::MRM4m:
    case X86II::MRM5m:
    case X86II::MRM6m:
    case X86II::MRM7m:
      REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0; // REX.B
      REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X
      break;
    case X86II::MRMXrCC:
    case X86II::MRMXr:
    case X86II::MRM0r:
    case X86II::MRM1r:
    case X86II::MRM2r:
    case X86II::MRM3r:
    case X86II::MRM4r:
    case X86II::MRM5r:
    case X86II::MRM6r:
    case X86II::MRM7r:
      REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
      break;
    case X86II::MRMr0:
      REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
      break;
    case X86II::MRMDestMemFSIB:
      llvm_unreachable("FSIB format never needs a REX prefix!");
    }
    if (REX && UsesHighByteReg)
      report_fatal_error(
          "Cannot encode high byte register in REX-prefixed instruction");
    return REX;
  }();

  if (!REX)
    return false;

  emitByte(0x40 | REX, OS);
  return true;
}
/// Emit segment override opcode prefix as needed.
void X86MCCodeEmitter::emitSegmentOverridePrefix(unsigned SegOperand,
                                                 const MCInst &MI,
                                                 raw_ostream &OS) const {
  // Check for explicit segment override on memory operand.
  if (unsigned Reg = MI.getOperand(SegOperand).getReg())
    emitByte(X86::getSegmentOverridePrefixForReg(Reg), OS);
}
/// Emit all instruction prefixes prior to the opcode.
///
/// \param MemOperand the operand # of the start of a memory operand if
/// present. If not present, it is -1.
///
/// \returns true if REX prefix is used, otherwise returns false.
bool X86MCCodeEmitter::emitOpcodePrefix(int MemOperand, const MCInst &MI,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &OS) const {
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  uint64_t TSFlags = Desc.TSFlags;

  // Emit the operand size opcode prefix as needed.
  if ((TSFlags & X86II::OpSizeMask) ==
      (STI.hasFeature(X86::Is16Bit) ? X86II::OpSize32 : X86II::OpSize16))
    emitByte(0x66, OS);
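  // For instance, in 32-bit mode a 16-bit "addw %ax, %bx" needs the 0x66
  // override, while in 16-bit mode it is the 32-bit form that does.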
  // Emit the LOCK opcode prefix.
  if (TSFlags & X86II::LOCK || MI.getFlags() & X86::IP_HAS_LOCK)
    emitByte(0xF0, OS);

  // Emit the NOTRACK opcode prefix.
  if (TSFlags & X86II::NOTRACK || MI.getFlags() & X86::IP_HAS_NOTRACK)
    emitByte(0x3E, OS);

  switch (TSFlags & X86II::OpPrefixMask) {
  case X86II::PD: // 66
    emitByte(0x66, OS);
    break;
  case X86II::XS: // F3
    emitByte(0xF3, OS);
    break;
  case X86II::XD: // F2
    emitByte(0xF2, OS);
    break;
  }

  // Handle REX prefix.
  assert((STI.hasFeature(X86::Is64Bit) || !(TSFlags & X86II::REX_W)) &&
         "REX.W requires 64bit mode.");
  bool HasREX = STI.hasFeature(X86::Is64Bit)
                    ? emitREXPrefix(MemOperand, MI, STI, OS)
                    : false;

  // The 0x0F escape code must be emitted just before the opcode.
  switch (TSFlags & X86II::OpMapMask) {
  case X86II::TB:        // Two-byte opcode map
  case X86II::T8:        // 0F 38
  case X86II::TA:        // 0F 3A
  case X86II::ThreeDNow: // 0F 0F, second 0F emitted by caller.
    emitByte(0x0F, OS);
    break;
  }

  switch (TSFlags & X86II::OpMapMask) {
  case X86II::T8: // 0F 38
    emitByte(0x38, OS);
    break;
  case X86II::TA: // 0F 3A
    emitByte(0x3A, OS);
    break;
  }

  return HasREX;
}
void X86MCCodeEmitter::emitPrefix(const MCInst &MI, raw_ostream &OS,
                                  const MCSubtargetInfo &STI) const {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  uint64_t TSFlags = Desc.TSFlags;

  // Pseudo instructions don't get encoded.
  if (X86II::isPseudo(TSFlags))
    return;

  unsigned CurOp = X86II::getOperandBias(Desc);

  emitPrefixImpl(CurOp, MI, STI, OS);
}
void X86MCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  uint64_t TSFlags = Desc.TSFlags;

  // Pseudo instructions don't get encoded.
  if (X86II::isPseudo(TSFlags))
    return;

  unsigned NumOps = Desc.getNumOperands();
  unsigned CurOp = X86II::getOperandBias(Desc);

  uint64_t StartByte = OS.tell();

  bool HasREX = emitPrefixImpl(CurOp, MI, STI, OS);

  // Does it use the VEX.VVVV field?
  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
  bool HasVEX_I8Reg = (TSFlags & X86II::ImmMask) == X86II::Imm8Reg;

  // Does it use the EVEX.aaa field?
  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;

  // Used if a register is encoded in 7:4 of immediate.
  unsigned I8RegNum = 0;

  uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);

  if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
    BaseOpcode = 0x0F; // Weird 3DNow! encoding.
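  // 3DNow! instructions are encoded as 0F 0F <ModRM> <opcode byte>, e.g.
  // PFADD is 0F 0F /r with trailing byte 0x9E; the real opcode byte is
  // emitted after the operands (outside this excerpt).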
  1200. unsigned OpcodeOffset = 0;
  1201. uint64_t Form = TSFlags & X86II::FormMask;
  1202. switch (Form) {
  1203. default:
  1204. errs() << "FORM: " << Form << "\n";
  1205. llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
  1206. case X86II::Pseudo:
  1207. llvm_unreachable("Pseudo instruction shouldn't be emitted");
  1208. case X86II::RawFrmDstSrc:
  1209. case X86II::RawFrmSrc:
  1210. case X86II::RawFrmDst:
  1211. case X86II::PrefixByte:
  1212. emitByte(BaseOpcode, OS);
  1213. break;
  1214. case X86II::AddCCFrm: {
  1215. // This will be added to the opcode in the fallthrough.
    OpcodeOffset = MI.getOperand(NumOps - 1).getImm();
    assert(OpcodeOffset < 16 && "Unexpected opcode offset!");
    --NumOps; // Drop the operand from the end.
    [[fallthrough]];
  case X86II::RawFrm:
    emitByte(BaseOpcode + OpcodeOffset, OS);

    if (!STI.hasFeature(X86::Is64Bit) || !isPCRel32Branch(MI, MCII))
      break;

    const MCOperand &Op = MI.getOperand(CurOp++);
    emitImmediate(Op, MI.getLoc(), X86II::getSizeOfImm(TSFlags),
                  MCFixupKind(X86::reloc_branch_4byte_pcrel), StartByte, OS,
                  Fixups);
    break;
  }
  case X86II::RawFrmMemOffs:
    emitByte(BaseOpcode, OS);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  StartByte, OS, Fixups);
    ++CurOp; // skip segment operand
    break;
  case X86II::RawFrmImm8:
    emitByte(BaseOpcode, OS);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  StartByte, OS, Fixups);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, StartByte,
                  OS, Fixups);
    break;
  case X86II::RawFrmImm16:
    emitByte(BaseOpcode, OS);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  StartByte, OS, Fixups);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, StartByte,
                  OS, Fixups);
    break;
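  // AddRegFrm packs the register into the low 3 bits of the opcode itself
  // (e.g. PUSH r64 is 50+rd); REX.B provides the fourth register bit.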
  case X86II::AddRegFrm:
    emitByte(BaseOpcode + getX86RegNum(MI.getOperand(CurOp++)), OS);
    break;

  case X86II::MRMDestReg: {
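    // Register-to-register form: the destination lives in ModRM.r/m and the
    // source in ModRM.reg.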
    emitByte(BaseOpcode, OS);
    unsigned SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    emitRegModRMByte(MI.getOperand(CurOp),
                     getX86RegNum(MI.getOperand(SrcRegNum)), OS);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMDestMem4VOp3CC: {
    unsigned CC = MI.getOperand(8).getImm();
    emitByte(BaseOpcode + CC, OS);
    unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
    emitMemModRMByte(MI, CurOp + 1, getX86RegNum(MI.getOperand(0)), TSFlags,
                     HasREX, StartByte, OS, Fixups, STI, false);
    CurOp = SrcRegNum + 3; // Skip reg, VEX_4V and CC.
    break;
  }
  case X86II::MRMDestMemFSIB:
  case X86II::MRMDestMem: {
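    // A memory reference occupies X86::AddrNumOperands (5) consecutive
    // MCOperands: base register, scale amount, index register, displacement
    // and segment register.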
    emitByte(BaseOpcode, OS);
    unsigned SrcRegNum = CurOp + X86::AddrNumOperands;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    bool ForceSIB = (Form == X86II::MRMDestMemFSIB);
    emitMemModRMByte(MI, CurOp, getX86RegNum(MI.getOperand(SrcRegNum)), TSFlags,
                     HasREX, StartByte, OS, Fixups, STI, ForceSIB);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMSrcReg: {
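    // The mirror image of MRMDestReg: the destination register goes in
    // ModRM.reg and the source in ModRM.r/m.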
    emitByte(BaseOpcode, OS);
    unsigned SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    emitRegModRMByte(MI.getOperand(SrcRegNum),
                     getX86RegNum(MI.getOperand(CurOp)), OS);
    CurOp = SrcRegNum + 1;
    if (HasVEX_I8Reg)
      I8RegNum = getX86RegEncoding(MI, CurOp++);
    // Do not count the rounding control operand.
    if (HasEVEX_RC)
      --NumOps;
    break;
  }
  case X86II::MRMSrcReg4VOp3: {
    emitByte(BaseOpcode, OS);
    unsigned SrcRegNum = CurOp + 1;
    emitRegModRMByte(MI.getOperand(SrcRegNum),
                     getX86RegNum(MI.getOperand(CurOp)), OS);
    CurOp = SrcRegNum + 1;
    ++CurOp; // Encoded in VEX.VVVV
    break;
  }
  case X86II::MRMSrcRegOp4: {
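    // Four-operand FMA4/XOP-style form: one register source is carried in
    // VEX.VVVV and another in bits 7:4 of a trailing immediate byte.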
    emitByte(BaseOpcode, OS);
    unsigned SrcRegNum = CurOp + 1;

    // Skip 1st src (which is encoded in VEX_VVVV)
    ++SrcRegNum;

    // Capture 2nd src (which is encoded in Imm[7:4])
    assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg");
    I8RegNum = getX86RegEncoding(MI, SrcRegNum++);

    emitRegModRMByte(MI.getOperand(SrcRegNum),
                     getX86RegNum(MI.getOperand(CurOp)), OS);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMSrcRegCC: {
    unsigned FirstOp = CurOp++;
    unsigned SecondOp = CurOp++;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, OS);

    emitRegModRMByte(MI.getOperand(SecondOp),
                     getX86RegNum(MI.getOperand(FirstOp)), OS);
    break;
  }
  case X86II::MRMSrcMemFSIB:
  case X86II::MRMSrcMem: {
    unsigned FirstMemOp = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++FirstMemOp;

    if (HasVEX_4V)
      ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).

    emitByte(BaseOpcode, OS);

    bool ForceSIB = (Form == X86II::MRMSrcMemFSIB);
    emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, HasREX, StartByte, OS, Fixups, STI, ForceSIB);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    if (HasVEX_I8Reg)
      I8RegNum = getX86RegEncoding(MI, CurOp++);
    break;
  }
  case X86II::MRMSrcMem4VOp3: {
    unsigned FirstMemOp = CurOp + 1;

    emitByte(BaseOpcode, OS);

    emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, HasREX, StartByte, OS, Fixups, STI);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    ++CurOp; // Encoded in VEX.VVVV.
    break;
  }
  case X86II::MRMSrcMemOp4: {
    unsigned FirstMemOp = CurOp + 1;

    ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).

    // Capture second register source (encoded in Imm[7:4])
    assert(HasVEX_I8Reg && "MRMSrcMemOp4 should imply VEX_I8Reg");
    I8RegNum = getX86RegEncoding(MI, FirstMemOp++);

    emitByte(BaseOpcode, OS);

    emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, HasREX, StartByte, OS, Fixups, STI);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    break;
  }
  case X86II::MRMSrcMemCC: {
    unsigned RegOp = CurOp++;
    unsigned FirstMemOp = CurOp;
    CurOp = FirstMemOp + X86::AddrNumOperands;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, OS);

    emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(RegOp)),
                     TSFlags, HasREX, StartByte, OS, Fixups, STI);
    break;
  }
  case X86II::MRMXrCC: {
    unsigned RegOp = CurOp++;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, OS);
    emitRegModRMByte(MI.getOperand(RegOp), 0, OS);
    break;
  }
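  // In the MRM0r..MRM7r forms there is no register in ModRM.reg; the field
  // instead holds a /0../7 opcode extension (e.g. ADD r/m32, imm32 is 81 /0).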
  case X86II::MRMXr:
  case X86II::MRM0r:
  case X86II::MRM1r:
  case X86II::MRM2r:
  case X86II::MRM3r:
  case X86II::MRM4r:
  case X86II::MRM5r:
  case X86II::MRM6r:
  case X86II::MRM7r:
    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
      ++CurOp;
    if (HasEVEX_K) // Skip writemask
      ++CurOp;
    emitByte(BaseOpcode, OS);
    emitRegModRMByte(MI.getOperand(CurOp++),
                     (Form == X86II::MRMXr) ? 0 : Form - X86II::MRM0r, OS);
    break;
  case X86II::MRMr0:
    emitByte(BaseOpcode, OS);
    emitByte(modRMByte(3, getX86RegNum(MI.getOperand(CurOp++)), 0), OS);
    break;
  case X86II::MRMXmCC: {
    unsigned FirstMemOp = CurOp;
    CurOp = FirstMemOp + X86::AddrNumOperands;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, OS);

    emitMemModRMByte(MI, FirstMemOp, 0, TSFlags, HasREX, StartByte, OS, Fixups,
                     STI);
    break;
  }
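  // Memory counterpart of the register forms above: ModRM.reg carries the
  // /0../7 opcode extension while r/m (plus SIB/displacement) describes the
  // address (e.g. PUSH r/m64 is FF /6).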
  case X86II::MRMXm:
  case X86II::MRM0m:
  case X86II::MRM1m:
  case X86II::MRM2m:
  case X86II::MRM3m:
  case X86II::MRM4m:
  case X86II::MRM5m:
  case X86II::MRM6m:
  case X86II::MRM7m:
    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
      ++CurOp;
    if (HasEVEX_K) // Skip writemask
      ++CurOp;
    emitByte(BaseOpcode, OS);
    emitMemModRMByte(MI, CurOp,
                     (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags,
                     HasREX, StartByte, OS, Fixups, STI);
    CurOp += X86::AddrNumOperands;
    break;
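  // Operand-less forms with an opcode extension: emit a fixed ModRM byte with
  // mod = 11, reg = the extension and r/m = 0, i.e. 0xC0 + (ext << 3).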
  case X86II::MRM0X:
  case X86II::MRM1X:
  case X86II::MRM2X:
  case X86II::MRM3X:
  case X86II::MRM4X:
  case X86II::MRM5X:
  case X86II::MRM6X:
  case X86II::MRM7X:
    emitByte(BaseOpcode, OS);
    emitByte(0xC0 + ((Form - X86II::MRM0X) << 3), OS);
    break;
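  // MRM_C0..MRM_FF: the entire ModRM byte is a fixed value in the C0..FF
  // range, used by instructions such as XGETBV (0F 01 D0).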
  case X86II::MRM_C0:
  case X86II::MRM_C1:
  case X86II::MRM_C2:
  case X86II::MRM_C3:
  case X86II::MRM_C4:
  case X86II::MRM_C5:
  case X86II::MRM_C6:
  case X86II::MRM_C7:
  case X86II::MRM_C8:
  case X86II::MRM_C9:
  case X86II::MRM_CA:
  case X86II::MRM_CB:
  case X86II::MRM_CC:
  case X86II::MRM_CD:
  case X86II::MRM_CE:
  case X86II::MRM_CF:
  case X86II::MRM_D0:
  case X86II::MRM_D1:
  case X86II::MRM_D2:
  case X86II::MRM_D3:
  case X86II::MRM_D4:
  case X86II::MRM_D5:
  case X86II::MRM_D6:
  case X86II::MRM_D7:
  case X86II::MRM_D8:
  case X86II::MRM_D9:
  case X86II::MRM_DA:
  case X86II::MRM_DB:
  case X86II::MRM_DC:
  case X86II::MRM_DD:
  case X86II::MRM_DE:
  case X86II::MRM_DF:
  case X86II::MRM_E0:
  case X86II::MRM_E1:
  case X86II::MRM_E2:
  case X86II::MRM_E3:
  case X86II::MRM_E4:
  case X86II::MRM_E5:
  case X86II::MRM_E6:
  case X86II::MRM_E7:
  case X86II::MRM_E8:
  case X86II::MRM_E9:
  case X86II::MRM_EA:
  case X86II::MRM_EB:
  case X86II::MRM_EC:
  case X86II::MRM_ED:
  case X86II::MRM_EE:
  case X86II::MRM_EF:
  case X86II::MRM_F0:
  case X86II::MRM_F1:
  case X86II::MRM_F2:
  case X86II::MRM_F3:
  case X86II::MRM_F4:
  case X86II::MRM_F5:
  case X86II::MRM_F6:
  case X86II::MRM_F7:
  case X86II::MRM_F8:
  case X86II::MRM_F9:
  case X86II::MRM_FA:
  case X86II::MRM_FB:
  case X86II::MRM_FC:
  case X86II::MRM_FD:
  case X86II::MRM_FE:
  case X86II::MRM_FF:
    emitByte(BaseOpcode, OS);
    emitByte(0xC0 + Form - X86II::MRM_C0, OS);
    break;
  }
  if (HasVEX_I8Reg) {
    // The last source register of a 4-operand AVX instruction is encoded in
    // bits[7:4] of an immediate byte.
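    // For example, in VBLENDVPS xmm1, xmm2, xmm3/m128, xmm4 the mask register
    // xmm4 travels in imm8[7:4].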
    assert(I8RegNum < 16 && "Register encoding out of range");
    I8RegNum <<= 4;
    if (CurOp != NumOps) {
      unsigned Val = MI.getOperand(CurOp++).getImm();
      assert(Val < 16 && "Immediate operand value out of range");
      I8RegNum |= Val;
    }
    emitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1,
                  StartByte, OS, Fixups);
  } else {
    // If there is a remaining operand, it must be a trailing immediate. Emit it
    // according to the right size for the instruction. Some instructions
    // (SSE4a extrq and insertq) have two trailing immediates.
    while (CurOp != NumOps && NumOps - CurOp <= 2) {
      emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                    X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                    StartByte, OS, Fixups);
    }
  }
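  // 3DNow!: the instruction's real opcode byte trails all operands; emit it
  // now, like a final imm8.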
  if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
    emitByte(X86II::getBaseOpcodeFor(TSFlags), OS);
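  // x86 caps the total length of a single instruction at 15 bytes; the
  // hardware rejects anything longer, so the emitter must never exceed it.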
  assert(OS.tell() - StartByte <= 15 &&
         "The size of an instruction must not exceed 15 bytes.");
#ifndef NDEBUG
  // FIXME: Verify.
  if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
    errs() << "Cannot encode all operands of: ";
    MI.dump();
    errs() << '\n';
    abort();
  }
#endif
}

MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
                                            MCContext &Ctx) {
  return new X86MCCodeEmitter(MCII, Ctx);
}