  1. //===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. #include "MCTargetDesc/ARMAsmBackend.h"
  9. #include "MCTargetDesc/ARMAddressingModes.h"
  10. #include "MCTargetDesc/ARMAsmBackendDarwin.h"
  11. #include "MCTargetDesc/ARMAsmBackendELF.h"
  12. #include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
  13. #include "MCTargetDesc/ARMFixupKinds.h"
  14. #include "MCTargetDesc/ARMMCTargetDesc.h"
  15. #include "llvm/ADT/StringSwitch.h"
  16. #include "llvm/BinaryFormat/ELF.h"
  17. #include "llvm/BinaryFormat/MachO.h"
  18. #include "llvm/MC/MCAsmBackend.h"
  19. #include "llvm/MC/MCAssembler.h"
  20. #include "llvm/MC/MCContext.h"
  21. #include "llvm/MC/MCDirectives.h"
  22. #include "llvm/MC/MCELFObjectWriter.h"
  23. #include "llvm/MC/MCExpr.h"
  24. #include "llvm/MC/MCFixupKindInfo.h"
  25. #include "llvm/MC/MCObjectWriter.h"
  26. #include "llvm/MC/MCRegisterInfo.h"
  27. #include "llvm/MC/MCSectionELF.h"
  28. #include "llvm/MC/MCSectionMachO.h"
  29. #include "llvm/MC/MCSubtargetInfo.h"
  30. #include "llvm/MC/MCValue.h"
  31. #include "llvm/MC/MCAsmLayout.h"
  32. #include "llvm/Support/Debug.h"
  33. #include "llvm/Support/EndianStream.h"
  34. #include "llvm/Support/ErrorHandling.h"
  35. #include "llvm/Support/Format.h"
  36. #include "llvm/Support/TargetParser.h"
  37. #include "llvm/Support/raw_ostream.h"
  38. using namespace llvm;
namespace {
// Minimal ELF object-writer description for ARM: 32-bit target, machine
// type EM_ARM, and REL-style relocations (no explicit addends), matching
// the AAELF32 ABI.
class ARMELFObjectWriter : public MCELFObjectTargetWriter {
public:
  ARMELFObjectWriter(uint8_t OSABI)
      : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
                                /*HasRelocationAddend*/ false) {}
};
} // end anonymous namespace
// The generic backend recognizes no named .reloc fixups; object-format
// subclasses (see ARMAsmBackendELF below) override this.
Optional<MCFixupKind> ARMAsmBackend::getFixupKind(StringRef Name) const {
  return None;
}
// Map a .reloc directive name to a literal ELF relocation kind: every
// R_ARM_* name from the relocation .def file is accepted, plus the generic
// BFD_RELOC_* aliases that GNU as supports. Returns None for unknown names.
Optional<MCFixupKind> ARMAsmBackendELF::getFixupKind(StringRef Name) const {
  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/ARM.def"
#undef ELF_RELOC
                      .Case("BFD_RELOC_NONE", ELF::R_ARM_NONE)
                      .Case("BFD_RELOC_8", ELF::R_ARM_ABS8)
                      .Case("BFD_RELOC_16", ELF::R_ARM_ABS16)
                      .Case("BFD_RELOC_32", ELF::R_ARM_ABS32)
                      .Default(-1u);
  if (Type == -1u)
    return None;
  // Literal relocation kinds are offset past the target fixup range and are
  // emitted verbatim with no fixup processing.
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}
// Describe each target fixup: the bit offset of its field within the
// instruction, the field width, and flags (PC-relative, constant once
// resolved, halfword-swapped Thumb2 placement). Separate tables are needed
// because field bit offsets differ between little- and big-endian
// instruction streams.
const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  unsigned IsPCRelConstant =
      MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_Constant;
  const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined
      // in ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant},
      {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_ldst_abs_12", 0, 32, 0},
      {"fixup_thumb_adr_pcrel_10", 0, 8,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12,
      // 16 - 19.
      {"fixup_arm_movt_hi16", 0, 20, 0},
      {"fixup_arm_movw_lo16", 0, 20, 0},
      {"fixup_t2_movt_hi16", 0, 20, 0},
      {"fixup_t2_movw_lo16", 0, 20, 0},
      {"fixup_arm_mod_imm", 0, 12, 0},
      {"fixup_t2_so_imm", 0, 26, 0},
      {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfcsel_else_target", 0, 32, 0},
      {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}};
  const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined
      // in ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant},
      {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_ldst_abs_12", 0, 32, 0},
      {"fixup_thumb_adr_pcrel_10", 8, 8,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12,
      // 16 - 19.
      {"fixup_arm_movt_hi16", 12, 20, 0},
      {"fixup_arm_movw_lo16", 12, 20, 0},
      {"fixup_t2_movt_hi16", 12, 20, 0},
      {"fixup_t2_movw_lo16", 12, 20, 0},
      {"fixup_arm_mod_imm", 20, 12, 0},
      {"fixup_t2_so_imm", 26, 6, 0},
      {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfcsel_else_target", 0, 32, 0},
      {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}};
  // Fixup kinds from .reloc directive are like R_ARM_NONE. They do not
  // require any extra processing.
  if (Kind >= FirstLiteralRelocationKind)
    return MCAsmBackend::getFixupKindInfo(FK_NONE);
  // Generic (non-target) fixup kinds are described by the base class.
  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);
  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return (Endian == support::little ? InfosLE
                                    : InfosBE)[Kind - FirstTargetFixupKind];
}
  186. void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
  187. switch (Flag) {
  188. default:
  189. break;
  190. case MCAF_Code16:
  191. setIsThumb(true);
  192. break;
  193. case MCAF_Code32:
  194. setIsThumb(false);
  195. break;
  196. }
  197. }
  198. unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op,
  199. const MCSubtargetInfo &STI) const {
  200. bool HasThumb2 = STI.getFeatureBits()[ARM::FeatureThumb2];
  201. bool HasV8MBaselineOps = STI.getFeatureBits()[ARM::HasV8MBaselineOps];
  202. switch (Op) {
  203. default:
  204. return Op;
  205. case ARM::tBcc:
  206. return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
  207. case ARM::tLDRpci:
  208. return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
  209. case ARM::tADR:
  210. return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
  211. case ARM::tB:
  212. return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
  213. case ARM::tCBZ:
  214. return ARM::tHINT;
  215. case ARM::tCBNZ:
  216. return ARM::tHINT;
  217. }
  218. }
  219. bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst,
  220. const MCSubtargetInfo &STI) const {
  221. if (getRelaxedOpcode(Inst.getOpcode(), STI) != Inst.getOpcode())
  222. return true;
  223. return false;
  224. }
  225. static const char *checkPCRelOffset(uint64_t Value, int64_t Min, int64_t Max) {
  226. int64_t Offset = int64_t(Value) - 4;
  227. if (Offset < Min || Offset > Max)
  228. return "out of range pc-relative fixup value";
  229. return nullptr;
  230. }
// Return a diagnostic string when the resolved fixup value does not fit the
// narrow instruction's encoding (i.e. the instruction must be relaxed to its
// wide form), or nullptr when the value is encodable as-is.
const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
                                                    uint64_t Value) const {
  switch (Fixup.getTargetKind()) {
  case ARM::fixup_arm_thumb_br: {
    // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value does not fit the signed 12-bit (even) displacement.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 2046 || Offset < -2048)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_bcc: {
    // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value is too big for a (signed) i8.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 254 || Offset < -256)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp: {
    // If the immediate is negative, greater than 1020, or not a multiple
    // of four, the wide version of the instruction must be used.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset & 3)
      return "misaligned pc-relative fixup value";
    else if (Offset > 1020 || Offset < 0)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_cb: {
    // If we have a Thumb CBZ or CBNZ instruction and its target is the next
    // instruction it is actually out of range for the instruction.
    // It will be changed to a NOP.
    int64_t Offset = (Value & ~1);
    if (Offset == 2)
      return "will be converted to nop";
    break;
  }
  // Low-overhead-branch extension fixups: delegate the range check to
  // checkPCRelOffset with each encoding's displacement bounds.
  case ARM::fixup_bf_branch:
    return checkPCRelOffset(Value, 0, 30);
  case ARM::fixup_bf_target:
    return checkPCRelOffset(Value, -0x10000, +0xfffe);
  case ARM::fixup_bfl_target:
    return checkPCRelOffset(Value, -0x40000, +0x3fffe);
  case ARM::fixup_bfc_target:
    return checkPCRelOffset(Value, -0x1000, +0xffe);
  case ARM::fixup_wls:
    return checkPCRelOffset(Value, 0, +0xffe);
  case ARM::fixup_le:
    // The offset field in the LE and LETP instructions is an 11-bit
    // value shifted left by 2 (i.e. 0,2,4,...,4094), and it is
    // interpreted as a negative offset from the value read from pc,
    // i.e. from instruction_address+4.
    //
    // So an LE instruction can in principle address the instruction
    // immediately after itself, or (not very usefully) the address
    // half way through the 4-byte LE.
    return checkPCRelOffset(Value, -0xffe, 0);
  case ARM::fixup_bfcsel_else_target: {
    if (Value != 2 && Value != 4)
      return "out of range label-relative fixup value";
    break;
  }
  default:
    llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
  }
  return nullptr;
}
  308. bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
  309. const MCRelaxableFragment *DF,
  310. const MCAsmLayout &Layout) const {
  311. return reasonForFixupRelaxation(Fixup, Value);
  312. }
  313. void ARMAsmBackend::relaxInstruction(MCInst &Inst,
  314. const MCSubtargetInfo &STI) const {
  315. unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode(), STI);
  316. // Return a diagnostic if we get here w/ a bogus instruction.
  317. if (RelaxedOp == Inst.getOpcode()) {
  318. SmallString<256> Tmp;
  319. raw_svector_ostream OS(Tmp);
  320. Inst.dump_pretty(OS);
  321. OS << "\n";
  322. report_fatal_error("unexpected instruction to relax: " + OS.str());
  323. }
  324. // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
  325. // have to change the operands too.
  326. if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
  327. RelaxedOp == ARM::tHINT) {
  328. MCInst Res;
  329. Res.setOpcode(RelaxedOp);
  330. Res.addOperand(MCOperand::createImm(0));
  331. Res.addOperand(MCOperand::createImm(14));
  332. Res.addOperand(MCOperand::createReg(0));
  333. Inst = std::move(Res);
  334. return;
  335. }
  336. // The rest of instructions we're relaxing have the same operands.
  337. // We just need to update to the proper opcode.
  338. Inst.setOpcode(RelaxedOp);
  339. }
// Emit Count bytes of NOP padding to OS, choosing the encoding from the
// current mode (Thumb vs. ARM) and from whether the subtarget has a real
// NOP instruction (hasNOP) or needs the classic MOV-to-self idiom.
bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                 const MCSubtargetInfo *STI) const {
  const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
  const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
  const uint32_t ARMv4_NopEncoding = 0xe1a00000;   // using MOV r0,r0
  const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
  if (isThumb()) {
    const uint16_t nopEncoding =
        hasNOP(STI) ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
    uint64_t NumNops = Count / 2;
    for (uint64_t i = 0; i != NumNops; ++i)
      support::endian::write(OS, nopEncoding, Endian);
    // Thumb NOPs are 2 bytes; pad out an odd Count with a single zero byte.
    if (Count & 1)
      OS << '\0';
    return true;
  }
  // ARM mode
  const uint32_t nopEncoding =
      hasNOP(STI) ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
  uint64_t NumNops = Count / 4;
  for (uint64_t i = 0; i != NumNops; ++i)
    support::endian::write(OS, nopEncoding, Endian);
  // FIXME: should this function return false when unable to write exactly
  // 'Count' bytes with NOP encodings?
  // ARM NOPs are 4 bytes; fill any remainder with filler bytes.
  switch (Count % 4) {
  default:
    break; // No leftover bytes to write
  case 1:
    OS << '\0';
    break;
  case 2:
    OS.write("\0\0", 2);
    break;
  case 3:
    OS.write("\0\0\xa0", 3);
    break;
  }
  return true;
}
  379. static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
  380. if (IsLittleEndian) {
  381. // Note that the halfwords are stored high first and low second in thumb;
  382. // so we need to swap the fixup value here to map properly.
  383. uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
  384. Swapped |= (Value & 0x0000FFFF) << 16;
  385. return Swapped;
  386. } else
  387. return Value;
  388. }
  389. static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
  390. bool IsLittleEndian) {
  391. uint32_t Value;
  392. if (IsLittleEndian) {
  393. Value = (SecondHalf & 0xFFFF) << 16;
  394. Value |= (FirstHalf & 0xFFFF);
  395. } else {
  396. Value = (SecondHalf & 0xFFFF);
  397. Value |= (FirstHalf & 0xFFFF) << 16;
  398. }
  399. return Value;
  400. }
  401. unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
  402. const MCFixup &Fixup,
  403. const MCValue &Target, uint64_t Value,
  404. bool IsResolved, MCContext &Ctx,
  405. const MCSubtargetInfo* STI) const {
  406. unsigned Kind = Fixup.getKind();
  407. // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT
  408. // and .word relocations they put the Thumb bit into the addend if possible.
  409. // Other relocation types don't want this bit though (branches couldn't encode
  410. // it if it *was* present, and no other relocations exist) and it can
  411. // interfere with checking valid expressions.
  412. if (const MCSymbolRefExpr *A = Target.getSymA()) {
  413. if (A->hasSubsectionsViaSymbols() && Asm.isThumbFunc(&A->getSymbol()) &&
  414. A->getSymbol().isExternal() &&
  415. (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 ||
  416. Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 ||
  417. Kind == ARM::fixup_t2_movt_hi16))
  418. Value |= 1;
  419. }
  420. switch (Kind) {
  421. default:
  422. Ctx.reportError(Fixup.getLoc(), "bad relocation fixup type");
  423. return 0;
  424. case FK_Data_1:
  425. case FK_Data_2:
  426. case FK_Data_4:
  427. return Value;
  428. case FK_SecRel_2:
  429. return Value;
  430. case FK_SecRel_4:
  431. return Value;
  432. case ARM::fixup_arm_movt_hi16:
  433. assert(STI != nullptr);
  434. if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
  435. Value >>= 16;
  436. LLVM_FALLTHROUGH;
  437. case ARM::fixup_arm_movw_lo16: {
  438. unsigned Hi4 = (Value & 0xF000) >> 12;
  439. unsigned Lo12 = Value & 0x0FFF;
  440. // inst{19-16} = Hi4;
  441. // inst{11-0} = Lo12;
  442. Value = (Hi4 << 16) | (Lo12);
  443. return Value;
  444. }
  445. case ARM::fixup_t2_movt_hi16:
  446. assert(STI != nullptr);
  447. if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
  448. Value >>= 16;
  449. LLVM_FALLTHROUGH;
  450. case ARM::fixup_t2_movw_lo16: {
  451. unsigned Hi4 = (Value & 0xF000) >> 12;
  452. unsigned i = (Value & 0x800) >> 11;
  453. unsigned Mid3 = (Value & 0x700) >> 8;
  454. unsigned Lo8 = Value & 0x0FF;
  455. // inst{19-16} = Hi4;
  456. // inst{26} = i;
  457. // inst{14-12} = Mid3;
  458. // inst{7-0} = Lo8;
  459. Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
  460. return swapHalfWords(Value, Endian == support::little);
  461. }
  462. case ARM::fixup_arm_ldst_pcrel_12:
  463. // ARM PC-relative values are offset by 8.
  464. Value -= 4;
  465. LLVM_FALLTHROUGH;
  466. case ARM::fixup_t2_ldst_pcrel_12:
  467. // Offset by 4, adjusted by two due to the half-word ordering of thumb.
  468. Value -= 4;
  469. LLVM_FALLTHROUGH;
  470. case ARM::fixup_arm_ldst_abs_12: {
  471. bool isAdd = true;
  472. if ((int64_t)Value < 0) {
  473. Value = -Value;
  474. isAdd = false;
  475. }
  476. if (Value >= 4096) {
  477. Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
  478. return 0;
  479. }
  480. Value |= isAdd << 23;
  481. // Same addressing mode as fixup_arm_pcrel_10,
  482. // but with 16-bit halfwords swapped.
  483. if (Kind == ARM::fixup_t2_ldst_pcrel_12)
  484. return swapHalfWords(Value, Endian == support::little);
  485. return Value;
  486. }
  487. case ARM::fixup_arm_adr_pcrel_12: {
  488. // ARM PC-relative values are offset by 8.
  489. Value -= 8;
  490. unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
  491. if ((int64_t)Value < 0) {
  492. Value = -Value;
  493. opc = 2; // 0b0010
  494. }
  495. if (ARM_AM::getSOImmVal(Value) == -1) {
  496. Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
  497. return 0;
  498. }
  499. // Encode the immediate and shift the opcode into place.
  500. return ARM_AM::getSOImmVal(Value) | (opc << 21);
  501. }
  502. case ARM::fixup_t2_adr_pcrel_12: {
  503. Value -= 4;
  504. unsigned opc = 0;
  505. if ((int64_t)Value < 0) {
  506. Value = -Value;
  507. opc = 5;
  508. }
  509. uint32_t out = (opc << 21);
  510. out |= (Value & 0x800) << 15;
  511. out |= (Value & 0x700) << 4;
  512. out |= (Value & 0x0FF);
  513. return swapHalfWords(out, Endian == support::little);
  514. }
  515. case ARM::fixup_arm_condbranch:
  516. case ARM::fixup_arm_uncondbranch:
  517. case ARM::fixup_arm_uncondbl:
  518. case ARM::fixup_arm_condbl:
  519. case ARM::fixup_arm_blx:
  520. // These values don't encode the low two bits since they're always zero.
  521. // Offset by 8 just as above.
  522. if (const MCSymbolRefExpr *SRE =
  523. dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
  524. if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
  525. return 0;
  526. return 0xffffff & ((Value - 8) >> 2);
  527. case ARM::fixup_t2_uncondbranch: {
  528. Value = Value - 4;
  529. if (!isInt<25>(Value)) {
  530. Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
  531. return 0;
  532. }
  533. Value >>= 1; // Low bit is not encoded.
  534. uint32_t out = 0;
  535. bool I = Value & 0x800000;
  536. bool J1 = Value & 0x400000;
  537. bool J2 = Value & 0x200000;
  538. J1 ^= I;
  539. J2 ^= I;
  540. out |= I << 26; // S bit
  541. out |= !J1 << 13; // J1 bit
  542. out |= !J2 << 11; // J2 bit
  543. out |= (Value & 0x1FF800) << 5; // imm6 field
  544. out |= (Value & 0x0007FF); // imm11 field
  545. return swapHalfWords(out, Endian == support::little);
  546. }
  547. case ARM::fixup_t2_condbranch: {
  548. Value = Value - 4;
  549. if (!isInt<21>(Value)) {
  550. Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
  551. return 0;
  552. }
  553. Value >>= 1; // Low bit is not encoded.
  554. uint64_t out = 0;
  555. out |= (Value & 0x80000) << 7; // S bit
  556. out |= (Value & 0x40000) >> 7; // J2 bit
  557. out |= (Value & 0x20000) >> 4; // J1 bit
  558. out |= (Value & 0x1F800) << 5; // imm6 field
  559. out |= (Value & 0x007FF); // imm11 field
  560. return swapHalfWords(out, Endian == support::little);
  561. }
  562. case ARM::fixup_arm_thumb_bl: {
  563. if (!isInt<25>(Value - 4) ||
  564. (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
  565. !STI->getFeatureBits()[ARM::HasV8MBaselineOps] &&
  566. !STI->getFeatureBits()[ARM::HasV6MOps] &&
  567. !isInt<23>(Value - 4))) {
  568. Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
  569. return 0;
  570. }
  571. // The value doesn't encode the low bit (always zero) and is offset by
  572. // four. The 32-bit immediate value is encoded as
  573. // imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
  574. // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
  575. // The value is encoded into disjoint bit positions in the destination
  576. // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
  577. // J = either J1 or J2 bit
  578. //
  579. // BL: xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
  580. //
  581. // Note that the halfwords are stored high first, low second; so we need
  582. // to transpose the fixup value here to map properly.
  583. uint32_t offset = (Value - 4) >> 1;
  584. uint32_t signBit = (offset & 0x800000) >> 23;
  585. uint32_t I1Bit = (offset & 0x400000) >> 22;
  586. uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
  587. uint32_t I2Bit = (offset & 0x200000) >> 21;
  588. uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
  589. uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
  590. uint32_t imm11Bits = (offset & 0x000007FF);
  591. uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
  592. uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
  593. (uint16_t)imm11Bits);
  594. return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
  595. }
  596. case ARM::fixup_arm_thumb_blx: {
  597. // The value doesn't encode the low two bits (always zero) and is offset by
  598. // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
  599. // imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
  600. // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
  601. // The value is encoded into disjoint bit positions in the destination
  602. // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
  603. // J = either J1 or J2 bit, 0 = zero.
  604. //
  605. // BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
  606. //
  607. // Note that the halfwords are stored high first, low second; so we need
  608. // to transpose the fixup value here to map properly.
  609. if (Value % 4 != 0) {
  610. Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
  611. return 0;
  612. }
  613. uint32_t offset = (Value - 4) >> 2;
  614. if (const MCSymbolRefExpr *SRE =
  615. dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
  616. if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
  617. offset = 0;
  618. uint32_t signBit = (offset & 0x400000) >> 22;
  619. uint32_t I1Bit = (offset & 0x200000) >> 21;
  620. uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
  621. uint32_t I2Bit = (offset & 0x100000) >> 20;
  622. uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
  623. uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
  624. uint32_t imm10LBits = (offset & 0x3FF);
  625. uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
  626. uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
  627. ((uint16_t)imm10LBits) << 1);
  628. return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
  629. }
  630. case ARM::fixup_thumb_adr_pcrel_10:
  631. case ARM::fixup_arm_thumb_cp:
  632. // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
  633. // could have an error on our hands.
  634. assert(STI != nullptr);
  635. if (!STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
  636. const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
  637. if (FixupDiagnostic) {
  638. Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
  639. return 0;
  640. }
  641. }
  642. // Offset by 4, and don't encode the low two bits.
  643. return ((Value - 4) >> 2) & 0xff;
  644. case ARM::fixup_arm_thumb_cb: {
  645. // CB instructions can only branch to offsets in [4, 126] in multiples of 2
  646. // so ensure that the raw value LSB is zero and it lies in [2, 130].
  647. // An offset of 2 will be relaxed to a NOP.
  648. if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
  649. Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
  650. return 0;
  651. }
  652. // Offset by 4 and don't encode the lower bit, which is always 0.
  653. // FIXME: diagnose if no Thumb2
  654. uint32_t Binary = (Value - 4) >> 1;
  655. return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
  656. }
  657. case ARM::fixup_arm_thumb_br:
  658. // Offset by 4 and don't encode the lower bit, which is always 0.
  659. assert(STI != nullptr);
  660. if (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
  661. !STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
  662. const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
  663. if (FixupDiagnostic) {
  664. Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
  665. return 0;
  666. }
  667. }
  668. return ((Value - 4) >> 1) & 0x7ff;
  669. case ARM::fixup_arm_thumb_bcc:
  670. // Offset by 4 and don't encode the lower bit, which is always 0.
  671. assert(STI != nullptr);
  672. if (!STI->getFeatureBits()[ARM::FeatureThumb2]) {
  673. const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
  674. if (FixupDiagnostic) {
  675. Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
  676. return 0;
  677. }
  678. }
  679. return ((Value - 4) >> 1) & 0xff;
  680. case ARM::fixup_arm_pcrel_10_unscaled: {
  681. Value = Value - 8; // ARM fixups offset by an additional word and don't
  682. // need to adjust for the half-word ordering.
  683. bool isAdd = true;
  684. if ((int64_t)Value < 0) {
  685. Value = -Value;
  686. isAdd = false;
  687. }
  688. // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
  689. if (Value >= 256) {
  690. Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
  691. return 0;
  692. }
  693. Value = (Value & 0xf) | ((Value & 0xf0) << 4);
  694. return Value | (isAdd << 23);
  695. }
  696. case ARM::fixup_arm_pcrel_10:
  697. Value = Value - 4; // ARM fixups offset by an additional word and don't
  698. // need to adjust for the half-word ordering.
  699. LLVM_FALLTHROUGH;
  700. case ARM::fixup_t2_pcrel_10: {
  701. // Offset by 4, adjusted by two due to the half-word ordering of thumb.
  702. Value = Value - 4;
  703. bool isAdd = true;
  704. if ((int64_t)Value < 0) {
  705. Value = -Value;
  706. isAdd = false;
  707. }
  708. // These values don't encode the low two bits since they're always zero.
  709. Value >>= 2;
  710. if (Value >= 256) {
  711. Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
  712. return 0;
  713. }
  714. Value |= isAdd << 23;
  715. // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
  716. // swapped.
  717. if (Kind == ARM::fixup_t2_pcrel_10)
  718. return swapHalfWords(Value, Endian == support::little);
  719. return Value;
  720. }
  721. case ARM::fixup_arm_pcrel_9:
  722. Value = Value - 4; // ARM fixups offset by an additional word and don't
  723. // need to adjust for the half-word ordering.
  724. LLVM_FALLTHROUGH;
  725. case ARM::fixup_t2_pcrel_9: {
  726. // Offset by 4, adjusted by two due to the half-word ordering of thumb.
  727. Value = Value - 4;
  728. bool isAdd = true;
  729. if ((int64_t)Value < 0) {
  730. Value = -Value;
  731. isAdd = false;
  732. }
  733. // These values don't encode the low bit since it's always zero.
  734. if (Value & 1) {
  735. Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
  736. return 0;
  737. }
  738. Value >>= 1;
  739. if (Value >= 256) {
  740. Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
  741. return 0;
  742. }
  743. Value |= isAdd << 23;
  744. // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
  745. // swapped.
  746. if (Kind == ARM::fixup_t2_pcrel_9)
  747. return swapHalfWords(Value, Endian == support::little);
  748. return Value;
  749. }
  750. case ARM::fixup_arm_mod_imm:
  751. Value = ARM_AM::getSOImmVal(Value);
  752. if (Value >> 12) {
  753. Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
  754. return 0;
  755. }
  756. return Value;
  757. case ARM::fixup_t2_so_imm: {
  758. Value = ARM_AM::getT2SOImmVal(Value);
  759. if ((int64_t)Value < 0) {
  760. Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
  761. return 0;
  762. }
  763. // Value will contain a 12-bit value broken up into a 4-bit shift in bits
  764. // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
  765. // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
  766. // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
  767. // half-word.
  768. uint64_t EncValue = 0;
  769. EncValue |= (Value & 0x800) << 15;
  770. EncValue |= (Value & 0x700) << 4;
  771. EncValue |= (Value & 0xff);
  772. return swapHalfWords(EncValue, Endian == support::little);
  773. }
  774. case ARM::fixup_bf_branch: {
  775. const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
  776. if (FixupDiagnostic) {
  777. Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
  778. return 0;
  779. }
  780. uint32_t out = (((Value - 4) >> 1) & 0xf) << 23;
  781. return swapHalfWords(out, Endian == support::little);
  782. }
  783. case ARM::fixup_bf_target:
  784. case ARM::fixup_bfl_target:
  785. case ARM::fixup_bfc_target: {
  786. const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
  787. if (FixupDiagnostic) {
  788. Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
  789. return 0;
  790. }
  791. uint32_t out = 0;
  792. uint32_t HighBitMask = (Kind == ARM::fixup_bf_target ? 0xf800 :
  793. Kind == ARM::fixup_bfl_target ? 0x3f800 : 0x800);
  794. out |= (((Value - 4) >> 1) & 0x1) << 11;
  795. out |= (((Value - 4) >> 1) & 0x7fe);
  796. out |= (((Value - 4) >> 1) & HighBitMask) << 5;
  797. return swapHalfWords(out, Endian == support::little);
  798. }
  799. case ARM::fixup_bfcsel_else_target: {
  800. // If this is a fixup of a branch future's else target then it should be a
  801. // constant MCExpr representing the distance between the branch targetted
  802. // and the instruction after that same branch.
  803. Value = Target.getConstant();
  804. const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
  805. if (FixupDiagnostic) {
  806. Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
  807. return 0;
  808. }
  809. uint32_t out = ((Value >> 2) & 1) << 17;
  810. return swapHalfWords(out, Endian == support::little);
  811. }
  812. case ARM::fixup_wls:
  813. case ARM::fixup_le: {
  814. const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
  815. if (FixupDiagnostic) {
  816. Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
  817. return 0;
  818. }
  819. uint64_t real_value = Value - 4;
  820. uint32_t out = 0;
  821. if (Kind == ARM::fixup_le)
  822. real_value = -real_value;
  823. out |= ((real_value >> 1) & 0x1) << 11;
  824. out |= ((real_value >> 1) & 0x7fe);
  825. return swapHalfWords(out, Endian == support::little);
  826. }
  827. }
  828. }
  829. bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
  830. const MCFixup &Fixup,
  831. const MCValue &Target) {
  832. const MCSymbolRefExpr *A = Target.getSymA();
  833. const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
  834. const unsigned FixupKind = Fixup.getKind();
  835. if (FixupKind >= FirstLiteralRelocationKind)
  836. return true;
  837. if (FixupKind == ARM::fixup_arm_thumb_bl) {
  838. assert(Sym && "How did we resolve this?");
  839. // If the symbol is external the linker will handle it.
  840. // FIXME: Should we handle it as an optimization?
  841. // If the symbol is out of range, produce a relocation and hope the
  842. // linker can handle it. GNU AS produces an error in this case.
  843. if (Sym->isExternal())
  844. return true;
  845. }
  846. // Create relocations for unconditional branches to function symbols with
  847. // different execution mode in ELF binaries.
  848. if (Sym && Sym->isELF()) {
  849. unsigned Type = cast<MCSymbolELF>(Sym)->getType();
  850. if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
  851. if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
  852. return true;
  853. if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
  854. FixupKind == ARM::fixup_arm_thumb_bl ||
  855. FixupKind == ARM::fixup_t2_condbranch ||
  856. FixupKind == ARM::fixup_t2_uncondbranch))
  857. return true;
  858. }
  859. }
  860. // We must always generate a relocation for BL/BLX instructions if we have
  861. // a symbol to reference, as the linker relies on knowing the destination
  862. // symbol's thumb-ness to get interworking right.
  863. if (A && (FixupKind == ARM::fixup_arm_thumb_blx ||
  864. FixupKind == ARM::fixup_arm_blx ||
  865. FixupKind == ARM::fixup_arm_uncondbl ||
  866. FixupKind == ARM::fixup_arm_condbl))
  867. return true;
  868. return false;
  869. }
  870. /// getFixupKindNumBytes - The number of bytes the fixup may change.
  871. static unsigned getFixupKindNumBytes(unsigned Kind) {
  872. switch (Kind) {
  873. default:
  874. llvm_unreachable("Unknown fixup kind!");
  875. case FK_Data_1:
  876. case ARM::fixup_arm_thumb_bcc:
  877. case ARM::fixup_arm_thumb_cp:
  878. case ARM::fixup_thumb_adr_pcrel_10:
  879. return 1;
  880. case FK_Data_2:
  881. case ARM::fixup_arm_thumb_br:
  882. case ARM::fixup_arm_thumb_cb:
  883. case ARM::fixup_arm_mod_imm:
  884. return 2;
  885. case ARM::fixup_arm_pcrel_10_unscaled:
  886. case ARM::fixup_arm_ldst_pcrel_12:
  887. case ARM::fixup_arm_pcrel_10:
  888. case ARM::fixup_arm_pcrel_9:
  889. case ARM::fixup_arm_ldst_abs_12:
  890. case ARM::fixup_arm_adr_pcrel_12:
  891. case ARM::fixup_arm_uncondbl:
  892. case ARM::fixup_arm_condbl:
  893. case ARM::fixup_arm_blx:
  894. case ARM::fixup_arm_condbranch:
  895. case ARM::fixup_arm_uncondbranch:
  896. return 3;
  897. case FK_Data_4:
  898. case ARM::fixup_t2_ldst_pcrel_12:
  899. case ARM::fixup_t2_condbranch:
  900. case ARM::fixup_t2_uncondbranch:
  901. case ARM::fixup_t2_pcrel_10:
  902. case ARM::fixup_t2_pcrel_9:
  903. case ARM::fixup_t2_adr_pcrel_12:
  904. case ARM::fixup_arm_thumb_bl:
  905. case ARM::fixup_arm_thumb_blx:
  906. case ARM::fixup_arm_movt_hi16:
  907. case ARM::fixup_arm_movw_lo16:
  908. case ARM::fixup_t2_movt_hi16:
  909. case ARM::fixup_t2_movw_lo16:
  910. case ARM::fixup_t2_so_imm:
  911. case ARM::fixup_bf_branch:
  912. case ARM::fixup_bf_target:
  913. case ARM::fixup_bfl_target:
  914. case ARM::fixup_bfc_target:
  915. case ARM::fixup_bfcsel_else_target:
  916. case ARM::fixup_wls:
  917. case ARM::fixup_le:
  918. return 4;
  919. case FK_SecRel_2:
  920. return 2;
  921. case FK_SecRel_4:
  922. return 4;
  923. }
  924. }
  925. /// getFixupKindContainerSizeBytes - The number of bytes of the
  926. /// container involved in big endian.
  927. static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
  928. switch (Kind) {
  929. default:
  930. llvm_unreachable("Unknown fixup kind!");
  931. case FK_Data_1:
  932. return 1;
  933. case FK_Data_2:
  934. return 2;
  935. case FK_Data_4:
  936. return 4;
  937. case ARM::fixup_arm_thumb_bcc:
  938. case ARM::fixup_arm_thumb_cp:
  939. case ARM::fixup_thumb_adr_pcrel_10:
  940. case ARM::fixup_arm_thumb_br:
  941. case ARM::fixup_arm_thumb_cb:
  942. // Instruction size is 2 bytes.
  943. return 2;
  944. case ARM::fixup_arm_pcrel_10_unscaled:
  945. case ARM::fixup_arm_ldst_pcrel_12:
  946. case ARM::fixup_arm_pcrel_10:
  947. case ARM::fixup_arm_pcrel_9:
  948. case ARM::fixup_arm_adr_pcrel_12:
  949. case ARM::fixup_arm_uncondbl:
  950. case ARM::fixup_arm_condbl:
  951. case ARM::fixup_arm_blx:
  952. case ARM::fixup_arm_condbranch:
  953. case ARM::fixup_arm_uncondbranch:
  954. case ARM::fixup_t2_ldst_pcrel_12:
  955. case ARM::fixup_t2_condbranch:
  956. case ARM::fixup_t2_uncondbranch:
  957. case ARM::fixup_t2_pcrel_10:
  958. case ARM::fixup_t2_pcrel_9:
  959. case ARM::fixup_t2_adr_pcrel_12:
  960. case ARM::fixup_arm_thumb_bl:
  961. case ARM::fixup_arm_thumb_blx:
  962. case ARM::fixup_arm_movt_hi16:
  963. case ARM::fixup_arm_movw_lo16:
  964. case ARM::fixup_t2_movt_hi16:
  965. case ARM::fixup_t2_movw_lo16:
  966. case ARM::fixup_arm_mod_imm:
  967. case ARM::fixup_t2_so_imm:
  968. case ARM::fixup_bf_branch:
  969. case ARM::fixup_bf_target:
  970. case ARM::fixup_bfl_target:
  971. case ARM::fixup_bfc_target:
  972. case ARM::fixup_bfcsel_else_target:
  973. case ARM::fixup_wls:
  974. case ARM::fixup_le:
  975. // Instruction size is 4 bytes.
  976. return 4;
  977. }
  978. }
  979. void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
  980. const MCValue &Target,
  981. MutableArrayRef<char> Data, uint64_t Value,
  982. bool IsResolved,
  983. const MCSubtargetInfo* STI) const {
  984. unsigned Kind = Fixup.getKind();
  985. if (Kind >= FirstLiteralRelocationKind)
  986. return;
  987. MCContext &Ctx = Asm.getContext();
  988. Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx, STI);
  989. if (!Value)
  990. return; // Doesn't change encoding.
  991. const unsigned NumBytes = getFixupKindNumBytes(Kind);
  992. unsigned Offset = Fixup.getOffset();
  993. assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
  994. // Used to point to big endian bytes.
  995. unsigned FullSizeBytes;
  996. if (Endian == support::big) {
  997. FullSizeBytes = getFixupKindContainerSizeBytes(Kind);
  998. assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
  999. assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
  1000. }
  1001. // For each byte of the fragment that the fixup touches, mask in the bits from
  1002. // the fixup value. The Value has been "split up" into the appropriate
  1003. // bitfields above.
  1004. for (unsigned i = 0; i != NumBytes; ++i) {
  1005. unsigned Idx = Endian == support::little ? i : (FullSizeBytes - 1 - i);
  1006. Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
  1007. }
  1008. }
namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  // Bits [24:27] select the unwinding mode.
  UNWIND_ARM_MODE_MASK = 0x0F000000,
  // Standard r7-based frame; saved GPRs are described by the PUSH bits below.
  UNWIND_ARM_MODE_FRAME = 0x01000000,
  // Same frame, plus saved D-registers counted by D_REG_COUNT_MASK.
  UNWIND_ARM_MODE_FRAME_D = 0x02000000,
  // Frame cannot be encoded compactly; unwinder must fall back to DWARF.
  UNWIND_ARM_MODE_DWARF = 0x04000000,

  // Bits [22:23] hold a 0/4/8/12-byte stack adjustment (e.g. for var-args),
  // encoded as adjust/4 (see the StackAdjust switch in
  // generateCompactUnwindEncoding).
  UNWIND_ARM_FRAME_STACK_ADJUST_MASK = 0x00C00000,

  // GPRs saved below the frame, one flag bit per register. "FIRST" bits are
  // the registers expected right below r7, "SECOND" the group pushed after.
  UNWIND_ARM_FRAME_FIRST_PUSH_R4 = 0x00000001,
  UNWIND_ARM_FRAME_FIRST_PUSH_R5 = 0x00000002,
  UNWIND_ARM_FRAME_FIRST_PUSH_R6 = 0x00000004,
  UNWIND_ARM_FRAME_SECOND_PUSH_R8 = 0x00000008,
  UNWIND_ARM_FRAME_SECOND_PUSH_R9 = 0x00000010,
  UNWIND_ARM_FRAME_SECOND_PUSH_R10 = 0x00000020,
  UNWIND_ARM_FRAME_SECOND_PUSH_R11 = 0x00000040,
  UNWIND_ARM_FRAME_SECOND_PUSH_R12 = 0x00000080,

  // Bits [8:11]: number of saved D-registers minus one (FRAME_D mode).
  UNWIND_ARM_FRAME_D_REG_COUNT_MASK = 0x00000F00,

  // DWARF mode: low 24 bits give the offset into the DWARF unwind section.
  UNWIND_ARM_DWARF_SECTION_OFFSET = 0x00FFFFFF
};

} // end CU namespace
  1029. /// Generate compact unwind encoding for the function based on the CFI
  1030. /// instructions. If the CFI instructions describe a frame that cannot be
  1031. /// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
  1032. /// tells the runtime to fallback and unwind using dwarf.
  1033. uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
  1034. ArrayRef<MCCFIInstruction> Instrs) const {
  1035. DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
  1036. // Only armv7k uses CFI based unwinding.
  1037. if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
  1038. return 0;
  1039. // No .cfi directives means no frame.
  1040. if (Instrs.empty())
  1041. return 0;
  1042. // Start off assuming CFA is at SP+0.
  1043. unsigned CFARegister = ARM::SP;
  1044. int CFARegisterOffset = 0;
  1045. // Mark savable registers as initially unsaved
  1046. DenseMap<unsigned, int> RegOffsets;
  1047. int FloatRegCount = 0;
  1048. // Process each .cfi directive and build up compact unwind info.
  1049. for (const MCCFIInstruction &Inst : Instrs) {
  1050. unsigned Reg;
  1051. switch (Inst.getOperation()) {
  1052. case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
  1053. CFARegisterOffset = Inst.getOffset();
  1054. CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
  1055. break;
  1056. case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
  1057. CFARegisterOffset = Inst.getOffset();
  1058. break;
  1059. case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
  1060. CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
  1061. break;
  1062. case MCCFIInstruction::OpOffset: // DW_CFA_offset
  1063. Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
  1064. if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
  1065. RegOffsets[Reg] = Inst.getOffset();
  1066. else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
  1067. RegOffsets[Reg] = Inst.getOffset();
  1068. ++FloatRegCount;
  1069. } else {
  1070. DEBUG_WITH_TYPE("compact-unwind",
  1071. llvm::dbgs() << ".cfi_offset on unknown register="
  1072. << Inst.getRegister() << "\n");
  1073. return CU::UNWIND_ARM_MODE_DWARF;
  1074. }
  1075. break;
  1076. case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
  1077. // Ignore
  1078. break;
  1079. default:
  1080. // Directive not convertable to compact unwind, bail out.
  1081. DEBUG_WITH_TYPE("compact-unwind",
  1082. llvm::dbgs()
  1083. << "CFI directive not compatiable with comact "
  1084. "unwind encoding, opcode=" << Inst.getOperation()
  1085. << "\n");
  1086. return CU::UNWIND_ARM_MODE_DWARF;
  1087. break;
  1088. }
  1089. }
  1090. // If no frame set up, return no unwind info.
  1091. if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
  1092. return 0;
  1093. // Verify standard frame (lr/r7) was used.
  1094. if (CFARegister != ARM::R7) {
  1095. DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
  1096. << CFARegister
  1097. << " instead of r7\n");
  1098. return CU::UNWIND_ARM_MODE_DWARF;
  1099. }
  1100. int StackAdjust = CFARegisterOffset - 8;
  1101. if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
  1102. DEBUG_WITH_TYPE("compact-unwind",
  1103. llvm::dbgs()
  1104. << "LR not saved as standard frame, StackAdjust="
  1105. << StackAdjust
  1106. << ", CFARegisterOffset=" << CFARegisterOffset
  1107. << ", lr save at offset=" << RegOffsets[14] << "\n");
  1108. return CU::UNWIND_ARM_MODE_DWARF;
  1109. }
  1110. if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
  1111. DEBUG_WITH_TYPE("compact-unwind",
  1112. llvm::dbgs() << "r7 not saved as standard frame\n");
  1113. return CU::UNWIND_ARM_MODE_DWARF;
  1114. }
  1115. uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;
  1116. // If var-args are used, there may be a stack adjust required.
  1117. switch (StackAdjust) {
  1118. case 0:
  1119. break;
  1120. case 4:
  1121. CompactUnwindEncoding |= 0x00400000;
  1122. break;
  1123. case 8:
  1124. CompactUnwindEncoding |= 0x00800000;
  1125. break;
  1126. case 12:
  1127. CompactUnwindEncoding |= 0x00C00000;
  1128. break;
  1129. default:
  1130. DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
  1131. << ".cfi_def_cfa stack adjust ("
  1132. << StackAdjust << ") out of range\n");
  1133. return CU::UNWIND_ARM_MODE_DWARF;
  1134. }
  1135. // If r6 is saved, it must be right below r7.
  1136. static struct {
  1137. unsigned Reg;
  1138. unsigned Encoding;
  1139. } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
  1140. {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
  1141. {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
  1142. {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
  1143. {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
  1144. {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
  1145. {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
  1146. {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};
  1147. int CurOffset = -8 - StackAdjust;
  1148. for (auto CSReg : GPRCSRegs) {
  1149. auto Offset = RegOffsets.find(CSReg.Reg);
  1150. if (Offset == RegOffsets.end())
  1151. continue;
  1152. int RegOffset = Offset->second;
  1153. if (RegOffset != CurOffset - 4) {
  1154. DEBUG_WITH_TYPE("compact-unwind",
  1155. llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
  1156. << RegOffset << " but only supported at "
  1157. << CurOffset << "\n");
  1158. return CU::UNWIND_ARM_MODE_DWARF;
  1159. }
  1160. CompactUnwindEncoding |= CSReg.Encoding;
  1161. CurOffset -= 4;
  1162. }
  1163. // If no floats saved, we are done.
  1164. if (FloatRegCount == 0)
  1165. return CompactUnwindEncoding;
  1166. // Switch mode to include D register saving.
  1167. CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
  1168. CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;
  1169. // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
  1170. // but needs coordination with the linker and libunwind.
  1171. if (FloatRegCount > 4) {
  1172. DEBUG_WITH_TYPE("compact-unwind",
  1173. llvm::dbgs() << "unsupported number of D registers saved ("
  1174. << FloatRegCount << ")\n");
  1175. return CU::UNWIND_ARM_MODE_DWARF;
  1176. }
  1177. // Floating point registers must either be saved sequentially, or we defer to
  1178. // DWARF. No gaps allowed here so check that each saved d-register is
  1179. // precisely where it should be.
  1180. static unsigned FPRCSRegs[] = { ARM::D8, ARM::D10, ARM::D12, ARM::D14 };
  1181. for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
  1182. auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
  1183. if (Offset == RegOffsets.end()) {
  1184. DEBUG_WITH_TYPE("compact-unwind",
  1185. llvm::dbgs() << FloatRegCount << " D-regs saved, but "
  1186. << MRI.getName(FPRCSRegs[Idx])
  1187. << " not saved\n");
  1188. return CU::UNWIND_ARM_MODE_DWARF;
  1189. } else if (Offset->second != CurOffset - 8) {
  1190. DEBUG_WITH_TYPE("compact-unwind",
  1191. llvm::dbgs() << FloatRegCount << " D-regs saved, but "
  1192. << MRI.getName(FPRCSRegs[Idx])
  1193. << " saved at " << Offset->second
  1194. << ", expected at " << CurOffset - 8
  1195. << "\n");
  1196. return CU::UNWIND_ARM_MODE_DWARF;
  1197. }
  1198. CurOffset -= 8;
  1199. }
  1200. return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
  1201. }
  1202. static MCAsmBackend *createARMAsmBackend(const Target &T,
  1203. const MCSubtargetInfo &STI,
  1204. const MCRegisterInfo &MRI,
  1205. const MCTargetOptions &Options,
  1206. support::endianness Endian) {
  1207. const Triple &TheTriple = STI.getTargetTriple();
  1208. switch (TheTriple.getObjectFormat()) {
  1209. default:
  1210. llvm_unreachable("unsupported object format");
  1211. case Triple::MachO:
  1212. return new ARMAsmBackendDarwin(T, STI, MRI);
  1213. case Triple::COFF:
  1214. assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
  1215. return new ARMAsmBackendWinCOFF(T, STI.getTargetTriple().isThumb());
  1216. case Triple::ELF:
  1217. assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
  1218. uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  1219. return new ARMAsmBackendELF(T, STI.getTargetTriple().isThumb(), OSABI,
  1220. Endian);
  1221. }
  1222. }
/// Entry point for creating a little-endian ARM asm backend; forwards to
/// createARMAsmBackend with support::little.
MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, support::little);
}
/// Entry point for creating a big-endian ARM asm backend; forwards to
/// createARMAsmBackend with support::big.
MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, support::big);
}