ARMAsmBackend.cpp 49 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326
  1. //===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. #include "MCTargetDesc/ARMAsmBackend.h"
  9. #include "MCTargetDesc/ARMAddressingModes.h"
  10. #include "MCTargetDesc/ARMAsmBackendDarwin.h"
  11. #include "MCTargetDesc/ARMAsmBackendELF.h"
  12. #include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
  13. #include "MCTargetDesc/ARMFixupKinds.h"
  14. #include "MCTargetDesc/ARMMCTargetDesc.h"
  15. #include "llvm/ADT/StringSwitch.h"
  16. #include "llvm/BinaryFormat/ELF.h"
  17. #include "llvm/BinaryFormat/MachO.h"
  18. #include "llvm/MC/MCAsmBackend.h"
  19. #include "llvm/MC/MCAssembler.h"
  20. #include "llvm/MC/MCContext.h"
  21. #include "llvm/MC/MCDirectives.h"
  22. #include "llvm/MC/MCELFObjectWriter.h"
  23. #include "llvm/MC/MCExpr.h"
  24. #include "llvm/MC/MCFixupKindInfo.h"
  25. #include "llvm/MC/MCObjectWriter.h"
  26. #include "llvm/MC/MCRegisterInfo.h"
  27. #include "llvm/MC/MCSectionELF.h"
  28. #include "llvm/MC/MCSectionMachO.h"
  29. #include "llvm/MC/MCSubtargetInfo.h"
  30. #include "llvm/MC/MCValue.h"
  31. #include "llvm/MC/MCAsmLayout.h"
  32. #include "llvm/Support/Debug.h"
  33. #include "llvm/Support/EndianStream.h"
  34. #include "llvm/Support/ErrorHandling.h"
  35. #include "llvm/Support/Format.h"
  36. #include "llvm/Support/TargetParser.h"
  37. #include "llvm/Support/raw_ostream.h"
  38. using namespace llvm;
namespace {
// Minimal ELF target-writer description for ARM: 32-bit objects with
// EM_ARM machine type and REL-style relocations (addends are stored in the
// relocated field, not in the relocation entry).
class ARMELFObjectWriter : public MCELFObjectTargetWriter {
public:
  ARMELFObjectWriter(uint8_t OSABI)
      : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
                                /*HasRelocationAddend*/ false) {}
};
} // end anonymous namespace
// The generic ARM backend recognizes no target-specific names from .reloc
// directives; object-format subclasses (e.g. the ELF backend below)
// override this to provide their relocation names.
std::optional<MCFixupKind> ARMAsmBackend::getFixupKind(StringRef Name) const {
  return std::nullopt;
}
// Map a .reloc directive name to a fixup kind for ELF targets. Every
// R_ARM_* relocation name from ELFRelocs/ARM.def is accepted, plus a few
// generic BFD_RELOC_* aliases. The returned kind is biased by
// FirstLiteralRelocationKind so later stages emit the relocation literally
// without any target processing.
std::optional<MCFixupKind>
ARMAsmBackendELF::getFixupKind(StringRef Name) const {
  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/ARM.def"
#undef ELF_RELOC
                      .Case("BFD_RELOC_NONE", ELF::R_ARM_NONE)
                      .Case("BFD_RELOC_8", ELF::R_ARM_ABS8)
                      .Case("BFD_RELOC_16", ELF::R_ARM_ABS16)
                      .Case("BFD_RELOC_32", ELF::R_ARM_ABS32)
                      .Default(-1u);
  // -1u is the "not found" sentinel; no R_ARM_* relocation uses that value.
  if (Type == -1u)
    return std::nullopt;
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}
// Describe each target fixup kind (name, bit offset, bit size, flags).
// Two tables are kept because the bit offsets of several fixups differ
// between little- and big-endian instruction encodings.
const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  unsigned IsPCRelConstant =
      MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_Constant;
  const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant},
      {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_ldst_abs_12", 0, 32, 0},
      {"fixup_thumb_adr_pcrel_10", 0, 8,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12, 16
      // - 19.
      {"fixup_arm_movt_hi16", 0, 20, 0},
      {"fixup_arm_movw_lo16", 0, 20, 0},
      {"fixup_t2_movt_hi16", 0, 20, 0},
      {"fixup_t2_movw_lo16", 0, 20, 0},
      {"fixup_arm_mod_imm", 0, 12, 0},
      {"fixup_t2_so_imm", 0, 26, 0},
      {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfcsel_else_target", 0, 32, 0},
      {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}};
  const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant},
      {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_ldst_abs_12", 0, 32, 0},
      {"fixup_thumb_adr_pcrel_10", 8, 8,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12, 16
      // - 19.
      {"fixup_arm_movt_hi16", 12, 20, 0},
      {"fixup_arm_movw_lo16", 12, 20, 0},
      {"fixup_t2_movt_hi16", 12, 20, 0},
      {"fixup_t2_movw_lo16", 12, 20, 0},
      {"fixup_arm_mod_imm", 20, 12, 0},
      {"fixup_t2_so_imm", 26, 6, 0},
      {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfcsel_else_target", 0, 32, 0},
      {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}};
  // Fixup kinds from .reloc directive are like R_ARM_NONE. They do not require
  // any extra processing.
  if (Kind >= FirstLiteralRelocationKind)
    return MCAsmBackend::getFixupKindInfo(FK_NONE);
  // Generic (non-target) fixups are described by the base class.
  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);
  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return (Endian == support::little ? InfosLE
                                    : InfosBE)[Kind - FirstTargetFixupKind];
}
  187. void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
  188. switch (Flag) {
  189. default:
  190. break;
  191. case MCAF_Code16:
  192. setIsThumb(true);
  193. break;
  194. case MCAF_Code32:
  195. setIsThumb(false);
  196. break;
  197. }
  198. }
  199. unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op,
  200. const MCSubtargetInfo &STI) const {
  201. bool HasThumb2 = STI.getFeatureBits()[ARM::FeatureThumb2];
  202. bool HasV8MBaselineOps = STI.getFeatureBits()[ARM::HasV8MBaselineOps];
  203. switch (Op) {
  204. default:
  205. return Op;
  206. case ARM::tBcc:
  207. return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
  208. case ARM::tLDRpci:
  209. return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
  210. case ARM::tADR:
  211. return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
  212. case ARM::tB:
  213. return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
  214. case ARM::tCBZ:
  215. return ARM::tHINT;
  216. case ARM::tCBNZ:
  217. return ARM::tHINT;
  218. }
  219. }
  220. bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst,
  221. const MCSubtargetInfo &STI) const {
  222. if (getRelaxedOpcode(Inst.getOpcode(), STI) != Inst.getOpcode())
  223. return true;
  224. return false;
  225. }
  226. static const char *checkPCRelOffset(uint64_t Value, int64_t Min, int64_t Max) {
  227. int64_t Offset = int64_t(Value) - 4;
  228. if (Offset < Min || Offset > Max)
  229. return "out of range pc-relative fixup value";
  230. return nullptr;
  231. }
  232. const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
  233. uint64_t Value) const {
  234. switch (Fixup.getTargetKind()) {
  235. case ARM::fixup_arm_thumb_br: {
  236. // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
  237. // low bit being an implied zero. There's an implied +4 offset for the
  238. // branch, so we adjust the other way here to determine what's
  239. // encodable.
  240. //
  241. // Relax if the value is too big for a (signed) i8.
  242. int64_t Offset = int64_t(Value) - 4;
  243. if (Offset > 2046 || Offset < -2048)
  244. return "out of range pc-relative fixup value";
  245. break;
  246. }
  247. case ARM::fixup_arm_thumb_bcc: {
  248. // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
  249. // low bit being an implied zero. There's an implied +4 offset for the
  250. // branch, so we adjust the other way here to determine what's
  251. // encodable.
  252. //
  253. // Relax if the value is too big for a (signed) i8.
  254. int64_t Offset = int64_t(Value) - 4;
  255. if (Offset > 254 || Offset < -256)
  256. return "out of range pc-relative fixup value";
  257. break;
  258. }
  259. case ARM::fixup_thumb_adr_pcrel_10:
  260. case ARM::fixup_arm_thumb_cp: {
  261. // If the immediate is negative, greater than 1020, or not a multiple
  262. // of four, the wide version of the instruction must be used.
  263. int64_t Offset = int64_t(Value) - 4;
  264. if (Offset & 3)
  265. return "misaligned pc-relative fixup value";
  266. else if (Offset > 1020 || Offset < 0)
  267. return "out of range pc-relative fixup value";
  268. break;
  269. }
  270. case ARM::fixup_arm_thumb_cb: {
  271. // If we have a Thumb CBZ or CBNZ instruction and its target is the next
  272. // instruction it is actually out of range for the instruction.
  273. // It will be changed to a NOP.
  274. int64_t Offset = (Value & ~1);
  275. if (Offset == 2)
  276. return "will be converted to nop";
  277. break;
  278. }
  279. case ARM::fixup_bf_branch:
  280. return checkPCRelOffset(Value, 0, 30);
  281. case ARM::fixup_bf_target:
  282. return checkPCRelOffset(Value, -0x10000, +0xfffe);
  283. case ARM::fixup_bfl_target:
  284. return checkPCRelOffset(Value, -0x40000, +0x3fffe);
  285. case ARM::fixup_bfc_target:
  286. return checkPCRelOffset(Value, -0x1000, +0xffe);
  287. case ARM::fixup_wls:
  288. return checkPCRelOffset(Value, 0, +0xffe);
  289. case ARM::fixup_le:
  290. // The offset field in the LE and LETP instructions is an 11-bit
  291. // value shifted left by 2 (i.e. 0,2,4,...,4094), and it is
  292. // interpreted as a negative offset from the value read from pc,
  293. // i.e. from instruction_address+4.
  294. //
  295. // So an LE instruction can in principle address the instruction
  296. // immediately after itself, or (not very usefully) the address
  297. // half way through the 4-byte LE.
  298. return checkPCRelOffset(Value, -0xffe, 0);
  299. case ARM::fixup_bfcsel_else_target: {
  300. if (Value != 2 && Value != 4)
  301. return "out of range label-relative fixup value";
  302. break;
  303. }
  304. default:
  305. llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
  306. }
  307. return nullptr;
  308. }
  309. bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
  310. const MCRelaxableFragment *DF,
  311. const MCAsmLayout &Layout) const {
  312. return reasonForFixupRelaxation(Fixup, Value);
  313. }
  314. void ARMAsmBackend::relaxInstruction(MCInst &Inst,
  315. const MCSubtargetInfo &STI) const {
  316. unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode(), STI);
  317. // Return a diagnostic if we get here w/ a bogus instruction.
  318. if (RelaxedOp == Inst.getOpcode()) {
  319. SmallString<256> Tmp;
  320. raw_svector_ostream OS(Tmp);
  321. Inst.dump_pretty(OS);
  322. OS << "\n";
  323. report_fatal_error("unexpected instruction to relax: " + OS.str());
  324. }
  325. // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
  326. // have to change the operands too.
  327. if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
  328. RelaxedOp == ARM::tHINT) {
  329. MCInst Res;
  330. Res.setOpcode(RelaxedOp);
  331. Res.addOperand(MCOperand::createImm(0));
  332. Res.addOperand(MCOperand::createImm(14));
  333. Res.addOperand(MCOperand::createReg(0));
  334. Inst = std::move(Res);
  335. return;
  336. }
  337. // The rest of instructions we're relaxing have the same operands.
  338. // We just need to update to the proper opcode.
  339. Inst.setOpcode(RelaxedOp);
  340. }
  341. bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
  342. const MCSubtargetInfo *STI) const {
  343. const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
  344. const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
  345. const uint32_t ARMv4_NopEncoding = 0xe1a00000; // using MOV r0,r0
  346. const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
  347. if (isThumb()) {
  348. const uint16_t nopEncoding =
  349. hasNOP(STI) ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
  350. uint64_t NumNops = Count / 2;
  351. for (uint64_t i = 0; i != NumNops; ++i)
  352. support::endian::write(OS, nopEncoding, Endian);
  353. if (Count & 1)
  354. OS << '\0';
  355. return true;
  356. }
  357. // ARM mode
  358. const uint32_t nopEncoding =
  359. hasNOP(STI) ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
  360. uint64_t NumNops = Count / 4;
  361. for (uint64_t i = 0; i != NumNops; ++i)
  362. support::endian::write(OS, nopEncoding, Endian);
  363. // FIXME: should this function return false when unable to write exactly
  364. // 'Count' bytes with NOP encodings?
  365. switch (Count % 4) {
  366. default:
  367. break; // No leftover bytes to write
  368. case 1:
  369. OS << '\0';
  370. break;
  371. case 2:
  372. OS.write("\0\0", 2);
  373. break;
  374. case 3:
  375. OS.write("\0\0\xa0", 3);
  376. break;
  377. }
  378. return true;
  379. }
  380. static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
  381. if (IsLittleEndian) {
  382. // Note that the halfwords are stored high first and low second in thumb;
  383. // so we need to swap the fixup value here to map properly.
  384. uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
  385. Swapped |= (Value & 0x0000FFFF) << 16;
  386. return Swapped;
  387. } else
  388. return Value;
  389. }
  390. static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
  391. bool IsLittleEndian) {
  392. uint32_t Value;
  393. if (IsLittleEndian) {
  394. Value = (SecondHalf & 0xFFFF) << 16;
  395. Value |= (FirstHalf & 0xFFFF);
  396. } else {
  397. Value = (SecondHalf & 0xFFFF);
  398. Value |= (FirstHalf & 0xFFFF) << 16;
  399. }
  400. return Value;
  401. }
  402. unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
  403. const MCFixup &Fixup,
  404. const MCValue &Target, uint64_t Value,
  405. bool IsResolved, MCContext &Ctx,
  406. const MCSubtargetInfo* STI) const {
  407. unsigned Kind = Fixup.getKind();
  408. // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT
  409. // and .word relocations they put the Thumb bit into the addend if possible.
  410. // Other relocation types don't want this bit though (branches couldn't encode
  411. // it if it *was* present, and no other relocations exist) and it can
  412. // interfere with checking valid expressions.
  413. if (const MCSymbolRefExpr *A = Target.getSymA()) {
  414. if (A->hasSubsectionsViaSymbols() && Asm.isThumbFunc(&A->getSymbol()) &&
  415. A->getSymbol().isExternal() &&
  416. (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 ||
  417. Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 ||
  418. Kind == ARM::fixup_t2_movt_hi16))
  419. Value |= 1;
  420. }
  421. switch (Kind) {
  422. default:
  423. Ctx.reportError(Fixup.getLoc(), "bad relocation fixup type");
  424. return 0;
  425. case FK_Data_1:
  426. case FK_Data_2:
  427. case FK_Data_4:
  428. return Value;
  429. case FK_SecRel_2:
  430. return Value;
  431. case FK_SecRel_4:
  432. return Value;
  433. case ARM::fixup_arm_movt_hi16:
  434. assert(STI != nullptr);
  435. if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
  436. Value >>= 16;
  437. [[fallthrough]];
  438. case ARM::fixup_arm_movw_lo16: {
  439. unsigned Hi4 = (Value & 0xF000) >> 12;
  440. unsigned Lo12 = Value & 0x0FFF;
  441. // inst{19-16} = Hi4;
  442. // inst{11-0} = Lo12;
  443. Value = (Hi4 << 16) | (Lo12);
  444. return Value;
  445. }
  446. case ARM::fixup_t2_movt_hi16:
  447. assert(STI != nullptr);
  448. if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
  449. Value >>= 16;
  450. [[fallthrough]];
  451. case ARM::fixup_t2_movw_lo16: {
  452. unsigned Hi4 = (Value & 0xF000) >> 12;
  453. unsigned i = (Value & 0x800) >> 11;
  454. unsigned Mid3 = (Value & 0x700) >> 8;
  455. unsigned Lo8 = Value & 0x0FF;
  456. // inst{19-16} = Hi4;
  457. // inst{26} = i;
  458. // inst{14-12} = Mid3;
  459. // inst{7-0} = Lo8;
  460. Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
  461. return swapHalfWords(Value, Endian == support::little);
  462. }
  463. case ARM::fixup_arm_ldst_pcrel_12:
  464. // ARM PC-relative values are offset by 8.
  465. Value -= 4;
  466. [[fallthrough]];
  467. case ARM::fixup_t2_ldst_pcrel_12:
  468. // Offset by 4, adjusted by two due to the half-word ordering of thumb.
  469. Value -= 4;
  470. [[fallthrough]];
  471. case ARM::fixup_arm_ldst_abs_12: {
  472. bool isAdd = true;
  473. if ((int64_t)Value < 0) {
  474. Value = -Value;
  475. isAdd = false;
  476. }
  477. if (Value >= 4096) {
  478. Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
  479. return 0;
  480. }
  481. Value |= isAdd << 23;
  482. // Same addressing mode as fixup_arm_pcrel_10,
  483. // but with 16-bit halfwords swapped.
  484. if (Kind == ARM::fixup_t2_ldst_pcrel_12)
  485. return swapHalfWords(Value, Endian == support::little);
  486. return Value;
  487. }
  488. case ARM::fixup_arm_adr_pcrel_12: {
  489. // ARM PC-relative values are offset by 8.
  490. Value -= 8;
  491. unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
  492. if ((int64_t)Value < 0) {
  493. Value = -Value;
  494. opc = 2; // 0b0010
  495. }
  496. if (ARM_AM::getSOImmVal(Value) == -1) {
  497. Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
  498. return 0;
  499. }
  500. // Encode the immediate and shift the opcode into place.
  501. return ARM_AM::getSOImmVal(Value) | (opc << 21);
  502. }
  503. case ARM::fixup_t2_adr_pcrel_12: {
  504. Value -= 4;
  505. unsigned opc = 0;
  506. if ((int64_t)Value < 0) {
  507. Value = -Value;
  508. opc = 5;
  509. }
  510. uint32_t out = (opc << 21);
  511. out |= (Value & 0x800) << 15;
  512. out |= (Value & 0x700) << 4;
  513. out |= (Value & 0x0FF);
  514. return swapHalfWords(out, Endian == support::little);
  515. }
  516. case ARM::fixup_arm_condbranch:
  517. case ARM::fixup_arm_uncondbranch:
  518. case ARM::fixup_arm_uncondbl:
  519. case ARM::fixup_arm_condbl:
  520. case ARM::fixup_arm_blx:
  521. // These values don't encode the low two bits since they're always zero.
  522. // Offset by 8 just as above.
  523. if (const MCSymbolRefExpr *SRE =
  524. dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
  525. if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
  526. return 0;
  527. return 0xffffff & ((Value - 8) >> 2);
  528. case ARM::fixup_t2_uncondbranch: {
  529. Value = Value - 4;
  530. if (!isInt<25>(Value)) {
  531. Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
  532. return 0;
  533. }
  534. Value >>= 1; // Low bit is not encoded.
  535. uint32_t out = 0;
  536. bool I = Value & 0x800000;
  537. bool J1 = Value & 0x400000;
  538. bool J2 = Value & 0x200000;
  539. J1 ^= I;
  540. J2 ^= I;
  541. out |= I << 26; // S bit
  542. out |= !J1 << 13; // J1 bit
  543. out |= !J2 << 11; // J2 bit
  544. out |= (Value & 0x1FF800) << 5; // imm6 field
  545. out |= (Value & 0x0007FF); // imm11 field
  546. return swapHalfWords(out, Endian == support::little);
  547. }
  548. case ARM::fixup_t2_condbranch: {
  549. Value = Value - 4;
  550. if (!isInt<21>(Value)) {
  551. Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
  552. return 0;
  553. }
  554. Value >>= 1; // Low bit is not encoded.
  555. uint64_t out = 0;
  556. out |= (Value & 0x80000) << 7; // S bit
  557. out |= (Value & 0x40000) >> 7; // J2 bit
  558. out |= (Value & 0x20000) >> 4; // J1 bit
  559. out |= (Value & 0x1F800) << 5; // imm6 field
  560. out |= (Value & 0x007FF); // imm11 field
  561. return swapHalfWords(out, Endian == support::little);
  562. }
  563. case ARM::fixup_arm_thumb_bl: {
  564. if (!isInt<25>(Value - 4) ||
  565. (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
  566. !STI->getFeatureBits()[ARM::HasV8MBaselineOps] &&
  567. !STI->getFeatureBits()[ARM::HasV6MOps] &&
  568. !isInt<23>(Value - 4))) {
  569. Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
  570. return 0;
  571. }
  572. // The value doesn't encode the low bit (always zero) and is offset by
  573. // four. The 32-bit immediate value is encoded as
  574. // imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
  575. // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
  576. // The value is encoded into disjoint bit positions in the destination
  577. // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
  578. // J = either J1 or J2 bit
  579. //
  580. // BL: xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
  581. //
  582. // Note that the halfwords are stored high first, low second; so we need
  583. // to transpose the fixup value here to map properly.
  584. uint32_t offset = (Value - 4) >> 1;
  585. uint32_t signBit = (offset & 0x800000) >> 23;
  586. uint32_t I1Bit = (offset & 0x400000) >> 22;
  587. uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
  588. uint32_t I2Bit = (offset & 0x200000) >> 21;
  589. uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
  590. uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
  591. uint32_t imm11Bits = (offset & 0x000007FF);
  592. uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
  593. uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
  594. (uint16_t)imm11Bits);
  595. return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
  596. }
  597. case ARM::fixup_arm_thumb_blx: {
  598. // The value doesn't encode the low two bits (always zero) and is offset by
  599. // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
  600. // imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
  601. // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
  602. // The value is encoded into disjoint bit positions in the destination
  603. // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
  604. // J = either J1 or J2 bit, 0 = zero.
  605. //
  606. // BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
  607. //
  608. // Note that the halfwords are stored high first, low second; so we need
  609. // to transpose the fixup value here to map properly.
  610. if (Value % 4 != 0) {
  611. Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
  612. return 0;
  613. }
  614. uint32_t offset = (Value - 4) >> 2;
  615. if (const MCSymbolRefExpr *SRE =
  616. dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
  617. if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
  618. offset = 0;
  619. uint32_t signBit = (offset & 0x400000) >> 22;
  620. uint32_t I1Bit = (offset & 0x200000) >> 21;
  621. uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
  622. uint32_t I2Bit = (offset & 0x100000) >> 20;
  623. uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
  624. uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
  625. uint32_t imm10LBits = (offset & 0x3FF);
  626. uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
  627. uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
  628. ((uint16_t)imm10LBits) << 1);
  629. return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
  630. }
  631. case ARM::fixup_thumb_adr_pcrel_10:
  632. case ARM::fixup_arm_thumb_cp:
  633. // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
  634. // could have an error on our hands.
  635. assert(STI != nullptr);
  636. if (!STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
  637. const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
  638. if (FixupDiagnostic) {
  639. Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
  640. return 0;
  641. }
  642. }
  643. // Offset by 4, and don't encode the low two bits.
  644. return ((Value - 4) >> 2) & 0xff;
  645. case ARM::fixup_arm_thumb_cb: {
  646. // CB instructions can only branch to offsets in [4, 126] in multiples of 2
  647. // so ensure that the raw value LSB is zero and it lies in [2, 130].
  648. // An offset of 2 will be relaxed to a NOP.
  649. if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
  650. Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
  651. return 0;
  652. }
  653. // Offset by 4 and don't encode the lower bit, which is always 0.
  654. // FIXME: diagnose if no Thumb2
  655. uint32_t Binary = (Value - 4) >> 1;
  656. return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
  657. }
  658. case ARM::fixup_arm_thumb_br:
  659. // Offset by 4 and don't encode the lower bit, which is always 0.
  660. assert(STI != nullptr);
  661. if (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
  662. !STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
  663. const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
  664. if (FixupDiagnostic) {
  665. Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
  666. return 0;
  667. }
  668. }
  669. return ((Value - 4) >> 1) & 0x7ff;
  670. case ARM::fixup_arm_thumb_bcc:
  671. // Offset by 4 and don't encode the lower bit, which is always 0.
  672. assert(STI != nullptr);
  673. if (!STI->getFeatureBits()[ARM::FeatureThumb2]) {
  674. const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
  675. if (FixupDiagnostic) {
  676. Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
  677. return 0;
  678. }
  679. }
  680. return ((Value - 4) >> 1) & 0xff;
  681. case ARM::fixup_arm_pcrel_10_unscaled: {
  682. Value = Value - 8; // ARM fixups offset by an additional word and don't
  683. // need to adjust for the half-word ordering.
  684. bool isAdd = true;
  685. if ((int64_t)Value < 0) {
  686. Value = -Value;
  687. isAdd = false;
  688. }
  689. // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
  690. if (Value >= 256) {
  691. Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
  692. return 0;
  693. }
  694. Value = (Value & 0xf) | ((Value & 0xf0) << 4);
  695. return Value | (isAdd << 23);
  696. }
  697. case ARM::fixup_arm_pcrel_10:
  698. Value = Value - 4; // ARM fixups offset by an additional word and don't
  699. // need to adjust for the half-word ordering.
  700. [[fallthrough]];
  701. case ARM::fixup_t2_pcrel_10: {
  702. // Offset by 4, adjusted by two due to the half-word ordering of thumb.
  703. Value = Value - 4;
  704. bool isAdd = true;
  705. if ((int64_t)Value < 0) {
  706. Value = -Value;
  707. isAdd = false;
  708. }
  709. // These values don't encode the low two bits since they're always zero.
  710. Value >>= 2;
  711. if (Value >= 256) {
  712. Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
  713. return 0;
  714. }
  715. Value |= isAdd << 23;
  716. // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
  717. // swapped.
  718. if (Kind == ARM::fixup_t2_pcrel_10)
  719. return swapHalfWords(Value, Endian == support::little);
  720. return Value;
  721. }
  722. case ARM::fixup_arm_pcrel_9:
  723. Value = Value - 4; // ARM fixups offset by an additional word and don't
  724. // need to adjust for the half-word ordering.
  725. [[fallthrough]];
  726. case ARM::fixup_t2_pcrel_9: {
  727. // Offset by 4, adjusted by two due to the half-word ordering of thumb.
  728. Value = Value - 4;
  729. bool isAdd = true;
  730. if ((int64_t)Value < 0) {
  731. Value = -Value;
  732. isAdd = false;
  733. }
  734. // These values don't encode the low bit since it's always zero.
  735. if (Value & 1) {
  736. Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
  737. return 0;
  738. }
  739. Value >>= 1;
  740. if (Value >= 256) {
  741. Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
  742. return 0;
  743. }
  744. Value |= isAdd << 23;
  745. // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
  746. // swapped.
  747. if (Kind == ARM::fixup_t2_pcrel_9)
  748. return swapHalfWords(Value, Endian == support::little);
  749. return Value;
  750. }
  751. case ARM::fixup_arm_mod_imm:
  752. Value = ARM_AM::getSOImmVal(Value);
  753. if (Value >> 12) {
  754. Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
  755. return 0;
  756. }
  757. return Value;
  758. case ARM::fixup_t2_so_imm: {
  759. Value = ARM_AM::getT2SOImmVal(Value);
  760. if ((int64_t)Value < 0) {
  761. Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
  762. return 0;
  763. }
  764. // Value will contain a 12-bit value broken up into a 4-bit shift in bits
  765. // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
  766. // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
  767. // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
  768. // half-word.
  769. uint64_t EncValue = 0;
  770. EncValue |= (Value & 0x800) << 15;
  771. EncValue |= (Value & 0x700) << 4;
  772. EncValue |= (Value & 0xff);
  773. return swapHalfWords(EncValue, Endian == support::little);
  774. }
  775. case ARM::fixup_bf_branch: {
  776. const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
  777. if (FixupDiagnostic) {
  778. Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
  779. return 0;
  780. }
  781. uint32_t out = (((Value - 4) >> 1) & 0xf) << 23;
  782. return swapHalfWords(out, Endian == support::little);
  783. }
  784. case ARM::fixup_bf_target:
  785. case ARM::fixup_bfl_target:
  786. case ARM::fixup_bfc_target: {
  787. const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
  788. if (FixupDiagnostic) {
  789. Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
  790. return 0;
  791. }
  792. uint32_t out = 0;
  793. uint32_t HighBitMask = (Kind == ARM::fixup_bf_target ? 0xf800 :
  794. Kind == ARM::fixup_bfl_target ? 0x3f800 : 0x800);
  795. out |= (((Value - 4) >> 1) & 0x1) << 11;
  796. out |= (((Value - 4) >> 1) & 0x7fe);
  797. out |= (((Value - 4) >> 1) & HighBitMask) << 5;
  798. return swapHalfWords(out, Endian == support::little);
  799. }
  800. case ARM::fixup_bfcsel_else_target: {
  801. // If this is a fixup of a branch future's else target then it should be a
    // constant MCExpr representing the distance between the branch targeted
  803. // and the instruction after that same branch.
  804. Value = Target.getConstant();
  805. const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
  806. if (FixupDiagnostic) {
  807. Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
  808. return 0;
  809. }
  810. uint32_t out = ((Value >> 2) & 1) << 17;
  811. return swapHalfWords(out, Endian == support::little);
  812. }
  813. case ARM::fixup_wls:
  814. case ARM::fixup_le: {
  815. const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
  816. if (FixupDiagnostic) {
  817. Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
  818. return 0;
  819. }
  820. uint64_t real_value = Value - 4;
  821. uint32_t out = 0;
  822. if (Kind == ARM::fixup_le)
  823. real_value = -real_value;
  824. out |= ((real_value >> 1) & 0x1) << 11;
  825. out |= ((real_value >> 1) & 0x7fe);
  826. return swapHalfWords(out, Endian == support::little);
  827. }
  828. }
  829. }
  830. bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
  831. const MCFixup &Fixup,
  832. const MCValue &Target) {
  833. const MCSymbolRefExpr *A = Target.getSymA();
  834. const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
  835. const unsigned FixupKind = Fixup.getKind();
  836. if (FixupKind >= FirstLiteralRelocationKind)
  837. return true;
  838. if (FixupKind == ARM::fixup_arm_thumb_bl) {
  839. assert(Sym && "How did we resolve this?");
  840. // If the symbol is external the linker will handle it.
  841. // FIXME: Should we handle it as an optimization?
  842. // If the symbol is out of range, produce a relocation and hope the
  843. // linker can handle it. GNU AS produces an error in this case.
  844. if (Sym->isExternal())
  845. return true;
  846. }
  847. // Create relocations for unconditional branches to function symbols with
  848. // different execution mode in ELF binaries.
  849. if (Sym && Sym->isELF()) {
  850. unsigned Type = cast<MCSymbolELF>(Sym)->getType();
  851. if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
  852. if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
  853. return true;
  854. if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
  855. FixupKind == ARM::fixup_arm_thumb_bl ||
  856. FixupKind == ARM::fixup_t2_condbranch ||
  857. FixupKind == ARM::fixup_t2_uncondbranch))
  858. return true;
  859. }
  860. }
  861. // We must always generate a relocation for BL/BLX instructions if we have
  862. // a symbol to reference, as the linker relies on knowing the destination
  863. // symbol's thumb-ness to get interworking right.
  864. if (A && (FixupKind == ARM::fixup_arm_thumb_blx ||
  865. FixupKind == ARM::fixup_arm_blx ||
  866. FixupKind == ARM::fixup_arm_uncondbl ||
  867. FixupKind == ARM::fixup_arm_condbl))
  868. return true;
  869. return false;
  870. }
  871. /// getFixupKindNumBytes - The number of bytes the fixup may change.
  872. static unsigned getFixupKindNumBytes(unsigned Kind) {
  873. switch (Kind) {
  874. default:
  875. llvm_unreachable("Unknown fixup kind!");
  876. case FK_Data_1:
  877. case ARM::fixup_arm_thumb_bcc:
  878. case ARM::fixup_arm_thumb_cp:
  879. case ARM::fixup_thumb_adr_pcrel_10:
  880. return 1;
  881. case FK_Data_2:
  882. case ARM::fixup_arm_thumb_br:
  883. case ARM::fixup_arm_thumb_cb:
  884. case ARM::fixup_arm_mod_imm:
  885. return 2;
  886. case ARM::fixup_arm_pcrel_10_unscaled:
  887. case ARM::fixup_arm_ldst_pcrel_12:
  888. case ARM::fixup_arm_pcrel_10:
  889. case ARM::fixup_arm_pcrel_9:
  890. case ARM::fixup_arm_ldst_abs_12:
  891. case ARM::fixup_arm_adr_pcrel_12:
  892. case ARM::fixup_arm_uncondbl:
  893. case ARM::fixup_arm_condbl:
  894. case ARM::fixup_arm_blx:
  895. case ARM::fixup_arm_condbranch:
  896. case ARM::fixup_arm_uncondbranch:
  897. return 3;
  898. case FK_Data_4:
  899. case ARM::fixup_t2_ldst_pcrel_12:
  900. case ARM::fixup_t2_condbranch:
  901. case ARM::fixup_t2_uncondbranch:
  902. case ARM::fixup_t2_pcrel_10:
  903. case ARM::fixup_t2_pcrel_9:
  904. case ARM::fixup_t2_adr_pcrel_12:
  905. case ARM::fixup_arm_thumb_bl:
  906. case ARM::fixup_arm_thumb_blx:
  907. case ARM::fixup_arm_movt_hi16:
  908. case ARM::fixup_arm_movw_lo16:
  909. case ARM::fixup_t2_movt_hi16:
  910. case ARM::fixup_t2_movw_lo16:
  911. case ARM::fixup_t2_so_imm:
  912. case ARM::fixup_bf_branch:
  913. case ARM::fixup_bf_target:
  914. case ARM::fixup_bfl_target:
  915. case ARM::fixup_bfc_target:
  916. case ARM::fixup_bfcsel_else_target:
  917. case ARM::fixup_wls:
  918. case ARM::fixup_le:
  919. return 4;
  920. case FK_SecRel_2:
  921. return 2;
  922. case FK_SecRel_4:
  923. return 4;
  924. }
  925. }
  926. /// getFixupKindContainerSizeBytes - The number of bytes of the
  927. /// container involved in big endian.
  928. static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
  929. switch (Kind) {
  930. default:
  931. llvm_unreachable("Unknown fixup kind!");
  932. case FK_Data_1:
  933. return 1;
  934. case FK_Data_2:
  935. return 2;
  936. case FK_Data_4:
  937. return 4;
  938. case ARM::fixup_arm_thumb_bcc:
  939. case ARM::fixup_arm_thumb_cp:
  940. case ARM::fixup_thumb_adr_pcrel_10:
  941. case ARM::fixup_arm_thumb_br:
  942. case ARM::fixup_arm_thumb_cb:
  943. // Instruction size is 2 bytes.
  944. return 2;
  945. case ARM::fixup_arm_pcrel_10_unscaled:
  946. case ARM::fixup_arm_ldst_pcrel_12:
  947. case ARM::fixup_arm_pcrel_10:
  948. case ARM::fixup_arm_pcrel_9:
  949. case ARM::fixup_arm_adr_pcrel_12:
  950. case ARM::fixup_arm_uncondbl:
  951. case ARM::fixup_arm_condbl:
  952. case ARM::fixup_arm_blx:
  953. case ARM::fixup_arm_condbranch:
  954. case ARM::fixup_arm_uncondbranch:
  955. case ARM::fixup_t2_ldst_pcrel_12:
  956. case ARM::fixup_t2_condbranch:
  957. case ARM::fixup_t2_uncondbranch:
  958. case ARM::fixup_t2_pcrel_10:
  959. case ARM::fixup_t2_pcrel_9:
  960. case ARM::fixup_t2_adr_pcrel_12:
  961. case ARM::fixup_arm_thumb_bl:
  962. case ARM::fixup_arm_thumb_blx:
  963. case ARM::fixup_arm_movt_hi16:
  964. case ARM::fixup_arm_movw_lo16:
  965. case ARM::fixup_t2_movt_hi16:
  966. case ARM::fixup_t2_movw_lo16:
  967. case ARM::fixup_arm_mod_imm:
  968. case ARM::fixup_t2_so_imm:
  969. case ARM::fixup_bf_branch:
  970. case ARM::fixup_bf_target:
  971. case ARM::fixup_bfl_target:
  972. case ARM::fixup_bfc_target:
  973. case ARM::fixup_bfcsel_else_target:
  974. case ARM::fixup_wls:
  975. case ARM::fixup_le:
  976. // Instruction size is 4 bytes.
  977. return 4;
  978. }
  979. }
  980. void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
  981. const MCValue &Target,
  982. MutableArrayRef<char> Data, uint64_t Value,
  983. bool IsResolved,
  984. const MCSubtargetInfo* STI) const {
  985. unsigned Kind = Fixup.getKind();
  986. if (Kind >= FirstLiteralRelocationKind)
  987. return;
  988. MCContext &Ctx = Asm.getContext();
  989. Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx, STI);
  990. if (!Value)
  991. return; // Doesn't change encoding.
  992. const unsigned NumBytes = getFixupKindNumBytes(Kind);
  993. unsigned Offset = Fixup.getOffset();
  994. assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
  995. // Used to point to big endian bytes.
  996. unsigned FullSizeBytes;
  997. if (Endian == support::big) {
  998. FullSizeBytes = getFixupKindContainerSizeBytes(Kind);
  999. assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
  1000. assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
  1001. }
  1002. // For each byte of the fragment that the fixup touches, mask in the bits from
  1003. // the fixup value. The Value has been "split up" into the appropriate
  1004. // bitfields above.
  1005. for (unsigned i = 0; i != NumBytes; ++i) {
  1006. unsigned Idx = Endian == support::little ? i : (FullSizeBytes - 1 - i);
  1007. Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
  1008. }
  1009. }
namespace CU {

/// Compact unwind encoding values.
/// These mirror the ABI-defined compact-unwind format consumed by the Darwin
/// unwinder; the numeric values must not change.
enum CompactUnwindEncodings {
  // Bits [24:27] select the unwinding mode.
  UNWIND_ARM_MODE_MASK = 0x0F000000,
  // Standard r7 frame with GPR saves only.
  UNWIND_ARM_MODE_FRAME = 0x01000000,
  // r7 frame with additional D-register saves.
  UNWIND_ARM_MODE_FRAME_D = 0x02000000,
  // Frame not expressible compactly; fall back to DWARF unwind info.
  UNWIND_ARM_MODE_DWARF = 0x04000000,

  // Bits [22:23] hold the var-args stack adjustment (0/4/8/12 bytes).
  UNWIND_ARM_FRAME_STACK_ADJUST_MASK = 0x00C00000,

  // One bit per callee-saved GPR pushed below the r7/lr frame.
  UNWIND_ARM_FRAME_FIRST_PUSH_R4 = 0x00000001,
  UNWIND_ARM_FRAME_FIRST_PUSH_R5 = 0x00000002,
  UNWIND_ARM_FRAME_FIRST_PUSH_R6 = 0x00000004,

  UNWIND_ARM_FRAME_SECOND_PUSH_R8 = 0x00000008,
  UNWIND_ARM_FRAME_SECOND_PUSH_R9 = 0x00000010,
  UNWIND_ARM_FRAME_SECOND_PUSH_R10 = 0x00000020,
  UNWIND_ARM_FRAME_SECOND_PUSH_R11 = 0x00000040,
  UNWIND_ARM_FRAME_SECOND_PUSH_R12 = 0x00000080,

  // Bits [8:11] hold the count of saved D-registers (FRAME_D mode).
  UNWIND_ARM_FRAME_D_REG_COUNT_MASK = 0x00000F00,

  // In DWARF mode, the low 24 bits hold the section offset of the FDE.
  UNWIND_ARM_DWARF_SECTION_OFFSET = 0x00FFFFFF
};

} // end CU namespace
  1030. /// Generate compact unwind encoding for the function based on the CFI
  1031. /// instructions. If the CFI instructions describe a frame that cannot be
  1032. /// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
  1033. /// tells the runtime to fallback and unwind using dwarf.
  1034. uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
  1035. ArrayRef<MCCFIInstruction> Instrs) const {
  1036. DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
  1037. // Only armv7k uses CFI based unwinding.
  1038. if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
  1039. return 0;
  1040. // No .cfi directives means no frame.
  1041. if (Instrs.empty())
  1042. return 0;
  1043. // Start off assuming CFA is at SP+0.
  1044. unsigned CFARegister = ARM::SP;
  1045. int CFARegisterOffset = 0;
  1046. // Mark savable registers as initially unsaved
  1047. DenseMap<unsigned, int> RegOffsets;
  1048. int FloatRegCount = 0;
  1049. // Process each .cfi directive and build up compact unwind info.
  1050. for (const MCCFIInstruction &Inst : Instrs) {
  1051. unsigned Reg;
  1052. switch (Inst.getOperation()) {
  1053. case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
  1054. CFARegisterOffset = Inst.getOffset();
  1055. CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
  1056. break;
  1057. case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
  1058. CFARegisterOffset = Inst.getOffset();
  1059. break;
  1060. case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
  1061. CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
  1062. break;
  1063. case MCCFIInstruction::OpOffset: // DW_CFA_offset
  1064. Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
  1065. if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
  1066. RegOffsets[Reg] = Inst.getOffset();
  1067. else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
  1068. RegOffsets[Reg] = Inst.getOffset();
  1069. ++FloatRegCount;
  1070. } else {
  1071. DEBUG_WITH_TYPE("compact-unwind",
  1072. llvm::dbgs() << ".cfi_offset on unknown register="
  1073. << Inst.getRegister() << "\n");
  1074. return CU::UNWIND_ARM_MODE_DWARF;
  1075. }
  1076. break;
  1077. case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
  1078. // Ignore
  1079. break;
  1080. default:
  1081. // Directive not convertable to compact unwind, bail out.
  1082. DEBUG_WITH_TYPE("compact-unwind",
  1083. llvm::dbgs()
  1084. << "CFI directive not compatiable with comact "
  1085. "unwind encoding, opcode=" << Inst.getOperation()
  1086. << "\n");
  1087. return CU::UNWIND_ARM_MODE_DWARF;
  1088. break;
  1089. }
  1090. }
  1091. // If no frame set up, return no unwind info.
  1092. if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
  1093. return 0;
  1094. // Verify standard frame (lr/r7) was used.
  1095. if (CFARegister != ARM::R7) {
  1096. DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
  1097. << CFARegister
  1098. << " instead of r7\n");
  1099. return CU::UNWIND_ARM_MODE_DWARF;
  1100. }
  1101. int StackAdjust = CFARegisterOffset - 8;
  1102. if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
  1103. DEBUG_WITH_TYPE("compact-unwind",
  1104. llvm::dbgs()
  1105. << "LR not saved as standard frame, StackAdjust="
  1106. << StackAdjust
  1107. << ", CFARegisterOffset=" << CFARegisterOffset
  1108. << ", lr save at offset=" << RegOffsets[14] << "\n");
  1109. return CU::UNWIND_ARM_MODE_DWARF;
  1110. }
  1111. if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
  1112. DEBUG_WITH_TYPE("compact-unwind",
  1113. llvm::dbgs() << "r7 not saved as standard frame\n");
  1114. return CU::UNWIND_ARM_MODE_DWARF;
  1115. }
  1116. uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;
  1117. // If var-args are used, there may be a stack adjust required.
  1118. switch (StackAdjust) {
  1119. case 0:
  1120. break;
  1121. case 4:
  1122. CompactUnwindEncoding |= 0x00400000;
  1123. break;
  1124. case 8:
  1125. CompactUnwindEncoding |= 0x00800000;
  1126. break;
  1127. case 12:
  1128. CompactUnwindEncoding |= 0x00C00000;
  1129. break;
  1130. default:
  1131. DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
  1132. << ".cfi_def_cfa stack adjust ("
  1133. << StackAdjust << ") out of range\n");
  1134. return CU::UNWIND_ARM_MODE_DWARF;
  1135. }
  1136. // If r6 is saved, it must be right below r7.
  1137. static struct {
  1138. unsigned Reg;
  1139. unsigned Encoding;
  1140. } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
  1141. {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
  1142. {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
  1143. {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
  1144. {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
  1145. {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
  1146. {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
  1147. {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};
  1148. int CurOffset = -8 - StackAdjust;
  1149. for (auto CSReg : GPRCSRegs) {
  1150. auto Offset = RegOffsets.find(CSReg.Reg);
  1151. if (Offset == RegOffsets.end())
  1152. continue;
  1153. int RegOffset = Offset->second;
  1154. if (RegOffset != CurOffset - 4) {
  1155. DEBUG_WITH_TYPE("compact-unwind",
  1156. llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
  1157. << RegOffset << " but only supported at "
  1158. << CurOffset << "\n");
  1159. return CU::UNWIND_ARM_MODE_DWARF;
  1160. }
  1161. CompactUnwindEncoding |= CSReg.Encoding;
  1162. CurOffset -= 4;
  1163. }
  1164. // If no floats saved, we are done.
  1165. if (FloatRegCount == 0)
  1166. return CompactUnwindEncoding;
  1167. // Switch mode to include D register saving.
  1168. CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
  1169. CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;
  1170. // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
  1171. // but needs coordination with the linker and libunwind.
  1172. if (FloatRegCount > 4) {
  1173. DEBUG_WITH_TYPE("compact-unwind",
  1174. llvm::dbgs() << "unsupported number of D registers saved ("
  1175. << FloatRegCount << ")\n");
  1176. return CU::UNWIND_ARM_MODE_DWARF;
  1177. }
  1178. // Floating point registers must either be saved sequentially, or we defer to
  1179. // DWARF. No gaps allowed here so check that each saved d-register is
  1180. // precisely where it should be.
  1181. static unsigned FPRCSRegs[] = { ARM::D8, ARM::D10, ARM::D12, ARM::D14 };
  1182. for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
  1183. auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
  1184. if (Offset == RegOffsets.end()) {
  1185. DEBUG_WITH_TYPE("compact-unwind",
  1186. llvm::dbgs() << FloatRegCount << " D-regs saved, but "
  1187. << MRI.getName(FPRCSRegs[Idx])
  1188. << " not saved\n");
  1189. return CU::UNWIND_ARM_MODE_DWARF;
  1190. } else if (Offset->second != CurOffset - 8) {
  1191. DEBUG_WITH_TYPE("compact-unwind",
  1192. llvm::dbgs() << FloatRegCount << " D-regs saved, but "
  1193. << MRI.getName(FPRCSRegs[Idx])
  1194. << " saved at " << Offset->second
  1195. << ", expected at " << CurOffset - 8
  1196. << "\n");
  1197. return CU::UNWIND_ARM_MODE_DWARF;
  1198. }
  1199. CurOffset -= 8;
  1200. }
  1201. return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
  1202. }
  1203. static MCAsmBackend *createARMAsmBackend(const Target &T,
  1204. const MCSubtargetInfo &STI,
  1205. const MCRegisterInfo &MRI,
  1206. const MCTargetOptions &Options,
  1207. support::endianness Endian) {
  1208. const Triple &TheTriple = STI.getTargetTriple();
  1209. switch (TheTriple.getObjectFormat()) {
  1210. default:
  1211. llvm_unreachable("unsupported object format");
  1212. case Triple::MachO:
  1213. return new ARMAsmBackendDarwin(T, STI, MRI);
  1214. case Triple::COFF:
  1215. assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
  1216. return new ARMAsmBackendWinCOFF(T, STI.getTargetTriple().isThumb());
  1217. case Triple::ELF:
  1218. assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
  1219. uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  1220. return new ARMAsmBackendELF(T, STI.getTargetTriple().isThumb(), OSABI,
  1221. Endian);
  1222. }
  1223. }
  1224. MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
  1225. const MCSubtargetInfo &STI,
  1226. const MCRegisterInfo &MRI,
  1227. const MCTargetOptions &Options) {
  1228. return createARMAsmBackend(T, STI, MRI, Options, support::little);
  1229. }
  1230. MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
  1231. const MCSubtargetInfo &STI,
  1232. const MCRegisterInfo &MRI,
  1233. const MCTargetOptions &Options) {
  1234. return createARMAsmBackend(T, STI, MRI, Options, support::big);
  1235. }