//===-- ARMUnwindOpAsm.cpp - ARM Unwind Opcodes Assembler -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the unwind opcode assembler for the ARM exception
// handling table.
//
//===----------------------------------------------------------------------===//

#include "ARMUnwindOpAsm.h"
#include "llvm/Support/ARMEHABI.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>

using namespace llvm;

namespace {

/// UnwindOpcodeStreamer - A simple wrapper over SmallVector that emits bytes
/// in MSB-to-LSB order within each uint32_t. For example, the first byte is
/// placed in Vec[3], and the following bytes are placed in 2, 1, 0, 7, 6, 5,
/// 4, 11, 10, 9, 8, and so on.
class UnwindOpcodeStreamer {
private:
  SmallVectorImpl<uint8_t> &Vec;
  size_t Pos = 3;

public:
  UnwindOpcodeStreamer(SmallVectorImpl<uint8_t> &V) : Vec(V) {}

  /// Emit the byte in MSB-to-LSB per uint32_t order.
  void EmitByte(uint8_t elem) {
    Vec[Pos] = elem;
    Pos = (((Pos ^ 0x3u) + 1) ^ 0x3u);
  }
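
  // The Pos update above walks each word from MSB to LSB: XOR with 0x3 maps
  // the in-word byte index 3,2,1,0 to 0,1,2,3, the increment steps it, and
  // the second XOR maps it back, so Pos visits 3, 2, 1, 0, 7, 6, 5, 4, ...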

  /// Emit the size prefix.
  void EmitSize(size_t Size) {
    size_t SizeInWords = (Size + 3) / 4;
    assert(SizeInWords <= 0x100u &&
           "Only 256 additional words are allowed for unwind opcodes");
    EmitByte(static_cast<uint8_t>(SizeInWords - 1));
  }
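
  // For example, an 8-byte (two-word) table emits the prefix byte 2 - 1 == 1,
  // i.e. the number of additional 4-byte words beyond the first.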

  /// Emit the personality index prefix.
  void EmitPersonalityIndex(unsigned PI) {
    assert(PI < ARM::EHABI::NUM_PERSONALITY_INDEX &&
           "Invalid personality prefix");
    EmitByte(ARM::EHABI::EHT_COMPACT | PI);
  }

  /// Fill the rest of the bytes with the FINISH opcode.
  void FillFinishOpcode() {
    while (Pos < Vec.size())
      EmitByte(ARM::EHABI::UNWIND_OPCODE_FINISH);
  }
};

} // end anonymous namespace
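
/// Emit unwind opcodes for .save directives.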
void UnwindOpcodeAssembler::EmitRegSave(uint32_t RegSave) {
  if (RegSave == 0u) {
    // That's the special case for RA PAC (return address authentication).
    EmitInt8(ARM::EHABI::UNWIND_OPCODE_POP_RA_AUTH_CODE);
    return;
  }

  // One-byte opcode to save registers r14 and r11-r4
  if (RegSave & (1u << 4)) {
    // The one-byte opcode always saves r4, so it cannot be used when r4 is
    // not in the .save directive.

    // Compute the consecutive registers from r4 to r11.
    uint32_t Mask = RegSave & 0xff0u;
    uint32_t Range = countTrailingOnes(Mask >> 5); // Exclude r4.
    // Mask off non-consecutive registers. Keep r4.
    Mask &= ~(0xffffffe0u << Range);

    // Emit this opcode when the mask covers all of the registers.
    uint32_t UnmaskedReg = RegSave & 0xfff0u & (~Mask);
    if (UnmaskedReg == 0u) {
      // Pop r[4 : (4 + n)]
      EmitInt8(ARM::EHABI::UNWIND_OPCODE_POP_REG_RANGE_R4 | Range);
      RegSave &= 0x000fu;
    } else if (UnmaskedReg == (1u << 14)) {
      // Pop r[14] + r[4 : (4 + n)]
      EmitInt8(ARM::EHABI::UNWIND_OPCODE_POP_REG_RANGE_R4_R14 | Range);
      RegSave &= 0x000fu;
    }
  }
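
  // For example, ".save {r4-r7, lr}" (with lr mapped to bit 14) yields
  // RegSave == 0x40f0: Range becomes 3 (r5-r7 follow r4) and the only
  // leftover register is r14, so the single byte
  // UNWIND_OPCODE_POP_REG_RANGE_R4_R14 | 3 pops r4-r7 and r14, leaving
  // nothing for the two-byte forms below.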

  // Two-byte opcode to save registers r15-r4
  if ((RegSave & 0xfff0u) != 0)
    EmitInt16(ARM::EHABI::UNWIND_OPCODE_POP_REG_MASK_R4 | (RegSave >> 4));

  // Opcode to save registers r3-r0
  if ((RegSave & 0x000fu) != 0)
    EmitInt16(ARM::EHABI::UNWIND_OPCODE_POP_REG_MASK | (RegSave & 0x000fu));
}

/// Emit unwind opcodes for .vsave directives.
void UnwindOpcodeAssembler::EmitVFPRegSave(uint32_t VFPRegSave) {
  // We only have 4 bits to save the offset in the opcode so look at the lower
  // and upper 16 bits separately.
  for (uint32_t Regs : {VFPRegSave & 0xffff0000u, VFPRegSave & 0x0000ffffu}) {
    while (Regs) {
      // Now look for a run of set bits. Remember the MSB and LSB of the run.
      auto RangeMSB = 32 - countLeadingZeros(Regs);
      auto RangeLen = countLeadingOnes(Regs << (32 - RangeMSB));
      auto RangeLSB = RangeMSB - RangeLen;

      int Opcode = RangeLSB >= 16
                       ? ARM::EHABI::UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD_D16
                       : ARM::EHABI::UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD;

      EmitInt16(Opcode | ((RangeLSB % 16) << 4) | (RangeLen - 1));
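
      // For example, ".vsave {d8-d11}" (dN mapped to bit N) gives
      // Regs == 0x0f00, so RangeLSB == 8 and RangeLen == 4, and the FSTMFDD
      // form is emitted with the fields (8 << 4) | 3.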

      // Zero out bits we're done with.
      Regs &= ~(-1u << RangeLSB);
    }
  }
}

/// Emit unwind opcodes to copy the address in the source register to $sp.
void UnwindOpcodeAssembler::EmitSetSP(uint16_t Reg) {
  EmitInt8(ARM::EHABI::UNWIND_OPCODE_SET_VSP | Reg);
}

/// Emit unwind opcodes to add an offset to $sp.
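/// As implemented below, offsets of up to 0x200 bytes are encoded with one or
/// two UNWIND_OPCODE_INC_VSP bytes (each byte covers (imm6 << 2) + 4 bytes,
/// so ".pad #16" encodes imm6 == 3), larger offsets use
/// UNWIND_OPCODE_INC_VSP_ULEB128 with a ULEB128 payload of
/// (Offset - 0x204) >> 2, and negative offsets use UNWIND_OPCODE_DEC_VSP.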
void UnwindOpcodeAssembler::EmitSPOffset(int64_t Offset) {
  if (Offset > 0x200) {
    uint8_t Buff[16];
    Buff[0] = ARM::EHABI::UNWIND_OPCODE_INC_VSP_ULEB128;
    size_t ULEBSize = encodeULEB128((Offset - 0x204) >> 2, Buff + 1);
    emitBytes(Buff, ULEBSize + 1);
  } else if (Offset > 0) {
    if (Offset > 0x100) {
      EmitInt8(ARM::EHABI::UNWIND_OPCODE_INC_VSP | 0x3fu);
      Offset -= 0x100;
    }
    EmitInt8(ARM::EHABI::UNWIND_OPCODE_INC_VSP |
             static_cast<uint8_t>((Offset - 4) >> 2));
  } else if (Offset < 0) {
    while (Offset < -0x100) {
      EmitInt8(ARM::EHABI::UNWIND_OPCODE_DEC_VSP | 0x3fu);
      Offset += 0x100;
    }
    EmitInt8(ARM::EHABI::UNWIND_OPCODE_DEC_VSP |
             static_cast<uint8_t>(((-Offset) - 4) >> 2));
  }
}
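
/// Finalize the unwind opcode stream into Result, selecting a compact
/// personality routine when none was specified. For example, two recorded
/// opcodes with no personality fit __aeabi_unwind_cpp_pr0 and produce a
/// single word laid out as [ 0x80 , OP1 , OP2 , FINISH ].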
void UnwindOpcodeAssembler::Finalize(unsigned &PersonalityIndex,
                                     SmallVectorImpl<uint8_t> &Result) {
  UnwindOpcodeStreamer OpStreamer(Result);

  if (HasPersonality) {
    // User-specified personality routine: [ SIZE , OP1 , OP2 , ... ]
    PersonalityIndex = ARM::EHABI::NUM_PERSONALITY_INDEX;
    size_t TotalSize = Ops.size() + 1;
    size_t RoundUpSize = (TotalSize + 3) / 4 * 4;
    Result.resize(RoundUpSize);
    OpStreamer.EmitSize(RoundUpSize);
  } else {
    // If no personality index is specified, select one based on the number of
    // unwind opcodes.
    if (PersonalityIndex == ARM::EHABI::NUM_PERSONALITY_INDEX)
      PersonalityIndex = (Ops.size() <= 3) ? ARM::EHABI::AEABI_UNWIND_CPP_PR0
                                           : ARM::EHABI::AEABI_UNWIND_CPP_PR1;

    if (PersonalityIndex == ARM::EHABI::AEABI_UNWIND_CPP_PR0) {
      // __aeabi_unwind_cpp_pr0: [ 0x80 , OP1 , OP2 , OP3 ]
      assert(Ops.size() <= 3 && "too many opcodes for __aeabi_unwind_cpp_pr0");
      Result.resize(4);
      OpStreamer.EmitPersonalityIndex(PersonalityIndex);
    } else {
      // __aeabi_unwind_cpp_pr{1,2}: [ {0x81,0x82} , SIZE , OP1 , OP2 , ... ]
      size_t TotalSize = Ops.size() + 2;
      size_t RoundUpSize = (TotalSize + 3) / 4 * 4;
      Result.resize(RoundUpSize);
      OpStreamer.EmitPersonalityIndex(PersonalityIndex);
      OpStreamer.EmitSize(RoundUpSize);
    }
  }

  // Copy the unwind opcodes
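  // Each [OpBegins[i-1], OpBegins[i]) range holds the bytes recorded for one
  // directive; the ranges are replayed from the last directive to the first,
  // so the unwind table undoes the prologue in reverse order.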
  for (size_t i = OpBegins.size() - 1; i > 0; --i)
    for (size_t j = OpBegins[i - 1], end = OpBegins[i]; j < end; ++j)
      OpStreamer.EmitByte(Ops[j]);

  // Emit the padding FINISH opcodes if the size is not a multiple of 4.
  OpStreamer.FillFinishOpcode();

  // Reset the assembler state
  Reset();
}